map 138 arch/alpha/include/asm/hwrpb.h struct vf_map_struct map[1]; map 163 arch/alpha/mm/init.c - crb->map[0].va); map 166 arch/alpha/mm/init.c - crb->map[0].va); map 199 arch/alpha/mm/init.c nr_pages += crb->map[i].count; map 211 arch/alpha/mm/init.c unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT; map 212 arch/alpha/mm/init.c crb->map[i].va = vaddr; map 213 arch/alpha/mm/init.c for (j = 0; j < crb->map[i].count; ++j) { map 156 arch/arc/kernel/intc-arcv2.c .map = arcv2_irq_map, map 104 arch/arc/kernel/intc-compact.c .map = arc_intc_domain_map, map 369 arch/arc/kernel/mcip.c .map = idu_irq_map, map 229 arch/arc/plat-axs10x/axs10x.c axs101_set_memmap(void __iomem *base, const struct aperture map[16]) map 236 arch/arc/plat-axs10x/axs10x.c slave_select |= map[i].slave_sel << (i << 2); map 237 arch/arc/plat-axs10x/axs10x.c slave_offset |= map[i].slave_off << (i << 2); map 245 arch/arc/plat-axs10x/axs10x.c slave_select |= map[i+8].slave_sel << (i << 2); map 246 arch/arc/plat-axs10x/axs10x.c slave_offset |= map[i+8].slave_off << (i << 2); map 32 arch/arc/plat-eznps/include/plat/mtm.h #define get_thread(map) map.thread map 40 arch/arc/plat-eznps/include/plat/mtm.h #define get_thread(map) 0 map 377 arch/arm/common/sa1111.c .map = sa1111_irqdomain_map, map 60 arch/arm/include/asm/mach/map.h #define iotable_init(map,num) do { } while (0) map 199 arch/arm/mach-omap2/pm44xx.c static inline int omap4plus_init_static_deps(const struct static_dep_map *map) map 204 arch/arm/mach-omap2/pm44xx.c if (!map) map 207 arch/arm/mach-omap2/pm44xx.c while (map->from) { map 208 arch/arm/mach-omap2/pm44xx.c from = clkdm_lookup(map->from); map 209 arch/arm/mach-omap2/pm44xx.c to = clkdm_lookup(map->to); map 212 arch/arm/mach-omap2/pm44xx.c map->from, map->to); map 218 arch/arm/mach-omap2/pm44xx.c map->from, map->to, ret); map 222 arch/arm/mach-omap2/pm44xx.c map++; map 138 arch/arm/mach-pxa/irq.c .map = pxa_irq_map, map 95 arch/arm/mach-pxa/pxa_cplds_irqs.c .map = cplds_irq_domain_map, map 39 arch/arm/mach-realview/platsmp-dt.c struct regmap *map; map 68 arch/arm/mach-realview/platsmp-dt.c map = syscon_node_to_regmap(np); map 69 arch/arm/mach-realview/platsmp-dt.c if (IS_ERR(map)) { map 74 arch/arm/mach-realview/platsmp-dt.c regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET, map 71 arch/arm/mach-shmobile/pm-rcar-gen2.c goto map; map 81 arch/arm/mach-shmobile/pm-rcar-gen2.c map: map 431 arch/arm/mm/dma-mapping.c struct map_desc map; map 439 arch/arm/mm/dma-mapping.c map.pfn = __phys_to_pfn(start); map 440 arch/arm/mm/dma-mapping.c map.virtual = __phys_to_virt(start); map 441 arch/arm/mm/dma-mapping.c map.length = end - start; map 442 arch/arm/mm/dma-mapping.c map.type = MT_MEMORY_DMA_READY; map 460 arch/arm/mm/dma-mapping.c iotable_init(&map, 1); map 1120 arch/arm/mm/mmu.c struct map_desc map; map 1122 arch/arm/mm/mmu.c debug_ll_addr(&map.pfn, &map.virtual); map 1123 arch/arm/mm/mmu.c if (!map.pfn || !map.virtual) map 1125 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(map.pfn); map 1126 arch/arm/mm/mmu.c map.virtual &= PAGE_MASK; map 1127 arch/arm/mm/mmu.c map.length = PAGE_SIZE; map 1128 arch/arm/mm/mmu.c map.type = MT_DEVICE; map 1129 arch/arm/mm/mmu.c iotable_init(&map, 1); map 1338 arch/arm/mm/mmu.c struct map_desc map; map 1360 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); map 1361 arch/arm/mm/mmu.c map.virtual = MODULES_VADDR; map 1362 arch/arm/mm/mmu.c map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK; map 1363 arch/arm/mm/mmu.c map.type = MT_ROM; 
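(Aside: the arch/arm/mm entries above and immediately below all follow the same struct map_desc pattern: a physical range is described through the pfn, virtual, length and type fields and then handed to iotable_init() or create_mapping() to install a static kernel mapping. A minimal sketch of that pattern, assuming made-up addresses and a hypothetical board_map_io() helper, might look like this:

#include <linux/init.h>
#include <linux/sizes.h>
#include <asm/memory.h>		/* __phys_to_pfn() */
#include <asm/mach/map.h>	/* struct map_desc, iotable_init(), MT_DEVICE */

/*
 * Illustrative only: statically map 1 MiB of device registers. The field
 * names and the iotable_init() call mirror the arch/arm/mm lines listed
 * here; the physical/virtual addresses and the function name are invented.
 */
static void __init board_map_io(void)
{
	struct map_desc map;

	map.pfn     = __phys_to_pfn(0x40000000UL);	/* example physical base */
	map.virtual = 0xf8000000UL;			/* example virtual base */
	map.length  = SZ_1M;
	map.type    = MT_DEVICE;			/* device memory mapping type */

	iotable_init(&map, 1);				/* install one static mapping */
}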
map 1364 arch/arm/mm/mmu.c create_mapping(&map); map 1371 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); map 1372 arch/arm/mm/mmu.c map.virtual = FLUSH_BASE; map 1373 arch/arm/mm/mmu.c map.length = SZ_1M; map 1374 arch/arm/mm/mmu.c map.type = MT_CACHECLEAN; map 1375 arch/arm/mm/mmu.c create_mapping(&map); map 1378 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); map 1379 arch/arm/mm/mmu.c map.virtual = FLUSH_BASE_MINICACHE; map 1380 arch/arm/mm/mmu.c map.length = SZ_1M; map 1381 arch/arm/mm/mmu.c map.type = MT_MINICLEAN; map 1382 arch/arm/mm/mmu.c create_mapping(&map); map 1390 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map 1391 arch/arm/mm/mmu.c map.virtual = 0xffff0000; map 1392 arch/arm/mm/mmu.c map.length = PAGE_SIZE; map 1394 arch/arm/mm/mmu.c map.type = MT_HIGH_VECTORS; map 1396 arch/arm/mm/mmu.c map.type = MT_LOW_VECTORS; map 1398 arch/arm/mm/mmu.c create_mapping(&map); map 1401 arch/arm/mm/mmu.c map.virtual = 0; map 1402 arch/arm/mm/mmu.c map.length = PAGE_SIZE * 2; map 1403 arch/arm/mm/mmu.c map.type = MT_LOW_VECTORS; map 1404 arch/arm/mm/mmu.c create_mapping(&map); map 1408 arch/arm/mm/mmu.c map.pfn += 1; map 1409 arch/arm/mm/mmu.c map.virtual = 0xffff0000 + PAGE_SIZE; map 1410 arch/arm/mm/mmu.c map.length = PAGE_SIZE; map 1411 arch/arm/mm/mmu.c map.type = MT_LOW_VECTORS; map 1412 arch/arm/mm/mmu.c create_mapping(&map); map 1460 arch/arm/mm/mmu.c struct map_desc map; map 1471 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(start); map 1472 arch/arm/mm/mmu.c map.virtual = __phys_to_virt(start); map 1473 arch/arm/mm/mmu.c map.length = end - start; map 1474 arch/arm/mm/mmu.c map.type = MT_MEMORY_RWX; map 1476 arch/arm/mm/mmu.c create_mapping(&map); map 1478 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(start); map 1479 arch/arm/mm/mmu.c map.virtual = __phys_to_virt(start); map 1480 arch/arm/mm/mmu.c map.length = end - start; map 1481 arch/arm/mm/mmu.c map.type = MT_MEMORY_RW; map 1483 arch/arm/mm/mmu.c create_mapping(&map); map 1487 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(start); map 1488 arch/arm/mm/mmu.c map.virtual = __phys_to_virt(start); map 1489 arch/arm/mm/mmu.c map.length = kernel_x_start - start; map 1490 arch/arm/mm/mmu.c map.type = MT_MEMORY_RW; map 1492 arch/arm/mm/mmu.c create_mapping(&map); map 1495 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(kernel_x_start); map 1496 arch/arm/mm/mmu.c map.virtual = __phys_to_virt(kernel_x_start); map 1497 arch/arm/mm/mmu.c map.length = kernel_x_end - kernel_x_start; map 1498 arch/arm/mm/mmu.c map.type = MT_MEMORY_RWX; map 1500 arch/arm/mm/mmu.c create_mapping(&map); map 1503 arch/arm/mm/mmu.c map.pfn = __phys_to_pfn(kernel_x_end); map 1504 arch/arm/mm/mmu.c map.virtual = __phys_to_virt(kernel_x_end); map 1505 arch/arm/mm/mmu.c map.length = end - kernel_x_end; map 1506 arch/arm/mm/mmu.c map.type = MT_MEMORY_RW; map 1508 arch/arm/mm/mmu.c create_mapping(&map); map 1621 arch/arm/mm/mmu.c struct map_desc map; map 1623 arch/arm/mm/mmu.c map.virtual = fix_to_virt(i); map 1624 arch/arm/mm/mmu.c pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual); map 1631 arch/arm/mm/mmu.c map.pfn = pte_pfn(*pte); map 1632 arch/arm/mm/mmu.c map.type = MT_DEVICE; map 1633 arch/arm/mm/mmu.c map.length = PAGE_SIZE; map 1635 arch/arm/mm/mmu.c create_mapping(&map); map 1166 arch/arm/net/bpf_jit_32.c BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) > map 1168 arch/arm/net/bpf_jit_32.c off = offsetof(struct bpf_array, map.max_entries); map 457 arch/arm/plat-pxa/include/plat/mfp.h void __init 
mfp_init_addr(struct mfp_addr_map *map); map 240 arch/arm/plat-pxa/mfp.c void __init mfp_init_addr(struct mfp_addr_map *map) map 249 arch/arm/plat-pxa/mfp.c mfpr_off_readback = map[0].offset; map 251 arch/arm/plat-pxa/mfp.c for (p = map; p->start != MFP_PIN_INVALID; p++) { map 101 arch/arm64/kernel/acpi.c void __init __acpi_unmap_table(void __iomem *map, unsigned long size) map 103 arch/arm64/kernel/acpi.c if (!map || !size) map 106 arch/arm64/kernel/acpi.c early_memunmap(map, size); map 688 arch/arm64/kernel/fpsimd.c static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX)) map 693 arch/arm64/kernel/fpsimd.c bitmap_zero(map, SVE_VQ_MAX); map 702 arch/arm64/kernel/fpsimd.c set_bit(__vq_to_bit(vq), map); map 260 arch/arm64/net/bpf_jit_comp.c off = offsetof(struct bpf_array, map.max_entries); map 87 arch/c6x/kernel/irq.c .map = core_domain_map, map 138 arch/c6x/platforms/megamod-pic.c .map = megamod_map, map 179 arch/c6x/platforms/megamod-pic.c const __be32 *map; map 183 arch/c6x/platforms/megamod-pic.c map = of_get_property(np, "ti,c64x+megamod-pic-mux", &maplen); map 184 arch/c6x/platforms/megamod-pic.c if (map) { map 190 arch/c6x/platforms/megamod-pic.c val = be32_to_cpup(map); map 193 arch/c6x/platforms/megamod-pic.c ++map; map 14 arch/csky/include/asm/asid.h unsigned long *map; map 30 arch/csky/mm/asid.c bitmap_clear(info->map, 0, NUM_CTXT_ASIDS(info)); map 43 arch/csky/mm/asid.c __set_bit(asid2idx(info, asid), info->map); map 100 arch/csky/mm/asid.c if (!__test_and_set_bit(asid2idx(info, asid), info->map)) map 111 arch/csky/mm/asid.c asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), cur_idx); map 121 arch/csky/mm/asid.c asid = find_next_zero_bit(info->map, NUM_CTXT_ASIDS(info), 1); map 124 arch/csky/mm/asid.c __set_bit(asid, info->map); map 181 arch/csky/mm/asid.c info->map = kcalloc(BITS_TO_LONGS(NUM_CTXT_ASIDS(info)), map 182 arch/csky/mm/asid.c sizeof(*info->map), GFP_KERNEL); map 183 arch/csky/mm/asid.c if (!info->map) map 78 arch/ia64/kernel/acpi.c void __init __acpi_unmap_table(void __iomem *map, unsigned long size) map 876 arch/mips/alchemy/common/irq.c static void __init au1000_init_irq(struct alchemy_irqmap *map) map 900 arch/mips/alchemy/common/irq.c while (map->irq != -1) { map 901 arch/mips/alchemy/common/irq.c irq_nr = map->irq; map 910 arch/mips/alchemy/common/irq.c if (map->prio == 0) map 913 arch/mips/alchemy/common/irq.c au1x_ic_settype(irq_get_irq_data(irq_nr), map->type); map 914 arch/mips/alchemy/common/irq.c ++map; map 120 arch/mips/ath25/ar2315.c .map = ar2315_misc_irq_map, map 125 arch/mips/ath25/ar5312.c .map = ar5312_misc_irq_map, map 30 arch/mips/cavium-octeon/flash_setup.c static map_word octeon_flash_map_read(struct map_info *map, unsigned long ofs) map 35 arch/mips/cavium-octeon/flash_setup.c r = inline_map_read(map, ofs); map 41 arch/mips/cavium-octeon/flash_setup.c static void octeon_flash_map_write(struct map_info *map, const map_word datum, map 45 arch/mips/cavium-octeon/flash_setup.c inline_map_write(map, datum, ofs); map 49 arch/mips/cavium-octeon/flash_setup.c static void octeon_flash_map_copy_from(struct map_info *map, void *to, map 53 arch/mips/cavium-octeon/flash_setup.c inline_map_copy_from(map, to, from, len); map 57 arch/mips/cavium-octeon/flash_setup.c static void octeon_flash_map_copy_to(struct map_info *map, unsigned long to, map 61 arch/mips/cavium-octeon/flash_setup.c inline_map_copy_to(map, to, from, len); map 1278 arch/mips/cavium-octeon/octeon-irq.c .map = octeon_irq_ciu_map, map 1284 arch/mips/cavium-octeon/octeon-irq.c .map 
= octeon_irq_gpio_map, map 1963 arch/mips/cavium-octeon/octeon-irq.c .map = octeon_irq_ciu2_map, map 2215 arch/mips/cavium-octeon/octeon-irq.c .map = octeon_irq_cib_map, map 2567 arch/mips/cavium-octeon/octeon-irq.c .map = octeon_irq_ciu3_map, map 27 arch/mips/include/asm/mach-loongson64/boot_param.h } map[LOONGSON3_BOOT_MEM_MAP_MAX]; map 239 arch/mips/include/asm/mips-gic.h GIC_VX_ACCESSOR_RW_INTR_REG(32, 0x040, 0x4, map) map 206 arch/mips/include/asm/msa.h __BUILD_MSA_CTL_REG(map, 6) map 103 arch/mips/include/asm/netlogic/psb-bootinfo.h } map[NLM_BOOT_MEM_MAP_MAX]; map 139 arch/mips/include/asm/txx9/rbtx4939.h void (*map_init)(struct map_info *map); map 334 arch/mips/lantiq/irq.c .map = icu_map, map 65 arch/mips/loongson64/common/mem.c node_id = loongson_memmap->map[i].node_id; map 66 arch/mips/loongson64/common/mem.c mem_type = loongson_memmap->map[i].mem_type; map 73 arch/mips/loongson64/common/mem.c memblock_add(loongson_memmap->map[i].mem_start, map 74 arch/mips/loongson64/common/mem.c (u64)loongson_memmap->map[i].mem_size << 20); map 77 arch/mips/loongson64/common/mem.c memblock_add(loongson_memmap->map[i].mem_start, map 78 arch/mips/loongson64/common/mem.c (u64)loongson_memmap->map[i].mem_size << 20); map 81 arch/mips/loongson64/common/mem.c memblock_reserve(loongson_memmap->map[i].mem_start, map 82 arch/mips/loongson64/common/mem.c (u64)loongson_memmap->map[i].mem_size << 20); map 127 arch/mips/loongson64/loongson-3/numa.c node_id = loongson_memmap->map[i].node_id; map 131 arch/mips/loongson64/loongson-3/numa.c mem_type = loongson_memmap->map[i].mem_type; map 132 arch/mips/loongson64/loongson-3/numa.c mem_size = loongson_memmap->map[i].mem_size; map 133 arch/mips/loongson64/loongson-3/numa.c mem_start = loongson_memmap->map[i].mem_start; map 71 arch/mips/mti-malta/malta-dtshim.c enum mem_map map) map 95 arch/mips/mti-malta/malta-dtshim.c if (map == MEM_MAP_V2) { map 172 arch/mips/mti-malta/malta-init.c u32 start, map, mask, data; map 189 arch/mips/mti-malta/malta-init.c map = GT_READ(GT_PCI0IOREMAP_OFS); map 190 arch/mips/mti-malta/malta-init.c if ((start & map) != 0) { map 191 arch/mips/mti-malta/malta-init.c map &= ~start; map 192 arch/mips/mti-malta/malta-init.c GT_WRITE(GT_PCI0IOREMAP_OFS, map); map 614 arch/mips/net/ebpf_jit.c off = offsetof(struct bpf_array, map.max_entries); map 81 arch/mips/netlogic/xlp/setup.c uint64_t map[16]; map 84 arch/mips/netlogic/xlp/setup.c n = nlm_get_dram_map(-1, map, ARRAY_SIZE(map)); /* -1 : all nodes */ map 87 arch/mips/netlogic/xlp/setup.c if (map[i] <= 0x10000000 && map[i+1] > 0x10000000) map 88 arch/mips/netlogic/xlp/setup.c map[i+1] = 0x10000000; map 89 arch/mips/netlogic/xlp/setup.c if (map[i] > 0x10000000 && map[i] < 0x20000000) map 90 arch/mips/netlogic/xlp/setup.c map[i] = 0x20000000; map 92 arch/mips/netlogic/xlp/setup.c add_memory_region(map[i], map[i+1] - map[i], BOOT_MEM_RAM); map 152 arch/mips/netlogic/xlr/setup.c if (bootm->map[i].type != BOOT_MEM_RAM) map 154 arch/mips/netlogic/xlr/setup.c start = bootm->map[i].addr; map 155 arch/mips/netlogic/xlr/setup.c size = bootm->map[i].size; map 389 arch/mips/pci/pci-ar2315.c .map = ar2315_pci_irq_map, map 81 arch/mips/pci/pci-malta.c resource_size_t start, end, map, start1, end1, map1, map2, map3, mask; map 105 arch/mips/pci/pci-malta.c map = GT_READ(GT_PCI0M0REMAP_OFS); map 115 arch/mips/pci/pci-malta.c map = map1; map 119 arch/mips/pci/pci-malta.c BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && map 123 arch/mips/pci/pci-malta.c gt64120_controller.mem_offset = (start & mask) 
- (map & mask); map 132 arch/mips/pci/pci-malta.c map = GT_READ(GT_PCI0IOREMAP_OFS); map 136 arch/mips/pci/pci-malta.c BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) && map 138 arch/mips/pci/pci-malta.c gt64120_io_resource.start = map & mask; map 139 arch/mips/pci/pci-malta.c gt64120_io_resource.end = (map & mask) | ~mask; map 151 arch/mips/pci/pci-malta.c map = BONITO_PCIMAP; map 159 arch/mips/pci/pci-malta.c map = map1; map 163 arch/mips/pci/pci-malta.c map = map2; map 168 arch/mips/pci/pci-malta.c map = map1; map 176 arch/mips/pci/pci-malta.c BONITO_PCIMAP_WINBASE(map); map 188 arch/mips/pci/pci-malta.c MSC_READ(MSC01_PCI_SC2PMMAPL, map); map 191 arch/mips/pci/pci-malta.c msc_controller.mem_offset = (start & mask) - (map & mask); map 199 arch/mips/pci/pci-malta.c MSC_READ(MSC01_PCI_SC2PIOMAPL, map); map 200 arch/mips/pci/pci-malta.c msc_io_resource.start = map & mask; map 201 arch/mips/pci/pci-malta.c msc_io_resource.end = (map & mask) | ~mask; map 196 arch/mips/pci/pci-rt3883.c .map = rt3883_pci_irq_map, map 143 arch/mips/ralink/irq.c .map = intc_map, map 309 arch/mips/txx9/rbtx4939/setup.c static map_word rbtx4939_flash_read16(struct map_info *map, unsigned long ofs) map 314 arch/mips/txx9/rbtx4939/setup.c r.x[0] = __raw_readw(map->virt + ofs); map 318 arch/mips/txx9/rbtx4939/setup.c static void rbtx4939_flash_write16(struct map_info *map, const map_word datum, map 322 arch/mips/txx9/rbtx4939/setup.c __raw_writew(datum.x[0], map->virt + ofs); map 326 arch/mips/txx9/rbtx4939/setup.c static void rbtx4939_flash_copy_from(struct map_info *map, void *to, map 333 arch/mips/txx9/rbtx4939/setup.c from += (unsigned long)map->virt; map 367 arch/mips/txx9/rbtx4939/setup.c static void rbtx4939_flash_map_init(struct map_info *map) map 369 arch/mips/txx9/rbtx4939/setup.c map->read = rbtx4939_flash_read16; map 370 arch/mips/txx9/rbtx4939/setup.c map->write = rbtx4939_flash_write16; map 371 arch/mips/txx9/rbtx4939/setup.c map->copy_from = rbtx4939_flash_copy_from; map 58 arch/nios2/kernel/irq.c .map = irq_map, map 222 arch/powerpc/include/asm/kvm_book3s.h struct kvm_memory_slot *memslot, unsigned long *map); map 259 arch/powerpc/include/asm/kvm_book3s.h struct kvm_memory_slot *memslot, unsigned long *map); map 262 arch/powerpc/include/asm/kvm_book3s.h unsigned long *map); map 559 arch/powerpc/include/asm/kvm_book3s_64.h static inline void set_dirty_bits(unsigned long *map, unsigned long i, map 564 arch/powerpc/include/asm/kvm_book3s_64.h memset((char *)map + i / 8, 0xff, npages / 8); map 567 arch/powerpc/include/asm/kvm_book3s_64.h __set_bit_le(i, map); map 570 arch/powerpc/include/asm/kvm_book3s_64.h static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i, map 574 arch/powerpc/include/asm/kvm_book3s_64.h memset((char *)map + i / 8, 0xff, npages / 8); map 577 arch/powerpc/include/asm/kvm_book3s_64.h set_bit_le(i, map); map 100 arch/powerpc/include/asm/ps3.h int (*map)(struct ps3_dma_region *, map 88 arch/powerpc/include/asm/xics.h int (*map)(struct ics *ics, unsigned int virq); map 81 arch/powerpc/kvm/book3s_32_mmu_host.c struct kvmppc_sid_map *map; map 88 arch/powerpc/kvm/book3s_32_mmu_host.c map = &to_book3s(vcpu)->sid_map[sid_map_mask]; map 89 arch/powerpc/kvm/book3s_32_mmu_host.c if (map->guest_vsid == gvsid) { map 91 arch/powerpc/kvm/book3s_32_mmu_host.c gvsid, map->host_vsid); map 92 arch/powerpc/kvm/book3s_32_mmu_host.c return map; map 95 arch/powerpc/kvm/book3s_32_mmu_host.c map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; map 96 
arch/powerpc/kvm/book3s_32_mmu_host.c if (map->guest_vsid == gvsid) { map 98 arch/powerpc/kvm/book3s_32_mmu_host.c gvsid, map->host_vsid); map 99 arch/powerpc/kvm/book3s_32_mmu_host.c return map; map 136 arch/powerpc/kvm/book3s_32_mmu_host.c struct kvmppc_sid_map *map; map 159 arch/powerpc/kvm/book3s_32_mmu_host.c map = find_sid_vsid(vcpu, vsid); map 160 arch/powerpc/kvm/book3s_32_mmu_host.c if (!map) { map 162 arch/powerpc/kvm/book3s_32_mmu_host.c map = find_sid_vsid(vcpu, vsid); map 164 arch/powerpc/kvm/book3s_32_mmu_host.c BUG_ON(!map); map 166 arch/powerpc/kvm/book3s_32_mmu_host.c vsid = map->host_vsid; map 265 arch/powerpc/kvm/book3s_32_mmu_host.c struct kvmppc_sid_map *map; map 280 arch/powerpc/kvm/book3s_32_mmu_host.c map = &to_book3s(vcpu)->sid_map[sid_map_mask]; map 293 arch/powerpc/kvm/book3s_32_mmu_host.c map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next]; map 296 arch/powerpc/kvm/book3s_32_mmu_host.c map->guest_vsid = gvsid; map 297 arch/powerpc/kvm/book3s_32_mmu_host.c map->valid = true; map 299 arch/powerpc/kvm/book3s_32_mmu_host.c return map; map 307 arch/powerpc/kvm/book3s_32_mmu_host.c struct kvmppc_sid_map *map; map 318 arch/powerpc/kvm/book3s_32_mmu_host.c map = find_sid_vsid(vcpu, gvsid); map 319 arch/powerpc/kvm/book3s_32_mmu_host.c if (!map) map 320 arch/powerpc/kvm/book3s_32_mmu_host.c map = create_sid_map(vcpu, gvsid); map 322 arch/powerpc/kvm/book3s_32_mmu_host.c map->guest_esid = esid; map 323 arch/powerpc/kvm/book3s_32_mmu_host.c sr = map->host_vsid | SR_KP; map 47 arch/powerpc/kvm/book3s_64_mmu_host.c struct kvmppc_sid_map *map; map 54 arch/powerpc/kvm/book3s_64_mmu_host.c map = &to_book3s(vcpu)->sid_map[sid_map_mask]; map 55 arch/powerpc/kvm/book3s_64_mmu_host.c if (map->valid && (map->guest_vsid == gvsid)) { map 56 arch/powerpc/kvm/book3s_64_mmu_host.c trace_kvm_book3s_slb_found(gvsid, map->host_vsid); map 57 arch/powerpc/kvm/book3s_64_mmu_host.c return map; map 60 arch/powerpc/kvm/book3s_64_mmu_host.c map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; map 61 arch/powerpc/kvm/book3s_64_mmu_host.c if (map->valid && (map->guest_vsid == gvsid)) { map 62 arch/powerpc/kvm/book3s_64_mmu_host.c trace_kvm_book3s_slb_found(gvsid, map->host_vsid); map 63 arch/powerpc/kvm/book3s_64_mmu_host.c return map; map 81 arch/powerpc/kvm/book3s_64_mmu_host.c struct kvmppc_sid_map *map; map 107 arch/powerpc/kvm/book3s_64_mmu_host.c map = find_sid_vsid(vcpu, vsid); map 108 arch/powerpc/kvm/book3s_64_mmu_host.c if (!map) { map 111 arch/powerpc/kvm/book3s_64_mmu_host.c map = find_sid_vsid(vcpu, vsid); map 113 arch/powerpc/kvm/book3s_64_mmu_host.c if (!map) { map 121 arch/powerpc/kvm/book3s_64_mmu_host.c vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M); map 226 arch/powerpc/kvm/book3s_64_mmu_host.c struct kvmppc_sid_map *map; map 241 arch/powerpc/kvm/book3s_64_mmu_host.c map = &to_book3s(vcpu)->sid_map[sid_map_mask]; map 258 arch/powerpc/kvm/book3s_64_mmu_host.c map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, map 261 arch/powerpc/kvm/book3s_64_mmu_host.c map->guest_vsid = gvsid; map 262 arch/powerpc/kvm/book3s_64_mmu_host.c map->valid = true; map 264 arch/powerpc/kvm/book3s_64_mmu_host.c trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid); map 266 arch/powerpc/kvm/book3s_64_mmu_host.c return map; map 318 arch/powerpc/kvm/book3s_64_mmu_host.c struct kvmppc_sid_map *map; map 330 arch/powerpc/kvm/book3s_64_mmu_host.c map = find_sid_vsid(vcpu, gvsid); map 331 arch/powerpc/kvm/book3s_64_mmu_host.c if (!map) map 332 
arch/powerpc/kvm/book3s_64_mmu_host.c map = create_sid_map(vcpu, gvsid); map 334 arch/powerpc/kvm/book3s_64_mmu_host.c map->guest_esid = esid; map 336 arch/powerpc/kvm/book3s_64_mmu_host.c slb_vsid |= (map->host_vsid << 12); map 1131 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long *map) map 1143 arch/powerpc/kvm/book3s_64_mmu_hv.c if (map) map 1144 arch/powerpc/kvm/book3s_64_mmu_hv.c __set_bit_le(gfn - memslot->base_gfn, map); map 1148 arch/powerpc/kvm/book3s_64_mmu_hv.c struct kvm_memory_slot *memslot, unsigned long *map) map 1163 arch/powerpc/kvm/book3s_64_mmu_hv.c set_dirty_bits(map, i, npages); map 1053 arch/powerpc/kvm/book3s_64_mmu_radix.c struct kvm_memory_slot *memslot, unsigned long *map) map 1070 arch/powerpc/kvm/book3s_64_mmu_radix.c set_dirty_bits(map, i, npages); map 189 arch/powerpc/mm/book3s64/subpage_prot.c unsigned long, len, u32 __user *, map) map 211 arch/powerpc/mm/book3s64/subpage_prot.c if (!map) { map 217 arch/powerpc/mm/book3s64/subpage_prot.c if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32))) map 271 arch/powerpc/mm/book3s64/subpage_prot.c if (__copy_from_user(spp, map, nw * sizeof(u32))) map 273 arch/powerpc/mm/book3s64/subpage_prot.c map += nw; map 268 arch/powerpc/mm/nohash/mmu_context.c unsigned long *map; map 303 arch/powerpc/mm/nohash/mmu_context.c map = context_map; map 324 arch/powerpc/mm/nohash/mmu_context.c while (__test_and_set_bit(id, map)) { map 325 arch/powerpc/mm/nohash/mmu_context.c id = find_next_zero_bit(map, LAST_CONTEXT+1, id); map 633 arch/powerpc/mm/nohash/tlb.c bool map = true; map 644 arch/powerpc/mm/nohash/tlb.c map = false; map 647 arch/powerpc/mm/nohash/tlb.c if (map) map 242 arch/powerpc/net/bpf_jit_comp64.c PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)); map 83 arch/powerpc/oprofile/cell/pr_util.h unsigned int vma_map_lookup(struct vma_to_fileoffset_map *map, map 86 arch/powerpc/oprofile/cell/pr_util.h void vma_map_free(struct vma_to_fileoffset_map *map); map 135 arch/powerpc/oprofile/cell/spu_task_sync.c struct vma_to_fileoffset_map *map; map 147 arch/powerpc/oprofile/cell/spu_task_sync.c vma_map_free(info->map); map 225 arch/powerpc/oprofile/cell/spu_task_sync.c info->map = new_map; map 557 arch/powerpc/oprofile/cell/spu_task_sync.c struct vma_to_fileoffset_map *map; map 580 arch/powerpc/oprofile/cell/spu_task_sync.c map = c_info->map; map 589 arch/powerpc/oprofile/cell/spu_task_sync.c file_offset = vma_map_lookup( map, sample, the_spu, &grd_val); map 23 arch/powerpc/oprofile/cell/vma_map.c void vma_map_free(struct vma_to_fileoffset_map *map) map 25 arch/powerpc/oprofile/cell/vma_map.c while (map) { map 26 arch/powerpc/oprofile/cell/vma_map.c struct vma_to_fileoffset_map *next = map->next; map 27 arch/powerpc/oprofile/cell/vma_map.c kfree(map); map 28 arch/powerpc/oprofile/cell/vma_map.c map = next; map 33 arch/powerpc/oprofile/cell/vma_map.c vma_map_lookup(struct vma_to_fileoffset_map *map, unsigned int vma, map 46 arch/powerpc/oprofile/cell/vma_map.c for (; map; map = map->next) { map 47 arch/powerpc/oprofile/cell/vma_map.c if (vma < map->vma || vma >= map->vma + map->size) map 50 arch/powerpc/oprofile/cell/vma_map.c if (map->guard_ptr) { map 51 arch/powerpc/oprofile/cell/vma_map.c ovly_grd = *(u32 *)(aSpu->local_store + map->guard_ptr); map 52 arch/powerpc/oprofile/cell/vma_map.c if (ovly_grd != map->guard_val) map 56 arch/powerpc/oprofile/cell/vma_map.c offset = vma - map->vma + map->offset; map 64 arch/powerpc/oprofile/cell/vma_map.c vma_map_add(struct vma_to_fileoffset_map *map, 
unsigned int vma, map 73 arch/powerpc/oprofile/cell/vma_map.c vma_map_free(map); map 77 arch/powerpc/oprofile/cell/vma_map.c new->next = map; map 106 arch/powerpc/oprofile/cell/vma_map.c struct vma_to_fileoffset_map *map = NULL; map 162 arch/powerpc/oprofile/cell/vma_map.c map = vma_map_add(map, phdr.p_vaddr, phdr.p_memsz, map 164 arch/powerpc/oprofile/cell/vma_map.c if (!map) map 227 arch/powerpc/oprofile/cell/vma_map.c overlay_tbl_offset = vma_map_lookup(map, ovly_table_sym, map 268 arch/powerpc/oprofile/cell/vma_map.c map = vma_map_add(map, ovly.vma, ovly.size, ovly.offset, map 270 arch/powerpc/oprofile/cell/vma_map.c if (!map) map 276 arch/powerpc/oprofile/cell/vma_map.c map = NULL; map 278 arch/powerpc/oprofile/cell/vma_map.c return map; map 190 arch/powerpc/platforms/4xx/uic.c .map = uic_host_map, map 140 arch/powerpc/platforms/512x/mpc5121_ads_cpld.c .map = cpld_pic_host_map, map 135 arch/powerpc/platforms/52xx/media5200.c .map = media5200_irq_map, map 236 arch/powerpc/platforms/52xx/mpc52xx_gpt.c .map = mpc52xx_gpt_irq_map, map 390 arch/powerpc/platforms/52xx/mpc52xx_pic.c .map = mpc52xx_irqhost_map, map 112 arch/powerpc/platforms/82xx/pq2ads-pci-pic.c .map = pci_pic_host_map, map 270 arch/powerpc/platforms/85xx/socrates_fpga_pic.c .map = socrates_fpga_pic_host_map, map 130 arch/powerpc/platforms/8xx/cpm1.c .map = cpm_pic_host_map, map 124 arch/powerpc/platforms/8xx/pic.c .map = mpc8xx_pic_host_map, map 317 arch/powerpc/platforms/cell/axon_msi.c .map = msic_host_map, map 84 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_find_map(np); map 85 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 87 arch/powerpc/platforms/cell/cbe_regs.c return map->pmd_regs; map 93 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_thread_map[cpu].regs; map 94 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 96 arch/powerpc/platforms/cell/cbe_regs.c return map->pmd_regs; map 102 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_find_map(np); map 103 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 105 arch/powerpc/platforms/cell/cbe_regs.c return &map->pmd_shadow_regs; map 110 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_thread_map[cpu].regs; map 111 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 113 arch/powerpc/platforms/cell/cbe_regs.c return &map->pmd_shadow_regs; map 118 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_find_map(np); map 119 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 121 arch/powerpc/platforms/cell/cbe_regs.c return map->iic_regs; map 126 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_thread_map[cpu].regs; map 127 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 129 arch/powerpc/platforms/cell/cbe_regs.c return map->iic_regs; map 134 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_find_map(np); map 135 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 137 arch/powerpc/platforms/cell/cbe_regs.c return map->mic_tm_regs; map 142 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map = cbe_thread_map[cpu].regs; map 143 arch/powerpc/platforms/cell/cbe_regs.c if (map == NULL) map 145 arch/powerpc/platforms/cell/cbe_regs.c return map->mic_tm_regs; map 193 arch/powerpc/platforms/cell/cbe_regs.c static void __init cbe_fill_regs_map(struct cbe_regs_map *map) map 195 arch/powerpc/platforms/cell/cbe_regs.c if(map->be_node) { map 198 
arch/powerpc/platforms/cell/cbe_regs.c be = map->be_node; map 202 arch/powerpc/platforms/cell/cbe_regs.c map->pmd_regs = of_iomap(np, 0); map 206 arch/powerpc/platforms/cell/cbe_regs.c map->iic_regs = of_iomap(np, 2); map 210 arch/powerpc/platforms/cell/cbe_regs.c map->mic_tm_regs = of_iomap(np, 0); map 219 arch/powerpc/platforms/cell/cbe_regs.c cpu = map->cpu_node; map 223 arch/powerpc/platforms/cell/cbe_regs.c map->pmd_regs = ioremap(prop->address, prop->len); map 227 arch/powerpc/platforms/cell/cbe_regs.c map->iic_regs = ioremap(prop->address, prop->len); map 231 arch/powerpc/platforms/cell/cbe_regs.c map->mic_tm_regs = ioremap(prop->address, prop->len); map 251 arch/powerpc/platforms/cell/cbe_regs.c struct cbe_regs_map *map; map 255 arch/powerpc/platforms/cell/cbe_regs.c map = &cbe_regs_maps[cbe_id]; map 264 arch/powerpc/platforms/cell/cbe_regs.c map->cpu_node = cpu; map 270 arch/powerpc/platforms/cell/cbe_regs.c thread->regs = map; map 272 arch/powerpc/platforms/cell/cbe_regs.c map->be_node = thread->be_node; map 279 arch/powerpc/platforms/cell/cbe_regs.c cbe_fill_regs_map(map); map 273 arch/powerpc/platforms/cell/interrupt.c .map = iic_host_map, map 185 arch/powerpc/platforms/cell/spider-pic.c .map = spider_host_map, map 107 arch/powerpc/platforms/embedded6xx/flipper-pic.c .map = flipper_pic_map, map 105 arch/powerpc/platforms/embedded6xx/hlwd-pic.c .map = hlwd_pic_map, map 290 arch/powerpc/platforms/powermac/pic.c .map = pmac_pic_host_map, map 184 arch/powerpc/platforms/powermac/smp.c .map = psurge_host_map, map 155 arch/powerpc/platforms/powernv/opal-irqchip.c .map = opal_event_map, map 3498 arch/powerpc/platforms/powernv/pci-ioda.c unsigned int *map) map 3505 arch/powerpc/platforms/powernv/pci-ioda.c if (map[idx] != pe->pe_number) map 3521 arch/powerpc/platforms/powernv/pci-ioda.c map[idx] = IODA_INVALID_PE; map 71 arch/powerpc/platforms/powernv/vas-window.c void *map; map 89 arch/powerpc/platforms/powernv/vas-window.c map = ioremap_cache(start, len); map 90 arch/powerpc/platforms/powernv/vas-window.c if (!map) { map 96 arch/powerpc/platforms/powernv/vas-window.c pr_devel("Mapped paste addr 0x%llx to kaddr 0x%p\n", start, map); map 97 arch/powerpc/platforms/powernv/vas-window.c return map; map 106 arch/powerpc/platforms/powernv/vas-window.c void *map; map 114 arch/powerpc/platforms/powernv/vas-window.c map = ioremap(start, len); map 115 arch/powerpc/platforms/powernv/vas-window.c if (!map) { map 121 arch/powerpc/platforms/powernv/vas-window.c return map; map 677 arch/powerpc/platforms/ps3/interrupt.c .map = ps3_host_map, map 106 arch/powerpc/platforms/ps3/mm.c static void __maybe_unused _debug_dump_map(const struct map *m, map 118 arch/powerpc/platforms/ps3/mm.c static struct map map; map 128 arch/powerpc/platforms/ps3/mm.c return (phys_addr < map.rm.size || phys_addr >= map.total) map 129 arch/powerpc/platforms/ps3/mm.c ? 
phys_addr : phys_addr + map.r1.offset; map 169 arch/powerpc/platforms/ps3/mm.c &map.vas_id, &map.htab_size); map 177 arch/powerpc/platforms/ps3/mm.c result = lv1_select_virtual_address_space(map.vas_id); map 185 arch/powerpc/platforms/ps3/mm.c *htab_size = map.htab_size; map 187 arch/powerpc/platforms/ps3/mm.c debug_dump_map(&map); map 203 arch/powerpc/platforms/ps3/mm.c DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id); map 205 arch/powerpc/platforms/ps3/mm.c if (map.vas_id) { map 208 arch/powerpc/platforms/ps3/mm.c result = lv1_destruct_virtual_address_space(map.vas_id); map 210 arch/powerpc/platforms/ps3/mm.c map.vas_id = 0; map 230 arch/powerpc/platforms/ps3/mm.c r->offset = r->base - map.rm.size; map 282 arch/powerpc/platforms/ps3/mm.c if (result || r->base < map.rm.size) { map 289 arch/powerpc/platforms/ps3/mm.c r->offset = r->base - map.rm.size; map 318 arch/powerpc/platforms/ps3/mm.c map.total = map.rm.size; map 336 arch/powerpc/platforms/ps3/mm.c if (lpar_addr >= map.rm.size) map 337 arch/powerpc/platforms/ps3/mm.c lpar_addr -= map.r1.offset; map 984 arch/powerpc/platforms/ps3/mm.c if (r->offset < map.rm.size) { map 986 arch/powerpc/platforms/ps3/mm.c virt_addr = map.rm.base + r->offset; map 987 arch/powerpc/platforms/ps3/mm.c len = map.rm.size - r->offset; map 996 arch/powerpc/platforms/ps3/mm.c if (r->offset + r->len > map.rm.size) { map 998 arch/powerpc/platforms/ps3/mm.c virt_addr = map.rm.size; map 1000 arch/powerpc/platforms/ps3/mm.c if (r->offset >= map.rm.size) map 1001 arch/powerpc/platforms/ps3/mm.c virt_addr += r->offset - map.rm.size; map 1003 arch/powerpc/platforms/ps3/mm.c len -= map.rm.size - r->offset; map 1026 arch/powerpc/platforms/ps3/mm.c if (r->offset < map.rm.size) { map 1028 arch/powerpc/platforms/ps3/mm.c lpar_addr = map.rm.base + r->offset; map 1029 arch/powerpc/platforms/ps3/mm.c len = map.rm.size - r->offset; map 1037 arch/powerpc/platforms/ps3/mm.c if (r->offset + r->len > map.rm.size) { map 1039 arch/powerpc/platforms/ps3/mm.c lpar_addr = map.r1.base; map 1041 arch/powerpc/platforms/ps3/mm.c if (r->offset >= map.rm.size) map 1042 arch/powerpc/platforms/ps3/mm.c lpar_addr += r->offset - map.rm.size; map 1044 arch/powerpc/platforms/ps3/mm.c len -= map.rm.size - r->offset; map 1096 arch/powerpc/platforms/ps3/mm.c .map = dma_sb_map_area, map 1103 arch/powerpc/platforms/ps3/mm.c .map = dma_sb_map_area_linear, map 1110 arch/powerpc/platforms/ps3/mm.c .map = dma_ioc0_map_area, map 1126 arch/powerpc/platforms/ps3/mm.c if (r->offset >= map.rm.size) map 1127 arch/powerpc/platforms/ps3/mm.c r->offset -= map.r1.offset; map 1128 arch/powerpc/platforms/ps3/mm.c r->len = len ? 
len : _ALIGN_UP(map.total, 1 << r->page_size); map 1169 arch/powerpc/platforms/ps3/mm.c return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag); map 1192 arch/powerpc/platforms/ps3/mm.c result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size, map 1193 arch/powerpc/platforms/ps3/mm.c &map.total); map 1198 arch/powerpc/platforms/ps3/mm.c map.rm.offset = map.rm.base; map 1199 arch/powerpc/platforms/ps3/mm.c map.vas_id = map.htab_size = 0; map 1203 arch/powerpc/platforms/ps3/mm.c BUG_ON(map.rm.base); map 1204 arch/powerpc/platforms/ps3/mm.c BUG_ON(!map.rm.size); map 1208 arch/powerpc/platforms/ps3/mm.c if (ps3_mm_get_repository_highmem(&map.r1)) { map 1209 arch/powerpc/platforms/ps3/mm.c result = ps3_mm_region_create(&map.r1, map.total - map.rm.size); map 1212 arch/powerpc/platforms/ps3/mm.c ps3_mm_set_repository_highmem(&map.r1); map 1216 arch/powerpc/platforms/ps3/mm.c map.total = map.rm.size + map.r1.size; map 1218 arch/powerpc/platforms/ps3/mm.c if (!map.r1.size) { map 1222 arch/powerpc/platforms/ps3/mm.c __func__, __LINE__, map.rm.size, map 1223 arch/powerpc/platforms/ps3/mm.c map.total - map.rm.size); map 1224 arch/powerpc/platforms/ps3/mm.c memblock_add(map.rm.size, map.total - map.rm.size); map 1236 arch/powerpc/platforms/ps3/mm.c ps3_mm_region_destroy(&map.r1); map 226 arch/powerpc/sysdev/cpm2_pic.c .map = cpm2_pic_host_map, map 250 arch/powerpc/sysdev/ehv_pic.c .map = ehv_pic_host_map, map 100 arch/powerpc/sysdev/fsl_msi.c .map = fsl_msi_host_map, map 181 arch/powerpc/sysdev/ge/ge_pic.c .map = gef_pic_host_map, map 207 arch/powerpc/sysdev/i8259.c .map = i8259_host_map, map 695 arch/powerpc/sysdev/ipic.c .map = ipic_host_map, map 1182 arch/powerpc/sysdev/mpic.c .map = mpic_host_map, map 112 arch/powerpc/sysdev/mpic_timer.c unsigned int map; map 121 arch/powerpc/sysdev/mpic_timer.c map = casc_priv->cascade_map & priv->idle; map 122 arch/powerpc/sysdev/mpic_timer.c if (map == casc_priv->cascade_map) { map 386 arch/powerpc/sysdev/tsi108_pci.c .map = pci_irq_host_map, map 171 arch/powerpc/sysdev/xics/ics-opal.c .map = ics_opal_map, map 34 arch/powerpc/sysdev/xics/ics-rtas.c .map = ics_rtas_map, map 353 arch/powerpc/sysdev/xics/xics-common.c if (ics->map(ics, virq) == 0) map 417 arch/powerpc/sysdev/xics/xics-common.c .map = xics_host_map, map 1301 arch/powerpc/sysdev/xive/common.c .map = xive_irq_domain_map, map 631 arch/riscv/net/bpf_jit_comp.c off = offsetof(struct bpf_array, map.max_entries); map 2361 arch/s390/kvm/interrupt.c struct s390_map_info *map; map 2367 arch/s390/kvm/interrupt.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 2368 arch/s390/kvm/interrupt.c if (!map) { map 2372 arch/s390/kvm/interrupt.c INIT_LIST_HEAD(&map->list); map 2373 arch/s390/kvm/interrupt.c map->guest_addr = addr; map 2374 arch/s390/kvm/interrupt.c map->addr = gmap_translate(kvm->arch.gmap, addr); map 2375 arch/s390/kvm/interrupt.c if (map->addr == -EFAULT) { map 2379 arch/s390/kvm/interrupt.c ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page); map 2385 arch/s390/kvm/interrupt.c list_add_tail(&map->list, &adapter->maps); map 2388 arch/s390/kvm/interrupt.c put_page(map->page); map 2394 arch/s390/kvm/interrupt.c kfree(map); map 2401 arch/s390/kvm/interrupt.c struct s390_map_info *map, *tmp; map 2408 arch/s390/kvm/interrupt.c list_for_each_entry_safe(map, tmp, &adapter->maps, list) { map 2409 arch/s390/kvm/interrupt.c if (map->guest_addr == addr) { map 2412 arch/s390/kvm/interrupt.c list_del(&map->list); map 2413 arch/s390/kvm/interrupt.c put_page(map->page); map 2414 
arch/s390/kvm/interrupt.c kfree(map); map 2426 arch/s390/kvm/interrupt.c struct s390_map_info *map, *tmp; map 2431 arch/s390/kvm/interrupt.c list_for_each_entry_safe(map, tmp, map 2433 arch/s390/kvm/interrupt.c list_del(&map->list); map 2434 arch/s390/kvm/interrupt.c put_page(map->page); map 2435 arch/s390/kvm/interrupt.c kfree(map); map 2706 arch/s390/kvm/interrupt.c struct s390_map_info *map; map 2711 arch/s390/kvm/interrupt.c list_for_each_entry(map, &adapter->maps, list) { map 2712 arch/s390/kvm/interrupt.c if (map->guest_addr == addr) map 2713 arch/s390/kvm/interrupt.c return map; map 2725 arch/s390/kvm/interrupt.c void *map; map 2730 arch/s390/kvm/interrupt.c map = page_address(info->page); map 2732 arch/s390/kvm/interrupt.c set_bit(bit, map); map 2741 arch/s390/kvm/interrupt.c map = page_address(info->page); map 2744 arch/s390/kvm/interrupt.c summary_set = test_and_set_bit(bit, map); map 1057 arch/s390/net/bpf_jit_comp.c offsetof(struct bpf_array, map.max_entries)); map 404 arch/s390/pci/pci_dma.c dma_addr_t map; map 412 arch/s390/pci/pci_dma.c map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0); map 413 arch/s390/pci/pci_dma.c if (dma_mapping_error(dev, map)) { map 420 arch/s390/pci/pci_dma.c *dma_handle = map; map 95 arch/sh/boards/mach-x3proto/gpio.c .map = x3proto_gpio_irq_map, map 70 arch/sh/kernel/cpu/sh4/sq.c static inline void sq_mapping_list_add(struct sq_mapping *map) map 80 arch/sh/kernel/cpu/sh4/sq.c map->next = tmp; map 81 arch/sh/kernel/cpu/sh4/sq.c *p = map; map 86 arch/sh/kernel/cpu/sh4/sq.c static inline void sq_mapping_list_del(struct sq_mapping *map) map 93 arch/sh/kernel/cpu/sh4/sq.c if (tmp == map) { map 101 arch/sh/kernel/cpu/sh4/sq.c static int __sq_remap(struct sq_mapping *map, pgprot_t prot) map 106 arch/sh/kernel/cpu/sh4/sq.c vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX); map 110 arch/sh/kernel/cpu/sh4/sq.c vma->phys_addr = map->addr; map 113 arch/sh/kernel/cpu/sh4/sq.c (unsigned long)vma->addr + map->size, map 124 arch/sh/kernel/cpu/sh4/sq.c __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0); map 125 arch/sh/kernel/cpu/sh4/sq.c __raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); map 145 arch/sh/kernel/cpu/sh4/sq.c struct sq_mapping *map; map 161 arch/sh/kernel/cpu/sh4/sq.c map = kmem_cache_alloc(sq_cache, GFP_KERNEL); map 162 arch/sh/kernel/cpu/sh4/sq.c if (unlikely(!map)) map 165 arch/sh/kernel/cpu/sh4/sq.c map->addr = phys; map 166 arch/sh/kernel/cpu/sh4/sq.c map->size = size; map 167 arch/sh/kernel/cpu/sh4/sq.c map->name = name; map 170 arch/sh/kernel/cpu/sh4/sq.c get_order(map->size)); map 176 arch/sh/kernel/cpu/sh4/sq.c map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT); map 178 arch/sh/kernel/cpu/sh4/sq.c ret = __sq_remap(map, prot); map 184 arch/sh/kernel/cpu/sh4/sq.c likely(map->name) ? 
map->name : "???", map 186 arch/sh/kernel/cpu/sh4/sq.c map->sq_addr, map->addr); map 188 arch/sh/kernel/cpu/sh4/sq.c sq_mapping_list_add(map); map 190 arch/sh/kernel/cpu/sh4/sq.c return map->sq_addr; map 193 arch/sh/kernel/cpu/sh4/sq.c kmem_cache_free(sq_cache, map); map 208 arch/sh/kernel/cpu/sh4/sq.c struct sq_mapping **p, *map; map 211 arch/sh/kernel/cpu/sh4/sq.c for (p = &sq_mapping_list; (map = *p); p = &map->next) map 212 arch/sh/kernel/cpu/sh4/sq.c if (map->sq_addr == vaddr) map 215 arch/sh/kernel/cpu/sh4/sq.c if (unlikely(!map)) { map 221 arch/sh/kernel/cpu/sh4/sq.c page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT; map 222 arch/sh/kernel/cpu/sh4/sq.c bitmap_release_region(sq_bitmap, page, get_order(map->size)); map 231 arch/sh/kernel/cpu/sh4/sq.c vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK)); map 234 arch/sh/kernel/cpu/sh4/sq.c __func__, map->sq_addr); map 240 arch/sh/kernel/cpu/sh4/sq.c sq_mapping_list_del(map); map 242 arch/sh/kernel/cpu/sh4/sq.c kmem_cache_free(sq_cache, map); map 38 arch/sh/mm/ioremap_fixed.c struct ioremap_map *map; map 42 arch/sh/mm/ioremap_fixed.c map = &ioremap_maps[i]; map 43 arch/sh/mm/ioremap_fixed.c map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i); map 51 arch/sh/mm/ioremap_fixed.c struct ioremap_map *map; map 65 arch/sh/mm/ioremap_fixed.c map = &ioremap_maps[i]; map 66 arch/sh/mm/ioremap_fixed.c if (!map->addr) { map 67 arch/sh/mm/ioremap_fixed.c map->size = size; map 96 arch/sh/mm/ioremap_fixed.c map->addr = (void __iomem *)(offset + map->fixmap_addr); map 97 arch/sh/mm/ioremap_fixed.c return map->addr; map 103 arch/sh/mm/ioremap_fixed.c struct ioremap_map *map; map 109 arch/sh/mm/ioremap_fixed.c map = &ioremap_maps[i]; map 110 arch/sh/mm/ioremap_fixed.c if (map->addr == addr) { map 122 arch/sh/mm/ioremap_fixed.c nrpages = map->size >> PAGE_SHIFT; map 131 arch/sh/mm/ioremap_fixed.c map->size = 0; map 132 arch/sh/mm/ioremap_fixed.c map->addr = NULL; map 103 arch/sparc/boot/piggyback.c FILE *map; map 108 arch/sparc/boot/piggyback.c map = fopen(filename, "r"); map 109 arch/sparc/boot/piggyback.c if (!map) map 111 arch/sparc/boot/piggyback.c while (fgets(buffer, 1024, map)) { map 117 arch/sparc/boot/piggyback.c fclose (map); map 15 arch/sparc/include/asm/bitext.h unsigned long *map; map 26 arch/sparc/include/asm/bitext.h void bit_map_init(struct bit_map *t, unsigned long *map, int size); map 32 arch/sparc/include/asm/iommu-common.h unsigned long *map; map 23 arch/sparc/include/asm/iommu_64.h unsigned long *map; map 68 arch/sparc/kernel/chmc.c struct chmc_obp_map map[2]; map 158 arch/sparc/kernel/chmc.c struct jbusmc_obp_map map; map 247 arch/sparc/kernel/chmc.c map_val = p->map.dimm_map[dimm_map_index]; map 250 arch/sparc/kernel/chmc.c *pin_p = p->map.pin_map[cache_line_offset]; map 258 arch/sparc/kernel/chmc.c mp = &p->map[0]; map 260 arch/sparc/kernel/chmc.c mp = &p->map[1]; map 185 arch/sparc/kernel/iommu-common.c n = iommu_area_alloc(iommu->map, limit, start, npages, shift, map 262 arch/sparc/kernel/iommu-common.c bitmap_clear(iommu->map, entry, npages); map 111 arch/sparc/kernel/iommu.c iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node); map 112 arch/sparc/kernel/iommu.c if (!iommu->tbl.map) map 150 arch/sparc/kernel/iommu.c kfree(iommu->tbl.map); map 151 arch/sparc/kernel/iommu.c iommu->tbl.map = NULL; map 1056 arch/sparc/kernel/ldc.c iommu->map = kzalloc(sz, GFP_KERNEL); map 1057 arch/sparc/kernel/ldc.c if (!iommu->map) { map 1094 arch/sparc/kernel/ldc.c kfree(iommu->map); map 1095 arch/sparc/kernel/ldc.c iommu->map = 
NULL; map 1115 arch/sparc/kernel/ldc.c kfree(iommu->map); map 1116 arch/sparc/kernel/ldc.c iommu->map = NULL; map 145 arch/sparc/kernel/of_device_32.c .map = of_bus_pci_map, map 154 arch/sparc/kernel/of_device_32.c .map = of_bus_default_map, map 163 arch/sparc/kernel/of_device_32.c .map = of_bus_ambapp_map, map 172 arch/sparc/kernel/of_device_32.c .map = of_bus_default_map, map 216 arch/sparc/kernel/of_device_32.c if (!bus->map(addr, ranges, na, ns, pna)) map 190 arch/sparc/kernel/of_device_64.c .map = of_bus_pci_map, map 199 arch/sparc/kernel/of_device_64.c .map = of_bus_simba_map, map 208 arch/sparc/kernel/of_device_64.c .map = of_bus_default_map, map 217 arch/sparc/kernel/of_device_64.c .map = of_bus_default_map, map 226 arch/sparc/kernel/of_device_64.c .map = of_bus_default_map, map 269 arch/sparc/kernel/of_device_64.c if (!bus->map(addr, ranges, na, ns, pna)) map 32 arch/sparc/kernel/of_device_common.h int (*map)(u32 *addr, const u32 *range, map 362 arch/sparc/kernel/pci.c static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) map 369 arch/sparc/kernel/pci.c if ((map & (1 << idx)) != 0) { map 391 arch/sparc/kernel/pci.c u8 map; map 393 arch/sparc/kernel/pci.c pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map); map 394 arch/sparc/kernel/pci.c apb_calc_first_last(map, &first, &last); map 401 arch/sparc/kernel/pci.c pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); map 402 arch/sparc/kernel/pci.c apb_calc_first_last(map, &first, &last); map 733 arch/sparc/kernel/pci_sun4v.c __set_bit(i, iommu->map); map 855 arch/sparc/kernel/pci_sun4v.c atu->tbl.map = kzalloc(map_size, GFP_KERNEL); map 856 arch/sparc/kernel/pci_sun4v.c if (!atu->tbl.map) map 899 arch/sparc/kernel/pci_sun4v.c iommu->tbl.map = kzalloc(sz, GFP_KERNEL); map 900 arch/sparc/kernel/pci_sun4v.c if (!iommu->tbl.map) { map 147 arch/sparc/kernel/pcic.c #define SN2L_INIT(name, map) \ map 148 arch/sparc/kernel/pcic.c { name, map, ARRAY_SIZE(map) } map 60 arch/sparc/lib/bitext.c off_new = find_next_zero_bit(t->map, t->size, offset); map 81 arch/sparc/lib/bitext.c while (test_bit(offset + i, t->map) == 0) { map 84 arch/sparc/lib/bitext.c bitmap_set(t->map, offset, len); map 87 arch/sparc/lib/bitext.c (t->map, t->size, map 111 arch/sparc/lib/bitext.c if (test_bit(offset + i, t->map) == 0) map 113 arch/sparc/lib/bitext.c __clear_bit(offset + i, t->map); map 121 arch/sparc/lib/bitext.c void bit_map_init(struct bit_map *t, unsigned long *map, int size) map 123 arch/sparc/lib/bitext.c bitmap_zero(map, size); map 126 arch/sparc/lib/bitext.c t->map = map; map 859 arch/sparc/net/bpf_jit_comp_64.c off = offsetof(struct bpf_array, map.max_entries); map 274 arch/um/include/shared/os.h extern int map(struct mm_id * mm_idp, unsigned long virt, map 74 arch/um/kernel/tlb.c ret = map(&hvc->mm->context.id, op->u.mmap.addr, map 469 arch/um/kernel/tlb.c err = map(mm_id, address, PAGE_SIZE, prot, fd, offset, map 346 arch/unicore32/mm/mmu.c struct map_desc map; map 366 arch/unicore32/mm/mmu.c map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map 367 arch/unicore32/mm/mmu.c map.virtual = VECTORS_BASE; map 368 arch/unicore32/mm/mmu.c map.length = PAGE_SIZE; map 369 arch/unicore32/mm/mmu.c map.type = MT_HIGH_VECTORS; map 370 arch/unicore32/mm/mmu.c create_mapping(&map); map 376 arch/unicore32/mm/mmu.c map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map 377 arch/unicore32/mm/mmu.c map.virtual = KUSER_VECPAGE_BASE; map 378 arch/unicore32/mm/mmu.c map.length = PAGE_SIZE; map 379 arch/unicore32/mm/mmu.c map.type = MT_KUSER; map 380 
arch/unicore32/mm/mmu.c create_mapping(&map); map 400 arch/unicore32/mm/mmu.c struct map_desc map; map 407 arch/unicore32/mm/mmu.c map.pfn = __phys_to_pfn(start); map 408 arch/unicore32/mm/mmu.c map.virtual = __phys_to_virt(start); map 409 arch/unicore32/mm/mmu.c map.length = end - start; map 410 arch/unicore32/mm/mmu.c map.type = MT_MEMORY; map 412 arch/unicore32/mm/mmu.c create_mapping(&map); map 638 arch/x86/boot/compressed/eboot.c efi_memory_desc_t *map; map 642 arch/x86/boot/compressed/eboot.c boot_map.map = ↦ map 672 arch/x86/boot/compressed/eboot.c struct efi_boot_memmap *map, map 683 arch/x86/boot/compressed/eboot.c p->efi->efi_memdesc_size = *map->desc_size; map 684 arch/x86/boot/compressed/eboot.c p->efi->efi_memdesc_version = *map->desc_ver; map 685 arch/x86/boot/compressed/eboot.c p->efi->efi_memmap = (unsigned long)*map->map; map 686 arch/x86/boot/compressed/eboot.c p->efi->efi_memmap_size = *map->map_size; map 690 arch/x86/boot/compressed/eboot.c p->efi->efi_memmap_hi = (unsigned long)*map->map >> 32; map 704 arch/x86/boot/compressed/eboot.c struct efi_boot_memmap map; map 707 arch/x86/boot/compressed/eboot.c map.map = &mem_map; map 708 arch/x86/boot/compressed/eboot.c map.map_size = &map_sz; map 709 arch/x86/boot/compressed/eboot.c map.desc_size = &desc_size; map 710 arch/x86/boot/compressed/eboot.c map.desc_ver = &desc_version; map 711 arch/x86/boot/compressed/eboot.c map.key_ptr = &key; map 712 arch/x86/boot/compressed/eboot.c map.buff_size = &buff_size; map 721 arch/x86/boot/compressed/eboot.c status = efi_exit_boot_services(sys_table, handle, &map, &priv, map 179 arch/x86/boot/compressed/misc.c unsigned long delta, map, ptr; map 196 arch/x86/boot/compressed/misc.c map = delta - __START_KERNEL_map; map 233 arch/x86/boot/compressed/misc.c extended += map; map 244 arch/x86/boot/compressed/misc.c extended += map; map 254 arch/x86/boot/compressed/misc.c extended += map; map 34 arch/x86/events/intel/uncore.c struct pci2phy_map *map; map 38 arch/x86/events/intel/uncore.c list_for_each_entry(map, &pci2phy_map_head, list) { map 39 arch/x86/events/intel/uncore.c if (map->segment == pci_domain_nr(bus)) { map 40 arch/x86/events/intel/uncore.c phys_id = map->pbus_to_physid[bus->number]; map 51 arch/x86/events/intel/uncore.c struct pci2phy_map *map, *tmp; map 53 arch/x86/events/intel/uncore.c list_for_each_entry_safe(map, tmp, &pci2phy_map_head, list) { map 54 arch/x86/events/intel/uncore.c list_del(&map->list); map 55 arch/x86/events/intel/uncore.c kfree(map); map 61 arch/x86/events/intel/uncore.c struct pci2phy_map *map, *alloc = NULL; map 67 arch/x86/events/intel/uncore.c list_for_each_entry(map, &pci2phy_map_head, list) { map 68 arch/x86/events/intel/uncore.c if (map->segment == segment) map 83 arch/x86/events/intel/uncore.c map = alloc; map 85 arch/x86/events/intel/uncore.c map->segment = segment; map 87 arch/x86/events/intel/uncore.c map->pbus_to_physid[i] = -1; map 88 arch/x86/events/intel/uncore.c list_add_tail(&map->list, &pci2phy_map_head); map 92 arch/x86/events/intel/uncore.c return map; map 878 arch/x86/events/intel/uncore_nhmex.c DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); map 532 arch/x86/events/intel/uncore_snb.c struct pci2phy_map *map; map 543 arch/x86/events/intel/uncore_snb.c map = __find_pci2phy_map(segment); map 544 arch/x86/events/intel/uncore_snb.c if (!map) { map 549 arch/x86/events/intel/uncore_snb.c map->pbus_to_physid[bus] = 0; map 1297 arch/x86/events/intel/uncore_snbep.c struct pci2phy_map *map; map 1319 arch/x86/events/intel/uncore_snbep.c map = 
__find_pci2phy_map(segment); map 1320 arch/x86/events/intel/uncore_snbep.c if (!map) { map 1332 arch/x86/events/intel/uncore_snbep.c map->pbus_to_physid[bus] = i; map 1345 arch/x86/events/intel/uncore_snbep.c list_for_each_entry(map, &pci2phy_map_head, list) { map 1349 arch/x86/events/intel/uncore_snbep.c if (map->pbus_to_physid[bus] >= 0) map 1350 arch/x86/events/intel/uncore_snbep.c i = map->pbus_to_physid[bus]; map 1352 arch/x86/events/intel/uncore_snbep.c map->pbus_to_physid[bus] = i; map 1356 arch/x86/events/intel/uncore_snbep.c if (map->pbus_to_physid[bus] >= 0) map 1357 arch/x86/events/intel/uncore_snbep.c i = map->pbus_to_physid[bus]; map 1359 arch/x86/events/intel/uncore_snbep.c map->pbus_to_physid[bus] = i; map 326 arch/x86/include/asm/apic.h bool (*check_apicid_used)(physid_mask_t *map, int apicid); map 507 arch/x86/include/asm/apic.h extern bool default_check_apicid_used(physid_mask_t *map, int apicid); map 99 arch/x86/include/asm/mpspec.h #define physid_set(physid, map) set_bit(physid, (map).mask) map 100 arch/x86/include/asm/mpspec.h #define physid_clear(physid, map) clear_bit(physid, (map).mask) map 101 arch/x86/include/asm/mpspec.h #define physid_isset(physid, map) test_bit(physid, (map).mask) map 102 arch/x86/include/asm/mpspec.h #define physid_test_and_set(physid, map) \ map 103 arch/x86/include/asm/mpspec.h test_and_set_bit(physid, (map).mask) map 111 arch/x86/include/asm/mpspec.h #define physids_clear(map) \ map 112 arch/x86/include/asm/mpspec.h bitmap_zero((map).mask, MAX_LOCAL_APIC) map 117 arch/x86/include/asm/mpspec.h #define physids_empty(map) \ map 118 arch/x86/include/asm/mpspec.h bitmap_empty((map).mask, MAX_LOCAL_APIC) map 123 arch/x86/include/asm/mpspec.h #define physids_weight(map) \ map 124 arch/x86/include/asm/mpspec.h bitmap_weight((map).mask, MAX_LOCAL_APIC) map 132 arch/x86/include/asm/mpspec.h static inline unsigned long physids_coerce(physid_mask_t *map) map 134 arch/x86/include/asm/mpspec.h return map->mask[0]; map 137 arch/x86/include/asm/mpspec.h static inline void physids_promote(unsigned long physids, physid_mask_t *map) map 139 arch/x86/include/asm/mpspec.h physids_clear(*map); map 140 arch/x86/include/asm/mpspec.h map->mask[0] = physids; map 143 arch/x86/include/asm/mpspec.h static inline void physid_set_mask_of_physid(int physid, physid_mask_t *map) map 145 arch/x86/include/asm/mpspec.h physids_clear(*map); map 146 arch/x86/include/asm/mpspec.h physid_set(physid, *map); map 67 arch/x86/include/uapi/asm/e820.h struct e820entry map[E820_X_MAX]; map 114 arch/x86/kernel/acpi/boot.c void __init __acpi_unmap_table(void __iomem *map, unsigned long size) map 116 arch/x86/kernel/acpi/boot.c if (!map || !size) map 119 arch/x86/kernel/acpi/boot.c early_memunmap(map, size); map 1529 arch/x86/kernel/apic/apic.c unsigned long map[APIC_IR_MAPSIZE]; map 1550 arch/x86/kernel/apic/apic.c if (!bitmap_empty(isr->map, APIC_IR_BITS)) { map 1556 arch/x86/kernel/apic/apic.c for_each_set_bit(bit, isr->map, APIC_IR_BITS) map 1561 arch/x86/kernel/apic/apic.c return !bitmap_empty(irr->map, APIC_IR_BITS); map 1589 arch/x86/kernel/apic/apic.c pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map); map 19 arch/x86/kernel/apic/apic_common.c bool default_check_apicid_used(physid_mask_t *map, int apicid) map 21 arch/x86/kernel/apic/apic_common.c return physid_isset(apicid, *map); map 25 arch/x86/kernel/apic/bigsmp_32.c static bool bigsmp_check_apicid_used(physid_mask_t *map, int apicid) map 376 arch/x86/kvm/hyperv.c hlist_for_each_entry(e, &irq_rt->map[gsi], link) { 
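(Aside on the asm/mpspec.h entries just above: the physid_* helpers are thin wrappers over the generic bitmap API applied to the .mask array of a physid_mask_t sized for MAX_LOCAL_APIC, and default_check_apicid_used() in apic_common.c is simply physid_isset() on such a mask. A small illustrative helper in the same style, with an invented name and invented "used" ids, might be:

#include <linux/types.h>
#include <asm/mpspec.h>		/* physid_mask_t, physid_set(), physid_isset(), physids_clear() */

/*
 * Illustrative only: build a mask of "used" APIC ids and test membership,
 * the same way default_check_apicid_used() (listed above) uses
 * physid_isset(). The function name and the ids marked used are invented.
 */
static bool example_apicid_is_used(int apicid)
{
	physid_mask_t used;

	physids_clear(used);			/* bitmap_zero() on used.mask */
	physid_set(0, used);			/* mark APIC id 0 as used */
	physid_set(1, used);			/* mark APIC id 1 as used */

	return physid_isset(apicid, used);	/* test_bit() on used.mask */
}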
map 94 arch/x86/kvm/ioapic.c bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); map 118 arch/x86/kvm/ioapic.c old_val = test_bit(vcpu->vcpu_id, dest_map->map); map 124 arch/x86/kvm/ioapic.c __set_bit(vcpu->vcpu_id, dest_map->map); map 128 arch/x86/kvm/ioapic.c __clear_bit(vcpu->vcpu_id, dest_map->map); map 159 arch/x86/kvm/ioapic.c ioapic->rtc_status.dest_map.map)) { map 246 arch/x86/kvm/ioapic.c if (test_bit(vcpu->vcpu_id, dest_map->map)) map 430 arch/x86/kvm/ioapic.c if (test_bit(vcpu->vcpu_id, dest_map->map) && map 46 arch/x86/kvm/ioapic.h DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); map 411 arch/x86/kvm/irq_comm.c hlist_for_each_entry(entry, &table->map[i], link) { map 130 arch/x86/kvm/lapic.c static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map, map 132 arch/x86/kvm/lapic.c switch (map->mode) { map 135 arch/x86/kvm/lapic.c u32 max_apic_id = map->max_apic_id; map 140 arch/x86/kvm/lapic.c offset = array_index_nospec(offset, map->max_apic_id + 1); map 141 arch/x86/kvm/lapic.c *cluster = &map->phys_map[offset]; map 150 arch/x86/kvm/lapic.c *cluster = map->xapic_flat_map; map 154 arch/x86/kvm/lapic.c *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf]; map 165 arch/x86/kvm/lapic.c struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu); map 167 arch/x86/kvm/lapic.c kvfree(map); map 565 arch/x86/kvm/lapic.c struct kvm_apic_map *map; map 582 arch/x86/kvm/lapic.c map = rcu_dereference(kvm->arch.apic_map); map 584 arch/x86/kvm/lapic.c if (unlikely(!map)) { map 589 arch/x86/kvm/lapic.c if (min > map->max_apic_id) map 593 arch/x86/kvm/lapic.c min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { map 594 arch/x86/kvm/lapic.c if (map->phys_map[min + i]) { map 595 arch/x86/kvm/lapic.c vcpu = map->phys_map[min + i]->vcpu; map 602 arch/x86/kvm/lapic.c if (min > map->max_apic_id) map 606 arch/x86/kvm/lapic.c min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) { map 607 arch/x86/kvm/lapic.c if (map->phys_map[min + i]) { map 608 arch/x86/kvm/lapic.c vcpu = map->phys_map[min + i]->vcpu; map 854 arch/x86/kvm/lapic.c struct kvm_lapic_irq *irq, struct kvm_apic_map *map) map 858 arch/x86/kvm/lapic.c map->mode != KVM_APIC_MODE_X2APIC)) map 881 arch/x86/kvm/lapic.c struct kvm_apic_map *map, struct kvm_lapic ***dst, map 893 arch/x86/kvm/lapic.c if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map)) map 897 arch/x86/kvm/lapic.c if (irq->dest_id > map->max_apic_id) { map 900 arch/x86/kvm/lapic.c u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1); map 901 arch/x86/kvm/lapic.c *dst = &map->phys_map[dest_id]; map 908 arch/x86/kvm/lapic.c if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst, map 948 arch/x86/kvm/lapic.c struct kvm_apic_map *map; map 962 arch/x86/kvm/lapic.c map = rcu_dereference(kvm->arch.apic_map); map 964 arch/x86/kvm/lapic.c ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap); map 995 arch/x86/kvm/lapic.c struct kvm_apic_map *map; map 1004 arch/x86/kvm/lapic.c map = rcu_dereference(kvm->arch.apic_map); map 1006 arch/x86/kvm/lapic.c if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) && map 1048 arch/x86/kvm/lapic.c __set_bit(vcpu->vcpu_id, dest_map->map); map 3356 arch/x86/kvm/svm.c struct kvm_host_map map; map 3365 arch/x86/kvm/svm.c rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map); map 3372 arch/x86/kvm/svm.c nested_vmcb = map.hva; map 3476 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); map 3542 arch/x86/kvm/svm.c struct vmcb *nested_vmcb, struct 
kvm_host_map *map) map 3626 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, map, true); map 3650 arch/x86/kvm/svm.c struct kvm_host_map map; map 3655 arch/x86/kvm/svm.c ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map); map 3665 arch/x86/kvm/svm.c nested_vmcb = map.hva; map 3673 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); map 3717 arch/x86/kvm/svm.c enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map); map 3750 arch/x86/kvm/svm.c struct kvm_host_map map; map 3756 arch/x86/kvm/svm.c ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); map 3763 arch/x86/kvm/svm.c nested_vmcb = map.hva; map 3768 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); map 3776 arch/x86/kvm/svm.c struct kvm_host_map map; map 3782 arch/x86/kvm/svm.c ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); map 3789 arch/x86/kvm/svm.c nested_vmcb = map.hva; map 3794 arch/x86/kvm/svm.c kvm_vcpu_unmap(&svm->vcpu, &map, true); map 5338 arch/x86/kvm/svm.c hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { map 6300 arch/x86/kvm/svm.c struct kvm_host_map map; map 6308 arch/x86/kvm/svm.c if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL) map 6310 arch/x86/kvm/svm.c nested_vmcb = map.hva; map 6311 arch/x86/kvm/svm.c enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map); map 567 arch/x86/kvm/vmx/nested.c struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map; map 574 arch/x86/kvm/vmx/nested.c if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map)) map 577 arch/x86/kvm/vmx/nested.c msr_bitmap_l1 = (unsigned long *)map->hva; map 661 arch/x86/kvm/vmx/nested.c struct kvm_host_map map; map 670 arch/x86/kvm/vmx/nested.c if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)) map 673 arch/x86/kvm/vmx/nested.c memcpy(shadow, map.hva, VMCS12_SIZE); map 674 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &map, false); map 2739 arch/x86/kvm/vmx/nested.c struct kvm_host_map map; map 2747 arch/x86/kvm/vmx/nested.c if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))) map 2750 arch/x86/kvm/vmx/nested.c shadow = map.hva; map 2756 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &map, false); map 2935 arch/x86/kvm/vmx/nested.c struct kvm_host_map *map; map 2967 arch/x86/kvm/vmx/nested.c map = &vmx->nested.virtual_apic_map; map 2969 arch/x86/kvm/vmx/nested.c if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) { map 2970 arch/x86/kvm/vmx/nested.c vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn)); map 2993 arch/x86/kvm/vmx/nested.c map = &vmx->nested.pi_desc_map; map 2995 arch/x86/kvm/vmx/nested.c if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) { map 2997 arch/x86/kvm/vmx/nested.c (struct pi_desc *)(((void *)map->hva) + map 3000 arch/x86/kvm/vmx/nested.c pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr)); map 4834 arch/x86/kvm/vmx/nested.c struct kvm_host_map map; map 4837 arch/x86/kvm/vmx/nested.c if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) { map 4848 arch/x86/kvm/vmx/nested.c new_vmcs12 = map.hva; map 4853 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &map, false); map 4865 arch/x86/kvm/vmx/nested.c kvm_vcpu_unmap(vcpu, &map, false); map 7476 arch/x86/kvm/vmx/vmx.c hlist_empty(&irq_rt->map[guest_irq])) { map 7482 arch/x86/kvm/vmx/vmx.c hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) { map 2649 arch/x86/kvm/x86.c struct kvm_host_map map; map 2657 arch/x86/kvm/x86.c &map, &vcpu->arch.st.cache, false)) map 2660 arch/x86/kvm/x86.c st = 
map.hva + map 2689 arch/x86/kvm/x86.c kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); map 3545 arch/x86/kvm/x86.c struct kvm_host_map map; map 3554 arch/x86/kvm/x86.c if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, map 3558 arch/x86/kvm/x86.c st = map.hva + map 3563 arch/x86/kvm/x86.c kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); map 5836 arch/x86/kvm/x86.c struct kvm_host_map map; map 5855 arch/x86/kvm/x86.c if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map)) map 5858 arch/x86/kvm/x86.c kaddr = map.hva + offset_in_page(gpa); map 5877 arch/x86/kvm/x86.c kvm_vcpu_unmap(vcpu, &map, true); map 7436 arch/x86/kvm/x86.c struct kvm_apic_map *map; map 7439 arch/x86/kvm/x86.c map = rcu_dereference(kvm->arch.apic_map); map 7441 arch/x86/kvm/x86.c if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) map 7442 arch/x86/kvm/x86.c target = map->phys_map[dest_id]->vcpu; map 120 arch/x86/mm/mem_encrypt.c bool map) map 129 arch/x86/mm/mem_encrypt.c pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0; map 227 arch/x86/mm/mmio-mod.c struct mmiotrace_map map = { map 250 arch/x86/mm/mmio-mod.c map.map_id = trace->id; map 258 arch/x86/mm/mmio-mod.c mmio_trace_mapping(&map); map 282 arch/x86/mm/mmio-mod.c struct mmiotrace_map map = { map 307 arch/x86/mm/mmio-mod.c map.map_id = (found_trace) ? found_trace->id : -1; map 308 arch/x86/mm/mmio-mod.c mmio_trace_mapping(&map); map 261 arch/x86/net/bpf_jit_comp.c offsetof(struct bpf_array, map.max_entries)); map 1305 arch/x86/net/bpf_jit_comp32.c offsetof(struct bpf_array, map.max_entries)); map 60 arch/x86/pci/i386.c struct pcibios_fwaddrmap *map; map 64 arch/x86/pci/i386.c list_for_each_entry(map, &pcibios_fwaddrmappings, list) map 65 arch/x86/pci/i386.c if (map->dev == dev) map 66 arch/x86/pci/i386.c return map; map 75 arch/x86/pci/i386.c struct pcibios_fwaddrmap *map; map 81 arch/x86/pci/i386.c map = pcibios_fwaddrmap_lookup(dev); map 82 arch/x86/pci/i386.c if (!map) { map 84 arch/x86/pci/i386.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 85 arch/x86/pci/i386.c if (!map) map 88 arch/x86/pci/i386.c map->dev = pci_dev_get(dev); map 89 arch/x86/pci/i386.c map->fw_addr[idx] = fw_addr; map 90 arch/x86/pci/i386.c INIT_LIST_HEAD(&map->list); map 93 arch/x86/pci/i386.c list_add_tail(&map->list, &pcibios_fwaddrmappings); map 95 arch/x86/pci/i386.c map->fw_addr[idx] = fw_addr; map 102 arch/x86/pci/i386.c struct pcibios_fwaddrmap *map; map 109 arch/x86/pci/i386.c map = pcibios_fwaddrmap_lookup(dev); map 110 arch/x86/pci/i386.c if (map) map 111 arch/x86/pci/i386.c fw_addr = map->fw_addr[idx]; map 357 arch/x86/pci/pcbios.c int ret, map; map 379 arch/x86/pci/pcbios.c "=b" (map), map 387 arch/x86/pci/pcbios.c DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); map 395 arch/x86/pci/pcbios.c rt->exclusive_irqs = map; map 41 arch/x86/pci/sta2x11-fixup.c struct sta2x11_mapping map[STA2X11_NR_EP]; map 104 arch/x86/pci/sta2x11-fixup.c return instance->map + ep; map 123 arch/x86/pci/sta2x11-fixup.c struct sta2x11_mapping *map; map 126 arch/x86/pci/sta2x11-fixup.c map = sta2x11_pdev_to_mapping(pdev); map 127 arch/x86/pci/sta2x11-fixup.c a = p + map->amba_base; map 139 arch/x86/pci/sta2x11-fixup.c struct sta2x11_mapping *map; map 142 arch/x86/pci/sta2x11-fixup.c map = sta2x11_pdev_to_mapping(pdev); map 143 arch/x86/pci/sta2x11-fixup.c p = a - map->amba_base; map 174 arch/x86/pci/sta2x11-fixup.c struct sta2x11_mapping *map; map 182 arch/x86/pci/sta2x11-fixup.c map = sta2x11_pdev_to_mapping(to_pci_dev(dev)); map 184 
arch/x86/pci/sta2x11-fixup.c if (!map || (addr < map->amba_base)) map 186 arch/x86/pci/sta2x11-fixup.c if (addr + size >= map->amba_base + STA2X11_AMBA_SIZE) { map 237 arch/x86/pci/sta2x11-fixup.c struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev); map 240 arch/x86/pci/sta2x11-fixup.c if (!map) map 242 arch/x86/pci/sta2x11-fixup.c pci_read_config_dword(pdev, AHB_BASE(0), &map->amba_base); map 256 arch/x86/pci/sta2x11-fixup.c sta2x11_pdev_to_ep(pdev), map->amba_base, map 257 arch/x86/pci/sta2x11-fixup.c map->amba_base + STA2X11_AMBA_SIZE - 1); map 265 arch/x86/pci/sta2x11-fixup.c struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev); map 268 arch/x86/pci/sta2x11-fixup.c if (!map) map 271 arch/x86/pci/sta2x11-fixup.c if (map->is_suspended) map 273 arch/x86/pci/sta2x11-fixup.c map->is_suspended = 1; map 277 arch/x86/pci/sta2x11-fixup.c struct sta2x11_ahb_regs *regs = map->regs + i; map 289 arch/x86/pci/sta2x11-fixup.c struct sta2x11_mapping *map = sta2x11_pdev_to_mapping(pdev); map 292 arch/x86/pci/sta2x11-fixup.c if (!map) map 296 arch/x86/pci/sta2x11-fixup.c if (!map->is_suspended) map 298 arch/x86/pci/sta2x11-fixup.c map->is_suspended = 0; map 302 arch/x86/pci/sta2x11-fixup.c struct sta2x11_ahb_regs *regs = map->regs + i; map 281 arch/x86/platform/efi/efi.c efi_memory_desc_t *out = efi.memmap.map; map 717 arch/x86/platform/efi/efi.c if (entry < efi.memmap.map) map 759 arch/x86/platform/efi/efi.c return efi.memmap.map; map 37 block/blk-mq-cpumap.c unsigned int *map = qmap->mq_map; map 42 block/blk-mq-cpumap.c map[cpu] = -1; map 51 block/blk-mq-cpumap.c map[cpu] = queue_index(qmap, nr_queues, q++); map 55 block/blk-mq-cpumap.c if (map[cpu] != -1) map 64 block/blk-mq-cpumap.c map[cpu] = queue_index(qmap, nr_queues, q++); map 68 block/blk-mq-cpumap.c map[cpu] = queue_index(qmap, nr_queues, q++); map 70 block/blk-mq-cpumap.c map[cpu] = map[first_sibling]; map 24 block/blk-mq-rdma.c int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map, map 30 block/blk-mq-rdma.c for (queue = 0; queue < map->nr_queues; queue++) { map 36 block/blk-mq-rdma.c map->mq_map[cpu] = map->queue_offset + queue; map 42 block/blk-mq-rdma.c return blk_mq_map_queues(map); map 2108 block/blk-mq.c node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); map 2164 block/blk-mq.c node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx); map 2496 block/blk-mq.c hctx_idx = set->map[HCTX_TYPE_DEFAULT].mq_map[i]; map 2506 block/blk-mq.c set->map[HCTX_TYPE_DEFAULT].mq_map[i] = 0; map 2511 block/blk-mq.c if (!set->map[j].nr_queues) { map 2804 block/blk-mq.c node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], i); map 2905 block/blk-mq.c set->map[HCTX_TYPE_POLL].nr_queues) map 3018 block/blk-mq.c set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues; map 3038 block/blk-mq.c blk_mq_clear_mq_map(&set->map[i]); map 3043 block/blk-mq.c return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); map 3107 block/blk-mq.c set->map[i].mq_map = kcalloc_node(nr_cpu_ids, map 3108 block/blk-mq.c sizeof(set->map[i].mq_map[0]), map 3110 block/blk-mq.c if (!set->map[i].mq_map) map 3112 block/blk-mq.c set->map[i].nr_queues = is_kdump_kernel() ? 
1 : set->nr_hw_queues; map 3130 block/blk-mq.c kfree(set->map[i].mq_map); map 3131 block/blk-mq.c set->map[i].mq_map = NULL; map 3147 block/blk-mq.c kfree(set->map[j].mq_map); map 3148 block/blk-mq.c set->map[j].mq_map = NULL; map 3315 block/blk-mq.c blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); map 94 block/blk-mq.h return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; map 405 block/blk-sysfs.c !q->tag_set->map[HCTX_TYPE_POLL].nr_queues) map 1267 block/partitions/ldm.c f->map = 0xFF << num; map 1275 block/partitions/ldm.c if (f->map & (1 << rec)) { map 1277 block/partitions/ldm.c f->map &= 0x7F; /* Mark the group as broken */ map 1280 block/partitions/ldm.c f->map |= (1 << rec); map 1328 block/partitions/ldm.c if (f->map != 0xFF) { map 1330 block/partitions/ldm.c f->group, f->map); map 95 block/partitions/ldm.h u8 map; /* Which portions are in use */ map 301 drivers/acpi/arm64/iort.c static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, map 305 drivers/acpi/arm64/iort.c if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { map 308 drivers/acpi/arm64/iort.c *rid_out = map->output_base; map 313 drivers/acpi/arm64/iort.c map, type); map 317 drivers/acpi/arm64/iort.c if (rid_in < map->input_base || map 318 drivers/acpi/arm64/iort.c (rid_in >= map->input_base + map->id_count)) map 321 drivers/acpi/arm64/iort.c *rid_out = map->output_base + (rid_in - map->input_base); map 329 drivers/acpi/arm64/iort.c struct acpi_iort_id_mapping *map; map 335 drivers/acpi/arm64/iort.c map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, map 336 drivers/acpi/arm64/iort.c node->mapping_offset + index * sizeof(*map)); map 339 drivers/acpi/arm64/iort.c if (!map->output_reference) { map 346 drivers/acpi/arm64/iort.c map->output_reference); map 348 drivers/acpi/arm64/iort.c if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { map 353 drivers/acpi/arm64/iort.c *id_out = map->output_base; map 405 drivers/acpi/arm64/iort.c struct acpi_iort_id_mapping *map; map 417 drivers/acpi/arm64/iort.c map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, map 421 drivers/acpi/arm64/iort.c if (!map->output_reference) { map 435 drivers/acpi/arm64/iort.c for (i = 0; i < node->mapping_count; i++, map++) { map 440 drivers/acpi/arm64/iort.c if (!iort_id_map(map, node->type, id, &id)) map 448 drivers/acpi/arm64/iort.c map->output_reference); map 651 drivers/acpi/arm64/iort.c struct acpi_iort_id_mapping *map; map 660 drivers/acpi/arm64/iort.c map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node, map 661 drivers/acpi/arm64/iort.c node->mapping_offset + index * sizeof(*map)); map 664 drivers/acpi/arm64/iort.c if (!map->output_reference || map 665 drivers/acpi/arm64/iort.c !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) { map 672 drivers/acpi/arm64/iort.c map->output_reference); map 1547 drivers/acpi/arm64/iort.c struct acpi_iort_id_mapping *map; map 1550 drivers/acpi/arm64/iort.c map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node, map 1553 drivers/acpi/arm64/iort.c for (i = 0; i < iort_node->mapping_count; i++, map++) { map 1554 drivers/acpi/arm64/iort.c if (!map->output_reference) map 1558 drivers/acpi/arm64/iort.c iort_table, map->output_reference); map 2313 drivers/acpi/nfit/core.c struct nfit_set_info_map *map = &info->mapping[i]; map 2326 drivers/acpi/nfit/core.c map->region_offset = memdev->region_offset; map 2327 drivers/acpi/nfit/core.c map->serial_number = dcr->serial_number; map 221 drivers/acpi/osl.c struct acpi_ioremap *map; map 223 drivers/acpi/osl.c list_for_each_entry_rcu(map, &acpi_ioremaps, list, 
acpi_ioremap_lock_held()) map 224 drivers/acpi/osl.c if (map->phys <= phys && map 225 drivers/acpi/osl.c phys + size <= map->phys + map->size) map 226 drivers/acpi/osl.c return map; map 235 drivers/acpi/osl.c struct acpi_ioremap *map; map 237 drivers/acpi/osl.c map = acpi_map_lookup(phys, size); map 238 drivers/acpi/osl.c if (map) map 239 drivers/acpi/osl.c return map->virt + (phys - map->phys); map 246 drivers/acpi/osl.c struct acpi_ioremap *map; map 250 drivers/acpi/osl.c map = acpi_map_lookup(phys, size); map 251 drivers/acpi/osl.c if (map) { map 252 drivers/acpi/osl.c virt = map->virt + (phys - map->phys); map 253 drivers/acpi/osl.c map->refcount++; map 264 drivers/acpi/osl.c struct acpi_ioremap *map; map 266 drivers/acpi/osl.c list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held()) map 267 drivers/acpi/osl.c if (map->virt <= virt && map 268 drivers/acpi/osl.c virt + size <= map->virt + map->size) map 269 drivers/acpi/osl.c return map; map 321 drivers/acpi/osl.c struct acpi_ioremap *map; map 336 drivers/acpi/osl.c map = acpi_map_lookup(phys, size); map 337 drivers/acpi/osl.c if (map) { map 338 drivers/acpi/osl.c map->refcount++; map 342 drivers/acpi/osl.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 343 drivers/acpi/osl.c if (!map) { map 353 drivers/acpi/osl.c kfree(map); map 357 drivers/acpi/osl.c INIT_LIST_HEAD(&map->list); map 358 drivers/acpi/osl.c map->virt = virt; map 359 drivers/acpi/osl.c map->phys = pg_off; map 360 drivers/acpi/osl.c map->size = pg_sz; map 361 drivers/acpi/osl.c map->refcount = 1; map 363 drivers/acpi/osl.c list_add_tail_rcu(&map->list, &acpi_ioremaps); map 367 drivers/acpi/osl.c return map->virt + (phys - map->phys); map 378 drivers/acpi/osl.c static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map) map 380 drivers/acpi/osl.c unsigned long refcount = --map->refcount; map 383 drivers/acpi/osl.c list_del_rcu(&map->list); map 387 drivers/acpi/osl.c static void acpi_os_map_cleanup(struct acpi_ioremap *map) map 390 drivers/acpi/osl.c acpi_unmap(map->phys, map->virt); map 391 drivers/acpi/osl.c kfree(map); map 410 drivers/acpi/osl.c struct acpi_ioremap *map; map 419 drivers/acpi/osl.c map = acpi_map_lookup_virt(virt, size); map 420 drivers/acpi/osl.c if (!map) { map 425 drivers/acpi/osl.c refcount = acpi_os_drop_map_ref(map); map 429 drivers/acpi/osl.c acpi_os_map_cleanup(map); map 463 drivers/acpi/osl.c struct acpi_ioremap *map; map 475 drivers/acpi/osl.c map = acpi_map_lookup(addr, gas->bit_width / 8); map 476 drivers/acpi/osl.c if (!map) { map 480 drivers/acpi/osl.c refcount = acpi_os_drop_map_ref(map); map 484 drivers/acpi/osl.c acpi_os_map_cleanup(map); map 1680 drivers/ata/ahci.c u8 map; map 1686 drivers/ata/ahci.c pci_read_config_byte(pdev, ICH_MAP, &map); map 1687 drivers/ata/ahci.c if (map & 0x3) { map 143 drivers/ata/ata_piix.c const int map[][4]; map 147 drivers/ata/ata_piix.c const int *map; map 341 drivers/ata/ata_piix.c .map = { map 357 drivers/ata/ata_piix.c .map = { map 374 drivers/ata/ata_piix.c .map = { map 386 drivers/ata/ata_piix.c .map = { map 398 drivers/ata/ata_piix.c .map = { map 410 drivers/ata/ata_piix.c .map = { map 422 drivers/ata/ata_piix.c .map = { map 1358 drivers/ata/ata_piix.c const int *map; map 1366 drivers/ata/ata_piix.c map = map_db->map[map_value & map_db->mask]; map 1369 drivers/ata/ata_piix.c switch (map[i]) { map 1380 drivers/ata/ata_piix.c WARN_ON((i & 1) || map[i + 1] != IDE); map 1387 drivers/ata/ata_piix.c p += scnprintf(p, end - p, " P%d", map[i]); map 1398 drivers/ata/ata_piix.c return 
map; map 1444 drivers/ata/ata_piix.c if (hpriv->map[i] == IDE) map 1699 drivers/ata/ata_piix.c hpriv->map = piix_init_sata_map(pdev, port_info, map 320 drivers/ata/sata_gemini.c struct regmap *map; map 340 drivers/ata/sata_gemini.c map = syscon_regmap_lookup_by_phandle(np, "syscon"); map 341 drivers/ata/sata_gemini.c if (IS_ERR(map)) { map 343 drivers/ata/sata_gemini.c return PTR_ERR(map); map 376 drivers/ata/sata_gemini.c ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode); map 622 drivers/atm/fore200e.c .map = fore200e_pca_map, map 758 drivers/atm/fore200e.c .map = fore200e_sba_map, map 2509 drivers/atm/fore200e.c if (fore200e->bus->map(fore200e) < 0) map 809 drivers/atm/fore200e.h int (*map)(struct fore200e*); map 388 drivers/base/arch_topology.c struct device_node *cn, *map; map 402 drivers/base/arch_topology.c map = of_get_child_by_name(cn, "cpu-map"); map 403 drivers/base/arch_topology.c if (!map) map 406 drivers/base/arch_topology.c ret = parse_cluster(map, 0); map 421 drivers/base/arch_topology.c of_node_put(map); map 206 drivers/base/cpu.c const struct cpumask *const map; map 215 drivers/base/cpu.c return cpumap_print_to_pagebuf(true, buf, ca->map); map 218 drivers/base/cpu.c #define _CPU_ATTR(name, map) \ map 219 drivers/base/cpu.c { __ATTR(name, 0444, show_cpus_attr, NULL), map } map 35 drivers/base/regmap/internal.h void (*format_write)(struct regmap *map, map 45 drivers/base/regmap/internal.h struct regmap *map; map 169 drivers/base/regmap/internal.h int (*init)(struct regmap *map); map 170 drivers/base/regmap/internal.h int (*exit)(struct regmap *map); map 172 drivers/base/regmap/internal.h void (*debugfs_init)(struct regmap *map); map 174 drivers/base/regmap/internal.h int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); map 175 drivers/base/regmap/internal.h int (*write)(struct regmap *map, unsigned int reg, unsigned int value); map 176 drivers/base/regmap/internal.h int (*sync)(struct regmap *map, unsigned int min, unsigned int max); map 177 drivers/base/regmap/internal.h int (*drop)(struct regmap *map, unsigned int min, unsigned int max); map 180 drivers/base/regmap/internal.h bool regmap_cached(struct regmap *map, unsigned int reg); map 181 drivers/base/regmap/internal.h bool regmap_writeable(struct regmap *map, unsigned int reg); map 182 drivers/base/regmap/internal.h bool regmap_readable(struct regmap *map, unsigned int reg); map 183 drivers/base/regmap/internal.h bool regmap_volatile(struct regmap *map, unsigned int reg); map 184 drivers/base/regmap/internal.h bool regmap_precious(struct regmap *map, unsigned int reg); map 185 drivers/base/regmap/internal.h bool regmap_writeable_noinc(struct regmap *map, unsigned int reg); map 186 drivers/base/regmap/internal.h bool regmap_readable_noinc(struct regmap *map, unsigned int reg); map 188 drivers/base/regmap/internal.h int _regmap_write(struct regmap *map, unsigned int reg, map 194 drivers/base/regmap/internal.h struct regmap *map; map 220 drivers/base/regmap/internal.h extern void regmap_debugfs_init(struct regmap *map, const char *name); map 221 drivers/base/regmap/internal.h extern void regmap_debugfs_exit(struct regmap *map); map 223 drivers/base/regmap/internal.h static inline void regmap_debugfs_disable(struct regmap *map) map 225 drivers/base/regmap/internal.h map->debugfs_disable = true; map 230 drivers/base/regmap/internal.h static inline void regmap_debugfs_init(struct regmap *map, const char *name) { } map 231 drivers/base/regmap/internal.h static inline void 
regmap_debugfs_exit(struct regmap *map) { } map 232 drivers/base/regmap/internal.h static inline void regmap_debugfs_disable(struct regmap *map) { } map 236 drivers/base/regmap/internal.h int regcache_init(struct regmap *map, const struct regmap_config *config); map 237 drivers/base/regmap/internal.h void regcache_exit(struct regmap *map); map 238 drivers/base/regmap/internal.h int regcache_read(struct regmap *map, map 240 drivers/base/regmap/internal.h int regcache_write(struct regmap *map, map 242 drivers/base/regmap/internal.h int regcache_sync(struct regmap *map); map 243 drivers/base/regmap/internal.h int regcache_sync_block(struct regmap *map, void *block, map 248 drivers/base/regmap/internal.h static inline const void *regcache_get_val_addr(struct regmap *map, map 252 drivers/base/regmap/internal.h return base + (map->cache_word_size * idx); map 255 drivers/base/regmap/internal.h unsigned int regcache_get_val(struct regmap *map, const void *base, map 257 drivers/base/regmap/internal.h bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, map 259 drivers/base/regmap/internal.h int regcache_lookup_reg(struct regmap *map, unsigned int reg); map 261 drivers/base/regmap/internal.h int _regmap_raw_write(struct regmap *map, unsigned int reg, map 274 drivers/base/regmap/internal.h static inline const char *regmap_name(const struct regmap *map) map 276 drivers/base/regmap/internal.h if (map->dev) map 277 drivers/base/regmap/internal.h return dev_name(map->dev); map 279 drivers/base/regmap/internal.h return map->name; map 282 drivers/base/regmap/internal.h static inline unsigned int regmap_get_offset(const struct regmap *map, map 285 drivers/base/regmap/internal.h if (map->reg_stride_order >= 0) map 286 drivers/base/regmap/internal.h return index << map->reg_stride_order; map 288 drivers/base/regmap/internal.h return index * map->reg_stride; map 291 drivers/base/regmap/internal.h static inline unsigned int regcache_get_index_by_order(const struct regmap *map, map 294 drivers/base/regmap/internal.h return reg >> map->reg_stride_order; map 15 drivers/base/regmap/regcache-flat.c static inline unsigned int regcache_flat_get_index(const struct regmap *map, map 18 drivers/base/regmap/regcache-flat.c return regcache_get_index_by_order(map, reg); map 21 drivers/base/regmap/regcache-flat.c static int regcache_flat_init(struct regmap *map) map 26 drivers/base/regmap/regcache-flat.c if (!map || map->reg_stride_order < 0 || !map->max_register) map 29 drivers/base/regmap/regcache-flat.c map->cache = kcalloc(regcache_flat_get_index(map, map->max_register) map 31 drivers/base/regmap/regcache-flat.c if (!map->cache) map 34 drivers/base/regmap/regcache-flat.c cache = map->cache; map 36 drivers/base/regmap/regcache-flat.c for (i = 0; i < map->num_reg_defaults; i++) { map 37 drivers/base/regmap/regcache-flat.c unsigned int reg = map->reg_defaults[i].reg; map 38 drivers/base/regmap/regcache-flat.c unsigned int index = regcache_flat_get_index(map, reg); map 40 drivers/base/regmap/regcache-flat.c cache[index] = map->reg_defaults[i].def; map 46 drivers/base/regmap/regcache-flat.c static int regcache_flat_exit(struct regmap *map) map 48 drivers/base/regmap/regcache-flat.c kfree(map->cache); map 49 drivers/base/regmap/regcache-flat.c map->cache = NULL; map 54 drivers/base/regmap/regcache-flat.c static int regcache_flat_read(struct regmap *map, map 57 drivers/base/regmap/regcache-flat.c unsigned int *cache = map->cache; map 58 drivers/base/regmap/regcache-flat.c unsigned int index = 
regcache_flat_get_index(map, reg); map 65 drivers/base/regmap/regcache-flat.c static int regcache_flat_write(struct regmap *map, unsigned int reg, map 68 drivers/base/regmap/regcache-flat.c unsigned int *cache = map->cache; map 69 drivers/base/regmap/regcache-flat.c unsigned int index = regcache_flat_get_index(map, reg); map 15 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_exit(struct regmap *map); map 29 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_block_count(struct regmap *map) map 68 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_compress_cache_block(struct regmap *map, map 86 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_decompress_cache_block(struct regmap *map, map 104 drivers/base/regmap/regcache-lzo.c static inline int regcache_lzo_get_blkindex(struct regmap *map, map 107 drivers/base/regmap/regcache-lzo.c return ((reg / map->reg_stride) * map->cache_word_size) / map 108 drivers/base/regmap/regcache-lzo.c DIV_ROUND_UP(map->cache_size_raw, map 109 drivers/base/regmap/regcache-lzo.c regcache_lzo_block_count(map)); map 112 drivers/base/regmap/regcache-lzo.c static inline int regcache_lzo_get_blkpos(struct regmap *map, map 115 drivers/base/regmap/regcache-lzo.c return (reg / map->reg_stride) % map 116 drivers/base/regmap/regcache-lzo.c (DIV_ROUND_UP(map->cache_size_raw, map 117 drivers/base/regmap/regcache-lzo.c regcache_lzo_block_count(map)) / map 118 drivers/base/regmap/regcache-lzo.c map->cache_word_size); map 121 drivers/base/regmap/regcache-lzo.c static inline int regcache_lzo_get_blksize(struct regmap *map) map 123 drivers/base/regmap/regcache-lzo.c return DIV_ROUND_UP(map->cache_size_raw, map 124 drivers/base/regmap/regcache-lzo.c regcache_lzo_block_count(map)); map 127 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_init(struct regmap *map) map 137 drivers/base/regmap/regcache-lzo.c blkcount = regcache_lzo_block_count(map); map 138 drivers/base/regmap/regcache-lzo.c map->cache = kcalloc(blkcount, sizeof(*lzo_blocks), map 140 drivers/base/regmap/regcache-lzo.c if (!map->cache) map 142 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache; map 150 drivers/base/regmap/regcache-lzo.c bmp_size = map->num_reg_defaults_raw; map 174 drivers/base/regmap/regcache-lzo.c blksize = regcache_lzo_get_blksize(map); map 175 drivers/base/regmap/regcache-lzo.c p = map->reg_defaults_raw; map 176 drivers/base/regmap/regcache-lzo.c end = map->reg_defaults_raw + map->cache_size_raw; map 184 drivers/base/regmap/regcache-lzo.c ret = regcache_lzo_compress_cache_block(map, map 194 drivers/base/regmap/regcache-lzo.c regcache_lzo_exit(map); map 198 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_exit(struct regmap *map) map 203 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache; map 207 drivers/base/regmap/regcache-lzo.c blkcount = regcache_lzo_block_count(map); map 224 drivers/base/regmap/regcache-lzo.c map->cache = NULL; map 228 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_read(struct regmap *map, map 237 drivers/base/regmap/regcache-lzo.c blkindex = regcache_lzo_get_blkindex(map, reg); map 239 drivers/base/regmap/regcache-lzo.c blkpos = regcache_lzo_get_blkpos(map, reg); map 240 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache; map 252 drivers/base/regmap/regcache-lzo.c ret = regcache_lzo_decompress_cache_block(map, lzo_block); map 255 drivers/base/regmap/regcache-lzo.c *value = regcache_get_val(map, lzo_block->dst, blkpos); map 265 drivers/base/regmap/regcache-lzo.c static int 
regcache_lzo_write(struct regmap *map, map 274 drivers/base/regmap/regcache-lzo.c blkindex = regcache_lzo_get_blkindex(map, reg); map 276 drivers/base/regmap/regcache-lzo.c blkpos = regcache_lzo_get_blkpos(map, reg); map 277 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache; map 289 drivers/base/regmap/regcache-lzo.c ret = regcache_lzo_decompress_cache_block(map, lzo_block); map 296 drivers/base/regmap/regcache-lzo.c if (regcache_set_val(map, lzo_block->dst, blkpos, value)) { map 306 drivers/base/regmap/regcache-lzo.c ret = regcache_lzo_compress_cache_block(map, lzo_block); map 314 drivers/base/regmap/regcache-lzo.c set_bit(reg / map->reg_stride, lzo_block->sync_bmp); map 324 drivers/base/regmap/regcache-lzo.c static int regcache_lzo_sync(struct regmap *map, unsigned int min, map 332 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache; map 339 drivers/base/regmap/regcache-lzo.c ret = regcache_read(map, i, &val); map 344 drivers/base/regmap/regcache-lzo.c ret = regcache_lookup_reg(map, i); map 345 drivers/base/regmap/regcache-lzo.c if (ret > 0 && val == map->reg_defaults[ret].def) map 348 drivers/base/regmap/regcache-lzo.c map->cache_bypass = true; map 349 drivers/base/regmap/regcache-lzo.c ret = _regmap_write(map, i, val); map 350 drivers/base/regmap/regcache-lzo.c map->cache_bypass = false; map 353 drivers/base/regmap/regcache-lzo.c dev_dbg(map->dev, "Synced register %#x, value %#x\n", map 17 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_write(struct regmap *map, unsigned int reg, map 19 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_exit(struct regmap *map); map 40 drivers/base/regmap/regcache-rbtree.c struct regmap *map, map 45 drivers/base/regmap/regcache-rbtree.c *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride); map 48 drivers/base/regmap/regcache-rbtree.c static unsigned int regcache_rbtree_get_register(struct regmap *map, map 51 drivers/base/regmap/regcache-rbtree.c return regcache_get_val(map, rbnode->block, idx); map 54 drivers/base/regmap/regcache-rbtree.c static void regcache_rbtree_set_register(struct regmap *map, map 59 drivers/base/regmap/regcache-rbtree.c regcache_set_val(map, rbnode->block, idx, val); map 62 drivers/base/regmap/regcache-rbtree.c static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map, map 65 drivers/base/regmap/regcache-rbtree.c struct regcache_rbtree_ctx *rbtree_ctx = map->cache; map 72 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, map 81 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, map 96 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root, map 109 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp, map 134 drivers/base/regmap/regcache-rbtree.c struct regmap *map = s->private; map 135 drivers/base/regmap/regcache-rbtree.c struct regcache_rbtree_ctx *rbtree_ctx = map->cache; map 144 drivers/base/regmap/regcache-rbtree.c map->lock(map->lock_arg); map 152 drivers/base/regmap/regcache-rbtree.c mem_size += (n->blklen * map->cache_word_size); map 155 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, n, &base, &top); map 156 drivers/base/regmap/regcache-rbtree.c this_registers = ((top - base) / map->reg_stride) + 1; map 171 drivers/base/regmap/regcache-rbtree.c map->unlock(map->lock_arg); map 178 drivers/base/regmap/regcache-rbtree.c 
static void rbtree_debugfs_init(struct regmap *map) map 180 drivers/base/regmap/regcache-rbtree.c debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); map 184 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_init(struct regmap *map) map 190 drivers/base/regmap/regcache-rbtree.c map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); map 191 drivers/base/regmap/regcache-rbtree.c if (!map->cache) map 194 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache; map 198 drivers/base/regmap/regcache-rbtree.c for (i = 0; i < map->num_reg_defaults; i++) { map 199 drivers/base/regmap/regcache-rbtree.c ret = regcache_rbtree_write(map, map 200 drivers/base/regmap/regcache-rbtree.c map->reg_defaults[i].reg, map 201 drivers/base/regmap/regcache-rbtree.c map->reg_defaults[i].def); map 209 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_exit(map); map 213 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_exit(struct regmap *map) map 220 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache; map 236 drivers/base/regmap/regcache-rbtree.c kfree(map->cache); map 237 drivers/base/regmap/regcache-rbtree.c map->cache = NULL; map 242 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_read(struct regmap *map, map 248 drivers/base/regmap/regcache-rbtree.c rbnode = regcache_rbtree_lookup(map, reg); map 250 drivers/base/regmap/regcache-rbtree.c reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; map 253 drivers/base/regmap/regcache-rbtree.c *value = regcache_rbtree_get_register(map, rbnode, reg_tmp); map 262 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_insert_to_block(struct regmap *map, map 274 drivers/base/regmap/regcache-rbtree.c blklen = (top_reg - base_reg) / map->reg_stride + 1; map 275 drivers/base/regmap/regcache-rbtree.c pos = (reg - base_reg) / map->reg_stride; map 276 drivers/base/regmap/regcache-rbtree.c offset = (rbnode->base_reg - base_reg) / map->reg_stride; map 279 drivers/base/regmap/regcache-rbtree.c blklen * map->cache_word_size, map 302 drivers/base/regmap/regcache-rbtree.c memmove(blk + offset * map->cache_word_size, map 303 drivers/base/regmap/regcache-rbtree.c blk, rbnode->blklen * map->cache_word_size); map 313 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_set_register(map, rbnode, pos, value); map 318 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg) map 329 drivers/base/regmap/regcache-rbtree.c if (map->rd_table) { map 330 drivers/base/regmap/regcache-rbtree.c for (i = 0; i < map->rd_table->n_yes_ranges; i++) { map 332 drivers/base/regmap/regcache-rbtree.c &map->rd_table->yes_ranges[i])) map 336 drivers/base/regmap/regcache-rbtree.c if (i != map->rd_table->n_yes_ranges) { map 337 drivers/base/regmap/regcache-rbtree.c range = &map->rd_table->yes_ranges[i]; map 339 drivers/base/regmap/regcache-rbtree.c map->reg_stride + 1; map 349 drivers/base/regmap/regcache-rbtree.c rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size, map 369 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_write(struct regmap *map, unsigned int reg, map 378 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache; map 383 drivers/base/regmap/regcache-rbtree.c rbnode = regcache_rbtree_lookup(map, reg); map 385 drivers/base/regmap/regcache-rbtree.c reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; map 386 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_set_register(map, rbnode, reg_tmp, value); map 394 
drivers/base/regmap/regcache-rbtree.c max_dist = map->reg_stride * sizeof(*rbnode_tmp) / map 395 drivers/base/regmap/regcache-rbtree.c map->cache_word_size; map 408 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, rbnode_tmp, map 440 drivers/base/regmap/regcache-rbtree.c ret = regcache_rbtree_insert_to_block(map, rbnode, map 453 drivers/base/regmap/regcache-rbtree.c rbnode = regcache_rbtree_node_alloc(map, reg); map 456 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_set_register(map, rbnode, map 458 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); map 465 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_sync(struct regmap *map, unsigned int min, map 475 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache; map 479 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, map 487 drivers/base/regmap/regcache-rbtree.c start = (min - base_reg) / map->reg_stride; map 492 drivers/base/regmap/regcache-rbtree.c end = (max - base_reg) / map->reg_stride + 1; map 496 drivers/base/regmap/regcache-rbtree.c ret = regcache_sync_block(map, rbnode->block, map 503 drivers/base/regmap/regcache-rbtree.c return regmap_async_complete(map); map 506 drivers/base/regmap/regcache-rbtree.c static int regcache_rbtree_drop(struct regmap *map, unsigned int min, map 515 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache; map 519 drivers/base/regmap/regcache-rbtree.c regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg, map 527 drivers/base/regmap/regcache-rbtree.c start = (min - base_reg) / map->reg_stride; map 532 drivers/base/regmap/regcache-rbtree.c end = (max - base_reg) / map->reg_stride + 1; map 26 drivers/base/regmap/regcache.c static int regcache_hw_init(struct regmap *map) map 34 drivers/base/regmap/regcache.c if (!map->num_reg_defaults_raw) map 38 drivers/base/regmap/regcache.c for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) map 39 drivers/base/regmap/regcache.c if (regmap_readable(map, i * map->reg_stride) && map 40 drivers/base/regmap/regcache.c !regmap_volatile(map, i * map->reg_stride)) map 45 drivers/base/regmap/regcache.c map->cache_bypass = true; map 49 drivers/base/regmap/regcache.c map->num_reg_defaults = count; map 50 drivers/base/regmap/regcache.c map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default), map 52 drivers/base/regmap/regcache.c if (!map->reg_defaults) map 55 drivers/base/regmap/regcache.c if (!map->reg_defaults_raw) { map 56 drivers/base/regmap/regcache.c bool cache_bypass = map->cache_bypass; map 57 drivers/base/regmap/regcache.c dev_warn(map->dev, "No cache defaults, reading back from HW\n"); map 60 drivers/base/regmap/regcache.c map->cache_bypass = true; map 61 drivers/base/regmap/regcache.c tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); map 66 drivers/base/regmap/regcache.c ret = regmap_raw_read(map, 0, tmp_buf, map 67 drivers/base/regmap/regcache.c map->cache_size_raw); map 68 drivers/base/regmap/regcache.c map->cache_bypass = cache_bypass; map 70 drivers/base/regmap/regcache.c map->reg_defaults_raw = tmp_buf; map 71 drivers/base/regmap/regcache.c map->cache_free = 1; map 78 drivers/base/regmap/regcache.c for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { map 79 drivers/base/regmap/regcache.c reg = i * map->reg_stride; map 81 drivers/base/regmap/regcache.c if (!regmap_readable(map, reg)) map 84 drivers/base/regmap/regcache.c if (regmap_volatile(map, reg)) map 87 drivers/base/regmap/regcache.c if 
(map->reg_defaults_raw) { map 88 drivers/base/regmap/regcache.c val = regcache_get_val(map, map->reg_defaults_raw, i); map 90 drivers/base/regmap/regcache.c bool cache_bypass = map->cache_bypass; map 92 drivers/base/regmap/regcache.c map->cache_bypass = true; map 93 drivers/base/regmap/regcache.c ret = regmap_read(map, reg, &val); map 94 drivers/base/regmap/regcache.c map->cache_bypass = cache_bypass; map 96 drivers/base/regmap/regcache.c dev_err(map->dev, "Failed to read %d: %d\n", map 102 drivers/base/regmap/regcache.c map->reg_defaults[j].reg = reg; map 103 drivers/base/regmap/regcache.c map->reg_defaults[j].def = val; map 110 drivers/base/regmap/regcache.c kfree(map->reg_defaults); map 115 drivers/base/regmap/regcache.c int regcache_init(struct regmap *map, const struct regmap_config *config) map 121 drivers/base/regmap/regcache.c if (map->cache_type == REGCACHE_NONE) { map 123 drivers/base/regmap/regcache.c dev_warn(map->dev, map 126 drivers/base/regmap/regcache.c map->cache_bypass = true; map 131 drivers/base/regmap/regcache.c dev_err(map->dev, map 137 drivers/base/regmap/regcache.c if (config->reg_defaults[i].reg % map->reg_stride) map 141 drivers/base/regmap/regcache.c if (cache_types[i]->type == map->cache_type) map 145 drivers/base/regmap/regcache.c dev_err(map->dev, "Could not match compress type: %d\n", map 146 drivers/base/regmap/regcache.c map->cache_type); map 150 drivers/base/regmap/regcache.c map->num_reg_defaults = config->num_reg_defaults; map 151 drivers/base/regmap/regcache.c map->num_reg_defaults_raw = config->num_reg_defaults_raw; map 152 drivers/base/regmap/regcache.c map->reg_defaults_raw = config->reg_defaults_raw; map 153 drivers/base/regmap/regcache.c map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8); map 154 drivers/base/regmap/regcache.c map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw; map 156 drivers/base/regmap/regcache.c map->cache = NULL; map 157 drivers/base/regmap/regcache.c map->cache_ops = cache_types[i]; map 159 drivers/base/regmap/regcache.c if (!map->cache_ops->read || map 160 drivers/base/regmap/regcache.c !map->cache_ops->write || map 161 drivers/base/regmap/regcache.c !map->cache_ops->name) map 169 drivers/base/regmap/regcache.c tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults * map 173 drivers/base/regmap/regcache.c map->reg_defaults = tmp_buf; map 174 drivers/base/regmap/regcache.c } else if (map->num_reg_defaults_raw) { map 179 drivers/base/regmap/regcache.c ret = regcache_hw_init(map); map 182 drivers/base/regmap/regcache.c if (map->cache_bypass) map 186 drivers/base/regmap/regcache.c if (!map->max_register) map 187 drivers/base/regmap/regcache.c map->max_register = map->num_reg_defaults_raw; map 189 drivers/base/regmap/regcache.c if (map->cache_ops->init) { map 190 drivers/base/regmap/regcache.c dev_dbg(map->dev, "Initializing %s cache\n", map 191 drivers/base/regmap/regcache.c map->cache_ops->name); map 192 drivers/base/regmap/regcache.c ret = map->cache_ops->init(map); map 199 drivers/base/regmap/regcache.c kfree(map->reg_defaults); map 200 drivers/base/regmap/regcache.c if (map->cache_free) map 201 drivers/base/regmap/regcache.c kfree(map->reg_defaults_raw); map 206 drivers/base/regmap/regcache.c void regcache_exit(struct regmap *map) map 208 drivers/base/regmap/regcache.c if (map->cache_type == REGCACHE_NONE) map 211 drivers/base/regmap/regcache.c BUG_ON(!map->cache_ops); map 213 drivers/base/regmap/regcache.c kfree(map->reg_defaults); map 214 drivers/base/regmap/regcache.c if 
(map->cache_free) map 215 drivers/base/regmap/regcache.c kfree(map->reg_defaults_raw); map 217 drivers/base/regmap/regcache.c if (map->cache_ops->exit) { map 218 drivers/base/regmap/regcache.c dev_dbg(map->dev, "Destroying %s cache\n", map 219 drivers/base/regmap/regcache.c map->cache_ops->name); map 220 drivers/base/regmap/regcache.c map->cache_ops->exit(map); map 233 drivers/base/regmap/regcache.c int regcache_read(struct regmap *map, map 238 drivers/base/regmap/regcache.c if (map->cache_type == REGCACHE_NONE) map 241 drivers/base/regmap/regcache.c BUG_ON(!map->cache_ops); map 243 drivers/base/regmap/regcache.c if (!regmap_volatile(map, reg)) { map 244 drivers/base/regmap/regcache.c ret = map->cache_ops->read(map, reg, value); map 247 drivers/base/regmap/regcache.c trace_regmap_reg_read_cache(map, reg, *value); map 264 drivers/base/regmap/regcache.c int regcache_write(struct regmap *map, map 267 drivers/base/regmap/regcache.c if (map->cache_type == REGCACHE_NONE) map 270 drivers/base/regmap/regcache.c BUG_ON(!map->cache_ops); map 272 drivers/base/regmap/regcache.c if (!regmap_volatile(map, reg)) map 273 drivers/base/regmap/regcache.c return map->cache_ops->write(map, reg, value); map 278 drivers/base/regmap/regcache.c static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg, map 284 drivers/base/regmap/regcache.c if (!map->no_sync_defaults) map 288 drivers/base/regmap/regcache.c ret = regcache_lookup_reg(map, reg); map 289 drivers/base/regmap/regcache.c if (ret >= 0 && val == map->reg_defaults[ret].def) map 294 drivers/base/regmap/regcache.c static int regcache_default_sync(struct regmap *map, unsigned int min, map 299 drivers/base/regmap/regcache.c for (reg = min; reg <= max; reg += map->reg_stride) { map 303 drivers/base/regmap/regcache.c if (regmap_volatile(map, reg) || map 304 drivers/base/regmap/regcache.c !regmap_writeable(map, reg)) map 307 drivers/base/regmap/regcache.c ret = regcache_read(map, reg, &val); map 311 drivers/base/regmap/regcache.c if (!regcache_reg_needs_sync(map, reg, val)) map 314 drivers/base/regmap/regcache.c map->cache_bypass = true; map 315 drivers/base/regmap/regcache.c ret = _regmap_write(map, reg, val); map 316 drivers/base/regmap/regcache.c map->cache_bypass = false; map 318 drivers/base/regmap/regcache.c dev_err(map->dev, "Unable to sync register %#x. 
%d\n", map 322 drivers/base/regmap/regcache.c dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val); map 339 drivers/base/regmap/regcache.c int regcache_sync(struct regmap *map) map 346 drivers/base/regmap/regcache.c BUG_ON(!map->cache_ops); map 348 drivers/base/regmap/regcache.c map->lock(map->lock_arg); map 350 drivers/base/regmap/regcache.c bypass = map->cache_bypass; map 351 drivers/base/regmap/regcache.c dev_dbg(map->dev, "Syncing %s cache\n", map 352 drivers/base/regmap/regcache.c map->cache_ops->name); map 353 drivers/base/regmap/regcache.c name = map->cache_ops->name; map 354 drivers/base/regmap/regcache.c trace_regcache_sync(map, name, "start"); map 356 drivers/base/regmap/regcache.c if (!map->cache_dirty) map 359 drivers/base/regmap/regcache.c map->async = true; map 362 drivers/base/regmap/regcache.c map->cache_bypass = true; map 363 drivers/base/regmap/regcache.c for (i = 0; i < map->patch_regs; i++) { map 364 drivers/base/regmap/regcache.c ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def); map 366 drivers/base/regmap/regcache.c dev_err(map->dev, "Failed to write %x = %x: %d\n", map 367 drivers/base/regmap/regcache.c map->patch[i].reg, map->patch[i].def, ret); map 371 drivers/base/regmap/regcache.c map->cache_bypass = false; map 373 drivers/base/regmap/regcache.c if (map->cache_ops->sync) map 374 drivers/base/regmap/regcache.c ret = map->cache_ops->sync(map, 0, map->max_register); map 376 drivers/base/regmap/regcache.c ret = regcache_default_sync(map, 0, map->max_register); map 379 drivers/base/regmap/regcache.c map->cache_dirty = false; map 383 drivers/base/regmap/regcache.c map->async = false; map 384 drivers/base/regmap/regcache.c map->cache_bypass = bypass; map 385 drivers/base/regmap/regcache.c map->no_sync_defaults = false; map 386 drivers/base/regmap/regcache.c map->unlock(map->lock_arg); map 388 drivers/base/regmap/regcache.c regmap_async_complete(map); map 390 drivers/base/regmap/regcache.c trace_regcache_sync(map, name, "stop"); map 408 drivers/base/regmap/regcache.c int regcache_sync_region(struct regmap *map, unsigned int min, map 415 drivers/base/regmap/regcache.c BUG_ON(!map->cache_ops); map 417 drivers/base/regmap/regcache.c map->lock(map->lock_arg); map 420 drivers/base/regmap/regcache.c bypass = map->cache_bypass; map 422 drivers/base/regmap/regcache.c name = map->cache_ops->name; map 423 drivers/base/regmap/regcache.c dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max); map 425 drivers/base/regmap/regcache.c trace_regcache_sync(map, name, "start region"); map 427 drivers/base/regmap/regcache.c if (!map->cache_dirty) map 430 drivers/base/regmap/regcache.c map->async = true; map 432 drivers/base/regmap/regcache.c if (map->cache_ops->sync) map 433 drivers/base/regmap/regcache.c ret = map->cache_ops->sync(map, min, max); map 435 drivers/base/regmap/regcache.c ret = regcache_default_sync(map, min, max); map 439 drivers/base/regmap/regcache.c map->cache_bypass = bypass; map 440 drivers/base/regmap/regcache.c map->async = false; map 441 drivers/base/regmap/regcache.c map->no_sync_defaults = false; map 442 drivers/base/regmap/regcache.c map->unlock(map->lock_arg); map 444 drivers/base/regmap/regcache.c regmap_async_complete(map); map 446 drivers/base/regmap/regcache.c trace_regcache_sync(map, name, "stop region"); map 463 drivers/base/regmap/regcache.c int regcache_drop_region(struct regmap *map, unsigned int min, map 468 drivers/base/regmap/regcache.c if (!map->cache_ops || !map->cache_ops->drop) map 471 
drivers/base/regmap/regcache.c map->lock(map->lock_arg); map 473 drivers/base/regmap/regcache.c trace_regcache_drop_region(map, min, max); map 475 drivers/base/regmap/regcache.c ret = map->cache_ops->drop(map, min, max); map 477 drivers/base/regmap/regcache.c map->unlock(map->lock_arg); map 495 drivers/base/regmap/regcache.c void regcache_cache_only(struct regmap *map, bool enable) map 497 drivers/base/regmap/regcache.c map->lock(map->lock_arg); map 498 drivers/base/regmap/regcache.c WARN_ON(map->cache_bypass && enable); map 499 drivers/base/regmap/regcache.c map->cache_only = enable; map 500 drivers/base/regmap/regcache.c trace_regmap_cache_only(map, enable); map 501 drivers/base/regmap/regcache.c map->unlock(map->lock_arg); map 518 drivers/base/regmap/regcache.c void regcache_mark_dirty(struct regmap *map) map 520 drivers/base/regmap/regcache.c map->lock(map->lock_arg); map 521 drivers/base/regmap/regcache.c map->cache_dirty = true; map 522 drivers/base/regmap/regcache.c map->no_sync_defaults = true; map 523 drivers/base/regmap/regcache.c map->unlock(map->lock_arg); map 538 drivers/base/regmap/regcache.c void regcache_cache_bypass(struct regmap *map, bool enable) map 540 drivers/base/regmap/regcache.c map->lock(map->lock_arg); map 541 drivers/base/regmap/regcache.c WARN_ON(map->cache_only && enable); map 542 drivers/base/regmap/regcache.c map->cache_bypass = enable; map 543 drivers/base/regmap/regcache.c trace_regmap_cache_bypass(map, enable); map 544 drivers/base/regmap/regcache.c map->unlock(map->lock_arg); map 548 drivers/base/regmap/regcache.c bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, map 551 drivers/base/regmap/regcache.c if (regcache_get_val(map, base, idx) == val) map 555 drivers/base/regmap/regcache.c if (map->format.format_val) { map 556 drivers/base/regmap/regcache.c map->format.format_val(base + (map->cache_word_size * idx), map 561 drivers/base/regmap/regcache.c switch (map->cache_word_size) { map 594 drivers/base/regmap/regcache.c unsigned int regcache_get_val(struct regmap *map, const void *base, map 601 drivers/base/regmap/regcache.c if (map->format.parse_val) map 602 drivers/base/regmap/regcache.c return map->format.parse_val(regcache_get_val_addr(map, base, map 605 drivers/base/regmap/regcache.c switch (map->cache_word_size) { map 643 drivers/base/regmap/regcache.c int regcache_lookup_reg(struct regmap *map, unsigned int reg) map 651 drivers/base/regmap/regcache.c r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, map 655 drivers/base/regmap/regcache.c return r - map->reg_defaults; map 668 drivers/base/regmap/regcache.c static int regcache_sync_block_single(struct regmap *map, void *block, map 677 drivers/base/regmap/regcache.c regtmp = block_base + (i * map->reg_stride); map 680 drivers/base/regmap/regcache.c !regmap_writeable(map, regtmp)) map 683 drivers/base/regmap/regcache.c val = regcache_get_val(map, block, i); map 684 drivers/base/regmap/regcache.c if (!regcache_reg_needs_sync(map, regtmp, val)) map 687 drivers/base/regmap/regcache.c map->cache_bypass = true; map 689 drivers/base/regmap/regcache.c ret = _regmap_write(map, regtmp, val); map 691 drivers/base/regmap/regcache.c map->cache_bypass = false; map 693 drivers/base/regmap/regcache.c dev_err(map->dev, "Unable to sync register %#x. 
%d\n", map 697 drivers/base/regmap/regcache.c dev_dbg(map->dev, "Synced register %#x, value %#x\n", map 704 drivers/base/regmap/regcache.c static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, map 707 drivers/base/regmap/regcache.c size_t val_bytes = map->format.val_bytes; map 713 drivers/base/regmap/regcache.c count = (cur - base) / map->reg_stride; map 715 drivers/base/regmap/regcache.c dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n", map 716 drivers/base/regmap/regcache.c count * val_bytes, count, base, cur - map->reg_stride); map 718 drivers/base/regmap/regcache.c map->cache_bypass = true; map 720 drivers/base/regmap/regcache.c ret = _regmap_raw_write(map, base, *data, count * val_bytes); map 722 drivers/base/regmap/regcache.c dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n", map 723 drivers/base/regmap/regcache.c base, cur - map->reg_stride, ret); map 725 drivers/base/regmap/regcache.c map->cache_bypass = false; map 732 drivers/base/regmap/regcache.c static int regcache_sync_block_raw(struct regmap *map, void *block, map 744 drivers/base/regmap/regcache.c regtmp = block_base + (i * map->reg_stride); map 747 drivers/base/regmap/regcache.c !regmap_writeable(map, regtmp)) { map 748 drivers/base/regmap/regcache.c ret = regcache_sync_block_raw_flush(map, &data, map 755 drivers/base/regmap/regcache.c val = regcache_get_val(map, block, i); map 756 drivers/base/regmap/regcache.c if (!regcache_reg_needs_sync(map, regtmp, val)) { map 757 drivers/base/regmap/regcache.c ret = regcache_sync_block_raw_flush(map, &data, map 765 drivers/base/regmap/regcache.c data = regcache_get_val_addr(map, block, i); map 770 drivers/base/regmap/regcache.c return regcache_sync_block_raw_flush(map, &data, base, regtmp + map 771 drivers/base/regmap/regcache.c map->reg_stride); map 774 drivers/base/regmap/regcache.c int regcache_sync_block(struct regmap *map, void *block, map 779 drivers/base/regmap/regcache.c if (regmap_can_raw_write(map) && !map->use_single_write) map 780 drivers/base/regmap/regcache.c return regcache_sync_block_raw(map, block, cache_present, map 783 drivers/base/regmap/regcache.c return regcache_sync_block_single(map, block, cache_present, map 19 drivers/base/regmap/regmap-debugfs.c struct regmap *map; map 39 drivers/base/regmap/regmap-debugfs.c struct regmap *map = file->private_data; map 48 drivers/base/regmap/regmap-debugfs.c if (map->dev && map->dev->driver) map 49 drivers/base/regmap/regmap-debugfs.c name = map->dev->driver->name; map 68 drivers/base/regmap/regmap-debugfs.c static void regmap_debugfs_free_dump_cache(struct regmap *map) map 72 drivers/base/regmap/regmap-debugfs.c while (!list_empty(&map->debugfs_off_cache)) { map 73 drivers/base/regmap/regmap-debugfs.c c = list_first_entry(&map->debugfs_off_cache, map 81 drivers/base/regmap/regmap-debugfs.c static bool regmap_printable(struct regmap *map, unsigned int reg) map 83 drivers/base/regmap/regmap-debugfs.c if (regmap_precious(map, reg)) map 86 drivers/base/regmap/regmap-debugfs.c if (!regmap_readable(map, reg) && !regmap_cached(map, reg)) map 96 drivers/base/regmap/regmap-debugfs.c static unsigned int regmap_debugfs_get_dump_start(struct regmap *map, map 115 drivers/base/regmap/regmap-debugfs.c mutex_lock(&map->cache_lock); map 117 drivers/base/regmap/regmap-debugfs.c if (list_empty(&map->debugfs_off_cache)) { map 118 drivers/base/regmap/regmap-debugfs.c for (; i <= map->max_register; i += map->reg_stride) { map 120 drivers/base/regmap/regmap-debugfs.c if 
(!regmap_printable(map, i)) { map 123 drivers/base/regmap/regmap-debugfs.c c->max_reg = i - map->reg_stride; map 125 drivers/base/regmap/regmap-debugfs.c &map->debugfs_off_cache); map 136 drivers/base/regmap/regmap-debugfs.c regmap_debugfs_free_dump_cache(map); map 137 drivers/base/regmap/regmap-debugfs.c mutex_unlock(&map->cache_lock); map 144 drivers/base/regmap/regmap-debugfs.c p += map->debugfs_tot_len; map 151 drivers/base/regmap/regmap-debugfs.c c->max_reg = i - map->reg_stride; map 153 drivers/base/regmap/regmap-debugfs.c &map->debugfs_off_cache); map 161 drivers/base/regmap/regmap-debugfs.c WARN_ON(list_empty(&map->debugfs_off_cache)); map 165 drivers/base/regmap/regmap-debugfs.c list_for_each_entry(c, &map->debugfs_off_cache, list) { map 168 drivers/base/regmap/regmap-debugfs.c reg_offset = fpos_offset / map->debugfs_tot_len; map 169 drivers/base/regmap/regmap-debugfs.c *pos = c->min + (reg_offset * map->debugfs_tot_len); map 170 drivers/base/regmap/regmap-debugfs.c mutex_unlock(&map->cache_lock); map 171 drivers/base/regmap/regmap-debugfs.c return c->base_reg + (reg_offset * map->reg_stride); map 177 drivers/base/regmap/regmap-debugfs.c mutex_unlock(&map->cache_lock); map 182 drivers/base/regmap/regmap-debugfs.c static inline void regmap_calc_tot_len(struct regmap *map, map 186 drivers/base/regmap/regmap-debugfs.c if (!map->debugfs_tot_len) { map 187 drivers/base/regmap/regmap-debugfs.c map->debugfs_reg_len = regmap_calc_reg_len(map->max_register), map 188 drivers/base/regmap/regmap-debugfs.c map->debugfs_val_len = 2 * map->format.val_bytes; map 189 drivers/base/regmap/regmap-debugfs.c map->debugfs_tot_len = map->debugfs_reg_len + map 190 drivers/base/regmap/regmap-debugfs.c map->debugfs_val_len + 3; /* : \n */ map 194 drivers/base/regmap/regmap-debugfs.c static int regmap_next_readable_reg(struct regmap *map, int reg) map 199 drivers/base/regmap/regmap-debugfs.c if (regmap_printable(map, reg + map->reg_stride)) { map 200 drivers/base/regmap/regmap-debugfs.c ret = reg + map->reg_stride; map 202 drivers/base/regmap/regmap-debugfs.c mutex_lock(&map->cache_lock); map 203 drivers/base/regmap/regmap-debugfs.c list_for_each_entry(c, &map->debugfs_off_cache, list) { map 211 drivers/base/regmap/regmap-debugfs.c mutex_unlock(&map->cache_lock); map 216 drivers/base/regmap/regmap-debugfs.c static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, map 234 drivers/base/regmap/regmap-debugfs.c regmap_calc_tot_len(map, buf, count); map 237 drivers/base/regmap/regmap-debugfs.c start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p); map 240 drivers/base/regmap/regmap-debugfs.c i = regmap_next_readable_reg(map, i)) { map 245 drivers/base/regmap/regmap-debugfs.c if (buf_pos + map->debugfs_tot_len > count) map 250 drivers/base/regmap/regmap-debugfs.c map->debugfs_reg_len, i - from); map 251 drivers/base/regmap/regmap-debugfs.c buf_pos += map->debugfs_reg_len + 2; map 254 drivers/base/regmap/regmap-debugfs.c ret = regmap_read(map, i, &val); map 257 drivers/base/regmap/regmap-debugfs.c "%.*x", map->debugfs_val_len, val); map 260 drivers/base/regmap/regmap-debugfs.c map->debugfs_val_len); map 261 drivers/base/regmap/regmap-debugfs.c buf_pos += 2 * map->format.val_bytes; map 265 drivers/base/regmap/regmap-debugfs.c p += map->debugfs_tot_len; map 285 drivers/base/regmap/regmap-debugfs.c struct regmap *map = file->private_data; map 287 drivers/base/regmap/regmap-debugfs.c return regmap_read_debugfs(map, 0, map->max_register, user_buf, map 307 drivers/base/regmap/regmap-debugfs.c 
struct regmap *map = file->private_data; map 326 drivers/base/regmap/regmap-debugfs.c ret = regmap_write(map, reg, value); map 346 drivers/base/regmap/regmap-debugfs.c struct regmap *map = range->map; map 348 drivers/base/regmap/regmap-debugfs.c return regmap_read_debugfs(map, range->range_min, range->range_max, map 362 drivers/base/regmap/regmap-debugfs.c struct regmap *map = file->private_data; map 389 drivers/base/regmap/regmap-debugfs.c regmap_calc_tot_len(map, buf, count); map 390 drivers/base/regmap/regmap-debugfs.c regmap_debugfs_get_dump_start(map, 0, *ppos, &p); map 395 drivers/base/regmap/regmap-debugfs.c mutex_lock(&map->cache_lock); map 396 drivers/base/regmap/regmap-debugfs.c list_for_each_entry(c, &map->debugfs_off_cache, list) { map 407 drivers/base/regmap/regmap-debugfs.c mutex_unlock(&map->cache_lock); map 431 drivers/base/regmap/regmap-debugfs.c struct regmap *map = s->private; map 434 drivers/base/regmap/regmap-debugfs.c reg_len = regmap_calc_reg_len(map->max_register); map 436 drivers/base/regmap/regmap-debugfs.c for (i = 0; i <= map->max_register; i += map->reg_stride) { map 438 drivers/base/regmap/regmap-debugfs.c if (!regmap_readable(map, i) && !regmap_writeable(map, i)) map 443 drivers/base/regmap/regmap-debugfs.c regmap_readable(map, i) ? 'y' : 'n', map 444 drivers/base/regmap/regmap-debugfs.c regmap_writeable(map, i) ? 'y' : 'n', map 445 drivers/base/regmap/regmap-debugfs.c regmap_volatile(map, i) ? 'y' : 'n', map 446 drivers/base/regmap/regmap-debugfs.c regmap_precious(map, i) ? 'y' : 'n'); map 458 drivers/base/regmap/regmap-debugfs.c struct regmap *map = container_of(file->private_data, map 464 drivers/base/regmap/regmap-debugfs.c map->lock(map->lock_arg); map 466 drivers/base/regmap/regmap-debugfs.c was_enabled = map->cache_only; map 470 drivers/base/regmap/regmap-debugfs.c map->unlock(map->lock_arg); map 474 drivers/base/regmap/regmap-debugfs.c if (map->cache_only && !was_enabled) { map 475 drivers/base/regmap/regmap-debugfs.c dev_warn(map->dev, "debugfs cache_only=Y forced\n"); map 477 drivers/base/regmap/regmap-debugfs.c } else if (!map->cache_only && was_enabled) { map 478 drivers/base/regmap/regmap-debugfs.c dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n"); map 482 drivers/base/regmap/regmap-debugfs.c map->unlock(map->lock_arg); map 485 drivers/base/regmap/regmap-debugfs.c err = regcache_sync(map); map 487 drivers/base/regmap/regmap-debugfs.c dev_err(map->dev, "Failed to sync cache %d\n", err); map 503 drivers/base/regmap/regmap-debugfs.c struct regmap *map = container_of(file->private_data, map 508 drivers/base/regmap/regmap-debugfs.c map->lock(map->lock_arg); map 510 drivers/base/regmap/regmap-debugfs.c was_enabled = map->cache_bypass; map 516 drivers/base/regmap/regmap-debugfs.c if (map->cache_bypass && !was_enabled) { map 517 drivers/base/regmap/regmap-debugfs.c dev_warn(map->dev, "debugfs cache_bypass=Y forced\n"); map 519 drivers/base/regmap/regmap-debugfs.c } else if (!map->cache_bypass && was_enabled) { map 520 drivers/base/regmap/regmap-debugfs.c dev_warn(map->dev, "debugfs cache_bypass=N forced\n"); map 524 drivers/base/regmap/regmap-debugfs.c map->unlock(map->lock_arg); map 535 drivers/base/regmap/regmap-debugfs.c void regmap_debugfs_init(struct regmap *map, const char *name) map 548 drivers/base/regmap/regmap-debugfs.c if (map->debugfs_disable) { map 549 drivers/base/regmap/regmap-debugfs.c dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n"); map 559 drivers/base/regmap/regmap-debugfs.c node->map = 
map; map 567 drivers/base/regmap/regmap-debugfs.c INIT_LIST_HEAD(&map->debugfs_off_cache); map 568 drivers/base/regmap/regmap-debugfs.c mutex_init(&map->cache_lock); map 570 drivers/base/regmap/regmap-debugfs.c if (map->dev) map 571 drivers/base/regmap/regmap-debugfs.c devname = dev_name(map->dev); map 574 drivers/base/regmap/regmap-debugfs.c map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", map 576 drivers/base/regmap/regmap-debugfs.c name = map->debugfs_name; map 582 drivers/base/regmap/regmap-debugfs.c kfree(map->debugfs_name); map 584 drivers/base/regmap/regmap-debugfs.c map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d", map 586 drivers/base/regmap/regmap-debugfs.c name = map->debugfs_name; map 590 drivers/base/regmap/regmap-debugfs.c map->debugfs = debugfs_create_dir(name, regmap_debugfs_root); map 592 drivers/base/regmap/regmap-debugfs.c debugfs_create_file("name", 0400, map->debugfs, map 593 drivers/base/regmap/regmap-debugfs.c map, &regmap_name_fops); map 595 drivers/base/regmap/regmap-debugfs.c debugfs_create_file("range", 0400, map->debugfs, map 596 drivers/base/regmap/regmap-debugfs.c map, &regmap_reg_ranges_fops); map 598 drivers/base/regmap/regmap-debugfs.c if (map->max_register || regmap_readable(map, 0)) { map 607 drivers/base/regmap/regmap-debugfs.c debugfs_create_file("registers", registers_mode, map->debugfs, map 608 drivers/base/regmap/regmap-debugfs.c map, &regmap_map_fops); map 609 drivers/base/regmap/regmap-debugfs.c debugfs_create_file("access", 0400, map->debugfs, map 610 drivers/base/regmap/regmap-debugfs.c map, &regmap_access_fops); map 613 drivers/base/regmap/regmap-debugfs.c if (map->cache_type) { map 614 drivers/base/regmap/regmap-debugfs.c debugfs_create_file("cache_only", 0600, map->debugfs, map 615 drivers/base/regmap/regmap-debugfs.c &map->cache_only, &regmap_cache_only_fops); map 616 drivers/base/regmap/regmap-debugfs.c debugfs_create_bool("cache_dirty", 0400, map->debugfs, map 617 drivers/base/regmap/regmap-debugfs.c &map->cache_dirty); map 618 drivers/base/regmap/regmap-debugfs.c debugfs_create_file("cache_bypass", 0600, map->debugfs, map 619 drivers/base/regmap/regmap-debugfs.c &map->cache_bypass, map 623 drivers/base/regmap/regmap-debugfs.c next = rb_first(&map->range_tree); map 629 drivers/base/regmap/regmap-debugfs.c map->debugfs, range_node, map 635 drivers/base/regmap/regmap-debugfs.c if (map->cache_ops && map->cache_ops->debugfs_init) map 636 drivers/base/regmap/regmap-debugfs.c map->cache_ops->debugfs_init(map); map 639 drivers/base/regmap/regmap-debugfs.c void regmap_debugfs_exit(struct regmap *map) map 641 drivers/base/regmap/regmap-debugfs.c if (map->debugfs) { map 642 drivers/base/regmap/regmap-debugfs.c debugfs_remove_recursive(map->debugfs); map 643 drivers/base/regmap/regmap-debugfs.c mutex_lock(&map->cache_lock); map 644 drivers/base/regmap/regmap-debugfs.c regmap_debugfs_free_dump_cache(map); map 645 drivers/base/regmap/regmap-debugfs.c mutex_unlock(&map->cache_lock); map 646 drivers/base/regmap/regmap-debugfs.c kfree(map->debugfs_name); map 653 drivers/base/regmap/regmap-debugfs.c if (node->map == map) { map 670 drivers/base/regmap/regmap-debugfs.c regmap_debugfs_init(node->map, node->name); map 24 drivers/base/regmap/regmap-irq.c struct regmap *map; map 67 drivers/base/regmap/regmap-irq.c return regmap_write_bits(d->map, reg, mask, val); map 69 drivers/base/regmap/regmap-irq.c return regmap_update_bits(d->map, reg, mask, val); map 75 drivers/base/regmap/regmap-irq.c struct regmap *map = d->map; map 82 drivers/base/regmap/regmap-irq.c ret =
pm_runtime_get_sync(map->dev); map 84 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "IRQ sync failed to resume: %d\n", map 91 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 93 drivers/base/regmap/regmap-irq.c ret = regmap_read(map, reg, &val); map 95 drivers/base/regmap/regmap-irq.c dev_err(d->map->dev, map 112 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 121 drivers/base/regmap/regmap-irq.c dev_err(d->map->dev, map 136 drivers/base/regmap/regmap-irq.c dev_err(d->map->dev, "Failed to sync masks in %x\n", map 140 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 151 drivers/base/regmap/regmap-irq.c dev_err(d->map->dev, map 165 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 168 drivers/base/regmap/regmap-irq.c ret = regmap_write(map, reg, ~d->mask_buf[i]); map 170 drivers/base/regmap/regmap-irq.c ret = regmap_write(map, reg, d->mask_buf[i]); map 172 drivers/base/regmap/regmap-irq.c dev_err(d->map->dev, "Failed to ack 0x%x: %d\n", map 183 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->type_reg_stride); map 191 drivers/base/regmap/regmap-irq.c dev_err(d->map->dev, "Failed to sync type in %x\n", map 197 drivers/base/regmap/regmap-irq.c pm_runtime_put(map->dev); map 215 drivers/base/regmap/regmap-irq.c struct regmap *map = d->map; map 233 drivers/base/regmap/regmap-irq.c mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; map 240 drivers/base/regmap/regmap-irq.c d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; map 246 drivers/base/regmap/regmap-irq.c struct regmap *map = d->map; map 249 drivers/base/regmap/regmap-irq.c d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask; map 255 drivers/base/regmap/regmap-irq.c struct regmap *map = d->map; map 263 drivers/base/regmap/regmap-irq.c reg = t->type_reg_offset / map->reg_stride; map 302 drivers/base/regmap/regmap-irq.c struct regmap *map = d->map; map 307 drivers/base/regmap/regmap-irq.c d->wake_buf[irq_data->reg_offset / map->reg_stride] map 312 drivers/base/regmap/regmap-irq.c d->wake_buf[irq_data->reg_offset / map->reg_stride] map 333 drivers/base/regmap/regmap-irq.c struct regmap *map = data->map; map 339 drivers/base/regmap/regmap-irq.c ret = regmap_read(map, chip->status_base + map 340 drivers/base/regmap/regmap-irq.c (b * map->reg_stride * data->irq_reg_stride), map 347 drivers/base/regmap/regmap-irq.c ret = regmap_read(map, chip->status_base + offset, map 360 drivers/base/regmap/regmap-irq.c struct regmap *map = data->map; map 369 drivers/base/regmap/regmap-irq.c ret = pm_runtime_get_sync(map->dev); map 371 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "IRQ thread failed to resume: %d\n", map 400 drivers/base/regmap/regmap-irq.c ret = regmap_read(map, chip->main_status + map 401 drivers/base/regmap/regmap-irq.c (i * map->reg_stride map 405 drivers/base/regmap/regmap-irq.c dev_err(map->dev, map 417 drivers/base/regmap/regmap-irq.c for_each_set_bit(b, &mreg, map->format.val_bytes * 8) { map 418 drivers/base/regmap/regmap-irq.c if (i * map->format.val_bytes * 8 + b > map 424 drivers/base/regmap/regmap-irq.c dev_err(map->dev, map 432 drivers/base/regmap/regmap-irq.c } else if (!map->use_single_read && map->reg_stride == 1 && map 441 drivers/base/regmap/regmap-irq.c ret = regmap_bulk_read(map, chip->status_base, map 445 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to read IRQ status: %d\n", map 451 drivers/base/regmap/regmap-irq.c switch 
(map->format.val_bytes) { map 469 drivers/base/regmap/regmap-irq.c ret = regmap_read(map, chip->status_base + map 470 drivers/base/regmap/regmap-irq.c (i * map->reg_stride map 475 drivers/base/regmap/regmap-irq.c dev_err(map->dev, map 495 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * data->irq_reg_stride); map 496 drivers/base/regmap/regmap-irq.c ret = regmap_write(map, reg, data->status_buf[i]); map 498 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to ack 0x%x: %d\n", map 505 drivers/base/regmap/regmap-irq.c map->reg_stride] & chip->irqs[i].mask) { map 513 drivers/base/regmap/regmap-irq.c pm_runtime_put(map->dev); map 539 drivers/base/regmap/regmap-irq.c .map = regmap_irq_map, map 559 drivers/base/regmap/regmap-irq.c int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, map 577 drivers/base/regmap/regmap-irq.c if (chip->irqs[i].reg_offset % map->reg_stride) map 579 drivers/base/regmap/regmap-irq.c if (chip->irqs[i].reg_offset / map->reg_stride >= map 587 drivers/base/regmap/regmap-irq.c dev_warn(map->dev, "Failed to allocate IRQs: %d\n", map 644 drivers/base/regmap/regmap-irq.c d->map = map; map 658 drivers/base/regmap/regmap-irq.c if (!map->use_single_read && map->reg_stride == 1 && map 661 drivers/base/regmap/regmap-irq.c map->format.val_bytes, map 670 drivers/base/regmap/regmap-irq.c d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride] map 680 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 695 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", map 705 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 706 drivers/base/regmap/regmap-irq.c ret = regmap_read(map, reg, &d->status_buf[i]); map 708 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to read IRQ status: %d\n", map 715 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 717 drivers/base/regmap/regmap-irq.c ret = regmap_write(map, reg, map 720 drivers/base/regmap/regmap-irq.c ret = regmap_write(map, reg, map 723 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to ack 0x%x: %d\n", map 735 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->irq_reg_stride); map 746 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", map 756 drivers/base/regmap/regmap-irq.c (i * map->reg_stride * d->type_reg_stride); map 758 drivers/base/regmap/regmap-irq.c ret = regmap_read(map, reg, &d->type_buf_def[i]); map 764 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n", map 772 drivers/base/regmap/regmap-irq.c d->domain = irq_domain_add_legacy(map->dev->of_node, map 776 drivers/base/regmap/regmap-irq.c d->domain = irq_domain_add_linear(map->dev->of_node, map 780 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to create IRQ domain\n"); map 789 drivers/base/regmap/regmap-irq.c dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n", map 893 drivers/base/regmap/regmap-irq.c int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, map 906 drivers/base/regmap/regmap-irq.c ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base, map 357 drivers/base/regmap/regmap-mmio.c int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk) map 359 drivers/base/regmap/regmap-mmio.c struct regmap_mmio_context *ctx = map->bus_context; map 368 drivers/base/regmap/regmap-mmio.c void regmap_mmio_detach_clk(struct regmap *map) map 370 drivers/base/regmap/regmap-mmio.c 
struct regmap_mmio_context *ctx = map->bus_context; map 35 drivers/base/regmap/regmap.c static inline bool regmap_should_log(struct regmap *map) map 37 drivers/base/regmap/regmap.c return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0); map 40 drivers/base/regmap/regmap.c static inline bool regmap_should_log(struct regmap *map) { return false; } map 44 drivers/base/regmap/regmap.c static int _regmap_update_bits(struct regmap *map, unsigned int reg, map 73 drivers/base/regmap/regmap.c bool regmap_check_range_table(struct regmap *map, unsigned int reg, map 89 drivers/base/regmap/regmap.c bool regmap_writeable(struct regmap *map, unsigned int reg) map 91 drivers/base/regmap/regmap.c if (map->max_register && reg > map->max_register) map 94 drivers/base/regmap/regmap.c if (map->writeable_reg) map 95 drivers/base/regmap/regmap.c return map->writeable_reg(map->dev, reg); map 97 drivers/base/regmap/regmap.c if (map->wr_table) map 98 drivers/base/regmap/regmap.c return regmap_check_range_table(map, reg, map->wr_table); map 103 drivers/base/regmap/regmap.c bool regmap_cached(struct regmap *map, unsigned int reg) map 108 drivers/base/regmap/regmap.c if (map->cache_type == REGCACHE_NONE) map 111 drivers/base/regmap/regmap.c if (!map->cache_ops) map 114 drivers/base/regmap/regmap.c if (map->max_register && reg > map->max_register) map 117 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 118 drivers/base/regmap/regmap.c ret = regcache_read(map, reg, &val); map 119 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 126 drivers/base/regmap/regmap.c bool regmap_readable(struct regmap *map, unsigned int reg) map 128 drivers/base/regmap/regmap.c if (!map->reg_read) map 131 drivers/base/regmap/regmap.c if (map->max_register && reg > map->max_register) map 134 drivers/base/regmap/regmap.c if (map->format.format_write) map 137 drivers/base/regmap/regmap.c if (map->readable_reg) map 138 drivers/base/regmap/regmap.c return map->readable_reg(map->dev, reg); map 140 drivers/base/regmap/regmap.c if (map->rd_table) map 141 drivers/base/regmap/regmap.c return regmap_check_range_table(map, reg, map->rd_table); map 146 drivers/base/regmap/regmap.c bool regmap_volatile(struct regmap *map, unsigned int reg) map 148 drivers/base/regmap/regmap.c if (!map->format.format_write && !regmap_readable(map, reg)) map 151 drivers/base/regmap/regmap.c if (map->volatile_reg) map 152 drivers/base/regmap/regmap.c return map->volatile_reg(map->dev, reg); map 154 drivers/base/regmap/regmap.c if (map->volatile_table) map 155 drivers/base/regmap/regmap.c return regmap_check_range_table(map, reg, map->volatile_table); map 157 drivers/base/regmap/regmap.c if (map->cache_ops) map 163 drivers/base/regmap/regmap.c bool regmap_precious(struct regmap *map, unsigned int reg) map 165 drivers/base/regmap/regmap.c if (!regmap_readable(map, reg)) map 168 drivers/base/regmap/regmap.c if (map->precious_reg) map 169 drivers/base/regmap/regmap.c return map->precious_reg(map->dev, reg); map 171 drivers/base/regmap/regmap.c if (map->precious_table) map 172 drivers/base/regmap/regmap.c return regmap_check_range_table(map, reg, map->precious_table); map 177 drivers/base/regmap/regmap.c bool regmap_writeable_noinc(struct regmap *map, unsigned int reg) map 179 drivers/base/regmap/regmap.c if (map->writeable_noinc_reg) map 180 drivers/base/regmap/regmap.c return map->writeable_noinc_reg(map->dev, reg); map 182 drivers/base/regmap/regmap.c if (map->wr_noinc_table) map 183 drivers/base/regmap/regmap.c return 
regmap_check_range_table(map, reg, map->wr_noinc_table); map 188 drivers/base/regmap/regmap.c bool regmap_readable_noinc(struct regmap *map, unsigned int reg) map 190 drivers/base/regmap/regmap.c if (map->readable_noinc_reg) map 191 drivers/base/regmap/regmap.c return map->readable_noinc_reg(map->dev, reg); map 193 drivers/base/regmap/regmap.c if (map->rd_noinc_table) map 194 drivers/base/regmap/regmap.c return regmap_check_range_table(map, reg, map->rd_noinc_table); map 199 drivers/base/regmap/regmap.c static bool regmap_volatile_range(struct regmap *map, unsigned int reg, map 205 drivers/base/regmap/regmap.c if (!regmap_volatile(map, reg + regmap_get_offset(map, i))) map 211 drivers/base/regmap/regmap.c static void regmap_format_2_6_write(struct regmap *map, map 214 drivers/base/regmap/regmap.c u8 *out = map->work_buf; map 219 drivers/base/regmap/regmap.c static void regmap_format_4_12_write(struct regmap *map, map 222 drivers/base/regmap/regmap.c __be16 *out = map->work_buf; map 226 drivers/base/regmap/regmap.c static void regmap_format_7_9_write(struct regmap *map, map 229 drivers/base/regmap/regmap.c __be16 *out = map->work_buf; map 233 drivers/base/regmap/regmap.c static void regmap_format_10_14_write(struct regmap *map, map 236 drivers/base/regmap/regmap.c u8 *out = map->work_buf; map 447 drivers/base/regmap/regmap.c struct regmap *map = __map; map 449 drivers/base/regmap/regmap.c hwspin_lock_timeout(map->hwlock, UINT_MAX); map 454 drivers/base/regmap/regmap.c struct regmap *map = __map; map 456 drivers/base/regmap/regmap.c hwspin_lock_timeout_irq(map->hwlock, UINT_MAX); map 461 drivers/base/regmap/regmap.c struct regmap *map = __map; map 463 drivers/base/regmap/regmap.c hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX, map 464 drivers/base/regmap/regmap.c &map->spinlock_flags); map 469 drivers/base/regmap/regmap.c struct regmap *map = __map; map 471 drivers/base/regmap/regmap.c hwspin_unlock(map->hwlock); map 476 drivers/base/regmap/regmap.c struct regmap *map = __map; map 478 drivers/base/regmap/regmap.c hwspin_unlock_irq(map->hwlock); map 483 drivers/base/regmap/regmap.c struct regmap *map = __map; map 485 drivers/base/regmap/regmap.c hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags); map 495 drivers/base/regmap/regmap.c struct regmap *map = __map; map 496 drivers/base/regmap/regmap.c mutex_lock(&map->mutex); map 501 drivers/base/regmap/regmap.c struct regmap *map = __map; map 502 drivers/base/regmap/regmap.c mutex_unlock(&map->mutex); map 506 drivers/base/regmap/regmap.c __acquires(&map->spinlock) map 508 drivers/base/regmap/regmap.c struct regmap *map = __map; map 511 drivers/base/regmap/regmap.c spin_lock_irqsave(&map->spinlock, flags); map 512 drivers/base/regmap/regmap.c map->spinlock_flags = flags; map 516 drivers/base/regmap/regmap.c __releases(&map->spinlock) map 518 drivers/base/regmap/regmap.c struct regmap *map = __map; map 519 drivers/base/regmap/regmap.c spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags); map 531 drivers/base/regmap/regmap.c static bool _regmap_range_add(struct regmap *map, map 534 drivers/base/regmap/regmap.c struct rb_root *root = &map->range_tree; map 556 drivers/base/regmap/regmap.c static struct regmap_range_node *_regmap_range_lookup(struct regmap *map, map 559 drivers/base/regmap/regmap.c struct rb_node *node = map->range_tree.rb_node; map 576 drivers/base/regmap/regmap.c static void regmap_range_exit(struct regmap *map) map 581 drivers/base/regmap/regmap.c next = rb_first(&map->range_tree); map 585 
drivers/base/regmap/regmap.c rb_erase(&range_node->node, &map->range_tree); map 589 drivers/base/regmap/regmap.c kfree(map->selector_work_buf); map 592 drivers/base/regmap/regmap.c int regmap_attach_dev(struct device *dev, struct regmap *map, map 597 drivers/base/regmap/regmap.c map->dev = dev; map 599 drivers/base/regmap/regmap.c regmap_debugfs_init(map, config->name); map 604 drivers/base/regmap/regmap.c regmap_debugfs_exit(map); map 607 drivers/base/regmap/regmap.c *m = map; map 689 drivers/base/regmap/regmap.c struct regmap *map; map 697 drivers/base/regmap/regmap.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 698 drivers/base/regmap/regmap.c if (map == NULL) { map 704 drivers/base/regmap/regmap.c map->name = kstrdup_const(config->name, GFP_KERNEL); map 705 drivers/base/regmap/regmap.c if (!map->name) { map 712 drivers/base/regmap/regmap.c map->lock = map->unlock = regmap_lock_unlock_none; map 713 drivers/base/regmap/regmap.c regmap_debugfs_disable(map); map 715 drivers/base/regmap/regmap.c map->lock = config->lock; map 716 drivers/base/regmap/regmap.c map->unlock = config->unlock; map 717 drivers/base/regmap/regmap.c map->lock_arg = config->lock_arg; map 719 drivers/base/regmap/regmap.c map->hwlock = hwspin_lock_request_specific(config->hwlock_id); map 720 drivers/base/regmap/regmap.c if (!map->hwlock) { map 727 drivers/base/regmap/regmap.c map->lock = regmap_lock_hwlock_irqsave; map 728 drivers/base/regmap/regmap.c map->unlock = regmap_unlock_hwlock_irqrestore; map 731 drivers/base/regmap/regmap.c map->lock = regmap_lock_hwlock_irq; map 732 drivers/base/regmap/regmap.c map->unlock = regmap_unlock_hwlock_irq; map 735 drivers/base/regmap/regmap.c map->lock = regmap_lock_hwlock; map 736 drivers/base/regmap/regmap.c map->unlock = regmap_unlock_hwlock; map 740 drivers/base/regmap/regmap.c map->lock_arg = map; map 744 drivers/base/regmap/regmap.c spin_lock_init(&map->spinlock); map 745 drivers/base/regmap/regmap.c map->lock = regmap_lock_spinlock; map 746 drivers/base/regmap/regmap.c map->unlock = regmap_unlock_spinlock; map 747 drivers/base/regmap/regmap.c lockdep_set_class_and_name(&map->spinlock, map 750 drivers/base/regmap/regmap.c mutex_init(&map->mutex); map 751 drivers/base/regmap/regmap.c map->lock = regmap_lock_mutex; map 752 drivers/base/regmap/regmap.c map->unlock = regmap_unlock_mutex; map 753 drivers/base/regmap/regmap.c lockdep_set_class_and_name(&map->mutex, map 756 drivers/base/regmap/regmap.c map->lock_arg = map; map 764 drivers/base/regmap/regmap.c map->alloc_flags = GFP_ATOMIC; map 766 drivers/base/regmap/regmap.c map->alloc_flags = GFP_KERNEL; map 768 drivers/base/regmap/regmap.c map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); map 769 drivers/base/regmap/regmap.c map->format.pad_bytes = config->pad_bits / 8; map 770 drivers/base/regmap/regmap.c map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); map 771 drivers/base/regmap/regmap.c map->format.buf_size = DIV_ROUND_UP(config->reg_bits + map 773 drivers/base/regmap/regmap.c map->reg_shift = config->pad_bits % 8; map 775 drivers/base/regmap/regmap.c map->reg_stride = config->reg_stride; map 777 drivers/base/regmap/regmap.c map->reg_stride = 1; map 778 drivers/base/regmap/regmap.c if (is_power_of_2(map->reg_stride)) map 779 drivers/base/regmap/regmap.c map->reg_stride_order = ilog2(map->reg_stride); map 781 drivers/base/regmap/regmap.c map->reg_stride_order = -1; map 782 drivers/base/regmap/regmap.c map->use_single_read = config->use_single_read || !bus || !bus->read; map 783 drivers/base/regmap/regmap.c 
map->use_single_write = config->use_single_write || !bus || !bus->write; map 784 drivers/base/regmap/regmap.c map->can_multi_write = config->can_multi_write && bus && bus->write; map 786 drivers/base/regmap/regmap.c map->max_raw_read = bus->max_raw_read; map 787 drivers/base/regmap/regmap.c map->max_raw_write = bus->max_raw_write; map 789 drivers/base/regmap/regmap.c map->dev = dev; map 790 drivers/base/regmap/regmap.c map->bus = bus; map 791 drivers/base/regmap/regmap.c map->bus_context = bus_context; map 792 drivers/base/regmap/regmap.c map->max_register = config->max_register; map 793 drivers/base/regmap/regmap.c map->wr_table = config->wr_table; map 794 drivers/base/regmap/regmap.c map->rd_table = config->rd_table; map 795 drivers/base/regmap/regmap.c map->volatile_table = config->volatile_table; map 796 drivers/base/regmap/regmap.c map->precious_table = config->precious_table; map 797 drivers/base/regmap/regmap.c map->wr_noinc_table = config->wr_noinc_table; map 798 drivers/base/regmap/regmap.c map->rd_noinc_table = config->rd_noinc_table; map 799 drivers/base/regmap/regmap.c map->writeable_reg = config->writeable_reg; map 800 drivers/base/regmap/regmap.c map->readable_reg = config->readable_reg; map 801 drivers/base/regmap/regmap.c map->volatile_reg = config->volatile_reg; map 802 drivers/base/regmap/regmap.c map->precious_reg = config->precious_reg; map 803 drivers/base/regmap/regmap.c map->writeable_noinc_reg = config->writeable_noinc_reg; map 804 drivers/base/regmap/regmap.c map->readable_noinc_reg = config->readable_noinc_reg; map 805 drivers/base/regmap/regmap.c map->cache_type = config->cache_type; map 807 drivers/base/regmap/regmap.c spin_lock_init(&map->async_lock); map 808 drivers/base/regmap/regmap.c INIT_LIST_HEAD(&map->async_list); map 809 drivers/base/regmap/regmap.c INIT_LIST_HEAD(&map->async_free); map 810 drivers/base/regmap/regmap.c init_waitqueue_head(&map->async_waitq); map 815 drivers/base/regmap/regmap.c map->read_flag_mask = config->read_flag_mask; map 816 drivers/base/regmap/regmap.c map->write_flag_mask = config->write_flag_mask; map 818 drivers/base/regmap/regmap.c map->read_flag_mask = bus->read_flag_mask; map 822 drivers/base/regmap/regmap.c map->reg_read = config->reg_read; map 823 drivers/base/regmap/regmap.c map->reg_write = config->reg_write; map 825 drivers/base/regmap/regmap.c map->defer_caching = false; map 828 drivers/base/regmap/regmap.c map->reg_read = _regmap_bus_reg_read; map 829 drivers/base/regmap/regmap.c map->reg_write = _regmap_bus_reg_write; map 831 drivers/base/regmap/regmap.c map->defer_caching = false; map 834 drivers/base/regmap/regmap.c map->reg_read = _regmap_bus_read; map 835 drivers/base/regmap/regmap.c map->reg_update_bits = bus->reg_update_bits; map 841 drivers/base/regmap/regmap.c switch (config->reg_bits + map->reg_shift) { map 845 drivers/base/regmap/regmap.c map->format.format_write = regmap_format_2_6_write; map 855 drivers/base/regmap/regmap.c map->format.format_write = regmap_format_4_12_write; map 865 drivers/base/regmap/regmap.c map->format.format_write = regmap_format_7_9_write; map 875 drivers/base/regmap/regmap.c map->format.format_write = regmap_format_10_14_write; map 883 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_8; map 889 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_16_be; map 892 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_16_le; map 895 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_16_native; map 905 
drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_24; map 911 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_32_be; map 914 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_32_le; map 917 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_32_native; map 928 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_64_be; map 931 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_64_le; map 934 drivers/base/regmap/regmap.c map->format.format_reg = regmap_format_64_native; map 947 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_inplace_noop; map 951 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_8; map 952 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_8; map 953 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_inplace_noop; map 958 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_16_be; map 959 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_16_be; map 960 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_16_be_inplace; map 963 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_16_le; map 964 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_16_le; map 965 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_16_le_inplace; map 968 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_16_native; map 969 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_16_native; map 978 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_24; map 979 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_24; map 984 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_32_be; map 985 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_32_be; map 986 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_32_be_inplace; map 989 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_32_le; map 990 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_32_le; map 991 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_32_le_inplace; map 994 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_32_native; map 995 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_32_native; map 1005 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_64_be; map 1006 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_64_be; map 1007 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_64_be_inplace; map 1010 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_64_le; map 1011 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_64_le; map 1012 drivers/base/regmap/regmap.c map->format.parse_inplace = regmap_parse_64_le_inplace; map 1015 drivers/base/regmap/regmap.c map->format.format_val = regmap_format_64_native; map 1016 drivers/base/regmap/regmap.c map->format.parse_val = regmap_parse_64_native; map 1025 drivers/base/regmap/regmap.c if (map->format.format_write) { map 1029 drivers/base/regmap/regmap.c map->use_single_write = true; map 1032 drivers/base/regmap/regmap.c if (!map->format.format_write && map 1033 drivers/base/regmap/regmap.c !(map->format.format_reg && map->format.format_val)) map 1036 drivers/base/regmap/regmap.c map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); map 1037 
drivers/base/regmap/regmap.c if (map->work_buf == NULL) { map 1042 drivers/base/regmap/regmap.c if (map->format.format_write) { map 1043 drivers/base/regmap/regmap.c map->defer_caching = false; map 1044 drivers/base/regmap/regmap.c map->reg_write = _regmap_bus_formatted_write; map 1045 drivers/base/regmap/regmap.c } else if (map->format.format_val) { map 1046 drivers/base/regmap/regmap.c map->defer_caching = true; map 1047 drivers/base/regmap/regmap.c map->reg_write = _regmap_bus_raw_write; map 1052 drivers/base/regmap/regmap.c map->range_tree = RB_ROOT; map 1059 drivers/base/regmap/regmap.c dev_err(map->dev, "Invalid range %d: %d < %d\n", i, map 1064 drivers/base/regmap/regmap.c if (range_cfg->range_max > map->max_register) { map 1065 drivers/base/regmap/regmap.c dev_err(map->dev, "Invalid range %d: %d > %d\n", i, map 1066 drivers/base/regmap/regmap.c range_cfg->range_max, map->max_register); map 1070 drivers/base/regmap/regmap.c if (range_cfg->selector_reg > map->max_register) { map 1071 drivers/base/regmap/regmap.c dev_err(map->dev, map 1077 drivers/base/regmap/regmap.c dev_err(map->dev, "Invalid range %d: window_len 0\n", map 1096 drivers/base/regmap/regmap.c dev_err(map->dev, map 1104 drivers/base/regmap/regmap.c dev_err(map->dev, map 1117 drivers/base/regmap/regmap.c new->map = map; map 1127 drivers/base/regmap/regmap.c if (!_regmap_range_add(map, new)) { map 1128 drivers/base/regmap/regmap.c dev_err(map->dev, "Failed to add range %d\n", i); map 1133 drivers/base/regmap/regmap.c if (map->selector_work_buf == NULL) { map 1134 drivers/base/regmap/regmap.c map->selector_work_buf = map 1135 drivers/base/regmap/regmap.c kzalloc(map->format.buf_size, GFP_KERNEL); map 1136 drivers/base/regmap/regmap.c if (map->selector_work_buf == NULL) { map 1143 drivers/base/regmap/regmap.c ret = regcache_init(map, config); map 1148 drivers/base/regmap/regmap.c ret = regmap_attach_dev(dev, map, config); map 1152 drivers/base/regmap/regmap.c regmap_debugfs_init(map, config->name); map 1155 drivers/base/regmap/regmap.c return map; map 1158 drivers/base/regmap/regmap.c regcache_exit(map); map 1160 drivers/base/regmap/regmap.c regmap_range_exit(map); map 1161 drivers/base/regmap/regmap.c kfree(map->work_buf); map 1163 drivers/base/regmap/regmap.c if (map->hwlock) map 1164 drivers/base/regmap/regmap.c hwspin_lock_free(map->hwlock); map 1166 drivers/base/regmap/regmap.c kfree_const(map->name); map 1168 drivers/base/regmap/regmap.c kfree(map); map 1310 drivers/base/regmap/regmap.c int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) map 1312 drivers/base/regmap/regmap.c regcache_exit(map); map 1313 drivers/base/regmap/regmap.c regmap_debugfs_exit(map); map 1315 drivers/base/regmap/regmap.c map->max_register = config->max_register; map 1316 drivers/base/regmap/regmap.c map->writeable_reg = config->writeable_reg; map 1317 drivers/base/regmap/regmap.c map->readable_reg = config->readable_reg; map 1318 drivers/base/regmap/regmap.c map->volatile_reg = config->volatile_reg; map 1319 drivers/base/regmap/regmap.c map->precious_reg = config->precious_reg; map 1320 drivers/base/regmap/regmap.c map->writeable_noinc_reg = config->writeable_noinc_reg; map 1321 drivers/base/regmap/regmap.c map->readable_noinc_reg = config->readable_noinc_reg; map 1322 drivers/base/regmap/regmap.c map->cache_type = config->cache_type; map 1324 drivers/base/regmap/regmap.c regmap_debugfs_init(map, config->name); map 1326 drivers/base/regmap/regmap.c map->cache_bypass = false; map 1327 drivers/base/regmap/regmap.c 
map->cache_only = false; map 1329 drivers/base/regmap/regmap.c return regcache_init(map, config); map 1338 drivers/base/regmap/regmap.c void regmap_exit(struct regmap *map) map 1342 drivers/base/regmap/regmap.c regcache_exit(map); map 1343 drivers/base/regmap/regmap.c regmap_debugfs_exit(map); map 1344 drivers/base/regmap/regmap.c regmap_range_exit(map); map 1345 drivers/base/regmap/regmap.c if (map->bus && map->bus->free_context) map 1346 drivers/base/regmap/regmap.c map->bus->free_context(map->bus_context); map 1347 drivers/base/regmap/regmap.c kfree(map->work_buf); map 1348 drivers/base/regmap/regmap.c while (!list_empty(&map->async_free)) { map 1349 drivers/base/regmap/regmap.c async = list_first_entry_or_null(&map->async_free, map 1356 drivers/base/regmap/regmap.c if (map->hwlock) map 1357 drivers/base/regmap/regmap.c hwspin_lock_free(map->hwlock); map 1358 drivers/base/regmap/regmap.c kfree_const(map->name); map 1359 drivers/base/regmap/regmap.c kfree(map); map 1408 drivers/base/regmap/regmap.c struct device *regmap_get_device(struct regmap *map) map 1410 drivers/base/regmap/regmap.c return map->dev; map 1414 drivers/base/regmap/regmap.c static int _regmap_select_page(struct regmap *map, unsigned int *reg, map 1443 drivers/base/regmap/regmap.c orig_work_buf = map->work_buf; map 1444 drivers/base/regmap/regmap.c map->work_buf = map->selector_work_buf; map 1446 drivers/base/regmap/regmap.c ret = _regmap_update_bits(map, range->selector_reg, map 1451 drivers/base/regmap/regmap.c map->work_buf = orig_work_buf; map 1462 drivers/base/regmap/regmap.c static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, map 1468 drivers/base/regmap/regmap.c if (!mask || !map->work_buf) map 1471 drivers/base/regmap/regmap.c buf = map->work_buf; map 1477 drivers/base/regmap/regmap.c static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, map 1482 drivers/base/regmap/regmap.c void *work_val = map->work_buf + map->format.reg_bytes + map 1483 drivers/base/regmap/regmap.c map->format.pad_bytes; map 1489 drivers/base/regmap/regmap.c WARN_ON(!map->bus); map 1494 drivers/base/regmap/regmap.c if (!regmap_writeable_noinc(map, reg)) { map 1495 drivers/base/regmap/regmap.c for (i = 0; i < val_len / map->format.val_bytes; i++) { map 1497 drivers/base/regmap/regmap.c reg + regmap_get_offset(map, i); map 1498 drivers/base/regmap/regmap.c if (!regmap_writeable(map, element) || map 1499 drivers/base/regmap/regmap.c regmap_writeable_noinc(map, element)) map 1504 drivers/base/regmap/regmap.c if (!map->cache_bypass && map->format.parse_val) { map 1506 drivers/base/regmap/regmap.c int val_bytes = map->format.val_bytes; map 1508 drivers/base/regmap/regmap.c ival = map->format.parse_val(val + (i * val_bytes)); map 1509 drivers/base/regmap/regmap.c ret = regcache_write(map, map 1510 drivers/base/regmap/regmap.c reg + regmap_get_offset(map, i), map 1513 drivers/base/regmap/regmap.c dev_err(map->dev, map 1519 drivers/base/regmap/regmap.c if (map->cache_only) { map 1520 drivers/base/regmap/regmap.c map->cache_dirty = true; map 1525 drivers/base/regmap/regmap.c range = _regmap_range_lookup(map, reg); map 1527 drivers/base/regmap/regmap.c int val_num = val_len / map->format.val_bytes; map 1533 drivers/base/regmap/regmap.c dev_dbg(map->dev, "Writing window %d/%zu\n", map 1534 drivers/base/regmap/regmap.c win_residue, val_len / map->format.val_bytes); map 1535 drivers/base/regmap/regmap.c ret = _regmap_raw_write_impl(map, reg, val, map 1537 drivers/base/regmap/regmap.c map->format.val_bytes); map 
1543 drivers/base/regmap/regmap.c val += win_residue * map->format.val_bytes; map 1544 drivers/base/regmap/regmap.c val_len -= win_residue * map->format.val_bytes; map 1551 drivers/base/regmap/regmap.c ret = _regmap_select_page(map, &reg, range, val_num); map 1556 drivers/base/regmap/regmap.c map->format.format_reg(map->work_buf, reg, map->reg_shift); map 1557 drivers/base/regmap/regmap.c regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, map 1558 drivers/base/regmap/regmap.c map->write_flag_mask); map 1565 drivers/base/regmap/regmap.c if (val != work_val && val_len == map->format.val_bytes) { map 1566 drivers/base/regmap/regmap.c memcpy(work_val, val, map->format.val_bytes); map 1570 drivers/base/regmap/regmap.c if (map->async && map->bus->async_write) { map 1573 drivers/base/regmap/regmap.c trace_regmap_async_write_start(map, reg, val_len); map 1575 drivers/base/regmap/regmap.c spin_lock_irqsave(&map->async_lock, flags); map 1576 drivers/base/regmap/regmap.c async = list_first_entry_or_null(&map->async_free, map 1581 drivers/base/regmap/regmap.c spin_unlock_irqrestore(&map->async_lock, flags); map 1584 drivers/base/regmap/regmap.c async = map->bus->async_alloc(); map 1588 drivers/base/regmap/regmap.c async->work_buf = kzalloc(map->format.buf_size, map 1596 drivers/base/regmap/regmap.c async->map = map; map 1599 drivers/base/regmap/regmap.c memcpy(async->work_buf, map->work_buf, map->format.pad_bytes + map 1600 drivers/base/regmap/regmap.c map->format.reg_bytes + map->format.val_bytes); map 1602 drivers/base/regmap/regmap.c spin_lock_irqsave(&map->async_lock, flags); map 1603 drivers/base/regmap/regmap.c list_add_tail(&async->list, &map->async_list); map 1604 drivers/base/regmap/regmap.c spin_unlock_irqrestore(&map->async_lock, flags); map 1607 drivers/base/regmap/regmap.c ret = map->bus->async_write(map->bus_context, map 1609 drivers/base/regmap/regmap.c map->format.reg_bytes + map 1610 drivers/base/regmap/regmap.c map->format.pad_bytes, map 1613 drivers/base/regmap/regmap.c ret = map->bus->async_write(map->bus_context, map 1615 drivers/base/regmap/regmap.c map->format.reg_bytes + map 1616 drivers/base/regmap/regmap.c map->format.pad_bytes + map 1620 drivers/base/regmap/regmap.c dev_err(map->dev, "Failed to schedule write: %d\n", map 1623 drivers/base/regmap/regmap.c spin_lock_irqsave(&map->async_lock, flags); map 1624 drivers/base/regmap/regmap.c list_move(&async->list, &map->async_free); map 1625 drivers/base/regmap/regmap.c spin_unlock_irqrestore(&map->async_lock, flags); map 1631 drivers/base/regmap/regmap.c trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes); map 1638 drivers/base/regmap/regmap.c ret = map->bus->write(map->bus_context, map->work_buf, map 1639 drivers/base/regmap/regmap.c map->format.reg_bytes + map 1640 drivers/base/regmap/regmap.c map->format.pad_bytes + map 1642 drivers/base/regmap/regmap.c else if (map->bus->gather_write) map 1643 drivers/base/regmap/regmap.c ret = map->bus->gather_write(map->bus_context, map->work_buf, map 1644 drivers/base/regmap/regmap.c map->format.reg_bytes + map 1645 drivers/base/regmap/regmap.c map->format.pad_bytes, map 1652 drivers/base/regmap/regmap.c len = map->format.reg_bytes + map->format.pad_bytes + val_len; map 1657 drivers/base/regmap/regmap.c memcpy(buf, map->work_buf, map->format.reg_bytes); map 1658 drivers/base/regmap/regmap.c memcpy(buf + map->format.reg_bytes + map->format.pad_bytes, map 1660 drivers/base/regmap/regmap.c ret = map->bus->write(map->bus_context, buf, len); map 1663
drivers/base/regmap/regmap.c } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { map 1667 drivers/base/regmap/regmap.c if (map->cache_ops && map->cache_ops->drop) map 1668 drivers/base/regmap/regmap.c map->cache_ops->drop(map, reg, reg + 1); map 1671 drivers/base/regmap/regmap.c trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes); map 1681 drivers/base/regmap/regmap.c bool regmap_can_raw_write(struct regmap *map) map 1683 drivers/base/regmap/regmap.c return map->bus && map->bus->write && map->format.format_val && map 1684 drivers/base/regmap/regmap.c map->format.format_reg; map 1693 drivers/base/regmap/regmap.c size_t regmap_get_raw_read_max(struct regmap *map) map 1695 drivers/base/regmap/regmap.c return map->max_raw_read; map 1704 drivers/base/regmap/regmap.c size_t regmap_get_raw_write_max(struct regmap *map) map 1706 drivers/base/regmap/regmap.c return map->max_raw_write; map 1715 drivers/base/regmap/regmap.c struct regmap *map = context; map 1717 drivers/base/regmap/regmap.c WARN_ON(!map->bus || !map->format.format_write); map 1719 drivers/base/regmap/regmap.c range = _regmap_range_lookup(map, reg); map 1721 drivers/base/regmap/regmap.c ret = _regmap_select_page(map, &reg, range, 1); map 1726 drivers/base/regmap/regmap.c map->format.format_write(map, reg, val); map 1728 drivers/base/regmap/regmap.c trace_regmap_hw_write_start(map, reg, 1); map 1730 drivers/base/regmap/regmap.c ret = map->bus->write(map->bus_context, map->work_buf, map 1731 drivers/base/regmap/regmap.c map->format.buf_size); map 1733 drivers/base/regmap/regmap.c trace_regmap_hw_write_done(map, reg, 1); map 1741 drivers/base/regmap/regmap.c struct regmap *map = context; map 1743 drivers/base/regmap/regmap.c return map->bus->reg_write(map->bus_context, reg, val); map 1749 drivers/base/regmap/regmap.c struct regmap *map = context; map 1751 drivers/base/regmap/regmap.c WARN_ON(!map->bus || !map->format.format_val); map 1753 drivers/base/regmap/regmap.c map->format.format_val(map->work_buf + map->format.reg_bytes map 1754 drivers/base/regmap/regmap.c + map->format.pad_bytes, val, 0); map 1755 drivers/base/regmap/regmap.c return _regmap_raw_write_impl(map, reg, map 1756 drivers/base/regmap/regmap.c map->work_buf + map 1757 drivers/base/regmap/regmap.c map->format.reg_bytes + map 1758 drivers/base/regmap/regmap.c map->format.pad_bytes, map 1759 drivers/base/regmap/regmap.c map->format.val_bytes); map 1762 drivers/base/regmap/regmap.c static inline void *_regmap_map_get_context(struct regmap *map) map 1764 drivers/base/regmap/regmap.c return (map->bus) ?
map : map->bus_context; map 1767 drivers/base/regmap/regmap.c int _regmap_write(struct regmap *map, unsigned int reg, map 1771 drivers/base/regmap/regmap.c void *context = _regmap_map_get_context(map); map 1773 drivers/base/regmap/regmap.c if (!regmap_writeable(map, reg)) map 1776 drivers/base/regmap/regmap.c if (!map->cache_bypass && !map->defer_caching) { map 1777 drivers/base/regmap/regmap.c ret = regcache_write(map, reg, val); map 1780 drivers/base/regmap/regmap.c if (map->cache_only) { map 1781 drivers/base/regmap/regmap.c map->cache_dirty = true; map 1786 drivers/base/regmap/regmap.c if (regmap_should_log(map)) map 1787 drivers/base/regmap/regmap.c dev_info(map->dev, "%x <= %x\n", reg, val); map 1789 drivers/base/regmap/regmap.c trace_regmap_reg_write(map, reg, val); map 1791 drivers/base/regmap/regmap.c return map->reg_write(context, reg, val); map 1804 drivers/base/regmap/regmap.c int regmap_write(struct regmap *map, unsigned int reg, unsigned int val) map 1808 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 1811 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 1813 drivers/base/regmap/regmap.c ret = _regmap_write(map, reg, val); map 1815 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 1831 drivers/base/regmap/regmap.c int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) map 1835 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 1838 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 1840 drivers/base/regmap/regmap.c map->async = true; map 1842 drivers/base/regmap/regmap.c ret = _regmap_write(map, reg, val); map 1844 drivers/base/regmap/regmap.c map->async = false; map 1846 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 1852 drivers/base/regmap/regmap.c int _regmap_raw_write(struct regmap *map, unsigned int reg, map 1855 drivers/base/regmap/regmap.c size_t val_bytes = map->format.val_bytes; map 1864 drivers/base/regmap/regmap.c if (map->use_single_write) map 1866 drivers/base/regmap/regmap.c else if (map->max_raw_write && val_len > map->max_raw_write) map 1867 drivers/base/regmap/regmap.c chunk_regs = map->max_raw_write / val_bytes; map 1874 drivers/base/regmap/regmap.c ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes); map 1878 drivers/base/regmap/regmap.c reg += regmap_get_offset(map, chunk_regs); map 1885 drivers/base/regmap/regmap.c ret = _regmap_raw_write_impl(map, reg, val, val_len); map 1906 drivers/base/regmap/regmap.c int regmap_raw_write(struct regmap *map, unsigned int reg, map 1911 drivers/base/regmap/regmap.c if (!regmap_can_raw_write(map)) map 1913 drivers/base/regmap/regmap.c if (val_len % map->format.val_bytes) map 1916 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 1918 drivers/base/regmap/regmap.c ret = _regmap_raw_write(map, reg, val, val_len); map 1920 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 1947 drivers/base/regmap/regmap.c int regmap_noinc_write(struct regmap *map, unsigned int reg, map 1953 drivers/base/regmap/regmap.c if (!map->bus) map 1955 drivers/base/regmap/regmap.c if (!map->bus->write) map 1957 drivers/base/regmap/regmap.c if (val_len % map->format.val_bytes) map 1959 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 1964 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 1966 drivers/base/regmap/regmap.c if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) { map 1972 drivers/base/regmap/regmap.c if (map->max_raw_write && map->max_raw_write < val_len) 
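The entries above index the core regmap write path (regmap_write(), _regmap_write(), regmap_raw_write(), regmap_noinc_write() and their async variants); the matching read path follows further down. As a rough sketch of how a driver consumes this API, the fragment below shows a managed init, a cached write, a read-modify-write and a read-back. The I2C bus choice, the register layout and the names EXAMPLE_REG_CTRL, EXAMPLE_EN_MASK and example_setup() are illustrative assumptions, not taken from the indexed sources.

/*
 * Minimal sketch of typical regmap consumer code, assuming an I2C device
 * with 8-bit registers and 8-bit values. The register names and offsets
 * below (EXAMPLE_REG_CTRL, EXAMPLE_EN_MASK) are hypothetical.
 */
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_CTRL	0x01	/* hypothetical control register */
#define EXAMPLE_EN_MASK		BIT(0)	/* hypothetical enable bit */

static const struct regmap_config example_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x7f,
	.cache_type	= REGCACHE_RBTREE,
};

static int example_setup(struct i2c_client *i2c)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	/* Managed init: the regmap is released when the device goes away. */
	map = devm_regmap_init_i2c(i2c, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Plain write; goes through the cache and _regmap_write(). */
	ret = regmap_write(map, EXAMPLE_REG_CTRL, 0x00);
	if (ret)
		return ret;

	/* Read-modify-write; backed by _regmap_update_bits(). */
	ret = regmap_update_bits(map, EXAMPLE_REG_CTRL,
				 EXAMPLE_EN_MASK, EXAMPLE_EN_MASK);
	if (ret)
		return ret;

	/* Read back; with a cache_type set this may be served from cache. */
	return regmap_read(map, EXAMPLE_REG_CTRL, &val);
}

Because cache_type is set in this sketch, reads of non-volatile registers can be satisfied from the register cache, which is what the regcache_* entries at the start of this section implement.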
map 1973 drivers/base/regmap/regmap.c write_len = map->max_raw_write; map 1976 drivers/base/regmap/regmap.c ret = _regmap_raw_write(map, reg, val, write_len); map 1984 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2063 drivers/base/regmap/regmap.c int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, map 2067 drivers/base/regmap/regmap.c size_t val_bytes = map->format.val_bytes; map 2069 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 2076 drivers/base/regmap/regmap.c if (!map->bus || !map->format.parse_inplace) { map 2077 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2101 drivers/base/regmap/regmap.c ret = _regmap_write(map, map 2102 drivers/base/regmap/regmap.c reg + regmap_get_offset(map, i), map 2108 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2112 drivers/base/regmap/regmap.c wval = kmemdup(val, val_count * val_bytes, map->alloc_flags); map 2117 drivers/base/regmap/regmap.c map->format.parse_inplace(wval + i); map 2119 drivers/base/regmap/regmap.c ret = regmap_raw_write(map, reg, wval, val_bytes * val_count); map 2134 drivers/base/regmap/regmap.c static int _regmap_raw_multi_reg_write(struct regmap *map, map 2142 drivers/base/regmap/regmap.c size_t val_bytes = map->format.val_bytes; map 2143 drivers/base/regmap/regmap.c size_t reg_bytes = map->format.reg_bytes; map 2144 drivers/base/regmap/regmap.c size_t pad_bytes = map->format.pad_bytes; map 2162 drivers/base/regmap/regmap.c trace_regmap_hw_write_start(map, reg, 1); map 2163 drivers/base/regmap/regmap.c map->format.format_reg(u8, reg, map->reg_shift); map 2165 drivers/base/regmap/regmap.c map->format.format_val(u8, val, 0); map 2169 drivers/base/regmap/regmap.c *u8 |= map->write_flag_mask; map 2171 drivers/base/regmap/regmap.c ret = map->bus->write(map->bus_context, buf, len); map 2177 drivers/base/regmap/regmap.c trace_regmap_hw_write_done(map, reg, 1); map 2182 drivers/base/regmap/regmap.c static unsigned int _regmap_register_page(struct regmap *map, map 2191 drivers/base/regmap/regmap.c static int _regmap_range_multi_paged_reg_write(struct regmap *map, map 2211 drivers/base/regmap/regmap.c range = _regmap_range_lookup(map, reg); map 2213 drivers/base/regmap/regmap.c unsigned int win_page = _regmap_register_page(map, reg, map 2240 drivers/base/regmap/regmap.c ret = _regmap_raw_multi_reg_write(map, base, n); map 2251 drivers/base/regmap/regmap.c ret = _regmap_select_page(map, map 2264 drivers/base/regmap/regmap.c return _regmap_raw_multi_reg_write(map, base, n); map 2268 drivers/base/regmap/regmap.c static int _regmap_multi_reg_write(struct regmap *map, map 2275 drivers/base/regmap/regmap.c if (!map->can_multi_write) { map 2277 drivers/base/regmap/regmap.c ret = _regmap_write(map, regs[i].reg, regs[i].def); map 2287 drivers/base/regmap/regmap.c if (!map->format.parse_inplace) map 2290 drivers/base/regmap/regmap.c if (map->writeable_reg) map 2293 drivers/base/regmap/regmap.c if (!map->writeable_reg(map->dev, reg)) map 2295 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 2299 drivers/base/regmap/regmap.c if (!map->cache_bypass) { map 2303 drivers/base/regmap/regmap.c ret = regcache_write(map, reg, val); map 2305 drivers/base/regmap/regmap.c dev_err(map->dev, map 2311 drivers/base/regmap/regmap.c if (map->cache_only) { map 2312 drivers/base/regmap/regmap.c map->cache_dirty = true; map 2317 drivers/base/regmap/regmap.c WARN_ON(!map->bus); map 2326 drivers/base/regmap/regmap.c range = _regmap_range_lookup(map, reg); map 
2333 drivers/base/regmap/regmap.c ret = _regmap_range_multi_paged_reg_write(map, base, map 2340 drivers/base/regmap/regmap.c return _regmap_raw_multi_reg_write(map, regs, num_regs); map 2362 drivers/base/regmap/regmap.c int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, map 2367 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2369 drivers/base/regmap/regmap.c ret = _regmap_multi_reg_write(map, regs, num_regs); map 2371 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2395 drivers/base/regmap/regmap.c int regmap_multi_reg_write_bypassed(struct regmap *map, map 2402 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2404 drivers/base/regmap/regmap.c bypass = map->cache_bypass; map 2405 drivers/base/regmap/regmap.c map->cache_bypass = true; map 2407 drivers/base/regmap/regmap.c ret = _regmap_multi_reg_write(map, regs, num_regs); map 2409 drivers/base/regmap/regmap.c map->cache_bypass = bypass; map 2411 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2439 drivers/base/regmap/regmap.c int regmap_raw_write_async(struct regmap *map, unsigned int reg, map 2444 drivers/base/regmap/regmap.c if (val_len % map->format.val_bytes) map 2446 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 2449 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2451 drivers/base/regmap/regmap.c map->async = true; map 2453 drivers/base/regmap/regmap.c ret = _regmap_raw_write(map, reg, val, val_len); map 2455 drivers/base/regmap/regmap.c map->async = false; map 2457 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2463 drivers/base/regmap/regmap.c static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, map 2469 drivers/base/regmap/regmap.c WARN_ON(!map->bus); map 2471 drivers/base/regmap/regmap.c if (!map->bus || !map->bus->read) map 2474 drivers/base/regmap/regmap.c range = _regmap_range_lookup(map, reg); map 2476 drivers/base/regmap/regmap.c ret = _regmap_select_page(map, &reg, range, map 2477 drivers/base/regmap/regmap.c val_len / map->format.val_bytes); map 2482 drivers/base/regmap/regmap.c map->format.format_reg(map->work_buf, reg, map->reg_shift); map 2483 drivers/base/regmap/regmap.c regmap_set_work_buf_flag_mask(map, map->format.reg_bytes, map 2484 drivers/base/regmap/regmap.c map->read_flag_mask); map 2485 drivers/base/regmap/regmap.c trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes); map 2487 drivers/base/regmap/regmap.c ret = map->bus->read(map->bus_context, map->work_buf, map 2488 drivers/base/regmap/regmap.c map->format.reg_bytes + map->format.pad_bytes, map 2491 drivers/base/regmap/regmap.c trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes); map 2499 drivers/base/regmap/regmap.c struct regmap *map = context; map 2501 drivers/base/regmap/regmap.c return map->bus->reg_read(map->bus_context, reg, val); map 2508 drivers/base/regmap/regmap.c struct regmap *map = context; map 2509 drivers/base/regmap/regmap.c void *work_val = map->work_buf + map->format.reg_bytes + map 2510 drivers/base/regmap/regmap.c map->format.pad_bytes; map 2512 drivers/base/regmap/regmap.c if (!map->format.parse_val) map 2515 drivers/base/regmap/regmap.c ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes); map 2517 drivers/base/regmap/regmap.c *val = map->format.parse_val(work_val); map 2522 drivers/base/regmap/regmap.c static int _regmap_read(struct regmap *map, unsigned int reg, map 2526 drivers/base/regmap/regmap.c void *context =
_regmap_map_get_context(map); map 2528 drivers/base/regmap/regmap.c if (!map->cache_bypass) { map 2529 drivers/base/regmap/regmap.c ret = regcache_read(map, reg, val); map 2534 drivers/base/regmap/regmap.c if (map->cache_only) map 2537 drivers/base/regmap/regmap.c if (!regmap_readable(map, reg)) map 2540 drivers/base/regmap/regmap.c ret = map->reg_read(context, reg, val); map 2542 drivers/base/regmap/regmap.c if (regmap_should_log(map)) map 2543 drivers/base/regmap/regmap.c dev_info(map->dev, "%x => %x\n", reg, *val); map 2545 drivers/base/regmap/regmap.c trace_regmap_reg_read(map, reg, *val); map 2547 drivers/base/regmap/regmap.c if (!map->cache_bypass) map 2548 drivers/base/regmap/regmap.c regcache_write(map, reg, *val); map 2564 drivers/base/regmap/regmap.c int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) map 2568 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 2571 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2573 drivers/base/regmap/regmap.c ret = _regmap_read(map, reg, val); map 2575 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2592 drivers/base/regmap/regmap.c int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, map 2595 drivers/base/regmap/regmap.c size_t val_bytes = map->format.val_bytes; map 2600 drivers/base/regmap/regmap.c if (!map->bus) map 2602 drivers/base/regmap/regmap.c if (val_len % map->format.val_bytes) map 2604 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 2609 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2611 drivers/base/regmap/regmap.c if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || map 2612 drivers/base/regmap/regmap.c map->cache_type == REGCACHE_NONE) { map 2616 drivers/base/regmap/regmap.c if (!map->bus->read) { map 2621 drivers/base/regmap/regmap.c if (map->use_single_read) map 2623 drivers/base/regmap/regmap.c else if (map->max_raw_read && val_len > map->max_raw_read) map 2624 drivers/base/regmap/regmap.c chunk_regs = map->max_raw_read / val_bytes; map 2631 drivers/base/regmap/regmap.c ret = _regmap_raw_read(map, reg, val, chunk_bytes); map 2635 drivers/base/regmap/regmap.c reg += regmap_get_offset(map, chunk_regs); map 2642 drivers/base/regmap/regmap.c ret = _regmap_raw_read(map, reg, val, val_len); map 2651 drivers/base/regmap/regmap.c ret = _regmap_read(map, reg + regmap_get_offset(map, i), map 2656 drivers/base/regmap/regmap.c map->format.format_val(val + (i * val_bytes), v, 0); map 2661 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2688 drivers/base/regmap/regmap.c int regmap_noinc_read(struct regmap *map, unsigned int reg, map 2694 drivers/base/regmap/regmap.c if (!map->bus) map 2696 drivers/base/regmap/regmap.c if (!map->bus->read) map 2698 drivers/base/regmap/regmap.c if (val_len % map->format.val_bytes) map 2700 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 2705 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2707 drivers/base/regmap/regmap.c if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) { map 2713 drivers/base/regmap/regmap.c if (map->max_raw_read && map->max_raw_read < val_len) map 2714 drivers/base/regmap/regmap.c read_len = map->max_raw_read; map 2717 drivers/base/regmap/regmap.c ret = _regmap_raw_read(map, reg, val, read_len); map 2725 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2799 drivers/base/regmap/regmap.c int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, map 2803 
drivers/base/regmap/regmap.c size_t val_bytes = map->format.val_bytes; map 2804 drivers/base/regmap/regmap.c bool vol = regmap_volatile_range(map, reg, val_count); map 2806 drivers/base/regmap/regmap.c if (!IS_ALIGNED(reg, map->reg_stride)) map 2811 drivers/base/regmap/regmap.c if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { map 2812 drivers/base/regmap/regmap.c ret = regmap_raw_read(map, reg, val, val_bytes * val_count); map 2817 drivers/base/regmap/regmap.c map->format.parse_inplace(val + i); map 2826 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2831 drivers/base/regmap/regmap.c ret = _regmap_read(map, reg + regmap_get_offset(map, i), map 2836 drivers/base/regmap/regmap.c switch (map->format.val_bytes) { map 2858 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2865 drivers/base/regmap/regmap.c static int _regmap_update_bits(struct regmap *map, unsigned int reg, map 2875 drivers/base/regmap/regmap.c if (regmap_volatile(map, reg) && map->reg_update_bits) { map 2876 drivers/base/regmap/regmap.c ret = map->reg_update_bits(map->bus_context, reg, mask, val); map 2880 drivers/base/regmap/regmap.c ret = _regmap_read(map, reg, &orig); map 2888 drivers/base/regmap/regmap.c ret = _regmap_write(map, reg, tmp); map 2919 drivers/base/regmap/regmap.c int regmap_update_bits_base(struct regmap *map, unsigned int reg, map 2925 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 2927 drivers/base/regmap/regmap.c map->async = async; map 2929 drivers/base/regmap/regmap.c ret = _regmap_update_bits(map, reg, mask, val, change, force); map 2931 drivers/base/regmap/regmap.c map->async = false; map 2933 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 2941 drivers/base/regmap/regmap.c struct regmap *map = async->map; map 2944 drivers/base/regmap/regmap.c trace_regmap_async_io_complete(map); map 2946 drivers/base/regmap/regmap.c spin_lock(&map->async_lock); map 2947 drivers/base/regmap/regmap.c list_move(&async->list, &map->async_free); map 2948 drivers/base/regmap/regmap.c wake = list_empty(&map->async_list); map 2951 drivers/base/regmap/regmap.c map->async_ret = ret; map 2953 drivers/base/regmap/regmap.c spin_unlock(&map->async_lock); map 2956 drivers/base/regmap/regmap.c wake_up(&map->async_waitq); map 2960 drivers/base/regmap/regmap.c static int regmap_async_is_done(struct regmap *map) map 2965 drivers/base/regmap/regmap.c spin_lock_irqsave(&map->async_lock, flags); map 2966 drivers/base/regmap/regmap.c ret = list_empty(&map->async_list); map 2967 drivers/base/regmap/regmap.c spin_unlock_irqrestore(&map->async_lock, flags); map 2980 drivers/base/regmap/regmap.c int regmap_async_complete(struct regmap *map) map 2986 drivers/base/regmap/regmap.c if (!map->bus || !map->bus->async_write) map 2989 drivers/base/regmap/regmap.c trace_regmap_async_complete_start(map); map 2991 drivers/base/regmap/regmap.c wait_event(map->async_waitq, regmap_async_is_done(map)); map 2993 drivers/base/regmap/regmap.c spin_lock_irqsave(&map->async_lock, flags); map 2994 drivers/base/regmap/regmap.c ret = map->async_ret; map 2995 drivers/base/regmap/regmap.c map->async_ret = 0; map 2996 drivers/base/regmap/regmap.c spin_unlock_irqrestore(&map->async_lock, flags); map 2998 drivers/base/regmap/regmap.c trace_regmap_async_complete_done(map); map 3021 drivers/base/regmap/regmap.c int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, map 3032 drivers/base/regmap/regmap.c p = krealloc(map->patch, map 3033 drivers/base/regmap/regmap.c 
sizeof(struct reg_sequence) * (map->patch_regs + num_regs), map 3036 drivers/base/regmap/regmap.c memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs)); map 3037 drivers/base/regmap/regmap.c map->patch = p; map 3038 drivers/base/regmap/regmap.c map->patch_regs += num_regs; map 3043 drivers/base/regmap/regmap.c map->lock(map->lock_arg); map 3045 drivers/base/regmap/regmap.c bypass = map->cache_bypass; map 3047 drivers/base/regmap/regmap.c map->cache_bypass = true; map 3048 drivers/base/regmap/regmap.c map->async = true; map 3050 drivers/base/regmap/regmap.c ret = _regmap_multi_reg_write(map, regs, num_regs); map 3052 drivers/base/regmap/regmap.c map->async = false; map 3053 drivers/base/regmap/regmap.c map->cache_bypass = bypass; map 3055 drivers/base/regmap/regmap.c map->unlock(map->lock_arg); map 3057 drivers/base/regmap/regmap.c regmap_async_complete(map); map 3071 drivers/base/regmap/regmap.c int regmap_get_val_bytes(struct regmap *map) map 3073 drivers/base/regmap/regmap.c if (map->format.format_write) map 3076 drivers/base/regmap/regmap.c return map->format.val_bytes; map 3088 drivers/base/regmap/regmap.c int regmap_get_max_register(struct regmap *map) map 3090 drivers/base/regmap/regmap.c return map->max_register ? map->max_register : -EINVAL; map 3102 drivers/base/regmap/regmap.c int regmap_get_reg_stride(struct regmap *map) map 3104 drivers/base/regmap/regmap.c return map->reg_stride; map 3108 drivers/base/regmap/regmap.c int regmap_parse_val(struct regmap *map, const void *buf, map 3111 drivers/base/regmap/regmap.c if (!map->format.parse_val) map 3114 drivers/base/regmap/regmap.c *val = map->format.parse_val(buf); map 18 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, map 21 drivers/base/regmap/trace.h TP_ARGS(map, reg, val), map 24 drivers/base/regmap/trace.h __string( name, regmap_name(map) ) map 30 drivers/base/regmap/trace.h __assign_str(name, regmap_name(map)); map 42 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, map 45 drivers/base/regmap/trace.h TP_ARGS(map, reg, val) map 51 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, map 54 drivers/base/regmap/trace.h TP_ARGS(map, reg, val) map 60 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, map 63 drivers/base/regmap/trace.h TP_ARGS(map, reg, val) map 69 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, int count), map 71 drivers/base/regmap/trace.h TP_ARGS(map, reg, count), map 74 drivers/base/regmap/trace.h __string( name, regmap_name(map) ) map 80 drivers/base/regmap/trace.h __assign_str(name, regmap_name(map)); map 92 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, int count), map 94 drivers/base/regmap/trace.h TP_ARGS(map, reg, count) map 99 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, int count), map 101 drivers/base/regmap/trace.h TP_ARGS(map, reg, count) map 106 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, int count), map 108 drivers/base/regmap/trace.h TP_ARGS(map, reg, count) map 113 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, int count), map 115 drivers/base/regmap/trace.h TP_ARGS(map, reg, count) map 120 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, const char *type, map 123 drivers/base/regmap/trace.h TP_ARGS(map, type, status), map 126 drivers/base/regmap/trace.h __string( name, regmap_name(map) ) map 133 drivers/base/regmap/trace.h __assign_str(name, 
regmap_name(map)); map 144 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, bool flag), map 146 drivers/base/regmap/trace.h TP_ARGS(map, flag), map 149 drivers/base/regmap/trace.h __string( name, regmap_name(map) ) map 154 drivers/base/regmap/trace.h __assign_str(name, regmap_name(map)); map 164 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, bool flag), map 166 drivers/base/regmap/trace.h TP_ARGS(map, flag) map 172 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, bool flag), map 174 drivers/base/regmap/trace.h TP_ARGS(map, flag) map 180 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map), map 182 drivers/base/regmap/trace.h TP_ARGS(map), map 185 drivers/base/regmap/trace.h __string( name, regmap_name(map) ) map 189 drivers/base/regmap/trace.h __assign_str(name, regmap_name(map)); map 197 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int reg, int count), map 199 drivers/base/regmap/trace.h TP_ARGS(map, reg, count) map 204 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map), map 206 drivers/base/regmap/trace.h TP_ARGS(map) map 212 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map), map 214 drivers/base/regmap/trace.h TP_ARGS(map) map 220 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map), map 222 drivers/base/regmap/trace.h TP_ARGS(map) map 228 drivers/base/regmap/trace.h TP_PROTO(struct regmap *map, unsigned int from, map 231 drivers/base/regmap/trace.h TP_ARGS(map, from, to), map 234 drivers/base/regmap/trace.h __string( name, regmap_name(map) ) map 240 drivers/base/regmap/trace.h __assign_str(name, regmap_name(map)); map 769 drivers/block/virtio_blk.c return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT], map 808 drivers/block/xen-blkback/blkback.c struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST]; map 852 drivers/block/xen-blkback/blkback.c gnttab_set_map_op(&map[segs_to_map++], addr, map 862 drivers/block/xen-blkback/blkback.c ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map); map 875 drivers/block/xen-blkback/blkback.c if (unlikely(map[new_map_idx].status != 0)) { map 882 drivers/block/xen-blkback/blkback.c pages[seg_idx]->handle = map[new_map_idx].handle; map 902 drivers/block/xen-blkback/blkback.c persistent_gnt->gnt = map[new_map_idx].ref; map 903 drivers/block/xen-blkback/blkback.c persistent_gnt->handle = map[new_map_idx].handle; map 637 drivers/bus/moxtet.c .map = moxtet_irq_domain_map, map 674 drivers/bus/moxtet.c static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map) map 684 drivers/bus/moxtet.c *map = 0; map 688 drivers/bus/moxtet.c set_bit(i, map); map 447 drivers/bus/mvebu-mbus.c u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i)); map 451 drivers/bus/mvebu-mbus.c if (!(map & 1)) { map 456 drivers/bus/mvebu-mbus.c base = map & 0xff800000; map 457 drivers/bus/mvebu-mbus.c size = 0x100000 << (((map & 0x000f0000) >> 16) - 4); map 760 drivers/bus/mvebu-mbus.c u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i)); map 765 drivers/bus/mvebu-mbus.c if (map & 1) { map 773 drivers/bus/mvebu-mbus.c w->base = map & 0xff800000; map 774 drivers/bus/mvebu-mbus.c w->size = 0x100000 << (((map & 0x000f0000) >> 16) - 4); map 788 drivers/bus/mvebu-mbus.c u32 map = readl(mbus->sdramwins_base + DOVE_DDR_BASE_CS_OFF(i)); map 792 drivers/bus/mvebu-mbus.c writel(map, store_addr++); map 62 drivers/char/ipmi/bt-bmc.c struct regmap *map; map 83 drivers/char/ipmi/bt-bmc.c rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val); map 93 drivers/char/ipmi/bt-bmc.c rc = 
regmap_write(bt_bmc->map, bt_bmc->offset + reg, data); map 381 drivers/char/ipmi/bt-bmc.c rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, &reg); map 390 drivers/char/ipmi/bt-bmc.c regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg); map 420 drivers/char/ipmi/bt-bmc.c rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1, map 445 drivers/char/ipmi/bt-bmc.c bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node); map 446 drivers/char/ipmi/bt-bmc.c if (IS_ERR(bt_bmc->map)) { map 459 drivers/char/ipmi/bt-bmc.c bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg); map 491 drivers/char/ipmi/bt-bmc.c regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0, map 65 drivers/char/ipmi/kcs_bmc_aspeed.c struct regmap *map; map 75 drivers/char/ipmi/kcs_bmc_aspeed.c rc = regmap_read(priv->map, reg, &val); map 86 drivers/char/ipmi/kcs_bmc_aspeed.c rc = regmap_write(priv->map, reg, data); map 110 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR4, map 112 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_write(priv->map, LPC_LADR12H, addr >> 8); map 113 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_write(priv->map, LPC_LADR12L, addr & 0xFF); map 117 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR4, map 119 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_write(priv->map, LPC_LADR12H, addr >> 8); map 120 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_write(priv->map, LPC_LADR12L, addr & 0xFF); map 124 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_write(priv->map, LPC_LADR3H, addr >> 8); map 125 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_write(priv->map, LPC_LADR3L, addr & 0xFF); map 129 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_write(priv->map, LPC_LADR4, ((addr + 1) << 16) | map 145 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR2, map 147 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR0, map 150 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR0, map 152 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR2, map 159 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR2, map 161 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR0, map 164 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR0, map 166 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR2, map 173 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR2, map 175 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR0, map 177 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR4, map 180 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR0, map 182 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR4, map 184 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICR2, map 191 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICRB, map 195 drivers/char/ipmi/kcs_bmc_aspeed.c regmap_update_bits(priv->map, LPC_HICRB, map 261 drivers/char/ipmi/kcs_bmc_aspeed.c priv->map = syscon_node_to_regmap(dev->parent->of_node); map 262 drivers/char/ipmi/kcs_bmc_aspeed.c if (IS_ERR(priv->map)) { map 68 drivers/char/ipmi/kcs_bmc_npcm7xx.c struct regmap *map; map 85 drivers/char/ipmi/kcs_bmc_npcm7xx.c rc = regmap_read(priv->map, reg, &val); map 96 drivers/char/ipmi/kcs_bmc_npcm7xx.c rc = regmap_write(priv->map, reg, data);
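
The BMC IPMI drivers above (bt-bmc, kcs_bmc_aspeed, kcs_bmc_npcm7xx) all reach their registers through a regmap owned by a parent syscon node. A rough sketch of that pattern follows; the register offset and bit name are invented for illustration.

/*
 * Sketch of the syscon/regmap access pattern used by the BMC IPMI
 * drivers indexed above. DEMO_* names are hypothetical.
 */
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#define DEMO_CTRL_REG		0x10	/* hypothetical offset in the syscon block */
#define DEMO_CTRL_ENABLE	BIT(0)

static int demo_probe(struct platform_device *pdev)
{
	struct regmap *map;

	/* The parent device tree node is expected to be a syscon. */
	map = syscon_node_to_regmap(pdev->dev.parent->of_node);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* Read-modify-write of one bit; regmap serialises the access. */
	return regmap_update_bits(map, DEMO_CTRL_REG,
				  DEMO_CTRL_ENABLE, DEMO_CTRL_ENABLE);
}

map 104 drivers/char/ipmi/kcs_bmc_npcm7xx.c regmap_update_bits(priv->map,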
priv->reg->ctl, KCS_CTL_IBFIE, map 107 drivers/char/ipmi/kcs_bmc_npcm7xx.c regmap_update_bits(priv->map, priv->reg->ie, KCS_IE_IRQE | KCS_IE_HIRQE, map 154 drivers/char/ipmi/kcs_bmc_npcm7xx.c priv->map = syscon_node_to_regmap(dev->parent->of_node); map 155 drivers/char/ipmi/kcs_bmc_npcm7xx.c if (IS_ERR(priv->map)) { map 18 drivers/clk/actions/owl-reset.c const struct owl_reset_map *map = &reset->reset_map[id]; map 20 drivers/clk/actions/owl-reset.c return regmap_update_bits(reset->regmap, map->reg, map->bit, 0); map 27 drivers/clk/actions/owl-reset.c const struct owl_reset_map *map = &reset->reset_map[id]; map 29 drivers/clk/actions/owl-reset.c return regmap_update_bits(reset->regmap, map->reg, map->bit, map->bit); map 46 drivers/clk/actions/owl-reset.c const struct owl_reset_map *map = &reset->reset_map[id]; map 50 drivers/clk/actions/owl-reset.c ret = regmap_read(reset->regmap, map->reg, ®); map 58 drivers/clk/actions/owl-reset.c return !(map->bit & reg); map 57 drivers/clk/berlin/berlin2-div.c struct berlin2_div_map map; map 68 drivers/clk/berlin/berlin2-div.c struct berlin2_div_map *map = &div->map; map 74 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->gate_offs); map 75 drivers/clk/berlin/berlin2-div.c reg >>= map->gate_shift; map 86 drivers/clk/berlin/berlin2-div.c struct berlin2_div_map *map = &div->map; map 92 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->gate_offs); map 93 drivers/clk/berlin/berlin2-div.c reg |= BIT(map->gate_shift); map 94 drivers/clk/berlin/berlin2-div.c writel_relaxed(reg, div->base + map->gate_offs); map 105 drivers/clk/berlin/berlin2-div.c struct berlin2_div_map *map = &div->map; map 111 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->gate_offs); map 112 drivers/clk/berlin/berlin2-div.c reg &= ~BIT(map->gate_shift); map 113 drivers/clk/berlin/berlin2-div.c writel_relaxed(reg, div->base + map->gate_offs); map 122 drivers/clk/berlin/berlin2-div.c struct berlin2_div_map *map = &div->map; map 129 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->pll_switch_offs); map 131 drivers/clk/berlin/berlin2-div.c reg &= ~BIT(map->pll_switch_shift); map 133 drivers/clk/berlin/berlin2-div.c reg |= BIT(map->pll_switch_shift); map 134 drivers/clk/berlin/berlin2-div.c writel_relaxed(reg, div->base + map->pll_switch_offs); map 138 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->pll_select_offs); map 139 drivers/clk/berlin/berlin2-div.c reg &= ~(PLL_SELECT_MASK << map->pll_select_shift); map 140 drivers/clk/berlin/berlin2-div.c reg |= (index - 1) << map->pll_select_shift; map 141 drivers/clk/berlin/berlin2-div.c writel_relaxed(reg, div->base + map->pll_select_offs); map 153 drivers/clk/berlin/berlin2-div.c struct berlin2_div_map *map = &div->map; map 161 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->pll_switch_offs); map 162 drivers/clk/berlin/berlin2-div.c reg &= BIT(map->pll_switch_shift); map 164 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->pll_select_offs); map 165 drivers/clk/berlin/berlin2-div.c reg >>= map->pll_select_shift; map 180 drivers/clk/berlin/berlin2-div.c struct berlin2_div_map *map = &div->map; map 186 drivers/clk/berlin/berlin2-div.c divsw = readl_relaxed(div->base + map->div_switch_offs) & map 187 drivers/clk/berlin/berlin2-div.c (1 << map->div_switch_shift); map 188 drivers/clk/berlin/berlin2-div.c div3sw = readl_relaxed(div->base + map->div3_switch_offs) & map 189 drivers/clk/berlin/berlin2-div.c (1 
<< map->div3_switch_shift); map 200 drivers/clk/berlin/berlin2-div.c reg = readl_relaxed(div->base + map->div_select_offs); map 201 drivers/clk/berlin/berlin2-div.c reg >>= map->div_select_shift; map 228 drivers/clk/berlin/berlin2-div.c berlin2_div_register(const struct berlin2_div_map *map, map 243 drivers/clk/berlin/berlin2-div.c memcpy(&div->map, map, sizeof(*map)); map 68 drivers/clk/berlin/berlin2-div.h struct berlin2_div_map map; map 73 drivers/clk/berlin/berlin2-div.h berlin2_div_register(const struct berlin2_div_map *map, map 22 drivers/clk/berlin/berlin2-pll.c struct berlin2_pll_map map; map 45 drivers/clk/berlin/berlin2-pll.c struct berlin2_pll_map *map = &pll->map; map 50 drivers/clk/berlin/berlin2-pll.c fbdiv = (val >> map->fbdiv_shift) & FBDIV_MASK; map 51 drivers/clk/berlin/berlin2-pll.c rfdiv = (val >> map->rfdiv_shift) & RFDIV_MASK; map 58 drivers/clk/berlin/berlin2-pll.c vcodivsel = (val >> map->divsel_shift) & DIVSEL_MASK; map 59 drivers/clk/berlin/berlin2-pll.c vcodiv = map->vcodiv[vcodivsel]; map 66 drivers/clk/berlin/berlin2-pll.c rate *= fbdiv * map->mult; map 77 drivers/clk/berlin/berlin2-pll.c berlin2_pll_register(const struct berlin2_pll_map *map, map 89 drivers/clk/berlin/berlin2-pll.c memcpy(&pll->map, map, sizeof(*map)); map 19 drivers/clk/berlin/berlin2-pll.h int berlin2_pll_register(const struct berlin2_pll_map *map, map 153 drivers/clk/berlin/bg2.c .map = { map 170 drivers/clk/berlin/bg2.c .map = { map 184 drivers/clk/berlin/bg2.c .map = { map 199 drivers/clk/berlin/bg2.c .map = { map 214 drivers/clk/berlin/bg2.c .map = { map 229 drivers/clk/berlin/bg2.c .map = { map 244 drivers/clk/berlin/bg2.c .map = { map 259 drivers/clk/berlin/bg2.c .map = { map 274 drivers/clk/berlin/bg2.c .map = { map 289 drivers/clk/berlin/bg2.c .map = { map 304 drivers/clk/berlin/bg2.c .map = { map 319 drivers/clk/berlin/bg2.c .map = { map 334 drivers/clk/berlin/bg2.c .map = { map 347 drivers/clk/berlin/bg2.c .map = { map 360 drivers/clk/berlin/bg2.c .map = { map 373 drivers/clk/berlin/bg2.c .map = { map 386 drivers/clk/berlin/bg2.c .map = { map 396 drivers/clk/berlin/bg2.c .map = { map 406 drivers/clk/berlin/bg2.c .map = { map 416 drivers/clk/berlin/bg2.c .map = { map 426 drivers/clk/berlin/bg2.c .map = { map 436 drivers/clk/berlin/bg2.c .map = { map 446 drivers/clk/berlin/bg2.c .map = { map 456 drivers/clk/berlin/bg2.c .map = { map 466 drivers/clk/berlin/bg2.c .map = { map 648 drivers/clk/berlin/bg2.c hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase, map 82 drivers/clk/berlin/bg2q.c .map = { map 97 drivers/clk/berlin/bg2q.c .map = { map 112 drivers/clk/berlin/bg2q.c .map = { map 127 drivers/clk/berlin/bg2q.c .map = { map 142 drivers/clk/berlin/bg2q.c .map = { map 157 drivers/clk/berlin/bg2q.c .map = { map 172 drivers/clk/berlin/bg2q.c .map = { map 187 drivers/clk/berlin/bg2q.c .map = { map 202 drivers/clk/berlin/bg2q.c .map = { map 217 drivers/clk/berlin/bg2q.c .map = { map 232 drivers/clk/berlin/bg2q.c .map = { map 247 drivers/clk/berlin/bg2q.c .map = { map 257 drivers/clk/berlin/bg2q.c .map = { map 341 drivers/clk/berlin/bg2q.c hws[CLKID_SYS + n] = berlin2_div_register(&dd->map, gbase, map 198 drivers/clk/clk-aspeed.c regmap_read(gate->map, ASPEED_RESET_CTRL, &reg); map 203 drivers/clk/clk-aspeed.c regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg); map 225 drivers/clk/clk-aspeed.c regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst); map 233 drivers/clk/clk-aspeed.c regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval);
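
The clk-aspeed entries above show a clock gate whose enable path toggles reset and clock-stop bits through a shared regmap. A loose sketch of that sequence follows; the structure, register offsets and delay are illustrative only, not the driver's actual code.

/*
 * Loose sketch of a regmap-backed clock gate: hold the block in reset,
 * ungate its clock, then release the reset. All DEMO_* values are
 * placeholders.
 */
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/types.h>

#define DEMO_RESET_CTRL		0x04	/* hypothetical register offsets */
#define DEMO_CLK_STOP_CTRL	0x0c

struct demo_clk_gate {
	struct clk_hw hw;
	struct regmap *map;
	u32 reset_bit;
	u32 clock_bit;
};

static int demo_gate_enable(struct clk_hw *hw)
{
	struct demo_clk_gate *gate = container_of(hw, struct demo_clk_gate, hw);

	/* Hold the IP block in reset before touching its clock. */
	regmap_update_bits(gate->map, DEMO_RESET_CTRL,
			   gate->reset_bit, gate->reset_bit);
	/* Clear the stop bit: 0 means the clock is running. */
	regmap_update_bits(gate->map, DEMO_CLK_STOP_CTRL, gate->clock_bit, 0);
	udelay(10);
	/* Release the reset once the clock has been running for a while. */
	return regmap_update_bits(gate->map, DEMO_RESET_CTRL,
				  gate->reset_bit, 0);
}

static const struct clk_ops demo_gate_ops = {
	.enable = demo_gate_enable,
};

map 240 drivers/clk/clk-aspeed.c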
regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, 0); map 258 drivers/clk/clk-aspeed.c regmap_update_bits(gate->map, ASPEED_CLK_STOP_CTRL, clk, enval); map 300 drivers/clk/clk-aspeed.c return regmap_update_bits(ar->map, reg, BIT(bit), 0); map 315 drivers/clk/clk-aspeed.c return regmap_update_bits(ar->map, reg, BIT(bit), BIT(bit)); map 331 drivers/clk/clk-aspeed.c ret = regmap_read(ar->map, reg, &val); map 346 drivers/clk/clk-aspeed.c struct regmap *map, u8 clock_idx, u8 reset_idx, map 364 drivers/clk/clk-aspeed.c gate->map = map; map 386 drivers/clk/clk-aspeed.c struct regmap *map; map 391 drivers/clk/clk-aspeed.c map = syscon_node_to_regmap(dev->of_node); map 392 drivers/clk/clk-aspeed.c if (IS_ERR(map)) { map 394 drivers/clk/clk-aspeed.c return PTR_ERR(map); map 401 drivers/clk/clk-aspeed.c ar->map = map; map 421 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_MISC_CTRL, &val); map 436 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_MPLL_PARAM, &val); map 529 drivers/clk/clk-aspeed.c map, map 558 drivers/clk/clk-aspeed.c static void __init aspeed_ast2400_cc(struct regmap *map) map 572 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_STRAP, &val); map 592 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_HPLL_PARAM, &val); map 608 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_STRAP, &val); map 626 drivers/clk/clk-aspeed.c static void __init aspeed_ast2500_cc(struct regmap *map) map 632 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_STRAP, &val); map 644 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_HPLL_PARAM, &val); map 648 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_STRAP, &val); map 656 drivers/clk/clk-aspeed.c regmap_read(map, ASPEED_CLK_SELECTION, &val); map 665 drivers/clk/clk-aspeed.c struct regmap *map; map 687 drivers/clk/clk-aspeed.c map = syscon_node_to_regmap(np); map 688 drivers/clk/clk-aspeed.c if (IS_ERR(map)) { map 698 drivers/clk/clk-aspeed.c ret = regmap_read(map, ASPEED_STRAP, &val); map 705 drivers/clk/clk-aspeed.c aspeed_ast2400_cc(map); map 707 drivers/clk/clk-aspeed.c aspeed_ast2500_cc(map); map 49 drivers/clk/clk-aspeed.h struct regmap *map; map 64 drivers/clk/clk-aspeed.h struct regmap *map; map 233 drivers/clk/clk-ast2600.c regmap_read(gate->map, get_reset_reg(gate), &reg); map 239 drivers/clk/clk-ast2600.c regmap_read(gate->map, get_clock_reg(gate), &reg); map 262 drivers/clk/clk-ast2600.c regmap_write(gate->map, get_reset_reg(gate), rst); map 270 drivers/clk/clk-ast2600.c regmap_write(gate->map, get_clock_reg(gate) + 0x04, clk); map 273 drivers/clk/clk-ast2600.c regmap_write(gate->map, get_clock_reg(gate), clk); map 280 drivers/clk/clk-ast2600.c regmap_write(gate->map, get_reset_reg(gate) + 0x4, rst); map 297 drivers/clk/clk-ast2600.c regmap_write(gate->map, get_clock_reg(gate), clk); map 300 drivers/clk/clk-ast2600.c regmap_write(gate->map, get_clock_reg(gate) + 0x4, clk); map 320 drivers/clk/clk-ast2600.c return regmap_write(ar->map, reg + 0x04, rst); map 330 drivers/clk/clk-ast2600.c return regmap_write(ar->map, reg, rst); map 342 drivers/clk/clk-ast2600.c ret = regmap_read(ar->map, reg, &val); map 357 drivers/clk/clk-ast2600.c struct regmap *map, u8 clock_idx, u8 reset_idx, map 375 drivers/clk/clk-ast2600.c gate->map = map; map 411 drivers/clk/clk-ast2600.c struct regmap *map; map 416 drivers/clk/clk-ast2600.c map = syscon_node_to_regmap(dev->of_node); map 417 drivers/clk/clk-ast2600.c if (IS_ERR(map)) { map 419 drivers/clk/clk-ast2600.c return PTR_ERR(map); map 426 drivers/clk/clk-ast2600.c ar->map = map;
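
The same regmap also backs a reset controller in these clock drivers (clk-aspeed, clk-ast2600, and the owl/qcom/sunxi reset drivers later in this index): each reset id selects a register/bit pair from a small table. A hedged sketch of that pattern, with made-up names and table layout, is shown below.

/*
 * Sketch of a table-driven reset controller on top of a regmap.
 * demo_* names and the table contents are illustrative only.
 */
#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <linux/types.h>

struct demo_reset_map {
	u32 reg;
	u32 bit;
};

struct demo_reset {
	struct reset_controller_dev rcdev;
	struct regmap *regmap;
	const struct demo_reset_map *map;
};

static int demo_reset_assert(struct reset_controller_dev *rcdev,
			     unsigned long id)
{
	struct demo_reset *rst = container_of(rcdev, struct demo_reset, rcdev);
	const struct demo_reset_map *map = &rst->map[id];

	/* Setting the bit holds the line in reset. */
	return regmap_update_bits(rst->regmap, map->reg,
				  BIT(map->bit), BIT(map->bit));
}

static int demo_reset_deassert(struct reset_controller_dev *rcdev,
			       unsigned long id)
{
	struct demo_reset *rst = container_of(rcdev, struct demo_reset, rcdev);
	const struct demo_reset_map *map = &rst->map[id];

	return regmap_update_bits(rst->regmap, map->reg, BIT(map->bit), 0);
}

static const struct reset_control_ops demo_reset_ops = {
	.assert   = demo_reset_assert,
	.deassert = demo_reset_deassert,
};

map 440 drivers/clk/clk-ast2600.c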
regmap_read(map, ASPEED_G6_MISC_CTRL, &val); map 451 drivers/clk/clk-ast2600.c regmap_read(map, 0x80, &val); map 517 drivers/clk/clk-ast2600.c regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(10)); map 528 drivers/clk/clk-ast2600.c regmap_write(map, 0x308, 0x12000); /* 3x3 = 9 */ map 570 drivers/clk/clk-ast2600.c map, map 606 drivers/clk/clk-ast2600.c static void __init aspeed_g6_cc(struct regmap *map) map 617 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_HPLL_PARAM, &val); map 620 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_MPLL_PARAM, &val); map 623 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_DPLL_PARAM, &val); map 626 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_EPLL_PARAM, &val); map 629 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_APLL_PARAM, &val); map 633 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_G6_STRAP1, &val); map 639 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_G6_SILICON_REV, &chip_id); map 648 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_G6_CLK_SELECTION1, &val); map 654 drivers/clk/clk-ast2600.c regmap_read(map, ASPEED_G6_CLK_SELECTION4, &val); map 667 drivers/clk/clk-ast2600.c struct regmap *map; map 693 drivers/clk/clk-ast2600.c map = syscon_node_to_regmap(np); map 694 drivers/clk/clk-ast2600.c if (IS_ERR(map)) { map 699 drivers/clk/clk-ast2600.c aspeed_g6_cc(map); map 74 drivers/clk/clk-gemini.c struct regmap *map; map 84 drivers/clk/clk-gemini.c struct regmap *map; map 125 drivers/clk/clk-gemini.c regmap_read(pciclk->map, GEMINI_GLOBAL_MISC_CONTROL, &val); map 146 drivers/clk/clk-gemini.c return regmap_update_bits(pciclk->map, map 150 drivers/clk/clk-gemini.c return regmap_update_bits(pciclk->map, map 160 drivers/clk/clk-gemini.c regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, map 169 drivers/clk/clk-gemini.c regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, map 178 drivers/clk/clk-gemini.c regmap_read(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL, &val); map 193 drivers/clk/clk-gemini.c struct regmap *map) map 208 drivers/clk/clk-gemini.c pciclk->map = map; map 229 drivers/clk/clk-gemini.c return regmap_write(gr->map, map 253 drivers/clk/clk-gemini.c ret = regmap_read(gr->map, GEMINI_GLOBAL_SOFT_RESET, &val); map 274 drivers/clk/clk-gemini.c struct regmap *map; map 294 drivers/clk/clk-gemini.c map = syscon_node_to_regmap(np); map 295 drivers/clk/clk-gemini.c if (IS_ERR(map)) { map 297 drivers/clk/clk-gemini.c return PTR_ERR(map); map 300 drivers/clk/clk-gemini.c gr->map = map; map 317 drivers/clk/clk-gemini.c regmap_read(map, GEMINI_GLOBAL_STATUS, &val); map 326 drivers/clk/clk-gemini.c regmap_read(map, GEMINI_GLOBAL_CLOCK_CONTROL, &val); map 367 drivers/clk/clk-gemini.c hw = gemini_pci_clk_setup("PCI", "xtal", map); map 394 drivers/clk/clk-gemini.c struct regmap *map; map 415 drivers/clk/clk-gemini.c map = syscon_node_to_regmap(np); map 416 drivers/clk/clk-gemini.c if (IS_ERR(map)) { map 426 drivers/clk/clk-gemini.c ret = regmap_read(map, GEMINI_GLOBAL_STATUS, &val); map 51 drivers/clk/ingenic/tcu.c struct regmap *map; map 70 drivers/clk/ingenic/tcu.c regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); map 81 drivers/clk/ingenic/tcu.c regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); map 90 drivers/clk/ingenic/tcu.c regmap_read(tcu_clk->tcu->map, TCU_REG_TSR, &value); map 110 drivers/clk/ingenic/tcu.c regmap_write(tcu->map, TCU_REG_TSCR, BIT(info->gate_bit)); map 123 drivers/clk/ingenic/tcu.c regmap_write(tcu->map, TCU_REG_TSSR, BIT(info->gate_bit)); map 133 
drivers/clk/ingenic/tcu.c ret = regmap_read(tcu_clk->tcu->map, info->tcsr_reg, &val); map 148 drivers/clk/ingenic/tcu.c ret = regmap_update_bits(tcu_clk->tcu->map, info->tcsr_reg, map 166 drivers/clk/ingenic/tcu.c ret = regmap_read(tcu_clk->tcu->map, info->tcsr_reg, &prescale); map 210 drivers/clk/ingenic/tcu.c ret = regmap_update_bits(tcu_clk->tcu->map, info->tcsr_reg, map 288 drivers/clk/ingenic/tcu.c regmap_update_bits(tcu->map, info->tcsr_reg, 0xffff, BIT(parent)); map 331 drivers/clk/ingenic/tcu.c struct regmap *map; map 335 drivers/clk/ingenic/tcu.c map = device_node_to_regmap(np); map 336 drivers/clk/ingenic/tcu.c if (IS_ERR(map)) map 337 drivers/clk/ingenic/tcu.c return PTR_ERR(map); map 343 drivers/clk/ingenic/tcu.c tcu->map = map; map 924 drivers/clk/meson/axg-audio.c struct regmap *map; map 933 drivers/clk/meson/axg-audio.c unsigned int stride = regmap_get_reg_stride(rst->map); map 949 drivers/clk/meson/axg-audio.c regmap_update_bits(rst->map, offset, BIT(bit), map 964 drivers/clk/meson/axg-audio.c regmap_read(rst->map, offset, &val); map 1018 drivers/clk/meson/axg-audio.c struct regmap *map; map 1033 drivers/clk/meson/axg-audio.c map = devm_regmap_init_mmio(dev, regs, &axg_audio_regmap_cfg); map 1034 drivers/clk/meson/axg-audio.c if (IS_ERR(map)) { map 1035 drivers/clk/meson/axg-audio.c dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map)); map 1036 drivers/clk/meson/axg-audio.c return PTR_ERR(map); map 1052 drivers/clk/meson/axg-audio.c aud_clk_regmaps[i]->map = map; map 1085 drivers/clk/meson/axg-audio.c rst->map = map; map 26 drivers/clk/meson/clk-cpu-dyndiv.c meson_parm_read(clk->map, &data->div), map 55 drivers/clk/meson/clk-cpu-dyndiv.c meson_parm_write(clk->map, &data->dyn, 1); map 58 drivers/clk/meson/clk-cpu-dyndiv.c return regmap_update_bits(clk->map, data->div.reg_off, map 54 drivers/clk/meson/clk-dualdiv.c setting.dual = meson_parm_read(clk->map, &dualdiv->dual); map 55 drivers/clk/meson/clk-dualdiv.c setting.n1 = meson_parm_read(clk->map, &dualdiv->n1) + 1; map 56 drivers/clk/meson/clk-dualdiv.c setting.m1 = meson_parm_read(clk->map, &dualdiv->m1) + 1; map 57 drivers/clk/meson/clk-dualdiv.c setting.n2 = meson_parm_read(clk->map, &dualdiv->n2) + 1; map 58 drivers/clk/meson/clk-dualdiv.c setting.m2 = meson_parm_read(clk->map, &dualdiv->m2) + 1; map 114 drivers/clk/meson/clk-dualdiv.c meson_parm_write(clk->map, &dualdiv->dual, setting->dual); map 115 drivers/clk/meson/clk-dualdiv.c meson_parm_write(clk->map, &dualdiv->n1, setting->n1 - 1); map 116 drivers/clk/meson/clk-dualdiv.c meson_parm_write(clk->map, &dualdiv->m1, setting->m1 - 1); map 117 drivers/clk/meson/clk-dualdiv.c meson_parm_write(clk->map, &dualdiv->n2, setting->n2 - 1); map 118 drivers/clk/meson/clk-dualdiv.c meson_parm_write(clk->map, &dualdiv->m2, setting->m2 - 1); map 83 drivers/clk/meson/clk-mpll.c sdm = meson_parm_read(clk->map, &mpll->sdm); map 84 drivers/clk/meson/clk-mpll.c n2 = meson_parm_read(clk->map, &mpll->n2); map 119 drivers/clk/meson/clk-mpll.c meson_parm_write(clk->map, &mpll->sdm, sdm); map 122 drivers/clk/meson/clk-mpll.c meson_parm_write(clk->map, &mpll->n2, n2); map 138 drivers/clk/meson/clk-mpll.c regmap_multi_reg_write(clk->map, mpll->init_regs, map 142 drivers/clk/meson/clk-mpll.c meson_parm_write(clk->map, &mpll->sdm_en, 1); map 148 drivers/clk/meson/clk-mpll.c meson_parm_write(clk->map, &mpll->ssen, ss); map 153 drivers/clk/meson/clk-mpll.c meson_parm_write(clk->map, &mpll->misc, 1); map 43 drivers/clk/meson/clk-phase.c val = meson_parm_read(clk->map, &phase->ph); map 55 
drivers/clk/meson/clk-phase.c meson_parm_write(clk->map, &phase->ph, val); map 88 drivers/clk/meson/clk-phase.c val = meson_parm_read(clk->map, &tph->ph0); map 89 drivers/clk/meson/clk-phase.c meson_parm_write(clk->map, &tph->ph1, val); map 90 drivers/clk/meson/clk-phase.c meson_parm_write(clk->map, &tph->ph2, val); map 100 drivers/clk/meson/clk-phase.c val = meson_parm_read(clk->map, &tph->ph0); map 112 drivers/clk/meson/clk-phase.c meson_parm_write(clk->map, &tph->ph0, val); map 113 drivers/clk/meson/clk-phase.c meson_parm_write(clk->map, &tph->ph1, val); map 114 drivers/clk/meson/clk-phase.c meson_parm_write(clk->map, &tph->ph2, val); map 79 drivers/clk/meson/clk-pll.c n = meson_parm_read(clk->map, &pll->n); map 89 drivers/clk/meson/clk-pll.c m = meson_parm_read(clk->map, &pll->m); map 92 drivers/clk/meson/clk-pll.c meson_parm_read(clk->map, &pll->frac) : map 280 drivers/clk/meson/clk-pll.c if (meson_parm_read(clk->map, &pll->l)) map 295 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->rst, 1); map 296 drivers/clk/meson/clk-pll.c regmap_multi_reg_write(clk->map, pll->init_regs, map 298 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->rst, 0); map 307 drivers/clk/meson/clk-pll.c if (meson_parm_read(clk->map, &pll->rst) || map 308 drivers/clk/meson/clk-pll.c !meson_parm_read(clk->map, &pll->en) || map 309 drivers/clk/meson/clk-pll.c !meson_parm_read(clk->map, &pll->l)) map 335 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->rst, 1); map 338 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->en, 1); map 341 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->rst, 0); map 355 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->rst, 1); map 358 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->en, 0); map 378 drivers/clk/meson/clk-pll.c enabled = meson_parm_read(clk->map, &pll->en); map 382 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->n, n); map 383 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->m, m); map 387 drivers/clk/meson/clk-pll.c meson_parm_write(clk->map, &pll->frac, frac); map 18 drivers/clk/meson/clk-regmap.c return regmap_update_bits(clk->map, gate->offset, BIT(gate->bit_idx), map 38 drivers/clk/meson/clk-regmap.c regmap_read(clk->map, gate->offset, &val); map 67 drivers/clk/meson/clk-regmap.c ret = regmap_read(clk->map, div->offset, &val); map 88 drivers/clk/meson/clk-regmap.c ret = regmap_read(clk->map, div->offset, &val); map 118 drivers/clk/meson/clk-regmap.c return regmap_update_bits(clk->map, div->offset, map 144 drivers/clk/meson/clk-regmap.c ret = regmap_read(clk->map, mux->offset, &val); map 159 drivers/clk/meson/clk-regmap.c return regmap_update_bits(clk->map, mux->offset, map 25 drivers/clk/meson/clk-regmap.h struct regmap *map; map 70 drivers/clk/meson/meson-aoclk.c data->clks[clkid]->map = regmap; map 20 drivers/clk/meson/meson-eeclk.c struct regmap *map; map 28 drivers/clk/meson/meson-eeclk.c map = syscon_node_to_regmap(of_get_parent(dev->of_node)); map 29 drivers/clk/meson/meson-eeclk.c if (IS_ERR(map)) { map 32 drivers/clk/meson/meson-eeclk.c return PTR_ERR(map); map 36 drivers/clk/meson/meson-eeclk.c regmap_multi_reg_write(map, data->init_regs, data->init_count); map 40 drivers/clk/meson/meson-eeclk.c data->regmap_clks[i]->map = map; map 3648 drivers/clk/meson/meson8b.c struct regmap *map; map 3651 drivers/clk/meson/meson8b.c map = syscon_node_to_regmap(of_get_parent(np)); map 3652 drivers/clk/meson/meson8b.c if (IS_ERR(map)) { map 3662 
drivers/clk/meson/meson8b.c map = regmap_init_mmio(NULL, clk_base, &clkc_regmap_config); map 3663 drivers/clk/meson/meson8b.c if (IS_ERR(map)) map 3672 drivers/clk/meson/meson8b.c rstc->regmap = map; map 3685 drivers/clk/meson/meson8b.c meson8b_clk_regmaps[i]->map = map; map 30 drivers/clk/meson/parm.h static inline unsigned int meson_parm_read(struct regmap *map, struct parm *p) map 34 drivers/clk/meson/parm.h regmap_read(map, p->reg_off, &val); map 38 drivers/clk/meson/parm.h static inline void meson_parm_write(struct regmap *map, struct parm *p, map 41 drivers/clk/meson/parm.h regmap_update_bits(map, p->reg_off, SETPMASK(p->width, p->shift), map 121 drivers/clk/meson/sclk-div.c meson_parm_write(clk->map, &sclk->hi, hi); map 151 drivers/clk/meson/sclk-div.c hi = meson_parm_read(clk->map, &sclk->hi); map 163 drivers/clk/meson/sclk-div.c meson_parm_write(clk->map, &sclk->div, sclk->cached_div - 1); map 205 drivers/clk/meson/sclk-div.c meson_parm_write(clk->map, &sclk->div, 0); map 213 drivers/clk/meson/sclk-div.c if (meson_parm_read(clk->map, &sclk->div)) map 225 drivers/clk/meson/sclk-div.c val = meson_parm_read(clk->map, &sclk->div); map 82 drivers/clk/meson/vid-pll-div.c div = _get_table_val(meson_parm_read(clk->map, &pll_div->val), map 83 drivers/clk/meson/vid-pll-div.c meson_parm_read(clk->map, &pll_div->sel)); map 661 drivers/clk/mvebu/armada-37xx-periph.c struct regmap *map; map 672 drivers/clk/mvebu/armada-37xx-periph.c map = syscon_regmap_lookup_by_compatible( map 674 drivers/clk/mvebu/armada-37xx-periph.c pmcpu_clk->nb_pm_base = map; map 60 drivers/clk/qcom/common.c int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, u8 src) map 65 drivers/clk/qcom/common.c if (src == map[i].src) map 89 drivers/clk/qcom/common.c qcom_pll_set_fsm_mode(struct regmap *map, u32 reg, u8 bias_count, u8 lock_count) map 95 drivers/clk/qcom/common.c regmap_update_bits(map, reg, PLL_VOTE_FSM_RESET, 0); map 102 drivers/clk/qcom/common.c regmap_update_bits(map, reg, mask, val); map 105 drivers/clk/qcom/common.c regmap_update_bits(map, reg, PLL_VOTE_FSM_ENA, PLL_VOTE_FSM_ENA); map 50 drivers/clk/qcom/common.h extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map, map 26 drivers/clk/qcom/reset.c const struct qcom_reset_map *map; map 30 drivers/clk/qcom/reset.c map = &rst->reset_map[id]; map 31 drivers/clk/qcom/reset.c mask = BIT(map->bit); map 33 drivers/clk/qcom/reset.c return regmap_update_bits(rst->regmap, map->reg, mask, mask); map 40 drivers/clk/qcom/reset.c const struct qcom_reset_map *map; map 44 drivers/clk/qcom/reset.c map = &rst->reset_map[id]; map 45 drivers/clk/qcom/reset.c mask = BIT(map->bit); map 47 drivers/clk/qcom/reset.c return regmap_update_bits(rst->regmap, map->reg, mask, 0); map 17 drivers/clk/sunxi-ng/ccu_reset.c const struct ccu_reset_map *map = &ccu->reset_map[id]; map 23 drivers/clk/sunxi-ng/ccu_reset.c reg = readl(ccu->base + map->reg); map 24 drivers/clk/sunxi-ng/ccu_reset.c writel(reg & ~map->bit, ccu->base + map->reg); map 35 drivers/clk/sunxi-ng/ccu_reset.c const struct ccu_reset_map *map = &ccu->reset_map[id]; map 41 drivers/clk/sunxi-ng/ccu_reset.c reg = readl(ccu->base + map->reg); map 42 drivers/clk/sunxi-ng/ccu_reset.c writel(reg | map->bit, ccu->base + map->reg); map 63 drivers/clk/sunxi-ng/ccu_reset.c const struct ccu_reset_map *map = &ccu->reset_map[id]; map 69 drivers/clk/sunxi-ng/ccu_reset.c return !(map->bit & readl(ccu->base + map->reg)); map 59 drivers/clk/versatile/clk-icst.c struct regmap *map; map 79 
drivers/clk/versatile/clk-icst.c ret = regmap_read(icst->map, icst->vcoreg_off, &val); map 217 drivers/clk/versatile/clk-icst.c ret = regmap_write(icst->map, icst->lockreg_off, VERSATILE_LOCK_VAL); map 220 drivers/clk/versatile/clk-icst.c ret = regmap_update_bits(icst->map, icst->vcoreg_off, mask, val); map 224 drivers/clk/versatile/clk-icst.c ret = regmap_write(icst->map, icst->lockreg_off, 0); map 318 drivers/clk/versatile/clk-icst.c ret = regmap_write(icst->map, icst->lockreg_off, map 322 drivers/clk/versatile/clk-icst.c ret = regmap_update_bits(icst->map, icst->vcoreg_off, map 328 drivers/clk/versatile/clk-icst.c ret = regmap_write(icst->map, icst->lockreg_off, 0); map 351 drivers/clk/versatile/clk-icst.c struct regmap *map, map 374 drivers/clk/versatile/clk-icst.c icst->map = map; map 401 drivers/clk/versatile/clk-icst.c struct regmap *map; map 403 drivers/clk/versatile/clk-icst.c map = regmap_init_mmio(dev, base, &icst_regmap_conf); map 404 drivers/clk/versatile/clk-icst.c if (IS_ERR(map)) { map 406 drivers/clk/versatile/clk-icst.c return ERR_CAST(map); map 408 drivers/clk/versatile/clk-icst.c return icst_clk_setup(dev, desc, name, parent_name, map, map 495 drivers/clk/versatile/clk-icst.c struct regmap *map; map 508 drivers/clk/versatile/clk-icst.c map = syscon_node_to_regmap(parent); map 509 drivers/clk/versatile/clk-icst.c if (IS_ERR(map)) { map 552 drivers/clk/versatile/clk-icst.c regclk = icst_clk_setup(NULL, &icst_desc, name, parent_name, map, ctype); map 29 drivers/clocksource/ingenic-timer.c struct regmap *map; map 45 drivers/clocksource/ingenic-timer.c regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count); map 64 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TECR, BIT(tcu->timer_channel)); map 77 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TDFRc(tcu->timer_channel), next); map 78 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TCNTc(tcu->timer_channel), 0); map 79 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TESR, BIT(tcu->timer_channel)); map 89 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TECR, BIT(tcu->timer_channel)); map 192 drivers/clocksource/ingenic-timer.c regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel), map 196 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff); map 197 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0); map 200 drivers/clocksource/ingenic-timer.c regmap_write(tcu->map, TCU_REG_TESR, BIT(channel)); map 241 drivers/clocksource/ingenic-timer.c struct regmap *map; map 247 drivers/clocksource/ingenic-timer.c map = device_node_to_regmap(np); map 248 drivers/clocksource/ingenic-timer.c if (IS_ERR(map)) map 249 drivers/clocksource/ingenic-timer.c return PTR_ERR(map); map 268 drivers/clocksource/ingenic-timer.c tcu->map = map; map 608 drivers/crypto/axis/artpec6_crypto.c struct artpec6_crypto_dma_map *map; map 620 drivers/crypto/axis/artpec6_crypto.c map = &dma->maps[dma->map_count++]; map 621 drivers/crypto/axis/artpec6_crypto.c map->size = size; map 622 drivers/crypto/axis/artpec6_crypto.c map->dma_addr = dma_addr; map 623 drivers/crypto/axis/artpec6_crypto.c map->dir = dir; map 683 drivers/crypto/axis/artpec6_crypto.c struct artpec6_crypto_dma_map *map = &dma->maps[i]; map 685 drivers/crypto/axis/artpec6_crypto.c dma_unmap_page(dev, map->dma_addr, map->size, map->dir); map 114 drivers/crypto/qat/qat_common/adf_dev_mgr.c struct vf_id_map *map; map 119 
drivers/crypto/qat/qat_common/adf_dev_mgr.c map = list_entry(ptr, struct vf_id_map, list); map 120 drivers/crypto/qat/qat_common/adf_dev_mgr.c if (map->bdf != -1) { map 121 drivers/crypto/qat/qat_common/adf_dev_mgr.c id_map[map->id] = 0; map 125 drivers/crypto/qat/qat_common/adf_dev_mgr.c if (vf && map->bdf == -1) map 129 drivers/crypto/qat/qat_common/adf_dev_mgr.c kfree(map); map 200 drivers/crypto/qat/qat_common/adf_dev_mgr.c struct vf_id_map *map; map 219 drivers/crypto/qat/qat_common/adf_dev_mgr.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 220 drivers/crypto/qat/qat_common/adf_dev_mgr.c if (!map) { map 224 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->bdf = ~0; map 225 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->id = accel_dev->accel_id; map 226 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->fake_id = map->id; map 227 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->attached = true; map 228 drivers/crypto/qat/qat_common/adf_dev_mgr.c list_add_tail(&map->list, &vfs_table); map 231 drivers/crypto/qat/qat_common/adf_dev_mgr.c struct vf_id_map *map; map 233 drivers/crypto/qat/qat_common/adf_dev_mgr.c map = adf_find_vf(adf_get_vf_num(accel_dev)); map 234 drivers/crypto/qat/qat_common/adf_dev_mgr.c if (map) { map 237 drivers/crypto/qat/qat_common/adf_dev_mgr.c accel_dev->accel_id = map->id; map 239 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->fake_id++; map 240 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->attached = true; map 241 drivers/crypto/qat/qat_common/adf_dev_mgr.c next = list_next_entry(map, list); map 251 drivers/crypto/qat/qat_common/adf_dev_mgr.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 252 drivers/crypto/qat/qat_common/adf_dev_mgr.c if (!map) { map 258 drivers/crypto/qat/qat_common/adf_dev_mgr.c kfree(map); map 264 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->bdf = adf_get_vf_num(accel_dev); map 265 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->id = accel_dev->accel_id; map 266 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->fake_id = map->id; map 267 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->attached = true; map 268 drivers/crypto/qat/qat_common/adf_dev_mgr.c list_add_tail(&map->list, &vfs_table); map 299 drivers/crypto/qat/qat_common/adf_dev_mgr.c struct vf_id_map *map, *next; map 301 drivers/crypto/qat/qat_common/adf_dev_mgr.c map = adf_find_vf(adf_get_vf_num(accel_dev)); map 302 drivers/crypto/qat/qat_common/adf_dev_mgr.c if (!map) { map 306 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->fake_id--; map 307 drivers/crypto/qat/qat_common/adf_dev_mgr.c map->attached = false; map 308 drivers/crypto/qat/qat_common/adf_dev_mgr.c next = list_next_entry(map, list); map 1001 drivers/dma-buf/dma-buf.c if (!dmabuf->ops->map) map 1003 drivers/dma-buf/dma-buf.c return dmabuf->ops->map(dmabuf, page_num); map 114 drivers/dma-buf/udmabuf.c .map = kmap_udmabuf, map 2822 drivers/dma/amba-pl08x.c pl08x->slave.filter.map = pl08x->pd->slave_map; map 677 drivers/dma/dmaengine.c const struct dma_slave_map *map = &device->filter.map[i]; map 679 drivers/dma/dmaengine.c if (!strcmp(map->devname, dev_name(dev)) && map 680 drivers/dma/dmaengine.c !strcmp(map->slave, name)) map 681 drivers/dma/dmaengine.c return map; map 717 drivers/dma/dmaengine.c const struct dma_slave_map *map = dma_filter_match(d, name, dev); map 719 drivers/dma/dmaengine.c if (!map) map 725 drivers/dma/dmaengine.c chan = find_candidate(d, &mask, d->filter.fn, map->param); map 95 drivers/dma/ipu/ipu_irq.c struct ipu_irq_map *map = irq_data_get_irq_chip_data(d);
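
The dmaengine.c entries above match request names against a per-controller dma_slave_map table before running the controller's filter function. A sketch of how a DMA controller driver might populate that table is shown below; the device names, request-line numbers and filter function are invented for illustration.

/*
 * Sketch of a dma_slave_map table and how it is hooked into the
 * dma_device filter. All demo_* names are hypothetical.
 */
#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_DMA_REQ_UART0_TX	4	/* hypothetical request lines */
#define DEMO_DMA_REQ_UART0_RX	5

static const struct dma_slave_map demo_slave_map[] = {
	/* { consumer device name, channel name, filter parameter } */
	{ "demo-uart.0", "tx", (void *)(uintptr_t)DEMO_DMA_REQ_UART0_TX },
	{ "demo-uart.0", "rx", (void *)(uintptr_t)DEMO_DMA_REQ_UART0_RX },
};

static bool demo_dma_filter_fn(struct dma_chan *chan, void *param);

static void demo_register_slave_map(struct dma_device *dmadev)
{
	/* dma_request_chan(<uart device>, "tx") walks this table for param. */
	dmadev->filter.map = demo_slave_map;
	dmadev->filter.mapcnt = ARRAY_SIZE(demo_slave_map);
	dmadev->filter.fn = demo_dma_filter_fn;
}

map 102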
drivers/dma/ipu/ipu_irq.c bank = map->bank; map 110 drivers/dma/ipu/ipu_irq.c reg |= (1UL << (map->source & 31)); map 118 drivers/dma/ipu/ipu_irq.c struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); map 125 drivers/dma/ipu/ipu_irq.c bank = map->bank; map 133 drivers/dma/ipu/ipu_irq.c reg &= ~(1UL << (map->source & 31)); map 141 drivers/dma/ipu/ipu_irq.c struct ipu_irq_map *map = irq_data_get_irq_chip_data(d); map 147 drivers/dma/ipu/ipu_irq.c bank = map->bank; map 154 drivers/dma/ipu/ipu_irq.c ipu_write_reg(bank->ipu, 1UL << (map->source & 31), bank->status); map 166 drivers/dma/ipu/ipu_irq.c struct ipu_irq_map *map = irq_get_chip_data(irq); map 172 drivers/dma/ipu/ipu_irq.c bank = map->bank; map 174 drivers/dma/ipu/ipu_irq.c (1UL << (map->source & 31)); map 196 drivers/dma/ipu/ipu_irq.c struct ipu_irq_map *map; map 201 drivers/dma/ipu/ipu_irq.c map = src2map(source); map 202 drivers/dma/ipu/ipu_irq.c if (map) { map 203 drivers/dma/ipu/ipu_irq.c pr_err("IPU: Source %u already mapped to IRQ %u\n", source, map->irq); map 285 drivers/dma/ipu/ipu_irq.c struct ipu_irq_map *map; map 292 drivers/dma/ipu/ipu_irq.c map = src2map(32 * i + line); map 293 drivers/dma/ipu/ipu_irq.c if (!map) { map 299 drivers/dma/ipu/ipu_irq.c irq = map->irq; map 260 drivers/dma/mcf-edma.c mcf_edma->dma_dev.filter.map = pdata->slave_map; map 1403 drivers/dma/pxa_dma.c pdev->slave.filter.map = slave_map; map 1302 drivers/dma/s3c24xx-dma.c s3cdma->slave.filter.map = pdata->slave_map; map 923 drivers/dma/sa11x0-dma.c d->slave.filter.map = sa11x0_dma_map; map 167 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_chan_map map; map 1044 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_chan_map *map = &rchan->map; map 1083 drivers/dma/sh/rcar-dmac.c if (map->slave.xfer_size) { map 1084 drivers/dma/sh/rcar-dmac.c dma_unmap_resource(chan->device->dev, map->addr, map 1085 drivers/dma/sh/rcar-dmac.c map->slave.xfer_size, map->dir, 0); map 1086 drivers/dma/sh/rcar-dmac.c map->slave.xfer_size = 0; map 1116 drivers/dma/sh/rcar-dmac.c struct rcar_dmac_chan_map *map = &rchan->map; map 1132 drivers/dma/sh/rcar-dmac.c if (dev_addr == map->slave.slave_addr && map 1133 drivers/dma/sh/rcar-dmac.c dev_size == map->slave.xfer_size && map 1134 drivers/dma/sh/rcar-dmac.c dev_dir == map->dir) map 1138 drivers/dma/sh/rcar-dmac.c if (map->slave.xfer_size) map 1139 drivers/dma/sh/rcar-dmac.c dma_unmap_resource(chan->device->dev, map->addr, map 1140 drivers/dma/sh/rcar-dmac.c map->slave.xfer_size, map->dir, 0); map 1141 drivers/dma/sh/rcar-dmac.c map->slave.xfer_size = 0; map 1144 drivers/dma/sh/rcar-dmac.c map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size, map 1147 drivers/dma/sh/rcar-dmac.c if (dma_mapping_error(chan->device->dev, map->addr)) { map 1155 drivers/dma/sh/rcar-dmac.c rchan->index, dev_size, &dev_addr, &map->addr, map 1158 drivers/dma/sh/rcar-dmac.c map->slave.slave_addr = dev_addr; map 1159 drivers/dma/sh/rcar-dmac.c map->slave.xfer_size = dev_size; map 1160 drivers/dma/sh/rcar-dmac.c map->dir = dev_dir; map 1183 drivers/dma/sh/rcar-dmac.c return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, map 1238 drivers/dma/sh/rcar-dmac.c desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr, map 67 drivers/dma/ti/dma-crossbar.c struct ti_am335x_xbar_map *map = route_data; map 70 drivers/dma/ti/dma-crossbar.c map->mux_val, map->dma_line); map 72 drivers/dma/ti/dma-crossbar.c ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0); map 73 drivers/dma/ti/dma-crossbar.c kfree(map); map 81 
drivers/dma/ti/dma-crossbar.c struct ti_am335x_xbar_map *map; map 105 drivers/dma/ti/dma-crossbar.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 106 drivers/dma/ti/dma-crossbar.c if (!map) { map 111 drivers/dma/ti/dma-crossbar.c map->dma_line = (u16)dma_spec->args[0]; map 112 drivers/dma/ti/dma-crossbar.c map->mux_val = (u8)dma_spec->args[2]; map 118 drivers/dma/ti/dma-crossbar.c map->mux_val, map->dma_line); map 120 drivers/dma/ti/dma-crossbar.c ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val); map 122 drivers/dma/ti/dma-crossbar.c return map; map 228 drivers/dma/ti/dma-crossbar.c struct ti_dra7_xbar_map *map = route_data; map 231 drivers/dma/ti/dma-crossbar.c map->xbar_in, map->xbar_out); map 233 drivers/dma/ti/dma-crossbar.c ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); map 235 drivers/dma/ti/dma-crossbar.c clear_bit(map->xbar_out, xbar->dma_inuse); map 237 drivers/dma/ti/dma-crossbar.c kfree(map); map 245 drivers/dma/ti/dma-crossbar.c struct ti_dra7_xbar_map *map; map 260 drivers/dma/ti/dma-crossbar.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 261 drivers/dma/ti/dma-crossbar.c if (!map) { map 267 drivers/dma/ti/dma-crossbar.c map->xbar_out = find_first_zero_bit(xbar->dma_inuse, map 269 drivers/dma/ti/dma-crossbar.c if (map->xbar_out == xbar->dma_requests) { map 272 drivers/dma/ti/dma-crossbar.c kfree(map); map 275 drivers/dma/ti/dma-crossbar.c set_bit(map->xbar_out, xbar->dma_inuse); map 278 drivers/dma/ti/dma-crossbar.c map->xbar_in = (u16)dma_spec->args[0]; map 280 drivers/dma/ti/dma-crossbar.c dma_spec->args[0] = map->xbar_out + xbar->dma_offset; map 283 drivers/dma/ti/dma-crossbar.c map->xbar_in, map->xbar_out); map 285 drivers/dma/ti/dma-crossbar.c ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); map 287 drivers/dma/ti/dma-crossbar.c return map; map 2449 drivers/dma/ti/edma.c ecc->dma_slave.filter.map = info->slave_map; map 1569 drivers/dma/ti/omap-dma.c od->ddev.filter.map = od->plat->slave_map; map 2112 drivers/edac/altera_edac.c .map = a10_eccmgr_irqdomain_map, map 217 drivers/edac/e752x_edac.c u8 map[8]; map 347 drivers/edac/e752x_edac.c pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], map 348 drivers/edac/e752x_edac.c pvt->map[4], pvt->map[5], pvt->map[6], map 349 drivers/edac/e752x_edac.c pvt->map[7]); map 353 drivers/edac/e752x_edac.c if (pvt->map[i] == row) map 1153 drivers/edac/e752x_edac.c pvt->map[index] = 0xff; map 1154 drivers/edac/e752x_edac.c pvt->map[index + 1] = 0xff; map 1156 drivers/edac/e752x_edac.c pvt->map[index] = row; map 1167 drivers/edac/e752x_edac.c pvt->map[index + 1] = (value == last) ? 
0xff : row; map 494 drivers/firewire/core-topology.c __be32 *map = card->topology_map; map 496 drivers/firewire/core-topology.c *map++ = cpu_to_be32((self_id_count + 2) << 16); map 497 drivers/firewire/core-topology.c *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1); map 498 drivers/firewire/core-topology.c *map++ = cpu_to_be32((node_count << 16) | self_id_count); map 501 drivers/firewire/core-topology.c *map++ = cpu_to_be32p(self_ids++); map 75 drivers/firmware/efi/libstub/arm32-stub.c struct efi_boot_memmap map = { map 76 drivers/firmware/efi/libstub/arm32-stub.c .map = &memory_map, map 122 drivers/firmware/efi/libstub/arm32-stub.c status = efi_get_memory_map(sys_table_arg, &map); map 83 drivers/firmware/efi/libstub/efi-stub-helper.c struct efi_boot_memmap *map) map 90 drivers/firmware/efi/libstub/efi-stub-helper.c *map->desc_size = sizeof(*m); map 91 drivers/firmware/efi/libstub/efi-stub-helper.c *map->map_size = *map->desc_size * 32; map 92 drivers/firmware/efi/libstub/efi-stub-helper.c *map->buff_size = *map->map_size; map 95 drivers/firmware/efi/libstub/efi-stub-helper.c *map->map_size, (void **)&m); map 99 drivers/firmware/efi/libstub/efi-stub-helper.c *map->desc_size = 0; map 101 drivers/firmware/efi/libstub/efi-stub-helper.c status = efi_call_early(get_memory_map, map->map_size, m, map 102 drivers/firmware/efi/libstub/efi-stub-helper.c &key, map->desc_size, &desc_version); map 104 drivers/firmware/efi/libstub/efi-stub-helper.c !mmap_has_headroom(*map->buff_size, *map->map_size, map 105 drivers/firmware/efi/libstub/efi-stub-helper.c *map->desc_size)) { map 114 drivers/firmware/efi/libstub/efi-stub-helper.c *map->map_size += *map->desc_size * EFI_MMAP_NR_SLACK_SLOTS; map 115 drivers/firmware/efi/libstub/efi-stub-helper.c *map->buff_size = *map->map_size; map 122 drivers/firmware/efi/libstub/efi-stub-helper.c if (map->key_ptr && status == EFI_SUCCESS) map 123 drivers/firmware/efi/libstub/efi-stub-helper.c *map->key_ptr = key; map 124 drivers/firmware/efi/libstub/efi-stub-helper.c if (map->desc_ver && status == EFI_SUCCESS) map 125 drivers/firmware/efi/libstub/efi-stub-helper.c *map->desc_ver = desc_version; map 128 drivers/firmware/efi/libstub/efi-stub-helper.c *map->map = m; map 138 drivers/firmware/efi/libstub/efi-stub-helper.c struct efi_memory_map map; map 142 drivers/firmware/efi/libstub/efi-stub-helper.c boot_map.map = (efi_memory_desc_t **)&map.map; map 144 drivers/firmware/efi/libstub/efi-stub-helper.c boot_map.desc_size = &map.desc_size; map 153 drivers/firmware/efi/libstub/efi-stub-helper.c map.map_end = map.map + map_size; map 155 drivers/firmware/efi/libstub/efi-stub-helper.c for_each_efi_memory_desc_in_map(&map, md) { map 162 drivers/firmware/efi/libstub/efi-stub-helper.c efi_call_early(free_pool, map.map); map 175 drivers/firmware/efi/libstub/efi-stub-helper.c efi_memory_desc_t *map; map 182 drivers/firmware/efi/libstub/efi-stub-helper.c boot_map.map = &map; map 207 drivers/firmware/efi/libstub/efi-stub-helper.c unsigned long m = (unsigned long)map; map 257 drivers/firmware/efi/libstub/efi-stub-helper.c efi_call_early(free_pool, map); map 270 drivers/firmware/efi/libstub/efi-stub-helper.c efi_memory_desc_t *map; map 276 drivers/firmware/efi/libstub/efi-stub-helper.c boot_map.map = &map; map 300 drivers/firmware/efi/libstub/efi-stub-helper.c unsigned long m = (unsigned long)map; map 333 drivers/firmware/efi/libstub/efi-stub-helper.c efi_call_early(free_pool, map); map 863 drivers/firmware/efi/libstub/efi-stub-helper.c struct efi_boot_memmap *map,
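
The libstub entries above fill a struct efi_boot_memmap with pointers to caller-owned out-parameters before calling efi_get_memory_map(). A sketch of that calling convention follows; it assumes the pre-5.7 stub interfaces indexed here, and demo_get_memmap() itself is a hypothetical wrapper.

/*
 * Sketch of how a stub caller packs its out-parameters for
 * efi_get_memory_map(), mirroring the random.c/fdt.c entries above.
 */
#include <linux/efi.h>

static efi_status_t demo_get_memmap(efi_system_table_t *sys_table_arg,
				    efi_memory_desc_t **memory_map)
{
	unsigned long map_size = 0, desc_size = 0, buff_size = 0, key = 0;
	u32 desc_ver;
	struct efi_boot_memmap map = {
		.map		= memory_map,
		.map_size	= &map_size,
		.desc_size	= &desc_size,
		.desc_ver	= &desc_ver,
		.key_ptr	= &key,
		.buff_size	= &buff_size,
	};

	/*
	 * On success *memory_map points at a pool allocation that the
	 * caller later releases with the firmware's free_pool service.
	 */
	return efi_get_memory_map(sys_table_arg, &map);
}

map 869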
drivers/firmware/efi/libstub/efi-stub-helper.c status = efi_get_memory_map(sys_table_arg, map); map 874 drivers/firmware/efi/libstub/efi-stub-helper.c status = priv_func(sys_table_arg, map, priv); map 878 drivers/firmware/efi/libstub/efi-stub-helper.c status = efi_call_early(exit_boot_services, handle, *map->key_ptr); map 894 drivers/firmware/efi/libstub/efi-stub-helper.c *map->map_size = *map->buff_size; map 896 drivers/firmware/efi/libstub/efi-stub-helper.c map->map_size, map 897 drivers/firmware/efi/libstub/efi-stub-helper.c *map->map, map 898 drivers/firmware/efi/libstub/efi-stub-helper.c map->key_ptr, map 899 drivers/firmware/efi/libstub/efi-stub-helper.c map->desc_size, map 900 drivers/firmware/efi/libstub/efi-stub-helper.c map->desc_ver); map 906 drivers/firmware/efi/libstub/efi-stub-helper.c status = priv_func(sys_table_arg, map, priv); map 911 drivers/firmware/efi/libstub/efi-stub-helper.c status = efi_call_early(exit_boot_services, handle, *map->key_ptr); map 921 drivers/firmware/efi/libstub/efi-stub-helper.c efi_call_early(free_pool, *map->map); map 166 drivers/firmware/efi/libstub/fdt.c static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map) map 176 drivers/firmware/efi/libstub/fdt.c fdt_val64 = cpu_to_fdt64((unsigned long)*map->map); map 182 drivers/firmware/efi/libstub/fdt.c fdt_val32 = cpu_to_fdt32(*map->map_size); map 188 drivers/firmware/efi/libstub/fdt.c fdt_val32 = cpu_to_fdt32(*map->desc_size); map 194 drivers/firmware/efi/libstub/fdt.c fdt_val32 = cpu_to_fdt32(*map->desc_ver); map 214 drivers/firmware/efi/libstub/fdt.c struct efi_boot_memmap *map, map 223 drivers/firmware/efi/libstub/fdt.c efi_get_virtmap(*map->map, *map->map_size, *map->desc_size, map 226 drivers/firmware/efi/libstub/fdt.c return update_fdt_memmap(p->new_fdt_addr, map); map 262 drivers/firmware/efi/libstub/fdt.c struct efi_boot_memmap map; map 265 drivers/firmware/efi/libstub/fdt.c map.map = &runtime_map; map 266 drivers/firmware/efi/libstub/fdt.c map.map_size = &map_size; map 267 drivers/firmware/efi/libstub/fdt.c map.desc_size = &desc_size; map 268 drivers/firmware/efi/libstub/fdt.c map.desc_ver = &desc_ver; map 269 drivers/firmware/efi/libstub/fdt.c map.key_ptr = &mmap_key; map 270 drivers/firmware/efi/libstub/fdt.c map.buff_size = &buff_size; map 278 drivers/firmware/efi/libstub/fdt.c status = efi_get_memory_map(sys_table, &map); map 286 drivers/firmware/efi/libstub/fdt.c map.map = &memory_map; map 298 drivers/firmware/efi/libstub/fdt.c status = efi_get_memory_map(sys_table, &map); map 316 drivers/firmware/efi/libstub/fdt.c status = efi_exit_boot_services(sys_table, handle, &map, &priv, exit_boot_func); map 79 drivers/firmware/efi/libstub/random.c struct efi_boot_memmap map; map 81 drivers/firmware/efi/libstub/random.c map.map = &memory_map; map 82 drivers/firmware/efi/libstub/random.c map.map_size = &map_size; map 83 drivers/firmware/efi/libstub/random.c map.desc_size = &desc_size; map 84 drivers/firmware/efi/libstub/random.c map.desc_ver = NULL; map 85 drivers/firmware/efi/libstub/random.c map.key_ptr = NULL; map 86 drivers/firmware/efi/libstub/random.c map.buff_size = &buff_size; map 88 drivers/firmware/efi/libstub/random.c status = efi_get_memory_map(sys_table_arg, &map); map 71 drivers/firmware/efi/memmap.c struct efi_memory_map map; map 80 drivers/firmware/efi/memmap.c map.map = memremap(phys_map, data->size, MEMREMAP_WB); map 82 drivers/firmware/efi/memmap.c map.map = early_memremap(phys_map, data->size); map 84 drivers/firmware/efi/memmap.c if (!map.map) { map 89 
drivers/firmware/efi/memmap.c map.phys_map = data->phys_map; map 90 drivers/firmware/efi/memmap.c map.nr_map = data->size / data->desc_size; map 91 drivers/firmware/efi/memmap.c map.map_end = map.map + data->size; map 93 drivers/firmware/efi/memmap.c map.desc_version = data->desc_version; map 94 drivers/firmware/efi/memmap.c map.desc_size = data->desc_size; map 95 drivers/firmware/efi/memmap.c map.late = late; map 99 drivers/firmware/efi/memmap.c efi.memmap = map; map 128 drivers/firmware/efi/memmap.c early_memunmap(efi.memmap.map, size); map 130 drivers/firmware/efi/memmap.c memunmap(efi.memmap.map); map 133 drivers/firmware/efi/memmap.c efi.memmap.map = NULL; map 168 drivers/firmware/efi/memmap.c WARN_ON(efi.memmap.map); map 281 drivers/firmware/efi/memmap.c for (old = old_memmap->map, new = buf; map 155 drivers/firmware/efi/runtime-map.c memcpy(buf, efi.memmap.map, sz); map 77 drivers/fpga/altera-cvp.c void __iomem *map; map 135 drivers/fpga/altera-cvp.c writel(val, conf->map); map 646 drivers/fpga/altera-cvp.c conf->map = pci_iomap(pdev, CVP_BAR, 0); map 647 drivers/fpga/altera-cvp.c if (!conf->map) { map 671 drivers/fpga/altera-cvp.c if (conf->map) map 672 drivers/fpga/altera-cvp.c pci_iounmap(pdev, conf->map); map 687 drivers/fpga/altera-cvp.c if (conf->map) map 688 drivers/fpga/altera-cvp.c pci_iounmap(pdev, conf->map); map 661 drivers/fpga/dfl-afu-main.c struct dfl_fpga_port_dma_map map; map 667 drivers/fpga/dfl-afu-main.c if (copy_from_user(&map, arg, minsz)) map 670 drivers/fpga/dfl-afu-main.c if (map.argsz < minsz || map.flags) map 673 drivers/fpga/dfl-afu-main.c ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova); map 677 drivers/fpga/dfl-afu-main.c if (copy_to_user(arg, &map, sizeof(map))) { map 678 drivers/fpga/dfl-afu-main.c afu_dma_unmap_region(pdata, map.iova); map 683 drivers/fpga/dfl-afu-main.c (unsigned long long)map.user_addr, map 684 drivers/fpga/dfl-afu-main.c (unsigned long long)map.length, map 685 drivers/fpga/dfl-afu-main.c (unsigned long long)map.iova); map 545 drivers/gpio/gpio-bcm-kona.c .map = bcm_kona_gpio_irq_map, map 363 drivers/gpio/gpio-brcmstb.c .map = brcmstb_gpio_irq_map, map 457 drivers/gpio/gpio-davinci.c .map = davinci_gpio_irq_map, map 258 drivers/gpio/gpio-em.c .map = em_gio_irq_domain_map, map 322 drivers/gpio/gpio-grgpio.c .map = grgpio_irq_map, map 26 drivers/gpio/gpio-lp87565.c struct regmap *map; map 34 drivers/gpio/gpio-lp87565.c ret = regmap_read(gpio->map, LP87565_REG_GPIO_IN, &val); map 46 drivers/gpio/gpio-lp87565.c regmap_update_bits(gpio->map, LP87565_REG_GPIO_OUT, map 56 drivers/gpio/gpio-lp87565.c ret = regmap_read(gpio->map, LP87565_REG_GPIO_CONFIG, &val); map 68 drivers/gpio/gpio-lp87565.c return regmap_update_bits(gpio->map, map 80 drivers/gpio/gpio-lp87565.c return regmap_update_bits(gpio->map, map 99 drivers/gpio/gpio-lp87565.c ret = regmap_update_bits(gpio->map, map 120 drivers/gpio/gpio-lp87565.c return regmap_update_bits(gpio->map, map 127 drivers/gpio/gpio-lp87565.c return regmap_update_bits(gpio->map, map 164 drivers/gpio/gpio-lp87565.c gpio->map = lp87565->regmap; map 35 drivers/gpio/gpio-max77650.c struct regmap *map; map 45 drivers/gpio/gpio-max77650.c return regmap_update_bits(chip->map, map 61 drivers/gpio/gpio-max77650.c return regmap_update_bits(chip->map, map 73 drivers/gpio/gpio-max77650.c rv = regmap_update_bits(chip->map, MAX77650_REG_CNFG_GPIO, map 86 drivers/gpio/gpio-max77650.c rv = regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val); map 100 drivers/gpio/gpio-max77650.c rv = 
regmap_read(chip->map, MAX77650_REG_CNFG_GPIO, &val); map 114 drivers/gpio/gpio-max77650.c return regmap_update_bits(chip->map, map 119 drivers/gpio/gpio-max77650.c return regmap_update_bits(chip->map, map 124 drivers/gpio/gpio-max77650.c return regmap_update_bits(chip->map, map 154 drivers/gpio/gpio-max77650.c chip->map = dev_get_regmap(parent, NULL); map 155 drivers/gpio/gpio-max77650.c if (!chip->map) map 281 drivers/gpio/gpio-mpc8xxx.c .map = mpc8xxx_gpio_irq_map, map 98 drivers/gpio/gpio-mt7621.c u32 map = irq_find_mapping(gc->irq.domain, bit); map 100 drivers/gpio/gpio-mt7621.c generic_handle_irq(map); map 138 drivers/gpio/gpio-mvebu.c struct regmap **map, unsigned int *offset) map 146 drivers/gpio/gpio-mvebu.c *map = mvchip->regs; map 151 drivers/gpio/gpio-mvebu.c *map = mvchip->percpu_regs; map 162 drivers/gpio/gpio-mvebu.c struct regmap *map; map 166 drivers/gpio/gpio-mvebu.c mvebu_gpioreg_edge_cause(mvchip, &map, &offset); map 167 drivers/gpio/gpio-mvebu.c regmap_read(map, offset, &val); map 175 drivers/gpio/gpio-mvebu.c struct regmap *map; map 178 drivers/gpio/gpio-mvebu.c mvebu_gpioreg_edge_cause(mvchip, &map, &offset); map 179 drivers/gpio/gpio-mvebu.c regmap_write(map, offset, val); map 184 drivers/gpio/gpio-mvebu.c struct regmap **map, unsigned int *offset) map 191 drivers/gpio/gpio-mvebu.c *map = mvchip->regs; map 196 drivers/gpio/gpio-mvebu.c *map = mvchip->regs; map 201 drivers/gpio/gpio-mvebu.c *map = mvchip->percpu_regs; map 212 drivers/gpio/gpio-mvebu.c struct regmap *map; map 216 drivers/gpio/gpio-mvebu.c mvebu_gpioreg_edge_mask(mvchip, &map, &offset); map 217 drivers/gpio/gpio-mvebu.c regmap_read(map, offset, &val); map 225 drivers/gpio/gpio-mvebu.c struct regmap *map; map 228 drivers/gpio/gpio-mvebu.c mvebu_gpioreg_edge_mask(mvchip, &map, &offset); map 229 drivers/gpio/gpio-mvebu.c regmap_write(map, offset, val); map 234 drivers/gpio/gpio-mvebu.c struct regmap **map, unsigned int *offset) map 241 drivers/gpio/gpio-mvebu.c *map = mvchip->regs; map 246 drivers/gpio/gpio-mvebu.c *map = mvchip->regs; map 251 drivers/gpio/gpio-mvebu.c *map = mvchip->percpu_regs; map 262 drivers/gpio/gpio-mvebu.c struct regmap *map; map 266 drivers/gpio/gpio-mvebu.c mvebu_gpioreg_level_mask(mvchip, &map, &offset); map 267 drivers/gpio/gpio-mvebu.c regmap_read(map, offset, &val); map 275 drivers/gpio/gpio-mvebu.c struct regmap *map; map 278 drivers/gpio/gpio-mvebu.c mvebu_gpioreg_level_mask(mvchip, &map, &offset); map 279 drivers/gpio/gpio-mvebu.c regmap_write(map, offset, val); map 60 drivers/gpio/gpio-pmic-eic-sprd.c struct regmap *map; map 73 drivers/gpio/gpio-pmic-eic-sprd.c regmap_update_bits(pmic_eic->map, pmic_eic->offset + reg, map 84 drivers/gpio/gpio-pmic-eic-sprd.c ret = regmap_read(pmic_eic->map, pmic_eic->offset + reg, &value); map 129 drivers/gpio/gpio-pmic-eic-sprd.c ret = regmap_read(pmic_eic->map, pmic_eic->offset + reg, &value); map 135 drivers/gpio/gpio-pmic-eic-sprd.c return regmap_write(pmic_eic->map, pmic_eic->offset + reg, value); map 271 drivers/gpio/gpio-pmic-eic-sprd.c ret = regmap_read(pmic_eic->map, pmic_eic->offset + SPRD_PMIC_EIC_MIS, map 311 drivers/gpio/gpio-pmic-eic-sprd.c pmic_eic->map = dev_get_regmap(pdev->dev.parent, NULL); map 312 drivers/gpio/gpio-pmic-eic-sprd.c if (!pmic_eic->map) map 578 drivers/gpio/gpio-pxa.c .map = pxa_irq_domain_map, map 215 drivers/gpio/gpio-sa1100.c .map = sa1100_gpio_irqdomain_map, map 400 drivers/gpio/gpio-tegra186.c .map = gpiochip_irq_map, map 501 drivers/gpio/gpio-tegra186.c irq->map = devm_kcalloc(&pdev->dev, 
gpio->gpio.ngpio, map 502 drivers/gpio/gpio-tegra186.c sizeof(*irq->map), GFP_KERNEL); map 503 drivers/gpio/gpio-tegra186.c if (!irq->map) map 510 drivers/gpio/gpio-tegra186.c irq->map[offset + j] = irq->parents[port->irq]; map 2068 drivers/gpio/gpiolib.c else if (chip->irq.map) map 2069 drivers/gpio/gpiolib.c ret = irq_set_parent(irq, chip->irq.map[hwirq]); map 2097 drivers/gpio/gpiolib.c .map = gpiochip_irq_map, map 1713 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_bo_va_mapping **map) map 1728 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c *map = mapping; map 612 drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c .map = amdgpu_irqdomain_map, map 102 drivers/gpu/drm/amd/display/modules/inc/mod_shared.h enum lut3d_control_gamut_map map; map 495 drivers/gpu/drm/arm/malidp_crtc.c hwdev->hw->map.de_irq_map.vsync_irq); map 505 drivers/gpu/drm/arm/malidp_crtc.c hwdev->hw->map.de_irq_map.vsync_irq); map 53 drivers/gpu/drm/arm/malidp_drv.c hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR); map 56 drivers/gpu/drm/arm/malidp_drv.c hwdev->hw->map.coeffs_base + map 109 drivers/gpu/drm/arm/malidp_drv.c hwdev->hw->map.coeffs_base + map 126 drivers/gpu/drm/arm/malidp_drv.c u32 se_control = hwdev->hw->map.se_base + map 127 drivers/gpu/drm/arm/malidp_drv.c ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? map 814 drivers/gpu/drm/arm/malidp_drv.c version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID); map 829 drivers/gpu/drm/arm/malidp_drv.c malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base); map 290 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); map 310 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); map 323 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); map 343 drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.out_depth_base); map 602 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); map 622 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); map 635 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); map 655 drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.out_depth_base); map 929 drivers/gpu/drm/arm/malidp_hw.c .map = { map 978 drivers/gpu/drm/arm/malidp_hw.c .map = { map 1027 drivers/gpu/drm/arm/malidp_hw.c .map = { map 1082 drivers/gpu/drm/arm/malidp_hw.c u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, map 1087 drivers/gpu/drm/arm/malidp_hw.c for (i = 0; i < map->n_pixel_formats; i++) { map 1088 drivers/gpu/drm/arm/malidp_hw.c if (((map->pixel_formats[i].layer & layer_id) == layer_id) && map 1089 drivers/gpu/drm/arm/malidp_hw.c (map->pixel_formats[i].format == format)) { map 1097 drivers/gpu/drm/arm/malidp_hw.c (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2)) map 1100 drivers/gpu/drm/arm/malidp_hw.c return map->pixel_formats[i].id; map 1153 drivers/gpu/drm/arm/malidp_hw.c if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) map 1171 drivers/gpu/drm/arm/malidp_hw.c de = &hw->map.de_irq_map; map 1182 drivers/gpu/drm/arm/malidp_hw.c dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS); map 1183 drivers/gpu/drm/arm/malidp_hw.c if (dc_status & hw->map.dc_irq_map.vsync_irq) { map 1237 drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.dc_irq_map.irq_mask); map 1241 
drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.de_irq_map.irq_mask); map 1272 drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.de_irq_map.irq_mask); map 1274 drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.dc_irq_map.irq_mask); map 1283 drivers/gpu/drm/arm/malidp_hw.c const struct malidp_irq_map *se = &hw->map.se_irq_map; map 1294 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS); map 1303 drivers/gpu/drm/arm/malidp_hw.c mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ); map 1327 drivers/gpu/drm/arm/malidp_hw.c status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS); map 1329 drivers/gpu/drm/arm/malidp_hw.c (status & hw->map.dc_irq_map.vsync_irq)) map 1347 drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.se_irq_map.irq_mask); map 1382 drivers/gpu/drm/arm/malidp_hw.c hwdev->hw->map.se_irq_map.irq_mask); map 142 drivers/gpu/drm/arm/malidp_hw.h const struct malidp_hw_regmap map; map 292 drivers/gpu/drm/arm/malidp_hw.h return hwdev->hw->map.se_base; map 294 drivers/gpu/drm/arm/malidp_hw.h return hwdev->hw->map.dc_base; map 323 drivers/gpu/drm/arm/malidp_hw.h u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map, map 334 drivers/gpu/drm/arm/malidp_hw.h if (hwdev->hw->map.bus_align_bytes == 8) map 337 drivers/gpu/drm/arm/malidp_hw.h return hwdev->hw->map.bus_align_bytes << (rotated ? 2 : 0); map 370 drivers/gpu/drm/arm/malidp_hw.h u32 image_enh = hwdev->hw->map.se_base + map 371 drivers/gpu/drm/arm/malidp_hw.h ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? map 151 drivers/gpu/drm/arm/malidp_mw.c malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE, map 190 drivers/gpu/drm/arm/malidp_mw.c const struct malidp_hw_regmap *map = &malidp->dev->hw->map; map 194 drivers/gpu/drm/arm/malidp_mw.c formats = kcalloc(map->n_pixel_formats, sizeof(*formats), map 199 drivers/gpu/drm/arm/malidp_mw.c for (n = 0, i = 0; i < map->n_pixel_formats; i++) { map 200 drivers/gpu/drm/arm/malidp_mw.c if (map->pixel_formats[i].layer & SE_MEMWRITE) map 201 drivers/gpu/drm/arm/malidp_mw.c formats[n++] = map->pixel_formats[i].format; map 154 drivers/gpu/drm/arm/malidp_planes.c const struct malidp_hw_regmap *map = &malidp->dev->hw->map; map 236 drivers/gpu/drm/arm/malidp_planes.c (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) { map 520 drivers/gpu/drm/arm/malidp_planes.c ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map, map 928 drivers/gpu/drm/arm/malidp_planes.c const struct malidp_hw_regmap *map = &malidp->dev->hw->map; map 944 drivers/gpu/drm/arm/malidp_planes.c if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) { map 959 drivers/gpu/drm/arm/malidp_planes.c formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL); map 965 drivers/gpu/drm/arm/malidp_planes.c for (i = 0; i < map->n_layers; i++) { map 966 drivers/gpu/drm/arm/malidp_planes.c u8 id = map->layers[i].id; map 975 drivers/gpu/drm/arm/malidp_planes.c for (n = 0, j = 0; j < map->n_pixel_formats; j++) { map 976 drivers/gpu/drm/arm/malidp_planes.c if ((map->pixel_formats[j].layer & id) == id) map 977 drivers/gpu/drm/arm/malidp_planes.c formats[n++] = map->pixel_formats[j].format; map 997 drivers/gpu/drm/arm/malidp_planes.c plane->layer = &map->layers[i]; map 484 drivers/gpu/drm/armada/armada_gem.c .map = armada_gem_dmabuf_no_kmap, map 102 drivers/gpu/drm/ati_pcigart.c struct drm_local_map *map = &gart_info->mapping; map 152 drivers/gpu/drm/ati_pcigart.c memset_io((void __iomem *)map->handle, 0, max_ati_pages * sizeof(u32)); map 191 
drivers/gpu/drm/ati_pcigart.c writel(val, (void __iomem *)map->handle + offset); map 74 drivers/gpu/drm/bridge/analogix-anx78xx.c struct regmap *map[I2C_NUM_ADDRESSES]; map 92 drivers/gpu/drm/bridge/analogix-anx78xx.c static int anx78xx_set_bits(struct regmap *map, u8 reg, u8 mask) map 94 drivers/gpu/drm/bridge/analogix-anx78xx.c return regmap_update_bits(map, reg, mask, mask); map 97 drivers/gpu/drm/bridge/analogix-anx78xx.c static int anx78xx_clear_bits(struct regmap *map, u8 reg, u8 mask) map 99 drivers/gpu/drm/bridge/analogix-anx78xx.c return regmap_update_bits(map, reg, mask, 0); map 107 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG, map 137 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_CH_STATUS_REG, map 157 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_7_0_REG, map 162 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_ADDR_15_8_REG, map 172 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0], map 204 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P0], map 216 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL1_REG, map 222 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P0], map 236 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_bulk_read(anx78xx->map[I2C_IDX_TX_P0], map 243 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0], map 255 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0], map 260 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG, map 272 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL3_REG, map 277 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], map 299 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG, map 304 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_CHIP_CTRL_REG, map 310 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], map 316 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0], map 323 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], map 329 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], map 335 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], SP_AUDVID_CTRL_REG, map 340 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_RX_P0], map 345 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_RX_P0], map 351 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], map 357 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_RX_P0], map 384 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_ANALOG_CTRL0_REG, map 392 drivers/gpu/drm/bridge/analogix-anx78xx.c err = 
regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P1], map 408 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_update_bits(anx78xx->map[I2C_IDX_TX_P2], map 415 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL3_REG, map 420 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL4_REG, map 425 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], map 430 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], map 436 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_MISC_CTRL_REG, map 441 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], map 447 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], map 468 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_AUX_CH_CTRL2_REG, map 474 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 479 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0], map 485 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_multi_reg_write(anx78xx->map[I2C_IDX_TX_P0], map 491 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 496 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL8_REG, map 505 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_HDCP_AUTO_TIMER_REG, map 510 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 515 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 520 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], map 529 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_AUX_DEFER_CTRL_REG, map 534 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 544 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], map 549 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 555 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 565 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 581 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_INT_CTRL_REG, 0x01); map 585 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], map 590 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_MASK1_REG, map 595 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_MASK1_REG, map 631 drivers/gpu/drm/bridge/analogix-anx78xx.c anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG, map 633 drivers/gpu/drm/bridge/analogix-anx78xx.c anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG, map 672 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], map 745 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG, map 750 
drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], map 771 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG, map 776 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], map 790 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0], map 810 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], map 818 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], map 835 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 839 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0], map 846 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], map 858 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], SP_DP_LT_CTRL_REG, map 870 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG, map 876 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_VID_CTRL1_REG, map 896 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_clear_bits(anx78xx->map[I2C_IDX_TX_P0], map 901 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_bulk_write(anx78xx->map[I2C_IDX_TX_P2], map 907 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 912 drivers/gpu/drm/bridge/analogix-anx78xx.c err = anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P0], map 1072 drivers/gpu/drm/bridge/analogix-anx78xx.c anx78xx_set_bits(anx78xx->map[I2C_IDX_TX_P2], SP_POWERDOWN_CTRL_REG, map 1157 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG, map 1177 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_TX_P2], map 1206 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG, map 1216 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], map 1248 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DP_INT_STATUS1_REG, map 1258 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], map 1273 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_RX_P0], SP_INT_STATUS1_REG, map 1366 drivers/gpu/drm/bridge/analogix-anx78xx.c anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i], map 1368 drivers/gpu/drm/bridge/analogix-anx78xx.c if (IS_ERR(anx78xx->map[i])) { map 1369 drivers/gpu/drm/bridge/analogix-anx78xx.c err = PTR_ERR(anx78xx->map[i]); map 1379 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDL_REG, map 1384 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_IDH_REG, map 1391 drivers/gpu/drm/bridge/analogix-anx78xx.c err = regmap_read(anx78xx->map[I2C_IDX_TX_P2], SP_DEVICE_VERSION_REG, map 53 drivers/gpu/drm/drm_bufs.c struct drm_local_map *map) map 65 drivers/gpu/drm/drm_bufs.c if (!entry->map || map 66 drivers/gpu/drm/drm_bufs.c map->type != entry->map->type || map 69 drivers/gpu/drm/drm_bufs.c switch (map->type) { map 71 drivers/gpu/drm/drm_bufs.c if (map->flags != _DRM_CONTAINS_LOCK) map 76 
drivers/gpu/drm/drm_bufs.c if ((entry->map->offset & 0xffffffff) == map 77 drivers/gpu/drm/drm_bufs.c (map->offset & 0xffffffff)) map 82 drivers/gpu/drm/drm_bufs.c if (entry->map->offset == map->offset) map 150 drivers/gpu/drm/drm_bufs.c struct drm_local_map *map; map 156 drivers/gpu/drm/drm_bufs.c map = kmalloc(sizeof(*map), GFP_KERNEL); map 157 drivers/gpu/drm/drm_bufs.c if (!map) map 160 drivers/gpu/drm/drm_bufs.c map->offset = offset; map 161 drivers/gpu/drm/drm_bufs.c map->size = size; map 162 drivers/gpu/drm/drm_bufs.c map->flags = flags; map 163 drivers/gpu/drm/drm_bufs.c map->type = type; map 169 drivers/gpu/drm/drm_bufs.c if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) { map 170 drivers/gpu/drm/drm_bufs.c kfree(map); map 174 drivers/gpu/drm/drm_bufs.c (unsigned long long)map->offset, map->size, map->type); map 180 drivers/gpu/drm/drm_bufs.c if (map->type == _DRM_SHM) map 181 drivers/gpu/drm/drm_bufs.c map->size = PAGE_ALIGN(map->size); map 183 drivers/gpu/drm/drm_bufs.c if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) { map 184 drivers/gpu/drm/drm_bufs.c kfree(map); map 187 drivers/gpu/drm/drm_bufs.c map->mtrr = -1; map 188 drivers/gpu/drm/drm_bufs.c map->handle = NULL; map 190 drivers/gpu/drm/drm_bufs.c switch (map->type) { map 194 drivers/gpu/drm/drm_bufs.c if (map->offset + (map->size-1) < map->offset || map 195 drivers/gpu/drm/drm_bufs.c map->offset < virt_to_phys(high_memory)) { map 196 drivers/gpu/drm/drm_bufs.c kfree(map); map 204 drivers/gpu/drm/drm_bufs.c list = drm_find_matching_map(dev, map); map 206 drivers/gpu/drm/drm_bufs.c if (list->map->size != map->size) { map 209 drivers/gpu/drm/drm_bufs.c map->type, map->size, map 210 drivers/gpu/drm/drm_bufs.c list->map->size); map 211 drivers/gpu/drm/drm_bufs.c list->map->size = map->size; map 214 drivers/gpu/drm/drm_bufs.c kfree(map); map 219 drivers/gpu/drm/drm_bufs.c if (map->type == _DRM_FRAME_BUFFER || map 220 drivers/gpu/drm/drm_bufs.c (map->flags & _DRM_WRITE_COMBINING)) { map 221 drivers/gpu/drm/drm_bufs.c map->mtrr = map 222 drivers/gpu/drm/drm_bufs.c arch_phys_wc_add(map->offset, map->size); map 224 drivers/gpu/drm/drm_bufs.c if (map->type == _DRM_REGISTERS) { map 225 drivers/gpu/drm/drm_bufs.c if (map->flags & _DRM_WRITE_COMBINING) map 226 drivers/gpu/drm/drm_bufs.c map->handle = ioremap_wc(map->offset, map 227 drivers/gpu/drm/drm_bufs.c map->size); map 229 drivers/gpu/drm/drm_bufs.c map->handle = ioremap(map->offset, map->size); map 230 drivers/gpu/drm/drm_bufs.c if (!map->handle) { map 231 drivers/gpu/drm/drm_bufs.c kfree(map); map 238 drivers/gpu/drm/drm_bufs.c list = drm_find_matching_map(dev, map); map 240 drivers/gpu/drm/drm_bufs.c if (list->map->size != map->size) { map 243 drivers/gpu/drm/drm_bufs.c map->type, map->size, list->map->size); map 244 drivers/gpu/drm/drm_bufs.c list->map->size = map->size; map 247 drivers/gpu/drm/drm_bufs.c kfree(map); map 251 drivers/gpu/drm/drm_bufs.c map->handle = vmalloc_user(map->size); map 253 drivers/gpu/drm/drm_bufs.c map->size, order_base_2(map->size), map->handle); map 254 drivers/gpu/drm/drm_bufs.c if (!map->handle) { map 255 drivers/gpu/drm/drm_bufs.c kfree(map); map 258 drivers/gpu/drm/drm_bufs.c map->offset = (unsigned long)map->handle; map 259 drivers/gpu/drm/drm_bufs.c if (map->flags & _DRM_CONTAINS_LOCK) { map 262 drivers/gpu/drm/drm_bufs.c vfree(map->handle); map 263 drivers/gpu/drm/drm_bufs.c kfree(map); map 266 drivers/gpu/drm/drm_bufs.c dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock 
*/ map 274 drivers/gpu/drm/drm_bufs.c kfree(map); map 278 drivers/gpu/drm/drm_bufs.c map->offset += dev->hose->mem_space->start; map 286 drivers/gpu/drm/drm_bufs.c if (map->offset < dev->agp->base || map 287 drivers/gpu/drm/drm_bufs.c map->offset > dev->agp->base + map 289 drivers/gpu/drm/drm_bufs.c map->offset += dev->agp->base; map 291 drivers/gpu/drm/drm_bufs.c map->mtrr = dev->agp->agp_mtrr; /* for getmap */ map 300 drivers/gpu/drm/drm_bufs.c if ((map->offset >= entry->bound) && map 301 drivers/gpu/drm/drm_bufs.c (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) { map 307 drivers/gpu/drm/drm_bufs.c kfree(map); map 311 drivers/gpu/drm/drm_bufs.c (unsigned long long)map->offset, map->size); map 317 drivers/gpu/drm/drm_bufs.c kfree(map); map 320 drivers/gpu/drm/drm_bufs.c map->offset += (unsigned long)dev->sg->virtual; map 327 drivers/gpu/drm/drm_bufs.c dmah = drm_pci_alloc(dev, map->size, map->size); map 329 drivers/gpu/drm/drm_bufs.c kfree(map); map 332 drivers/gpu/drm/drm_bufs.c map->handle = dmah->vaddr; map 333 drivers/gpu/drm/drm_bufs.c map->offset = (unsigned long)dmah->busaddr; map 337 drivers/gpu/drm/drm_bufs.c kfree(map); map 343 drivers/gpu/drm/drm_bufs.c if (map->type == _DRM_REGISTERS) map 344 drivers/gpu/drm/drm_bufs.c iounmap(map->handle); map 345 drivers/gpu/drm/drm_bufs.c kfree(map); map 348 drivers/gpu/drm/drm_bufs.c list->map = map; map 355 drivers/gpu/drm/drm_bufs.c user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle : map 356 drivers/gpu/drm/drm_bufs.c map->offset; map 358 drivers/gpu/drm/drm_bufs.c (map->type == _DRM_SHM)); map 360 drivers/gpu/drm/drm_bufs.c if (map->type == _DRM_REGISTERS) map 361 drivers/gpu/drm/drm_bufs.c iounmap(map->handle); map 362 drivers/gpu/drm/drm_bufs.c kfree(map); map 371 drivers/gpu/drm/drm_bufs.c if (!(map->flags & _DRM_DRIVER)) map 386 drivers/gpu/drm/drm_bufs.c *map_ptr = list->map; map 397 drivers/gpu/drm/drm_bufs.c return _entry->map; map 416 drivers/gpu/drm/drm_bufs.c struct drm_map *map = data; map 420 drivers/gpu/drm/drm_bufs.c if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM)) map 427 drivers/gpu/drm/drm_bufs.c err = drm_addmap_core(dev, map->offset, map->size, map->type, map 428 drivers/gpu/drm/drm_bufs.c map->flags, &maplist); map 434 drivers/gpu/drm/drm_bufs.c map->handle = (void *)(unsigned long)maplist->user_token; map 442 drivers/gpu/drm/drm_bufs.c map->mtrr = -1; map 463 drivers/gpu/drm/drm_bufs.c struct drm_map *map = data; map 473 drivers/gpu/drm/drm_bufs.c idx = map->offset; map 486 drivers/gpu/drm/drm_bufs.c if (!r_list || !r_list->map) { map 491 drivers/gpu/drm/drm_bufs.c map->offset = r_list->map->offset; map 492 drivers/gpu/drm/drm_bufs.c map->size = r_list->map->size; map 493 drivers/gpu/drm/drm_bufs.c map->type = r_list->map->type; map 494 drivers/gpu/drm/drm_bufs.c map->flags = r_list->map->flags; map 495 drivers/gpu/drm/drm_bufs.c map->handle = (void *)(unsigned long) r_list->user_token; map 496 drivers/gpu/drm/drm_bufs.c map->mtrr = arch_phys_wc_index(r_list->map->mtrr); map 513 drivers/gpu/drm/drm_bufs.c int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) map 522 drivers/gpu/drm/drm_bufs.c if (r_list->map == map) { map 536 drivers/gpu/drm/drm_bufs.c switch (map->type) { map 538 drivers/gpu/drm/drm_bufs.c iounmap(map->handle); map 541 drivers/gpu/drm/drm_bufs.c arch_phys_wc_del(map->mtrr); map 544 drivers/gpu/drm/drm_bufs.c vfree(map->handle); map 557 drivers/gpu/drm/drm_bufs.c dmah.vaddr = map->handle; map 558 
drivers/gpu/drm/drm_bufs.c dmah.busaddr = map->offset; map 559 drivers/gpu/drm/drm_bufs.c dmah.size = map->size; map 563 drivers/gpu/drm/drm_bufs.c kfree(map); map 569 drivers/gpu/drm/drm_bufs.c void drm_legacy_rmmap(struct drm_device *dev, struct drm_local_map *map) map 576 drivers/gpu/drm/drm_bufs.c drm_legacy_rmmap_locked(dev, map); map 591 drivers/gpu/drm/drm_bufs.c drm_legacy_rmmap_locked(dev, r_list->map); map 603 drivers/gpu/drm/drm_bufs.c drm_legacy_rmmap(dev, r_list->map); map 625 drivers/gpu/drm/drm_bufs.c struct drm_local_map *map = NULL; map 635 drivers/gpu/drm/drm_bufs.c if (r_list->map && map 637 drivers/gpu/drm/drm_bufs.c r_list->map->flags & _DRM_REMOVABLE) { map 638 drivers/gpu/drm/drm_bufs.c map = r_list->map; map 646 drivers/gpu/drm/drm_bufs.c if (list_empty(&dev->maplist) || !map) { map 652 drivers/gpu/drm/drm_bufs.c if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { map 657 drivers/gpu/drm/drm_bufs.c ret = drm_legacy_rmmap_locked(dev, map); map 1514 drivers/gpu/drm/drm_bufs.c struct drm_local_map *map = dev->agp_buffer_map; map 1517 drivers/gpu/drm/drm_bufs.c if (!map) { map 1521 drivers/gpu/drm/drm_bufs.c virtual = vm_mmap(file_priv->filp, 0, map->size, map 1597 drivers/gpu/drm/drm_bufs.c if (entry->map && entry->map->type == _DRM_SHM && map 1598 drivers/gpu/drm/drm_bufs.c (entry->map->flags & _DRM_CONTAINS_LOCK)) { map 1599 drivers/gpu/drm/drm_bufs.c return entry->map; map 182 drivers/gpu/drm/drm_context.c struct drm_local_map *map; map 191 drivers/gpu/drm/drm_context.c map = idr_find(&dev->ctx_idr, request->ctx_id); map 192 drivers/gpu/drm/drm_context.c if (!map) { map 199 drivers/gpu/drm/drm_context.c if (_entry->map == map) { map 230 drivers/gpu/drm/drm_context.c struct drm_local_map *map = NULL; map 239 drivers/gpu/drm/drm_context.c if (r_list->map map 248 drivers/gpu/drm/drm_context.c map = r_list->map; map 249 drivers/gpu/drm/drm_context.c if (!map) map 252 drivers/gpu/drm/drm_context.c if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) map 3814 drivers/gpu/drm/drm_edid.c u64 map = 0; map 3839 drivers/gpu/drm/drm_edid.c map |= (u64)db[2 + count] << (8 * count); map 3841 drivers/gpu/drm/drm_edid.c if (map) map 3844 drivers/gpu/drm/drm_edid.c hdmi->y420_cmdb_map = map; map 303 drivers/gpu/drm/drm_gem_vram_helper.c void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, map 309 drivers/gpu/drm/drm_gem_vram_helper.c if (kmap->virtual || !map) map 177 drivers/gpu/drm/drm_ioc32.c struct drm_map map; map 183 drivers/gpu/drm/drm_ioc32.c map.offset = m32.offset; map 184 drivers/gpu/drm/drm_ioc32.c err = drm_ioctl_kernel(file, drm_legacy_getmap_ioctl, &map, 0); map 188 drivers/gpu/drm/drm_ioc32.c m32.offset = map.offset; map 189 drivers/gpu/drm/drm_ioc32.c m32.size = map.size; map 190 drivers/gpu/drm/drm_ioc32.c m32.type = map.type; map 191 drivers/gpu/drm/drm_ioc32.c m32.flags = map.flags; map 192 drivers/gpu/drm/drm_ioc32.c m32.handle = ptr_to_compat((void __user *)map.handle); map 193 drivers/gpu/drm/drm_ioc32.c m32.mtrr = map.mtrr; map 205 drivers/gpu/drm/drm_ioc32.c struct drm_map map; map 211 drivers/gpu/drm/drm_ioc32.c map.offset = m32.offset; map 212 drivers/gpu/drm/drm_ioc32.c map.size = m32.size; map 213 drivers/gpu/drm/drm_ioc32.c map.type = m32.type; map 214 drivers/gpu/drm/drm_ioc32.c map.flags = m32.flags; map 216 drivers/gpu/drm/drm_ioc32.c err = drm_ioctl_kernel(file, drm_legacy_addmap_ioctl, &map, map 221 drivers/gpu/drm/drm_ioc32.c m32.offset = map.offset; map 222 drivers/gpu/drm/drm_ioc32.c m32.mtrr = 
map.mtrr; map 223 drivers/gpu/drm/drm_ioc32.c m32.handle = ptr_to_compat((void __user *)map.handle); map 224 drivers/gpu/drm/drm_ioc32.c if (map.handle != compat_ptr(m32.handle)) map 226 drivers/gpu/drm/drm_ioc32.c map.handle, m32.type, m32.offset); map 238 drivers/gpu/drm/drm_ioc32.c struct drm_map map; map 243 drivers/gpu/drm/drm_ioc32.c map.handle = compat_ptr(handle); map 244 drivers/gpu/drm/drm_ioc32.c return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH); map 129 drivers/gpu/drm/drm_memory.c void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev) map 131 drivers/gpu/drm/drm_memory.c if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) map 132 drivers/gpu/drm/drm_memory.c map->handle = agp_remap(map->offset, map->size, dev); map 134 drivers/gpu/drm/drm_memory.c map->handle = ioremap(map->offset, map->size); map 138 drivers/gpu/drm/drm_memory.c void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) map 140 drivers/gpu/drm/drm_memory.c if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) map 141 drivers/gpu/drm/drm_memory.c map->handle = agp_remap(map->offset, map->size, dev); map 143 drivers/gpu/drm/drm_memory.c map->handle = ioremap_wc(map->offset, map->size); map 147 drivers/gpu/drm/drm_memory.c void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) map 149 drivers/gpu/drm/drm_memory.c if (!map->handle || !map->size) map 152 drivers/gpu/drm/drm_memory.c if (dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) map 153 drivers/gpu/drm/drm_memory.c vunmap(map->handle); map 155 drivers/gpu/drm/drm_memory.c iounmap(map->handle); map 69 drivers/gpu/drm/drm_vm.c static pgprot_t drm_io_prot(struct drm_local_map *map, map 79 drivers/gpu/drm/drm_vm.c if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING)) map 121 drivers/gpu/drm/drm_vm.c struct drm_local_map *map = NULL; map 138 drivers/gpu/drm/drm_vm.c map = r_list->map; map 140 drivers/gpu/drm/drm_vm.c if (map && map->type == _DRM_AGP) { map 146 drivers/gpu/drm/drm_vm.c resource_size_t baddr = map->offset + offset; map 208 drivers/gpu/drm/drm_vm.c struct drm_local_map *map = vma->vm_private_data; map 213 drivers/gpu/drm/drm_vm.c if (!map) map 217 drivers/gpu/drm/drm_vm.c i = (unsigned long)map->handle + offset; map 241 drivers/gpu/drm/drm_vm.c struct drm_local_map *map; map 248 drivers/gpu/drm/drm_vm.c map = vma->vm_private_data; map 252 drivers/gpu/drm/drm_vm.c if (pt->vma->vm_private_data == map) map 261 drivers/gpu/drm/drm_vm.c if (found_maps == 1 && map->flags & _DRM_REMOVABLE) { map 267 drivers/gpu/drm/drm_vm.c if (r_list->map == map) map 274 drivers/gpu/drm/drm_vm.c switch (map->type) { map 277 drivers/gpu/drm/drm_vm.c arch_phys_wc_del(map->mtrr); map 278 drivers/gpu/drm/drm_vm.c iounmap(map->handle); map 281 drivers/gpu/drm/drm_vm.c vfree(map->handle); map 287 drivers/gpu/drm/drm_vm.c dmah.vaddr = map->handle; map 288 drivers/gpu/drm/drm_vm.c dmah.busaddr = map->offset; map 289 drivers/gpu/drm/drm_vm.c dmah.size = map->size; map 293 drivers/gpu/drm/drm_vm.c kfree(map); map 345 drivers/gpu/drm/drm_vm.c struct drm_local_map *map = vma->vm_private_data; map 360 drivers/gpu/drm/drm_vm.c map_offset = map->offset - (unsigned long)dev->sg->virtual; map 535 drivers/gpu/drm/drm_vm.c struct drm_local_map *map = NULL; map 562 drivers/gpu/drm/drm_vm.c map = drm_hash_entry(hash, struct drm_map_list, hash)->map; map 563 drivers/gpu/drm/drm_vm.c if (!map || ((map->flags & _DRM_RESTRICTED) && 
!capable(CAP_SYS_ADMIN))) map 567 drivers/gpu/drm/drm_vm.c if (map->size < vma->vm_end - vma->vm_start) map 570 drivers/gpu/drm/drm_vm.c if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) { map 585 drivers/gpu/drm/drm_vm.c switch (map->type) { map 605 drivers/gpu/drm/drm_vm.c vma->vm_page_prot = drm_io_prot(map, vma); map 607 drivers/gpu/drm/drm_vm.c (map->offset + offset) >> PAGE_SHIFT, map 613 drivers/gpu/drm/drm_vm.c map->type, map 614 drivers/gpu/drm/drm_vm.c vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset)); map 622 drivers/gpu/drm/drm_vm.c page_to_pfn(virt_to_page(map->handle)), map 625 drivers/gpu/drm/drm_vm.c vma->vm_page_prot = drm_dma_prot(map->type, vma); map 629 drivers/gpu/drm/drm_vm.c vma->vm_private_data = (void *)map; map 633 drivers/gpu/drm/drm_vm.c vma->vm_private_data = (void *)map; map 634 drivers/gpu/drm/drm_vm.c vma->vm_page_prot = drm_dma_prot(map->type, vma); map 115 drivers/gpu/drm/etnaviv/etnaviv_iommu.c .map = etnaviv_iommuv1_map, map 254 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c .map = etnaviv_iommuv2_map, map 55 drivers/gpu/drm/etnaviv/etnaviv_mmu.c ret = context->global->ops->map(context, iova, paddr, pgsize, map 25 drivers/gpu/drm/etnaviv/etnaviv_mmu.h int (*map)(struct etnaviv_iommu_context *context, unsigned long iova, map 315 drivers/gpu/drm/exynos/exynos_drm_ipp.c const struct exynos_drm_param_map *map = exynos_drm_ipp_params_maps; map 326 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (map[i].id == id) map 329 drivers/gpu/drm/exynos/exynos_drm_ipp.c map[i].size > size) map 332 drivers/gpu/drm/exynos/exynos_drm_ipp.c if (copy_from_user((void *)task + map[i].offset, params, map 333 drivers/gpu/drm/exynos/exynos_drm_ipp.c map[i].size)) map 336 drivers/gpu/drm/exynos/exynos_drm_ipp.c params += map[i].size; map 337 drivers/gpu/drm/exynos/exynos_drm_ipp.c size -= map[i].size; map 578 drivers/gpu/drm/gma500/cdv_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 688 drivers/gpu/drm/gma500/cdv_intel_display.c pipeconf = REG_READ(map->conf); map 726 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->dpll, dpll | DPLL_VGA_MODE_DIS | DPLL_SYNCLOCK_ENABLE); map 727 drivers/gpu/drm/gma500/cdv_intel_display.c REG_READ(map->dpll); map 771 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->dpll, map 772 drivers/gpu/drm/gma500/cdv_intel_display.c (REG_READ(map->dpll) & ~DPLL_LOCK) | DPLL_VCO_ENABLE); map 773 drivers/gpu/drm/gma500/cdv_intel_display.c REG_READ(map->dpll); map 777 drivers/gpu/drm/gma500/cdv_intel_display.c if (!(REG_READ(map->dpll) & DPLL_LOCK)) { map 784 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->dpll_md, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); map 787 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | map 789 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | map 791 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | map 793 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | map 795 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | map 797 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | map 802 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->size, map 804 
drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->pos, 0); map 805 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->src, map 807 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->conf, pipeconf); map 808 drivers/gpu/drm/gma500/cdv_intel_display.c REG_READ(map->conf); map 812 drivers/gpu/drm/gma500/cdv_intel_display.c REG_WRITE(map->cntr, dspcntr); map 845 drivers/gpu/drm/gma500/cdv_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 853 drivers/gpu/drm/gma500/cdv_intel_display.c dpll = REG_READ(map->dpll); map 855 drivers/gpu/drm/gma500/cdv_intel_display.c fp = REG_READ(map->fp0); map 857 drivers/gpu/drm/gma500/cdv_intel_display.c fp = REG_READ(map->fp1); map 925 drivers/gpu/drm/gma500/cdv_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 933 drivers/gpu/drm/gma500/cdv_intel_display.c htot = REG_READ(map->htotal); map 934 drivers/gpu/drm/gma500/cdv_intel_display.c hsync = REG_READ(map->hsync); map 935 drivers/gpu/drm/gma500/cdv_intel_display.c vtot = REG_READ(map->vtotal); map 936 drivers/gpu/drm/gma500/cdv_intel_display.c vsync = REG_READ(map->vsync); map 59 drivers/gpu/drm/gma500/gma_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 83 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->stride, fb->pitches[0]); map 85 drivers/gpu/drm/gma500/gma_display.c dspcntr = REG_READ(map->cntr); map 107 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->cntr, dspcntr); map 116 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->base, offset + start); map 117 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->base); map 119 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->base, offset); map 120 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->base); map 121 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->surf, start); map 122 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->surf); map 141 drivers/gpu/drm/gma500/gma_display.c const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe]; map 142 drivers/gpu/drm/gma500/gma_display.c int palreg = map->palette; map 195 drivers/gpu/drm/gma500/gma_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 215 drivers/gpu/drm/gma500/gma_display.c temp = REG_READ(map->dpll); map 217 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->dpll, temp); map 218 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->dpll); map 221 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); map 222 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->dpll); map 225 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); map 226 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->dpll); map 232 drivers/gpu/drm/gma500/gma_display.c temp = REG_READ(map->cntr); map 234 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->cntr, map 237 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->base, REG_READ(map->base)); map 243 drivers/gpu/drm/gma500/gma_display.c temp = REG_READ(map->conf); map 245 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->conf, temp | PIPEACONF_ENABLE); map 247 drivers/gpu/drm/gma500/gma_display.c temp = REG_READ(map->status); map 250 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->status, temp); map 251 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->status); map 279 drivers/gpu/drm/gma500/gma_display.c temp = REG_READ(map->cntr); map 281 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->cntr, map 284 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->base, 
REG_READ(map->base)); map 285 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->base); map 289 drivers/gpu/drm/gma500/gma_display.c temp = REG_READ(map->conf); map 291 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE); map 292 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->conf); map 301 drivers/gpu/drm/gma500/gma_display.c temp = REG_READ(map->dpll); map 303 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE); map 304 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->dpll); map 530 drivers/gpu/drm/gma500/gma_display.c const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe]; map 539 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveDSPCNTR = REG_READ(map->cntr); map 540 drivers/gpu/drm/gma500/gma_display.c crtc_state->savePIPECONF = REG_READ(map->conf); map 541 drivers/gpu/drm/gma500/gma_display.c crtc_state->savePIPESRC = REG_READ(map->src); map 542 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveFP0 = REG_READ(map->fp0); map 543 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveFP1 = REG_READ(map->fp1); map 544 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveDPLL = REG_READ(map->dpll); map 545 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveHTOTAL = REG_READ(map->htotal); map 546 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveHBLANK = REG_READ(map->hblank); map 547 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveHSYNC = REG_READ(map->hsync); map 548 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveVTOTAL = REG_READ(map->vtotal); map 549 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveVBLANK = REG_READ(map->vblank); map 550 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveVSYNC = REG_READ(map->vsync); map 551 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveDSPSTRIDE = REG_READ(map->stride); map 554 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveDSPSIZE = REG_READ(map->size); map 555 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveDSPPOS = REG_READ(map->pos); map 557 drivers/gpu/drm/gma500/gma_display.c crtc_state->saveDSPBASE = REG_READ(map->base); map 559 drivers/gpu/drm/gma500/gma_display.c palette_reg = map->palette; map 573 drivers/gpu/drm/gma500/gma_display.c const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe]; map 583 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->dpll, map 585 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->dpll); map 589 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->fp0, crtc_state->saveFP0); map 590 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->fp0); map 592 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->fp1, crtc_state->saveFP1); map 593 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->fp1); map 595 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->dpll, crtc_state->saveDPLL); map 596 drivers/gpu/drm/gma500/gma_display.c REG_READ(map->dpll); map 599 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->htotal, crtc_state->saveHTOTAL); map 600 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->hblank, crtc_state->saveHBLANK); map 601 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->hsync, crtc_state->saveHSYNC); map 602 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->vtotal, crtc_state->saveVTOTAL); map 603 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->vblank, crtc_state->saveVBLANK); map 604 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->vsync, crtc_state->saveVSYNC); map 605 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE); map 
607 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->size, crtc_state->saveDSPSIZE); map 608 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->pos, crtc_state->saveDSPPOS); map 610 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->src, crtc_state->savePIPESRC); map 611 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->base, crtc_state->saveDSPBASE); map 612 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->conf, crtc_state->savePIPECONF); map 616 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->cntr, crtc_state->saveDSPCNTR); map 617 drivers/gpu/drm/gma500/gma_display.c REG_WRITE(map->base, crtc_state->saveDSPBASE); map 621 drivers/gpu/drm/gma500/gma_display.c palette_reg = map->palette; map 161 drivers/gpu/drm/gma500/mdfld_device.c const struct psb_offset *map = &dev_priv->regmap[pipenum]; map 187 drivers/gpu/drm/gma500/mdfld_device.c pipe->dpll = PSB_RVDC32(map->dpll); map 188 drivers/gpu/drm/gma500/mdfld_device.c pipe->fp0 = PSB_RVDC32(map->fp0); map 189 drivers/gpu/drm/gma500/mdfld_device.c pipe->conf = PSB_RVDC32(map->conf); map 190 drivers/gpu/drm/gma500/mdfld_device.c pipe->htotal = PSB_RVDC32(map->htotal); map 191 drivers/gpu/drm/gma500/mdfld_device.c pipe->hblank = PSB_RVDC32(map->hblank); map 192 drivers/gpu/drm/gma500/mdfld_device.c pipe->hsync = PSB_RVDC32(map->hsync); map 193 drivers/gpu/drm/gma500/mdfld_device.c pipe->vtotal = PSB_RVDC32(map->vtotal); map 194 drivers/gpu/drm/gma500/mdfld_device.c pipe->vblank = PSB_RVDC32(map->vblank); map 195 drivers/gpu/drm/gma500/mdfld_device.c pipe->vsync = PSB_RVDC32(map->vsync); map 196 drivers/gpu/drm/gma500/mdfld_device.c pipe->src = PSB_RVDC32(map->src); map 197 drivers/gpu/drm/gma500/mdfld_device.c pipe->stride = PSB_RVDC32(map->stride); map 198 drivers/gpu/drm/gma500/mdfld_device.c pipe->linoff = PSB_RVDC32(map->linoff); map 199 drivers/gpu/drm/gma500/mdfld_device.c pipe->tileoff = PSB_RVDC32(map->tileoff); map 200 drivers/gpu/drm/gma500/mdfld_device.c pipe->size = PSB_RVDC32(map->size); map 201 drivers/gpu/drm/gma500/mdfld_device.c pipe->pos = PSB_RVDC32(map->pos); map 202 drivers/gpu/drm/gma500/mdfld_device.c pipe->surf = PSB_RVDC32(map->surf); map 203 drivers/gpu/drm/gma500/mdfld_device.c pipe->cntr = PSB_RVDC32(map->cntr); map 204 drivers/gpu/drm/gma500/mdfld_device.c pipe->status = PSB_RVDC32(map->status); map 208 drivers/gpu/drm/gma500/mdfld_device.c pipe->palette[i] = PSB_RVDC32(map->palette + (i << 2)); map 239 drivers/gpu/drm/gma500/mdfld_device.c const struct psb_offset *map = &dev_priv->regmap[pipenum]; map 273 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, map->dpll); map 274 drivers/gpu/drm/gma500/mdfld_device.c PSB_RVDC32(map->dpll); map 276 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->fp0, map->fp0); map 279 drivers/gpu/drm/gma500/mdfld_device.c dpll = PSB_RVDC32(map->dpll); map 287 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(dpll, map->dpll); map 292 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->fp0, map->fp0); map 293 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(dpll_val, map->dpll); map 298 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(dpll_val, map->dpll); map 299 drivers/gpu/drm/gma500/mdfld_device.c PSB_RVDC32(map->dpll); map 303 drivers/gpu/drm/gma500/mdfld_device.c !(PSB_RVDC32(map->conf) & PIPECONF_DSIPLL_LOCK)) { map 316 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->htotal, map->htotal); map 317 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->hblank, map->hblank); map 318 drivers/gpu/drm/gma500/mdfld_device.c 
PSB_WVDC32(pipe->hsync, map->hsync); map 319 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->vtotal, map->vtotal); map 320 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->vblank, map->vblank); map 321 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->vsync, map->vsync); map 322 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->src, map->src); map 323 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->status, map->status); map 326 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->stride, map->stride); map 327 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->linoff, map->linoff); map 328 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->tileoff, map->tileoff); map 329 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->size, map->size); map 330 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->pos, map->pos); map 331 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->surf, map->surf); map 337 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->palette[i], map->palette + (i << 2)); map 347 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->cntr & ~DISPLAY_PLANE_ENABLE, map->cntr); map 365 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->cntr, map->cntr); map 394 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->conf, map->conf); map 399 drivers/gpu/drm/gma500/mdfld_device.c PSB_WVDC32(pipe->palette[i], map->palette + (i << 2)); map 600 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 629 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c pkg_sender->dpll_reg = map->dpll; map 630 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c pkg_sender->dspcntr_reg = map->cntr; map 631 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c pkg_sender->pipeconf_reg = map->conf; map 632 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c pkg_sender->dsplinoff_reg = map->linoff; map 633 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c pkg_sender->dspsurf_reg = map->surf; map 634 drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c pkg_sender->pipestat_reg = map->status; map 45 drivers/gpu/drm/gma500/mdfld_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 64 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->conf); map 73 drivers/gpu/drm/gma500/mdfld_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 92 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->conf); map 162 drivers/gpu/drm/gma500/mdfld_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 192 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->stride, fb->pitches[0]); map 193 drivers/gpu/drm/gma500/mdfld_intel_display.c dspcntr = REG_READ(map->cntr); map 211 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->cntr, dspcntr); map 215 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->linoff, offset); map 216 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->linoff); map 217 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->surf, start); map 218 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->surf); map 232 drivers/gpu/drm/gma500/mdfld_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 243 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->cntr); map 245 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->cntr, map 248 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->base, REG_READ(map->base)); map 249 
drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->base); map 255 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->conf); map 259 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->conf, temp); map 260 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->conf); map 266 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->dpll); map 272 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, temp); map 273 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->dpll); map 280 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, temp | MDFLD_PWR_GATE_EN); map 301 drivers/gpu/drm/gma500/mdfld_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 322 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->dpll); map 329 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, temp); map 334 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, temp); map 335 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->dpll); map 339 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE); map 340 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->dpll); map 348 drivers/gpu/drm/gma500/mdfld_intel_display.c !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) { map 355 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->cntr); map 357 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->cntr, map 360 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->base, REG_READ(map->base)); map 364 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->conf); map 366 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->conf, pipeconf); map 375 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->status, REG_READ(map->status)); map 377 drivers/gpu/drm/gma500/mdfld_intel_display.c if (PIPE_VBLANK_STATUS & REG_READ(map->status)) map 382 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->cntr); map 383 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->cntr, map 385 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->base, REG_READ(map->base)); map 389 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->conf); map 391 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->conf, temp); map 398 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->cntr); map 399 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->cntr, map 401 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->base, REG_READ(map->base)); map 405 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->conf); map 407 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->conf, temp); map 431 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->cntr); map 433 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->cntr, map 436 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->base, REG_READ(map->base)); map 437 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->base); map 441 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->conf); map 445 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->conf, temp); map 446 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->conf); map 452 drivers/gpu/drm/gma500/mdfld_intel_display.c temp = REG_READ(map->dpll); map 458 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, temp); map 459 
drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->dpll); map 666 drivers/gpu/drm/gma500/mdfld_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 774 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->size, ((min(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16) map 777 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->src, ((mode->crtc_hdisplay - 1) << 16) map 780 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->size, map 783 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->src, map 788 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->pos, 0); map 806 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->htotal, (mode->crtc_hdisplay - 1) | map 808 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->vtotal, (mode->crtc_vdisplay - 1) | map 810 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - map 813 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - map 816 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - map 819 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - map 823 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | map 825 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | map 827 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | map 829 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | map 831 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | map 833 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | map 848 drivers/gpu/drm/gma500/mdfld_intel_display.c dev_priv->dspcntr[pipe] = REG_READ(map->cntr); map 915 drivers/gpu/drm/gma500/mdfld_intel_display.c dpll = REG_READ(map->dpll); map 919 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, dpll); map 920 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->dpll); map 927 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->fp0, 0); map 929 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, dpll); map 938 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, dpll); map 979 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->fp0, fp); map 980 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, dpll); map 985 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->dpll, dpll); map 986 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->dpll); map 990 drivers/gpu/drm/gma500/mdfld_intel_display.c !(REG_READ(map->conf) & PIPECONF_DSIPLL_LOCK)) { map 1000 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->conf, dev_priv->pipeconf[pipe]); map 1001 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_READ(map->conf); map 1004 drivers/gpu/drm/gma500/mdfld_intel_display.c REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]); map 220 drivers/gpu/drm/gma500/oaktrail_crtc.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 242 drivers/gpu/drm/gma500/oaktrail_crtc.c temp = REG_READ_WITH_AUX(map->dpll, i); map 244 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->dpll, temp, i); map 245 
drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->dpll, i); map 248 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->dpll, map 250 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->dpll, i); map 253 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->dpll, map 255 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->dpll, i); map 261 drivers/gpu/drm/gma500/oaktrail_crtc.c temp = REG_READ_WITH_AUX(map->conf, i); map 263 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->conf, map 268 drivers/gpu/drm/gma500/oaktrail_crtc.c temp = REG_READ_WITH_AUX(map->cntr, i); map 270 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->cntr, map 274 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->base, map 275 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->base, i), i); map 294 drivers/gpu/drm/gma500/oaktrail_crtc.c temp = REG_READ_WITH_AUX(map->cntr, i); map 296 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->cntr, map 299 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->base, map 300 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ(map->base), i); map 301 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->base, i); map 305 drivers/gpu/drm/gma500/oaktrail_crtc.c temp = REG_READ_WITH_AUX(map->conf, i); map 307 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->conf, map 309 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->conf, i); map 314 drivers/gpu/drm/gma500/oaktrail_crtc.c temp = REG_READ_WITH_AUX(map->dpll, i); map 316 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->dpll, map 318 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->dpll, i); map 366 drivers/gpu/drm/gma500/oaktrail_crtc.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 422 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->src, ((mode->crtc_hdisplay - 1) << 16) | map 442 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->htotal, (mode->crtc_hdisplay - 1) | map 444 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->vtotal, (mode->crtc_vdisplay - 1) | map 446 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->hblank, map 449 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->hsync, map 452 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->vblank, map 455 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->vsync, map 461 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | map 463 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | map 465 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | map 467 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | map 469 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | map 471 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | map 484 drivers/gpu/drm/gma500/oaktrail_crtc.c pipeconf = REG_READ(map->conf); map 487 drivers/gpu/drm/gma500/oaktrail_crtc.c dspcntr = REG_READ(map->cntr); map 554 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->fp0, fp, i); map 555 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->dpll, dpll & ~DPLL_VCO_ENABLE, i); 
map 556 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->dpll, i); map 563 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->fp0, fp, i); map 564 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->dpll, dpll, i); map 565 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->dpll, i); map 570 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->dpll, dpll, i); map 571 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->dpll, i); map 575 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->conf, pipeconf, i); map 576 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ_WITH_AUX(map->conf, i); map 579 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE_WITH_AUX(map->cntr, dspcntr, i); map 596 drivers/gpu/drm/gma500/oaktrail_crtc.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 614 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE(map->stride, fb->pitches[0]); map 616 drivers/gpu/drm/gma500/oaktrail_crtc.c dspcntr = REG_READ(map->cntr); map 638 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE(map->cntr, dspcntr); map 640 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE(map->base, offset); map 641 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ(map->base); map 642 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_WRITE(map->surf, start); map 643 drivers/gpu/drm/gma500/oaktrail_crtc.c REG_READ(map->surf); map 102 drivers/gpu/drm/gma500/psb_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 191 drivers/gpu/drm/gma500/psb_intel_display.c pipeconf = REG_READ(map->conf); map 213 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->fp0, fp); map 214 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->dpll, dpll & ~DPLL_VCO_ENABLE); map 215 drivers/gpu/drm/gma500/psb_intel_display.c REG_READ(map->dpll); map 248 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->fp0, fp); map 249 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->dpll, dpll); map 250 drivers/gpu/drm/gma500/psb_intel_display.c REG_READ(map->dpll); map 255 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->dpll, dpll); map 257 drivers/gpu/drm/gma500/psb_intel_display.c REG_READ(map->dpll); map 261 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->htotal, (adjusted_mode->crtc_hdisplay - 1) | map 263 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->hblank, (adjusted_mode->crtc_hblank_start - 1) | map 265 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->hsync, (adjusted_mode->crtc_hsync_start - 1) | map 267 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->vtotal, (adjusted_mode->crtc_vdisplay - 1) | map 269 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->vblank, (adjusted_mode->crtc_vblank_start - 1) | map 271 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->vsync, (adjusted_mode->crtc_vsync_start - 1) | map 276 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->size, map 278 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->pos, 0); map 279 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->src, map 281 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->conf, pipeconf); map 282 drivers/gpu/drm/gma500/psb_intel_display.c REG_READ(map->conf); map 286 drivers/gpu/drm/gma500/psb_intel_display.c REG_WRITE(map->cntr, dspcntr); map 303 drivers/gpu/drm/gma500/psb_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 311 drivers/gpu/drm/gma500/psb_intel_display.c dpll = REG_READ(map->dpll); map 313 
drivers/gpu/drm/gma500/psb_intel_display.c fp = REG_READ(map->fp0); map 315 drivers/gpu/drm/gma500/psb_intel_display.c fp = REG_READ(map->fp1); map 385 drivers/gpu/drm/gma500/psb_intel_display.c const struct psb_offset *map = &dev_priv->regmap[pipe]; map 388 drivers/gpu/drm/gma500/psb_intel_display.c htot = REG_READ(map->htotal); map 389 drivers/gpu/drm/gma500/psb_intel_display.c hsync = REG_READ(map->hsync); map 390 drivers/gpu/drm/gma500/psb_intel_display.c vtot = REG_READ(map->vtotal); map 391 drivers/gpu/drm/gma500/psb_intel_display.c vsync = REG_READ(map->vsync); map 262 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c struct regmap *map = ctx->noc_regmap; map 264 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c regmap_update_bits(map, ADE0_QOSGENERATOR_MODE, map 266 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c regmap_update_bits(map, ADE0_QOSGENERATOR_EXTCONTROL, map 269 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c regmap_update_bits(map, ADE1_QOSGENERATOR_MODE, map 271 drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c regmap_update_bits(map, ADE1_QOSGENERATOR_EXTCONTROL, map 221 drivers/gpu/drm/i810/i810_dma.c drm_legacy_ioremapfree(&dev_priv->ring.map, dev); map 235 drivers/gpu/drm/i810/i810_dma.c drm_legacy_ioremapfree(&buf_priv->map, dev); map 308 drivers/gpu/drm/i810/i810_dma.c buf_priv->map.offset = buf->bus_address; map 309 drivers/gpu/drm/i810/i810_dma.c buf_priv->map.size = buf->total; map 310 drivers/gpu/drm/i810/i810_dma.c buf_priv->map.type = _DRM_AGP; map 311 drivers/gpu/drm/i810/i810_dma.c buf_priv->map.flags = 0; map 312 drivers/gpu/drm/i810/i810_dma.c buf_priv->map.mtrr = 0; map 314 drivers/gpu/drm/i810/i810_dma.c drm_legacy_ioremap(&buf_priv->map, dev); map 315 drivers/gpu/drm/i810/i810_dma.c buf_priv->kernel_virtual = buf_priv->map.handle; map 329 drivers/gpu/drm/i810/i810_dma.c if (r_list->map && map 330 drivers/gpu/drm/i810/i810_dma.c r_list->map->type == _DRM_SHM && map 331 drivers/gpu/drm/i810/i810_dma.c r_list->map->flags & _DRM_CONTAINS_LOCK) { map 332 drivers/gpu/drm/i810/i810_dma.c dev_priv->sarea_map = r_list->map; map 365 drivers/gpu/drm/i810/i810_dma.c dev_priv->ring.map.offset = dev->agp->base + init->ring_start; map 366 drivers/gpu/drm/i810/i810_dma.c dev_priv->ring.map.size = init->ring_size; map 367 drivers/gpu/drm/i810/i810_dma.c dev_priv->ring.map.type = _DRM_AGP; map 368 drivers/gpu/drm/i810/i810_dma.c dev_priv->ring.map.flags = 0; map 369 drivers/gpu/drm/i810/i810_dma.c dev_priv->ring.map.mtrr = 0; map 371 drivers/gpu/drm/i810/i810_dma.c drm_legacy_ioremap(&dev_priv->ring.map, dev); map 373 drivers/gpu/drm/i810/i810_dma.c if (dev_priv->ring.map.handle == NULL) { map 381 drivers/gpu/drm/i810/i810_dma.c dev_priv->ring.virtual_start = dev_priv->ring.map.handle; map 68 drivers/gpu/drm/i810/i810_drv.h drm_local_map_t map; map 80 drivers/gpu/drm/i810/i810_drv.h drm_local_map_t map; map 219 drivers/gpu/drm/i915/display/intel_dsi_vbt.c struct gpio_map *map; map 229 drivers/gpu/drm/i915/display/intel_dsi_vbt.c map = &vlv_gpio_table[gpio_index]; map 246 drivers/gpu/drm/i915/display/intel_dsi_vbt.c pconf0 = VLV_GPIO_PCONF0(map->base_offset); map 247 drivers/gpu/drm/i915/display/intel_dsi_vbt.c padval = VLV_GPIO_PAD_VAL(map->base_offset); map 250 drivers/gpu/drm/i915/display/intel_dsi_vbt.c if (!map->init) { map 253 drivers/gpu/drm/i915/display/intel_dsi_vbt.c map->init = true; map 198 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c .map = i915_gem_dmabuf_kmap, map 20 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c void *map; map 29 
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c map = kmap_atomic(page); map 30 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c cpu = map + offset_in_page(offset); map 40 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c kunmap_atomic(map); map 52 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c void *map; map 61 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c map = kmap_atomic(page); map 62 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c cpu = map + offset_in_page(offset); map 69 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c kunmap_atomic(map); map 80 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c u32 __iomem *map; map 93 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c map = i915_vma_pin_iomap(vma); map 95 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c if (IS_ERR(map)) map 96 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c return PTR_ERR(map); map 98 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c iowrite32(v, &map[offset / sizeof(*map)]); map 109 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c u32 __iomem *map; map 122 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c map = i915_vma_pin_iomap(vma); map 124 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c if (IS_ERR(map)) map 125 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c return PTR_ERR(map); map 127 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *v = ioread32(&map[offset / sizeof(*map)]); map 137 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c u32 *map; map 146 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c map = i915_gem_object_pin_map(obj, I915_MAP_WC); map 147 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c if (IS_ERR(map)) map 148 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c return PTR_ERR(map); map 150 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c map[offset / sizeof(*map)] = v; map 160 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c u32 *map; map 169 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c map = i915_gem_object_pin_map(obj, I915_MAP_WC); map 170 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c if (IS_ERR(map)) map 171 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c return PTR_ERR(map); map 173 drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c *v = map[offset / sizeof(*map)]; map 226 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c u32 *map; map 228 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c map = kmap_atomic(i915_gem_object_get_page(obj, n)); map 230 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c map[m] = value; map 232 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c drm_clflush_virt_range(map, PAGE_SIZE); map 233 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c kunmap_atomic(map); map 253 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c u32 *map; map 255 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c map = kmap_atomic(i915_gem_object_get_page(obj, n)); map 257 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c drm_clflush_virt_range(map, PAGE_SIZE); map 260 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c if (map[m] != m) { map 264 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c map[m], m); map 271 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c if (map[m] != STACK_MAGIC) { map 274 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c map[m], STACK_MAGIC); map 281 drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c kunmap_atomic(map); 
map 102 drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c .map = mock_dmabuf_kmap, map 19 drivers/gpu/drm/i915/gt/intel_engine_pm.c void *map; map 26 drivers/gpu/drm/i915/gt/intel_engine_pm.c map = NULL; map 28 drivers/gpu/drm/i915/gt/intel_engine_pm.c map = i915_gem_object_pin_map(engine->default_state, map 30 drivers/gpu/drm/i915/gt/intel_engine_pm.c if (!IS_ERR_OR_NULL(map)) map 31 drivers/gpu/drm/i915/gt/intel_engine_pm.c engine->pinned_default_state = map; map 95 drivers/gpu/drm/i915/gt/intel_engine_user.c } map[] = { map 117 drivers/gpu/drm/i915/gt/intel_engine_user.c for (i = 0; i < ARRAY_SIZE(map); i++) { map 118 drivers/gpu/drm/i915/gt/intel_engine_user.c if (engine->flags & BIT(map[i].engine)) map 119 drivers/gpu/drm/i915/gt/intel_engine_user.c enabled |= BIT(map[i].sched); map 121 drivers/gpu/drm/i915/gt/intel_engine_user.c disabled |= BIT(map[i].sched); map 155 drivers/gpu/drm/i915/gt/intel_engine_user.c } map[] = { map 162 drivers/gpu/drm/i915/gt/intel_engine_user.c if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map))) map 165 drivers/gpu/drm/i915/gt/intel_engine_user.c if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max)) map 168 drivers/gpu/drm/i915/gt/intel_engine_user.c return map[ring->class].base + ring->instance; map 312 drivers/gpu/drm/i915/gt/selftest_lrc.c u32 *map; map 340 drivers/gpu/drm/i915/gt/selftest_lrc.c map = i915_gem_object_pin_map(obj, I915_MAP_WC); map 341 drivers/gpu/drm/i915/gt/selftest_lrc.c if (IS_ERR(map)) { map 342 drivers/gpu/drm/i915/gt/selftest_lrc.c err = PTR_ERR(map); map 412 drivers/gpu/drm/i915/gt/selftest_lrc.c if (wait_for(READ_ONCE(*map), 10)) { map 459 drivers/gpu/drm/i915/gt/selftest_lrc.c GEM_BUG_ON(READ_ONCE(*map)); map 119 drivers/gpu/drm/i915/gvt/cfg_space.c static int map_aperture(struct intel_vgpu *vgpu, bool map) map 127 drivers/gpu/drm/i915/gvt/cfg_space.c if (map == vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked) map 141 drivers/gpu/drm/i915/gvt/cfg_space.c map); map 145 drivers/gpu/drm/i915/gvt/cfg_space.c vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].tracked = map; map 2939 drivers/gpu/drm/i915/gvt/cmd_parser.c void *map; map 2948 drivers/gpu/drm/i915/gvt/cmd_parser.c map = i915_gem_object_pin_map(obj, I915_MAP_WB); map 2949 drivers/gpu/drm/i915/gvt/cmd_parser.c if (IS_ERR(map)) { map 2951 drivers/gpu/drm/i915/gvt/cmd_parser.c ret = PTR_ERR(map); map 2966 drivers/gpu/drm/i915/gvt/cmd_parser.c map); map 2973 drivers/gpu/drm/i915/gvt/cmd_parser.c wa_ctx->indirect_ctx.shadow_va = map; map 66 drivers/gpu/drm/i915/gvt/hypercall.h unsigned long mfn, unsigned int nr, bool map); map 68 drivers/gpu/drm/i915/gvt/hypercall.h bool map); map 323 drivers/gpu/drm/i915/gvt/interrupt.c struct intel_gvt_irq_map *map = irq->irq_map; map 336 drivers/gpu/drm/i915/gvt/interrupt.c for (map = irq->irq_map; map->up_irq_bit != -1; map++) { map 337 drivers/gpu/drm/i915/gvt/interrupt.c if (info->group != map->down_irq_group) map 341 drivers/gpu/drm/i915/gvt/interrupt.c up_irq_info = irq->info[map->up_irq_group]; map 343 drivers/gpu/drm/i915/gvt/interrupt.c WARN_ON(up_irq_info != irq->info[map->up_irq_group]); map 345 drivers/gpu/drm/i915/gvt/interrupt.c bit = map->up_irq_bit; map 347 drivers/gpu/drm/i915/gvt/interrupt.c if (val & map->down_irq_bitmask) map 376 drivers/gpu/drm/i915/gvt/interrupt.c struct intel_gvt_irq_map *map; map 380 drivers/gpu/drm/i915/gvt/interrupt.c for (map = irq->irq_map; map->up_irq_bit != -1; map++) { map 381 drivers/gpu/drm/i915/gvt/interrupt.c up_info = irq->info[map->up_irq_group]; map 382 
drivers/gpu/drm/i915/gvt/interrupt.c up_bit = map->up_irq_bit; map 383 drivers/gpu/drm/i915/gvt/interrupt.c down_info = irq->info[map->down_irq_group]; map 390 drivers/gpu/drm/i915/gvt/interrupt.c down_info->group, map->down_irq_bitmask); map 271 drivers/gpu/drm/i915/gvt/mpt.h bool map) map 278 drivers/gpu/drm/i915/gvt/mpt.h map); map 292 drivers/gpu/drm/i915/gvt/mpt.h struct intel_vgpu *vgpu, u64 start, u64 end, bool map) map 298 drivers/gpu/drm/i915/gvt/mpt.h return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map); map 259 drivers/gpu/drm/i915/gvt/opregion.c static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) map 273 drivers/gpu/drm/i915/gvt/opregion.c mfn, 1, map); map 281 drivers/gpu/drm/i915/gvt/opregion.c vgpu_opregion(vgpu)->mapped = map; map 893 drivers/gpu/drm/i915/selftests/i915_vma.c u32 __iomem *map; map 911 drivers/gpu/drm/i915/selftests/i915_vma.c map = i915_vma_pin_iomap(vma); map 913 drivers/gpu/drm/i915/selftests/i915_vma.c if (IS_ERR(map)) { map 914 drivers/gpu/drm/i915/selftests/i915_vma.c err = PTR_ERR(map); map 928 drivers/gpu/drm/i915/selftests/i915_vma.c iowrite32(val, &map[offset / sizeof(*map)]); map 942 drivers/gpu/drm/i915/selftests/i915_vma.c map = i915_vma_pin_iomap(vma); map 944 drivers/gpu/drm/i915/selftests/i915_vma.c if (IS_ERR(map)) { map 945 drivers/gpu/drm/i915/selftests/i915_vma.c err = PTR_ERR(map); map 961 drivers/gpu/drm/i915/selftests/i915_vma.c val = ioread32(&map[offset / sizeof(*map)]); map 164 drivers/gpu/drm/ingenic/ingenic-drm.c struct regmap *map; map 230 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_STATE, 0); map 232 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, map 247 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, map 250 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_read_poll_timeout(priv->map, JZ_REG_LCD_STATE, var, map 270 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_VSYNC, map 274 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_HSYNC, map 278 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_VAT, map 282 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_DAH, map 285 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_DAV, map 290 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_PS, hde << 16 | (hde + 1)); map 291 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_CLS, hde << 16 | (hde + 1)); map 292 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_SPL, hpe << 16 | (hpe + 1)); map 293 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_REV, mode->htotal << 16); map 314 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, map 353 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_DA0, priv->dma_hwdesc->next); map 442 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_write(priv->map, JZ_REG_LCD_CFG, cfg); map 473 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_read(priv->map, JZ_REG_LCD_STATE, &state); map 475 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_update_bits(priv->map, JZ_REG_LCD_STATE, map 497 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, map 507 drivers/gpu/drm/ingenic/ingenic-drm.c regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_EOF_IRQ, 0); map 648 drivers/gpu/drm/ingenic/ingenic-drm.c priv->map 
= devm_regmap_init_mmio(dev, base, map 650 drivers/gpu/drm/ingenic/ingenic-drm.c if (IS_ERR(priv->map)) { map 652 drivers/gpu/drm/ingenic/ingenic-drm.c return PTR_ERR(priv->map); map 264 drivers/gpu/drm/mediatek/mtk_dpi.c enum mtk_dpi_out_yc_map map) map 268 drivers/gpu/drm/mediatek/mtk_dpi.c switch (map) { map 570 drivers/gpu/drm/mga/mga_dma.c if (_entry->map == dev->agp_buffer_map) map 970 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c const struct dpu_format *map = NULL; map 981 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c map = dpu_format_map; map 985 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c map = dpu_format_map_ubwc; map 996 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c if (format == map[i].base.pixel_format) { map 997 drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c fmt = &map[i]; map 141 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c .map = dpu_mdss_irqdomain_map, map 111 drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c .map = mdss_hw_irqdomain_map, map 78 drivers/gpu/drm/msm/msm_gem_vma.c ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, map 92 drivers/gpu/drm/msm/msm_gpummu.c .map = msm_gpummu_map, map 73 drivers/gpu/drm/msm/msm_iommu.c .map = msm_iommu_map, map 15 drivers/gpu/drm/msm/msm_mmu.h int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, map 161 drivers/gpu/drm/nouveau/dispnv50/disp.c dmac->ptr = dmac->push.object.map.ptr; map 36 drivers/gpu/drm/nouveau/dispnv50/lut.c void __iomem *mem = lut->mem[buffer].object.map.ptr; map 15 drivers/gpu/drm/nouveau/include/nvif/driver.h void __iomem *(*map)(void *priv, u64 handle, u32 size); map 21 drivers/gpu/drm/nouveau/include/nvif/object.h } map; map 45 drivers/gpu/drm/nouveau/include/nvif/object.h if (likely(_object->map.ptr)) \ map 46 drivers/gpu/drm/nouveau/include/nvif/object.h _data = f((u8 __iomem *)_object->map.ptr + (c)); \ map 53 drivers/gpu/drm/nouveau/include/nvif/object.h if (likely(_object->map.ptr)) \ map 54 drivers/gpu/drm/nouveau/include/nvif/object.h f((d), (u8 __iomem *)_object->map.ptr + (c)); \ map 23 drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h void __iomem *map; map 31 drivers/gpu/drm/nouveau/include/nvkm/core/gpuobj.h int (*map)(struct nvkm_gpuobj *, u64 offset, struct nvkm_vmm *, map 38 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h int (*map)(struct nvkm_memory *, u64 offset, struct nvkm_vmm *, map 65 drivers/gpu/drm/nouveau/include/nvkm/core/memory.h (p)->func->map((p),(o),(vm),(va),(av),(ac)) map 34 drivers/gpu/drm/nouveau/include/nvkm/core/object.h int (*map)(struct nvkm_object *, void *argv, u32 argc, map 1820 drivers/gpu/drm/nouveau/nouveau_bios.c u8 map[16] = { }; map 1846 drivers/gpu/drm/nouveau/nouveau_bios.c if (!map[i2c]) map 1847 drivers/gpu/drm/nouveau/nouveau_bios.c map[i2c] = ++idx; map 1848 drivers/gpu/drm/nouveau/nouveau_bios.c dcbt->entry[i].connector = map[i2c] - 1; map 117 drivers/gpu/drm/nouveau/nouveau_nvif.c .map = nvkm_client_map, map 187 drivers/gpu/drm/nouveau/nvif/object.c struct nvif_ioctl_map_v0 map; map 195 drivers/gpu/drm/nouveau/nvif/object.c memcpy(args->map.data, argv, argc); map 198 drivers/gpu/drm/nouveau/nvif/object.c *handle = args->map.handle; map 199 drivers/gpu/drm/nouveau/nvif/object.c *length = args->map.length; map 200 drivers/gpu/drm/nouveau/nvif/object.c maptype = args->map.type; map 209 drivers/gpu/drm/nouveau/nvif/object.c if (object->map.ptr) { map 210 drivers/gpu/drm/nouveau/nvif/object.c if (object->map.size) { map 211 drivers/gpu/drm/nouveau/nvif/object.c client->driver->unmap(client, object->map.ptr, map 212 
drivers/gpu/drm/nouveau/nvif/object.c object->map.size); map 213 drivers/gpu/drm/nouveau/nvif/object.c object->map.size = 0; map 215 drivers/gpu/drm/nouveau/nvif/object.c object->map.ptr = NULL; map 228 drivers/gpu/drm/nouveau/nvif/object.c object->map.ptr = client->driver->map(client, map 231 drivers/gpu/drm/nouveau/nvif/object.c if (ret = -ENOMEM, object->map.ptr) { map 232 drivers/gpu/drm/nouveau/nvif/object.c object->map.size = length; map 236 drivers/gpu/drm/nouveau/nvif/object.c object->map.ptr = (void *)(unsigned long)handle; map 275 drivers/gpu/drm/nouveau/nvif/object.c object->map.ptr = NULL; map 276 drivers/gpu/drm/nouveau/nvif/object.c object->map.size = 0; map 35 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c return ioread32_native(gpuobj->map + offset); map 41 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c iowrite32_native(data, gpuobj->map + offset); map 78 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c .map = nvkm_gpuobj_heap_map, map 86 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c .map = nvkm_gpuobj_heap_map, map 92 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c gpuobj->map = nvkm_kmap(gpuobj->memory); map 93 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c if (likely(gpuobj->map)) map 97 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c return gpuobj->map; map 103 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c .map = nvkm_gpuobj_heap_map, map 141 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c .map = nvkm_gpuobj_map, map 149 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c .map = nvkm_gpuobj_map, map 155 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c gpuobj->map = nvkm_kmap(gpuobj->parent); map 156 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c if (likely(gpuobj->map)) { map 157 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset; map 162 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c return gpuobj->map; map 168 drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c .map = nvkm_gpuobj_map, map 108 drivers/gpu/drm/nouveau/nvkm/core/object.c if (likely(object->func->map)) map 109 drivers/gpu/drm/nouveau/nvkm/core/object.c return object->func->map(object, argv, argc, type, addr, size); map 181 drivers/gpu/drm/nouveau/nvkm/core/oproxy.c .map = nvkm_oproxy_map, map 2896 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c void __iomem *map; map 2924 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c map = ioremap(mmio_base, 0x102000); map 2925 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c if (ret = -ENOMEM, map == NULL) map 2930 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c if (ioread32_native(map + 0x000004) != 0x00000000) { map 2932 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c if (ioread32_native(map + 0x000004) == 0x00000000) { map 2934 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c iowrite32_native(0x01000001, map + 0x000004); map 2935 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c ioread32_native(map); map 2939 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c boot0 = ioread32_native(map + 0x000000); map 2940 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c strap = ioread32_native(map + 0x101000); map 2941 drivers/gpu/drm/nouveau/nvkm/engine/device/base.c iounmap(map); map 390 drivers/gpu/drm/nouveau/nvkm/engine/device/user.c .map = nvkm_udevice_map, map 333 drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.c .map = nv50_disp_chan_map, map 346 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c .map = nvkm_fifo_chan_map, map 911 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c u32 *map; map 917 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c if (!(map = kcalloc(fifo->pbdma_nr, 
sizeof(*map), GFP_KERNEL))) map 921 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04)); map 928 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c if (map[j] & (1 << runl)) { map 945 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c kfree(map); map 37 drivers/gpu/drm/nouveau/nvkm/engine/fifo/usergv100.c .map = gv100_fifo_user_map, map 37 drivers/gpu/drm/nouveau/nvkm/engine/fifo/usertu102.c .map = tu102_fifo_user_map, map 81 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c u16 map = nvbios_rd16(bios, mxm + 4); map 82 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c if (map) { map 83 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c ver = nvbios_rd08(bios, map); map 85 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c if (conn < nvbios_rd08(bios, map + 3)) { map 86 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c map += nvbios_rd08(bios, map + 1); map 87 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c map += conn; map 88 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c return nvbios_rd08(bios, map); map 118 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c u16 map = nvbios_rd16(bios, mxm + 6); map 119 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c if (map) { map 120 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c ver = nvbios_rd08(bios, map); map 122 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c if (port < nvbios_rd08(bios, map + 3)) { map 123 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c map += nvbios_rd08(bios, map + 1); map 124 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c map += port; map 125 drivers/gpu/drm/nouveau/nvkm/subdev/bios/mxm.c return nvbios_rd08(bios, map); map 146 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c struct pll_mapping *map; map 163 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c map = pll_map(bios); map 164 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c while (map && map->reg) { map 165 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c if (map->reg == reg && *ver >= 0x20) { map 167 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c *type = map->type; map 169 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c if (nvbios_rd32(bios, data) == map->reg) map 175 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c if (map->reg == reg) { map 176 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c *type = map->type; map 179 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c map++; map 188 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c struct pll_mapping *map; map 208 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c map = pll_map(bios); map 209 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c while (map && map->reg) { map 210 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c if (map->type == type && *ver >= 0x20) { map 212 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c *reg = map->reg; map 214 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c if (nvbios_rd32(bios, data) == map->reg) map 220 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c if (map->type == type) { map 221 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c *reg = map->reg; map 224 drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c map++; map 82 drivers/gpu/drm/nouveau/nvkm/subdev/fault/user.c .map = nvkm_ufault_map, map 42 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c struct nvkm_vmm_map map = { map 48 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c return nvkm_vmm_map(vmm, vma, argv, argc, &map); map 100 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c .map = nvkm_vram_map, map 36 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c void __iomem *map; map 39 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c if (!(map = 
nvkm_kmap(memory))) { map 43 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c memcpy_toio(map, iobj->suspend, size); map 56 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c void __iomem *map; map 63 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c if (!(map = nvkm_kmap(memory))) { map 67 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c memcpy_fromio(iobj->suspend, map, size); map 111 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c void __iomem *map = nvkm_kmap(memory); map 112 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c if (unlikely(!map)) { map 116 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c memset_io(map, 0x00, size); map 285 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c struct nvkm_vmm_map map = { map 291 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c return nvkm_vmm_map(vmm, vma, argv, argc, &map); map 361 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c .map = gk20a_instobj_map, map 373 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c .map = gk20a_instobj_map, map 51 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c void *map; map 104 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c iowrite32_native(data, nv50_instobj(memory)->map + offset); map 110 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c return ioread32_native(nv50_instobj(memory)->map + offset); map 151 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c emap = eobj->map; map 152 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c eobj->map = NULL; map 174 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c iobj->map = ioremap_wc(device->func->resource_addr(device, 3) + map 176 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (!iobj->map) { map 204 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (likely(iobj->lru.next) && iobj->map) { map 221 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c void __iomem *map = NULL; map 225 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c return iobj->map; map 233 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c return iobj->map; map 238 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (!iobj->map) map 240 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c map = iobj->map; map 248 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (map) map 256 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c return map; map 316 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c void *map = map; map 321 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c map = iobj->map; map 325 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c if (map) { map 327 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c iounmap(map); map 347 drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c .map = nv50_instobj_map, map 41 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c const struct nvkm_mc_map *map; map 44 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c for (map = mc->func->intr; !mask && map->stat; map++) { map 45 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c if (map->unit == devidx) map 46 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c mask = map->stat; map 82 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c const struct nvkm_mc_map *map; map 99 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c for (map = mc->func->intr; map->stat; map++) { map 100 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c if (intr & map->stat) { map 101 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c subdev = nvkm_device_subdev(device, map->unit); map 104 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c stat &= ~map->stat; map 121 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c const struct nvkm_mc_map 
*map; map 125 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c for (map = mc->func->reset; map && map->stat; map++) { map 126 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c if (!isauto || !map->noauto) { map 127 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c if (map->unit == devidx) { map 128 drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c pmc_enable = map->stat; map 74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c struct nvkm_vmm_map map = { map 79 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c return nvkm_vmm_map(vmm, vma, argv, argc, &map); map 106 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c .map = nvkm_mem_map_dma, map 114 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c struct nvkm_vmm_map map = { map 119 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c return nvkm_vmm_map(vmm, vma, argv, argc, &map); map 129 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/mem.c .map = nvkm_mem_map_sgl, map 68 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c if (!umem->map) map 79 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c vunmap(umem->map); map 80 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c umem->map = NULL; map 95 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c if (umem->map) map 99 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c int ret = nvkm_mem_map_host(umem->memory, &umem->map); map 103 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c *handle = (unsigned long)(void *)umem->map; map 138 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.c .map = nvkm_umem_map, map 20 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/umem.h void *map; map 504 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_pte_func MAP_PTES, struct nvkm_vmm_map *map, map 567 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c MAP_PTES(vmm, pt, ptei, ptes, map); map 692 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 addr, u64 size, struct nvkm_vmm_map *map, map 696 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c false, nvkm_vmm_ref_ptes, func, map, NULL); map 718 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 addr, u64 size, struct nvkm_vmm_map *map, map 722 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c NULL, func, map, NULL); map 1152 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 addr, u64 size, u8 page, bool map) map 1158 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (prev->memory || prev->mapped != map) map 1164 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c next->memory || next->mapped != map) map 1239 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c bool map = !!(pfn[pi] & NVKM_VMM_PFN_V); map 1249 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (map != !!(pfn[pi + pn] & NVKM_VMM_PFN_V)) map 1274 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (map != mapped) { map 1277 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vmm->func->page, map); map 1283 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if ((tmp->mapped = map)) map 1291 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (map) { map 1377 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c void *argv, u32 argc, struct nvkm_vmm_map *map) map 1379 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c switch (nvkm_memory_target(map->memory)) { map 1381 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (!(map->page->type & NVKM_VMM_PAGE_VRAM)) { map 1382 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c VMM_DEBUG(vmm, "%d !VRAM", map->page->shift); map 1388 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (!(map->page->type & NVKM_VMM_PAGE_HOST)) { map 1389 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c VMM_DEBUG(vmm, "%d !HOST", map->page->shift); map 1398 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (!IS_ALIGNED( 
vma->addr, 1ULL << map->page->shift) || map 1399 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c !IS_ALIGNED((u64)vma->size, 1ULL << map->page->shift) || map 1400 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c !IS_ALIGNED( map->offset, 1ULL << map->page->shift) || map 1401 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_memory_page(map->memory) < map->page->shift) { map 1403 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vma->addr, (u64)vma->size, map->offset, map->page->shift, map 1404 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_memory_page(map->memory)); map 1408 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c return vmm->func->valid(vmm, argv, argc, map); map 1413 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c void *argv, u32 argc, struct nvkm_vmm_map *map) map 1415 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c for (map->page = vmm->func->page; map->page->shift; map->page++) { map 1416 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c VMM_DEBUG(vmm, "trying %d", map->page->shift); map 1417 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (!nvkm_vmm_map_valid(vmm, vma, argv, argc, map)) map 1425 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c void *argv, u32 argc, struct nvkm_vmm_map *map) map 1431 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (unlikely(nvkm_memory_size(map->memory) < map->offset + vma->size)) { map 1433 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_memory_size(map->memory), map 1434 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->offset, (u64)vma->size); map 1444 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c ret = nvkm_vmm_map_choose(vmm, vma, argv, argc, map); map 1448 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_map_choose(vmm, vma, argv, argc, map); map 1454 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->page = &vmm->func->page[vma->refd]; map 1456 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->page = &vmm->func->page[vma->page]; map 1458 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c ret = nvkm_vmm_map_valid(vmm, vma, argv, argc, map); map 1466 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->off = map->offset; map 1467 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (map->mem) { map 1468 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c for (; map->off; map->mem = map->mem->next) { map 1469 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 size = (u64)map->mem->length << NVKM_RAM_MM_SHIFT; map 1470 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (size > map->off) map 1472 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->off -= size; map 1474 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c func = map->page->desc->func->mem; map 1476 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (map->sgl) { map 1477 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c for (; map->off; map->sgl = sg_next(map->sgl)) { map 1478 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c u64 size = sg_dma_len(map->sgl); map 1479 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (size > map->off) map 1481 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->off -= size; map 1483 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c func = map->page->desc->func->sgl; map 1485 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->dma += map->offset >> PAGE_SHIFT; map 1486 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c map->off = map->offset & PAGE_MASK; map 1487 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c func = map->page->desc->func->dma; map 1492 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c ret = nvkm_vmm_ptes_get_map(vmm, map->page, vma->addr, vma->size, map, func); map 1496 
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vma->refd = map->page - vmm->func->page; map 1498 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c nvkm_vmm_ptes_map(vmm, map->page, vma->addr, vma->size, map, func); map 1503 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vma->memory = nvkm_memory_ref(map->memory); map 1505 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c vma->tags = map->tags; map 1511 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c struct nvkm_vmm_map *map) map 1515 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c ret = nvkm_vmm_map_locked(vmm, vma, argv, argc, map); map 1551 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c const bool map = next->mapped; map 1558 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c (next->mapped == map) && map 1563 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c if (map) { map 33 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) map 35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c u64 base = (addr >> 8) | map->type; map 38 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c if (map->ctag && !(map->next & (1ULL << 44))) { map 40 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c data = base | ((map->ctag >> 1) << 44); map 41 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c if (!(map->ctag++ & 1)) map 45 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c base += map->next; map 48 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type += ptes * map->ctag; map 52 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c data += map->next; map 59 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 61 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte); map 66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 68 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c if (map->page->shift == PAGE_SHIFT) { map 72 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c const u64 data = (*map->dma++ >> 8) | map->type; map 74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type += map->ctag; map 80 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte); map 85 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gf100_vmm_pgt_pte); map 239 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c struct nvkm_vmm_map *map) map 241 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c const enum nvkm_memory_target target = nvkm_memory_target(map->memory); map 242 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c const struct nvkm_vmm_page *page = map->page; map 249 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c struct nvkm_memory *memory = map->memory; map 254 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->next = (1 << page->shift) >> 8; map 255 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type = map->ctag = 0; map 293 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c &map->tags); map 299 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c if (map->tags->mn) { map 300 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c u64 tags = map->tags->mn->offset + (map->offset >> 17); map 302 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type |= tags << 44; map 303 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->ctag |= 1ULL << 44; map 304 
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->next |= 1ULL << 44; map 306 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->ctag |= tags << 1 | 1; map 313 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type |= BIT(0); map 314 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type |= (u64)priv << 1; map 315 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type |= (u64) ro << 2; map 316 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type |= (u64) vol << 32; map 317 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type |= (u64)aper << 33; map 318 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgf100.c map->type |= (u64)kind << 36; map 76 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 84 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c if (!(*map->pfn & NVKM_VMM_PFN_W)) map 87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) { map 88 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT; map 98 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4; map 103 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->pfn++; map 110 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) map 112 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u64 data = (addr >> 4) | map->type; map 114 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type += ptes * map->ctag; map 118 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c data += map->next; map 124 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 126 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte); map 131 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 133 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c if (map->page->shift == PAGE_SHIFT) { map 137 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c const u64 data = (*map->dma++ >> 4) | map->type; map 139 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type += map->ctag; map 145 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte); map 150 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 152 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pgt_pte); map 193 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) map 195 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u64 data = (addr >> 4) | map->type; map 197 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type += ptes * map->ctag; map 201 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c data += map->next; map 207 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 209 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, gp100_vmm_pd0_pte); map 313 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c struct nvkm_vmm_map *map) map 315 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c const enum nvkm_memory_target target = nvkm_memory_target(map->memory); map 316 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c const struct nvkm_vmm_page *page = map->page; map 322 
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c struct nvkm_memory *memory = map->memory; map 327 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->next = (1ULL << page->shift) >> 4; map 328 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type = 0; map 365 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c &map->tags); map 371 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c if (map->tags->mn) { map 372 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c tags = map->tags->mn->offset + (map->offset >> 16); map 373 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->ctag |= ((1ULL << page->shift) >> 16) << 36; map 374 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type |= tags << 36; map 375 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->next |= map->ctag; map 381 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type |= BIT(0); map 382 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type |= (u64)aper << 1; map 383 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type |= (u64) vol << 3; map 384 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type |= (u64)priv << 5; map 385 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type |= (u64) ro << 6; map 386 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c map->type |= (u64)kind << 56; map 29 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) map 40 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 42 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte); map 47 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 52 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c VMM_WO032(pt, vmm, 8 + (ptei++ * 4), *map->dma++ | 0x00000003); map 55 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv04_vmm_pgt_pte); map 81 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv04.c struct nvkm_vmm_map *map) map 28 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) map 39 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 41 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte); map 46 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 51 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c const u32 data = (*map->dma++ >> 7) | 0x00000001; map 56 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv41.c VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv41_vmm_pgt_pte); map 74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) map 106 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 108 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte); map 113 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 119 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c nv44_vmm_pgt_fill(vmm, pt, map->dma, ptei, pten); map 122 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c map->dma += pten; map 128 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c tmp[i] = *map->dma++ >> 12; map 137 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c nv44_vmm_pgt_fill(vmm, pt, 
map->dma, ptei, ptes); map 138 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c map->dma += ptes; map 142 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv44.c VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv44_vmm_pgt_pte); map 33 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr) map 35 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c u64 next = addr + map->type, data; map 39 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type += ptes * map->ctag; map 49 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c next += pten * map->next; map 59 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 61 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte); map 66 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 68 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c if (map->page->shift == PAGE_SHIFT) { map 72 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c const u64 data = *map->dma++ + map->type; map 74 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type += map->ctag; map 80 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c VMM_MAP_ITER_DMA(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte); map 85 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c u32 ptei, u32 ptes, struct nvkm_vmm_map *map) map 87 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c VMM_MAP_ITER_MEM(vmm, pt, ptei, ptes, map, nv50_vmm_pgt_pte); map 228 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c struct nvkm_vmm_map *map) map 230 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c const struct nvkm_vmm_page *page = map->page; map 237 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c struct nvkm_memory *memory = map->memory; map 242 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type = map->ctag = 0; map 243 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->next = 1 << page->shift; map 264 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= ram->stolen; map 287 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c if (map->mem && map->mem->type != kindm[kind]) { map 289 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c kindm[kind], map->mem->type); map 301 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c &map->tags); map 307 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c if (map->tags->mn) { map 308 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c u32 tags = map->tags->mn->offset + (map->offset >> 16); map 309 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->ctag |= (u64)comp << 49; map 310 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= (u64)comp << 47; map 311 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= (u64)tags << 49; map 312 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->next |= map->ctag; map 316 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= BIT(0); /* Valid. 
*/ map 317 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= (u64)ro << 3; map 318 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= (u64)aper << 4; map 319 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= (u64)priv << 6; map 320 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmnv50.c map->type |= (u64)kind << 40; map 1010 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a, map 1016 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c if (map[y][x] == ' ' || ovw) map 1017 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map[y][x] = c; map 1020 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p, map 1023 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map[p->y / ydiv][p->x / xdiv] = c; map 1026 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p) map 1028 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c return map[p->y / ydiv][p->x / xdiv]; map 1036 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1) map 1038 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c char *p = map[yd] + (x0 / xdiv); map 1047 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void map_1d_info(char **map, int xdiv, int ydiv, char *nice, map 1052 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0, map 1056 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c text_map(map, xdiv, nice, a->p0.y / ydiv, map 1059 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c text_map(map, xdiv, nice, a->p1.y / ydiv, map 1062 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x); map 1066 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c static void map_2d_info(char **map, int xdiv, int ydiv, char *nice, map 1071 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, map 1078 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c char **map = NULL, *global_map; map 1100 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map = kmalloc_array(h_adj, sizeof(*map), GFP_KERNEL); map 1103 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c if (!map || !global_map) map 1107 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c memset(map, 0, h_adj * sizeof(*map)); map 1111 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map[i] = global_map + i * (w_adj + 1); map 1112 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map[i][w_adj] = 0; map 1120 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c fill_map(map, xdiv, ydiv, &block->area, map 1126 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map_2d_info(map, xdiv, ydiv, nice, map 1129 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c bool start = read_map_pt(map, xdiv, map 1131 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c bool end = read_map_pt(map, xdiv, ydiv, map 1135 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c fill_map(map, xdiv, ydiv, &a, map 1137 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c fill_map_pt(map, xdiv, ydiv, map 1140 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c fill_map_pt(map, xdiv, ydiv, map 1143 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c map_1d_info(map, xdiv, ydiv, nice, map 1154 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c seq_printf(s, "%03d:%s\n", i, map[i]); map 1160 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]); map 1167 drivers/gpu/drm/omapdrm/omap_dmm_tiler.c kfree(map); map 126 drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c .map = omap_gem_dmabuf_kmap, map 37 drivers/gpu/drm/omapdrm/tcm-sita.c 
unsigned long *map, u16 stride) map 42 drivers/gpu/drm/omapdrm/tcm-sita.c bitmap_clear(map, pos, w); map 51 drivers/gpu/drm/omapdrm/tcm-sita.c static int r2l_b2t_1d(u16 w, unsigned long *pos, unsigned long *map, map 61 drivers/gpu/drm/omapdrm/tcm-sita.c bit = find_next_bit(map, num_bits, *pos); map 65 drivers/gpu/drm/omapdrm/tcm-sita.c bitmap_set(map, *pos, w); map 89 drivers/gpu/drm/omapdrm/tcm-sita.c unsigned long *map, size_t num_bits, size_t slot_stride) map 105 drivers/gpu/drm/omapdrm/tcm-sita.c *pos = bitmap_find_next_zero_area(map, num_bits, curr_bit, w, map 136 drivers/gpu/drm/omapdrm/tcm-sita.c if (bitmap_intersects(&map[index], mask, map 156 drivers/gpu/drm/omapdrm/tcm-sita.c bitmap_set(map, index, w); map 118 drivers/gpu/drm/panel/panel-arm-versatile.c struct regmap *map; map 295 drivers/gpu/drm/panel/panel-arm-versatile.c struct regmap *map; map 305 drivers/gpu/drm/panel/panel-arm-versatile.c map = syscon_node_to_regmap(parent->of_node); map 306 drivers/gpu/drm/panel/panel-arm-versatile.c if (IS_ERR(map)) { map 308 drivers/gpu/drm/panel/panel-arm-versatile.c return PTR_ERR(map); map 315 drivers/gpu/drm/panel/panel-arm-versatile.c ret = regmap_read(map, SYS_CLCD, &val); map 341 drivers/gpu/drm/panel/panel-arm-versatile.c vpanel->map = map; map 60 drivers/gpu/drm/panel/panel-novatek-nt39016.c struct regmap *map; map 145 drivers/gpu/drm/panel/panel-novatek-nt39016.c err = regmap_multi_reg_write(panel->map, nt39016_panel_regs, map 175 drivers/gpu/drm/panel/panel-novatek-nt39016.c ret = regmap_write(panel->map, NT39016_REG_SYSTEM, map 199 drivers/gpu/drm/panel/panel-novatek-nt39016.c err = regmap_write(panel->map, NT39016_REG_SYSTEM, map 281 drivers/gpu/drm/panel/panel-novatek-nt39016.c panel->map = devm_regmap_init_spi(spi, &nt39016_regmap_config); map 282 drivers/gpu/drm/panel/panel-novatek-nt39016.c if (IS_ERR(panel->map)) { map 284 drivers/gpu/drm/panel/panel-novatek-nt39016.c return PTR_ERR(panel->map); map 265 drivers/gpu/drm/panfrost/panfrost_mmu.c ops->map(ops, iova, paddr, pgsize, prot); map 316 drivers/gpu/drm/pl111/pl111_versatile.c struct regmap *map; map 346 drivers/gpu/drm/pl111/pl111_versatile.c map = dev_get_drvdata(&pdev->dev); map 347 drivers/gpu/drm/pl111/pl111_versatile.c if (!map) { map 354 drivers/gpu/drm/pl111/pl111_versatile.c map = syscon_node_to_regmap(np); map 358 drivers/gpu/drm/pl111/pl111_versatile.c if (IS_ERR(map)) { map 360 drivers/gpu/drm/pl111/pl111_versatile.c return PTR_ERR(map); map 365 drivers/gpu/drm/pl111/pl111_versatile.c versatile_syscon_map = map; map 371 drivers/gpu/drm/pl111/pl111_versatile.c versatile_syscon_map = map; map 390 drivers/gpu/drm/pl111/pl111_versatile.c versatile_syscon_map = map; map 399 drivers/gpu/drm/pl111/pl111_versatile.c ret = pl111_vexpress_clcd_init(dev, priv, map); map 27 drivers/gpu/drm/pl111/pl111_vexpress.c struct regmap *map) map 89 drivers/gpu/drm/pl111/pl111_vexpress.c ret = regmap_write(map, 0, val); map 106 drivers/gpu/drm/pl111/pl111_vexpress.c struct regmap *map; map 108 drivers/gpu/drm/pl111/pl111_vexpress.c map = devm_regmap_init_vexpress_config(&pdev->dev); map 109 drivers/gpu/drm/pl111/pl111_vexpress.c if (IS_ERR(map)) map 110 drivers/gpu/drm/pl111/pl111_vexpress.c return PTR_ERR(map); map 111 drivers/gpu/drm/pl111/pl111_vexpress.c dev_set_drvdata(dev, map); map 11 drivers/gpu/drm/pl111/pl111_vexpress.h struct regmap *map); map 19 drivers/gpu/drm/pl111/pl111_vexpress.h struct regmap *map) map 154 drivers/gpu/drm/qxl/qxl_object.c struct io_mapping *map; map 157 drivers/gpu/drm/qxl/qxl_object.c map = 
qdev->vram_mapping; map 159 drivers/gpu/drm/qxl/qxl_object.c map = qdev->surface_mapping; map 167 drivers/gpu/drm/qxl/qxl_object.c return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); map 97 drivers/gpu/drm/qxl/qxl_object.h void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); map 228 drivers/gpu/drm/sis/sis_mm.c drm_local_map_t *map; map 231 drivers/gpu/drm/sis/sis_mm.c map = entry->map; map 232 drivers/gpu/drm/sis/sis_mm.c if (!map) map 234 drivers/gpu/drm/sis/sis_mm.c if (map->type == _DRM_REGISTERS) map 235 drivers/gpu/drm/sis/sis_mm.c return map; map 153 drivers/gpu/drm/sun4i/sun8i_csc.c static void sun8i_csc_set_coefficients(struct regmap *map, u32 base, map 174 drivers/gpu/drm/sun4i/sun8i_csc.c regmap_bulk_write(map, base_reg, table, 12); map 177 drivers/gpu/drm/sun4i/sun8i_csc.c static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer, map 198 drivers/gpu/drm/sun4i/sun8i_csc.c regmap_bulk_write(map, base_reg, table, 12); map 201 drivers/gpu/drm/sun4i/sun8i_csc.c static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable) map 210 drivers/gpu/drm/sun4i/sun8i_csc.c regmap_update_bits(map, SUN8I_CSC_CTRL(base), SUN8I_CSC_CTRL_EN, val); map 213 drivers/gpu/drm/sun4i/sun8i_csc.c static void sun8i_de3_ccsc_enable(struct regmap *map, int layer, bool enable) map 224 drivers/gpu/drm/sun4i/sun8i_csc.c regmap_update_bits(map, SUN50I_MIXER_BLEND_CSC_CTL(DE3_BLD_BASE), map 870 drivers/gpu/drm/sun4i/sun8i_vi_scaler.c static void sun8i_vi_scaler_set_coeff(struct regmap *map, u32 base, map 890 drivers/gpu/drm/sun4i/sun8i_vi_scaler.c regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF0(base, i), map 892 drivers/gpu/drm/sun4i/sun8i_vi_scaler.c regmap_write(map, SUN8I_SCALER_VSU_YHCOEFF1(base, i), map 894 drivers/gpu/drm/sun4i/sun8i_vi_scaler.c regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF0(base, i), map 896 drivers/gpu/drm/sun4i/sun8i_vi_scaler.c regmap_write(map, SUN8I_SCALER_VSU_CHCOEFF1(base, i), map 903 drivers/gpu/drm/sun4i/sun8i_vi_scaler.c regmap_write(map, SUN8I_SCALER_VSU_YVCOEFF(base, i), map 905 drivers/gpu/drm/sun4i/sun8i_vi_scaler.c regmap_write(map, SUN8I_SCALER_VSU_CVCOEFF(base, i), map 625 drivers/gpu/drm/tegra/gem.c .map = tegra_gem_prime_kmap, map 558 drivers/gpu/drm/ttm/ttm_bo_util.c struct ttm_bo_kmap_obj *map) map 563 drivers/gpu/drm/ttm/ttm_bo_util.c map->bo_kmap_type = ttm_bo_map_premapped; map 564 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); map 566 drivers/gpu/drm/ttm/ttm_bo_util.c map->bo_kmap_type = ttm_bo_map_iomap; map 568 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset, map 571 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset, map 574 drivers/gpu/drm/ttm/ttm_bo_util.c return (!map->virtual) ? -ENOMEM : 0; map 580 drivers/gpu/drm/ttm/ttm_bo_util.c struct ttm_bo_kmap_obj *map) map 603 drivers/gpu/drm/ttm/ttm_bo_util.c map->bo_kmap_type = ttm_bo_map_kmap; map 604 drivers/gpu/drm/ttm/ttm_bo_util.c map->page = ttm->pages[start_page]; map 605 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = kmap(map->page); map 612 drivers/gpu/drm/ttm/ttm_bo_util.c map->bo_kmap_type = ttm_bo_map_vmap; map 613 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = vmap(ttm->pages + start_page, num_pages, map 616 drivers/gpu/drm/ttm/ttm_bo_util.c return (!map->virtual) ? 
-ENOMEM : 0; map 621 drivers/gpu/drm/ttm/ttm_bo_util.c struct ttm_bo_kmap_obj *map) map 628 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = NULL; map 629 drivers/gpu/drm/ttm/ttm_bo_util.c map->bo = bo; map 641 drivers/gpu/drm/ttm/ttm_bo_util.c return ttm_bo_kmap_ttm(bo, start_page, num_pages, map); map 645 drivers/gpu/drm/ttm/ttm_bo_util.c return ttm_bo_ioremap(bo, offset, size, map); map 650 drivers/gpu/drm/ttm/ttm_bo_util.c void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map) map 652 drivers/gpu/drm/ttm/ttm_bo_util.c struct ttm_buffer_object *bo = map->bo; map 656 drivers/gpu/drm/ttm/ttm_bo_util.c if (!map->virtual) map 658 drivers/gpu/drm/ttm/ttm_bo_util.c switch (map->bo_kmap_type) { map 660 drivers/gpu/drm/ttm/ttm_bo_util.c iounmap(map->virtual); map 663 drivers/gpu/drm/ttm/ttm_bo_util.c vunmap(map->virtual); map 666 drivers/gpu/drm/ttm/ttm_bo_util.c kunmap(map->page); map 674 drivers/gpu/drm/ttm/ttm_bo_util.c ttm_mem_io_free(map->bo->bdev, &map->bo->mem); map 676 drivers/gpu/drm/ttm/ttm_bo_util.c map->virtual = NULL; map 677 drivers/gpu/drm/ttm/ttm_bo_util.c map->page = NULL; map 333 drivers/gpu/drm/ttm/ttm_bo_vm.c struct ttm_bo_kmap_obj map; map 337 drivers/gpu/drm/ttm/ttm_bo_vm.c ret = ttm_bo_kmap(bo, page, 1, &map); map 341 drivers/gpu/drm/ttm/ttm_bo_vm.c ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset; map 347 drivers/gpu/drm/ttm/ttm_bo_vm.c ttm_bo_kunmap(&map); map 169 drivers/gpu/drm/udl/udl_dmabuf.c .map = udl_dmabuf_kmap, map 55 drivers/gpu/drm/vc4/vc4_drv.c void __iomem *map; map 58 drivers/gpu/drm/vc4/vc4_drv.c map = devm_ioremap_resource(&dev->dev, res); map 59 drivers/gpu/drm/vc4/vc4_drv.c if (IS_ERR(map)) { map 60 drivers/gpu/drm/vc4/vc4_drv.c DRM_ERROR("Failed to map registers: %ld\n", PTR_ERR(map)); map 61 drivers/gpu/drm/vc4/vc4_drv.c return map; map 64 drivers/gpu/drm/vc4/vc4_drv.c return map; map 171 drivers/gpu/drm/via/via_dma.c drm_legacy_ioremapfree(&dev_priv->ring.map, dev); map 204 drivers/gpu/drm/via/via_dma.c dev_priv->ring.map.offset = dev->agp->base + init->offset; map 205 drivers/gpu/drm/via/via_dma.c dev_priv->ring.map.size = init->size; map 206 drivers/gpu/drm/via/via_dma.c dev_priv->ring.map.type = 0; map 207 drivers/gpu/drm/via/via_dma.c dev_priv->ring.map.flags = 0; map 208 drivers/gpu/drm/via/via_dma.c dev_priv->ring.map.mtrr = 0; map 210 drivers/gpu/drm/via/via_dma.c drm_legacy_ioremap(&dev_priv->ring.map, dev); map 212 drivers/gpu/drm/via/via_dma.c if (dev_priv->ring.map.handle == NULL) { map 219 drivers/gpu/drm/via/via_dma.c dev_priv->ring.virtual_start = dev_priv->ring.map.handle; map 57 drivers/gpu/drm/via/via_drv.h drm_local_map_t map; map 259 drivers/gpu/drm/via/via_verifier.c drm_local_map_t *map = seq->map_cache; map 261 drivers/gpu/drm/via/via_verifier.c if (map && map->offset <= offset map 262 drivers/gpu/drm/via/via_verifier.c && (offset + size) <= (map->offset + map->size)) { map 263 drivers/gpu/drm/via/via_verifier.c return map; map 267 drivers/gpu/drm/via/via_verifier.c map = r_list->map; map 268 drivers/gpu/drm/via/via_verifier.c if (!map) map 270 drivers/gpu/drm/via/via_verifier.c if (map->offset <= offset map 271 drivers/gpu/drm/via/via_verifier.c && (offset + size) <= (map->offset + map->size) map 272 drivers/gpu/drm/via/via_verifier.c && !(map->flags & _DRM_RESTRICTED) map 273 drivers/gpu/drm/via/via_verifier.c && (map->type == _DRM_AGP)) { map 274 drivers/gpu/drm/via/via_verifier.c seq->map_cache = map; map 275 drivers/gpu/drm/via/via_verifier.c return map; map 394 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c virtual = 
ttm_kmap_obj_virtual(&vbo->map, &not_used); map 398 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map); map 402 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c return ttm_kmap_obj_virtual(&vbo->map, &not_used); map 416 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c if (vbo->map.bo == NULL) map 419 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c ttm_bo_kunmap(&vbo->map); map 120 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c u8 *map; map 887 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c header->cmd = man->map + offset; map 1233 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size, map 1235 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c if (man->map) { map 1259 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy); map 1390 drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c man->size, man->map, man->handle); map 352 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c struct ttm_bo_kmap_obj map; map 375 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); map 377 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c result = ttm_kmap_obj_virtual(&map, &dummy); map 381 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c ttm_bo_kunmap(&map); map 116 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h struct ttm_bo_kmap_obj map; map 96 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c struct ttm_bo_kmap_obj map; map 112 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map); map 116 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c virtual = ttm_kmap_obj_virtual(&map, &dummy); map 120 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ttm_bo_kunmap(&map); map 149 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c struct ttm_bo_kmap_obj map; map 207 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); map 211 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c virtual = ttm_kmap_obj_virtual(&map, &dummy); map 225 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ttm_bo_kunmap(&map); map 99 drivers/gpu/drm/vmwgfx/vmwgfx_prime.c .map = vmw_prime_dmabuf_kmap, map 968 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c struct ttm_bo_kmap_obj map; map 992 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c &map); map 998 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size); map 1001 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c ttm_bo_kunmap(&map); map 120 drivers/gpu/ipu-v3/ipu-dc.c int map, int wave, int glue, int sync, int stop) map 132 drivers/gpu/ipu-v3/ipu-dc.c reg1 = sync | glue << 4 | ++wave << 11 | ++map << 15 | ((operand << 20) & 0xfff00000); map 166 drivers/gpu/ipu-v3/ipu-dc.c int map; map 170 drivers/gpu/ipu-v3/ipu-dc.c map = ipu_bus_format_to_map(bus_format); map 192 drivers/gpu/ipu-v3/ipu-dc.c dc_write_tmpl(dc, addr, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); map 199 drivers/gpu/ipu-v3/ipu-dc.c dc_write_tmpl(dc, addr + 2, WROD(0), 0, map, SYNC_WAVE, 8, sync, 1); map 200 drivers/gpu/ipu-v3/ipu-dc.c dc_write_tmpl(dc, addr + 3, WROD(0), 0, map, SYNC_WAVE, 4, sync, 0); map 201 drivers/gpu/ipu-v3/ipu-dc.c dc_write_tmpl(dc, addr + 4, WRG, 0, map, NULL_WAVE, 0, 0, 1); map 202 drivers/gpu/ipu-v3/ipu-dc.c dc_write_tmpl(dc, addr + 1, WROD(0), 0, map, SYNC_WAVE, 0, sync, 1); map 278 drivers/gpu/ipu-v3/ipu-dc.c static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map, map 281 drivers/gpu/ipu-v3/ipu-dc.c int ptr = map * 3 + byte_num; map 289 drivers/gpu/ipu-v3/ipu-dc.c reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map)); map 290 drivers/gpu/ipu-v3/ipu-dc.c reg &= ~(0x1f << ((16 * (map & 0x1)) + (5 * byte_num))); map 291 drivers/gpu/ipu-v3/ipu-dc.c reg |= ptr << ((16 * (map & 0x1))
+ (5 * byte_num)); map 292 drivers/gpu/ipu-v3/ipu-dc.c writel(reg, priv->dc_reg + DC_MAP_CONF_PTR(map)); map 295 drivers/gpu/ipu-v3/ipu-dc.c static void ipu_dc_map_clear(struct ipu_dc_priv *priv, int map) map 297 drivers/gpu/ipu-v3/ipu-dc.c u32 reg = readl(priv->dc_reg + DC_MAP_CONF_PTR(map)); map 299 drivers/gpu/ipu-v3/ipu-dc.c writel(reg & ~(0xffff << (16 * (map & 0x1))), map 300 drivers/gpu/ipu-v3/ipu-dc.c priv->dc_reg + DC_MAP_CONF_PTR(map)); map 620 drivers/hid/hid-rmi.c .map = rmi_irq_map, map 502 drivers/hwmon/asc7621.c static const u8 map[] = { map 513 drivers/hwmon/asc7621.c return sprintf(buf, "%u\n", map[clamp_val(regval, 0, 15)]); map 523 drivers/hwmon/asc7621.c static const u16 map[] = { map 536 drivers/hwmon/asc7621.c reqval = map[reqval]; map 1450 drivers/hwmon/it87.c int map; map 1452 drivers/hwmon/it87.c map = data->pwm_temp_map[nr]; map 1453 drivers/hwmon/it87.c if (map >= 3) map 1454 drivers/hwmon/it87.c map = 0; /* Should never happen */ map 1456 drivers/hwmon/it87.c map += 3; map 1458 drivers/hwmon/it87.c return sprintf(buf, "%d\n", (int)BIT(map)); map 200 drivers/hwmon/lm85.c static int FREQ_TO_REG(const int *map, map 203 drivers/hwmon/lm85.c return find_closest(freq, map, map_size); map 206 drivers/hwmon/lm85.c static int FREQ_FROM_REG(const int *map, unsigned int map_size, u8 reg) map 208 drivers/hwmon/lm85.c return map[reg % map_size]; map 1269 drivers/i2c/i2c-core-base.c .map = i2c_host_notify_irq_map, map 606 drivers/iio/accel/kxcjk-1013.c const struct kx_odr_map *map, size_t map_size, int val, int val2) map 611 drivers/iio/accel/kxcjk-1013.c if (map[i].val == val && map[i].val2 == val2) map 612 drivers/iio/accel/kxcjk-1013.c return &map[i]; map 618 drivers/iio/accel/kxcjk-1013.c static int kxcjk1013_convert_odr_value(const struct kx_odr_map *map, map 625 drivers/iio/accel/kxcjk-1013.c if (map[i].odr_bits == odr_bits) { map 626 drivers/iio/accel/kxcjk-1013.c *val = map[i].val; map 627 drivers/iio/accel/kxcjk-1013.c *val2 = map[i].val2; map 74 drivers/iio/accel/kxsd9.c struct regmap *map; map 110 drivers/iio/accel/kxsd9.c ret = regmap_update_bits(st->map, map 173 drivers/iio/accel/kxsd9.c ret = regmap_bulk_read(st->map, chan->address, &raw_val, map 189 drivers/iio/accel/kxsd9.c ret = regmap_read(st->map, map 216 drivers/iio/accel/kxsd9.c ret = regmap_bulk_read(st->map, map 331 drivers/iio/accel/kxsd9.c ret = regmap_write(st->map, map 341 drivers/iio/accel/kxsd9.c ret = regmap_write(st->map, map 369 drivers/iio/accel/kxsd9.c ret = regmap_update_bits(st->map, map 396 drivers/iio/accel/kxsd9.c struct regmap *map, map 409 drivers/iio/accel/kxsd9.c st->map = map; map 9 drivers/iio/accel/kxsd9.h struct regmap *map, map 29 drivers/iio/adc/lp8788_adc.c struct iio_map *map; map 170 drivers/iio/adc/lp8788_adc.c struct iio_map *map; map 173 drivers/iio/adc/lp8788_adc.c map = (!pdata || !pdata->adc_pdata) ? 
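
In the hwmon and IIO hits just above (asc7621, lm85 FREQ_FROM_REG/FREQ_TO_REG, kxcjk-1013 ODR conversion), "map" is a plain lookup table rather than a struct regmap handle. Below is a minimal, self-contained sketch of that table-lookup idiom in plain C; the names odr_entry, find_odr_entry and freq_from_reg are hypothetical and are not taken from any of the drivers listed here.

    #include <stddef.h>

    struct odr_entry {
            int val;                /* integer part of the output data rate */
            int val2;               /* fractional (micro) part of the rate */
            unsigned char odr_bits; /* register encoding for this rate */
    };

    /* Forward lookup: find the table entry matching a requested rate. */
    static const struct odr_entry *find_odr_entry(const struct odr_entry *map,
                                                  size_t map_size,
                                                  int val, int val2)
    {
            size_t i;

            for (i = 0; i < map_size; i++)
                    if (map[i].val == val && map[i].val2 == val2)
                            return &map[i];
            return NULL;            /* no matching rate in the table */
    }

    /* Reverse lookup in the lm85 style: index the table with the register field. */
    static int freq_from_reg(const int *map, size_t map_size, unsigned char reg)
    {
            return map[reg % map_size];
    }

The forward lookup yields the register encoding to program; the reverse lookup turns a read-back register field into the value reported to user space.
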
map 176 drivers/iio/adc/lp8788_adc.c ret = iio_map_array_register(indio_dev, map); map 182 drivers/iio/adc/lp8788_adc.c adc->map = map; map 403 drivers/iio/adc/qcom-pm8xxx-xoadc.c struct regmap *map; map 455 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_AMUX_CNTRL, val); map 492 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_update_bits(adc->map, map 499 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM, map 505 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_DIG_PARAM, map 512 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_ANA_PARAM, map 518 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, map 522 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, map 530 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, map 545 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_read(adc->map, ADC_ARB_USRP_DATA0, &val); map 549 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_read(adc->map, ADC_ARB_USRP_DATA1, &val); map 556 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0); map 559 drivers/iio/adc/qcom-pm8xxx-xoadc.c ret = regmap_write(adc->map, ADC_ARB_USRP_CNTRL, 0); map 887 drivers/iio/adc/qcom-pm8xxx-xoadc.c struct regmap *map; map 910 drivers/iio/adc/qcom-pm8xxx-xoadc.c map = dev_get_regmap(dev->parent, NULL); map 911 drivers/iio/adc/qcom-pm8xxx-xoadc.c if (!map) { map 915 drivers/iio/adc/qcom-pm8xxx-xoadc.c adc->map = map; map 366 drivers/iio/adc/stm32-adc-core.c .map = stm32_adc_domain_map, map 198 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, map 204 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, map 214 drivers/iio/gyro/mpu3050-core.c ret = regmap_bulk_write(mpu3050->map, MPU3050_X_OFFS_USR_H, raw_val, map 220 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, MPU3050_DLPF_FS_SYNC, map 228 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, MPU3050_SMPLRT_DIV, mpu3050->divisor); map 323 drivers/iio/gyro/mpu3050-core.c ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, map 336 drivers/iio/gyro/mpu3050-core.c ret = regmap_bulk_read(mpu3050->map, map 495 drivers/iio/gyro/mpu3050-core.c ret = regmap_bulk_read(mpu3050->map, map 508 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, map 549 drivers/iio/gyro/mpu3050-core.c ret = regmap_bulk_read(mpu3050->map, map 576 drivers/iio/gyro/mpu3050-core.c ret = regmap_bulk_read(mpu3050->map, map 623 drivers/iio/gyro/mpu3050-core.c ret = regmap_bulk_read(mpu3050->map, MPU3050_TEMP_H, &hw_values, map 768 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, map 774 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, map 780 drivers/iio/gyro/mpu3050-core.c return regmap_bulk_read(mpu3050->map, map 792 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, map 800 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, map 808 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, map 863 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, map 885 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, map 921 drivers/iio/gyro/mpu3050-core.c ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val); map 950 
drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, map 957 drivers/iio/gyro/mpu3050-core.c ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val); map 962 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0); map 966 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, MPU3050_USR_CTRL, map 982 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, 0); map 987 drivers/iio/gyro/mpu3050-core.c ret = regmap_update_bits(mpu3050->map, MPU3050_USR_CTRL, map 998 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, MPU3050_FIFO_EN, map 1013 drivers/iio/gyro/mpu3050-core.c ret = regmap_read(mpu3050->map, MPU3050_INT_STATUS, &val); map 1027 drivers/iio/gyro/mpu3050-core.c ret = regmap_write(mpu3050->map, MPU3050_INT_CFG, val); map 1130 drivers/iio/gyro/mpu3050-core.c struct regmap *map, map 1145 drivers/iio/gyro/mpu3050-core.c mpu3050->map = map; map 1172 drivers/iio/gyro/mpu3050-core.c ret = regmap_read(map, MPU3050_CHIP_ID_REG, &val); map 1187 drivers/iio/gyro/mpu3050-core.c ret = regmap_read(map, MPU3050_PRODUCT_ID_REG, &val); map 71 drivers/iio/gyro/mpu3050.h struct regmap *map; map 91 drivers/iio/gyro/mpu3050.h struct regmap *map, map 196 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_read(st->map, st->reg->pwr_mgmt_1, &mgmt_1); map 209 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->pwr_mgmt_1, mgmt_1); map 214 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_read(st->map, st->reg->pwr_mgmt_2, &d); map 221 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->pwr_mgmt_2, d); map 231 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, map 247 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->pwr_mgmt_1, 0); map 256 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->pwr_mgmt_1, map 264 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c dev_dbg(regmap_get_device(st->map), "set power %d, count=%u\n", map 282 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->lpf, val); map 295 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->accel_lpf, val); map 321 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->gyro_config, d); map 330 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->sample_rate_div, d); map 335 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->accl_config, d); map 339 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask); map 367 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_bulk_write(st->map, reg + ind, (u8 *)&d, 2); map 381 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_bulk_read(st->map, reg + ind, (u8 *)&d, 2); map 528 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->gyro_config, d); map 566 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->accl_config, d); map 714 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->sample_rate_div, d); map 982 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval); map 989 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c dev_warn(regmap_get_device(st->map), map 998 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
dev_err(regmap_get_device(st->map), map 1006 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c result = regmap_write(st->map, st->reg->pwr_mgmt_1, map 1044 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c dev_err(regmap_get_device(st->map), map 1060 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c dev_err(regmap_get_device(st->map), map 1097 drivers/iio/imu/inv_mpu6050/inv_mpu_core.c st->map = regmap; map 32 drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c ret = regmap_write(st->map, st->reg->int_pin_cfg, map 49 drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c regmap_write(st->map, st->reg->int_pin_cfg, st->irq_mask); map 148 drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h struct regmap *map; map 103 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->int_enable, 0); map 105 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c dev_err(regmap_get_device(st->map), "int_enable failed %d\n", map 110 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->fifo_en, 0); map 114 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->user_ctrl, map 121 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->user_ctrl, d); map 128 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->int_enable, map 135 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->user_ctrl, d); map 144 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->fifo_en, d); map 151 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c dev_err(regmap_get_device(st->map), "reset fifo failed %d\n", result); map 152 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_write(st->map, st->reg->int_enable, map 177 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_read(st->map, st->reg->int_status, &int_status); map 179 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c dev_err(regmap_get_device(st->map), map 184 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c dev_warn(regmap_get_device(st->map), map 206 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_bulk_read(st->map, st->reg->fifo_count_h, data, map 219 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c dev_warn(regmap_get_device(st->map), "fifo overflow reset\n"); map 227 drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c result = regmap_bulk_read(st->map, st->reg->fifo_r_w, map 27 drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c ret = regmap_write(st->map, st->reg->i2c_if, map 31 drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c ret = regmap_write(st->map, st->reg->user_ctrl, map 63 drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c result = regmap_write(st->map, st->reg->fifo_en, 0); map 67 drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c result = regmap_write(st->map, st->reg->int_enable, 0); map 71 drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c result = regmap_write(st->map, st->reg->user_ctrl, map 149 drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c st->trig->dev.parent = regmap_get_device(st->map); map 20 drivers/iio/inkern.c struct iio_map *map; map 42 drivers/iio/inkern.c mapi->map = &maps[i]; map 290 drivers/iio/inkern.c if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) || map 292 drivers/iio/inkern.c strcmp(channel_name, c_i->map->consumer_channel) != 0)) map 310 drivers/iio/inkern.c if (c->map->adc_channel_label) { map 313 drivers/iio/inkern.c c->map->adc_channel_label); map 425 drivers/iio/inkern.c if (name && strcmp(name, c->map->consumer_dev_name) != 0) map 444 drivers/iio/inkern.c if (name && strcmp(name, c->map->consumer_dev_name) != 0) map 447 
drivers/iio/inkern.c chans[mapind].data = c->map->consumer_data; map 450 drivers/iio/inkern.c c->map->adc_channel_label); map 73 drivers/iio/light/pa12203001.c struct regmap *map; map 133 drivers/iio/light/pa12203001.c ret = regmap_update_bits(data->map, PA12203001_REG_CFG0, map 147 drivers/iio/light/pa12203001.c ret = regmap_update_bits(data->map, PA12203001_REG_CFG0, map 230 drivers/iio/light/pa12203001.c ret = regmap_bulk_read(data->map, PA12203001_REG_ADL, map 245 drivers/iio/light/pa12203001.c ret = regmap_read(data->map, PA12203001_REG_PDH, map 260 drivers/iio/light/pa12203001.c ret = regmap_read(data->map, PA12203001_REG_CFG0, &reg_byte); map 286 drivers/iio/light/pa12203001.c ret = regmap_read(data->map, PA12203001_REG_CFG0, &reg_byte); map 292 drivers/iio/light/pa12203001.c return regmap_update_bits(data->map, map 318 drivers/iio/light/pa12203001.c ret = regmap_write(data->map, regvals[i].reg, regvals[i].val); map 359 drivers/iio/light/pa12203001.c data->map = devm_regmap_init_i2c(client, &pa12203001_regmap_config); map 360 drivers/iio/light/pa12203001.c if (IS_ERR(data->map)) map 361 drivers/iio/light/pa12203001.c return PTR_ERR(data->map); map 180 drivers/iio/magnetometer/ak8974.c struct regmap *map; map 198 drivers/iio/magnetometer/ak8974.c ret = regmap_bulk_read(ak8974->map, reg, &bulk, 2); map 210 drivers/iio/magnetometer/ak8974.c return regmap_bulk_write(ak8974->map, reg, &bulk, 2); map 220 drivers/iio/magnetometer/ak8974.c ret = regmap_write(ak8974->map, AK8974_CTRL1, val); map 238 drivers/iio/magnetometer/ak8974.c ret = regmap_write(ak8974->map, AK8974_CTRL2, AK8974_CTRL2_RESDEF); map 241 drivers/iio/magnetometer/ak8974.c ret = regmap_write(ak8974->map, AK8974_CTRL3, AK8974_CTRL3_RESDEF); map 244 drivers/iio/magnetometer/ak8974.c ret = regmap_write(ak8974->map, AK8974_INT_CTRL, map 257 drivers/iio/magnetometer/ak8974.c ret = regmap_write(ak8974->map, AK8974_CTRL2, AK8974_CTRL2_DRDY_EN | map 261 drivers/iio/magnetometer/ak8974.c ret = regmap_write(ak8974->map, AK8974_CTRL3, 0); map 270 drivers/iio/magnetometer/ak8974.c ret = regmap_write(ak8974->map, AK8974_INT_CTRL, AK8974_INT_CTRL_POL); map 274 drivers/iio/magnetometer/ak8974.c return regmap_write(ak8974->map, AK8974_PRESET, 0); map 285 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_INT_CLEAR, &clear); map 300 drivers/iio/magnetometer/ak8974.c ret = regmap_update_bits(ak8974->map, AK8974_CTRL2, map 307 drivers/iio/magnetometer/ak8974.c return regmap_update_bits(ak8974->map, map 333 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_STATUS, &val); map 352 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_INT_SRC, &src); map 363 drivers/iio/magnetometer/ak8974.c ret = regmap_bulk_read(ak8974->map, AK8974_DATA_X, result, 6); map 388 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_STATUS, &val); map 409 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val); map 418 drivers/iio/magnetometer/ak8974.c ret = regmap_update_bits(ak8974->map, map 429 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val); map 437 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_SELFTEST, &val); map 452 drivers/iio/magnetometer/ak8974.c int ret = regmap_bulk_read(ak8974->map, reg, tab, tab_size); map 471 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AK8974_WHOAMI, &whoami); map 482 drivers/iio/magnetometer/ak8974.c ret = regmap_read(ak8974->map, AMI305_VER,
&fw); map 764 drivers/iio/magnetometer/ak8974.c ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config); map 765 drivers/iio/magnetometer/ak8974.c if (IS_ERR(ak8974->map)) { map 767 drivers/iio/magnetometer/ak8974.c return PTR_ERR(ak8974->map); map 241 drivers/infiniband/core/core_priv.h int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index); map 303 drivers/infiniband/core/core_priv.h static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, map 200 drivers/infiniband/core/rw.c ctx->map.sges = sge = kcalloc(sg_cnt, sizeof(*sge), GFP_KERNEL); map 201 drivers/infiniband/core/rw.c if (!ctx->map.sges) map 204 drivers/infiniband/core/rw.c ctx->map.wrs = kcalloc(ctx->nr_ops, sizeof(*ctx->map.wrs), GFP_KERNEL); map 205 drivers/infiniband/core/rw.c if (!ctx->map.wrs) map 209 drivers/infiniband/core/rw.c struct ib_rdma_wr *rdma_wr = &ctx->map.wrs[i]; map 233 drivers/infiniband/core/rw.c &ctx->map.wrs[i + 1].wr : NULL; map 240 drivers/infiniband/core/rw.c kfree(ctx->map.sges); map 522 drivers/infiniband/core/rw.c first_wr = &ctx->map.wrs[0].wr; map 523 drivers/infiniband/core/rw.c last_wr = &ctx->map.wrs[ctx->nr_ops - 1].wr; map 589 drivers/infiniband/core/rw.c kfree(ctx->map.wrs); map 590 drivers/infiniband/core/rw.c kfree(ctx->map.sges); map 738 drivers/infiniband/core/security.c int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) map 740 drivers/infiniband/core/security.c if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) map 743 drivers/infiniband/core/security.c if (map->agent.qp->qp_type == IB_QPT_SMI) { map 744 drivers/infiniband/core/security.c if (!READ_ONCE(map->agent.smp_allowed)) map 749 drivers/infiniband/core/security.c return ib_security_pkey_access(map->agent.device, map 750 drivers/infiniband/core/security.c map->agent.port_num, map 752 drivers/infiniband/core/security.c map->agent.security); map 14103 drivers/infiniband/hw/hfi1/chip.c u64 map[NUM_MAP_REGS]; map 14133 drivers/infiniband/hw/hfi1/chip.c memset(rmt->map, rxcontext, sizeof(rmt->map)); map 14152 drivers/infiniband/hw/hfi1/chip.c write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]); map 14283 drivers/infiniband/hw/hfi1/chip.c reg = rmt->map[regidx]; map 14287 drivers/infiniband/hw/hfi1/chip.c rmt->map[regidx] = reg; map 14361 drivers/infiniband/hw/hfi1/chip.c reg = rmt->map[regidx]; map 14364 drivers/infiniband/hw/hfi1/chip.c rmt->map[regidx] = reg; map 124 drivers/infiniband/hw/hfi1/exp_rcv.h u8 map; map 1823 drivers/infiniband/hw/hfi1/pio.c e = m->map[vl & m->mask]; map 1856 drivers/infiniband/hw/hfi1/pio.c kfree(m->map[i]); map 1953 drivers/infiniband/hw/hfi1/pio.c newmap->map[i] = kzalloc(sizeof(*newmap->map[i]) + map 1957 drivers/infiniband/hw/hfi1/pio.c if (!newmap->map[i]) map 1959 drivers/infiniband/hw/hfi1/pio.c newmap->map[i]->mask = (1 << ilog2(sz)) - 1; map 1966 drivers/infiniband/hw/hfi1/pio.c newmap->map[i]->ksc[j] = map 1977 drivers/infiniband/hw/hfi1/pio.c newmap->map[i] = newmap->map[i % num_vls]; map 266 drivers/infiniband/hw/hfi1/pio.h struct pio_map_elem *map[0]; map 804 drivers/infiniband/hw/hfi1/sdma.c e = m->map[vl & m->mask]; map 841 drivers/infiniband/hw/hfi1/sdma.c struct sdma_rht_map_elem *map[HFI1_MAX_VLS_SUPPORTED]; map 887 drivers/infiniband/hw/hfi1/sdma.c if (rht_node && rht_node->map[vl]) { map 888 drivers/infiniband/hw/hfi1/sdma.c struct sdma_rht_map_elem *map = rht_node->map[vl]; map 890 drivers/infiniband/hw/hfi1/sdma.c sde = map->sde[selector & map->mask]; map 901 drivers/infiniband/hw/hfi1/sdma.c 
static void sdma_populate_sde_map(struct sdma_rht_map_elem *map) map 905 drivers/infiniband/hw/hfi1/sdma.c for (i = 0; i < roundup_pow_of_two(map->ctr ? : 1) - map->ctr; i++) map 906 drivers/infiniband/hw/hfi1/sdma.c map->sde[map->ctr + i] = map->sde[i]; map 909 drivers/infiniband/hw/hfi1/sdma.c static void sdma_cleanup_sde_map(struct sdma_rht_map_elem *map, map 915 drivers/infiniband/hw/hfi1/sdma.c for (i = 0; i < map->ctr; i++) { map 916 drivers/infiniband/hw/hfi1/sdma.c if (map->sde[i] == sde) { map 917 drivers/infiniband/hw/hfi1/sdma.c memmove(&map->sde[i], &map->sde[i + 1], map 918 drivers/infiniband/hw/hfi1/sdma.c (map->ctr - i - 1) * sizeof(map->sde[0])); map 919 drivers/infiniband/hw/hfi1/sdma.c map->ctr--; map 920 drivers/infiniband/hw/hfi1/sdma.c pow = roundup_pow_of_two(map->ctr ? : 1); map 921 drivers/infiniband/hw/hfi1/sdma.c map->mask = pow - 1; map 922 drivers/infiniband/hw/hfi1/sdma.c sdma_populate_sde_map(map); map 943 drivers/infiniband/hw/hfi1/sdma.c if (unlikely(vl < 0 || vl >= ARRAY_SIZE(rht_node->map))) map 986 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); map 987 drivers/infiniband/hw/hfi1/sdma.c if (!rht_node->map[vl]) { map 993 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl]->mask = 0; map 994 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl]->ctr = 1; map 995 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl]->sde[0] = sde; map 1001 drivers/infiniband/hw/hfi1/sdma.c kfree(rht_node->map[vl]); map 1012 drivers/infiniband/hw/hfi1/sdma.c if (!rht_node->map[vl]) map 1013 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl] = kzalloc(sz, GFP_KERNEL); map 1015 drivers/infiniband/hw/hfi1/sdma.c if (!rht_node->map[vl]) { map 1020 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl]->ctr++; map 1021 drivers/infiniband/hw/hfi1/sdma.c ctr = rht_node->map[vl]->ctr; map 1022 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl]->sde[ctr - 1] = sde; map 1024 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[vl]->mask = pow - 1; map 1027 drivers/infiniband/hw/hfi1/sdma.c sdma_populate_sde_map(rht_node->map[vl]); map 1048 drivers/infiniband/hw/hfi1/sdma.c if (rht_node->map[i]) map 1049 drivers/infiniband/hw/hfi1/sdma.c sdma_cleanup_sde_map(rht_node->map[i], map 1054 drivers/infiniband/hw/hfi1/sdma.c if (!rht_node->map[i]) map 1057 drivers/infiniband/hw/hfi1/sdma.c if (rht_node->map[i]->ctr) { map 1070 drivers/infiniband/hw/hfi1/sdma.c kfree(rht_node->map[i]); map 1103 drivers/infiniband/hw/hfi1/sdma.c kfree(rht_node->map[i]); map 1130 drivers/infiniband/hw/hfi1/sdma.c if (!rht_node->map[i] || !rht_node->map[i]->ctr) map 1135 drivers/infiniband/hw/hfi1/sdma.c for (j = 0; j < rht_node->map[i]->ctr; j++) { map 1136 drivers/infiniband/hw/hfi1/sdma.c if (!rht_node->map[i]->sde[j]) map 1143 drivers/infiniband/hw/hfi1/sdma.c rht_node->map[i]->sde[j]->this_idx); map 1159 drivers/infiniband/hw/hfi1/sdma.c kfree(m->map[i]); map 1243 drivers/infiniband/hw/hfi1/sdma.c newmap->map[i] = kzalloc( map 1247 drivers/infiniband/hw/hfi1/sdma.c if (!newmap->map[i]) map 1249 drivers/infiniband/hw/hfi1/sdma.c newmap->map[i]->mask = (1 << ilog2(sz)) - 1; map 1252 drivers/infiniband/hw/hfi1/sdma.c newmap->map[i]->sde[j] = map 1263 drivers/infiniband/hw/hfi1/sdma.c newmap->map[i] = newmap->map[i % num_vls]; map 1027 drivers/infiniband/hw/hfi1/sdma.h struct sdma_map_elem *map[0]; map 1104 drivers/infiniband/hw/hfi1/tid_rdma.c sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; map 1105 drivers/infiniband/hw/hfi1/tid_rdma.c sge->length = 
sge->mr->map[sge->m]->segs[sge->n].length; map 1208 drivers/infiniband/hw/hfi1/tid_rdma.c cnt, grp->map, grp->used); map 1211 drivers/infiniband/hw/hfi1/tid_rdma.c node->map = grp->map; map 1214 drivers/infiniband/hw/hfi1/tid_rdma.c grp->base, grp->map, grp->used, cnt); map 1308 drivers/infiniband/hw/hfi1/tid_rdma.c if (node->map & BIT(i) || cnt >= node->cnt) { map 1330 drivers/infiniband/hw/hfi1/tid_rdma.c pair = !(i & 0x1) && !((node->map >> i) & 0x3) && map 1356 drivers/infiniband/hw/hfi1/tid_rdma.c grp->map |= BIT(i); map 1373 drivers/infiniband/hw/hfi1/tid_rdma.c if (node->map & BIT(i) || cnt >= node->cnt) { map 1381 drivers/infiniband/hw/hfi1/tid_rdma.c grp->map &= ~BIT(i); map 1396 drivers/infiniband/hw/hfi1/tid_rdma.c cnt, grp->map, grp->used); map 166 drivers/infiniband/hw/hfi1/tid_rdma.h u8 map; map 639 drivers/infiniband/hw/hfi1/trace_tid.h u8 map, u8 used, u8 cnt), map 640 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(qp, msg, index, base, map, used, cnt), map 647 drivers/infiniband/hw/hfi1/trace_tid.h __field(u8, map) map 657 drivers/infiniband/hw/hfi1/trace_tid.h __entry->map = map; map 668 drivers/infiniband/hw/hfi1/trace_tid.h __entry->map, map 677 drivers/infiniband/hw/hfi1/trace_tid.h u8 map, u8 used, u8 cnt), map 678 drivers/infiniband/hw/hfi1/trace_tid.h TP_ARGS(qp, msg, index, base, map, used, cnt) map 698 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (!(grp->map & (1 << idx))) { map 716 drivers/infiniband/hw/hfi1/user_exp_rcv.c } else if (grp->map & (1 << useidx)) { map 737 drivers/infiniband/hw/hfi1/user_exp_rcv.c grp->map |= 1 << useidx++; map 861 drivers/infiniband/hw/hfi1/user_exp_rcv.c node->grp->map &= ~(1 << (node->rcventry - node->grp->base)); map 887 drivers/infiniband/hw/hfi1/user_exp_rcv.c if (grp->map & (1 << i)) { map 167 drivers/infiniband/hw/hns/hns_roce_alloc.c dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map); map 173 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->page_list[i].map); map 204 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->direct.map = t; map 229 drivers/infiniband/hw/hns/hns_roce_alloc.c buf->page_list[i].map = t; map 257 drivers/infiniband/hw/hns/hns_roce_alloc.c bufs[total++] = buf->direct.map + map 260 drivers/infiniband/hw/hns/hns_roce_alloc.c bufs[total++] = buf->page_list[i].map; map 446 drivers/infiniband/hw/hns/hns_roce_device.h dma_addr_t map; map 526 drivers/infiniband/hw/hns/hns_roce_hw_v1.c sdb_dma_addr = db->ext_db->sdb_buf_list->map; map 568 drivers/infiniband/hw/hns/hns_roce_hw_v1.c odb_dma_addr = db->ext_db->odb_buf_list->map; map 620 drivers/infiniband/hw/hns/hns_roce_hw_v1.c db->ext_db->sdb_buf_list->map = sdb_dma_addr; map 644 drivers/infiniband/hw/hns/hns_roce_hw_v1.c db->ext_db->odb_buf_list->map = odb_dma_addr; map 664 drivers/infiniband/hw/hns/hns_roce_hw_v1.c db->ext_db->sdb_buf_list->map); map 1181 drivers/infiniband/hw/hns/hns_roce_hw_v1.c db->ext_db->sdb_buf_list->map); map 1188 drivers/infiniband/hw/hns/hns_roce_hw_v1.c db->ext_db->odb_buf_list->map); map 1219 drivers/infiniband/hw/hns/hns_roce_hw_v1.c raq->e_raq_buf->map = addr; map 1222 drivers/infiniband/hw/hns/hns_roce_hw_v1.c roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12); map 1237 drivers/infiniband/hw/hns/hns_roce_hw_v1.c raq->e_raq_buf->map >> 44); map 1295 drivers/infiniband/hw/hns/hns_roce_hw_v1.c raq->e_raq_buf->map); map 1333 drivers/infiniband/hw/hns/hns_roce_hw_v1.c HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, map 1339 drivers/infiniband/hw/hns/hns_roce_hw_v1.c HNS_ROCE_BT_RSV_BUF_SIZE, 
&priv->bt_table.mtpt_buf.map, map 1347 drivers/infiniband/hw/hns/hns_roce_hw_v1.c HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, map 1358 drivers/infiniband/hw/hns/hns_roce_hw_v1.c priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); map 1362 drivers/infiniband/hw/hns/hns_roce_hw_v1.c priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); map 1375 drivers/infiniband/hw/hns/hns_roce_hw_v1.c priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); map 1378 drivers/infiniband/hw/hns/hns_roce_hw_v1.c priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); map 1381 drivers/infiniband/hw/hns/hns_roce_hw_v1.c priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); map 1400 drivers/infiniband/hw/hns/hns_roce_hw_v1.c &tptr_buf->map, GFP_KERNEL); map 1404 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_dev->tptr_dma_addr = tptr_buf->map; map 1420 drivers/infiniband/hw/hns/hns_roce_hw_v1.c tptr_buf->buf, tptr_buf->map); map 2089 drivers/infiniband/hw/hns/hns_roce_hw_v1.c tptr_dma_addr = tptr_buf->map + offset; map 2436 drivers/infiniband/hw/hns/hns_roce_hw_v1.c bt_ba = priv->bt_table.qpc_buf.map >> 12; map 2439 drivers/infiniband/hw/hns/hns_roce_hw_v1.c bt_ba = priv->bt_table.mtpt_buf.map >> 12; map 2442 drivers/infiniband/hw/hns/hns_roce_hw_v1.c bt_ba = priv->bt_table.cqc_buf.map >> 12; map 4190 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->buf_list[i].buf, eq->buf_list[i].map); map 4259 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->buf_list[i].map = tmp_dma_addr; map 4272 drivers/infiniband/hw/hns/hns_roce_hw_v1.c writel((u32)(eq->buf_list[0].map >> 12), eqc + 4); map 4282 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->buf_list[0].map >> 44); map 4299 drivers/infiniband/hw/hns/hns_roce_hw_v1.c eq->buf_list[i].map); map 1820 drivers/infiniband/hw/hns/hns_roce_hw_v2.c cpu_to_le32(link_tbl->table.map & 0xffffffff); map 1822 drivers/infiniband/hw/hns/hns_roce_hw_v2.c cpu_to_le32(link_tbl->table.map >> 32); map 1896 drivers/infiniband/hw/hns/hns_roce_hw_v2.c &link_tbl->table.map, map 1913 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->pg_list[i].map = t; map 1932 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->pg_list[i].map); map 1937 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->table.map); map 1956 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->pg_list[i].map); map 1960 drivers/infiniband/hw/hns/hns_roce_hw_v2.c link_tbl->table.map); map 5391 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->buf_list->map); map 5416 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->eqe_ba = eq->buf_list->map; map 5776 drivers/infiniband/hw/hns/hns_roce_hw_v2.c &(eq->buf_list->map), map 5807 drivers/infiniband/hw/hns/hns_roce_hw_v2.c eq->buf_list->map); map 903 drivers/infiniband/hw/hns/hns_roce_mr.c page_list[i] = buf->direct.map + (i << buf->page_shift); map 905 drivers/infiniband/hw/hns/hns_roce_mr.c page_list[i] = buf->page_list[i].map; map 389 drivers/infiniband/hw/mlx4/cm.c struct id_map_entry *map, *tmp_map; map 393 drivers/infiniband/hw/mlx4/cm.c list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) { map 394 drivers/infiniband/hw/mlx4/cm.c if (slave < 0 || slave == map->slave_id) { map 395 drivers/infiniband/hw/mlx4/cm.c if (map->scheduled_delete) map 396 drivers/infiniband/hw/mlx4/cm.c need_flush |= !cancel_delayed_work(&map->timeout); map 428 drivers/infiniband/hw/mlx4/cm.c list_for_each_entry_safe(map, tmp_map, &lh, list) { map 429 drivers/infiniband/hw/mlx4/cm.c rb_erase(&map->node, sl_id_map); map 430 drivers/infiniband/hw/mlx4/cm.c 
xa_erase(&sriov->pv_id_table, map->pv_cm_id); map 434 drivers/infiniband/hw/mlx4/cm.c list_for_each_entry_safe(map, tmp_map, &dev->sriov.cm_list, list) { map 435 drivers/infiniband/hw/mlx4/cm.c if (slave == map->slave_id) map 436 drivers/infiniband/hw/mlx4/cm.c list_move_tail(&map->list, &lh); map 443 drivers/infiniband/hw/mlx4/cm.c list_for_each_entry_safe(map, tmp_map, &lh, list) { map 444 drivers/infiniband/hw/mlx4/cm.c list_del(&map->list); map 445 drivers/infiniband/hw/mlx4/cm.c kfree(map); map 591 drivers/infiniband/hw/mlx4/cq.c qp->sqp_proxy_rcv[tail].map, map 590 drivers/infiniband/hw/mlx4/mad.c tun_qp->tx_ring[tun_tx_ix].buf.map, map 632 drivers/infiniband/hw/mlx4/mad.c tun_qp->tx_ring[tun_tx_ix].buf.map, map 636 drivers/infiniband/hw/mlx4/mad.c list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map; map 1322 drivers/infiniband/hw/mlx4/mad.c sg_list.addr = tun_qp->ring[index].map; map 1331 drivers/infiniband/hw/mlx4/mad.c ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map, map 1424 drivers/infiniband/hw/mlx4/mad.c sqp->tx_ring[wire_tx_ix].buf.map, map 1431 drivers/infiniband/hw/mlx4/mad.c sqp->tx_ring[wire_tx_ix].buf.map, map 1435 drivers/infiniband/hw/mlx4/mad.c list.addr = sqp->tx_ring[wire_tx_ix].buf.map; map 1512 drivers/infiniband/hw/mlx4/mad.c ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, map 1639 drivers/infiniband/hw/mlx4/mad.c tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev, map 1643 drivers/infiniband/hw/mlx4/mad.c if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) { map 1654 drivers/infiniband/hw/mlx4/mad.c tun_qp->tx_ring[i].buf.map = map 1660 drivers/infiniband/hw/mlx4/mad.c tun_qp->tx_ring[i].buf.map)) { map 1676 drivers/infiniband/hw/mlx4/mad.c ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, map 1684 drivers/infiniband/hw/mlx4/mad.c ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, map 1716 drivers/infiniband/hw/mlx4/mad.c ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, map 1722 drivers/infiniband/hw/mlx4/mad.c ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, map 256 drivers/infiniband/hw/mlx4/mlx4_ib.h dma_addr_t map; map 472 drivers/infiniband/hw/mlx4/qp.c qp->sqp_proxy_rcv[i].map = map 476 drivers/infiniband/hw/mlx4/qp.c if (ib_dma_mapping_error(dev, qp->sqp_proxy_rcv[i].map)) { map 486 drivers/infiniband/hw/mlx4/qp.c ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, map 501 drivers/infiniband/hw/mlx4/qp.c ib_dma_unmap_single(dev, qp->sqp_proxy_rcv[i].map, map 3899 drivers/infiniband/hw/mlx4/qp.c qp->sqp_proxy_rcv[ind].map, map 3906 drivers/infiniband/hw/mlx4/qp.c scat->addr = cpu_to_be64(qp->sqp_proxy_rcv[ind].map); map 637 drivers/infiniband/hw/mlx5/cq.c void __iomem *uar_page = mdev->priv.uar->map; map 5332 drivers/infiniband/hw/mlx5/qp.c mlx5_write64((__be32 *)ctrl, bf->bfreg->map + bf->offset); map 101 drivers/infiniband/hw/mthca/mthca_catas.c switch (swab32(readl(dev->catas_err.map)) >> 24) { map 122 drivers/infiniband/hw/mthca/mthca_catas.c i, swab32(readl(dev->catas_err.map + i))); map 139 drivers/infiniband/hw/mthca/mthca_catas.c if (readl(dev->catas_err.map + i)) { map 153 drivers/infiniband/hw/mthca/mthca_catas.c dev->catas_err.map = NULL; map 159 drivers/infiniband/hw/mthca/mthca_catas.c dev->catas_err.map = ioremap(addr, dev->catas_err.size * 4); map 160 drivers/infiniband/hw/mthca/mthca_catas.c if (!dev->catas_err.map) { map 176 drivers/infiniband/hw/mthca/mthca_catas.c if (dev->catas_err.map) map 177 drivers/infiniband/hw/mthca/mthca_catas.c iounmap(dev->catas_err.map); map 
281 drivers/infiniband/hw/mthca/mthca_dev.h u32 __iomem *map; map 644 drivers/infiniband/hw/mthca/mthca_eq.c void __iomem **map) map 648 drivers/infiniband/hw/mthca/mthca_eq.c *map = ioremap(base + offset, size); map 649 drivers/infiniband/hw/mthca/mthca_eq.c if (!*map) map 240 drivers/infiniband/hw/qib/qib_diag.c u32 __iomem *map = NULL; map 247 drivers/infiniband/hw/qib/qib_diag.c map = krb32 + (offset / sizeof(u32)); map 265 drivers/infiniband/hw/qib/qib_diag.c map = krb32 + (offset - dd->uregbase) / sizeof(u32); map 307 drivers/infiniband/hw/qib/qib_diag.c map = (u32 __iomem *)dd->piobase + (offset / sizeof(u32)); map 311 drivers/infiniband/hw/qib/qib_diag.c if (!map && offs4k && dd->piovl15base) { map 314 drivers/infiniband/hw/qib/qib_diag.c map = (u32 __iomem *)dd->piovl15base + map 323 drivers/infiniband/hw/qib/qib_diag.c return map; map 45 drivers/infiniband/hw/qib/qib_qp.c struct rvt_qpn_map *map, unsigned off) map 47 drivers/infiniband/hw/qib/qib_qp.c return (map - qpt->map) * RVT_BITS_PER_PAGE + off; map 51 drivers/infiniband/hw/qib/qib_qp.c struct rvt_qpn_map *map, unsigned off, map 59 drivers/infiniband/hw/qib/qib_qp.c off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off); map 107 drivers/infiniband/hw/qib/qib_qp.c static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map) map 116 drivers/infiniband/hw/qib/qib_qp.c if (map->page) map 119 drivers/infiniband/hw/qib/qib_qp.c map->page = (void *)page; map 131 drivers/infiniband/hw/qib/qib_qp.c struct rvt_qpn_map *map; map 158 drivers/infiniband/hw/qib/qib_qp.c map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; map 161 drivers/infiniband/hw/qib/qib_qp.c if (unlikely(!map->page)) { map 162 drivers/infiniband/hw/qib/qib_qp.c get_map_page(qpt, map); map 163 drivers/infiniband/hw/qib/qib_qp.c if (unlikely(!map->page)) map 167 drivers/infiniband/hw/qib/qib_qp.c if (!test_and_set_bit(offset, map->page)) { map 172 drivers/infiniband/hw/qib/qib_qp.c offset = find_next_offset(qpt, map, offset, map 174 drivers/infiniband/hw/qib/qib_qp.c qpn = mk_qpn(qpt, map, offset); map 192 drivers/infiniband/hw/qib/qib_qp.c map = &qpt->map[qpt->nmaps++]; map 194 drivers/infiniband/hw/qib/qib_qp.c } else if (map < &qpt->map[qpt->nmaps]) { map 195 drivers/infiniband/hw/qib/qib_qp.c ++map; map 198 drivers/infiniband/hw/qib/qib_qp.c map = &qpt->map[0]; map 201 drivers/infiniband/hw/qib/qib_qp.c qpn = mk_qpn(qpt, map, offset); map 192 drivers/infiniband/hw/qib/qib_ud.c sge->mr->map[sge->m]->segs[sge->n].vaddr; map 194 drivers/infiniband/hw/qib/qib_ud.c sge->mr->map[sge->m]->segs[sge->n].length; map 168 drivers/infiniband/hw/qib/qib_verbs.c sge.mr->map[sge.m]->segs[sge.n].vaddr; map 170 drivers/infiniband/hw/qib/qib_verbs.c sge.mr->map[sge.m]->segs[sge.n].length; map 201 drivers/infiniband/hw/qib/qib_verbs.c sge->mr->map[sge->m]->segs[sge->n].vaddr; map 203 drivers/infiniband/hw/qib/qib_verbs.c sge->mr->map[sge->m]->segs[sge->n].length; map 111 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h void __iomem *map; map 311 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_CQ_OFFSET); map 316 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h writel(cpu_to_le32(val), dev->driver_uar.map + PVRDMA_UAR_QP_OFFSET); map 875 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->driver_uar.map = map 877 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c if (!dev->driver_uar.map) { map 1075 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c iounmap(dev->driver_uar.map); map 1135 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c 
iounmap(dev->driver_uar.map); map 124 drivers/infiniband/sw/rdmavt/mr.c kfree(mr->map[--i]); map 145 drivers/infiniband/sw/rdmavt/mr.c mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL, map 147 drivers/infiniband/sw/rdmavt/mr.c if (!mr->map[i]) map 288 drivers/infiniband/sw/rdmavt/mr.c mr = kzalloc(struct_size(mr, mr.map, m), GFP_KERNEL); map 423 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = vaddr; map 424 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].length = PAGE_SIZE; map 613 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].vaddr = (void *)addr; map 614 drivers/infiniband/sw/rdmavt/mr.c mr->mr.map[m]->segs[n].length = ps; map 643 drivers/infiniband/sw/rdmavt/mr.c mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr; map 734 drivers/infiniband/sw/rdmavt/mr.c fmr = kzalloc(struct_size(fmr, mr.map, m), GFP_KERNEL); map 811 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i]; map 812 drivers/infiniband/sw/rdmavt/mr.c fmr->mr.map[m]->segs[n].length = ps; map 985 drivers/infiniband/sw/rdmavt/mr.c while (off >= mr->map[m]->segs[n].length) { map 986 drivers/infiniband/sw/rdmavt/mr.c off -= mr->map[m]->segs[n].length; map 995 drivers/infiniband/sw/rdmavt/mr.c isge->vaddr = mr->map[m]->segs[n].vaddr + off; map 996 drivers/infiniband/sw/rdmavt/mr.c isge->length = mr->map[m]->segs[n].length - off; map 1092 drivers/infiniband/sw/rdmavt/mr.c while (off >= mr->map[m]->segs[n].length) { map 1093 drivers/infiniband/sw/rdmavt/mr.c off -= mr->map[m]->segs[n].length; map 1102 drivers/infiniband/sw/rdmavt/mr.c sge->vaddr = mr->map[m]->segs[n].vaddr + off; map 1103 drivers/infiniband/sw/rdmavt/mr.c sge->length = mr->map[m]->segs[n].length - off; map 308 drivers/infiniband/sw/rdmavt/qp.c struct rvt_qpn_map *map) map 317 drivers/infiniband/sw/rdmavt/qp.c if (map->page) map 320 drivers/infiniband/sw/rdmavt/qp.c map->page = (void *)page; map 331 drivers/infiniband/sw/rdmavt/qp.c struct rvt_qpn_map *map; map 356 drivers/infiniband/sw/rdmavt/qp.c map = &qpt->map[qpt->nmaps]; map 361 drivers/infiniband/sw/rdmavt/qp.c if (!map->page) { map 362 drivers/infiniband/sw/rdmavt/qp.c get_map_page(qpt, map); map 363 drivers/infiniband/sw/rdmavt/qp.c if (!map->page) { map 368 drivers/infiniband/sw/rdmavt/qp.c set_bit(offset, map->page); map 373 drivers/infiniband/sw/rdmavt/qp.c map++; map 388 drivers/infiniband/sw/rdmavt/qp.c for (i = 0; i < ARRAY_SIZE(qpt->map); i++) map 389 drivers/infiniband/sw/rdmavt/qp.c free_page((unsigned long)qpt->map[i].page); map 517 drivers/infiniband/sw/rdmavt/qp.c struct rvt_qpn_map *map, unsigned off) map 519 drivers/infiniband/sw/rdmavt/qp.c return (map - qpt->map) * RVT_BITS_PER_PAGE + off; map 535 drivers/infiniband/sw/rdmavt/qp.c struct rvt_qpn_map *map; map 560 drivers/infiniband/sw/rdmavt/qp.c map = &qpt->map[qpn / RVT_BITS_PER_PAGE]; map 563 drivers/infiniband/sw/rdmavt/qp.c if (unlikely(!map->page)) { map 564 drivers/infiniband/sw/rdmavt/qp.c get_map_page(qpt, map); map 565 drivers/infiniband/sw/rdmavt/qp.c if (unlikely(!map->page)) map 569 drivers/infiniband/sw/rdmavt/qp.c if (!test_and_set_bit(offset, map->page)) { map 579 drivers/infiniband/sw/rdmavt/qp.c qpn = mk_qpn(qpt, map, offset); map 589 drivers/infiniband/sw/rdmavt/qp.c map = &qpt->map[qpt->nmaps++]; map 592 drivers/infiniband/sw/rdmavt/qp.c } else if (map < &qpt->map[qpt->nmaps]) { map 593 drivers/infiniband/sw/rdmavt/qp.c ++map; map 597 drivers/infiniband/sw/rdmavt/qp.c map = &qpt->map[0]; map 604 drivers/infiniband/sw/rdmavt/qp.c qpn 
= mk_qpn(qpt, map, offset); map 988 drivers/infiniband/sw/rdmavt/qp.c struct rvt_qpn_map *map; map 990 drivers/infiniband/sw/rdmavt/qp.c map = qpt->map + (qpn & RVT_QPN_MASK) / RVT_BITS_PER_PAGE; map 991 drivers/infiniband/sw/rdmavt/qp.c if (map->page) map 992 drivers/infiniband/sw/rdmavt/qp.c clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page); map 101 drivers/infiniband/sw/rxe/rxe_mr.c if (mem->map) { map 103 drivers/infiniband/sw/rxe/rxe_mr.c kfree(mem->map[i]); map 105 drivers/infiniband/sw/rxe/rxe_mr.c kfree(mem->map); map 113 drivers/infiniband/sw/rxe/rxe_mr.c struct rxe_map **map = mem->map; map 117 drivers/infiniband/sw/rxe/rxe_mr.c mem->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL); map 118 drivers/infiniband/sw/rxe/rxe_mr.c if (!mem->map) map 122 drivers/infiniband/sw/rxe/rxe_mr.c mem->map[i] = kmalloc(sizeof(**map), GFP_KERNEL); map 123 drivers/infiniband/sw/rxe/rxe_mr.c if (!mem->map[i]) map 140 drivers/infiniband/sw/rxe/rxe_mr.c kfree(mem->map[i]); map 142 drivers/infiniband/sw/rxe/rxe_mr.c kfree(mem->map); map 164 drivers/infiniband/sw/rxe/rxe_mr.c struct rxe_map **map; map 196 drivers/infiniband/sw/rxe/rxe_mr.c map = mem->map; map 198 drivers/infiniband/sw/rxe/rxe_mr.c buf = map[0]->buf; map 202 drivers/infiniband/sw/rxe/rxe_mr.c map++; map 203 drivers/infiniband/sw/rxe/rxe_mr.c buf = map[0]->buf; map 284 drivers/infiniband/sw/rxe/rxe_mr.c length = mem->map[map_index]->buf[buf_index].size; map 294 drivers/infiniband/sw/rxe/rxe_mr.c length = mem->map[map_index]->buf[buf_index].size; map 315 drivers/infiniband/sw/rxe/rxe_mr.c if (!mem->map) { map 328 drivers/infiniband/sw/rxe/rxe_mr.c if (offset + length > mem->map[m]->buf[n].size) { map 334 drivers/infiniband/sw/rxe/rxe_mr.c addr = (void *)(uintptr_t)mem->map[m]->buf[n].addr + offset; map 350 drivers/infiniband/sw/rxe/rxe_mr.c struct rxe_map **map; map 378 drivers/infiniband/sw/rxe/rxe_mr.c WARN_ON_ONCE(!mem->map); map 388 drivers/infiniband/sw/rxe/rxe_mr.c map = mem->map + m; map 389 drivers/infiniband/sw/rxe/rxe_mr.c buf = map[0]->buf + i; map 418 drivers/infiniband/sw/rxe/rxe_mr.c map++; map 419 drivers/infiniband/sw/rxe/rxe_mr.c buf = map[0]->buf; map 597 drivers/infiniband/sw/rxe/rxe_mr.c struct rxe_map **map; map 608 drivers/infiniband/sw/rxe/rxe_mr.c map = mem->map; map 609 drivers/infiniband/sw/rxe/rxe_mr.c buf = map[0]->buf; map 618 drivers/infiniband/sw/rxe/rxe_mr.c map++; map 619 drivers/infiniband/sw/rxe/rxe_mr.c buf = map[0]->buf; map 1010 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_map *map; map 1016 drivers/infiniband/sw/rxe/rxe_verbs.c map = mr->map[mr->nbuf / RXE_BUF_PER_MAP]; map 1017 drivers/infiniband/sw/rxe/rxe_verbs.c buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP]; map 350 drivers/infiniband/sw/rxe/rxe_verbs.h struct rxe_map **map; map 30 drivers/input/joystick/amijoy.c module_param_array_named(map, amijoy, uint, NULL, 0); map 31 drivers/input/joystick/amijoy.c MODULE_PARM_DESC(map, "Map of attached joysticks in form of <a>,<b> (default is 0,1)"); map 44 drivers/input/joystick/analog.c module_param_array_named(map, js, charp, &js_nargs, 0); map 45 drivers/input/joystick/analog.c MODULE_PARM_DESC(map, "Describes analog joysticks type/capabilities"); map 42 drivers/input/joystick/gamecon.c module_param_array_named(map, gc_cfg[0].args, int, &gc_cfg[0].nargs, 0); map 43 drivers/input/joystick/gamecon.c MODULE_PARM_DESC(map, "Describes first set of devices (<parport#>,<pad1>,<pad2>,..<pad5>)"); map 38 drivers/input/joystick/turbografx.c module_param_array_named(map, tgfx_cfg[0].args, int, 
&tgfx_cfg[0].nargs, 0); map 39 drivers/input/joystick/turbografx.c MODULE_PARM_DESC(map, "Describes first set of devices (<parport#>,<js1>,<js2>,..<js7>"); map 30 drivers/input/keyboard/adc-keys.c const struct adc_keys_button *map; map 46 drivers/input/keyboard/adc-keys.c diff = abs(st->map[i].voltage - value); map 49 drivers/input/keyboard/adc-keys.c keycode = st->map[i].keycode; map 69 drivers/input/keyboard/adc-keys.c struct adc_keys_button *map; map 79 drivers/input/keyboard/adc-keys.c map = devm_kmalloc_array(dev, st->num_keys, sizeof(*map), GFP_KERNEL); map 80 drivers/input/keyboard/adc-keys.c if (!map) map 86 drivers/input/keyboard/adc-keys.c &map[i].voltage)) { map 91 drivers/input/keyboard/adc-keys.c map[i].voltage /= 1000; map 94 drivers/input/keyboard/adc-keys.c &map[i].keycode)) { map 103 drivers/input/keyboard/adc-keys.c st->map = map; map 172 drivers/input/keyboard/adc-keys.c __set_bit(st->map[i].keycode, input->keybit); map 213 drivers/input/keyboard/cros_ec_keyb.c const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i]; map 215 drivers/input/keyboard/cros_ec_keyb.c if (map->ev_type != ev_type) map 218 drivers/input/keyboard/cros_ec_keyb.c input_event(idev, ev_type, map->code, map 219 drivers/input/keyboard/cros_ec_keyb.c !!(mask & BIT(map->bit)) ^ map->inverted); map 496 drivers/input/keyboard/cros_ec_keyb.c const struct cros_ec_bs_map *map = &cros_ec_keyb_bs[i]; map 498 drivers/input/keyboard/cros_ec_keyb.c if ((map->ev_type == EV_KEY && (buttons & BIT(map->bit))) || map 499 drivers/input/keyboard/cros_ec_keyb.c (map->ev_type == EV_SW && (switches & BIT(map->bit)))) map 500 drivers/input/keyboard/cros_ec_keyb.c input_set_capability(idev, map->ev_type, map->code); map 291 drivers/input/keyboard/qt1050.c static int qt1050_set_key(struct regmap *map, int number, int on) map 297 drivers/input/keyboard/qt1050.c return regmap_update_bits(map, key_regs->di_aks, 0xfc, map 303 drivers/input/keyboard/qt1050.c struct regmap *map = ts->regmap; map 310 drivers/input/keyboard/qt1050.c err = qt1050_set_key(map, i, 0); map 320 drivers/input/keyboard/qt1050.c err = qt1050_set_key(map, button->num, 1); map 326 drivers/input/keyboard/qt1050.c err = regmap_write(map, key_regs->pulse_scale, map 330 drivers/input/keyboard/qt1050.c err = regmap_write(map, key_regs->csd, button->charge_delay); map 333 drivers/input/keyboard/qt1050.c err = regmap_write(map, key_regs->nthr, button->thr_cnt); map 433 drivers/input/keyboard/qt1050.c struct regmap *map; map 458 drivers/input/keyboard/qt1050.c map = devm_regmap_init_i2c(client, &qt1050_regmap_config); map 459 drivers/input/keyboard/qt1050.c if (IS_ERR(map)) map 460 drivers/input/keyboard/qt1050.c return PTR_ERR(map); map 464 drivers/input/keyboard/qt1050.c ts->regmap = map; map 72 drivers/input/keyboard/sh_keysc.c static void sh_keysc_map_dbg(struct device *dev, unsigned long *map, map 78 drivers/input/keyboard/sh_keysc.c dev_dbg(dev, "%s[%d] 0x%lx\n", str, k, map[k]); map 195 drivers/input/keyboard/sun4i-lradc-keys.c struct sun4i_lradc_keymap *map = &lradc->chan0_map[i]; map 205 drivers/input/keyboard/sun4i-lradc-keys.c error = of_property_read_u32(pp, "voltage", &map->voltage); map 212 drivers/input/keyboard/sun4i-lradc-keys.c error = of_property_read_u32(pp, "linux,code", &map->keycode); map 38 drivers/input/misc/88pm80x_onkey.c struct regmap *map; map 49 drivers/input/misc/88pm80x_onkey.c ret = regmap_read(info->map, PM800_STATUS_1, &val); map 84 drivers/input/misc/88pm80x_onkey.c info->map = info->pm80x->regmap; map 85 
drivers/input/misc/88pm80x_onkey.c if (!info->map) { map 122 drivers/input/misc/88pm80x_onkey.c regmap_update_bits(info->map, PM800_RTC_MISC4, PM800_LONG_ONKEY_EN, map 125 drivers/input/misc/88pm80x_onkey.c regmap_update_bits(info->map, PM800_RTC_MISC3, map 50 drivers/input/misc/max77650-onkey.c struct regmap *map; map 56 drivers/input/misc/max77650-onkey.c map = dev_get_regmap(parent, NULL); map 57 drivers/input/misc/max77650-onkey.c if (!map) map 76 drivers/input/misc/max77650-onkey.c error = regmap_update_bits(map, MAX77650_REG_CNFG_GLBL, map 356 drivers/input/mouse/alps.c static void alps_get_bitmap_points(unsigned int map, map 365 drivers/input/mouse/alps.c for (i = 0; map != 0; i++, map >>= 1) { map 366 drivers/input/mouse/alps.c bit = map & 1; map 794 drivers/input/serio/hil_mlc.c struct hil_mlc_serio_map *map; map 799 drivers/input/serio/hil_mlc.c map = serio->port_data; map 800 drivers/input/serio/hil_mlc.c BUG_ON(map == NULL); map 802 drivers/input/serio/hil_mlc.c mlc = map->mlc; map 805 drivers/input/serio/hil_mlc.c mlc->serio_opacket[map->didx] |= map 806 drivers/input/serio/hil_mlc.c ((hil_packet)c) << (8 * (3 - mlc->serio_oidx[map->didx])); map 808 drivers/input/serio/hil_mlc.c if (mlc->serio_oidx[map->didx] >= 3) { map 810 drivers/input/serio/hil_mlc.c if (!(mlc->serio_opacket[map->didx] & HIL_PKT_CMD)) map 812 drivers/input/serio/hil_mlc.c switch (mlc->serio_opacket[map->didx] & HIL_PKT_DATA_MASK) { map 814 drivers/input/serio/hil_mlc.c idx = mlc->di[map->didx].idd; map 817 drivers/input/serio/hil_mlc.c idx = mlc->di[map->didx].rsc; map 820 drivers/input/serio/hil_mlc.c idx = mlc->di[map->didx].exd; map 823 drivers/input/serio/hil_mlc.c idx = mlc->di[map->didx].rnm; map 828 drivers/input/serio/hil_mlc.c mlc->serio_oidx[map->didx] = 0; map 829 drivers/input/serio/hil_mlc.c mlc->serio_opacket[map->didx] = 0; map 832 drivers/input/serio/hil_mlc.c mlc->serio_oidx[map->didx]++; map 854 drivers/input/serio/hil_mlc.c mlc->serio_oidx[map->didx] = 0; map 855 drivers/input/serio/hil_mlc.c mlc->serio_opacket[map->didx] = 0; map 862 drivers/input/serio/hil_mlc.c struct hil_mlc_serio_map *map; map 868 drivers/input/serio/hil_mlc.c map = serio->port_data; map 869 drivers/input/serio/hil_mlc.c BUG_ON(map == NULL); map 871 drivers/input/serio/hil_mlc.c mlc = map->mlc; map 879 drivers/input/serio/hil_mlc.c struct hil_mlc_serio_map *map; map 882 drivers/input/serio/hil_mlc.c map = serio->port_data; map 883 drivers/input/serio/hil_mlc.c BUG_ON(map == NULL); map 885 drivers/input/serio/hil_mlc.c mlc = map->mlc; map 172 drivers/input/sparse-keymap.c struct key_entry *map, *entry; map 179 drivers/input/sparse-keymap.c map = devm_kmemdup(&dev->dev, keymap, map_size * sizeof(*map), map 181 drivers/input/sparse-keymap.c if (!map) map 185 drivers/input/sparse-keymap.c entry = &map[i]; map 213 drivers/input/sparse-keymap.c dev->keycode = map; map 353 drivers/input/tablet/aiptek.c static int map_str_to_val(const struct aiptek_map *map, const char *str, size_t count) map 360 drivers/input/tablet/aiptek.c for (p = map; p->string; p++) map 367 drivers/input/tablet/aiptek.c static const char *map_val_to_str(const struct aiptek_map *map, int val) map 371 drivers/input/tablet/aiptek.c for (p = map; p->value != AIPTEK_INVALID_VALUE; p++) map 3280 drivers/iommu/amd_iommu.c .map = amd_iommu_map, map 2106 drivers/iommu/arm-smmu-v3.c static int arm_smmu_bitmap_alloc(unsigned long *map, int span) map 2111 drivers/iommu/arm-smmu-v3.c idx = find_first_zero_bit(map, size); map 2114 drivers/iommu/arm-smmu-v3.c } 
while (test_and_set_bit(idx, map)); map 2119 drivers/iommu/arm-smmu-v3.c static void arm_smmu_bitmap_free(unsigned long *map, int idx) map 2121 drivers/iommu/arm-smmu-v3.c clear_bit(idx, map); map 2460 drivers/iommu/arm-smmu-v3.c return ops->map(ops, iova, paddr, size, prot); map 2729 drivers/iommu/arm-smmu-v3.c .map = arm_smmu_map, map 222 drivers/iommu/arm-smmu.c static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) map 227 drivers/iommu/arm-smmu.c idx = find_next_zero_bit(map, end, start); map 230 drivers/iommu/arm-smmu.c } while (test_and_set_bit(idx, map)); map 235 drivers/iommu/arm-smmu.c static void __arm_smmu_free_bitmap(unsigned long *map, int idx) map 237 drivers/iommu/arm-smmu.c clear_bit(idx, map); map 1173 drivers/iommu/arm-smmu.c ret = ops->map(ops, iova, paddr, size, prot); map 1560 drivers/iommu/arm-smmu.c .map = arm_smmu_map, map 1330 drivers/iommu/exynos-iommu.c .map = exynos_iommu_map, map 1489 drivers/iommu/intel-iommu.c int ih, int map) map 1516 drivers/iommu/intel-iommu.c if (!cap_caching_mode(iommu->cap) || !map) map 5967 drivers/iommu/intel-iommu.c .map = intel_iommu_map, map 787 drivers/iommu/io-pgtable-arm-v7s.c .map = arm_v7s_map, map 925 drivers/iommu/io-pgtable-arm-v7s.c if (ops->map(ops, iova, iova, size, IOMMU_READ | map 932 drivers/iommu/io-pgtable-arm-v7s.c if (!ops->map(ops, iova, iova + size, size, map 952 drivers/iommu/io-pgtable-arm-v7s.c if (ops->map(ops, iova_start + size, size, size, IOMMU_READ)) map 973 drivers/iommu/io-pgtable-arm-v7s.c if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) map 780 drivers/iommu/io-pgtable-arm.c .map = arm_lpae_map, map 1188 drivers/iommu/io-pgtable-arm.c if (ops->map(ops, iova, iova, size, IOMMU_READ | map 1195 drivers/iommu/io-pgtable-arm.c if (!ops->map(ops, iova, iova + size, size, map 1211 drivers/iommu/io-pgtable-arm.c if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) map 1229 drivers/iommu/io-pgtable-arm.c if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) map 24 drivers/iommu/iommu-traces.c EXPORT_TRACEPOINT_SYMBOL_GPL(map); map 1868 drivers/iommu/iommu.c if (unlikely(ops->map == NULL || map 1897 drivers/iommu/iommu.c ret = ops->map(domain, iova, paddr, pgsize, prot); map 734 drivers/iommu/ipmmu-vmsa.c return domain->iop->map(domain->iop, iova, paddr, size, prot); map 966 drivers/iommu/ipmmu-vmsa.c .map = ipmmu_map, map 196 drivers/iommu/msm_iommu.c static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end) map 201 drivers/iommu/msm_iommu.c idx = find_next_zero_bit(map, end, start); map 204 drivers/iommu/msm_iommu.c } while (test_and_set_bit(idx, map)); map 209 drivers/iommu/msm_iommu.c static void msm_iommu_free_ctx(unsigned long *map, int idx) map 211 drivers/iommu/msm_iommu.c clear_bit(idx, map); map 514 drivers/iommu/msm_iommu.c ret = priv->iop->map(priv->iop, iova, pa, len, prot); map 701 drivers/iommu/msm_iommu.c .map = msm_iommu_map, map 442 drivers/iommu/mtk_iommu.c ret = dom->iop->map(dom->iop, iova, paddr, size, prot); map 578 drivers/iommu/mtk_iommu.c .map = mtk_iommu_map, map 534 drivers/iommu/mtk_iommu_v1.c .map = mtk_iommu_map, map 1809 drivers/iommu/omap-iommu.c .map = omap_iommu_map, map 433 drivers/iommu/qcom_iommu.c ret = ops->map(ops, iova, paddr, size, prot); map 607 drivers/iommu/qcom_iommu.c .map = qcom_iommu_map, map 1127 drivers/iommu/rockchip-iommu.c .map = rk_iommu_map, map 373 drivers/iommu/s390-iommu.c .map = s390_iommu_map, map 296 drivers/iommu/tegra-gart.c .map = gart_iommu_map, map 896 drivers/iommu/tegra-smmu.c .map = tegra_smmu_map, map 393 
drivers/iommu/virtio-iommu.c struct virtio_iommu_req_map map; map 399 drivers/iommu/virtio-iommu.c map = (struct virtio_iommu_req_map) { map 408 drivers/iommu/virtio-iommu.c ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); map 722 drivers/iommu/virtio-iommu.c struct virtio_iommu_req_map map; map 736 drivers/iommu/virtio-iommu.c map = (struct virtio_iommu_req_map) { map 748 drivers/iommu/virtio-iommu.c ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map)); map 959 drivers/iommu/virtio-iommu.c .map = viommu_map, map 170 drivers/irqchip/exynos-combiner.c .map = combiner_irq_domain_map, map 411 drivers/irqchip/irq-armada-370-xp.c unsigned long map = 0; map 415 drivers/irqchip/irq-armada-370-xp.c map |= 1 << cpu_logical_map(cpu); map 424 drivers/irqchip/irq-armada-370-xp.c writel((map << 8) | irq, main_int_base + map 468 drivers/irqchip/irq-armada-370-xp.c .map = armada_370_xp_mpic_irq_map, map 62 drivers/irqchip/irq-aspeed-i2c-ic.c .map = aspeed_i2c_ic_map_irq_domain, map 180 drivers/irqchip/irq-aspeed-vic.c .map = avic_map, map 114 drivers/irqchip/irq-ath79-misc.c .map = misc_map, map 96 drivers/irqchip/irq-ativic32.c .map = ativic32_irq_domain_map, map 208 drivers/irqchip/irq-atmel-aic.c .map = irq_map_generic_chip, map 304 drivers/irqchip/irq-atmel-aic5.c .map = irq_map_generic_chip, map 182 drivers/irqchip/irq-bcm2836.c .map = bcm2836_map, map 299 drivers/irqchip/irq-bcm6345-l1.c .map = bcm6345_l1_map, map 315 drivers/irqchip/irq-bcm7038-l1.c .map = bcm7038_l1_map, map 184 drivers/irqchip/irq-clps711x.c clps711x_intc->ops.map = clps711x_intc_irq_map; map 208 drivers/irqchip/irq-csky-mpintc.c .map = csky_irqdomain_map, map 154 drivers/irqchip/irq-davinci-cp-intc.c .map = davinci_cp_intc_host_map, map 132 drivers/irqchip/irq-eznps.c .map = nps400_irq_map, map 161 drivers/irqchip/irq-ftintc010.c .map = ft010_irqdomain_map, map 47 drivers/irqchip/irq-gic-realview.c struct regmap *map; map 59 drivers/irqchip/irq-gic-realview.c map = syscon_node_to_regmap(np); map 60 drivers/irqchip/irq-gic-realview.c if (!IS_ERR(map)) { map 62 drivers/irqchip/irq-gic-realview.c regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, map 64 drivers/irqchip/irq-gic-realview.c regmap_update_bits(map, pld1_ctrl, map 67 drivers/irqchip/irq-gic-realview.c regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, 0x0000); map 956 drivers/irqchip/irq-gic-v3-its.c struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; map 959 drivers/irqchip/irq-gic-v3-its.c desc.its_vmapti_cmd.vpe = map->vpe; map 961 drivers/irqchip/irq-gic-v3-its.c desc.its_vmapti_cmd.virt_id = map->vintid; map 963 drivers/irqchip/irq-gic-v3-its.c desc.its_vmapti_cmd.db_enabled = map->db_enabled; map 970 drivers/irqchip/irq-gic-v3-its.c struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id]; map 973 drivers/irqchip/irq-gic-v3-its.c desc.its_vmovi_cmd.vpe = map->vpe; map 976 drivers/irqchip/irq-gic-v3-its.c desc.its_vmovi_cmd.db_enabled = map->db_enabled; map 1064 drivers/irqchip/irq-gic-v3-its.c struct its_vlpi_map *map; map 1067 drivers/irqchip/irq-gic-v3-its.c map = &its_dev->event_map.vlpi_maps[event]; map 1068 drivers/irqchip/irq-gic-v3-its.c hwirq = map->vintid; map 1071 drivers/irqchip/irq-gic-v3-its.c map->properties &= ~clr; map 1072 drivers/irqchip/irq-gic-v3-its.c map->properties |= set | LPI_PROP_GROUP1; map 1279 drivers/irqchip/irq-gic-v3-its.c if (!info->map) map 1294 drivers/irqchip/irq-gic-v3-its.c its_dev->event_map.vm = info->map->vm; map 1296 drivers/irqchip/irq-gic-v3-its.c } else if (its_dev->event_map.vm != info->map->vm) { map 
1302 drivers/irqchip/irq-gic-v3-its.c its_dev->event_map.vlpi_maps[event] = *info->map; map 1309 drivers/irqchip/irq-gic-v3-its.c its_map_vm(its_dev->its, info->map->vm); map 1318 drivers/irqchip/irq-gic-v3-its.c lpi_write_config(d, 0xff, info->map->properties); map 1350 drivers/irqchip/irq-gic-v3-its.c *info->map = its_dev->event_map.vlpi_maps[event]; map 161 drivers/irqchip/irq-gic-v4.c int its_map_vlpi(int irq, struct its_vlpi_map *map) map 166 drivers/irqchip/irq-gic-v4.c .map = map, map 184 drivers/irqchip/irq-gic-v4.c int its_get_vlpi(int irq, struct its_vlpi_map *map) map 189 drivers/irqchip/irq-gic-v4.c .map = map, map 809 drivers/irqchip/irq-gic.c unsigned long flags, map = 0; map 822 drivers/irqchip/irq-gic.c map |= gic_cpu_map[cpu]; map 831 drivers/irqchip/irq-gic.c writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); map 1073 drivers/irqchip/irq-gic.c .map = gic_irq_domain_map, map 286 drivers/irqchip/irq-hip04.c unsigned long flags, map = 0; map 292 drivers/irqchip/irq-hip04.c map |= hip04_cpu_map[cpu]; map 301 drivers/irqchip/irq-hip04.c writel_relaxed(map << 8 | irq, hip04_data.dist_base + GIC_DIST_SOFTINT); map 357 drivers/irqchip/irq-hip04.c .map = hip04_irq_domain_map, map 303 drivers/irqchip/irq-i8259.c .map = i8259A_irq_domain_map, map 90 drivers/irqchip/irq-imx-irqsteer.c .map = imx_irqsteer_irq_map, map 17 drivers/irqchip/irq-ingenic-tcu.c struct regmap *map; map 29 drivers/irqchip/irq-ingenic-tcu.c struct regmap *map = gc->private; map 33 drivers/irqchip/irq-ingenic-tcu.c regmap_read(map, TCU_REG_TFR, &irq_reg); map 34 drivers/irqchip/irq-ingenic-tcu.c regmap_read(map, TCU_REG_TMR, &irq_mask); map 50 drivers/irqchip/irq-ingenic-tcu.c struct regmap *map = gc->private; map 54 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.ack, mask); map 55 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.enable, mask); map 64 drivers/irqchip/irq-ingenic-tcu.c struct regmap *map = gc->private; map 68 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.disable, mask); map 77 drivers/irqchip/irq-ingenic-tcu.c struct regmap *map = gc->private; map 81 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.ack, mask); map 82 drivers/irqchip/irq-ingenic-tcu.c regmap_write(map, ct->regs.disable, mask); map 92 drivers/irqchip/irq-ingenic-tcu.c struct regmap *map; map 96 drivers/irqchip/irq-ingenic-tcu.c map = device_node_to_regmap(np); map 97 drivers/irqchip/irq-ingenic-tcu.c if (IS_ERR(map)) map 98 drivers/irqchip/irq-ingenic-tcu.c return PTR_ERR(map); map 104 drivers/irqchip/irq-ingenic-tcu.c tcu->map = map; map 134 drivers/irqchip/irq-ingenic-tcu.c gc->private = tcu->map; map 145 drivers/irqchip/irq-ingenic-tcu.c regmap_write(tcu->map, TCU_REG_TMSR, IRQ_MSK(32)); map 57 drivers/irqchip/irq-jcore-aic.c .map = jcore_aic_irqdomain_map, map 136 drivers/irqchip/irq-keystone.c .map = keystone_irq_map, map 169 drivers/irqchip/irq-lpc32xx.c .map = lpc32xx_irq_domain_map, map 171 drivers/irqchip/irq-mips-cpu.c .map = mips_cpu_intc_map, map 62 drivers/irqchip/irq-mips-gic.c u32 map; map 391 drivers/irqchip/irq-mips-gic.c write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map); map 459 drivers/irqchip/irq-mips-gic.c u32 map; map 477 drivers/irqchip/irq-mips-gic.c map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; map 482 drivers/irqchip/irq-mips-gic.c map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; map 492 drivers/irqchip/irq-mips-gic.c cd->map = map; map 520 drivers/irqchip/irq-mips-gic.c write_gic_vo_map(mips_gic_vx_map_reg(intr), map); 
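Note on the irqchip entries in this stretch of the index: most of them register a .map callback in struct irq_domain_ops (for example .map = gic_irq_domain_map and .map = plic_irqdomain_map above and below). For orientation only, a minimal sketch of that pattern follows; the foo_* identifiers are invented for the example, while irq_set_chip_and_handler(), irq_set_chip_data(), handle_level_irq and irq_domain_xlate_onecell are the standard helpers such callbacks normally use. A real driver would also populate mask/unmask operations in its irq_chip.

    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    /* Hypothetical controller; a real one would also set .irq_mask/.irq_unmask. */
    static struct irq_chip foo_irq_chip = {
            .name = "foo",
    };

    /* .map runs once per hwirq when a virq is created in the domain. */
    static int foo_irq_domain_map(struct irq_domain *d, unsigned int virq,
                                  irq_hw_number_t hwirq)
    {
            irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
            irq_set_chip_data(virq, d->host_data);  /* controller state for the chip ops */
            return 0;
    }

    static const struct irq_domain_ops foo_irq_domain_ops = {
            .map   = foo_irq_domain_map,
            .xlate = irq_domain_xlate_onecell,
    };

Controllers that only need generic chip handling point .map at irq_map_generic_chip instead, as the irq-atmel-aic and irq-stm32-exti entries in this index do.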
map 550 drivers/irqchip/irq-mips-gic.c .map = gic_irq_domain_map, map 200 drivers/irqchip/irq-mmp.c .map = mmp_irq_domain_map, map 85 drivers/irqchip/irq-mvebu-pic.c .map = mvebu_pic_irq_map, map 158 drivers/irqchip/irq-mxs.c .map = icoll_irq_domain_map, map 134 drivers/irqchip/irq-or1k-pic.c .map = or1k_map, map 186 drivers/irqchip/irq-pic32-evic.c .map = pic32_irq_domain_map, map 80 drivers/irqchip/irq-rda-intc.c .map = rda_irq_map, map 72 drivers/irqchip/irq-renesas-h8300h.c .map = irq_map, map 78 drivers/irqchip/irq-renesas-h8s.c .map = irq_map, map 346 drivers/irqchip/irq-renesas-intc-irqpin.c .map = intc_irqpin_irq_domain_map, map 46 drivers/irqchip/irq-renesas-rza1.c struct of_phandle_args map[IRQC_NUM_IRQ]; map 118 drivers/irqchip/irq-renesas-rza1.c spec.param_count = priv->map[hwirq].args_count; map 120 drivers/irqchip/irq-renesas-rza1.c spec.param[i] = priv->map[hwirq].args[i]; map 180 drivers/irqchip/irq-renesas-rza1.c priv->map[i].args_count = intsize; map 182 drivers/irqchip/irq-renesas-rza1.c priv->map[i].args[j] = be32_to_cpup(imap++); map 486 drivers/irqchip/irq-s3c24xx.c .map = s3c24xx_irq_map, map 1210 drivers/irqchip/irq-s3c24xx.c .map = s3c24xx_irq_map_of, map 75 drivers/irqchip/irq-sa11x0.c .map = sa1100_normal_irqdomain_map, map 164 drivers/irqchip/irq-sifive-plic.c .map = plic_irqdomain_map, map 414 drivers/irqchip/irq-stm32-exti.c .map = irq_map_generic_chip, map 103 drivers/irqchip/irq-sun4i.c .map = sun4i_irq_map, map 63 drivers/irqchip/irq-ts4800.c .map = ts4800_irqdomain_map, map 145 drivers/irqchip/irq-versatile-fpga.c .map = fpga_irqdomain_map, map 246 drivers/irqchip/irq-vic.c .map = vic_irqdomain_map, map 162 drivers/irqchip/irq-vt8500.c .map = vt8500_irq_map, map 135 drivers/irqchip/irq-xilinx-intc.c .map = xintc_map, map 57 drivers/irqchip/irq-xtensa-mx.c .map = xtensa_mx_irq_map, map 41 drivers/irqchip/irq-xtensa-pic.c .map = xtensa_irq_map, map 140 drivers/irqchip/qcom-irq-combiner.c .map = combiner_irq_map, map 37 drivers/leds/leds-max77650.c struct regmap *map; map 60 drivers/leds/leds-max77650.c return regmap_update_bits(led->map, led->regA, mask, val); map 68 drivers/leds/leds-max77650.c struct regmap *map; map 80 drivers/leds/leds-max77650.c map = dev_get_regmap(dev->parent, NULL); map 81 drivers/leds/leds-max77650.c if (!map) map 96 drivers/leds/leds-max77650.c led->map = map; map 121 drivers/leds/leds-max77650.c rv = regmap_write(map, led->regA, MAX77650_LED_A_DEFAULT); map 125 drivers/leds/leds-max77650.c rv = regmap_write(map, led->regB, MAX77650_LED_B_DEFAULT); map 130 drivers/leds/leds-max77650.c return regmap_write(map, map 22 drivers/leds/leds-pm8058.c struct regmap *map; map 51 drivers/leds/leds-pm8058.c ret = regmap_update_bits(led->map, led->reg, mask, val); map 64 drivers/leds/leds-pm8058.c ret = regmap_read(led->map, led->reg, &val); map 93 drivers/leds/leds-pm8058.c struct regmap *map; map 103 drivers/leds/leds-pm8058.c map = dev_get_regmap(pdev->dev.parent, NULL); map 104 drivers/leds/leds-pm8058.c if (!map) { map 108 drivers/leds/leds-pm8058.c led->map = map; map 29 drivers/leds/leds-syscon.c struct regmap *map; map 51 drivers/leds/leds-syscon.c ret = regmap_update_bits(sled->map, sled->offset, sled->mask, val); map 61 drivers/leds/leds-syscon.c struct regmap *map; map 71 drivers/leds/leds-syscon.c map = syscon_node_to_regmap(parent->of_node); map 72 drivers/leds/leds-syscon.c if (IS_ERR(map)) { map 74 drivers/leds/leds-syscon.c return PTR_ERR(map); map 81 drivers/leds/leds-syscon.c sled->map = map; map 97 drivers/leds/leds-syscon.c 
ret = regmap_read(map, sled->offset, &val); map 103 drivers/leds/leds-syscon.c ret = regmap_update_bits(map, sled->offset, map 110 drivers/leds/leds-syscon.c ret = regmap_update_bits(map, sled->offset, map 104 drivers/lightnvm/core.c struct nvm_dev_map *dev_map = tgt_dev->map; map 222 drivers/lightnvm/core.c tgt_dev->map = dev_map; map 570 drivers/lightnvm/core.c struct nvm_dev_map *dev_map = tgt_dev->map; map 1086 drivers/lightnvm/pblk.h u32 *map = (u32 *)pblk->trans_map; map 1088 drivers/lightnvm/pblk.h ppa = pblk_ppa32_to_ppa64(pblk, map[lba]); map 1090 drivers/lightnvm/pblk.h struct ppa_addr *map = (struct ppa_addr *)pblk->trans_map; map 1092 drivers/lightnvm/pblk.h ppa = map[lba]; map 1102 drivers/lightnvm/pblk.h u32 *map = (u32 *)pblk->trans_map; map 1104 drivers/lightnvm/pblk.h map[lba] = pblk_ppa64_to_ppa32(pblk, ppa); map 1106 drivers/lightnvm/pblk.h u64 *map = (u64 *)pblk->trans_map; map 1108 drivers/lightnvm/pblk.h map[lba] = ppa.ppa; map 81 drivers/mailbox/tegra-hsp.c const struct tegra_hsp_db_map *map; map 557 drivers/mailbox/tegra-hsp.c const struct tegra_hsp_db_map *map = hsp->soc->map; map 560 drivers/mailbox/tegra-hsp.c while (map->name) { map 561 drivers/mailbox/tegra-hsp.c channel = tegra_hsp_doorbell_create(hsp, map->name, map 562 drivers/mailbox/tegra-hsp.c map->master, map->index); map 566 drivers/mailbox/tegra-hsp.c map++; map 808 drivers/mailbox/tegra-hsp.c .map = tegra186_hsp_db_map, map 813 drivers/mailbox/tegra-hsp.c .map = tegra186_hsp_db_map, map 3520 drivers/md/dm-cache-target.c .map = cache_map, map 2210 drivers/md/dm-clone-target.c .map = clone_map, map 42 drivers/md/dm-core.h void __rcu *map; map 2972 drivers/md/dm-crypt.c .map = crypt_map, map 359 drivers/md/dm-delay.c .map = delay_map, map 494 drivers/md/dm-dust.c .map = dust_map, map 1698 drivers/md/dm-era-target.c .map = era_map, map 498 drivers/md/dm-flakey.c .map = flakey_map, map 4192 drivers/md/dm-integrity.c .map = dm_integrity_map, map 228 drivers/md/dm-linear.c .map = linear_map, map 1009 drivers/md/dm-log-writes.c .map = log_writes_map, map 2005 drivers/md/dm-mpath.c .map = multipath_map_bio, map 4024 drivers/md/dm-raid.c .map = raid_map, map 1461 drivers/md/dm-raid1.c .map = mirror_map, map 501 drivers/md/dm-rq.c struct dm_table *map = dm_get_live_table(md, &srcu_idx); map 503 drivers/md/dm-rq.c ti = dm_table_find_target(map, 0); map 2728 drivers/md/dm-snap.c .map = origin_map, map 2741 drivers/md/dm-snap.c .map = snapshot_map, map 2756 drivers/md/dm-snap.c .map = snapshot_merge_map, map 494 drivers/md/dm-stripe.c .map = stripe_map, map 556 drivers/md/dm-switch.c .map = switch_map, map 156 drivers/md/dm-target.c .map = io_err_map, map 4129 drivers/md/dm-thin.c .map = pool_map, map 4508 drivers/md/dm-thin.c .map = thin_map, map 184 drivers/md/dm-unstripe.c .map = unstripe_map, map 1214 drivers/md/dm-verity-target.c .map = verity_map, map 2336 drivers/md/dm-writecache.c .map = writecache_map, map 64 drivers/md/dm-zero.c .map = zero_map, map 975 drivers/md/dm-zoned-target.c .map = dmz_map, map 66 drivers/md/dm.c struct dm_table *map; map 449 drivers/md/dm.c struct dm_table *map; map 455 drivers/md/dm.c map = dm_get_live_table(md, &srcu_idx); map 456 drivers/md/dm.c if (!map) map 459 drivers/md/dm.c tgt = dm_table_find_target(map, sector); map 497 drivers/md/dm.c struct dm_table *map; map 502 drivers/md/dm.c map = dm_get_live_table(md, srcu_idx); map 503 drivers/md/dm.c if (!map || !dm_table_get_size(map)) map 507 drivers/md/dm.c if (dm_table_get_num_targets(map) != 1) map 510 drivers/md/dm.c tgt = 
dm_table_get_target(map, 0); map 708 drivers/md/dm.c return srcu_dereference(md->map, &md->io_barrier); map 729 drivers/md/dm.c return rcu_dereference(md->map); map 1067 drivers/md/dm.c struct dm_table *map; map 1070 drivers/md/dm.c map = dm_get_live_table(md, srcu_idx); map 1071 drivers/md/dm.c if (!map) map 1074 drivers/md/dm.c ti = dm_table_find_target(map, sector); map 1112 drivers/md/dm.c struct dm_table *map; map 1116 drivers/md/dm.c map = dm_get_live_table(md, &srcu_idx); map 1117 drivers/md/dm.c if (!map) map 1120 drivers/md/dm.c ret = dm_table_supports_dax(map, device_supports_dax, &blocksize); map 1283 drivers/md/dm.c r = ti->type->map(ti, clone); map 1437 drivers/md/dm.c while ((ti = dm_table_get_target(ci->map, target_nr++))) map 1574 drivers/md/dm.c ti = dm_table_find_target(ci->map, ci->sector); map 1594 drivers/md/dm.c struct dm_table *map, struct bio *bio) map 1596 drivers/md/dm.c ci->map = map; map 1608 drivers/md/dm.c struct dm_table *map, struct bio *bio) map 1614 drivers/md/dm.c init_clone_info(&ci, md, map, bio); map 1681 drivers/md/dm.c static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, map 1688 drivers/md/dm.c init_clone_info(&ci, md, map, bio); map 1739 drivers/md/dm.c struct dm_table *map, struct bio *bio) map 1744 drivers/md/dm.c if (unlikely(!map)) { map 1750 drivers/md/dm.c ti = dm_table_find_target(map, bio->bi_iter.bi_sector); map 1770 drivers/md/dm.c return __process_bio(md, map, bio, ti); map 1772 drivers/md/dm.c return __split_and_process_bio(md, map, bio); map 1780 drivers/md/dm.c struct dm_table *map; map 1782 drivers/md/dm.c map = dm_get_live_table(md, &srcu_idx); map 1795 drivers/md/dm.c ret = dm_process_bio(md, map, bio); map 1805 drivers/md/dm.c struct dm_table *map; map 1816 drivers/md/dm.c map = dm_get_live_table_fast(md); map 1817 drivers/md/dm.c if (map) map 1818 drivers/md/dm.c r = dm_table_any_congested(map, bdi_bits); map 2174 drivers/md/dm.c old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); map 2175 drivers/md/dm.c rcu_assign_pointer(md->map, (void *)t); map 2191 drivers/md/dm.c struct dm_table *map = rcu_dereference_protected(md->map, 1); map 2193 drivers/md/dm.c if (!map) map 2196 drivers/md/dm.c dm_table_event_callback(map, NULL, NULL); map 2197 drivers/md/dm.c RCU_INIT_POINTER(md->map, NULL); map 2200 drivers/md/dm.c return map; map 2372 drivers/md/dm.c struct dm_table *map; map 2389 drivers/md/dm.c map = dm_get_live_table(md, &srcu_idx); map 2391 drivers/md/dm.c dm_table_presuspend_targets(map); map 2393 drivers/md/dm.c dm_table_postsuspend_targets(map); map 2465 drivers/md/dm.c struct dm_table *map; map 2467 drivers/md/dm.c map = dm_get_live_table(md, &srcu_idx); map 2480 drivers/md/dm.c (void) dm_process_bio(md, map, c); map 2498 drivers/md/dm.c struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); map 2524 drivers/md/dm.c map = ERR_PTR(r); map 2529 drivers/md/dm.c map = __bind(md, table, &limits); map 2534 drivers/md/dm.c return map; map 2578 drivers/md/dm.c static int __dm_suspend(struct mapped_device *md, struct dm_table *map, map 2601 drivers/md/dm.c dm_table_presuspend_targets(map); map 2612 drivers/md/dm.c dm_table_presuspend_undo_targets(map); map 2630 drivers/md/dm.c if (map) map 2653 drivers/md/dm.c if (map) map 2664 drivers/md/dm.c dm_table_presuspend_undo_targets(map); map 2689 drivers/md/dm.c struct dm_table *map = NULL; map 2709 drivers/md/dm.c map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); map 2711 drivers/md/dm.c r = 
__dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); map 2715 drivers/md/dm.c dm_table_postsuspend_targets(map); map 2722 drivers/md/dm.c static int __dm_resume(struct mapped_device *md, struct dm_table *map) map 2724 drivers/md/dm.c if (map) { map 2725 drivers/md/dm.c int r = dm_table_resume_targets(map); map 2748 drivers/md/dm.c struct dm_table *map = NULL; map 2766 drivers/md/dm.c map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); map 2767 drivers/md/dm.c if (!map || !dm_table_get_size(map)) map 2770 drivers/md/dm.c r = __dm_resume(md, map); map 2789 drivers/md/dm.c struct dm_table *map = NULL; map 2801 drivers/md/dm.c map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); map 2809 drivers/md/dm.c (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, map 2812 drivers/md/dm.c dm_table_postsuspend_targets(map); map 91 drivers/md/dm.h #define dm_target_bio_based(t) ((t)->type->map != NULL) map 68 drivers/md/md-bitmap.c if (bitmap->bp[page].map) /* page is already allocated, just return */ map 100 drivers/md/md-bitmap.c if (!bitmap->bp[page].map) map 102 drivers/md/md-bitmap.c } else if (bitmap->bp[page].map || map 110 drivers/md/md-bitmap.c bitmap->bp[page].map = mappage; map 130 drivers/md/md-bitmap.c bitmap->bp[page].map = NULL; map 133 drivers/md/md-bitmap.c ptr = bitmap->bp[page].map; map 134 drivers/md/md-bitmap.c bitmap->bp[page].map = NULL; map 836 drivers/md/md-bitmap.c struct page **map, *sb_page; map 841 drivers/md/md-bitmap.c map = store->filemap; map 846 drivers/md/md-bitmap.c if (map[pages] != sb_page) /* 0 is sb_page, release it below */ map 847 drivers/md/md-bitmap.c free_buffers(map[pages]); map 848 drivers/md/md-bitmap.c kfree(map); map 1373 drivers/md/md-bitmap.c bitmap->bp[page].map == NULL) map 1390 drivers/md/md-bitmap.c &bitmap->bp[page].map)[hi]; map 1393 drivers/md/md-bitmap.c &(bitmap->bp[page].map[pageoff]); map 1764 drivers/md/md-bitmap.c if (bp[k].map && !bp[k].hijacked) map 1765 drivers/md/md-bitmap.c kfree(bp[k].map); map 2168 drivers/md/md-bitmap.c kfree(new_bp[k].map); map 2221 drivers/md/md-bitmap.c kfree(old_counts.bp[k].map); map 162 drivers/md/md-bitmap.h char *map; map 363 drivers/media/common/videobuf2/videobuf2-dma-contig.c .map = vb2_dc_dmabuf_ops_kmap, map 498 drivers/media/common/videobuf2/videobuf2-dma-sg.c .map = vb2_dma_sg_dmabuf_ops_kmap, map 347 drivers/media/common/videobuf2/videobuf2-vmalloc.c .map = vb2_vmalloc_dmabuf_ops_kmap, map 228 drivers/media/i2c/mt9v032.c struct regmap *map = mt9v032->regmap; map 237 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_AEC_AGC_ENABLE, value); map 264 drivers/media/i2c/mt9v032.c struct regmap *map = mt9v032->regmap; map 292 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_RESET, 1); map 296 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_RESET, 0); map 300 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_CHIP_CONTROL, map 319 drivers/media/i2c/mt9v032.c struct regmap *map = mt9v032->regmap; map 333 drivers/media/i2c/mt9v032.c ret = regmap_write(map, mt9v032->model->data->pclk_reg, map 340 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_ROW_NOISE_CORR_CONTROL, 0); map 385 drivers/media/i2c/mt9v032.c struct regmap *map = mt9v032->regmap; map 391 drivers/media/i2c/mt9v032.c return regmap_update_bits(map, MT9V032_CHIP_CONTROL, mode, 0); map 396 drivers/media/i2c/mt9v032.c ret = regmap_update_bits(map, MT9V032_READ_MODE, map 403 drivers/media/i2c/mt9v032.c ret = 
regmap_write(map, MT9V032_COLUMN_START, crop->left); map 407 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_ROW_START, crop->top); map 411 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_WINDOW_WIDTH, crop->width); map 415 drivers/media/i2c/mt9v032.c ret = regmap_write(map, MT9V032_WINDOW_HEIGHT, crop->height); map 424 drivers/media/i2c/mt9v032.c return regmap_update_bits(map, MT9V032_CHIP_CONTROL, mode, mode); map 645 drivers/media/i2c/mt9v032.c struct regmap *map = mt9v032->regmap; map 655 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_ANALOG_GAIN, ctrl->val); map 662 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_TOTAL_SHUTTER_WIDTH, map 670 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_VERTICAL_BLANKING, map 708 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_TEST_PATTERN, data); map 711 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_AEGC_DESIRED_BIN, ctrl->val); map 714 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_AEC_LPF, ctrl->val); map 717 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_AGC_LPF, ctrl->val); map 720 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_AEC_UPDATE_FREQUENCY, map 724 drivers/media/i2c/mt9v032.c return regmap_write(map, MT9V032_AGC_UPDATE_FREQUENCY, map 728 drivers/media/i2c/mt9v032.c return regmap_write(map, map 589 drivers/media/i2c/tvp5150.c struct regmap *map = decoder->regmap; map 594 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_FULL_FIELD_ENA, 0); map 598 drivers/media/i2c/tvp5150.c regmap_write(map, i, 0xff); map 607 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_CONF_RAM_ADDR_HIGH, regs->reg >> 8); map 608 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_CONF_RAM_ADDR_LOW, regs->reg); map 611 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_VDP_CONF_RAM_DATA, map 842 drivers/media/i2c/tvp5150.c struct regmap *map = decoder->regmap; map 848 drivers/media/i2c/tvp5150.c regmap_read(map, TVP5150_INT_STATUS_REG_A, &status); map 850 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_INT_STATUS_REG_A, status); map 858 drivers/media/i2c/tvp5150.c regmap_update_bits(map, TVP5150_MISC_CTL, mask, map 865 drivers/media/i2c/tvp5150.c regmap_read(map, TVP5150_INT_ACTIVE_REG_B, &active); map 868 drivers/media/i2c/tvp5150.c regmap_read(map, TVP5150_INT_STATUS_REG_B, &status); map 870 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_INT_RESET_REG_B, status); map 879 drivers/media/i2c/tvp5150.c struct regmap *map = decoder->regmap; map 886 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_CONF_SHARED_PIN, 0x0); map 888 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_INT_CONF, TVP5150_VDPOE | 0x1); map 889 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_INTT_CONFIG_REG_B, 0x1); map 892 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_CONF_SHARED_PIN, 0x2); map 894 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_INT_CONF, TVP5150_VDPOE); map 895 drivers/media/i2c/tvp5150.c regmap_write(map, TVP5150_INTT_CONFIG_REG_B, 0x0); map 1703 drivers/media/i2c/tvp5150.c struct regmap *map; map 1719 drivers/media/i2c/tvp5150.c map = devm_regmap_init_i2c(c, &tvp5150_config); map 1720 drivers/media/i2c/tvp5150.c if (IS_ERR(map)) map 1721 drivers/media/i2c/tvp5150.c return PTR_ERR(map); map 1723 drivers/media/i2c/tvp5150.c core->regmap = map; map 4501 drivers/media/pci/bt8xx/bttv-cards.c static unsigned char map[4] = {3, 0, 2, 1}; map 4508 drivers/media/pci/bt8xx/bttv-cards.c yaddr = 
map[yaddr]; map 4813 drivers/media/pci/bt8xx/bttv-cards.c static unsigned int map[4][4] = { { 0x0, 0x4, 0xa, 0x6 }, map 4824 drivers/media/pci/bt8xx/bttv-cards.c xaddr = map[yaddr][input] & 0xf; map 78 drivers/media/pci/cobalt/cobalt-alsa-pcm.c static const unsigned map[8] = { 0, 1, 5, 4, 2, 3, 6, 7 }; map 82 drivers/media/pci/cobalt/cobalt-alsa-pcm.c unsigned offset = map[idx] * 4; map 324 drivers/media/pci/cobalt/cobalt-alsa-pcm.c static const unsigned map[8] = { 0, 1, 5, 4, 2, 3, 6, 7 }; map 328 drivers/media/pci/cobalt/cobalt-alsa-pcm.c unsigned offset = map[idx] * 4; map 25 drivers/media/pci/cobalt/cobalt-flash.c static map_word flash_read16(struct map_info *map, unsigned long offset) map 29 drivers/media/pci/cobalt/cobalt-flash.c r.x[0] = cobalt_bus_read32(map->virt, ADRS(offset)); map 38 drivers/media/pci/cobalt/cobalt-flash.c static void flash_write16(struct map_info *map, const map_word datum, map 43 drivers/media/pci/cobalt/cobalt-flash.c cobalt_bus_write16(map->virt, ADRS(offset), data); map 46 drivers/media/pci/cobalt/cobalt-flash.c static void flash_copy_from(struct map_info *map, void *to, map 54 drivers/media/pci/cobalt/cobalt-flash.c data = cobalt_bus_read32(map->virt, ADRS(src)); map 64 drivers/media/pci/cobalt/cobalt-flash.c static void flash_copy_to(struct map_info *map, unsigned long to, map 81 drivers/media/pci/cobalt/cobalt-flash.c cobalt_bus_write16(map->virt, ADRS(dest - 2), data); map 87 drivers/media/pci/cobalt/cobalt-flash.c struct map_info *map = &cobalt_flash_map; map 90 drivers/media/pci/cobalt/cobalt-flash.c BUG_ON(!map_bankwidth_supported(map->bankwidth)); map 91 drivers/media/pci/cobalt/cobalt-flash.c map->virt = cobalt->bar1; map 92 drivers/media/pci/cobalt/cobalt-flash.c map->read = flash_read16; map 93 drivers/media/pci/cobalt/cobalt-flash.c map->write = flash_write16; map 94 drivers/media/pci/cobalt/cobalt-flash.c map->copy_from = flash_copy_from; map 95 drivers/media/pci/cobalt/cobalt-flash.c map->copy_to = flash_copy_to; map 97 drivers/media/pci/cobalt/cobalt-flash.c mtd = do_map_probe("cfi_probe", map); map 288 drivers/media/pci/ivtv/ivtv-driver.h struct page *map[IVTV_DMA_SG_OSD_ENT]; map 40 drivers/media/pci/ivtv/ivtv-udma.c if (PageHighMem(dma->map[map_offset])) { map 48 drivers/media/pci/ivtv/ivtv-udma.c src = kmap_atomic(dma->map[map_offset]) + offset; map 55 drivers/media/pci/ivtv/ivtv-udma.c sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset); map 116 drivers/media/pci/ivtv/ivtv-udma.c dma->map, FOLL_FORCE); map 123 drivers/media/pci/ivtv/ivtv-udma.c put_page(dma->map[i]); map 134 drivers/media/pci/ivtv/ivtv-udma.c put_page(dma->map[i]); map 174 drivers/media/pci/ivtv/ivtv-udma.c put_page(dma->map[i]); map 67 drivers/media/pci/ivtv/ivtv-yuv.c y_dma.page_count, &dma->map[0], FOLL_FORCE); map 71 drivers/media/pci/ivtv/ivtv-yuv.c uv_dma.page_count, &dma->map[y_pages], map 85 drivers/media/pci/ivtv/ivtv-yuv.c put_page(dma->map[y_pages + i]); map 97 drivers/media/pci/ivtv/ivtv-yuv.c put_page(dma->map[i]); map 116 drivers/media/pci/ivtv/ivtv-yuv.c put_page(dma->map[i]); map 803 drivers/media/platform/exynos4-is/fimc-reg.c struct regmap *map = fimc->sysreg; map 807 drivers/media/platform/exynos4-is/fimc-reg.c if (map == NULL) map 810 drivers/media/platform/exynos4-is/fimc-reg.c ret = regmap_read(map, SYSREG_CAMBLK, &camblk_cfg); map 820 drivers/media/platform/exynos4-is/fimc-reg.c ret = regmap_update_bits(map, SYSREG_CAMBLK, mask, val); map 827 drivers/media/platform/exynos4-is/fimc-reg.c ret = regmap_update_bits(map, 
SYSREG_CAMBLK, mask, val); map 832 drivers/media/platform/exynos4-is/fimc-reg.c ret = regmap_update_bits(map, SYSREG_ISPBLK, mask, ~mask); map 838 drivers/media/platform/exynos4-is/fimc-reg.c return regmap_update_bits(map, SYSREG_ISPBLK, mask, mask); map 63 drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c .map = { map 34 drivers/media/rc/keymaps/rc-alink-dtu-m.c .map = { map 59 drivers/media/rc/keymaps/rc-anysee.c .map = { map 54 drivers/media/rc/keymaps/rc-apac-viewcomp.c .map = { map 42 drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c .map = { map 65 drivers/media/rc/keymaps/rc-asus-pc39.c .map = { map 64 drivers/media/rc/keymaps/rc-asus-ps3-100.c .map = { map 43 drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c .map = { map 103 drivers/media/rc/keymaps/rc-ati-x10.c .map = { map 49 drivers/media/rc/keymaps/rc-avermedia-a16d.c .map = { map 71 drivers/media/rc/keymaps/rc-avermedia-cardbus.c .map = { map 52 drivers/media/rc/keymaps/rc-avermedia-dvbt.c .map = { map 122 drivers/media/rc/keymaps/rc-avermedia-m135a.c .map = { map 70 drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c .map = { map 45 drivers/media/rc/keymaps/rc-avermedia-rm-ks.c .map = { map 60 drivers/media/rc/keymaps/rc-avermedia.c .map = { map 59 drivers/media/rc/keymaps/rc-avertv-303.c .map = { map 68 drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c .map = { map 82 drivers/media/rc/keymaps/rc-behold-columbus.c .map = { map 115 drivers/media/rc/keymaps/rc-behold.c .map = { map 67 drivers/media/rc/keymaps/rc-budget-ci-old.c .map = { map 156 drivers/media/rc/keymaps/rc-cec.c .map = { map 58 drivers/media/rc/keymaps/rc-cinergy-1400.c .map = { map 52 drivers/media/rc/keymaps/rc-cinergy.c .map = { map 50 drivers/media/rc/keymaps/rc-d680-dmb.c .map = { map 56 drivers/media/rc/keymaps/rc-delock-61959.c .map = { map 98 drivers/media/rc/keymaps/rc-dib0700-nec.c .map = { map 209 drivers/media/rc/keymaps/rc-dib0700-rc5.c .map = { map 64 drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c .map = { map 48 drivers/media/rc/keymaps/rc-digittrade.c .map = { map 50 drivers/media/rc/keymaps/rc-dm1105-nec.c .map = { map 52 drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c .map = { map 71 drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c .map = { map 33 drivers/media/rc/keymaps/rc-dtt200u.c .map = { map 51 drivers/media/rc/keymaps/rc-dvbsky.c .map = { map 60 drivers/media/rc/keymaps/rc-dvico-mce.c .map = { map 51 drivers/media/rc/keymaps/rc-dvico-portable.c .map = { map 43 drivers/media/rc/keymaps/rc-em-terratec.c .map = { map 55 drivers/media/rc/keymaps/rc-encore-enltv-fm53.c .map = { map 86 drivers/media/rc/keymaps/rc-encore-enltv.c .map = { map 64 drivers/media/rc/keymaps/rc-encore-enltv2.c .map = { map 35 drivers/media/rc/keymaps/rc-evga-indtube.c .map = { map 70 drivers/media/rc/keymaps/rc-eztv.c .map = { map 51 drivers/media/rc/keymaps/rc-flydvb.c .map = { map 44 drivers/media/rc/keymaps/rc-flyvideo.c .map = { map 72 drivers/media/rc/keymaps/rc-fusionhdtv-mce.c .map = { map 55 drivers/media/rc/keymaps/rc-gadmei-rm008z.c .map = { map 27 drivers/media/rc/keymaps/rc-geekbox.c .map = { map 58 drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c .map = { map 53 drivers/media/rc/keymaps/rc-gotview7135.c .map = { map 267 drivers/media/rc/keymaps/rc-hauppauge.c .map = { map 44 drivers/media/rc/keymaps/rc-hisi-poplar.c .map = { map 56 drivers/media/rc/keymaps/rc-hisi-tv-demo.c .map = { map 116 drivers/media/rc/keymaps/rc-imon-mce.c .map = { map 130 drivers/media/rc/keymaps/rc-imon-pad.c .map = { map 60 drivers/media/rc/keymaps/rc-imon-rsc.c .map = { map 62 
drivers/media/rc/keymaps/rc-iodata-bctv7e.c .map = { map 69 drivers/media/rc/keymaps/rc-it913x-v1.c .map = { map 68 drivers/media/rc/keymaps/rc-it913x-v2.c .map = { map 61 drivers/media/rc/keymaps/rc-kaiomy.c .map = { map 32 drivers/media/rc/keymaps/rc-khadas.c .map = { map 57 drivers/media/rc/keymaps/rc-kworld-315u.c .map = { map 76 drivers/media/rc/keymaps/rc-kworld-pc150u.c .map = { map 77 drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c .map = { map 65 drivers/media/rc/keymaps/rc-leadtek-y04g0051.c .map = { map 84 drivers/media/rc/keymaps/rc-lme2510.c .map = { map 108 drivers/media/rc/keymaps/rc-manli.c .map = { map 87 drivers/media/rc/keymaps/rc-medion-x10-digitainer.c .map = { map 72 drivers/media/rc/keymaps/rc-medion-x10-or2x.c .map = { map 82 drivers/media/rc/keymaps/rc-medion-x10.c .map = { map 33 drivers/media/rc/keymaps/rc-msi-digivox-ii.c .map = { map 51 drivers/media/rc/keymaps/rc-msi-digivox-iii.c .map = { map 97 drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c .map = { map 43 drivers/media/rc/keymaps/rc-msi-tvanywhere.c .map = { map 70 drivers/media/rc/keymaps/rc-nebula.c .map = { map 131 drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c .map = { map 59 drivers/media/rc/keymaps/rc-norwood.c .map = { map 54 drivers/media/rc/keymaps/rc-npgtech.c .map = { map 32 drivers/media/rc/keymaps/rc-odroid.c .map = { map 54 drivers/media/rc/keymaps/rc-pctv-sedna.c .map = { map 68 drivers/media/rc/keymaps/rc-pinnacle-color.c .map = { map 63 drivers/media/rc/keymaps/rc-pinnacle-grey.c .map = { map 44 drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c .map = { map 51 drivers/media/rc/keymaps/rc-pixelview-002t.c .map = { map 57 drivers/media/rc/keymaps/rc-pixelview-mk12.c .map = { map 57 drivers/media/rc/keymaps/rc-pixelview-new.c .map = { map 56 drivers/media/rc/keymaps/rc-pixelview.c .map = { map 55 drivers/media/rc/keymaps/rc-powercolor-real-angel.c .map = { map 43 drivers/media/rc/keymaps/rc-proteus-2309.c .map = { map 55 drivers/media/rc/keymaps/rc-purpletv.c .map = { map 52 drivers/media/rc/keymaps/rc-pv951.c .map = { map 94 drivers/media/rc/keymaps/rc-rc6-mce.c .map = { map 52 drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c .map = { map 51 drivers/media/rc/keymaps/rc-reddo.c .map = { map 72 drivers/media/rc/keymaps/rc-snapstream-firefly.c .map = { map 55 drivers/media/rc/keymaps/rc-streamzap.c .map = { map 49 drivers/media/rc/keymaps/rc-su3000.c .map = { map 67 drivers/media/rc/keymaps/rc-tango.c .map = { map 55 drivers/media/rc/keymaps/rc-tanix-tx3mini.c .map = { map 46 drivers/media/rc/keymaps/rc-tanix-tx5max.c .map = { map 49 drivers/media/rc/keymaps/rc-tbs-nec.c .map = { map 51 drivers/media/rc/keymaps/rc-technisat-ts35.c .map = { map 68 drivers/media/rc/keymaps/rc-technisat-usb2.c .map = { map 63 drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c .map = { map 61 drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c .map = { map 66 drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c .map = { map 38 drivers/media/rc/keymaps/rc-terratec-slim-2.c .map = { map 45 drivers/media/rc/keymaps/rc-terratec-slim.c .map = { map 62 drivers/media/rc/keymaps/rc-tevii-nec.c .map = { map 73 drivers/media/rc/keymaps/rc-tivo.c .map = { map 51 drivers/media/rc/keymaps/rc-total-media-in-hand-02.c .map = { map 51 drivers/media/rc/keymaps/rc-total-media-in-hand.c .map = { map 46 drivers/media/rc/keymaps/rc-trekstor.c .map = { map 56 drivers/media/rc/keymaps/rc-tt-1500.c .map = { map 73 drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c .map = { map 67 drivers/media/rc/keymaps/rc-twinhan1027.c .map 
= { map 67 drivers/media/rc/keymaps/rc-videomate-m1f.c .map = { map 59 drivers/media/rc/keymaps/rc-videomate-s350.c .map = { map 61 drivers/media/rc/keymaps/rc-videomate-tv-pvr.c .map = { map 61 drivers/media/rc/keymaps/rc-videostrong-kii-pro.c .map = { map 31 drivers/media/rc/keymaps/rc-wetek-hub.c .map = { map 71 drivers/media/rc/keymaps/rc-wetek-play2.c .map = { map 56 drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c .map = { map 76 drivers/media/rc/keymaps/rc-winfast.c .map = { map 61 drivers/media/rc/keymaps/rc-x96max.c .map = { map 42 drivers/media/rc/keymaps/rc-xbox-dvd.c .map = { map 54 drivers/media/rc/keymaps/rc-zx-irdec.c .map = { map 92 drivers/media/rc/rc-main.c struct rc_map_list *map = NULL; map 95 drivers/media/rc/rc-main.c list_for_each_entry(map, &rc_map_list, list) { map 96 drivers/media/rc/rc-main.c if (!strcmp(name, map->map.name)) { map 98 drivers/media/rc/rc-main.c return map; map 109 drivers/media/rc/rc-main.c struct rc_map_list *map; map 111 drivers/media/rc/rc-main.c map = seek_rc_map(name); map 113 drivers/media/rc/rc-main.c if (!map) { map 121 drivers/media/rc/rc-main.c map = seek_rc_map(name); map 124 drivers/media/rc/rc-main.c if (!map) { map 129 drivers/media/rc/rc-main.c printk(KERN_INFO "Registered IR keymap %s\n", map->map.name); map 131 drivers/media/rc/rc-main.c return &map->map; map 135 drivers/media/rc/rc-main.c int rc_map_register(struct rc_map_list *map) map 138 drivers/media/rc/rc-main.c list_add_tail(&map->list, &rc_map_list); map 144 drivers/media/rc/rc-main.c void rc_map_unregister(struct rc_map_list *map) map 147 drivers/media/rc/rc-main.c list_del(&map->list); map 158 drivers/media/rc/rc-main.c .map = { map 94 drivers/media/tuners/qm1d1b0004.c const struct qm1d1b0004_cb_map *map; map 97 drivers/media/tuners/qm1d1b0004.c map = &cb_maps[i]; map 98 drivers/media/tuners/qm1d1b0004.c if (frequency < map->frequency) map 99 drivers/media/tuners/qm1d1b0004.c return map->cb; map 74 drivers/media/tuners/tda18271-fe.c struct tda18271_std_map_item *map, map 86 drivers/media/tuners/tda18271-fe.c regs[R_EP3] |= (map->agc_mode << 3) | map->std; map 98 drivers/media/tuners/tda18271-fe.c regs[R_EP4] |= (map->if_lvl << 2); map 102 drivers/media/tuners/tda18271-fe.c regs[R_EP4] |= map->fm_rfn << 7; map 106 drivers/media/tuners/tda18271-fe.c regs[R_EB22] |= map->rfagc_top; map 154 drivers/media/tuners/tda18271-fe.c N = map->if_freq * 1000 + freq; map 188 drivers/media/tuners/tda18271-fe.c if (map->fm_rfn) map 246 drivers/media/tuners/tda18271-fe.c struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state; map 269 drivers/media/tuners/tda18271-fe.c if ((0 == map[i].rf3) || (freq / 1000 < map[i].rf2)) { map 270 drivers/media/tuners/tda18271-fe.c approx = map[i].rf_a1 * (s32)(freq / 1000 - map[i].rf1) + map 271 drivers/media/tuners/tda18271-fe.c map[i].rf_b1 + rf_tab; map 273 drivers/media/tuners/tda18271-fe.c approx = map[i].rf_a2 * (s32)(freq / 1000 - map[i].rf2) + map 274 drivers/media/tuners/tda18271-fe.c map[i].rf_b2 + rf_tab; map 562 drivers/media/tuners/tda18271-fe.c struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state; map 579 drivers/media/tuners/tda18271-fe.c rf_default[RF1] = 1000 * map[i].rf1_def; map 580 drivers/media/tuners/tda18271-fe.c rf_default[RF2] = 1000 * map[i].rf2_def; map 581 drivers/media/tuners/tda18271-fe.c rf_default[RF3] = 1000 * map[i].rf3_def; map 604 drivers/media/tuners/tda18271-fe.c map[i].rf_a1 = 0; map 605 drivers/media/tuners/tda18271-fe.c map[i].rf_b1 = (prog_cal[RF1] - prog_tab[RF1]); map 606 
drivers/media/tuners/tda18271-fe.c map[i].rf1 = rf_freq[RF1] / 1000; map 612 drivers/media/tuners/tda18271-fe.c map[i].rf_a1 = (dividend / divisor); map 613 drivers/media/tuners/tda18271-fe.c map[i].rf2 = rf_freq[RF2] / 1000; map 619 drivers/media/tuners/tda18271-fe.c map[i].rf_a2 = (dividend / divisor); map 620 drivers/media/tuners/tda18271-fe.c map[i].rf_b2 = (prog_cal[RF2] - prog_tab[RF2]); map 621 drivers/media/tuners/tda18271-fe.c map[i].rf3 = rf_freq[RF3] / 1000; map 885 drivers/media/tuners/tda18271-fe.c struct tda18271_std_map_item *map, u32 freq, u32 bw) map 891 drivers/media/tuners/tda18271-fe.c freq, map->if_freq, bw, map->agc_mode, map->std); map 911 drivers/media/tuners/tda18271-fe.c ret = tda18271_channel_configuration(fe, map, freq, bw); map 928 drivers/media/tuners/tda18271-fe.c struct tda18271_std_map_item *map; map 935 drivers/media/tuners/tda18271-fe.c map = &std_map->atsc_6; map 942 drivers/media/tuners/tda18271-fe.c map = &std_map->dvbt_6; map 944 drivers/media/tuners/tda18271-fe.c map = &std_map->dvbt_7; map 946 drivers/media/tuners/tda18271-fe.c map = &std_map->dvbt_8; map 955 drivers/media/tuners/tda18271-fe.c map = &std_map->qam_6; map 957 drivers/media/tuners/tda18271-fe.c map = &std_map->qam_7; map 959 drivers/media/tuners/tda18271-fe.c map = &std_map->qam_8; map 971 drivers/media/tuners/tda18271-fe.c ret = tda18271_tune(fe, map, freq, bw); map 976 drivers/media/tuners/tda18271-fe.c priv->if_freq = map->if_freq; map 988 drivers/media/tuners/tda18271-fe.c struct tda18271_std_map_item *map; map 997 drivers/media/tuners/tda18271-fe.c map = &std_map->fm_radio; map 1000 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_mn; map 1003 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_b; map 1006 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_gh; map 1009 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_i; map 1012 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_dk; map 1015 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_l; map 1018 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_lc; map 1021 drivers/media/tuners/tda18271-fe.c map = &std_map->atv_i; map 1027 drivers/media/tuners/tda18271-fe.c ret = tda18271_tune(fe, map, freq, 0); map 1032 drivers/media/tuners/tda18271-fe.c priv->if_freq = map->if_freq; map 1077 drivers/media/tuners/tda18271-fe.c if (map->std_cfg.if_freq + \ map 1078 drivers/media/tuners/tda18271-fe.c map->std_cfg.agc_mode + map->std_cfg.std + \ map 1079 drivers/media/tuners/tda18271-fe.c map->std_cfg.if_lvl + map->std_cfg.rfagc_top > 0) { \ map 1081 drivers/media/tuners/tda18271-fe.c memcpy(&std->std_cfg, &map->std_cfg, \ map 1119 drivers/media/tuners/tda18271-fe.c struct tda18271_std_map *map) map 1124 drivers/media/tuners/tda18271-fe.c if (!map) map 1010 drivers/media/tuners/tda18271-maps.c struct tda18271_rf_tracking_filter_cal *map = priv->rf_cal_state; map 1013 drivers/media/tuners/tda18271-maps.c while ((map[i].rfmax * 1000) < *freq) { map 1016 drivers/media/tuners/tda18271-maps.c i, map[i].rfmax * 1000, *freq, map 1017 drivers/media/tuners/tda18271-maps.c map[i].rf1_def, map[i].rf2_def, map[i].rf3_def, map 1018 drivers/media/tuners/tda18271-maps.c map[i].rf1, map[i].rf2, map[i].rf3, map 1019 drivers/media/tuners/tda18271-maps.c map[i].rf_a1, map[i].rf_a2, map 1020 drivers/media/tuners/tda18271-maps.c map[i].rf_b1, map[i].rf_b2); map 1021 drivers/media/tuners/tda18271-maps.c if (map[i].rfmax == 0) map 1026 drivers/media/tuners/tda18271-maps.c *rf_band = map[i].rfband; map 1028 
drivers/media/tuners/tda18271-maps.c tda_map("(%d) rf_band = %02x\n", i, map[i].rfband); map 1056 drivers/media/tuners/tda18271-maps.c struct tda18271_pll_map *map = NULL; map 1065 drivers/media/tuners/tda18271-maps.c map = priv->maps->main_pll; map 1069 drivers/media/tuners/tda18271-maps.c map = priv->maps->cal_pll; map 1078 drivers/media/tuners/tda18271-maps.c if (!map) { map 1084 drivers/media/tuners/tda18271-maps.c while ((map[i].lomax * 1000) < *freq) { map 1085 drivers/media/tuners/tda18271-maps.c if (map[i + 1].lomax == 0) { map 1093 drivers/media/tuners/tda18271-maps.c *post_div = map[i].pd; map 1094 drivers/media/tuners/tda18271-maps.c *div = map[i].d; map 1107 drivers/media/tuners/tda18271-maps.c struct tda18271_map *map = NULL; map 1116 drivers/media/tuners/tda18271-maps.c map = priv->maps->bp_filter; map 1120 drivers/media/tuners/tda18271-maps.c map = priv->maps->rf_cal_kmco; map 1124 drivers/media/tuners/tda18271-maps.c map = priv->maps->rf_band; map 1128 drivers/media/tuners/tda18271-maps.c map = priv->maps->gain_taper; map 1132 drivers/media/tuners/tda18271-maps.c map = priv->maps->rf_cal; map 1136 drivers/media/tuners/tda18271-maps.c map = priv->maps->ir_measure; map 1140 drivers/media/tuners/tda18271-maps.c map = priv->maps->rf_cal_dc_over_dt; map 1149 drivers/media/tuners/tda18271-maps.c if (!map) { map 1155 drivers/media/tuners/tda18271-maps.c while ((map[i].rfmax * 1000) < *freq) { map 1156 drivers/media/tuners/tda18271-maps.c if (map[i + 1].rfmax == 0) { map 1164 drivers/media/tuners/tda18271-maps.c *val = map[i].val; map 861 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *map; map 872 drivers/media/usb/uvc/uvc_ctrl.c list_for_each_entry(map, &ctrl->info.mappings, list) { map 873 drivers/media/usb/uvc/uvc_ctrl.c if ((map->id == v4l2_id) && !next) { map 875 drivers/media/usb/uvc/uvc_ctrl.c *mapping = map; map 879 drivers/media/usb/uvc/uvc_ctrl.c if ((*mapping == NULL || (*mapping)->id > map->id) && map 880 drivers/media/usb/uvc/uvc_ctrl.c (map->id > v4l2_id) && next) { map 882 drivers/media/usb/uvc/uvc_ctrl.c *mapping = map; map 2052 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *map; map 2059 drivers/media/usb/uvc/uvc_ctrl.c map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL); map 2060 drivers/media/usb/uvc/uvc_ctrl.c if (map == NULL) map 2063 drivers/media/usb/uvc/uvc_ctrl.c INIT_LIST_HEAD(&map->ev_subs); map 2066 drivers/media/usb/uvc/uvc_ctrl.c map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL); map 2067 drivers/media/usb/uvc/uvc_ctrl.c if (map->menu_info == NULL) { map 2068 drivers/media/usb/uvc/uvc_ctrl.c kfree(map); map 2072 drivers/media/usb/uvc/uvc_ctrl.c if (map->get == NULL) map 2073 drivers/media/usb/uvc/uvc_ctrl.c map->get = uvc_get_le_value; map 2074 drivers/media/usb/uvc/uvc_ctrl.c if (map->set == NULL) map 2075 drivers/media/usb/uvc/uvc_ctrl.c map->set = uvc_set_le_value; map 2077 drivers/media/usb/uvc/uvc_ctrl.c list_add_tail(&map->list, &ctrl->info.mappings); map 2080 drivers/media/usb/uvc/uvc_ctrl.c map->name, ctrl->info.entity, ctrl->info.selector); map 2089 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *map; map 2141 drivers/media/usb/uvc/uvc_ctrl.c list_for_each_entry(map, &ctrl->info.mappings, list) { map 2142 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->id == map->id) { map 34 drivers/media/usb/uvc/uvc_v4l2.c struct uvc_control_mapping *map; map 38 drivers/media/usb/uvc/uvc_v4l2.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 39 drivers/media/usb/uvc/uvc_v4l2.c if (map == NULL) map 42 
drivers/media/usb/uvc/uvc_v4l2.c map->id = xmap->id; map 43 drivers/media/usb/uvc/uvc_v4l2.c memcpy(map->name, xmap->name, sizeof(map->name)); map 44 drivers/media/usb/uvc/uvc_v4l2.c memcpy(map->entity, xmap->entity, sizeof(map->entity)); map 45 drivers/media/usb/uvc/uvc_v4l2.c map->selector = xmap->selector; map 46 drivers/media/usb/uvc/uvc_v4l2.c map->size = xmap->size; map 47 drivers/media/usb/uvc/uvc_v4l2.c map->offset = xmap->offset; map 48 drivers/media/usb/uvc/uvc_v4l2.c map->v4l2_type = xmap->v4l2_type; map 49 drivers/media/usb/uvc/uvc_v4l2.c map->data_type = xmap->data_type; map 67 drivers/media/usb/uvc/uvc_v4l2.c size = xmap->menu_count * sizeof(*map->menu_info); map 68 drivers/media/usb/uvc/uvc_v4l2.c map->menu_info = memdup_user(xmap->menu_info, size); map 69 drivers/media/usb/uvc/uvc_v4l2.c if (IS_ERR(map->menu_info)) { map 70 drivers/media/usb/uvc/uvc_v4l2.c ret = PTR_ERR(map->menu_info); map 74 drivers/media/usb/uvc/uvc_v4l2.c map->menu_count = xmap->menu_count; map 84 drivers/media/usb/uvc/uvc_v4l2.c ret = uvc_ctrl_add_mapping(chain, map); map 86 drivers/media/usb/uvc/uvc_v4l2.c kfree(map->menu_info); map 88 drivers/media/usb/uvc/uvc_v4l2.c kfree(map); map 205 drivers/media/v4l2-core/videobuf-core.c if (q->bufs[i]->map) { map 245 drivers/media/v4l2-core/videobuf-core.c if (q->bufs[i] && q->bufs[i]->map) { map 345 drivers/media/v4l2-core/videobuf-core.c if (vb->map) map 65 drivers/media/v4l2-core/videobuf-dma-contig.c struct videobuf_mapping *map = vma->vm_private_data; map 67 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map 68 drivers/media/v4l2-core/videobuf-dma-contig.c map, map->count, vma->vm_start, vma->vm_end); map 70 drivers/media/v4l2-core/videobuf-dma-contig.c map->count++; map 75 drivers/media/v4l2-core/videobuf-dma-contig.c struct videobuf_mapping *map = vma->vm_private_data; map 76 drivers/media/v4l2-core/videobuf-dma-contig.c struct videobuf_queue *q = map->q; map 80 drivers/media/v4l2-core/videobuf-dma-contig.c map, map->count, vma->vm_start, vma->vm_end); map 82 drivers/media/v4l2-core/videobuf-dma-contig.c map->count--; map 83 drivers/media/v4l2-core/videobuf-dma-contig.c if (0 == map->count) { map 86 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "munmap %p q=%p\n", map, q); map 97 drivers/media/v4l2-core/videobuf-dma-contig.c if (q->bufs[i]->map != map) map 120 drivers/media/v4l2-core/videobuf-dma-contig.c q->bufs[i]->map = NULL; map 124 drivers/media/v4l2-core/videobuf-dma-contig.c kfree(map); map 279 drivers/media/v4l2-core/videobuf-dma-contig.c struct videobuf_mapping *map; map 285 drivers/media/v4l2-core/videobuf-dma-contig.c map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); map 286 drivers/media/v4l2-core/videobuf-dma-contig.c if (!map) map 289 drivers/media/v4l2-core/videobuf-dma-contig.c buf->map = map; map 290 drivers/media/v4l2-core/videobuf-dma-contig.c map->q = q; map 324 drivers/media/v4l2-core/videobuf-dma-contig.c vma->vm_private_data = map; map 327 drivers/media/v4l2-core/videobuf-dma-contig.c map, q, vma->vm_start, vma->vm_end, map 335 drivers/media/v4l2-core/videobuf-dma-contig.c kfree(map); map 389 drivers/media/v4l2-core/videobuf-dma-sg.c struct videobuf_mapping *map = vma->vm_private_data; map 391 drivers/media/v4l2-core/videobuf-dma-sg.c dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map, map 392 drivers/media/v4l2-core/videobuf-dma-sg.c map->count, vma->vm_start, vma->vm_end); map 394 drivers/media/v4l2-core/videobuf-dma-sg.c map->count++; 
map 399 drivers/media/v4l2-core/videobuf-dma-sg.c struct videobuf_mapping *map = vma->vm_private_data; map 400 drivers/media/v4l2-core/videobuf-dma-sg.c struct videobuf_queue *q = map->q; map 404 drivers/media/v4l2-core/videobuf-dma-sg.c dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map, map 405 drivers/media/v4l2-core/videobuf-dma-sg.c map->count, vma->vm_start, vma->vm_end); map 407 drivers/media/v4l2-core/videobuf-dma-sg.c map->count--; map 408 drivers/media/v4l2-core/videobuf-dma-sg.c if (0 == map->count) { map 409 drivers/media/v4l2-core/videobuf-dma-sg.c dprintk(1, "munmap %p q=%p\n", map, q); map 420 drivers/media/v4l2-core/videobuf-dma-sg.c if (q->bufs[i]->map != map) map 422 drivers/media/v4l2-core/videobuf-dma-sg.c q->bufs[i]->map = NULL; map 427 drivers/media/v4l2-core/videobuf-dma-sg.c kfree(map); map 595 drivers/media/v4l2-core/videobuf-dma-sg.c struct videobuf_mapping *map; map 623 drivers/media/v4l2-core/videobuf-dma-sg.c map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); map 624 drivers/media/v4l2-core/videobuf-dma-sg.c if (NULL == map) map 631 drivers/media/v4l2-core/videobuf-dma-sg.c q->bufs[i]->map = map; map 636 drivers/media/v4l2-core/videobuf-dma-sg.c map->count = 1; map 637 drivers/media/v4l2-core/videobuf-dma-sg.c map->q = q; map 641 drivers/media/v4l2-core/videobuf-dma-sg.c vma->vm_private_data = map; map 643 drivers/media/v4l2-core/videobuf-dma-sg.c map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last); map 53 drivers/media/v4l2-core/videobuf-vmalloc.c struct videobuf_mapping *map = vma->vm_private_data; map 55 drivers/media/v4l2-core/videobuf-vmalloc.c dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map, map 56 drivers/media/v4l2-core/videobuf-vmalloc.c map->count, vma->vm_start, vma->vm_end); map 58 drivers/media/v4l2-core/videobuf-vmalloc.c map->count++; map 63 drivers/media/v4l2-core/videobuf-vmalloc.c struct videobuf_mapping *map = vma->vm_private_data; map 64 drivers/media/v4l2-core/videobuf-vmalloc.c struct videobuf_queue *q = map->q; map 67 drivers/media/v4l2-core/videobuf-vmalloc.c dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map, map 68 drivers/media/v4l2-core/videobuf-vmalloc.c map->count, vma->vm_start, vma->vm_end); map 70 drivers/media/v4l2-core/videobuf-vmalloc.c map->count--; map 71 drivers/media/v4l2-core/videobuf-vmalloc.c if (0 == map->count) { map 74 drivers/media/v4l2-core/videobuf-vmalloc.c dprintk(1, "munmap %p q=%p\n", map, q); map 85 drivers/media/v4l2-core/videobuf-vmalloc.c if (q->bufs[i]->map != map) map 108 drivers/media/v4l2-core/videobuf-vmalloc.c q->bufs[i]->map = NULL; map 112 drivers/media/v4l2-core/videobuf-vmalloc.c kfree(map); map 214 drivers/media/v4l2-core/videobuf-vmalloc.c struct videobuf_mapping *map; map 220 drivers/media/v4l2-core/videobuf-vmalloc.c map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); map 221 drivers/media/v4l2-core/videobuf-vmalloc.c if (NULL == map) map 224 drivers/media/v4l2-core/videobuf-vmalloc.c buf->map = map; map 225 drivers/media/v4l2-core/videobuf-vmalloc.c map->q = q; map 251 drivers/media/v4l2-core/videobuf-vmalloc.c vma->vm_private_data = map; map 254 drivers/media/v4l2-core/videobuf-vmalloc.c map, q, vma->vm_start, vma->vm_end, map 264 drivers/media/v4l2-core/videobuf-vmalloc.c kfree(map); map 1366 drivers/memory/omap-gpmc.c .map = gpmc_irq_map, map 252 drivers/mfd/88pm800.c struct regmap *map = subchip->regmap_gpadc; map 255 drivers/mfd/88pm800.c if (!map) { map 264 drivers/mfd/88pm800.c ret = regmap_update_bits(map, map 277 
drivers/mfd/88pm800.c ret = regmap_update_bits(map, PM800_GPADC_MEAS_EN1, map 281 drivers/mfd/88pm800.c ret = regmap_update_bits(map, PM800_GPADC_MEAS_EN2, map 303 drivers/mfd/88pm800.c ret = regmap_update_bits(map, PM800_GP_BIAS_ENA1, mask, data); map 368 drivers/mfd/88pm800.c struct regmap *map = chip->regmap; map 372 drivers/mfd/88pm800.c if (!map || !chip->irq) { map 386 drivers/mfd/88pm800.c ret = regmap_update_bits(map, PM800_WAKEUP2, mask, data); map 138 drivers/mfd/88pm805.c struct regmap *map = chip->regmap; map 142 drivers/mfd/88pm805.c if (!map || !chip->irq) { map 156 drivers/mfd/88pm805.c ret = regmap_update_bits(map, PM805_INT_STATUS0, mask, data); map 193 drivers/mfd/88pm805.c struct regmap *map = chip->regmap; map 195 drivers/mfd/88pm805.c if (!map) { map 54 drivers/mfd/88pm80x.c struct regmap *map; map 63 drivers/mfd/88pm80x.c map = devm_regmap_init_i2c(client, &pm80x_regmap_config); map 64 drivers/mfd/88pm80x.c if (IS_ERR(map)) { map 65 drivers/mfd/88pm80x.c ret = PTR_ERR(map); map 72 drivers/mfd/88pm80x.c chip->regmap = map; map 563 drivers/mfd/88pm860x-core.c .map = pm860x_irq_domain_map, map 18 drivers/mfd/88pm860x-i2c.c struct regmap *map = (i2c == chip->client) ? chip->regmap map 23 drivers/mfd/88pm860x-i2c.c ret = regmap_read(map, reg, &data); map 35 drivers/mfd/88pm860x-i2c.c struct regmap *map = (i2c == chip->client) ? chip->regmap map 39 drivers/mfd/88pm860x-i2c.c ret = regmap_write(map, reg, data); map 48 drivers/mfd/88pm860x-i2c.c struct regmap *map = (i2c == chip->client) ? chip->regmap map 52 drivers/mfd/88pm860x-i2c.c ret = regmap_raw_read(map, reg, buf, count); map 61 drivers/mfd/88pm860x-i2c.c struct regmap *map = (i2c == chip->client) ? chip->regmap map 65 drivers/mfd/88pm860x-i2c.c ret = regmap_raw_write(map, reg, buf, count); map 74 drivers/mfd/88pm860x-i2c.c struct regmap *map = (i2c == chip->client) ? 
chip->regmap map 78 drivers/mfd/88pm860x-i2c.c ret = regmap_update_bits(map, reg, mask, data); map 574 drivers/mfd/ab8500-core.c .map = ab8500_irq_map, map 202 drivers/mfd/arizona-irq.c .map = arizona_irq_map, map 2672 drivers/mfd/db8500-prcmu.c .map = db8500_irq_map, map 60 drivers/mfd/fsl-imx25-tsadc.c .map = mx25_tsadc_domain_map, map 81 drivers/mfd/hi655x-pmic.c static void hi655x_local_irq_clear(struct regmap *map) map 85 drivers/mfd/hi655x-pmic.c regmap_write(map, HI655X_ANA_IRQM_BASE, HI655X_IRQ_CLR); map 87 drivers/mfd/hi655x-pmic.c regmap_write(map, HI655X_IRQ_STAT_BASE + i * HI655X_STRIDE, map 146 drivers/mfd/lp8788-irq.c .map = lp8788_irq_map, map 157 drivers/mfd/max77650.c struct regmap *map; map 161 drivers/mfd/max77650.c map = devm_regmap_init_i2c(i2c, &max77650_regmap_config); map 162 drivers/mfd/max77650.c if (IS_ERR(map)) { map 164 drivers/mfd/max77650.c return PTR_ERR(map); map 167 drivers/mfd/max77650.c rv = regmap_read(map, MAX77650_REG_CID, &val); map 191 drivers/mfd/max77650.c rv = regmap_update_bits(map, map 200 drivers/mfd/max77650.c rv = devm_regmap_add_irq_chip(dev, map, i2c->irq, map 656 drivers/mfd/max8925-core.c .map = max8925_irq_domain_map, map 289 drivers/mfd/max8997-irq.c .map = max8997_irq_domain_map, map 206 drivers/mfd/max8998-irq.c .map = max8998_irq_domain_map, map 128 drivers/mfd/mt6397-irq.c .map = mt6397_irq_domain_map, map 59 drivers/mfd/qcom-spmi-pmic.c static void pmic_spmi_show_revid(struct regmap *map, struct device *dev) map 65 drivers/mfd/qcom-spmi-pmic.c ret = regmap_read(map, PMIC_TYPE, &type); map 72 drivers/mfd/qcom-spmi-pmic.c ret = regmap_read(map, PMIC_SUBTYPE, &subtype); map 84 drivers/mfd/qcom-spmi-pmic.c ret = regmap_read(map, PMIC_REV2, &rev2); map 88 drivers/mfd/qcom-spmi-pmic.c ret = regmap_read(map, PMIC_REV3, &minor); map 92 drivers/mfd/qcom-spmi-pmic.c ret = regmap_read(map, PMIC_REV4, &major); map 122 drivers/mfd/stmfx.c ret = regmap_read(stmfx->map, STMFX_REG_SYS_CTRL, &sys_ctrl); map 154 drivers/mfd/stmfx.c return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, mask); map 162 drivers/mfd/stmfx.c return regmap_update_bits(stmfx->map, STMFX_REG_SYS_CTRL, mask, 0); map 177 drivers/mfd/stmfx.c regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, stmfx->irq_src); map 211 drivers/mfd/stmfx.c ret = regmap_read(stmfx->map, STMFX_REG_IRQ_PENDING, &pending); map 221 drivers/mfd/stmfx.c ret = regmap_write(stmfx->map, STMFX_REG_IRQ_ACK, ack); map 251 drivers/mfd/stmfx.c .map = stmfx_irq_map, map 288 drivers/mfd/stmfx.c ret = regmap_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, irqoutpin); map 306 drivers/mfd/stmfx.c ret = regmap_write(stmfx->map, STMFX_REG_SYS_CTRL, map 342 drivers/mfd/stmfx.c ret = regmap_read(stmfx->map, STMFX_REG_CHIP_ID, &id); map 365 drivers/mfd/stmfx.c ret = regmap_bulk_read(stmfx->map, STMFX_REG_FW_VERSION_MSB, map 394 drivers/mfd/stmfx.c regmap_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, 0); map 395 drivers/mfd/stmfx.c regmap_write(stmfx->map, STMFX_REG_SYS_CTRL, 0); map 418 drivers/mfd/stmfx.c stmfx->map = devm_regmap_init_i2c(client, &stmfx_regmap_config); map 419 drivers/mfd/stmfx.c if (IS_ERR(stmfx->map)) { map 420 drivers/mfd/stmfx.c ret = PTR_ERR(stmfx->map); map 473 drivers/mfd/stmfx.c ret = regmap_raw_read(stmfx->map, STMFX_REG_SYS_CTRL, map 478 drivers/mfd/stmfx.c ret = regmap_raw_read(stmfx->map, STMFX_REG_IRQ_OUT_PIN, map 504 drivers/mfd/stmfx.c ret = regmap_raw_write(stmfx->map, STMFX_REG_SYS_CTRL, map 509 drivers/mfd/stmfx.c ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_OUT_PIN, map 515 
drivers/mfd/stmfx.c ret = regmap_raw_write(stmfx->map, STMFX_REG_IRQ_SRC_EN, map 1214 drivers/mfd/stmpe.c .map = stmpe_irq_map, map 52 drivers/mfd/stw481x.c ret = regmap_write(stw481x->map, STW_PCTL_REG_HI, msb); map 55 drivers/mfd/stw481x.c ret = regmap_write(stw481x->map, STW_PCTL_REG_LO, lsb); map 58 drivers/mfd/stw481x.c ret = regmap_read(stw481x->map, STW_PCTL_REG_HI, &val); map 62 drivers/mfd/stw481x.c ret = regmap_read(stw481x->map, STW_PCTL_REG_LO, &val); map 89 drivers/mfd/stw481x.c ret = regmap_read(stw481x->map, STW_CONF1, &val); map 132 drivers/mfd/stw481x.c ret = regmap_read(stw481x->map, STW_CONF2, &val); map 149 drivers/mfd/stw481x.c ret = regmap_read(stw481x->map, STW_VCORE_SLEEP, &val); map 189 drivers/mfd/stw481x.c stw481x->map = devm_regmap_init_i2c(client, &stw481x_regmap_config); map 190 drivers/mfd/stw481x.c if (IS_ERR(stw481x->map)) { map 191 drivers/mfd/stw481x.c ret = PTR_ERR(stw481x->map); map 230 drivers/mfd/tc3589x.c .map = tc3589x_irq_map, map 155 drivers/mfd/tps65217.c .map = tps65217_irq_map, map 305 drivers/mfd/tps6586x.c .map = tps6586x_irq_map, map 354 drivers/mfd/twl6030-irq.c .map = twl6030_irq_map, map 556 drivers/mfd/wm831x-irq.c .map = wm831x_irq_map, map 176 drivers/mfd/wm8994-irq.c .map = wm8994_edge_irq_map, map 210 drivers/misc/fastrpc.c struct fastrpc_map *map; map 212 drivers/misc/fastrpc.c map = container_of(ref, struct fastrpc_map, refcount); map 214 drivers/misc/fastrpc.c if (map->table) { map 215 drivers/misc/fastrpc.c dma_buf_unmap_attachment(map->attach, map->table, map 217 drivers/misc/fastrpc.c dma_buf_detach(map->buf, map->attach); map 218 drivers/misc/fastrpc.c dma_buf_put(map->buf); map 221 drivers/misc/fastrpc.c kfree(map); map 224 drivers/misc/fastrpc.c static void fastrpc_map_put(struct fastrpc_map *map) map 226 drivers/misc/fastrpc.c if (map) map 227 drivers/misc/fastrpc.c kref_put(&map->refcount, fastrpc_free_map); map 230 drivers/misc/fastrpc.c static void fastrpc_map_get(struct fastrpc_map *map) map 232 drivers/misc/fastrpc.c if (map) map 233 drivers/misc/fastrpc.c kref_get(&map->refcount); map 239 drivers/misc/fastrpc.c struct fastrpc_map *map = NULL; map 242 drivers/misc/fastrpc.c list_for_each_entry(map, &fl->maps, node) { map 243 drivers/misc/fastrpc.c if (map->fd == fd) { map 244 drivers/misc/fastrpc.c fastrpc_map_get(map); map 245 drivers/misc/fastrpc.c *ppmap = map; map 588 drivers/misc/fastrpc.c .map = fastrpc_kmap, map 597 drivers/misc/fastrpc.c struct fastrpc_map *map = NULL; map 603 drivers/misc/fastrpc.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 604 drivers/misc/fastrpc.c if (!map) map 607 drivers/misc/fastrpc.c INIT_LIST_HEAD(&map->node); map 608 drivers/misc/fastrpc.c map->fl = fl; map 609 drivers/misc/fastrpc.c map->fd = fd; map 610 drivers/misc/fastrpc.c map->buf = dma_buf_get(fd); map 611 drivers/misc/fastrpc.c if (IS_ERR(map->buf)) { map 612 drivers/misc/fastrpc.c err = PTR_ERR(map->buf); map 616 drivers/misc/fastrpc.c map->attach = dma_buf_attach(map->buf, sess->dev); map 617 drivers/misc/fastrpc.c if (IS_ERR(map->attach)) { map 619 drivers/misc/fastrpc.c err = PTR_ERR(map->attach); map 623 drivers/misc/fastrpc.c map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL); map 624 drivers/misc/fastrpc.c if (IS_ERR(map->table)) { map 625 drivers/misc/fastrpc.c err = PTR_ERR(map->table); map 629 drivers/misc/fastrpc.c map->phys = sg_dma_address(map->table->sgl); map 630 drivers/misc/fastrpc.c map->phys += ((u64)fl->sctx->sid << 32); map 631 drivers/misc/fastrpc.c map->size = len; map 632 
drivers/misc/fastrpc.c map->va = sg_virt(map->table->sgl); map 633 drivers/misc/fastrpc.c map->len = len; map 634 drivers/misc/fastrpc.c kref_init(&map->refcount); map 637 drivers/misc/fastrpc.c list_add_tail(&map->node, &fl->maps); map 639 drivers/misc/fastrpc.c *ppmap = map; map 644 drivers/misc/fastrpc.c dma_buf_detach(map->buf, map->attach); map 646 drivers/misc/fastrpc.c dma_buf_put(map->buf); map 648 drivers/misc/fastrpc.c kfree(map); map 975 drivers/misc/fastrpc.c struct fastrpc_map *map = NULL; map 1012 drivers/misc/fastrpc.c err = fastrpc_map_create(fl, init.filefd, init.filelen, &map); map 1069 drivers/misc/fastrpc.c if (map) { map 1071 drivers/misc/fastrpc.c list_del(&map->node); map 1073 drivers/misc/fastrpc.c fastrpc_map_put(map); map 1133 drivers/misc/fastrpc.c struct fastrpc_map *map, *m; map 1150 drivers/misc/fastrpc.c list_for_each_entry_safe(map, m, &fl->maps, node) { map 1151 drivers/misc/fastrpc.c list_del(&map->node); map 1152 drivers/misc/fastrpc.c fastrpc_map_put(map); map 31 drivers/misc/hmc6352.c const char *map) map 40 drivers/misc/hmc6352.c if (val >= strlen(map)) map 42 drivers/misc/hmc6352.c val = array_index_nospec(val, strlen(map)); map 44 drivers/misc/hmc6352.c ret = compass_command(c, map[val]); map 504 drivers/misc/sgi-gru/grutables.h #define for_each_gru_in_bitmap(gid, map) \ map 505 drivers/misc/sgi-gru/grutables.h for_each_set_bit((gid), (map), GRU_MAX_GRUS) map 523 drivers/misc/sgi-gru/grutables.h #define for_each_cbr_in_tfm(i, map) \ map 524 drivers/misc/sgi-gru/grutables.h for_each_set_bit((i), (map), GRU_NUM_CBE) map 527 drivers/misc/sgi-gru/grutables.h #define for_each_cbr_in_allocation_map(i, map, k) \ map 528 drivers/misc/sgi-gru/grutables.h for_each_set_bit((k), (map), GRU_CBR_AU) \ map 533 drivers/misc/sgi-gru/grutables.h #define for_each_dsr_in_allocation_map(i, map, k) \ map 534 drivers/misc/sgi-gru/grutables.h for_each_set_bit((k), (const unsigned long *)(map), GRU_DSR_AU) \ map 97 drivers/mtd/chips/cfi_cmdset_0001.c static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode); map 98 drivers/mtd/chips/cfi_cmdset_0001.c static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); map 99 drivers/mtd/chips/cfi_cmdset_0001.c static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); map 172 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 173 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 212 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 213 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 224 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 225 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 237 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 238 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 250 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 251 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 259 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 260 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 279 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 280 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 287 
drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x60), 0); map 288 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x04), 0); map 299 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 300 drivers/mtd/chips/cfi_cmdset_0001.c if (!mtd->_point && map_is_linear(map)) { map 308 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 309 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 322 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 323 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 391 drivers/mtd/chips/cfi_cmdset_0001.c read_pri_intelext(struct map_info *map, __u16 adr) map 393 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 399 drivers/mtd/chips/cfi_cmdset_0001.c extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp"); map 479 drivers/mtd/chips/cfi_cmdset_0001.c struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) map 481 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 488 drivers/mtd/chips/cfi_cmdset_0001.c mtd->priv = map; map 502 drivers/mtd/chips/cfi_cmdset_0001.c mtd->name = map->name; map 517 drivers/mtd/chips/cfi_cmdset_0001.c extp = read_pri_intelext(map, adr); map 588 drivers/mtd/chips/cfi_cmdset_0001.c map->fldrv = &cfi_intelext_chipdrv; map 592 drivers/mtd/chips/cfi_cmdset_0001.c struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001"))); map 593 drivers/mtd/chips/cfi_cmdset_0001.c struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001"))); map 600 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 601 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 681 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 739 drivers/mtd/chips/cfi_cmdset_0001.c map->name, mtd->writesize, map 792 drivers/mtd/chips/cfi_cmdset_0001.c map->name, cfi->numchips, cfi->interleave, map 795 drivers/mtd/chips/cfi_cmdset_0001.c map->fldrv_priv = newcfi; map 806 drivers/mtd/chips/cfi_cmdset_0001.c static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode) map 809 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 822 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, adr); map 823 drivers/mtd/chips/cfi_cmdset_0001.c if (map_word_andequal(map, status, status_OK, status_OK)) map 828 drivers/mtd/chips/cfi_cmdset_0001.c if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS)) map 861 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xB0), chip->in_progress_block_addr); map 868 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), chip->in_progress_block_addr); map 873 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, chip->in_progress_block_addr); map 874 drivers/mtd/chips/cfi_cmdset_0001.c if (map_word_andequal(map, status, status_OK, status_OK)) map 880 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, adr); map 882 drivers/mtd/chips/cfi_cmdset_0001.c "suspended: status = 0x%lx\n", map->name, status.x[0]); map 923 drivers/mtd/chips/cfi_cmdset_0001.c static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) map 969 drivers/mtd/chips/cfi_cmdset_0001.c ret = chip_ready(map, contender, contender->start, mode); map 985 
drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, contender, contender->start); map 1012 drivers/mtd/chips/cfi_cmdset_0001.c ret = chip_ready(map, chip, adr, mode); map 1019 drivers/mtd/chips/cfi_cmdset_0001.c static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) map 1021 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1035 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, loaner, loaner->start); map 1069 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xd0), chip->in_progress_block_addr); map 1070 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), chip->in_progress_block_addr); map 1085 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate); map 1103 drivers/mtd/chips/cfi_cmdset_0001.c static void xip_disable(struct map_info *map, struct flchip *chip, map 1107 drivers/mtd/chips/cfi_cmdset_0001.c (void) map_read(map, adr); /* ensure mmu mapping is up to date */ map 1111 drivers/mtd/chips/cfi_cmdset_0001.c static void __xipram xip_enable(struct map_info *map, struct flchip *chip, map 1114 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1116 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xff), adr); map 1119 drivers/mtd/chips/cfi_cmdset_0001.c (void) map_read(map, adr); map 1138 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map, struct flchip *chip, map 1141 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1170 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xb0), adr); map 1171 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), adr); map 1183 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, adr); map 1184 drivers/mtd/chips/cfi_cmdset_0001.c } while (!map_word_andequal(map, status, OK, OK)); map 1189 drivers/mtd/chips/cfi_cmdset_0001.c if (!map_word_bitsset(map, status, CMD(0x40))) map 1194 drivers/mtd/chips/cfi_cmdset_0001.c if (!map_word_bitsset(map, status, CMD(0x04))) map 1200 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xff), adr); map 1201 drivers/mtd/chips/cfi_cmdset_0001.c (void) map_read(map, adr); map 1228 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xd0), adr); map 1229 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), adr); map 1240 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, adr); map 1242 drivers/mtd/chips/cfi_cmdset_0001.c } while (!map_word_andequal(map, status, OK, OK) map 1255 drivers/mtd/chips/cfi_cmdset_0001.c #define XIP_INVAL_CACHED_RANGE(map, from, size) \ map 1256 drivers/mtd/chips/cfi_cmdset_0001.c INVALIDATE_CACHED_RANGE(map, from, size) map 1258 drivers/mtd/chips/cfi_cmdset_0001.c #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \ map 1259 drivers/mtd/chips/cfi_cmdset_0001.c xip_wait_for_operation(map, chip, cmd_adr, usec_max) map 1263 drivers/mtd/chips/cfi_cmdset_0001.c #define xip_disable(map, chip, adr) map 1264 drivers/mtd/chips/cfi_cmdset_0001.c #define xip_enable(map, chip, adr) map 1269 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map, struct flchip *chip, map 1273 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1280 drivers/mtd/chips/cfi_cmdset_0001.c INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); map 1302 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, cmd_adr); map 1303 drivers/mtd/chips/cfi_cmdset_0001.c if (map_word_andequal(map, status, status_OK, 
status_OK)) map 1317 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), cmd_adr); map 1348 drivers/mtd/chips/cfi_cmdset_0001.c #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \ map 1349 drivers/mtd/chips/cfi_cmdset_0001.c INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max); map 1352 drivers/mtd/chips/cfi_cmdset_0001.c static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) map 1355 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1361 drivers/mtd/chips/cfi_cmdset_0001.c cmd_addr = adr & ~(map_bankwidth(map)-1); map 1365 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, cmd_addr, FL_POINT); map 1369 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xff), cmd_addr); map 1382 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 1383 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1388 drivers/mtd/chips/cfi_cmdset_0001.c if (!map->virt) map 1397 drivers/mtd/chips/cfi_cmdset_0001.c *virt = map->virt + cfi->chips[chipnum].start + ofs; map 1399 drivers/mtd/chips/cfi_cmdset_0001.c *phys = map->phys + cfi->chips[chipnum].start + ofs; map 1418 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen); map 1434 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 1435 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1464 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name); map 1468 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, chip->start); map 1479 drivers/mtd/chips/cfi_cmdset_0001.c static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) map 1482 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1488 drivers/mtd/chips/cfi_cmdset_0001.c cmd_addr = adr & ~(map_bankwidth(map)-1); map 1491 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, cmd_addr, FL_READY); map 1498 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xff), cmd_addr); map 1503 drivers/mtd/chips/cfi_cmdset_0001.c map_copy_from(map, buf, adr, len); map 1505 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, cmd_addr); map 1513 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 1514 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1534 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); map 1548 drivers/mtd/chips/cfi_cmdset_0001.c static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, map 1551 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1569 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, adr, mode); map 1575 drivers/mtd/chips/cfi_cmdset_0001.c XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); map 1576 drivers/mtd/chips/cfi_cmdset_0001.c ENABLE_VPP(map); map 1577 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, adr); map 1578 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, write_cmd, adr); map 1579 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, datum, adr); map 1582 drivers/mtd/chips/cfi_cmdset_0001.c ret = INVAL_CACHE_AND_WAIT(map, chip, adr, map 1583 drivers/mtd/chips/cfi_cmdset_0001.c adr, map_bankwidth(map), map 1587 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, 
adr); map 1588 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: word write error (status timeout)\n", map->name); map 1593 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, adr); map 1594 drivers/mtd/chips/cfi_cmdset_0001.c if (map_word_bitsset(map, status, CMD(0x1a))) { map 1598 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x50), adr); map 1599 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), adr); map 1600 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, adr); map 1605 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name); map 1608 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus); map 1615 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, adr); map 1616 drivers/mtd/chips/cfi_cmdset_0001.c out: DISABLE_VPP(map); map 1617 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, adr); map 1625 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 1626 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1635 drivers/mtd/chips/cfi_cmdset_0001.c if (ofs & (map_bankwidth(map)-1)) { map 1636 drivers/mtd/chips/cfi_cmdset_0001.c unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); map 1641 drivers/mtd/chips/cfi_cmdset_0001.c n = min_t(int, len, map_bankwidth(map)-gap); map 1642 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_ff(map); map 1643 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_load_partial(map, datum, buf, gap, n); map 1645 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_write_oneword(map, &cfi->chips[chipnum], map 1663 drivers/mtd/chips/cfi_cmdset_0001.c while(len >= map_bankwidth(map)) { map 1664 drivers/mtd/chips/cfi_cmdset_0001.c map_word datum = map_word_load(map, buf); map 1666 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_write_oneword(map, &cfi->chips[chipnum], map 1671 drivers/mtd/chips/cfi_cmdset_0001.c ofs += map_bankwidth(map); map 1672 drivers/mtd/chips/cfi_cmdset_0001.c buf += map_bankwidth(map); map 1673 drivers/mtd/chips/cfi_cmdset_0001.c (*retlen) += map_bankwidth(map); map 1674 drivers/mtd/chips/cfi_cmdset_0001.c len -= map_bankwidth(map); map 1684 drivers/mtd/chips/cfi_cmdset_0001.c if (len & (map_bankwidth(map)-1)) { map 1687 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_ff(map); map 1688 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_load_partial(map, datum, buf, 0, len); map 1690 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_write_oneword(map, &cfi->chips[chipnum], map 1702 drivers/mtd/chips/cfi_cmdset_0001.c static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, map 1706 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1730 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, cmd_adr, FL_WRITING); map 1736 drivers/mtd/chips/cfi_cmdset_0001.c XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len); map 1737 drivers/mtd/chips/cfi_cmdset_0001.c ENABLE_VPP(map); map 1738 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, cmd_adr); map 1745 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), cmd_adr); map 1748 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, cmd_adr); map 1749 drivers/mtd/chips/cfi_cmdset_0001.c if (map_word_bitsset(map, status, CMD(0x30))) { map 1750 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, cmd_adr); map 1752 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, cmd_adr); map 1753 
drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x50), cmd_adr); map 1754 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), cmd_adr); map 1758 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, write_cmd, cmd_adr); map 1759 drivers/mtd/chips/cfi_cmdset_0001.c ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0); map 1762 drivers/mtd/chips/cfi_cmdset_0001.c map_word Xstatus = map_read(map, cmd_adr); map 1763 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), cmd_adr); map 1765 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, cmd_adr); map 1766 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x50), cmd_adr); map 1767 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), cmd_adr); map 1768 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, cmd_adr); map 1770 drivers/mtd/chips/cfi_cmdset_0001.c map->name, Xstatus.x[0], status.x[0]); map 1775 drivers/mtd/chips/cfi_cmdset_0001.c word_gap = (-adr & (map_bankwidth(map)-1)); map 1776 drivers/mtd/chips/cfi_cmdset_0001.c words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map)); map 1780 drivers/mtd/chips/cfi_cmdset_0001.c word_gap = map_bankwidth(map) - word_gap; map 1782 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_ff(map); map 1786 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(words), cmd_adr ); map 1792 drivers/mtd/chips/cfi_cmdset_0001.c int n = map_bankwidth(map) - word_gap; map 1798 drivers/mtd/chips/cfi_cmdset_0001.c if (!word_gap && len < map_bankwidth(map)) map 1799 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_ff(map); map 1801 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_load_partial(map, datum, map 1807 drivers/mtd/chips/cfi_cmdset_0001.c if (!len || word_gap == map_bankwidth(map)) { map 1808 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, datum, adr); map 1809 drivers/mtd/chips/cfi_cmdset_0001.c adr += map_bankwidth(map); map 1823 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xd0), cmd_adr); map 1826 drivers/mtd/chips/cfi_cmdset_0001.c ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, map 1831 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), cmd_adr); map 1833 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, cmd_adr); map 1834 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name); map 1839 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, cmd_adr); map 1840 drivers/mtd/chips/cfi_cmdset_0001.c if (map_word_bitsset(map, status, CMD(0x1a))) { map 1844 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x50), cmd_adr); map 1845 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), cmd_adr); map 1846 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, cmd_adr); map 1851 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name); map 1854 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus); map 1861 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, cmd_adr); map 1862 drivers/mtd/chips/cfi_cmdset_0001.c out: DISABLE_VPP(map); map 1863 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, cmd_adr); map 1871 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 1872 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1895 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_write_buffer(map, &cfi->chips[chipnum], map 1931 drivers/mtd/chips/cfi_cmdset_0001.c static int __xipram 
do_erase_oneblock(struct map_info *map, struct flchip *chip, map 1934 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 1943 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, adr, FL_ERASING); map 1949 drivers/mtd/chips/cfi_cmdset_0001.c XIP_INVAL_CACHED_RANGE(map, adr, len); map 1950 drivers/mtd/chips/cfi_cmdset_0001.c ENABLE_VPP(map); map 1951 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, adr); map 1954 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x50), adr); map 1957 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x20), adr); map 1958 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xD0), adr); map 1964 drivers/mtd/chips/cfi_cmdset_0001.c ret = INVAL_CACHE_AND_WAIT(map, chip, adr, map 1969 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), adr); map 1971 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, adr); map 1972 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name); map 1977 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), adr); map 1979 drivers/mtd/chips/cfi_cmdset_0001.c status = map_read(map, adr); map 1982 drivers/mtd/chips/cfi_cmdset_0001.c if (map_word_bitsset(map, status, CMD(0x3a))) { map 1986 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x50), adr); map 1987 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), adr); map 1988 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, adr); map 1991 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus); map 1998 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name); map 2002 drivers/mtd/chips/cfi_cmdset_0001.c DISABLE_VPP(map); map 2003 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, adr); map 2007 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus); map 2014 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, adr); map 2015 drivers/mtd/chips/cfi_cmdset_0001.c out: DISABLE_VPP(map); map 2016 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, adr); map 2029 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 2030 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2039 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, chip->start, FL_SYNCING); map 2068 drivers/mtd/chips/cfi_cmdset_0001.c static int __xipram do_getlockstatus_oneblock(struct map_info *map, map 2073 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2077 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, adr+(2*ofs_factor)); map 2078 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x90), adr+(2*ofs_factor)); map 2080 drivers/mtd/chips/cfi_cmdset_0001.c status = cfi_read_query(map, adr+(2*ofs_factor)); map 2081 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, 0); map 2086 drivers/mtd/chips/cfi_cmdset_0001.c static int __xipram do_printlockstatus_oneblock(struct map_info *map, map 2092 drivers/mtd/chips/cfi_cmdset_0001.c adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk)); map 2100 drivers/mtd/chips/cfi_cmdset_0001.c static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip, map 2103 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2111 
drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, adr, FL_LOCKING); map 2117 drivers/mtd/chips/cfi_cmdset_0001.c ENABLE_VPP(map); map 2118 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, adr); map 2120 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x60), adr); map 2122 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x01), adr); map 2125 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xD0), adr); map 2144 drivers/mtd/chips/cfi_cmdset_0001.c ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000); map 2146 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x70), adr); map 2148 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, adr); map 2149 drivers/mtd/chips/cfi_cmdset_0001.c printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name); map 2153 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, adr); map 2154 drivers/mtd/chips/cfi_cmdset_0001.c out: DISABLE_VPP(map); map 2155 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, adr); map 2217 drivers/mtd/chips/cfi_cmdset_0001.c typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, map 2222 drivers/mtd/chips/cfi_cmdset_0001.c do_otp_read(struct map_info *map, struct flchip *chip, u_long offset, map 2225 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2229 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY); map 2236 drivers/mtd/chips/cfi_cmdset_0001.c INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); map 2238 drivers/mtd/chips/cfi_cmdset_0001.c xip_disable(map, chip, chip->start); map 2240 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0x90), chip->start); map 2243 drivers/mtd/chips/cfi_cmdset_0001.c map_copy_from(map, buf, chip->start + offset, size); map 2244 drivers/mtd/chips/cfi_cmdset_0001.c xip_enable(map, chip, chip->start); map 2247 drivers/mtd/chips/cfi_cmdset_0001.c INVALIDATE_CACHED_RANGE(map, chip->start + offset, size); map 2249 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, chip->start); map 2255 drivers/mtd/chips/cfi_cmdset_0001.c do_otp_write(struct map_info *map, struct flchip *chip, u_long offset, map 2261 drivers/mtd/chips/cfi_cmdset_0001.c unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1); map 2263 drivers/mtd/chips/cfi_cmdset_0001.c int n = min_t(int, size, map_bankwidth(map)-gap); map 2264 drivers/mtd/chips/cfi_cmdset_0001.c map_word datum = map_word_ff(map); map 2266 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_load_partial(map, datum, buf, gap, n); map 2267 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); map 2280 drivers/mtd/chips/cfi_cmdset_0001.c do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset, map 2283 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2290 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_ff(map); map 2291 drivers/mtd/chips/cfi_cmdset_0001.c datum = map_word_clr(map, datum, CMD(1 << grpno)); map 2292 drivers/mtd/chips/cfi_cmdset_0001.c return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE); map 2299 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 2300 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2374 drivers/mtd/chips/cfi_cmdset_0001.c ret = do_otp_read(map, chip, map 2377 drivers/mtd/chips/cfi_cmdset_0001.c map_bankwidth(map), map 2385 drivers/mtd/chips/cfi_cmdset_0001.c !map_word_bitsset(map, lockword, map 2400 
drivers/mtd/chips/cfi_cmdset_0001.c ret = action(map, chip, data_offset, map 2506 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 2507 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2529 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xFF), cfi->chips[i].start); map 2600 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 2601 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2616 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xFF), cfi->chips[i].start); map 2631 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 2632 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 2642 drivers/mtd/chips/cfi_cmdset_0001.c ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); map 2644 drivers/mtd/chips/cfi_cmdset_0001.c map_write(map, CMD(0xff), chip->start); map 2646 drivers/mtd/chips/cfi_cmdset_0001.c put_chip(map, chip, chip->start); map 2666 drivers/mtd/chips/cfi_cmdset_0001.c struct map_info *map = mtd->priv; map 2667 drivers/mtd/chips/cfi_cmdset_0001.c struct cfi_private *cfi = map->fldrv_priv; map 94 drivers/mtd/chips/cfi_cmdset_0002.c static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode); map 95 drivers/mtd/chips/cfi_cmdset_0002.c static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr); map 126 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_check_err_status(struct map_info *map, struct flchip *chip, map 129 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 135 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, map 137 drivers/mtd/chips/cfi_cmdset_0002.c status = map_read(map, adr); map 140 drivers/mtd/chips/cfi_cmdset_0002.c if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB))) map 143 drivers/mtd/chips/cfi_cmdset_0002.c if (map_word_bitsset(map, status, CMD(0x3a))) { map 148 drivers/mtd/chips/cfi_cmdset_0002.c map->name, chipstatus); map 151 drivers/mtd/chips/cfi_cmdset_0002.c map->name, chipstatus); map 154 drivers/mtd/chips/cfi_cmdset_0002.c map->name, chipstatus); map 157 drivers/mtd/chips/cfi_cmdset_0002.c map->name, chipstatus); map 223 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 224 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 233 drivers/mtd/chips/cfi_cmdset_0002.c map->name, cfi->mfr, cfi->id); map 253 drivers/mtd/chips/cfi_cmdset_0002.c " detected\n", map->name); map 257 drivers/mtd/chips/cfi_cmdset_0002.c printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. 
Assuming broken CFI table.\n", map->name, cfi->id); map 264 drivers/mtd/chips/cfi_cmdset_0002.c " deduced %s from Device ID\n", map->name, major, minor, map 273 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 274 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 285 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 286 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 323 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 324 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 345 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 346 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 359 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 360 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 370 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 371 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 383 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 384 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 399 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 400 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 411 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 412 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 423 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 424 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 540 drivers/mtd/chips/cfi_cmdset_0002.c static void cfi_fixup_m29ew_erase_suspend(struct map_info *map, map 543 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 546 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), adr); map 583 drivers/mtd/chips/cfi_cmdset_0002.c struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) map 585 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 586 drivers/mtd/chips/cfi_cmdset_0002.c struct device_node __maybe_unused *np = map->device_node; map 593 drivers/mtd/chips/cfi_cmdset_0002.c mtd->priv = map; map 610 drivers/mtd/chips/cfi_cmdset_0002.c mtd->name = map->name; map 625 drivers/mtd/chips/cfi_cmdset_0002.c extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); map 680 drivers/mtd/chips/cfi_cmdset_0002.c map->name, bootloc); map 685 drivers/mtd/chips/cfi_cmdset_0002.c printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name); map 738 drivers/mtd/chips/cfi_cmdset_0002.c map->fldrv = &cfi_amdstd_chipdrv; map 742 drivers/mtd/chips/cfi_cmdset_0002.c struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); map 743 drivers/mtd/chips/cfi_cmdset_0002.c struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002"))); map 750 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 751 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 812 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram chip_ready(struct map_info *map, struct flchip *chip, map 815 drivers/mtd/chips/cfi_cmdset_0002.c struct 
cfi_private *cfi = map->fldrv_priv; map 824 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, map 826 drivers/mtd/chips/cfi_cmdset_0002.c d = map_read(map, addr); map 828 drivers/mtd/chips/cfi_cmdset_0002.c return map_word_andequal(map, d, ready, ready); map 831 drivers/mtd/chips/cfi_cmdset_0002.c d = map_read(map, addr); map 832 drivers/mtd/chips/cfi_cmdset_0002.c t = map_read(map, addr); map 834 drivers/mtd/chips/cfi_cmdset_0002.c return map_word_equal(map, d, t); map 852 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram chip_good(struct map_info *map, struct flchip *chip, map 855 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 865 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi, map 867 drivers/mtd/chips/cfi_cmdset_0002.c curd = map_read(map, addr); map 869 drivers/mtd/chips/cfi_cmdset_0002.c return map_word_andequal(map, curd, ready, ready); map 872 drivers/mtd/chips/cfi_cmdset_0002.c oldd = map_read(map, addr); map 873 drivers/mtd/chips/cfi_cmdset_0002.c curd = map_read(map, addr); map 875 drivers/mtd/chips/cfi_cmdset_0002.c return map_word_equal(map, oldd, curd) && map 876 drivers/mtd/chips/cfi_cmdset_0002.c map_word_equal(map, curd, expected); map 879 drivers/mtd/chips/cfi_cmdset_0002.c static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode) map 882 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 893 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_ready(map, chip, adr)) map 926 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xB0), chip->in_progress_block_addr); map 931 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_ready(map, chip, adr)) map 940 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr); map 985 drivers/mtd/chips/cfi_cmdset_0002.c static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr) map 987 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 991 drivers/mtd/chips/cfi_cmdset_0002.c cfi_fixup_m29ew_erase_suspend(map, map 993 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr); map 1026 drivers/mtd/chips/cfi_cmdset_0002.c static void xip_disable(struct map_info *map, struct flchip *chip, map 1030 drivers/mtd/chips/cfi_cmdset_0002.c (void) map_read(map, adr); /* ensure mmu mapping is up to date */ map 1034 drivers/mtd/chips/cfi_cmdset_0002.c static void __xipram xip_enable(struct map_info *map, struct flchip *chip, map 1037 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1040 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xf0), adr); map 1043 drivers/mtd/chips/cfi_cmdset_0002.c (void) map_read(map, adr); map 1061 drivers/mtd/chips/cfi_cmdset_0002.c static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, map 1064 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1085 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xb0), adr); map 1098 drivers/mtd/chips/cfi_cmdset_0002.c status = map_read(map, adr); map 1099 drivers/mtd/chips/cfi_cmdset_0002.c } while (!map_word_andequal(map, status, OK, OK)); map 1103 drivers/mtd/chips/cfi_cmdset_0002.c if (!map_word_bitsset(map, status, CMD(0x40))) map 1107 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xf0), adr); map 1108 drivers/mtd/chips/cfi_cmdset_0002.c (void) map_read(map, adr); map 1135 
drivers/mtd/chips/cfi_cmdset_0002.c cfi_fixup_m29ew_erase_suspend(map, adr); map 1137 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, cfi->sector_erase_cmd, adr); map 1148 drivers/mtd/chips/cfi_cmdset_0002.c status = map_read(map, adr); map 1149 drivers/mtd/chips/cfi_cmdset_0002.c } while (!map_word_andequal(map, status, OK, OK) map 1153 drivers/mtd/chips/cfi_cmdset_0002.c #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) map 1162 drivers/mtd/chips/cfi_cmdset_0002.c #define XIP_INVAL_CACHED_RANGE(map, from, size) \ map 1163 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHED_RANGE(map, from, size) map 1165 drivers/mtd/chips/cfi_cmdset_0002.c #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ map 1166 drivers/mtd/chips/cfi_cmdset_0002.c UDELAY(map, chip, adr, usec) map 1187 drivers/mtd/chips/cfi_cmdset_0002.c #define xip_disable(map, chip, adr) map 1188 drivers/mtd/chips/cfi_cmdset_0002.c #define xip_enable(map, chip, adr) map 1191 drivers/mtd/chips/cfi_cmdset_0002.c #define UDELAY(map, chip, adr, usec) \ map 1198 drivers/mtd/chips/cfi_cmdset_0002.c #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ map 1201 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHED_RANGE(map, adr, len); \ map 1208 drivers/mtd/chips/cfi_cmdset_0002.c static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) map 1211 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1217 drivers/mtd/chips/cfi_cmdset_0002.c cmd_addr = adr & ~(map_bankwidth(map)-1); map 1220 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, cmd_addr, FL_READY); map 1227 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xf0), cmd_addr); map 1231 drivers/mtd/chips/cfi_cmdset_0002.c map_copy_from(map, buf, adr, len); map 1233 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, cmd_addr); map 1242 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 1243 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1263 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); map 1277 drivers/mtd/chips/cfi_cmdset_0002.c typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip, map 1280 drivers/mtd/chips/cfi_cmdset_0002.c static inline void otp_enter(struct map_info *map, struct flchip *chip, map 1283 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1285 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 1287 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, map 1289 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, map 1292 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); map 1295 drivers/mtd/chips/cfi_cmdset_0002.c static inline void otp_exit(struct map_info *map, struct flchip *chip, map 1298 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1300 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 1302 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, map 1304 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, map 1306 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x00, 
cfi->addr_unlock1, chip->start, map, cfi, map 1309 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHED_RANGE(map, chip->start + adr, len); map 1312 drivers/mtd/chips/cfi_cmdset_0002.c static inline int do_read_secsi_onechip(struct map_info *map, map 1338 drivers/mtd/chips/cfi_cmdset_0002.c otp_enter(map, chip, adr, len); map 1339 drivers/mtd/chips/cfi_cmdset_0002.c map_copy_from(map, buf, adr, len); map 1340 drivers/mtd/chips/cfi_cmdset_0002.c otp_exit(map, chip, adr, len); map 1350 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 1351 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1372 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, map 1387 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, map 1391 drivers/mtd/chips/cfi_cmdset_0002.c static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr, map 1396 drivers/mtd/chips/cfi_cmdset_0002.c unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1); map 1398 drivers/mtd/chips/cfi_cmdset_0002.c int n = min_t(int, len, map_bankwidth(map) - gap); map 1399 drivers/mtd/chips/cfi_cmdset_0002.c map_word datum = map_word_ff(map); map 1401 drivers/mtd/chips/cfi_cmdset_0002.c if (n != map_bankwidth(map)) { map 1403 drivers/mtd/chips/cfi_cmdset_0002.c otp_enter(map, chip, bus_ofs, map_bankwidth(map)); map 1404 drivers/mtd/chips/cfi_cmdset_0002.c datum = map_read(map, bus_ofs); map 1405 drivers/mtd/chips/cfi_cmdset_0002.c otp_exit(map, chip, bus_ofs, map_bankwidth(map)); map 1408 drivers/mtd/chips/cfi_cmdset_0002.c datum = map_word_load_partial(map, datum, buf, gap, n); map 1409 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); map 1421 drivers/mtd/chips/cfi_cmdset_0002.c static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr, map 1424 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1434 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, chip->start, FL_LOCKING); map 1442 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 1444 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, map 1446 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi, map 1450 drivers/mtd/chips/cfi_cmdset_0002.c lockreg = cfi_read_query(map, 0); map 1457 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xA0), chip->start); map 1458 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(lockreg), chip->start); map 1463 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_ready(map, chip, adr)) map 1471 drivers/mtd/chips/cfi_cmdset_0002.c UDELAY(map, chip, 0, 1); map 1475 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x90), chip->start); map 1476 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x00), chip->start); map 1479 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, chip->start); map 1489 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 1490 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1516 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, base, FL_CFI_QUERY); map 1521 drivers/mtd/chips/cfi_cmdset_0002.c cfi_qry_mode_on(base, map, cfi); map 1522 drivers/mtd/chips/cfi_cmdset_0002.c otp = cfi_read_query(map, base + 0x3 * ofs_factor); 
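The do_otp_write() entries above handle writes that do not start on a bus-word boundary by reading back the existing word, merging the caller's bytes into it with map_word_load_partial(), and programming the merged word. A sketch of that read-modify-write step under the same assumptions; do_program_word() here is a hypothetical stand-in for the driver's do_write_oneword():

```c
#include <linux/kernel.h>
#include <linux/mtd/map.h>

/* Hypothetical stand-in for the driver's word-programming primitive. */
int do_program_word(struct map_info *map, unsigned long adr, map_word datum);

/*
 * Program @len bytes from @buf at @adr, where @adr and @len need not be
 * aligned to map_bankwidth(map).  Byte lanes that are not being written
 * are preserved (or left at 0xFF) so they are not disturbed.
 */
static int program_unaligned(struct map_info *map, unsigned long adr,
			     const u_char *buf, size_t len)
{
	while (len) {
		unsigned long bus_ofs = adr & ~(map_bankwidth(map) - 1);
		int gap = adr - bus_ofs;
		int n = min_t(int, len, map_bankwidth(map) - gap);
		map_word datum = map_word_ff(map);
		int ret;

		if (n != map_bankwidth(map))
			datum = map_read(map, bus_ofs);	/* keep untouched lanes */

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_program_word(map, bus_ofs, datum);
		if (ret)
			return ret;

		adr += n;
		buf += n;
		len -= n;
	}
	return 0;
}
```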
map 1523 drivers/mtd/chips/cfi_cmdset_0002.c cfi_qry_mode_off(base, map, cfi); map 1524 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, base); map 1537 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, base, FL_LOCKING); map 1545 drivers/mtd/chips/cfi_cmdset_0002.c chip->start, map, cfi, map 1548 drivers/mtd/chips/cfi_cmdset_0002.c chip->start, map, cfi, map 1551 drivers/mtd/chips/cfi_cmdset_0002.c chip->start, map, cfi, map 1554 drivers/mtd/chips/cfi_cmdset_0002.c lockreg = cfi_read_query(map, 0); map 1556 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x90), chip->start); map 1557 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x00), chip->start); map 1558 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, chip->start); map 1587 drivers/mtd/chips/cfi_cmdset_0002.c ret = action(map, chip, otpoffset + from, size, buf, map 1649 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_write_oneword_once(struct map_info *map, map 1667 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 1668 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map 1669 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 1670 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, datum, adr); map 1673 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHE_UDELAY(map, chip, map 1674 drivers/mtd/chips/cfi_cmdset_0002.c adr, map_bankwidth(map), map 1699 drivers/mtd/chips/cfi_cmdset_0002.c !chip_good(map, chip, adr, datum)) { map 1700 drivers/mtd/chips/cfi_cmdset_0002.c xip_enable(map, chip, adr); map 1702 drivers/mtd/chips/cfi_cmdset_0002.c xip_disable(map, chip, adr); map 1707 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_good(map, chip, adr, datum)) { map 1708 drivers/mtd/chips/cfi_cmdset_0002.c if (cfi_check_err_status(map, chip, adr)) map 1714 drivers/mtd/chips/cfi_cmdset_0002.c UDELAY(map, chip, adr, 1); map 1720 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_write_oneword_start(struct map_info *map, map 1728 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr, mode); map 1735 drivers/mtd/chips/cfi_cmdset_0002.c otp_enter(map, chip, adr, map_bankwidth(map)); map 1740 drivers/mtd/chips/cfi_cmdset_0002.c static void __xipram do_write_oneword_done(struct map_info *map, map 1745 drivers/mtd/chips/cfi_cmdset_0002.c otp_exit(map, chip, adr, map_bankwidth(map)); map 1748 drivers/mtd/chips/cfi_cmdset_0002.c DISABLE_VPP(map); map 1749 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr); map 1754 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_write_oneword_retry(struct map_info *map, map 1759 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1770 drivers/mtd/chips/cfi_cmdset_0002.c oldd = map_read(map, adr); map 1771 drivers/mtd/chips/cfi_cmdset_0002.c if (map_word_equal(map, oldd, datum)) { map 1776 drivers/mtd/chips/cfi_cmdset_0002.c XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map)); map 1777 drivers/mtd/chips/cfi_cmdset_0002.c ENABLE_VPP(map); map 1778 drivers/mtd/chips/cfi_cmdset_0002.c xip_disable(map, chip, adr); map 1781 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi); map 1784 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); map 1792 drivers/mtd/chips/cfi_cmdset_0002.c xip_enable(map, chip, 
adr); map 1797 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, map 1808 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_oneword_start(map, chip, adr, mode); map 1812 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_oneword_retry(map, chip, adr, datum, mode); map 1814 drivers/mtd/chips/cfi_cmdset_0002.c do_write_oneword_done(map, chip, adr, mode); map 1823 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 1824 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 1835 drivers/mtd/chips/cfi_cmdset_0002.c if (ofs & (map_bankwidth(map)-1)) { map 1836 drivers/mtd/chips/cfi_cmdset_0002.c unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1); map 1856 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_read(map, bus_ofs+chipstart); map 1861 drivers/mtd/chips/cfi_cmdset_0002.c n = min_t(int, len, map_bankwidth(map)-i); map 1863 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); map 1865 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_oneword(map, &cfi->chips[chipnum], map 1884 drivers/mtd/chips/cfi_cmdset_0002.c while(len >= map_bankwidth(map)) { map 1887 drivers/mtd/chips/cfi_cmdset_0002.c datum = map_word_load(map, buf); map 1889 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_oneword(map, &cfi->chips[chipnum], map 1894 drivers/mtd/chips/cfi_cmdset_0002.c ofs += map_bankwidth(map); map 1895 drivers/mtd/chips/cfi_cmdset_0002.c buf += map_bankwidth(map); map 1896 drivers/mtd/chips/cfi_cmdset_0002.c (*retlen) += map_bankwidth(map); map 1897 drivers/mtd/chips/cfi_cmdset_0002.c len -= map_bankwidth(map); map 1909 drivers/mtd/chips/cfi_cmdset_0002.c if (len & (map_bankwidth(map)-1)) { map 1926 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_read(map, ofs + chipstart); map 1930 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); map 1932 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_oneword(map, &cfi->chips[chipnum], map 1944 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_write_buffer_wait(struct map_info *map, map 1979 drivers/mtd/chips/cfi_cmdset_0002.c !chip_good(map, chip, adr, datum)) { map 1986 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_good(map, chip, adr, datum)) { map 1987 drivers/mtd/chips/cfi_cmdset_0002.c if (cfi_check_err_status(map, chip, adr)) map 1993 drivers/mtd/chips/cfi_cmdset_0002.c UDELAY(map, chip, adr, 1); map 1999 drivers/mtd/chips/cfi_cmdset_0002.c static void __xipram do_write_buffer_reset(struct map_info *map, map 2011 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 2013 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, map 2015 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi, map 2024 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, map 2028 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2038 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr, FL_WRITING); map 2044 drivers/mtd/chips/cfi_cmdset_0002.c datum = map_word_load(map, buf); map 2049 drivers/mtd/chips/cfi_cmdset_0002.c XIP_INVAL_CACHED_RANGE(map, adr, len); map 2050 drivers/mtd/chips/cfi_cmdset_0002.c ENABLE_VPP(map); map 2051 drivers/mtd/chips/cfi_cmdset_0002.c xip_disable(map, chip, cmd_adr); map 2053 
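The do_write_oneword_once() entries above start a single-word program with the classic three-cycle command sequence (0xAA at addr_unlock1, 0x55 at addr_unlock2, 0xA0 at addr_unlock1) followed by the data word itself; completion is then confirmed by polling with chip_good(). A condensed sketch of just that command phase, assuming the caller already owns the chip via get_chip() and has VPP enabled:

```c
#include <linux/mtd/cfi.h>
#include <linux/mtd/map.h>

/*
 * Start the AMD/Fujitsu (cmdset 0002) single-word program algorithm at
 * @adr.  Completion still has to be confirmed by polling, e.g. with a
 * chip_good()-style check against @datum as in the listing above.
 */
static void start_word_program(struct map_info *map, struct flchip *chip,
			       unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;

	/* unlock cycles */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* program setup */
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	/* writing the data word starts the embedded program algorithm */
	map_write(map, datum, adr);
}
```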
drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2054 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map 2057 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x25), cmd_adr); map 2062 drivers/mtd/chips/cfi_cmdset_0002.c words = len / map_bankwidth(map); map 2063 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(words - 1), cmd_adr); map 2066 drivers/mtd/chips/cfi_cmdset_0002.c while(z < words * map_bankwidth(map)) { map 2067 drivers/mtd/chips/cfi_cmdset_0002.c datum = map_word_load(map, buf); map 2068 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, datum, adr + z); map 2070 drivers/mtd/chips/cfi_cmdset_0002.c z += map_bankwidth(map); map 2071 drivers/mtd/chips/cfi_cmdset_0002.c buf += map_bankwidth(map); map 2073 drivers/mtd/chips/cfi_cmdset_0002.c z -= map_bankwidth(map); map 2078 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x29), cmd_adr); map 2081 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHE_UDELAY(map, chip, map 2082 drivers/mtd/chips/cfi_cmdset_0002.c adr, map_bankwidth(map), map 2085 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_buffer_wait(map, chip, adr, datum); map 2087 drivers/mtd/chips/cfi_cmdset_0002.c do_write_buffer_reset(map, chip, cfi); map 2089 drivers/mtd/chips/cfi_cmdset_0002.c xip_enable(map, chip, adr); map 2092 drivers/mtd/chips/cfi_cmdset_0002.c DISABLE_VPP(map); map 2093 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr); map 2103 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 2104 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2114 drivers/mtd/chips/cfi_cmdset_0002.c if (ofs & (map_bankwidth(map)-1)) { map 2115 drivers/mtd/chips/cfi_cmdset_0002.c size_t local_len = (-ofs)&(map_bankwidth(map)-1); map 2135 drivers/mtd/chips/cfi_cmdset_0002.c while (len >= map_bankwidth(map) * 2) { map 2141 drivers/mtd/chips/cfi_cmdset_0002.c if (size % map_bankwidth(map)) map 2142 drivers/mtd/chips/cfi_cmdset_0002.c size -= size % map_bankwidth(map); map 2144 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_write_buffer(map, &cfi->chips[chipnum], map 2184 drivers/mtd/chips/cfi_cmdset_0002.c static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, map 2187 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2195 drivers/mtd/chips/cfi_cmdset_0002.c if (chip->state == FL_READY && chip_ready(map, chip, adr)) map 2208 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); map 2212 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_ready(map, chip, adr)) map 2236 drivers/mtd/chips/cfi_cmdset_0002.c static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, map 2240 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2248 drivers/mtd/chips/cfi_cmdset_0002.c ret = cfi_amdstd_panic_wait(map, chip, adr); map 2261 drivers/mtd/chips/cfi_cmdset_0002.c oldd = map_read(map, adr); map 2262 drivers/mtd/chips/cfi_cmdset_0002.c if (map_word_equal(map, oldd, datum)) { map 2267 drivers/mtd/chips/cfi_cmdset_0002.c ENABLE_VPP(map); map 2270 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2271 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map 2272 
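The do_write_buffer() entries above use the faster buffered-program path: after the two unlock cycles the driver writes the 0x25 (Write to Buffer) command and a word count of words - 1 to the sector address, streams the data words, and finally issues the 0x29 confirm before polling for completion. A sketch of that flow, assuming @len is bus-word aligned and fits in one write buffer; cfi_build_cmd() stands in for the driver's local CMD() macro, and error handling is omitted:

```c
#include <linux/mtd/cfi.h>
#include <linux/mtd/map.h>

/*
 * Start a buffered program of @len bytes at @adr; @cmd_adr is the sector
 * (command) address.  Mirrors the 0x25 / count-1 / data / 0x29 sequence
 * shown above; the completion poll is left out.
 */
static void start_buffer_write(struct map_info *map, struct flchip *chip,
			       unsigned long cmd_adr, unsigned long adr,
			       const u_char *buf, size_t len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int words = len / map_bankwidth(map);
	size_t z;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* Write to Buffer command, then the number of words minus one */
	map_write(map, cfi_build_cmd(0x25, map, cfi), cmd_adr);
	map_write(map, cfi_build_cmd(words - 1, map, cfi), cmd_adr);

	/* stream the payload one bus word at a time */
	for (z = 0; z < len; z += map_bankwidth(map), buf += map_bankwidth(map))
		map_write(map, map_word_load(map, buf), adr + z);

	/* Write Buffer Program Confirm */
	map_write(map, cfi_build_cmd(0x29, map, cfi), cmd_adr);
}
```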
drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2273 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, datum, adr); map 2276 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_ready(map, chip, adr)) map 2282 drivers/mtd/chips/cfi_cmdset_0002.c if (!chip_good(map, chip, adr, datum) || map 2283 drivers/mtd/chips/cfi_cmdset_0002.c cfi_check_err_status(map, chip, adr)) { map 2285 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); map 2295 drivers/mtd/chips/cfi_cmdset_0002.c DISABLE_VPP(map); map 2315 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 2316 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2326 drivers/mtd/chips/cfi_cmdset_0002.c if (ofs & (map_bankwidth(map) - 1)) { map 2327 drivers/mtd/chips/cfi_cmdset_0002.c unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1); map 2332 drivers/mtd/chips/cfi_cmdset_0002.c ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs); map 2337 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_read(map, bus_ofs + chipstart); map 2340 drivers/mtd/chips/cfi_cmdset_0002.c n = min_t(int, len, map_bankwidth(map) - i); map 2342 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n); map 2344 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_panic_write_oneword(map, &cfi->chips[chipnum], map 2363 drivers/mtd/chips/cfi_cmdset_0002.c while (len >= map_bankwidth(map)) { map 2366 drivers/mtd/chips/cfi_cmdset_0002.c datum = map_word_load(map, buf); map 2368 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_panic_write_oneword(map, &cfi->chips[chipnum], map 2373 drivers/mtd/chips/cfi_cmdset_0002.c ofs += map_bankwidth(map); map 2374 drivers/mtd/chips/cfi_cmdset_0002.c buf += map_bankwidth(map); map 2375 drivers/mtd/chips/cfi_cmdset_0002.c (*retlen) += map_bankwidth(map); map 2376 drivers/mtd/chips/cfi_cmdset_0002.c len -= map_bankwidth(map); map 2389 drivers/mtd/chips/cfi_cmdset_0002.c if (len & (map_bankwidth(map) - 1)) { map 2392 drivers/mtd/chips/cfi_cmdset_0002.c ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs); map 2396 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_read(map, ofs + chipstart); map 2398 drivers/mtd/chips/cfi_cmdset_0002.c tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len); map 2400 drivers/mtd/chips/cfi_cmdset_0002.c ret = do_panic_write_oneword(map, &cfi->chips[chipnum], map 2416 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) map 2418 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2428 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr, FL_ERASING); map 2437 drivers/mtd/chips/cfi_cmdset_0002.c XIP_INVAL_CACHED_RANGE(map, adr, map->size); map 2438 drivers/mtd/chips/cfi_cmdset_0002.c ENABLE_VPP(map); map 2439 drivers/mtd/chips/cfi_cmdset_0002.c xip_disable(map, chip, adr); map 2442 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2443 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map 2444 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2445 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2446 
drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map 2447 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2452 drivers/mtd/chips/cfi_cmdset_0002.c chip->in_progress_block_mask = ~(map->size - 1); map 2454 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHE_UDELAY(map, chip, map 2455 drivers/mtd/chips/cfi_cmdset_0002.c adr, map->size, map 2478 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_good(map, chip, adr, map_word_ff(map))) { map 2479 drivers/mtd/chips/cfi_cmdset_0002.c if (cfi_check_err_status(map, chip, adr)) map 2492 drivers/mtd/chips/cfi_cmdset_0002.c UDELAY(map, chip, adr, 1000000/HZ); map 2497 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); map 2507 drivers/mtd/chips/cfi_cmdset_0002.c xip_enable(map, chip, adr); map 2508 drivers/mtd/chips/cfi_cmdset_0002.c DISABLE_VPP(map); map 2509 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr); map 2516 drivers/mtd/chips/cfi_cmdset_0002.c static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk) map 2518 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2527 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr, FL_ERASING); map 2536 drivers/mtd/chips/cfi_cmdset_0002.c XIP_INVAL_CACHED_RANGE(map, adr, len); map 2537 drivers/mtd/chips/cfi_cmdset_0002.c ENABLE_VPP(map); map 2538 drivers/mtd/chips/cfi_cmdset_0002.c xip_disable(map, chip, adr); map 2541 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2542 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map 2543 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2544 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); map 2545 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); map 2546 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, cfi->sector_erase_cmd, adr); map 2553 drivers/mtd/chips/cfi_cmdset_0002.c INVALIDATE_CACHE_UDELAY(map, chip, map 2577 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_good(map, chip, adr, map_word_ff(map))) { map 2578 drivers/mtd/chips/cfi_cmdset_0002.c if (cfi_check_err_status(map, chip, adr)) map 2591 drivers/mtd/chips/cfi_cmdset_0002.c UDELAY(map, chip, adr, 1000000/HZ); map 2596 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); map 2606 drivers/mtd/chips/cfi_cmdset_0002.c xip_enable(map, chip, adr); map 2607 drivers/mtd/chips/cfi_cmdset_0002.c DISABLE_VPP(map); map 2608 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr); map 2623 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 2624 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2632 drivers/mtd/chips/cfi_cmdset_0002.c return do_erase_chip(map, &cfi->chips[0]); map 2635 drivers/mtd/chips/cfi_cmdset_0002.c static int do_atmel_lock(struct map_info *map, struct flchip *chip, map 2638 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2642 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr + 
chip->start, FL_LOCKING); map 2649 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 2651 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, map 2653 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, map 2655 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 2657 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, map 2659 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x40), chip->start + adr); map 2662 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr + chip->start); map 2670 drivers/mtd/chips/cfi_cmdset_0002.c static int do_atmel_unlock(struct map_info *map, struct flchip *chip, map 2673 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2677 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING); map 2684 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 2686 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x70), adr); map 2689 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr + chip->start); map 2721 drivers/mtd/chips/cfi_cmdset_0002.c static int __maybe_unused do_ppb_xxlock(struct map_info *map, map 2725 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2731 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, adr, FL_LOCKING); map 2739 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, map 2741 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, map 2744 drivers/mtd/chips/cfi_cmdset_0002.c cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi, map 2749 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xA0), adr); map 2750 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x00), adr); map 2757 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x80), chip->start); map 2758 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x30), chip->start); map 2762 drivers/mtd/chips/cfi_cmdset_0002.c ret = !cfi_read_query(map, adr); map 2771 drivers/mtd/chips/cfi_cmdset_0002.c if (chip_ready(map, chip, adr)) map 2780 drivers/mtd/chips/cfi_cmdset_0002.c UDELAY(map, chip, adr, 1); map 2784 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x90), chip->start); map 2785 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0x00), chip->start); map 2788 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, adr); map 2805 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 2806 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2854 drivers/mtd/chips/cfi_cmdset_0002.c map, &cfi->chips[chipnum], adr, 0, map 2898 drivers/mtd/chips/cfi_cmdset_0002.c do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0, map 2915 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 2916 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 2977 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 2978 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 3031 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 3032 drivers/mtd/chips/cfi_cmdset_0002.c struct 
cfi_private *cfi = map->fldrv_priv; map 3044 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); map 3063 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 3064 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 3074 drivers/mtd/chips/cfi_cmdset_0002.c ret = get_chip(map, chip, chip->start, FL_SHUTDOWN); map 3076 drivers/mtd/chips/cfi_cmdset_0002.c map_write(map, CMD(0xF0), chip->start); map 3078 drivers/mtd/chips/cfi_cmdset_0002.c put_chip(map, chip, chip->start); map 3101 drivers/mtd/chips/cfi_cmdset_0002.c struct map_info *map = mtd->priv; map 3102 drivers/mtd/chips/cfi_cmdset_0002.c struct cfi_private *cfi = map->fldrv_priv; map 113 drivers/mtd/chips/cfi_cmdset_0020.c struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary) map 115 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 127 drivers/mtd/chips/cfi_cmdset_0020.c extp = (struct cfi_pri_intelext*)cfi_read_pri(map, adr, sizeof(*extp), "ST Microelectronics"); map 141 drivers/mtd/chips/cfi_cmdset_0020.c extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport); map 142 drivers/mtd/chips/cfi_cmdset_0020.c extp->BlkStatusRegMask = cfi32_to_cpu(map, map 162 drivers/mtd/chips/cfi_cmdset_0020.c return cfi_staa_setup(map); map 166 drivers/mtd/chips/cfi_cmdset_0020.c static struct mtd_info *cfi_staa_setup(struct map_info *map) map 168 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 182 drivers/mtd/chips/cfi_cmdset_0020.c mtd->priv = map; map 241 drivers/mtd/chips/cfi_cmdset_0020.c map->fldrv = &cfi_staa_chipdrv; map 243 drivers/mtd/chips/cfi_cmdset_0020.c mtd->name = map->name; map 248 drivers/mtd/chips/cfi_cmdset_0020.c static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf) map 255 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 260 drivers/mtd/chips/cfi_cmdset_0020.c cmd_addr = adr & ~(map_bankwidth(map)-1); map 277 drivers/mtd/chips/cfi_cmdset_0020.c map_write (map, CMD(0xb0), cmd_addr); map 283 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), cmd_addr); map 288 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, cmd_addr); map 289 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 294 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xd0), cmd_addr); map 296 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), cmd_addr); map 311 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xff), cmd_addr); map 325 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), cmd_addr); map 330 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, cmd_addr); map 331 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) { map 332 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xff), cmd_addr); map 362 drivers/mtd/chips/cfi_cmdset_0020.c map_copy_from(map, buf, adr, len); map 375 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xd0), cmd_addr); map 376 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), cmd_addr); map 386 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 387 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 407 drivers/mtd/chips/cfi_cmdset_0020.c ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf); map 421 drivers/mtd/chips/cfi_cmdset_0020.c static int 
do_write_buffer(struct map_info *map, struct flchip *chip, map 424 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 431 drivers/mtd/chips/cfi_cmdset_0020.c if (adr & (map_bankwidth(map)-1)) map 460 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), cmd_adr); map 463 drivers/mtd/chips/cfi_cmdset_0020.c printk("%s: 1 status[%x]\n", __func__, map_read(map, cmd_adr)); map 468 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, cmd_adr); map 469 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 475 drivers/mtd/chips/cfi_cmdset_0020.c status.x[0], map_read(map, cmd_adr).x[0]); map 496 drivers/mtd/chips/cfi_cmdset_0020.c ENABLE_VPP(map); map 497 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xe8), cmd_adr); map 502 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, cmd_adr); map 503 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 512 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 513 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), cmd_adr); map 522 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(len/map_bankwidth(map)-1), cmd_adr ); map 526 drivers/mtd/chips/cfi_cmdset_0020.c z += map_bankwidth(map), buf += map_bankwidth(map)) { map 528 drivers/mtd/chips/cfi_cmdset_0020.c d = map_word_load(map, buf); map 529 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, d, adr+z); map 532 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xd0), cmd_adr); map 554 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, cmd_adr); map 555 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 561 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x50), cmd_adr); map 563 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 565 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 586 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 590 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_bitsset(map, status, CMD(0x3a))) { map 595 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x50), cmd_adr); map 597 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 600 drivers/mtd/chips/cfi_cmdset_0020.c return map_word_bitsset(map, status, CMD(0x02)) ? 
-EROFS : -EIO; map 611 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 612 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 622 drivers/mtd/chips/cfi_cmdset_0020.c printk("%s: map_bankwidth(map)[%x]\n", __func__, map_bankwidth(map)); map 635 drivers/mtd/chips/cfi_cmdset_0020.c ret = do_write_buffer(map, &cfi->chips[chipnum], map 734 drivers/mtd/chips/cfi_cmdset_0020.c static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr) map 736 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 757 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 762 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, adr); map 763 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 790 drivers/mtd/chips/cfi_cmdset_0020.c ENABLE_VPP(map); map 792 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x50), adr); map 795 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x20), adr); map 796 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xD0), adr); map 820 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, adr); map 821 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 826 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 828 drivers/mtd/chips/cfi_cmdset_0020.c printk(KERN_ERR "waiting for erase to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); map 829 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 840 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 844 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 846 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, adr); map 849 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_bitsset(map, status, CMD(0x3a))) { map 851 drivers/mtd/chips/cfi_cmdset_0020.c if (!map_word_equal(map, status, CMD(chipstatus))) { map 853 drivers/mtd/chips/cfi_cmdset_0020.c for (w=0; w<map_words(map); w++) { map 862 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x50), adr); map 863 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 895 drivers/mtd/chips/cfi_cmdset_0020.c { struct map_info *map = mtd->priv; map 896 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 952 drivers/mtd/chips/cfi_cmdset_0020.c ret = do_erase_oneblock(map, &cfi->chips[chipnum], adr); map 977 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 978 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 1034 drivers/mtd/chips/cfi_cmdset_0020.c static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr) map 1036 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 1055 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 1060 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, adr); map 1061 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 1088 drivers/mtd/chips/cfi_cmdset_0020.c ENABLE_VPP(map); map 1089 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x60), adr); map 1090 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x01), adr); map 1103 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, adr); map 1104 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, 
status, status_OK, status_OK)) map 1109 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 1111 drivers/mtd/chips/cfi_cmdset_0020.c printk(KERN_ERR "waiting for lock to complete timed out. Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); map 1112 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 1125 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 1132 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 1133 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 1152 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1153 drivers/mtd/chips/cfi_cmdset_0020.c printk("before lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); map 1154 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1157 drivers/mtd/chips/cfi_cmdset_0020.c ret = do_lock_oneblock(map, &cfi->chips[chipnum], adr); map 1160 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1161 drivers/mtd/chips/cfi_cmdset_0020.c printk("after lock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); map 1162 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1181 drivers/mtd/chips/cfi_cmdset_0020.c static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr) map 1183 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 1202 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 1207 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, adr); map 1208 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 1235 drivers/mtd/chips/cfi_cmdset_0020.c ENABLE_VPP(map); map 1236 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x60), adr); map 1237 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xD0), adr); map 1250 drivers/mtd/chips/cfi_cmdset_0020.c status = map_read(map, adr); map 1251 drivers/mtd/chips/cfi_cmdset_0020.c if (map_word_andequal(map, status, status_OK, status_OK)) map 1256 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0x70), adr); map 1258 drivers/mtd/chips/cfi_cmdset_0020.c printk(KERN_ERR "waiting for unlock to complete timed out. 
Xstatus = %lx, status = %lx.\n", status.x[0], map_read(map, adr).x[0]); map 1259 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 1272 drivers/mtd/chips/cfi_cmdset_0020.c DISABLE_VPP(map); map 1279 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 1280 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 1295 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1297 drivers/mtd/chips/cfi_cmdset_0020.c printk("before unlock %x: block status register is %x\n",temp_adr,cfi_read_query(map, temp_adr+(2*ofs_factor))); map 1301 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1305 drivers/mtd/chips/cfi_cmdset_0020.c ret = do_unlock_oneblock(map, &cfi->chips[chipnum], adr); map 1308 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1309 drivers/mtd/chips/cfi_cmdset_0020.c printk("after unlock: block status register is %x\n",cfi_read_query(map, adr+(2*ofs_factor))); map 1310 drivers/mtd/chips/cfi_cmdset_0020.c cfi_send_gen_cmd(0xff, 0x55, 0, map, cfi, cfi->device_type, NULL); map 1318 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 1319 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 1374 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 1375 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 1387 drivers/mtd/chips/cfi_cmdset_0020.c map_write(map, CMD(0xFF), 0); map 1398 drivers/mtd/chips/cfi_cmdset_0020.c struct map_info *map = mtd->priv; map 1399 drivers/mtd/chips/cfi_cmdset_0020.c struct cfi_private *cfi = map->fldrv_priv; map 27 drivers/mtd/chips/cfi_probe.c static int cfi_probe_chip(struct map_info *map, __u32 base, map 29 drivers/mtd/chips/cfi_probe.c static int cfi_chip_setup(struct map_info *map, struct cfi_private *cfi); map 31 drivers/mtd/chips/cfi_probe.c struct mtd_info *cfi_probe(struct map_info *map); map 38 drivers/mtd/chips/cfi_probe.c #define xip_allowed(base, map) \ map 40 drivers/mtd/chips/cfi_probe.c (void) map_read(map, base); \ map 45 drivers/mtd/chips/cfi_probe.c #define xip_enable(base, map, cfi) \ map 47 drivers/mtd/chips/cfi_probe.c cfi_qry_mode_off(base, map, cfi); \ map 48 drivers/mtd/chips/cfi_probe.c xip_allowed(base, map); \ map 51 drivers/mtd/chips/cfi_probe.c #define xip_disable_qry(base, map, cfi) \ map 54 drivers/mtd/chips/cfi_probe.c cfi_qry_mode_on(base, map, cfi); \ map 60 drivers/mtd/chips/cfi_probe.c #define xip_allowed(base, map) do { } while (0) map 61 drivers/mtd/chips/cfi_probe.c #define xip_enable(base, map, cfi) do { } while (0) map 62 drivers/mtd/chips/cfi_probe.c #define xip_disable_qry(base, map, cfi) do { } while (0) map 95 drivers/mtd/chips/cfi_probe.c static int __xipram cfi_probe_chip(struct map_info *map, __u32 base, map 100 drivers/mtd/chips/cfi_probe.c if ((base + 0) >= map->size) { map 103 drivers/mtd/chips/cfi_probe.c (unsigned long)base, map->size -1); map 106 drivers/mtd/chips/cfi_probe.c if ((base + 0xff) >= map->size) { map 109 drivers/mtd/chips/cfi_probe.c (unsigned long)base + 0x55, map->size -1); map 114 drivers/mtd/chips/cfi_probe.c if (!cfi_qry_mode_on(base, map, cfi)) { map 115 drivers/mtd/chips/cfi_probe.c xip_enable(base, map, cfi); map 122 drivers/mtd/chips/cfi_probe.c return cfi_chip_setup(map, cfi); map 135 drivers/mtd/chips/cfi_probe.c if (cfi_qry_present(map, start, cfi)) { map 138 
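The cfi_cmdset_0020.c (ST Advanced Architecture) entries above follow a different convention from cmdset 0002: instead of watching toggling data bits, the driver writes 0x70 (Read Status Register), spins with map_word_andequal() until the ready bit is set, and then writes 0xFF to return the chip to read-array mode. A compact sketch of that poll; the 0x80 ready mask and the millisecond loop are assumptions for illustration, not the driver's exact constants or timing:

```c
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/map.h>

/*
 * Illustrative status poll for an ST/Intel-style (cmdset 0020) operation
 * at @cmd_adr: issue Read Status Register, wait for the ready bit(s),
 * then put the chip back into read-array mode.
 */
static int wait_status_ready(struct map_info *map, unsigned long cmd_adr,
			     unsigned int timeout_ms)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status_ok = cfi_build_cmd(0x80, map, cfi);	/* assumed ready bit */
	unsigned int i;

	map_write(map, cfi_build_cmd(0x70, map, cfi), cmd_adr);

	for (i = 0; i < timeout_ms; i++) {
		map_word status = map_read(map, cmd_adr);

		if (map_word_andequal(map, status, status_ok, status_ok)) {
			/* back to read-array mode */
			map_write(map, cfi_build_cmd(0xFF, map, cfi), cmd_adr);
			return 0;
		}
		mdelay(1);
	}
	return -ETIMEDOUT;
}
```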
drivers/mtd/chips/cfi_probe.c cfi_qry_mode_off(start, map, cfi); map 141 drivers/mtd/chips/cfi_probe.c if (!cfi_qry_present(map, start, cfi)) { map 142 drivers/mtd/chips/cfi_probe.c xip_allowed(base, map); map 144 drivers/mtd/chips/cfi_probe.c map->name, base, start); map 151 drivers/mtd/chips/cfi_probe.c cfi_qry_mode_off(base, map, cfi); map 153 drivers/mtd/chips/cfi_probe.c if (cfi_qry_present(map, base, cfi)) { map 154 drivers/mtd/chips/cfi_probe.c xip_allowed(base, map); map 156 drivers/mtd/chips/cfi_probe.c map->name, base, start); map 168 drivers/mtd/chips/cfi_probe.c cfi_qry_mode_off(base, map, cfi); map 169 drivers/mtd/chips/cfi_probe.c xip_allowed(base, map); map 172 drivers/mtd/chips/cfi_probe.c map->name, cfi->interleave, cfi->device_type*8, base, map 173 drivers/mtd/chips/cfi_probe.c map->bankwidth*8); map 194 drivers/mtd/chips/cfi_probe.c static int __xipram cfi_chip_setup(struct map_info *map, map 199 drivers/mtd/chips/cfi_probe.c int num_erase_regions = cfi_read_query(map, base + (0x10 + 28)*ofs_factor); map 203 drivers/mtd/chips/cfi_probe.c xip_enable(base, map, cfi); map 221 drivers/mtd/chips/cfi_probe.c xip_disable_qry(base, map, cfi); map 223 drivers/mtd/chips/cfi_probe.c ((unsigned char *)cfi->cfiq)[i] = cfi_read_query(map,base + (0x10 + i)*ofs_factor); map 262 drivers/mtd/chips/cfi_probe.c cfi_send_gen_cmd(0xf0, 0, base, map, cfi, cfi->device_type, NULL); map 263 drivers/mtd/chips/cfi_probe.c cfi_send_gen_cmd(0xaa, addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 264 drivers/mtd/chips/cfi_probe.c cfi_send_gen_cmd(0x55, addr_unlock2, base, map, cfi, cfi->device_type, NULL); map 265 drivers/mtd/chips/cfi_probe.c cfi_send_gen_cmd(0x90, addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 266 drivers/mtd/chips/cfi_probe.c cfi->mfr = cfi_read_query16(map, base); map 267 drivers/mtd/chips/cfi_probe.c cfi->id = cfi_read_query16(map, base + ofs_factor); map 271 drivers/mtd/chips/cfi_probe.c cfi->id = cfi_read_query(map, base + 0xe * ofs_factor) << 8 | map 272 drivers/mtd/chips/cfi_probe.c cfi_read_query(map, base + 0xf * ofs_factor); map 275 drivers/mtd/chips/cfi_probe.c cfi_qry_mode_off(base, map, cfi); map 276 drivers/mtd/chips/cfi_probe.c xip_allowed(base, map); map 281 drivers/mtd/chips/cfi_probe.c map->name, cfi->interleave, cfi->device_type*8, base, map 282 drivers/mtd/chips/cfi_probe.c map->bankwidth*8, cfi->mfr, cfi->id); map 431 drivers/mtd/chips/cfi_probe.c struct mtd_info *cfi_probe(struct map_info *map) map 437 drivers/mtd/chips/cfi_probe.c return mtd_do_chip_probe(map, &cfi_chip_probe); map 41 drivers/mtd/chips/cfi_util.c struct map_info *map, struct cfi_private *cfi) map 43 drivers/mtd/chips/cfi_util.c unsigned bankwidth = map_bankwidth(map); map 67 drivers/mtd/chips/cfi_util.c map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi) map 78 drivers/mtd/chips/cfi_util.c if (map_bankwidth_is_large(map)) { map 80 drivers/mtd/chips/cfi_util.c words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. 
normally 1 map 82 drivers/mtd/chips/cfi_util.c wordwidth = map_bankwidth(map); map 86 drivers/mtd/chips/cfi_util.c chip_mode = map_bankwidth(map) / cfi_interleave(cfi); map 87 drivers/mtd/chips/cfi_util.c chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map); map 97 drivers/mtd/chips/cfi_util.c onecmd = cpu_to_cfi16(map, cmd); map 100 drivers/mtd/chips/cfi_util.c onecmd = cpu_to_cfi32(map, cmd); map 133 drivers/mtd/chips/cfi_util.c unsigned long cfi_merge_status(map_word val, struct map_info *map, map 144 drivers/mtd/chips/cfi_util.c if (map_bankwidth_is_large(map)) { map 146 drivers/mtd/chips/cfi_util.c words_per_bus = (map_bankwidth(map)) / wordwidth; // i.e. normally 1 map 148 drivers/mtd/chips/cfi_util.c wordwidth = map_bankwidth(map); map 152 drivers/mtd/chips/cfi_util.c chip_mode = map_bankwidth(map) / cfi_interleave(cfi); map 153 drivers/mtd/chips/cfi_util.c chips_per_word = wordwidth * cfi_interleave(cfi) / map_bankwidth(map); map 185 drivers/mtd/chips/cfi_util.c res = cfi16_to_cpu(map, res); map 188 drivers/mtd/chips/cfi_util.c res = cfi32_to_cpu(map, res); map 204 drivers/mtd/chips/cfi_util.c struct map_info *map, struct cfi_private *cfi, map 208 drivers/mtd/chips/cfi_util.c uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi); map 209 drivers/mtd/chips/cfi_util.c val = cfi_build_cmd(cmd, map, cfi); map 212 drivers/mtd/chips/cfi_util.c *prev_val = map_read(map, addr); map 214 drivers/mtd/chips/cfi_util.c map_write(map, val, addr); map 220 drivers/mtd/chips/cfi_util.c int __xipram cfi_qry_present(struct map_info *map, __u32 base, map 227 drivers/mtd/chips/cfi_util.c qry[0] = cfi_build_cmd('Q', map, cfi); map 228 drivers/mtd/chips/cfi_util.c qry[1] = cfi_build_cmd('R', map, cfi); map 229 drivers/mtd/chips/cfi_util.c qry[2] = cfi_build_cmd('Y', map, cfi); map 231 drivers/mtd/chips/cfi_util.c val[0] = map_read(map, base + osf*0x10); map 232 drivers/mtd/chips/cfi_util.c val[1] = map_read(map, base + osf*0x11); map 233 drivers/mtd/chips/cfi_util.c val[2] = map_read(map, base + osf*0x12); map 235 drivers/mtd/chips/cfi_util.c if (!map_word_equal(map, qry[0], val[0])) map 238 drivers/mtd/chips/cfi_util.c if (!map_word_equal(map, qry[1], val[1])) map 241 drivers/mtd/chips/cfi_util.c if (!map_word_equal(map, qry[2], val[2])) map 248 drivers/mtd/chips/cfi_util.c int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, map 251 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); map 252 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); map 253 drivers/mtd/chips/cfi_util.c if (cfi_qry_present(map, base, cfi)) map 257 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); map 258 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); map 259 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL); map 260 drivers/mtd/chips/cfi_util.c if (cfi_qry_present(map, base, cfi)) map 263 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); map 264 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); map 265 drivers/mtd/chips/cfi_util.c if (cfi_qry_present(map, base, cfi)) map 268 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); map 269 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xAA, 0x5555, base, map, cfi, 
cfi->device_type, NULL); map 270 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0x55, 0x2AAA, base, map, cfi, cfi->device_type, NULL); map 271 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0x98, 0x5555, base, map, cfi, cfi->device_type, NULL); map 272 drivers/mtd/chips/cfi_util.c if (cfi_qry_present(map, base, cfi)) map 275 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); map 276 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xAA, 0x555, base, map, cfi, cfi->device_type, NULL); map 277 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0x55, 0x2AA, base, map, cfi, cfi->device_type, NULL); map 278 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0x98, 0x555, base, map, cfi, cfi->device_type, NULL); map 279 drivers/mtd/chips/cfi_util.c if (cfi_qry_present(map, base, cfi)) map 286 drivers/mtd/chips/cfi_util.c void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, map 289 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); map 290 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); map 294 drivers/mtd/chips/cfi_util.c cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL); map 299 drivers/mtd/chips/cfi_util.c __xipram cfi_read_pri(struct map_info *map, __u16 adr, __u16 size, const char* name) map 301 drivers/mtd/chips/cfi_util.c struct cfi_private *cfi = map->fldrv_priv; map 321 drivers/mtd/chips/cfi_util.c cfi_qry_mode_on(base, map, cfi); map 325 drivers/mtd/chips/cfi_util.c cfi_read_query(map, base+((adr+i)*ofs_factor)); map 329 drivers/mtd/chips/cfi_util.c cfi_qry_mode_off(base, map, cfi); map 332 drivers/mtd/chips/cfi_util.c (void) map_read(map, base); map 344 drivers/mtd/chips/cfi_util.c struct map_info *map = mtd->priv; map 345 drivers/mtd/chips/cfi_util.c struct cfi_private *cfi = map->fldrv_priv; map 361 drivers/mtd/chips/cfi_util.c struct map_info *map = mtd->priv; map 362 drivers/mtd/chips/cfi_util.c struct cfi_private *cfi = map->fldrv_priv; map 419 drivers/mtd/chips/cfi_util.c ret = (*frob)(map, &cfi->chips[chipnum], adr, size, thunk); map 58 drivers/mtd/chips/chipreg.c struct mtd_info *do_map_probe(const char *name, struct map_info *map) map 71 drivers/mtd/chips/chipreg.c ret = drv->probe(map); map 88 drivers/mtd/chips/chipreg.c struct map_info *map = mtd->priv; map 90 drivers/mtd/chips/chipreg.c if (map->fldrv->destroy) map 91 drivers/mtd/chips/chipreg.c map->fldrv->destroy(mtd); map 93 drivers/mtd/chips/chipreg.c module_put(map->fldrv->module); map 29 drivers/mtd/chips/fwh_lock.h static int fwh_xxlock_oneblock(struct map_info *map, struct flchip *chip, map 32 drivers/mtd/chips/fwh_lock.h struct cfi_private *cfi = map->fldrv_priv; map 62 drivers/mtd/chips/fwh_lock.h ret = get_chip(map, chip, adr, FL_LOCKING); map 70 drivers/mtd/chips/fwh_lock.h map_write(map, CMD(xxlt->val), adr); map 74 drivers/mtd/chips/fwh_lock.h put_chip(map, chip, adr); map 16 drivers/mtd/chips/gen_probe.c static struct cfi_private *genprobe_ident_chips(struct map_info *map, map 18 drivers/mtd/chips/gen_probe.c static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp, map 21 drivers/mtd/chips/gen_probe.c struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp) map 27 drivers/mtd/chips/gen_probe.c cfi = genprobe_ident_chips(map, cp); map 32 drivers/mtd/chips/gen_probe.c map->fldrv_priv = cfi; map 35 drivers/mtd/chips/gen_probe.c mtd = check_cmd_set(map, 1); /* First the primary cmdset */ map 37 drivers/mtd/chips/gen_probe.c 
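The cfi_qry_present() and cfi_qry_mode_on() entries above implement CFI detection: put the chip into query mode with the 0x98 command and look for the ASCII signature 'Q', 'R', 'Y' at query offsets 0x10-0x12, scaled onto the interleaved bus. A sketch of the check for the simplest case, assuming the chip answers the plain 0x98-at-0x55 entry sequence (the real cfi_qry_mode_on() retries several address variants, as the entries above show):

```c
#include <linux/mtd/cfi.h>
#include <linux/mtd/map.h>
#include <linux/types.h>

/*
 * Return true if the CFI "QRY" signature is visible at @base after
 * entering query mode.  The offset factor scales byte-addressed query
 * offsets onto an interleaved bus.
 */
static bool qry_visible(struct map_info *map, uint32_t base,
			struct cfi_private *cfi)
{
	int osf = cfi->interleave * cfi->device_type;	/* offset scale factor */

	/* reset, then enter CFI query mode */
	cfi_send_gen_cmd(0xF0, 0, base, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x98, 0x55, base, map, cfi, cfi->device_type, NULL);

	return map_word_equal(map, map_read(map, base + osf * 0x10),
			      cfi_build_cmd('Q', map, cfi)) &&
	       map_word_equal(map, map_read(map, base + osf * 0x11),
			      cfi_build_cmd('R', map, cfi)) &&
	       map_word_equal(map, map_read(map, base + osf * 0x12),
			      cfi_build_cmd('Y', map, cfi));
}
```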
mtd = check_cmd_set(map, 0); /* Then the secondary */ map 40 drivers/mtd/chips/gen_probe.c if (mtd->size > map->size) { map 43 drivers/mtd/chips/gen_probe.c (unsigned long)map->size >> 10); map 44 drivers/mtd/chips/gen_probe.c mtd->size = map->size; map 53 drivers/mtd/chips/gen_probe.c map->fldrv_priv = NULL; map 59 drivers/mtd/chips/gen_probe.c static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chip_probe *cp) map 71 drivers/mtd/chips/gen_probe.c if (!genprobe_new_chip(map, cp, &cfi)) { map 74 drivers/mtd/chips/gen_probe.c cp->name, map->name); map 108 drivers/mtd/chips/gen_probe.c max_chips = map->size >> cfi.chipshift; map 130 drivers/mtd/chips/gen_probe.c cp->probe_chip(map, i << cfi.chipshift, chip_map, &cfi); map 165 drivers/mtd/chips/gen_probe.c static int genprobe_new_chip(struct map_info *map, struct chip_probe *cp, map 168 drivers/mtd/chips/gen_probe.c int min_chips = (map_bankwidth(map)/4?:1); /* At most 4-bytes wide. */ map 169 drivers/mtd/chips/gen_probe.c int max_chips = map_bankwidth(map); /* And minimum 1 */ map 181 drivers/mtd/chips/gen_probe.c type = map_bankwidth(map) / nr_chips; map 186 drivers/mtd/chips/gen_probe.c if (cp->probe_chip(map, 0, NULL, cfi)) map 199 drivers/mtd/chips/gen_probe.c static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map, map 202 drivers/mtd/chips/gen_probe.c struct cfi_private *cfi = map->fldrv_priv; map 222 drivers/mtd/chips/gen_probe.c mtd = (*probe_function)(map, primary); map 233 drivers/mtd/chips/gen_probe.c static struct mtd_info *check_cmd_set(struct map_info *map, int primary) map 235 drivers/mtd/chips/gen_probe.c struct cfi_private *cfi = map->fldrv_priv; map 248 drivers/mtd/chips/gen_probe.c return cfi_cmdset_0001(map, primary); map 254 drivers/mtd/chips/gen_probe.c return cfi_cmdset_0002(map, primary); map 258 drivers/mtd/chips/gen_probe.c return cfi_cmdset_0020(map, primary); map 261 drivers/mtd/chips/gen_probe.c return cfi_cmdset_unknown(map, primary); map 1910 drivers/mtd/chips/jedec_probe.c static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, map 1922 drivers/mtd/chips/jedec_probe.c uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); map 1924 drivers/mtd/chips/jedec_probe.c if (ofs >= map->size) map 1926 drivers/mtd/chips/jedec_probe.c result = map_read(map, base + ofs); map 1933 drivers/mtd/chips/jedec_probe.c static inline u32 jedec_read_id(struct map_info *map, uint32_t base, map 1938 drivers/mtd/chips/jedec_probe.c u32 ofs = cfi_build_cmd_addr(1, map, cfi); map 1940 drivers/mtd/chips/jedec_probe.c result = map_read(map, base + ofs); map 1944 drivers/mtd/chips/jedec_probe.c static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) map 1957 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 1958 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); map 1961 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 1967 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0xFF, 0, base, map, cfi, cfi->device_type, NULL); map 1972 drivers/mtd/chips/jedec_probe.c static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index) map 2031 drivers/mtd/chips/jedec_probe.c struct map_info *map, map 2084 drivers/mtd/chips/jedec_probe.c if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) { map 2119 
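do_map_probe() (chipreg.c) together with mtd_do_chip_probe() and check_cmd_set() in gen_probe.c is the path by which a flat memory-mapped flash window becomes an MTD device: a map driver fills in a struct map_info, each registered chip probe is tried in turn, and gen_probe dispatches on the CFI primary command-set ID (0x0001/0x0002/0x0020) to the matching cmdset driver. A minimal, hypothetical map-driver sketch using the real simple_map_init()/do_map_probe() helpers; the name, base address, size and bank width are invented for illustration:

```c
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/sizes.h>

/* Purely illustrative values: a 16-bit-wide 8 MiB NOR window. */
#define EXAMPLE_FLASH_PHYS	0x20000000
#define EXAMPLE_FLASH_SIZE	SZ_8M

static struct map_info example_map = {
	.name		= "example-nor",
	.size		= EXAMPLE_FLASH_SIZE,
	.phys		= EXAMPLE_FLASH_PHYS,
	.bankwidth	= 2,
};

static struct mtd_info *example_mtd;

static int __init example_map_init(void)
{
	example_map.virt = ioremap(example_map.phys, example_map.size);
	if (!example_map.virt)
		return -ENOMEM;

	simple_map_init(&example_map);	/* plain memory-mapped accessors */

	/* Try CFI; "jedec_probe" or "map_rom" could be tried as fallbacks. */
	example_mtd = do_map_probe("cfi_probe", &example_map);
	if (!example_mtd) {
		iounmap(example_map.virt);
		return -ENXIO;
	}

	return mtd_device_register(example_mtd, NULL, 0);
}
module_init(example_map_init);
MODULE_LICENSE("GPL");
```

On teardown, map_destroy() is the matching call: as the chipreg.c entries show, it hands the mtd back to map->fldrv->destroy() and drops the chip driver's module reference.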
drivers/mtd/chips/jedec_probe.c jedec_reset( base, map, cfi ); map 2120 drivers/mtd/chips/jedec_probe.c mfr = jedec_read_mfr( map, base, cfi ); map 2121 drivers/mtd/chips/jedec_probe.c id = jedec_read_id( map, base, cfi ); map 2138 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 2139 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); map 2141 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 2149 drivers/mtd/chips/jedec_probe.c static int jedec_probe_chip(struct map_info *map, __u32 base, map 2168 drivers/mtd/chips/jedec_probe.c if (base >= map->size) { map 2171 drivers/mtd/chips/jedec_probe.c base, map->size -1); map 2176 drivers/mtd/chips/jedec_probe.c probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi); map 2177 drivers/mtd/chips/jedec_probe.c probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi); map 2178 drivers/mtd/chips/jedec_probe.c if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) || map 2179 drivers/mtd/chips/jedec_probe.c ((base + probe_offset2 + map_bankwidth(map)) >= map->size)) map 2183 drivers/mtd/chips/jedec_probe.c jedec_reset(base, map, cfi); map 2187 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 2188 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL); map 2190 drivers/mtd/chips/jedec_probe.c cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL); map 2197 drivers/mtd/chips/jedec_probe.c cfi->mfr = jedec_read_mfr(map, base, cfi); map 2198 drivers/mtd/chips/jedec_probe.c cfi->id = jedec_read_id(map, base, cfi); map 2202 drivers/mtd/chips/jedec_probe.c if ( jedec_match( base, map, cfi, &jedec_table[i] ) ) { map 2206 drivers/mtd/chips/jedec_probe.c if (!cfi_jedec_setup(map, cfi, i)) map 2217 drivers/mtd/chips/jedec_probe.c mfr = jedec_read_mfr(map, base, cfi); map 2218 drivers/mtd/chips/jedec_probe.c id = jedec_read_id(map, base, cfi); map 2222 drivers/mtd/chips/jedec_probe.c map->name, mfr, id, base); map 2223 drivers/mtd/chips/jedec_probe.c jedec_reset(base, map, cfi); map 2235 drivers/mtd/chips/jedec_probe.c if (jedec_read_mfr(map, start, cfi) == cfi->mfr && map 2236 drivers/mtd/chips/jedec_probe.c jedec_read_id(map, start, cfi) == cfi->id) { map 2239 drivers/mtd/chips/jedec_probe.c jedec_reset(start, map, cfi); map 2242 drivers/mtd/chips/jedec_probe.c if (jedec_read_mfr(map, base, cfi) != cfi->mfr || map 2243 drivers/mtd/chips/jedec_probe.c jedec_read_id(map, base, cfi) != cfi->id) { map 2245 drivers/mtd/chips/jedec_probe.c map->name, base, start); map 2253 drivers/mtd/chips/jedec_probe.c jedec_reset(base, map, cfi); map 2254 drivers/mtd/chips/jedec_probe.c if (jedec_read_mfr(map, base, cfi) == cfi->mfr && map 2255 drivers/mtd/chips/jedec_probe.c jedec_read_id(map, base, cfi) == cfi->id) { map 2257 drivers/mtd/chips/jedec_probe.c map->name, base, start); map 2270 drivers/mtd/chips/jedec_probe.c jedec_reset(base, map, cfi); map 2273 drivers/mtd/chips/jedec_probe.c map->name, cfi_interleave(cfi), cfi->device_type*8, base, map 2274 drivers/mtd/chips/jedec_probe.c map->bankwidth*8); map 2284 drivers/mtd/chips/jedec_probe.c static struct mtd_info *jedec_probe(struct map_info *map) map 2290 drivers/mtd/chips/jedec_probe.c return mtd_do_chip_probe(map, &jedec_chip_probe); map 34 
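The jedec_probe_chip() entries above cover parts that do not answer CFI queries: after a reset the driver issues the 0xAA/0x55/0x90 autoselect sequence and reads the manufacturer ID at word offset 0 and the device ID at offset 1 (bus-scaled via cfi_build_cmd_addr()), then resets back to read mode and matches the pair against jedec_table[]. A sketch of that ID read; masking of the returned bus word by device type and interleave is left out:

```c
#include <linux/mtd/cfi.h>
#include <linux/mtd/map.h>

/*
 * Read the JEDEC manufacturer/device ID pair at @base using the
 * autoselect command set, then leave autoselect mode again.
 */
static void jedec_read_ids(struct map_info *map, uint32_t base,
			   struct cfi_private *cfi,
			   unsigned long *mfr, unsigned long *id)
{
	/* autoselect entry: unlock cycles followed by 0x90 */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, base, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, base, map, cfi,
			 cfi->device_type, NULL);

	*mfr = map_read(map, base + cfi_build_cmd_addr(0, map, cfi)).x[0];
	*id  = map_read(map, base + cfi_build_cmd_addr(1, map, cfi)).x[0];

	/* AMD-style reset back to read-array mode */
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi,
			 cfi->device_type, NULL);
}
```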
drivers/mtd/chips/map_absent.c static struct mtd_info *map_absent_probe(struct map_info *map); map 45 drivers/mtd/chips/map_absent.c static struct mtd_info *map_absent_probe(struct map_info *map) map 54 drivers/mtd/chips/map_absent.c map->fldrv = &map_absent_chipdrv; map 55 drivers/mtd/chips/map_absent.c mtd->priv = map; map 56 drivers/mtd/chips/map_absent.c mtd->name = map->name; map 58 drivers/mtd/chips/map_absent.c mtd->size = map->size; map 22 drivers/mtd/chips/map_ram.c static struct mtd_info *map_ram_probe(struct map_info *map); map 34 drivers/mtd/chips/map_ram.c static struct mtd_info *map_ram_probe(struct map_info *map) map 40 drivers/mtd/chips/map_ram.c map_write8(map, 0x55, 0); map 41 drivers/mtd/chips/map_ram.c if (map_read8(map, 0) != 0x55) map 44 drivers/mtd/chips/map_ram.c map_write8(map, 0xAA, 0); map 45 drivers/mtd/chips/map_ram.c if (map_read8(map, 0) != 0xAA) map 49 drivers/mtd/chips/map_ram.c map_write8(map, 0x55, map->size-1); map 50 drivers/mtd/chips/map_ram.c if (map_read8(map, map->size-1) != 0x55) map 53 drivers/mtd/chips/map_ram.c map_write8(map, 0xAA, map->size-1); map 54 drivers/mtd/chips/map_ram.c if (map_read8(map, map->size-1) != 0xAA) map 63 drivers/mtd/chips/map_ram.c map->fldrv = &mapram_chipdrv; map 64 drivers/mtd/chips/map_ram.c mtd->priv = map; map 65 drivers/mtd/chips/map_ram.c mtd->name = map->name; map 67 drivers/mtd/chips/map_ram.c mtd->size = map->size; map 89 drivers/mtd/chips/map_ram.c struct map_info *map = mtd->priv; map 91 drivers/mtd/chips/map_ram.c if (!map->virt) map 93 drivers/mtd/chips/map_ram.c *virt = map->virt + from; map 95 drivers/mtd/chips/map_ram.c *phys = map->phys + from; map 107 drivers/mtd/chips/map_ram.c struct map_info *map = mtd->priv; map 109 drivers/mtd/chips/map_ram.c map_copy_from(map, buf, from, len); map 116 drivers/mtd/chips/map_ram.c struct map_info *map = mtd->priv; map 118 drivers/mtd/chips/map_ram.c map_copy_to(map, to, buf, len); map 127 drivers/mtd/chips/map_ram.c struct map_info *map = mtd->priv; map 131 drivers/mtd/chips/map_ram.c allff = map_word_ff(map); map 132 drivers/mtd/chips/map_ram.c for (i=0; i<instr->len; i += map_bankwidth(map)) map 133 drivers/mtd/chips/map_ram.c map_write(map, allff, instr->addr + i); map 21 drivers/mtd/chips/map_rom.c static struct mtd_info *map_rom_probe(struct map_info *map); map 34 drivers/mtd/chips/map_rom.c static unsigned int default_erasesize(struct map_info *map) map 38 drivers/mtd/chips/map_rom.c erase_size = of_get_property(map->device_node, "erase-size", NULL); map 40 drivers/mtd/chips/map_rom.c return !erase_size ? 
map->size : be32_to_cpu(*erase_size); map 43 drivers/mtd/chips/map_rom.c static struct mtd_info *map_rom_probe(struct map_info *map) map 51 drivers/mtd/chips/map_rom.c map->fldrv = &maprom_chipdrv; map 52 drivers/mtd/chips/map_rom.c mtd->priv = map; map 53 drivers/mtd/chips/map_rom.c mtd->name = map->name; map 55 drivers/mtd/chips/map_rom.c mtd->size = map->size; map 63 drivers/mtd/chips/map_rom.c mtd->erasesize = default_erasesize(map); map 75 drivers/mtd/chips/map_rom.c struct map_info *map = mtd->priv; map 77 drivers/mtd/chips/map_rom.c if (!map->virt) map 79 drivers/mtd/chips/map_rom.c *virt = map->virt + from; map 81 drivers/mtd/chips/map_rom.c *phys = map->phys + from; map 93 drivers/mtd/chips/map_rom.c struct map_info *map = mtd->priv; map 95 drivers/mtd/chips/map_rom.c map_copy_from(map, buf, from, len); map 66 drivers/mtd/devices/slram.c static char *map[SLRAM_MAX_DEVICES_PARAMS]; map 68 drivers/mtd/devices/slram.c module_param_array(map, charp, NULL, 0); map 69 drivers/mtd/devices/slram.c MODULE_PARM_DESC(map, "List of memory regions to map. \"map=<name>, <start>, <length / end>\""); map 71 drivers/mtd/devices/slram.c static char *map; map 269 drivers/mtd/devices/slram.c map = str; map 285 drivers/mtd/devices/slram.c if (!map) { map 289 drivers/mtd/devices/slram.c while (map) { map 292 drivers/mtd/devices/slram.c if (!(devname = strsep(&map, ","))) { map 297 drivers/mtd/devices/slram.c if ((!map) || (!(devstart = strsep(&map, ",")))) { map 301 drivers/mtd/devices/slram.c if ((!map) || (!(devlength = strsep(&map, ",")))) { map 313 drivers/mtd/devices/slram.c for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count]; map 322 drivers/mtd/devices/slram.c devname = map[i * 3]; map 324 drivers/mtd/devices/slram.c if (parse_cmdline(devname, map[i * 3 + 1], map[i * 3 + 2])!=0) { map 28 drivers/mtd/hyperbus/hbmc-am654.c struct map_info *map = &hbdev->map; map 36 drivers/mtd/hyperbus/hbmc-am654.c cfi_send_gen_cmd(0xF0, 0, 0, map, &cfi, cfi.device_type, NULL); map 37 drivers/mtd/hyperbus/hbmc-am654.c cfi_send_gen_cmd(0x98, 0x55, 0, map, &cfi, cfi.device_type, NULL); map 40 drivers/mtd/hyperbus/hbmc-am654.c ret = cfi_qry_present(map, 0, &cfi); map 49 drivers/mtd/hyperbus/hbmc-am654.c cfi_qry_mode_off(0, map, &cfi); map 16 drivers/mtd/hyperbus/hyperbus-core.c static struct hyperbus_device *map_to_hbdev(struct map_info *map) map 18 drivers/mtd/hyperbus/hyperbus-core.c return container_of(map, struct hyperbus_device, map); map 21 drivers/mtd/hyperbus/hyperbus-core.c static map_word hyperbus_read16(struct map_info *map, unsigned long addr) map 23 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_device *hbdev = map_to_hbdev(map); map 32 drivers/mtd/hyperbus/hyperbus-core.c static void hyperbus_write16(struct map_info *map, map_word d, map 35 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_device *hbdev = map_to_hbdev(map); map 41 drivers/mtd/hyperbus/hyperbus-core.c static void hyperbus_copy_from(struct map_info *map, void *to, map 44 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_device *hbdev = map_to_hbdev(map); map 50 drivers/mtd/hyperbus/hyperbus-core.c static void hyperbus_copy_to(struct map_info *map, unsigned long to, map 53 drivers/mtd/hyperbus/hyperbus-core.c struct hyperbus_device *hbdev = map_to_hbdev(map); map 64 drivers/mtd/hyperbus/hyperbus-core.c struct map_info *map; map 86 drivers/mtd/hyperbus/hyperbus-core.c map = &hbdev->map; map 87 drivers/mtd/hyperbus/hyperbus-core.c map->size = resource_size(&res); map 88 drivers/mtd/hyperbus/hyperbus-core.c map->virt = 
devm_ioremap_resource(dev, &res); map 89 drivers/mtd/hyperbus/hyperbus-core.c if (IS_ERR(map->virt)) map 90 drivers/mtd/hyperbus/hyperbus-core.c return PTR_ERR(map->virt); map 92 drivers/mtd/hyperbus/hyperbus-core.c map->name = dev_name(dev); map 93 drivers/mtd/hyperbus/hyperbus-core.c map->bankwidth = 2; map 94 drivers/mtd/hyperbus/hyperbus-core.c map->device_node = np; map 96 drivers/mtd/hyperbus/hyperbus-core.c simple_map_init(map); map 100 drivers/mtd/hyperbus/hyperbus-core.c map->read = hyperbus_read16; map 102 drivers/mtd/hyperbus/hyperbus-core.c map->write = hyperbus_write16; map 104 drivers/mtd/hyperbus/hyperbus-core.c map->copy_to = hyperbus_copy_to; map 106 drivers/mtd/hyperbus/hyperbus-core.c map->copy_from = hyperbus_copy_from; map 118 drivers/mtd/hyperbus/hyperbus-core.c hbdev->mtd = do_map_probe("cfi_probe", map); map 120 drivers/mtd/lpddr/lpddr2_nvm.c static inline u_long ow_reg_add(struct map_info *map, u_long offset) map 123 drivers/mtd/lpddr/lpddr2_nvm.c struct pcm_int_data *pcm_data = map->fldrv_priv; map 125 drivers/mtd/lpddr/lpddr2_nvm.c val = map->pfow_base + offset*pcm_data->bus_width; map 136 drivers/mtd/lpddr/lpddr2_nvm.c static inline void ow_enable(struct map_info *map) map 138 drivers/mtd/lpddr/lpddr2_nvm.c struct pcm_int_data *pcm_data = map->fldrv_priv; map 151 drivers/mtd/lpddr/lpddr2_nvm.c static inline void ow_disable(struct map_info *map) map 153 drivers/mtd/lpddr/lpddr2_nvm.c struct pcm_int_data *pcm_data = map->fldrv_priv; map 163 drivers/mtd/lpddr/lpddr2_nvm.c static int lpddr2_nvm_do_op(struct map_info *map, u_long cmd_code, map 171 drivers/mtd/lpddr/lpddr2_nvm.c struct pcm_int_data *pcm_data = map->fldrv_priv; map 185 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS)); map 186 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, data_l, ow_reg_add(map, CMD_DATA_OFS)); map 187 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS)); map 188 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS)); map 189 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS)); map 190 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS)); map 192 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, cmd, ow_reg_add(map, CMD_CODE_OFS) + 2); map 193 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, data_h, ow_reg_add(map, CMD_DATA_OFS) + 2); map 194 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, add_l, ow_reg_add(map, CMD_ADD_L_OFS) + 2); map 195 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, add_h, ow_reg_add(map, CMD_ADD_H_OFS) + 2); map 196 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, mpr_l, ow_reg_add(map, MPR_L_OFS) + 2); map 197 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, mpr_h, ow_reg_add(map, MPR_H_OFS) + 2); map 203 drivers/mtd/lpddr/lpddr2_nvm.c prg_buff_ofs = (map_read(map, map 204 drivers/mtd/lpddr/lpddr2_nvm.c ow_reg_add(map, PRG_BUFFER_OFS))).x[0]; map 206 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, build_map_word(buf[i]), map->pfow_base + map 212 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS)); map 214 drivers/mtd/lpddr/lpddr2_nvm.c map_write(map, exec_cmd, ow_reg_add(map, CMD_EXEC_OFS) + 2); map 218 drivers/mtd/lpddr/lpddr2_nvm.c sr = map_read(map, ow_reg_add(map, STATUS_REG_OFS)); map 221 drivers/mtd/lpddr/lpddr2_nvm.c sr = map_read(map, ow_reg_add(map, map 236 drivers/mtd/lpddr/lpddr2_nvm.c struct map_info *map = mtd->priv; map 242 drivers/mtd/lpddr/lpddr2_nvm.c ow_enable(map); map 248 
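
The hyperbus-core entries above illustrate the simplest way a map-backed flash gets registered: fill in a struct map_info, let simple_map_init() install the default MMIO accessors (or override individual hooks, as hyperbus does), and hand the map to do_map_probe(). A minimal platform-driver sketch of that flow; the bank width, probe name and error codes are assumptions, and partition parsing is omitted:

#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

struct example_flash {
	struct map_info map;
	struct mtd_info *mtd;
};

static int example_flash_probe(struct platform_device *pdev)
{
	struct example_flash *info;
	struct resource *res;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->map.virt = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(info->map.virt))
		return PTR_ERR(info->map.virt);

	info->map.name = dev_name(&pdev->dev);
	info->map.phys = res->start;
	info->map.size = resource_size(res);
	info->map.bankwidth = 2;		/* assumed 16-bit bus */

	simple_map_init(&info->map);		/* default MMIO accessors */

	info->mtd = do_map_probe("cfi_probe", &info->map);
	if (!info->mtd)
		return -ENXIO;
	info->mtd->dev.parent = &pdev->dev;

	if (mtd_device_register(info->mtd, NULL, 0)) {
		map_destroy(info->mtd);		/* undo do_map_probe() on failure */
		return -ENXIO;
	}
	platform_set_drvdata(pdev, info);
	return 0;
}
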
drivers/mtd/lpddr/lpddr2_nvm.c ret = lpddr2_nvm_do_op(map, block_op, 0x00, add, add, NULL); map 255 drivers/mtd/lpddr/lpddr2_nvm.c ow_disable(map); map 263 drivers/mtd/lpddr/lpddr2_nvm.c static int lpddr2_nvm_pfow_present(struct map_info *map) map 270 drivers/mtd/lpddr/lpddr2_nvm.c ow_enable(map); map 273 drivers/mtd/lpddr/lpddr2_nvm.c pfow_val[0] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_P)); map 274 drivers/mtd/lpddr/lpddr2_nvm.c pfow_val[1] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_F)); map 275 drivers/mtd/lpddr/lpddr2_nvm.c pfow_val[2] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_O)); map 276 drivers/mtd/lpddr/lpddr2_nvm.c pfow_val[3] = map_read(map, ow_reg_add(map, PFOW_QUERY_STRING_W)); map 279 drivers/mtd/lpddr/lpddr2_nvm.c if (!map_word_equal(map, build_map_word('P'), pfow_val[0])) map 281 drivers/mtd/lpddr/lpddr2_nvm.c if (!map_word_equal(map, build_map_word('F'), pfow_val[1])) map 283 drivers/mtd/lpddr/lpddr2_nvm.c if (!map_word_equal(map, build_map_word('O'), pfow_val[2])) map 285 drivers/mtd/lpddr/lpddr2_nvm.c if (!map_word_equal(map, build_map_word('W'), pfow_val[3])) map 288 drivers/mtd/lpddr/lpddr2_nvm.c ow_disable(map); map 301 drivers/mtd/lpddr/lpddr2_nvm.c struct map_info *map = mtd->priv; map 307 drivers/mtd/lpddr/lpddr2_nvm.c map_copy_from(map, buf, start_add, *retlen); map 319 drivers/mtd/lpddr/lpddr2_nvm.c struct map_info *map = mtd->priv; map 320 drivers/mtd/lpddr/lpddr2_nvm.c struct pcm_int_data *pcm_data = map->fldrv_priv; map 327 drivers/mtd/lpddr/lpddr2_nvm.c ow_enable(map); map 342 drivers/mtd/lpddr/lpddr2_nvm.c ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_SW_OVERWRITE, map 352 drivers/mtd/lpddr/lpddr2_nvm.c ret = lpddr2_nvm_do_op(map, LPDDR2_NVM_BUF_OVERWRITE, map 364 drivers/mtd/lpddr/lpddr2_nvm.c ow_disable(map); map 401 drivers/mtd/lpddr/lpddr2_nvm.c struct map_info *map; map 415 drivers/mtd/lpddr/lpddr2_nvm.c map = devm_kzalloc(&pdev->dev, sizeof(*map), GFP_KERNEL); map 416 drivers/mtd/lpddr/lpddr2_nvm.c if (!map) map 427 drivers/mtd/lpddr/lpddr2_nvm.c *map = (struct map_info) { map 436 drivers/mtd/lpddr/lpddr2_nvm.c if (IS_ERR(map->virt)) map 437 drivers/mtd/lpddr/lpddr2_nvm.c return PTR_ERR(map->virt); map 439 drivers/mtd/lpddr/lpddr2_nvm.c simple_map_init(map); /* fill with default methods */ map 451 drivers/mtd/lpddr/lpddr2_nvm.c .priv = map, map 465 drivers/mtd/lpddr/lpddr2_nvm.c if (!lpddr2_nvm_pfow_present(map)) { map 31 drivers/mtd/lpddr/lpddr_cmds.c static int get_chip(struct map_info *map, struct flchip *chip, int mode); map 32 drivers/mtd/lpddr/lpddr_cmds.c static int chip_ready(struct map_info *map, struct flchip *chip, int mode); map 33 drivers/mtd/lpddr/lpddr_cmds.c static void put_chip(struct map_info *map, struct flchip *chip); map 35 drivers/mtd/lpddr/lpddr_cmds.c struct mtd_info *lpddr_cmdset(struct map_info *map) map 37 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 47 drivers/mtd/lpddr/lpddr_cmds.c mtd->priv = map; map 60 drivers/mtd/lpddr/lpddr_cmds.c if (map_is_linear(map)) { map 97 drivers/mtd/lpddr/lpddr_cmds.c static int wait_for_ready(struct map_info *map, struct flchip *chip, map 113 drivers/mtd/lpddr/lpddr_cmds.c dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR)); map 118 drivers/mtd/lpddr/lpddr_cmds.c map->name, chip_state); map 160 drivers/mtd/lpddr/lpddr_cmds.c map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR); map 162 drivers/mtd/lpddr/lpddr_cmds.c map->name, dsr); map 170 drivers/mtd/lpddr/lpddr_cmds.c static int get_chip(struct map_info *map, struct flchip 
*chip, int mode) map 215 drivers/mtd/lpddr/lpddr_cmds.c ret = chip_ready(map, contender, mode); map 231 drivers/mtd/lpddr/lpddr_cmds.c put_chip(map, contender); map 259 drivers/mtd/lpddr/lpddr_cmds.c ret = chip_ready(map, chip, mode); map 266 drivers/mtd/lpddr/lpddr_cmds.c static int chip_ready(struct map_info *map, struct flchip *chip, int mode) map 268 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 286 drivers/mtd/lpddr/lpddr_cmds.c map_write(map, CMD(LPDDR_SUSPEND), map 287 drivers/mtd/lpddr/lpddr_cmds.c map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND); map 290 drivers/mtd/lpddr/lpddr_cmds.c ret = wait_for_ready(map, chip, 0); map 294 drivers/mtd/lpddr/lpddr_cmds.c put_chip(map, chip); map 296 drivers/mtd/lpddr/lpddr_cmds.c "State may be wrong \n", map->name); map 321 drivers/mtd/lpddr/lpddr_cmds.c static void put_chip(struct map_info *map, struct flchip *chip) map 335 drivers/mtd/lpddr/lpddr_cmds.c put_chip(map, loaner); map 360 drivers/mtd/lpddr/lpddr_cmds.c map_write(map, CMD(LPDDR_RESUME), map 361 drivers/mtd/lpddr/lpddr_cmds.c map->pfow_base + PFOW_COMMAND_CODE); map 362 drivers/mtd/lpddr/lpddr_cmds.c map_write(map, CMD(LPDDR_START_EXECUTION), map 363 drivers/mtd/lpddr/lpddr_cmds.c map->pfow_base + PFOW_COMMAND_EXECUTE); map 371 drivers/mtd/lpddr/lpddr_cmds.c map->name, chip->oldstate); map 376 drivers/mtd/lpddr/lpddr_cmds.c static int do_write_buffer(struct map_info *map, struct flchip *chip, map 380 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 390 drivers/mtd/lpddr/lpddr_cmds.c ret = get_chip(map, chip, FL_WRITING); map 396 drivers/mtd/lpddr/lpddr_cmds.c word_gap = (-adr & (map_bankwidth(map)-1)); map 397 drivers/mtd/lpddr/lpddr_cmds.c words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map); map 401 drivers/mtd/lpddr/lpddr_cmds.c word_gap = map_bankwidth(map) - word_gap; map 403 drivers/mtd/lpddr/lpddr_cmds.c datum = map_word_ff(map); map 407 drivers/mtd/lpddr/lpddr_cmds.c prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map, map 408 drivers/mtd/lpddr/lpddr_cmds.c map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET)); map 412 drivers/mtd/lpddr/lpddr_cmds.c int n = map_bankwidth(map) - word_gap; map 419 drivers/mtd/lpddr/lpddr_cmds.c if (!word_gap && (len < map_bankwidth(map))) map 420 drivers/mtd/lpddr/lpddr_cmds.c datum = map_word_ff(map); map 422 drivers/mtd/lpddr/lpddr_cmds.c datum = map_word_load_partial(map, datum, map 427 drivers/mtd/lpddr/lpddr_cmds.c if (!len || word_gap == map_bankwidth(map)) { map 428 drivers/mtd/lpddr/lpddr_cmds.c map_write(map, datum, prog_buf_ofs); map 429 drivers/mtd/lpddr/lpddr_cmds.c prog_buf_ofs += map_bankwidth(map); map 443 drivers/mtd/lpddr/lpddr_cmds.c send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL); map 445 drivers/mtd/lpddr/lpddr_cmds.c ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime)); map 448 drivers/mtd/lpddr/lpddr_cmds.c map->name, ret, adr); map 452 drivers/mtd/lpddr/lpddr_cmds.c out: put_chip(map, chip); map 459 drivers/mtd/lpddr/lpddr_cmds.c struct map_info *map = mtd->priv; map 460 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 466 drivers/mtd/lpddr/lpddr_cmds.c ret = get_chip(map, chip, FL_ERASING); map 471 drivers/mtd/lpddr/lpddr_cmds.c send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL); map 473 drivers/mtd/lpddr/lpddr_cmds.c ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000); map 476 drivers/mtd/lpddr/lpddr_cmds.c map->name, ret, adr); map 479 
drivers/mtd/lpddr/lpddr_cmds.c out: put_chip(map, chip); map 487 drivers/mtd/lpddr/lpddr_cmds.c struct map_info *map = mtd->priv; map 488 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 494 drivers/mtd/lpddr/lpddr_cmds.c ret = get_chip(map, chip, FL_READY); map 500 drivers/mtd/lpddr/lpddr_cmds.c map_copy_from(map, buf, adr, len); map 503 drivers/mtd/lpddr/lpddr_cmds.c put_chip(map, chip); map 511 drivers/mtd/lpddr/lpddr_cmds.c struct map_info *map = mtd->priv; map 512 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 518 drivers/mtd/lpddr/lpddr_cmds.c if (!map->virt) map 523 drivers/mtd/lpddr/lpddr_cmds.c *mtdbuf = (void *)map->virt + chip->start + ofs; map 543 drivers/mtd/lpddr/lpddr_cmds.c ret = get_chip(map, chip, FL_POINT); map 563 drivers/mtd/lpddr/lpddr_cmds.c struct map_info *map = mtd->priv; map 564 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 591 drivers/mtd/lpddr/lpddr_cmds.c "pointed region\n", map->name); map 595 drivers/mtd/lpddr/lpddr_cmds.c put_chip(map, chip); map 621 drivers/mtd/lpddr/lpddr_cmds.c struct map_info *map = mtd->priv; map 622 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 647 drivers/mtd/lpddr/lpddr_cmds.c ret = do_write_buffer(map, &lpddr->chips[chipnum], map 669 drivers/mtd/lpddr/lpddr_cmds.c struct map_info *map = mtd->priv; map 670 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 692 drivers/mtd/lpddr/lpddr_cmds.c struct map_info *map = mtd->priv; map 693 drivers/mtd/lpddr/lpddr_cmds.c struct lpddr_private *lpddr = map->fldrv_priv; map 698 drivers/mtd/lpddr/lpddr_cmds.c ret = get_chip(map, chip, FL_LOCKING); map 705 drivers/mtd/lpddr/lpddr_cmds.c send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL); map 708 drivers/mtd/lpddr/lpddr_cmds.c send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL); map 713 drivers/mtd/lpddr/lpddr_cmds.c ret = wait_for_ready(map, chip, 1); map 716 drivers/mtd/lpddr/lpddr_cmds.c map->name, ret); map 719 drivers/mtd/lpddr/lpddr_cmds.c out: put_chip(map, chip); map 20 drivers/mtd/lpddr/qinfo_probe.c static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr); map 21 drivers/mtd/lpddr/qinfo_probe.c struct mtd_info *lpddr_probe(struct map_info *map); map 22 drivers/mtd/lpddr/qinfo_probe.c static struct lpddr_private *lpddr_probe_chip(struct map_info *map); map 23 drivers/mtd/lpddr/qinfo_probe.c static int lpddr_pfow_present(struct map_info *map, map 44 drivers/mtd/lpddr/qinfo_probe.c static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str) map 48 drivers/mtd/lpddr/qinfo_probe.c int bankwidth = map_bankwidth(map) * 8; map 58 drivers/mtd/lpddr/qinfo_probe.c printk(KERN_ERR"%s qinfo id string is wrong! 
\n", map->name); map 63 drivers/mtd/lpddr/qinfo_probe.c static uint16_t lpddr_info_query(struct map_info *map, char *id_str) map 66 drivers/mtd/lpddr/qinfo_probe.c int bits_per_chip = map_bankwidth(map) * 8; map 67 drivers/mtd/lpddr/qinfo_probe.c unsigned long adr = lpddr_get_qinforec_pos(map, id_str); map 71 drivers/mtd/lpddr/qinfo_probe.c map_write(map, CMD(LPDDR_INFO_QUERY), map 72 drivers/mtd/lpddr/qinfo_probe.c map->pfow_base + PFOW_COMMAND_CODE); map 73 drivers/mtd/lpddr/qinfo_probe.c map_write(map, CMD(adr & ((1 << bits_per_chip) - 1)), map 74 drivers/mtd/lpddr/qinfo_probe.c map->pfow_base + PFOW_COMMAND_ADDRESS_L); map 75 drivers/mtd/lpddr/qinfo_probe.c map_write(map, CMD(adr >> bits_per_chip), map 76 drivers/mtd/lpddr/qinfo_probe.c map->pfow_base + PFOW_COMMAND_ADDRESS_H); map 77 drivers/mtd/lpddr/qinfo_probe.c map_write(map, CMD(LPDDR_START_EXECUTION), map 78 drivers/mtd/lpddr/qinfo_probe.c map->pfow_base + PFOW_COMMAND_EXECUTE); map 81 drivers/mtd/lpddr/qinfo_probe.c dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR)); map 87 drivers/mtd/lpddr/qinfo_probe.c val = CMDVAL(map_read(map, map->pfow_base + PFOW_COMMAND_DATA)); map 91 drivers/mtd/lpddr/qinfo_probe.c static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr) map 96 drivers/mtd/lpddr/qinfo_probe.c pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P); map 97 drivers/mtd/lpddr/qinfo_probe.c pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F); map 98 drivers/mtd/lpddr/qinfo_probe.c pfow_val[2] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_O); map 99 drivers/mtd/lpddr/qinfo_probe.c pfow_val[3] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_W); map 101 drivers/mtd/lpddr/qinfo_probe.c if (!map_word_equal(map, CMD('P'), pfow_val[0])) map 104 drivers/mtd/lpddr/qinfo_probe.c if (!map_word_equal(map, CMD('F'), pfow_val[1])) map 107 drivers/mtd/lpddr/qinfo_probe.c if (!map_word_equal(map, CMD('O'), pfow_val[2])) map 110 drivers/mtd/lpddr/qinfo_probe.c if (!map_word_equal(map, CMD('W'), pfow_val[3])) map 116 drivers/mtd/lpddr/qinfo_probe.c map->name, map->pfow_base); map 120 drivers/mtd/lpddr/qinfo_probe.c static int lpddr_chip_setup(struct map_info *map, struct lpddr_private *lpddr) map 128 drivers/mtd/lpddr/qinfo_probe.c lpddr->ManufactId = CMDVAL(map_read(map, map->pfow_base + PFOW_MANUFACTURER_ID)); map 130 drivers/mtd/lpddr/qinfo_probe.c lpddr->DevId = CMDVAL(map_read(map, map->pfow_base + PFOW_DEVICE_ID)); map 132 drivers/mtd/lpddr/qinfo_probe.c lpddr->qinfo->DevSizeShift = lpddr_info_query(map, "DevSizeShift"); map 133 drivers/mtd/lpddr/qinfo_probe.c lpddr->qinfo->TotalBlocksNum = lpddr_info_query(map, "TotalBlocksNum"); map 134 drivers/mtd/lpddr/qinfo_probe.c lpddr->qinfo->BufSizeShift = lpddr_info_query(map, "BufSizeShift"); map 135 drivers/mtd/lpddr/qinfo_probe.c lpddr->qinfo->HWPartsNum = lpddr_info_query(map, "HWPartsNum"); map 137 drivers/mtd/lpddr/qinfo_probe.c lpddr_info_query(map, "UniformBlockSizeShift"); map 138 drivers/mtd/lpddr/qinfo_probe.c lpddr->qinfo->SuspEraseSupp = lpddr_info_query(map, "SuspEraseSupp"); map 140 drivers/mtd/lpddr/qinfo_probe.c lpddr_info_query(map, "SingleWordProgTime"); map 141 drivers/mtd/lpddr/qinfo_probe.c lpddr->qinfo->ProgBufferTime = lpddr_info_query(map, "ProgBufferTime"); map 142 drivers/mtd/lpddr/qinfo_probe.c lpddr->qinfo->BlockEraseTime = lpddr_info_query(map, "BlockEraseTime"); map 145 drivers/mtd/lpddr/qinfo_probe.c static struct lpddr_private *lpddr_probe_chip(struct map_info *map) map 152 
drivers/mtd/lpddr/qinfo_probe.c if ((map->pfow_base + 0x1000) >= map->size) { map 154 drivers/mtd/lpddr/qinfo_probe.c "the map(0x%08lx)\n", map->name, map 155 drivers/mtd/lpddr/qinfo_probe.c (unsigned long)map->pfow_base, map->size - 1); map 159 drivers/mtd/lpddr/qinfo_probe.c if (!lpddr_pfow_present(map, &lpddr)) map 162 drivers/mtd/lpddr/qinfo_probe.c if (!lpddr_chip_setup(map, &lpddr)) map 184 drivers/mtd/lpddr/qinfo_probe.c struct mtd_info *lpddr_probe(struct map_info *map) map 190 drivers/mtd/lpddr/qinfo_probe.c lpddr = lpddr_probe_chip(map); map 194 drivers/mtd/lpddr/qinfo_probe.c map->fldrv_priv = lpddr; map 195 drivers/mtd/lpddr/qinfo_probe.c mtd = lpddr_cmdset(map); map 197 drivers/mtd/lpddr/qinfo_probe.c if (mtd->size > map->size) { map 200 drivers/mtd/lpddr/qinfo_probe.c (unsigned long)map->size >> 10); map 201 drivers/mtd/lpddr/qinfo_probe.c mtd->size = map->size; map 208 drivers/mtd/lpddr/qinfo_probe.c map->fldrv_priv = NULL; map 42 drivers/mtd/maps/amd76xrom.c struct map_info map; map 71 drivers/mtd/maps/amd76xrom.c struct amd76xrom_map_info *map, *scratch; map 82 drivers/mtd/maps/amd76xrom.c list_for_each_entry_safe(map, scratch, &window->maps, list) { map 83 drivers/mtd/maps/amd76xrom.c if (map->rsrc.parent) { map 84 drivers/mtd/maps/amd76xrom.c release_resource(&map->rsrc); map 86 drivers/mtd/maps/amd76xrom.c mtd_device_unregister(map->mtd); map 87 drivers/mtd/maps/amd76xrom.c map_destroy(map->mtd); map 88 drivers/mtd/maps/amd76xrom.c list_del(&map->list); map 89 drivers/mtd/maps/amd76xrom.c kfree(map); map 110 drivers/mtd/maps/amd76xrom.c struct amd76xrom_map_info *map = NULL; map 190 drivers/mtd/maps/amd76xrom.c if (!map) { map 191 drivers/mtd/maps/amd76xrom.c map = kmalloc(sizeof(*map), GFP_KERNEL); map 193 drivers/mtd/maps/amd76xrom.c if (!map) { map 197 drivers/mtd/maps/amd76xrom.c memset(map, 0, sizeof(*map)); map 198 drivers/mtd/maps/amd76xrom.c INIT_LIST_HEAD(&map->list); map 199 drivers/mtd/maps/amd76xrom.c map->map.name = map->map_name; map 200 drivers/mtd/maps/amd76xrom.c map->map.phys = map_top; map 202 drivers/mtd/maps/amd76xrom.c map->map.virt = (void __iomem *) map 204 drivers/mtd/maps/amd76xrom.c map->map.size = 0xffffffffUL - map_top + 1UL; map 206 drivers/mtd/maps/amd76xrom.c sprintf(map->map_name, "%s @%08Lx", map 207 drivers/mtd/maps/amd76xrom.c MOD_NAME, (unsigned long long)map->map.phys); map 210 drivers/mtd/maps/amd76xrom.c for(map->map.bankwidth = 32; map->map.bankwidth; map 211 drivers/mtd/maps/amd76xrom.c map->map.bankwidth >>= 1) map 215 drivers/mtd/maps/amd76xrom.c if (!map_bankwidth_supported(map->map.bankwidth)) map 219 drivers/mtd/maps/amd76xrom.c simple_map_init(&map->map); map 224 drivers/mtd/maps/amd76xrom.c map->mtd = do_map_probe(*probe_type, &map->map); map 225 drivers/mtd/maps/amd76xrom.c if (map->mtd) map 233 drivers/mtd/maps/amd76xrom.c if (map->mtd->size > map->map.size) { map 236 drivers/mtd/maps/amd76xrom.c (unsigned long long)map->mtd->size, map->map.size); map 237 drivers/mtd/maps/amd76xrom.c map->mtd->size = map->map.size; map 245 drivers/mtd/maps/amd76xrom.c map->rsrc.name = map->map_name; map 246 drivers/mtd/maps/amd76xrom.c map->rsrc.start = map->map.phys; map 247 drivers/mtd/maps/amd76xrom.c map->rsrc.end = map->map.phys + map->mtd->size - 1; map 248 drivers/mtd/maps/amd76xrom.c map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; map 249 drivers/mtd/maps/amd76xrom.c if (request_resource(&window->rsrc, &map->rsrc)) { map 252 drivers/mtd/maps/amd76xrom.c map->rsrc.parent = NULL; map 257 drivers/mtd/maps/amd76xrom.c 
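
The qinfo_probe.c entries above read device parameters through the LPDDR overlay window (PFOW): the driver writes a command code and a split address into the window, starts execution, waits on the status register, and reads the result back. A compressed sketch of that sequence; example_cmd() is a single-word stand-in for the CMD()/CMDVAL() helpers those files use, the header location is an assumption, and the status poll is reduced to a comment:

#include <linux/mtd/map.h>
#include <linux/mtd/pfow.h>	/* assumed home of the PFOW_* / LPDDR_* constants above */

/* Single-bankwidth-word stand-in for the CMD()/CMDVAL() helpers. */
static map_word example_cmd(unsigned long v)
{
	map_word w = { .x = { v } };
	return w;
}

static u16 example_info_query(struct map_info *map, unsigned long adr)
{
	unsigned int bits = map_bankwidth(map) * 8;	/* e.g. 16 for a x16 part */
	map_word res;

	map_write(map, example_cmd(LPDDR_INFO_QUERY),
		  map->pfow_base + PFOW_COMMAND_CODE);
	map_write(map, example_cmd(adr & ((1UL << bits) - 1)),
		  map->pfow_base + PFOW_COMMAND_ADDRESS_L);
	map_write(map, example_cmd(adr >> bits),
		  map->pfow_base + PFOW_COMMAND_ADDRESS_H);
	map_write(map, example_cmd(LPDDR_START_EXECUTION),
		  map->pfow_base + PFOW_COMMAND_EXECUTE);

	/* A real driver polls PFOW_DSR for the ready bit before reading. */

	res = map_read(map, map->pfow_base + PFOW_COMMAND_DATA);
	return res.x[0];
}
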
map->map.virt = window->virt; map 258 drivers/mtd/maps/amd76xrom.c map->map.phys = window->phys; map 259 drivers/mtd/maps/amd76xrom.c cfi = map->map.fldrv_priv; map 265 drivers/mtd/maps/amd76xrom.c map->mtd->owner = THIS_MODULE; map 266 drivers/mtd/maps/amd76xrom.c if (mtd_device_register(map->mtd, NULL, 0)) { map 267 drivers/mtd/maps/amd76xrom.c map_destroy(map->mtd); map 268 drivers/mtd/maps/amd76xrom.c map->mtd = NULL; map 274 drivers/mtd/maps/amd76xrom.c map_top += map->mtd->size; map 277 drivers/mtd/maps/amd76xrom.c list_add(&map->list, &window->maps); map 278 drivers/mtd/maps/amd76xrom.c map = NULL; map 283 drivers/mtd/maps/amd76xrom.c kfree(map); map 46 drivers/mtd/maps/ck804xrom.c struct map_info map; map 84 drivers/mtd/maps/ck804xrom.c struct ck804xrom_map_info *map, *scratch; map 94 drivers/mtd/maps/ck804xrom.c list_for_each_entry_safe(map, scratch, &window->maps, list) { map 95 drivers/mtd/maps/ck804xrom.c if (map->rsrc.parent) map 96 drivers/mtd/maps/ck804xrom.c release_resource(&map->rsrc); map 98 drivers/mtd/maps/ck804xrom.c mtd_device_unregister(map->mtd); map 99 drivers/mtd/maps/ck804xrom.c map_destroy(map->mtd); map 100 drivers/mtd/maps/ck804xrom.c list_del(&map->list); map 101 drivers/mtd/maps/ck804xrom.c kfree(map); map 123 drivers/mtd/maps/ck804xrom.c struct ck804xrom_map_info *map = NULL; map 220 drivers/mtd/maps/ck804xrom.c if (!map) map 221 drivers/mtd/maps/ck804xrom.c map = kmalloc(sizeof(*map), GFP_KERNEL); map 223 drivers/mtd/maps/ck804xrom.c if (!map) { map 227 drivers/mtd/maps/ck804xrom.c memset(map, 0, sizeof(*map)); map 228 drivers/mtd/maps/ck804xrom.c INIT_LIST_HEAD(&map->list); map 229 drivers/mtd/maps/ck804xrom.c map->map.name = map->map_name; map 230 drivers/mtd/maps/ck804xrom.c map->map.phys = map_top; map 232 drivers/mtd/maps/ck804xrom.c map->map.virt = (void __iomem *) map 234 drivers/mtd/maps/ck804xrom.c map->map.size = 0xffffffffUL - map_top + 1UL; map 236 drivers/mtd/maps/ck804xrom.c sprintf(map->map_name, "%s @%08Lx", map 237 drivers/mtd/maps/ck804xrom.c MOD_NAME, (unsigned long long)map->map.phys); map 240 drivers/mtd/maps/ck804xrom.c for(map->map.bankwidth = 32; map->map.bankwidth; map 241 drivers/mtd/maps/ck804xrom.c map->map.bankwidth >>= 1) map 245 drivers/mtd/maps/ck804xrom.c if (!map_bankwidth_supported(map->map.bankwidth)) map 249 drivers/mtd/maps/ck804xrom.c simple_map_init(&map->map); map 254 drivers/mtd/maps/ck804xrom.c map->mtd = do_map_probe(*probe_type, &map->map); map 255 drivers/mtd/maps/ck804xrom.c if (map->mtd) map 263 drivers/mtd/maps/ck804xrom.c if (map->mtd->size > map->map.size) { map 266 drivers/mtd/maps/ck804xrom.c (unsigned long long)map->mtd->size, map->map.size); map 267 drivers/mtd/maps/ck804xrom.c map->mtd->size = map->map.size; map 275 drivers/mtd/maps/ck804xrom.c map->rsrc.name = map->map_name; map 276 drivers/mtd/maps/ck804xrom.c map->rsrc.start = map->map.phys; map 277 drivers/mtd/maps/ck804xrom.c map->rsrc.end = map->map.phys + map->mtd->size - 1; map 278 drivers/mtd/maps/ck804xrom.c map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; map 279 drivers/mtd/maps/ck804xrom.c if (request_resource(&window->rsrc, &map->rsrc)) { map 282 drivers/mtd/maps/ck804xrom.c map->rsrc.parent = NULL; map 287 drivers/mtd/maps/ck804xrom.c map->map.virt = window->virt; map 288 drivers/mtd/maps/ck804xrom.c map->map.phys = window->phys; map 289 drivers/mtd/maps/ck804xrom.c cfi = map->map.fldrv_priv; map 294 drivers/mtd/maps/ck804xrom.c map->mtd->owner = THIS_MODULE; map 295 drivers/mtd/maps/ck804xrom.c if (mtd_device_register(map->mtd, 
NULL, 0)) { map 296 drivers/mtd/maps/ck804xrom.c map_destroy(map->mtd); map 297 drivers/mtd/maps/ck804xrom.c map->mtd = NULL; map 303 drivers/mtd/maps/ck804xrom.c map_top += map->mtd->size; map 306 drivers/mtd/maps/ck804xrom.c list_add(&map->list, &window->maps); map 307 drivers/mtd/maps/ck804xrom.c map = NULL; map 312 drivers/mtd/maps/ck804xrom.c kfree(map); map 54 drivers/mtd/maps/dc21285.c static map_word dc21285_read8(struct map_info *map, unsigned long ofs) map 57 drivers/mtd/maps/dc21285.c val.x[0] = *(uint8_t*)(map->virt + ofs); map 61 drivers/mtd/maps/dc21285.c static map_word dc21285_read16(struct map_info *map, unsigned long ofs) map 64 drivers/mtd/maps/dc21285.c val.x[0] = *(uint16_t*)(map->virt + ofs); map 68 drivers/mtd/maps/dc21285.c static map_word dc21285_read32(struct map_info *map, unsigned long ofs) map 71 drivers/mtd/maps/dc21285.c val.x[0] = *(uint32_t*)(map->virt + ofs); map 75 drivers/mtd/maps/dc21285.c static void dc21285_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) map 77 drivers/mtd/maps/dc21285.c memcpy(to, (void*)(map->virt + from), len); map 80 drivers/mtd/maps/dc21285.c static void dc21285_write8(struct map_info *map, const map_word d, unsigned long adr) map 86 drivers/mtd/maps/dc21285.c *(uint8_t*)(map->virt + adr) = d.x[0]; map 89 drivers/mtd/maps/dc21285.c static void dc21285_write16(struct map_info *map, const map_word d, unsigned long adr) map 95 drivers/mtd/maps/dc21285.c *(uint16_t*)(map->virt + adr) = d.x[0]; map 98 drivers/mtd/maps/dc21285.c static void dc21285_write32(struct map_info *map, const map_word d, unsigned long adr) map 102 drivers/mtd/maps/dc21285.c *(uint32_t*)(map->virt + adr) = d.x[0]; map 105 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_32(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 110 drivers/mtd/maps/dc21285.c dc21285_write32(map, d, to); map 117 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_16(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 122 drivers/mtd/maps/dc21285.c dc21285_write16(map, d, to); map 129 drivers/mtd/maps/dc21285.c static void dc21285_copy_to_8(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 133 drivers/mtd/maps/dc21285.c dc21285_write8(map, d, to); map 108 drivers/mtd/maps/esb2rom.c struct map_info map; map 120 drivers/mtd/maps/esb2rom.c struct esb2rom_map_info *map, *scratch; map 129 drivers/mtd/maps/esb2rom.c list_for_each_entry_safe(map, scratch, &window->maps, list) { map 130 drivers/mtd/maps/esb2rom.c if (map->rsrc.parent) map 131 drivers/mtd/maps/esb2rom.c release_resource(&map->rsrc); map 132 drivers/mtd/maps/esb2rom.c mtd_device_unregister(map->mtd); map 133 drivers/mtd/maps/esb2rom.c map_destroy(map->mtd); map 134 drivers/mtd/maps/esb2rom.c list_del(&map->list); map 135 drivers/mtd/maps/esb2rom.c kfree(map); map 153 drivers/mtd/maps/esb2rom.c struct esb2rom_map_info *map = NULL; map 280 drivers/mtd/maps/esb2rom.c if (!map) map 281 drivers/mtd/maps/esb2rom.c map = kmalloc(sizeof(*map), GFP_KERNEL); map 282 drivers/mtd/maps/esb2rom.c if (!map) { map 286 drivers/mtd/maps/esb2rom.c memset(map, 0, sizeof(*map)); map 287 drivers/mtd/maps/esb2rom.c INIT_LIST_HEAD(&map->list); map 288 drivers/mtd/maps/esb2rom.c map->map.name = map->map_name; map 289 drivers/mtd/maps/esb2rom.c map->map.phys = map_top; map 291 drivers/mtd/maps/esb2rom.c map->map.virt = (void __iomem *) map 293 drivers/mtd/maps/esb2rom.c map->map.size = 0xffffffffUL - map_top + 1UL; map 295 
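
The amd76xrom and ck804xrom entries above (and the esb2rom/ichxrom ones that follow) repeat one probing idiom for a BIOS ROM window: scan bank widths from widest to narrowest, initialise the map, try each chip probe in turn, and clamp the resulting MTD to the window. A condensed sketch of that idiom; the probe list and the nesting of the two loops are simplifications, and the resource and window bookkeeping of the real drivers is left out:

#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>

/* Assumed probe order; the real drivers keep their own NULL-terminated list. */
static const char * const example_rom_probes[] = {
	"cfi_probe", "jedec_probe", "map_rom", NULL
};

static struct mtd_info *example_probe_window(struct map_info *map)
{
	const char * const *probe;
	struct mtd_info *mtd = NULL;

	for (map->bankwidth = 32; map->bankwidth; map->bankwidth >>= 1) {
		if (!map_bankwidth_supported(map->bankwidth))
			continue;
		simple_map_init(map);
		for (probe = example_rom_probes; !mtd && *probe; probe++)
			mtd = do_map_probe(*probe, map);
		if (mtd)
			break;
	}
	if (!mtd)
		return NULL;

	if (mtd->size > map->size)	/* chip larger than the window: clamp */
		mtd->size = map->size;

	return mtd;
}
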
drivers/mtd/maps/esb2rom.c sprintf(map->map_name, "%s @%08Lx", map 296 drivers/mtd/maps/esb2rom.c MOD_NAME, (unsigned long long)map->map.phys); map 302 drivers/mtd/maps/esb2rom.c for(map->map.bankwidth = 32; map->map.bankwidth; map 303 drivers/mtd/maps/esb2rom.c map->map.bankwidth >>= 1) { map 306 drivers/mtd/maps/esb2rom.c if (!map_bankwidth_supported(map->map.bankwidth)) map 310 drivers/mtd/maps/esb2rom.c simple_map_init(&map->map); map 315 drivers/mtd/maps/esb2rom.c map->mtd = do_map_probe(*probe_type, &map->map); map 316 drivers/mtd/maps/esb2rom.c if (map->mtd) map 324 drivers/mtd/maps/esb2rom.c if (map->mtd->size > map->map.size) { map 327 drivers/mtd/maps/esb2rom.c (unsigned long long)map->mtd->size, map->map.size); map 328 drivers/mtd/maps/esb2rom.c map->mtd->size = map->map.size; map 336 drivers/mtd/maps/esb2rom.c map->rsrc.name = map->map_name; map 337 drivers/mtd/maps/esb2rom.c map->rsrc.start = map->map.phys; map 338 drivers/mtd/maps/esb2rom.c map->rsrc.end = map->map.phys + map->mtd->size - 1; map 339 drivers/mtd/maps/esb2rom.c map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; map 340 drivers/mtd/maps/esb2rom.c if (request_resource(&window->rsrc, &map->rsrc)) { map 343 drivers/mtd/maps/esb2rom.c map->rsrc.parent = NULL; map 348 drivers/mtd/maps/esb2rom.c map->map.virt = window->virt; map 349 drivers/mtd/maps/esb2rom.c map->map.phys = window->phys; map 350 drivers/mtd/maps/esb2rom.c cfi = map->map.fldrv_priv; map 355 drivers/mtd/maps/esb2rom.c map->mtd->owner = THIS_MODULE; map 356 drivers/mtd/maps/esb2rom.c if (mtd_device_register(map->mtd, NULL, 0)) { map 357 drivers/mtd/maps/esb2rom.c map_destroy(map->mtd); map 358 drivers/mtd/maps/esb2rom.c map->mtd = NULL; map 363 drivers/mtd/maps/esb2rom.c map_top += map->mtd->size; map 366 drivers/mtd/maps/esb2rom.c list_add(&map->list, &window->maps); map 367 drivers/mtd/maps/esb2rom.c map = NULL; map 372 drivers/mtd/maps/esb2rom.c kfree(map); map 47 drivers/mtd/maps/ichxrom.c struct map_info map; map 59 drivers/mtd/maps/ichxrom.c struct ichxrom_map_info *map, *scratch; map 70 drivers/mtd/maps/ichxrom.c list_for_each_entry_safe(map, scratch, &window->maps, list) { map 71 drivers/mtd/maps/ichxrom.c if (map->rsrc.parent) map 72 drivers/mtd/maps/ichxrom.c release_resource(&map->rsrc); map 73 drivers/mtd/maps/ichxrom.c mtd_device_unregister(map->mtd); map 74 drivers/mtd/maps/ichxrom.c map_destroy(map->mtd); map 75 drivers/mtd/maps/ichxrom.c list_del(&map->list); map 76 drivers/mtd/maps/ichxrom.c kfree(map); map 95 drivers/mtd/maps/ichxrom.c struct ichxrom_map_info *map = NULL; map 214 drivers/mtd/maps/ichxrom.c if (!map) { map 215 drivers/mtd/maps/ichxrom.c map = kmalloc(sizeof(*map), GFP_KERNEL); map 217 drivers/mtd/maps/ichxrom.c if (!map) { map 221 drivers/mtd/maps/ichxrom.c memset(map, 0, sizeof(*map)); map 222 drivers/mtd/maps/ichxrom.c INIT_LIST_HEAD(&map->list); map 223 drivers/mtd/maps/ichxrom.c map->map.name = map->map_name; map 224 drivers/mtd/maps/ichxrom.c map->map.phys = map_top; map 226 drivers/mtd/maps/ichxrom.c map->map.virt = (void __iomem *) map 228 drivers/mtd/maps/ichxrom.c map->map.size = 0xffffffffUL - map_top + 1UL; map 230 drivers/mtd/maps/ichxrom.c sprintf(map->map_name, "%s @%08Lx", map 231 drivers/mtd/maps/ichxrom.c MOD_NAME, (unsigned long long)map->map.phys); map 237 drivers/mtd/maps/ichxrom.c for(map->map.bankwidth = 32; map->map.bankwidth; map 238 drivers/mtd/maps/ichxrom.c map->map.bankwidth >>= 1) map 242 drivers/mtd/maps/ichxrom.c if (!map_bankwidth_supported(map->map.bankwidth)) map 246 
drivers/mtd/maps/ichxrom.c simple_map_init(&map->map); map 251 drivers/mtd/maps/ichxrom.c map->mtd = do_map_probe(*probe_type, &map->map); map 252 drivers/mtd/maps/ichxrom.c if (map->mtd) map 260 drivers/mtd/maps/ichxrom.c if (map->mtd->size > map->map.size) { map 263 drivers/mtd/maps/ichxrom.c (unsigned long long)map->mtd->size, map->map.size); map 264 drivers/mtd/maps/ichxrom.c map->mtd->size = map->map.size; map 272 drivers/mtd/maps/ichxrom.c map->rsrc.name = map->map_name; map 273 drivers/mtd/maps/ichxrom.c map->rsrc.start = map->map.phys; map 274 drivers/mtd/maps/ichxrom.c map->rsrc.end = map->map.phys + map->mtd->size - 1; map 275 drivers/mtd/maps/ichxrom.c map->rsrc.flags = IORESOURCE_MEM | IORESOURCE_BUSY; map 276 drivers/mtd/maps/ichxrom.c if (request_resource(&window->rsrc, &map->rsrc)) { map 279 drivers/mtd/maps/ichxrom.c map->rsrc.parent = NULL; map 284 drivers/mtd/maps/ichxrom.c map->map.virt = window->virt; map 285 drivers/mtd/maps/ichxrom.c map->map.phys = window->phys; map 286 drivers/mtd/maps/ichxrom.c cfi = map->map.fldrv_priv; map 292 drivers/mtd/maps/ichxrom.c map->mtd->owner = THIS_MODULE; map 293 drivers/mtd/maps/ichxrom.c if (mtd_device_register(map->mtd, NULL, 0)) { map 294 drivers/mtd/maps/ichxrom.c map_destroy(map->mtd); map 295 drivers/mtd/maps/ichxrom.c map->mtd = NULL; map 301 drivers/mtd/maps/ichxrom.c map_top += map->mtd->size; map 304 drivers/mtd/maps/ichxrom.c list_add(&map->list, &window->maps); map 305 drivers/mtd/maps/ichxrom.c map = NULL; map 310 drivers/mtd/maps/ichxrom.c kfree(map); map 44 drivers/mtd/maps/intel_vr_nor.c struct map_info map; map 89 drivers/mtd/maps/intel_vr_nor.c p->info = do_map_probe(*type, &p->map); map 108 drivers/mtd/maps/intel_vr_nor.c iounmap(p->map.virt); map 151 drivers/mtd/maps/intel_vr_nor.c p->map.name = DRV_NAME; map 152 drivers/mtd/maps/intel_vr_nor.c p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 
1 : 2; map 153 drivers/mtd/maps/intel_vr_nor.c p->map.phys = win_phys + CS0_START; map 154 drivers/mtd/maps/intel_vr_nor.c p->map.size = CS0_SIZE; map 155 drivers/mtd/maps/intel_vr_nor.c p->map.virt = ioremap_nocache(p->map.phys, p->map.size); map 156 drivers/mtd/maps/intel_vr_nor.c if (!p->map.virt) { map 160 drivers/mtd/maps/intel_vr_nor.c simple_map_init(&p->map); map 237 drivers/mtd/maps/intel_vr_nor.c iounmap(p->map.virt); map 89 drivers/mtd/maps/ixp4xx.c static map_word ixp4xx_read16(struct map_info *map, unsigned long ofs) map 92 drivers/mtd/maps/ixp4xx.c val.x[0] = flash_read16(map->virt + ofs); map 101 drivers/mtd/maps/ixp4xx.c static void ixp4xx_copy_from(struct map_info *map, void *to, map 105 drivers/mtd/maps/ixp4xx.c void __iomem *src = map->virt + from; map 132 drivers/mtd/maps/ixp4xx.c static void ixp4xx_probe_write16(struct map_info *map, map_word d, unsigned long adr) map 135 drivers/mtd/maps/ixp4xx.c flash_write16(d.x[0], map->virt + adr); map 141 drivers/mtd/maps/ixp4xx.c static void ixp4xx_write16(struct map_info *map, map_word d, unsigned long adr) map 143 drivers/mtd/maps/ixp4xx.c flash_write16(d.x[0], map->virt + adr); map 148 drivers/mtd/maps/ixp4xx.c struct map_info map; map 204 drivers/mtd/maps/ixp4xx.c info->map.phys = NO_XIP; map 205 drivers/mtd/maps/ixp4xx.c info->map.size = resource_size(dev->resource); map 212 drivers/mtd/maps/ixp4xx.c info->map.bankwidth = 2; map 213 drivers/mtd/maps/ixp4xx.c info->map.name = dev_name(&dev->dev); map 214 drivers/mtd/maps/ixp4xx.c info->map.read = ixp4xx_read16; map 215 drivers/mtd/maps/ixp4xx.c info->map.write = ixp4xx_probe_write16; map 216 drivers/mtd/maps/ixp4xx.c info->map.copy_from = ixp4xx_copy_from; map 218 drivers/mtd/maps/ixp4xx.c info->map.virt = devm_ioremap_resource(&dev->dev, dev->resource); map 219 drivers/mtd/maps/ixp4xx.c if (IS_ERR(info->map.virt)) { map 220 drivers/mtd/maps/ixp4xx.c err = PTR_ERR(info->map.virt); map 224 drivers/mtd/maps/ixp4xx.c info->mtd = do_map_probe(plat->map_name, &info->map); map 233 drivers/mtd/maps/ixp4xx.c info->map.write = ixp4xx_write16; map 33 drivers/mtd/maps/l440gx.c static void l440gx_set_vpp(struct map_info *map, int vpp) map 42 drivers/mtd/maps/lantiq-flash.c struct map_info *map; map 48 drivers/mtd/maps/lantiq-flash.c ltq_read16(struct map_info *map, unsigned long adr) map 53 drivers/mtd/maps/lantiq-flash.c if (map->map_priv_1 == LTQ_NOR_PROBING) map 56 drivers/mtd/maps/lantiq-flash.c temp.x[0] = *(u16 *)(map->virt + adr); map 62 drivers/mtd/maps/lantiq-flash.c ltq_write16(struct map_info *map, map_word d, unsigned long adr) map 66 drivers/mtd/maps/lantiq-flash.c if (map->map_priv_1 == LTQ_NOR_PROBING) map 69 drivers/mtd/maps/lantiq-flash.c *(u16 *)(map->virt + adr) = d.x[0]; map 81 drivers/mtd/maps/lantiq-flash.c ltq_copy_from(struct map_info *map, void *to, map 84 drivers/mtd/maps/lantiq-flash.c unsigned char *f = (unsigned char *)map->virt + from; map 95 drivers/mtd/maps/lantiq-flash.c ltq_copy_to(struct map_info *map, unsigned long to, map 99 drivers/mtd/maps/lantiq-flash.c unsigned char *t = (unsigned char *)map->virt + to; map 127 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map = devm_kzalloc(&pdev->dev, sizeof(struct map_info), map 129 drivers/mtd/maps/lantiq-flash.c if (!ltq_mtd->map) map 132 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->phys = ltq_mtd->res->start; map 133 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->size = resource_size(ltq_mtd->res); map 134 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->virt = devm_ioremap_resource(&pdev->dev, ltq_mtd->res); map 
135 drivers/mtd/maps/lantiq-flash.c if (IS_ERR(ltq_mtd->map->virt)) map 136 drivers/mtd/maps/lantiq-flash.c return PTR_ERR(ltq_mtd->map->virt); map 138 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->name = ltq_map_name; map 139 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->bankwidth = 2; map 140 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->read = ltq_read16; map 141 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->write = ltq_write16; map 142 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->copy_from = ltq_copy_from; map 143 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->copy_to = ltq_copy_to; map 145 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING; map 146 drivers/mtd/maps/lantiq-flash.c ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map); map 147 drivers/mtd/maps/lantiq-flash.c ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL; map 157 drivers/mtd/maps/lantiq-flash.c cfi = ltq_mtd->map->fldrv_priv; map 13 drivers/mtd/maps/map_funcs.c static map_word __xipram simple_map_read(struct map_info *map, unsigned long ofs) map 15 drivers/mtd/maps/map_funcs.c return inline_map_read(map, ofs); map 18 drivers/mtd/maps/map_funcs.c static void __xipram simple_map_write(struct map_info *map, const map_word datum, unsigned long ofs) map 20 drivers/mtd/maps/map_funcs.c inline_map_write(map, datum, ofs); map 23 drivers/mtd/maps/map_funcs.c static void __xipram simple_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) map 25 drivers/mtd/maps/map_funcs.c inline_map_copy_from(map, to, from, len); map 28 drivers/mtd/maps/map_funcs.c static void __xipram simple_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 30 drivers/mtd/maps/map_funcs.c inline_map_copy_to(map, to, from, len); map 33 drivers/mtd/maps/map_funcs.c void simple_map_init(struct map_info *map) map 35 drivers/mtd/maps/map_funcs.c BUG_ON(!map_bankwidth_supported(map->bankwidth)); map 37 drivers/mtd/maps/map_funcs.c map->read = simple_map_read; map 38 drivers/mtd/maps/map_funcs.c map->write = simple_map_write; map 39 drivers/mtd/maps/map_funcs.c map->copy_from = simple_map_copy_from; map 40 drivers/mtd/maps/map_funcs.c map->copy_to = simple_map_copy_to; map 23 drivers/mtd/maps/pci.c int (*init)(struct pci_dev *dev, struct map_pci_info *map); map 24 drivers/mtd/maps/pci.c void (*exit)(struct pci_dev *dev, struct map_pci_info *map); map 25 drivers/mtd/maps/pci.c unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs); map 30 drivers/mtd/maps/pci.c struct map_info map; map 32 drivers/mtd/maps/pci.c void (*exit)(struct pci_dev *dev, struct map_pci_info *map); map 33 drivers/mtd/maps/pci.c unsigned long (*translate)(struct map_pci_info *map, unsigned long ofs); map 39 drivers/mtd/maps/pci.c struct map_pci_info *map = (struct map_pci_info *)_map; map 41 drivers/mtd/maps/pci.c val.x[0]= readb(map->base + map->translate(map, ofs)); map 47 drivers/mtd/maps/pci.c struct map_pci_info *map = (struct map_pci_info *)_map; map 49 drivers/mtd/maps/pci.c val.x[0] = readl(map->base + map->translate(map, ofs)); map 55 drivers/mtd/maps/pci.c struct map_pci_info *map = (struct map_pci_info *)_map; map 56 drivers/mtd/maps/pci.c memcpy_fromio(to, map->base + map->translate(map, from), len); map 61 drivers/mtd/maps/pci.c struct map_pci_info *map = (struct map_pci_info *)_map; map 62 drivers/mtd/maps/pci.c writeb(val.x[0], map->base + map->translate(map, ofs)); map 67 drivers/mtd/maps/pci.c struct map_pci_info *map = (struct map_pci_info *)_map; map 68 drivers/mtd/maps/pci.c 
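
The lantiq-flash and map_funcs.c entries above show the two ways a map gets its accessors: simple_map_init() installs the default inline MMIO helpers, or the driver supplies its own read/write/copy hooks when plain loads and stores are not enough (probe quirks, byte lanes, paging). A small sketch of the custom-hook variant for a 16-bit bus; all names here are illustrative:

#include <linux/io.h>
#include <linux/mtd/map.h>

static map_word example_read16(struct map_info *map, unsigned long ofs)
{
	map_word val;

	val.x[0] = readw(map->virt + ofs);
	return val;
}

static void example_write16(struct map_info *map, map_word d, unsigned long ofs)
{
	writew(d.x[0], map->virt + ofs);
}

static void example_copy_from(struct map_info *map, void *to,
			      unsigned long from, ssize_t len)
{
	memcpy_fromio(to, map->virt + from, len);
}

static void example_copy_to(struct map_info *map, unsigned long to,
			    const void *from, ssize_t len)
{
	memcpy_toio(map->virt + to, from, len);
}

static void example_install_accessors(struct map_info *map)
{
	map->bankwidth = 2;		/* 16-bit bus assumed */
	map->read = example_read16;
	map->write = example_write16;
	map->copy_from = example_copy_from;
	map->copy_to = example_copy_to;
}
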
writel(val.x[0], map->base + map->translate(map, ofs)); map 73 drivers/mtd/maps/pci.c struct map_pci_info *map = (struct map_pci_info *)_map; map 74 drivers/mtd/maps/pci.c memcpy_toio(map->base + map->translate(map, to), from, len); map 88 drivers/mtd/maps/pci.c intel_iq80310_init(struct pci_dev *dev, struct map_pci_info *map) map 92 drivers/mtd/maps/pci.c map->map.bankwidth = 1; map 93 drivers/mtd/maps/pci.c map->map.read = mtd_pci_read8, map 94 drivers/mtd/maps/pci.c map->map.write = mtd_pci_write8, map 96 drivers/mtd/maps/pci.c map->map.size = 0x00800000; map 97 drivers/mtd/maps/pci.c map->base = ioremap_nocache(pci_resource_start(dev, 0), map 100 drivers/mtd/maps/pci.c if (!map->base) map 110 drivers/mtd/maps/pci.c map->map.map_priv_2 = win_base; map 116 drivers/mtd/maps/pci.c intel_iq80310_exit(struct pci_dev *dev, struct map_pci_info *map) map 118 drivers/mtd/maps/pci.c if (map->base) map 119 drivers/mtd/maps/pci.c iounmap(map->base); map 120 drivers/mtd/maps/pci.c pci_write_config_dword(dev, 0x44, map->map.map_priv_2); map 124 drivers/mtd/maps/pci.c intel_iq80310_translate(struct map_pci_info *map, unsigned long ofs) map 133 drivers/mtd/maps/pci.c writel(0x00000008, map->base + 0x1558); map 134 drivers/mtd/maps/pci.c writel(0x00000000, map->base + 0x1550); map 136 drivers/mtd/maps/pci.c writel(0x00000007, map->base + 0x1558); map 137 drivers/mtd/maps/pci.c writel(0x00800000, map->base + 0x1550); map 156 drivers/mtd/maps/pci.c intel_dc21285_init(struct pci_dev *dev, struct map_pci_info *map) map 187 drivers/mtd/maps/pci.c map->map.bankwidth = 4; map 188 drivers/mtd/maps/pci.c map->map.read = mtd_pci_read32, map 189 drivers/mtd/maps/pci.c map->map.write = mtd_pci_write32, map 190 drivers/mtd/maps/pci.c map->map.size = len; map 191 drivers/mtd/maps/pci.c map->base = ioremap_nocache(base, len); map 193 drivers/mtd/maps/pci.c if (!map->base) map 200 drivers/mtd/maps/pci.c intel_dc21285_exit(struct pci_dev *dev, struct map_pci_info *map) map 202 drivers/mtd/maps/pci.c if (map->base) map 203 drivers/mtd/maps/pci.c iounmap(map->base); map 212 drivers/mtd/maps/pci.c intel_dc21285_translate(struct map_pci_info *map, unsigned long ofs) map 255 drivers/mtd/maps/pci.c struct map_pci_info *map = NULL; map 267 drivers/mtd/maps/pci.c map = kmalloc(sizeof(*map), GFP_KERNEL); map 269 drivers/mtd/maps/pci.c if (!map) map 272 drivers/mtd/maps/pci.c map->map = mtd_pci_map; map 273 drivers/mtd/maps/pci.c map->map.name = pci_name(dev); map 274 drivers/mtd/maps/pci.c map->dev = dev; map 275 drivers/mtd/maps/pci.c map->exit = info->exit; map 276 drivers/mtd/maps/pci.c map->translate = info->translate; map 278 drivers/mtd/maps/pci.c err = info->init(dev, map); map 282 drivers/mtd/maps/pci.c mtd = do_map_probe(info->map_name, &map->map); map 295 drivers/mtd/maps/pci.c if (map) { map 296 drivers/mtd/maps/pci.c map->exit(dev, map); map 297 drivers/mtd/maps/pci.c kfree(map); map 308 drivers/mtd/maps/pci.c struct map_pci_info *map = mtd->priv; map 312 drivers/mtd/maps/pci.c map->exit(dev, map); map 313 drivers/mtd/maps/pci.c kfree(map); map 83 drivers/mtd/maps/pcmciamtd.c static void __iomem *remap_window(struct map_info *map, unsigned long to) map 85 drivers/mtd/maps/pcmciamtd.c struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; map 86 drivers/mtd/maps/pcmciamtd.c struct resource *win = (struct resource *) map->map_priv_2; map 108 drivers/mtd/maps/pcmciamtd.c static map_word pcmcia_read8_remap(struct map_info *map, unsigned long ofs) map 113 drivers/mtd/maps/pcmciamtd.c addr = remap_window(map, 
ofs); map 123 drivers/mtd/maps/pcmciamtd.c static map_word pcmcia_read16_remap(struct map_info *map, unsigned long ofs) map 128 drivers/mtd/maps/pcmciamtd.c addr = remap_window(map, ofs); map 138 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_from_remap(struct map_info *map, void *to, unsigned long from, ssize_t len) map 140 drivers/mtd/maps/pcmciamtd.c struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; map 151 drivers/mtd/maps/pcmciamtd.c addr = remap_window(map, from); map 164 drivers/mtd/maps/pcmciamtd.c static void pcmcia_write8_remap(struct map_info *map, map_word d, unsigned long adr) map 166 drivers/mtd/maps/pcmciamtd.c void __iomem *addr = remap_window(map, adr); map 176 drivers/mtd/maps/pcmciamtd.c static void pcmcia_write16_remap(struct map_info *map, map_word d, unsigned long adr) map 178 drivers/mtd/maps/pcmciamtd.c void __iomem *addr = remap_window(map, adr); map 187 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_to_remap(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 189 drivers/mtd/maps/pcmciamtd.c struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; map 200 drivers/mtd/maps/pcmciamtd.c addr = remap_window(map, to); map 215 drivers/mtd/maps/pcmciamtd.c #define DEV_REMOVED(x) (!(pcmcia_dev_present(((struct pcmciamtd_dev *)map->map_priv_1)->p_dev))) map 217 drivers/mtd/maps/pcmciamtd.c static map_word pcmcia_read8(struct map_info *map, unsigned long ofs) map 219 drivers/mtd/maps/pcmciamtd.c void __iomem *win_base = (void __iomem *)map->map_priv_2; map 222 drivers/mtd/maps/pcmciamtd.c if(DEV_REMOVED(map)) map 232 drivers/mtd/maps/pcmciamtd.c static map_word pcmcia_read16(struct map_info *map, unsigned long ofs) map 234 drivers/mtd/maps/pcmciamtd.c void __iomem *win_base = (void __iomem *)map->map_priv_2; map 237 drivers/mtd/maps/pcmciamtd.c if(DEV_REMOVED(map)) map 247 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) map 249 drivers/mtd/maps/pcmciamtd.c void __iomem *win_base = (void __iomem *)map->map_priv_2; map 251 drivers/mtd/maps/pcmciamtd.c if(DEV_REMOVED(map)) map 259 drivers/mtd/maps/pcmciamtd.c static void pcmcia_write8(struct map_info *map, map_word d, unsigned long adr) map 261 drivers/mtd/maps/pcmciamtd.c void __iomem *win_base = (void __iomem *)map->map_priv_2; map 263 drivers/mtd/maps/pcmciamtd.c if(DEV_REMOVED(map)) map 272 drivers/mtd/maps/pcmciamtd.c static void pcmcia_write16(struct map_info *map, map_word d, unsigned long adr) map 274 drivers/mtd/maps/pcmciamtd.c void __iomem *win_base = (void __iomem *)map->map_priv_2; map 276 drivers/mtd/maps/pcmciamtd.c if(DEV_REMOVED(map)) map 285 drivers/mtd/maps/pcmciamtd.c static void pcmcia_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 287 drivers/mtd/maps/pcmciamtd.c void __iomem *win_base = (void __iomem *)map->map_priv_2; map 289 drivers/mtd/maps/pcmciamtd.c if(DEV_REMOVED(map)) map 299 drivers/mtd/maps/pcmciamtd.c static void pcmciamtd_set_vpp(struct map_info *map, int on) map 301 drivers/mtd/maps/pcmciamtd.c struct pcmciamtd_dev *dev = (struct pcmciamtd_dev *)map->map_priv_1; map 93 drivers/mtd/maps/physmap-core.c static void physmap_set_vpp(struct map_info *map, int state) map 100 drivers/mtd/maps/physmap-core.c pdev = (struct platform_device *)map->map_priv_1; map 141 drivers/mtd/maps/physmap-core.c static map_word physmap_addr_gpios_read(struct map_info *map, map 149 drivers/mtd/maps/physmap-core.c pdev = (struct platform_device 
*)map->map_priv_1; map 153 drivers/mtd/maps/physmap-core.c word = readw(map->virt + (ofs & win_mask(info->win_order))); map 158 drivers/mtd/maps/physmap-core.c static void physmap_addr_gpios_copy_from(struct map_info *map, void *buf, map 164 drivers/mtd/maps/physmap-core.c pdev = (struct platform_device *)map->map_priv_1; map 173 drivers/mtd/maps/physmap-core.c memcpy_fromio(buf, map->virt + winofs, chunklen); map 180 drivers/mtd/maps/physmap-core.c static void physmap_addr_gpios_write(struct map_info *map, map_word mw, map 187 drivers/mtd/maps/physmap-core.c pdev = (struct platform_device *)map->map_priv_1; map 192 drivers/mtd/maps/physmap-core.c writew(word, map->virt + (ofs & win_mask(info->win_order))); map 195 drivers/mtd/maps/physmap-core.c static void physmap_addr_gpios_copy_to(struct map_info *map, unsigned long ofs, map 201 drivers/mtd/maps/physmap-core.c pdev = (struct platform_device *)map->map_priv_1; map 210 drivers/mtd/maps/physmap-core.c memcpy_toio(map->virt + winofs, buf, chunklen); map 217 drivers/mtd/maps/physmap-core.c static int physmap_addr_gpios_map_init(struct map_info *map) map 219 drivers/mtd/maps/physmap-core.c map->phys = NO_XIP; map 220 drivers/mtd/maps/physmap-core.c map->read = physmap_addr_gpios_read; map 221 drivers/mtd/maps/physmap-core.c map->copy_from = physmap_addr_gpios_copy_from; map 222 drivers/mtd/maps/physmap-core.c map->write = physmap_addr_gpios_write; map 223 drivers/mtd/maps/physmap-core.c map->copy_to = physmap_addr_gpios_copy_to; map 228 drivers/mtd/maps/physmap-core.c static int physmap_addr_gpios_map_init(struct map_info *map) map 86 drivers/mtd/maps/physmap-gemini.c static map_word __xipram gemini_flash_map_read(struct map_info *map, map 92 drivers/mtd/maps/physmap-gemini.c ret = inline_map_read(map, ofs); map 98 drivers/mtd/maps/physmap-gemini.c static void __xipram gemini_flash_map_write(struct map_info *map, map 103 drivers/mtd/maps/physmap-gemini.c inline_map_write(map, datum, ofs); map 107 drivers/mtd/maps/physmap-gemini.c static void __xipram gemini_flash_map_copy_from(struct map_info *map, map 112 drivers/mtd/maps/physmap-gemini.c inline_map_copy_from(map, to, from, len); map 116 drivers/mtd/maps/physmap-gemini.c static void __xipram gemini_flash_map_copy_to(struct map_info *map, map 121 drivers/mtd/maps/physmap-gemini.c inline_map_copy_to(map, to, from, len); map 127 drivers/mtd/maps/physmap-gemini.c struct map_info *map) map 168 drivers/mtd/maps/physmap-gemini.c if (map->bankwidth != 2) map 170 drivers/mtd/maps/physmap-gemini.c map->bankwidth * 8); map 172 drivers/mtd/maps/physmap-gemini.c if (map->bankwidth != 1) map 174 drivers/mtd/maps/physmap-gemini.c map->bankwidth * 8); map 197 drivers/mtd/maps/physmap-gemini.c map->read = gemini_flash_map_read; map 198 drivers/mtd/maps/physmap-gemini.c map->write = gemini_flash_map_write; map 199 drivers/mtd/maps/physmap-gemini.c map->copy_from = gemini_flash_map_copy_from; map 200 drivers/mtd/maps/physmap-gemini.c map->copy_to = gemini_flash_map_copy_to; map 8 drivers/mtd/maps/physmap-gemini.h struct map_info *map); map 13 drivers/mtd/maps/physmap-gemini.h struct map_info *map) map 121 drivers/mtd/maps/physmap-versatile.c static void ap_flash_set_vpp(struct map_info *map, int on) map 149 drivers/mtd/maps/physmap-versatile.c static void cp_flash_set_vpp(struct map_info *map, int on) map 176 drivers/mtd/maps/physmap-versatile.c static void versatile_flash_set_vpp(struct map_info *map, int on) map 188 drivers/mtd/maps/physmap-versatile.c struct map_info *map) map 221 
drivers/mtd/maps/physmap-versatile.c map->set_vpp = ap_flash_set_vpp; map 225 drivers/mtd/maps/physmap-versatile.c map->set_vpp = cp_flash_set_vpp; map 230 drivers/mtd/maps/physmap-versatile.c map->set_vpp = versatile_flash_set_vpp; map 8 drivers/mtd/maps/physmap-versatile.h struct map_info *map); map 13 drivers/mtd/maps/physmap-versatile.h struct map_info *map) map 32 drivers/mtd/maps/plat-ram.c struct map_info map; map 127 drivers/mtd/maps/plat-ram.c info->map.virt = devm_ioremap_resource(&pdev->dev, res); map 128 drivers/mtd/maps/plat-ram.c if (IS_ERR(info->map.virt)) { map 129 drivers/mtd/maps/plat-ram.c err = PTR_ERR(info->map.virt); map 139 drivers/mtd/maps/plat-ram.c info->map.phys = res->start; map 140 drivers/mtd/maps/plat-ram.c info->map.size = resource_size(res); map 141 drivers/mtd/maps/plat-ram.c info->map.name = pdata->mapname != NULL ? map 143 drivers/mtd/maps/plat-ram.c info->map.bankwidth = pdata->bankwidth; map 145 drivers/mtd/maps/plat-ram.c dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size); map 147 drivers/mtd/maps/plat-ram.c simple_map_init(&info->map); map 158 drivers/mtd/maps/plat-ram.c info->mtd = do_map_probe(*map_probes , &info->map); map 162 drivers/mtd/maps/plat-ram.c info->mtd = do_map_probe("map_ram", &info->map); map 25 drivers/mtd/maps/pxa2xx-flash.c static void pxa2xx_map_inval_cache(struct map_info *map, unsigned long from, map 28 drivers/mtd/maps/pxa2xx-flash.c unsigned long start = (unsigned long)map->cached + from; map 41 drivers/mtd/maps/pxa2xx-flash.c struct map_info map; map 60 drivers/mtd/maps/pxa2xx-flash.c info->map.name = flash->name; map 61 drivers/mtd/maps/pxa2xx-flash.c info->map.bankwidth = flash->width; map 62 drivers/mtd/maps/pxa2xx-flash.c info->map.phys = res->start; map 63 drivers/mtd/maps/pxa2xx-flash.c info->map.size = resource_size(res); map 65 drivers/mtd/maps/pxa2xx-flash.c info->map.virt = ioremap(info->map.phys, info->map.size); map 66 drivers/mtd/maps/pxa2xx-flash.c if (!info->map.virt) { map 68 drivers/mtd/maps/pxa2xx-flash.c info->map.name); map 71 drivers/mtd/maps/pxa2xx-flash.c info->map.cached = ioremap_cache(info->map.phys, info->map.size); map 72 drivers/mtd/maps/pxa2xx-flash.c if (!info->map.cached) map 74 drivers/mtd/maps/pxa2xx-flash.c info->map.name); map 75 drivers/mtd/maps/pxa2xx-flash.c info->map.inval_cache = pxa2xx_map_inval_cache; map 76 drivers/mtd/maps/pxa2xx-flash.c simple_map_init(&info->map); map 81 drivers/mtd/maps/pxa2xx-flash.c info->map.name, (unsigned long)info->map.phys, map 82 drivers/mtd/maps/pxa2xx-flash.c info->map.bankwidth * 8); map 84 drivers/mtd/maps/pxa2xx-flash.c info->mtd = do_map_probe(flash->map_name, &info->map); map 87 drivers/mtd/maps/pxa2xx-flash.c iounmap((void *)info->map.virt); map 88 drivers/mtd/maps/pxa2xx-flash.c if (info->map.cached) map 89 drivers/mtd/maps/pxa2xx-flash.c iounmap(info->map.cached); map 108 drivers/mtd/maps/pxa2xx-flash.c iounmap(info->map.virt); map 109 drivers/mtd/maps/pxa2xx-flash.c if (info->map.cached) map 110 drivers/mtd/maps/pxa2xx-flash.c iounmap(info->map.cached); map 23 drivers/mtd/maps/rbtx4939-flash.c struct map_info map; map 74 drivers/mtd/maps/rbtx4939-flash.c info->map.name = dev_name(&dev->dev); map 75 drivers/mtd/maps/rbtx4939-flash.c info->map.phys = res->start; map 76 drivers/mtd/maps/rbtx4939-flash.c info->map.size = size; map 77 drivers/mtd/maps/rbtx4939-flash.c info->map.bankwidth = pdata->width; map 79 drivers/mtd/maps/rbtx4939-flash.c info->map.virt = devm_ioremap(&dev->dev, info->map.phys, size); map 80 
drivers/mtd/maps/rbtx4939-flash.c if (!info->map.virt) map 84 drivers/mtd/maps/rbtx4939-flash.c (*pdata->map_init)(&info->map); map 86 drivers/mtd/maps/rbtx4939-flash.c simple_map_init(&info->map); map 90 drivers/mtd/maps/rbtx4939-flash.c info->mtd = do_map_probe(*probe_type, &info->map); map 29 drivers/mtd/maps/sa1100-flash.c struct map_info map; map 42 drivers/mtd/maps/sa1100-flash.c static void sa1100_set_vpp(struct map_info *map, int on) map 44 drivers/mtd/maps/sa1100-flash.c struct sa_subdev_info *subdev = container_of(map, struct sa_subdev_info, map); map 62 drivers/mtd/maps/sa1100-flash.c if (subdev->map.virt) map 63 drivers/mtd/maps/sa1100-flash.c iounmap(subdev->map.virt); map 64 drivers/mtd/maps/sa1100-flash.c release_mem_region(subdev->map.phys, subdev->map.size); map 87 drivers/mtd/maps/sa1100-flash.c subdev->map.bankwidth = (MSC0 & MSC_RBW) ? 2 : 4; map 91 drivers/mtd/maps/sa1100-flash.c subdev->map.bankwidth = ((MSC0 >> 16) & MSC_RBW) ? 2 : 4; map 101 drivers/mtd/maps/sa1100-flash.c subdev->map.set_vpp = sa1100_set_vpp; map 103 drivers/mtd/maps/sa1100-flash.c subdev->map.phys = phys; map 104 drivers/mtd/maps/sa1100-flash.c subdev->map.size = size; map 105 drivers/mtd/maps/sa1100-flash.c subdev->map.virt = ioremap(phys, size); map 106 drivers/mtd/maps/sa1100-flash.c if (!subdev->map.virt) { map 111 drivers/mtd/maps/sa1100-flash.c simple_map_init(&subdev->map); map 117 drivers/mtd/maps/sa1100-flash.c subdev->mtd = do_map_probe(subdev->plat->map_name, &subdev->map); map 125 drivers/mtd/maps/sa1100-flash.c subdev->map.bankwidth * 8); map 199 drivers/mtd/maps/sa1100-flash.c subdev->map.name = subdev->name; map 91 drivers/mtd/maps/sbc_gxx.c static inline void sbc_gxx_page(struct map_info *map, unsigned long ofs) map 102 drivers/mtd/maps/sbc_gxx.c static map_word sbc_gxx_read8(struct map_info *map, unsigned long ofs) map 106 drivers/mtd/maps/sbc_gxx.c sbc_gxx_page(map, ofs); map 112 drivers/mtd/maps/sbc_gxx.c static void sbc_gxx_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) map 120 drivers/mtd/maps/sbc_gxx.c sbc_gxx_page(map, from); map 129 drivers/mtd/maps/sbc_gxx.c static void sbc_gxx_write8(struct map_info *map, map_word d, unsigned long adr) map 132 drivers/mtd/maps/sbc_gxx.c sbc_gxx_page(map, adr); map 137 drivers/mtd/maps/sbc_gxx.c static void sbc_gxx_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 145 drivers/mtd/maps/sbc_gxx.c sbc_gxx_page(map, to); map 76 drivers/mtd/maps/scb2_flash.c struct map_info *map = mtd->priv; map 77 drivers/mtd/maps/scb2_flash.c struct cfi_private *cfi = map->fldrv_priv; map 89 drivers/mtd/maps/scb2_flash.c mtd->size = map->size; map 41 drivers/mtd/maps/sun_uflash.c struct map_info map; /* mtd map info */ map 72 drivers/mtd/maps/sun_uflash.c memcpy(&up->map, &uflash_map_templ, sizeof(uflash_map_templ)); map 74 drivers/mtd/maps/sun_uflash.c up->map.size = resource_size(&op->resource[0]); map 78 drivers/mtd/maps/sun_uflash.c up->map.name = up->name; map 80 drivers/mtd/maps/sun_uflash.c up->map.phys = op->resource[0].start; map 82 drivers/mtd/maps/sun_uflash.c up->map.virt = of_ioremap(&op->resource[0], 0, up->map.size, map 84 drivers/mtd/maps/sun_uflash.c if (!up->map.virt) { map 91 drivers/mtd/maps/sun_uflash.c simple_map_init(&up->map); map 94 drivers/mtd/maps/sun_uflash.c up->mtd = do_map_probe("cfi_probe", &up->map); map 96 drivers/mtd/maps/sun_uflash.c of_iounmap(&op->resource[0], up->map.virt, up->map.size); map 132 drivers/mtd/maps/sun_uflash.c if (up->map.virt) { map 133 
drivers/mtd/maps/sun_uflash.c of_iounmap(&op->resource[0], up->map.virt, up->map.size); map 134 drivers/mtd/maps/sun_uflash.c up->map.virt = NULL; map 18 drivers/mtd/maps/tsunami_flash.c static inline map_word tsunami_flash_read8(struct map_info *map, unsigned long offset) map 25 drivers/mtd/maps/tsunami_flash.c static void tsunami_flash_write8(struct map_info *map, map_word value, unsigned long offset) map 31 drivers/mtd/maps/tsunami_flash.c struct map_info *map, void *addr, unsigned long offset, ssize_t len) map 44 drivers/mtd/maps/tsunami_flash.c struct map_info *map, unsigned long offset, map 57 drivers/mtd/maps/uclinux.c struct map_info *map = mtd->priv; map 58 drivers/mtd/maps/uclinux.c *virt = map->virt + from; map 60 drivers/mtd/maps/uclinux.c *phys = map->phys + from; map 1136 drivers/mtd/mtdchar.c struct map_info *map = mtd->priv; map 1144 drivers/mtd/mtdchar.c if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) map 1147 drivers/mtd/mtdchar.c return vm_iomap_memory(vma, map->phys, map->size); map 325 drivers/mtd/rfd_ftl.c u16 *map; map 335 drivers/mtd/rfd_ftl.c map = kmalloc(part->header_size, GFP_KERNEL); map 336 drivers/mtd/rfd_ftl.c if (!map) map 340 drivers/mtd/rfd_ftl.c part->header_size, &retlen, (u_char *)map); map 354 drivers/mtd/rfd_ftl.c u16 entry = le16_to_cpu(map[HEADER_MAP_OFFSET + i]); map 401 drivers/mtd/rfd_ftl.c kfree(map); map 607 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_map *map = &nor->params.erase_map; map 612 drivers/mtd/spi-nor/spi-nor.c erase = &map->erase_type[i]; map 994 drivers/mtd/spi-nor/spi-nor.c spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map, map 1012 drivers/mtd/spi-nor/spi-nor.c erase = &map->erase_type[i]; map 1057 drivers/mtd/spi-nor/spi-nor.c spi_nor_find_erase_region(const struct spi_nor_erase_map *map, u64 addr) map 1059 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_region *region = map->regions; map 1136 drivers/mtd/spi-nor/spi-nor.c const struct spi_nor_erase_map *map = &nor->params.erase_map; map 1143 drivers/mtd/spi-nor/spi-nor.c region = spi_nor_find_erase_region(map, addr); map 1150 drivers/mtd/spi-nor/spi-nor.c erase = spi_nor_find_best_erase_type(map, region, addr, len); map 3246 drivers/mtd/spi-nor/spi-nor.c static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask) map 3248 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_type *erase_type = map->erase_type; map 3275 drivers/mtd/spi-nor/spi-nor.c static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) map 3277 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_region *region = map->regions; map 3283 drivers/mtd/spi-nor/spi-nor.c sorted_erase_mask = spi_nor_sort_erase_mask(map, map 3301 drivers/mtd/spi-nor/spi-nor.c static void spi_nor_init_uniform_erase_map(struct spi_nor_erase_map *map, map 3305 drivers/mtd/spi-nor/spi-nor.c map->uniform_region.offset = (erase_mask & SNOR_ERASE_TYPE_MASK) | map 3307 drivers/mtd/spi-nor/spi-nor.c map->uniform_region.size = flash_size; map 3308 drivers/mtd/spi-nor/spi-nor.c map->regions = &map->uniform_region; map 3309 drivers/mtd/spi-nor/spi-nor.c map->uniform_erase_type = erase_mask; map 3359 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_map *map = &params->erase_map; map 3360 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_type *erase_type = map->erase_type; map 3459 drivers/mtd/spi-nor/spi-nor.c spi_nor_init_uniform_erase_map(map, erase_mask, params->size); map 3471 drivers/mtd/spi-nor/spi-nor.c spi_nor_regions_sort_erase_types(map); map 3472
drivers/mtd/spi-nor/spi-nor.c map->uniform_erase_type = map->uniform_region.offset & map 3726 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_map *map = &params->erase_map; map 3727 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_type *erase = map->erase_type; map 3744 drivers/mtd/spi-nor/spi-nor.c map->regions = region; map 3774 drivers/mtd/spi-nor/spi-nor.c save_uniform_erase_type = map->uniform_erase_type; map 3775 drivers/mtd/spi-nor/spi-nor.c map->uniform_erase_type = spi_nor_sort_erase_mask(map, map 3783 drivers/mtd/spi-nor/spi-nor.c map->uniform_erase_type = save_uniform_erase_type; map 3905 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_map *map = &params->erase_map; map 3906 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_type *erase_type = map->erase_type; map 3980 drivers/mtd/spi-nor/spi-nor.c erase_mask = spi_nor_sort_erase_mask(map, erase_mask); map 4227 drivers/mtd/spi-nor/spi-nor.c spi_nor_select_uniform_erase(struct spi_nor_erase_map *map, map 4232 drivers/mtd/spi-nor/spi-nor.c u8 uniform_erase_type = map->uniform_erase_type; map 4238 drivers/mtd/spi-nor/spi-nor.c tested_erase = &map->erase_type[i]; map 4262 drivers/mtd/spi-nor/spi-nor.c map->uniform_erase_type &= ~SNOR_ERASE_TYPE_MASK; map 4263 drivers/mtd/spi-nor/spi-nor.c map->uniform_erase_type |= BIT(erase - map->erase_type); map 4269 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_map *map = &nor->params.erase_map; map 4289 drivers/mtd/spi-nor/spi-nor.c erase = spi_nor_select_uniform_erase(map, wanted_size); map 4302 drivers/mtd/spi-nor/spi-nor.c if (map->erase_type[i].size) { map 4303 drivers/mtd/spi-nor/spi-nor.c erase = &map->erase_type[i]; map 4463 drivers/mtd/spi-nor/spi-nor.c struct spi_nor_erase_map *map = &params->erase_map; map 4531 drivers/mtd/spi-nor/spi-nor.c spi_nor_set_erase_type(&map->erase_type[i], 4096u, map 4536 drivers/mtd/spi-nor/spi-nor.c spi_nor_set_erase_type(&map->erase_type[i], 4096u, map 4541 drivers/mtd/spi-nor/spi-nor.c spi_nor_set_erase_type(&map->erase_type[i], info->sector_size, map 4543 drivers/mtd/spi-nor/spi-nor.c spi_nor_init_uniform_erase_map(map, erase_mask, params->size); map 242 drivers/net/dsa/mv88e6xxx/chip.c .map = mv88e6xxx_g1_irq_domain_map, map 144 drivers/net/dsa/mv88e6xxx/global2.c u16 map) map 147 drivers/net/dsa/mv88e6xxx/global2.c u16 val = (id << 11) | (map & port_mask); map 1061 drivers/net/dsa/mv88e6xxx/global2.c .map = mv88e6xxx_g2_irq_domain_map, map 999 drivers/net/dsa/mv88e6xxx/port.c int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map) map 1010 drivers/net/dsa/mv88e6xxx/port.c reg |= map & mask; map 1016 drivers/net/dsa/mv88e6xxx/port.c dev_dbg(chip->dev, "p%d: VLANTable set to %.3x\n", port, map); map 317 drivers/net/dsa/mv88e6xxx/port.h int mv88e6xxx_port_set_vlan_map(struct mv88e6xxx_chip *chip, int port, u16 map); map 400 drivers/net/dsa/realtek-smi-core.c smi->map = devm_regmap_init(dev, NULL, smi, map 402 drivers/net/dsa/realtek-smi-core.c if (IS_ERR(smi->map)) { map 403 drivers/net/dsa/realtek-smi-core.c ret = PTR_ERR(smi->map); map 48 drivers/net/dsa/realtek-smi-core.h struct regmap *map; map 366 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, addr, 0); /* Write whatever */ map 371 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_MIB_CTRL_REG, &val); map 384 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, addr + (i - 1), &val); map 412 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, map 423 drivers/net/dsa/rtl8366rb.c ret =
regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_MASK_REG, map 437 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, map 487 drivers/net/dsa/rtl8366rb.c .map = rtl8366rb_irq_map, map 515 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_INTERRUPT_STATUS_REG, map 536 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_INTERRUPT_CONTROL_REG, map 579 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_SMAR0, val); map 583 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_SMAR1, val); map 587 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_SMAR2, val); map 722 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id); map 736 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_CHIP_VERSION_CTRL_REG, map 783 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, map 789 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, map 799 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, map 810 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_BUSY_REG, map 815 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, map 820 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, map 828 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, map 835 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, 0x0c, 0x240); map 838 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, 0x0d, 0x240); map 854 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8368RB_CPU_CTRL_REG, map 861 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_PECR, map 868 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_SGCR, map 875 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0); map 880 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0); map 891 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_PMC0, map 900 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG, map 906 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_SSCR2, map 912 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_LED_BLINKRATE_REG, map 925 drivers/net/dsa/rtl8366rb.c regmap_update_bits(smi->map, map 928 drivers/net/dsa/rtl8366rb.c regmap_update_bits(smi->map, map 931 drivers/net/dsa/rtl8366rb.c regmap_update_bits(smi->map, map 941 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, map 998 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG, map 1003 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2, map 1010 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), map 1027 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, map 1032 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, map 1038 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, map 1043 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, map 1049 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, map 1070 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), map 1086 drivers/net/dsa/rtl8366rb.c ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port), map 1107 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_VLAN_TABLE_WRITE_BASE, map 1113 
drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, map 1119 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, map 1155 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, map 1163 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_TABLE_ACCESS_CTRL_REG, map 1182 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, map 1224 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, map 1242 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), map 1258 drivers/net/dsa/rtl8366rb.c return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port), map 1281 drivers/net/dsa/rtl8366rb.c return regmap_update_bits(smi->map, map 1289 drivers/net/dsa/rtl8366rb.c return regmap_update_bits(smi->map, RTL8366RB_SGCR, map 1303 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, map 1310 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, reg, 0); map 1318 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_PHY_ACCESS_DATA_REG, &val); map 1337 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, RTL8366RB_PHY_ACCESS_CTRL_REG, map 1347 drivers/net/dsa/rtl8366rb.c ret = regmap_write(smi->map, reg, val); map 1364 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, RTL8366RB_RESET_CTRL_REG, &val); map 1387 drivers/net/dsa/rtl8366rb.c ret = regmap_read(smi->map, 0x5c, &val); map 167 drivers/net/ethernet/3com/3c589_cs.c static int el3_config(struct net_device *dev, struct ifmap *map); map 494 drivers/net/ethernet/3com/3c589_cs.c static int el3_config(struct net_device *dev, struct ifmap *map) map 496 drivers/net/ethernet/3com/3c589_cs.c if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { map 497 drivers/net/ethernet/3com/3c589_cs.c if (map->port <= 3) { map 498 drivers/net/ethernet/3com/3c589_cs.c dev->if_port = map->port; map 251 drivers/net/ethernet/8390/etherh.c static int etherh_set_config(struct net_device *dev, struct ifmap *map) map 253 drivers/net/ethernet/8390/etherh.c switch (map->port) { map 261 drivers/net/ethernet/8390/etherh.c dev->if_port = map->port; map 103 drivers/net/ethernet/8390/pcnet_cs.c static int set_config(struct net_device *dev, struct ifmap *map); map 980 drivers/net/ethernet/8390/pcnet_cs.c static int set_config(struct net_device *dev, struct ifmap *map) map 983 drivers/net/ethernet/8390/pcnet_cs.c if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { map 986 drivers/net/ethernet/8390/pcnet_cs.c else if ((map->port < 1) || (map->port > 2)) map 988 drivers/net/ethernet/8390/pcnet_cs.c dev->if_port = map->port; map 405 drivers/net/ethernet/amd/nmclan_cs.c static int mace_config(struct net_device *dev, struct ifmap *map); map 760 drivers/net/ethernet/amd/nmclan_cs.c static int mace_config(struct net_device *dev, struct ifmap *map) map 762 drivers/net/ethernet/amd/nmclan_cs.c if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { map 763 drivers/net/ethernet/amd/nmclan_cs.c if (map->port <= 2) { map 764 drivers/net/ethernet/amd/nmclan_cs.c dev->if_port = map->port; map 661 drivers/net/ethernet/broadcom/b44.c struct ring_info *src_map, *map; map 672 drivers/net/ethernet/broadcom/b44.c map = &bp->rx_buffers[dest_idx]; map 711 drivers/net/ethernet/broadcom/b44.c map->skb = skb; map 712 drivers/net/ethernet/broadcom/b44.c map->mapping = mapping; map 792 drivers/net/ethernet/broadcom/b44.c dma_addr_t map = rp->mapping; map 796 drivers/net/ethernet/broadcom/b44.c 
dma_sync_single_for_cpu(bp->sdev->dma_dev, map, map 830 drivers/net/ethernet/broadcom/b44.c dma_unmap_single(bp->sdev->dma_dev, map, map 5814 drivers/net/ethernet/broadcom/bnx2.c dma_addr_t map; map 5851 drivers/net/ethernet/broadcom/bnx2.c map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, map 5853 drivers/net/ethernet/broadcom/bnx2.c if (dma_mapping_error(&bp->pdev->dev, map)) { map 5870 drivers/net/ethernet/broadcom/bnx2.c txbd->tx_bd_haddr_hi = (u64) map >> 32; map 5871 drivers/net/ethernet/broadcom/bnx2.c txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff; map 5891 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); map 1180 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; map 1183 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (test_bit(idx, map->agg_idx_bmap)) map 1184 drivers/net/ethernet/broadcom/bnxt/bnxt.c idx = find_first_zero_bit(map->agg_idx_bmap, map 1186 drivers/net/ethernet/broadcom/bnxt/bnxt.c __set_bit(idx, map->agg_idx_bmap); map 1187 drivers/net/ethernet/broadcom/bnxt/bnxt.c map->agg_id_tbl[agg_id] = idx; map 1193 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; map 1195 drivers/net/ethernet/broadcom/bnxt/bnxt.c __clear_bit(idx, map->agg_idx_bmap); map 1200 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map; map 1202 drivers/net/ethernet/broadcom/bnxt/bnxt.c return map->agg_id_tbl[agg_id]; map 2553 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct bnxt_tpa_idx_map *map; map 2624 drivers/net/ethernet/broadcom/bnxt/bnxt.c map = rxr->rx_tpa_idx_map; map 2625 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (map) map 2626 drivers/net/ethernet/broadcom/bnxt/bnxt.c memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap)); map 2819 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c dma_addr_t map; map 2838 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, map 2840 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c if (dma_mapping_error(&bp->pdev->dev, map)) { map 2844 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c bnxt_xmit_bd(bp, txr, map, pkt_size); map 2852 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); map 1381 drivers/net/ethernet/broadcom/cnic.c dma_addr_t map; map 1383 drivers/net/ethernet/broadcom/cnic.c map = ctx->kwqe_data_mapping; map 1384 drivers/net/ethernet/broadcom/cnic.c l5_data->phy_address.lo = (u64) map & 0xffffffff; map 1385 drivers/net/ethernet/broadcom/cnic.c l5_data->phy_address.hi = (u64) map >> 32; map 4854 drivers/net/ethernet/broadcom/cnic.c dma_addr_t map = ctx->mapping; map 4859 drivers/net/ethernet/broadcom/cnic.c map = (map + mask) & ~mask; map 4862 drivers/net/ethernet/broadcom/cnic.c cnic_ctx_tbl_wr(dev, start_offset + i, map); map 6684 drivers/net/ethernet/broadcom/tg3.c struct ring_info *map; map 6693 drivers/net/ethernet/broadcom/tg3.c map = &tpr->rx_std_buffers[dest_idx]; map 6700 drivers/net/ethernet/broadcom/tg3.c map = &tpr->rx_jmb_buffers[dest_idx]; map 6735 drivers/net/ethernet/broadcom/tg3.c map->data = data; map 6736 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr_set(map, mapping, mapping); map 7706 drivers/net/ethernet/broadcom/tg3.c dma_addr_t map, u32 len, u32 flags, map 7715 drivers/net/ethernet/broadcom/tg3.c if (tg3_4g_overflow_test(map, len)) map 7718 drivers/net/ethernet/broadcom/tg3.c if (tg3_4g_tso_overflow_test(tp, 
map, len, mss)) map 7721 drivers/net/ethernet/broadcom/tg3.c if (tg3_40bit_overflow_test(tp, map, len)) map 7739 drivers/net/ethernet/broadcom/tg3.c tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, map 7745 drivers/net/ethernet/broadcom/tg3.c map += frag_len; map 7750 drivers/net/ethernet/broadcom/tg3.c tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, map 7760 drivers/net/ethernet/broadcom/tg3.c tg3_tx_set_bd(&tnapi->tx_ring[*entry], map, map 13433 drivers/net/ethernet/broadcom/tg3.c dma_addr_t map; map 13518 drivers/net/ethernet/broadcom/tg3.c map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); map 13519 drivers/net/ethernet/broadcom/tg3.c if (pci_dma_mapping_error(tp->pdev, map)) { map 13526 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); map 13536 drivers/net/ethernet/broadcom/tg3.c if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len, map 13608 drivers/net/ethernet/broadcom/tg3.c map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], map 13612 drivers/net/ethernet/broadcom/tg3.c map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx], map 13617 drivers/net/ethernet/broadcom/tg3.c pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, map 54 drivers/net/ethernet/cavium/thunder/nic_main.c #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) map 55 drivers/net/ethernet/cavium/thunder/nic_main.c #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) map 180 drivers/net/ethernet/cavium/thunder/thunder_bgx.c unsigned map = 0; map 184 drivers/net/ethernet/cavium/thunder/thunder_bgx.c map |= (1 << i); map 187 drivers/net/ethernet/cavium/thunder/thunder_bgx.c return map; map 2771 drivers/net/ethernet/chelsio/cxgb3/sge.c u32 map; map 2776 drivers/net/ethernet/chelsio/cxgb3/sge.c map = t3_read_reg(adap, A_SG_DATA_INTR); map 2778 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(!map)) /* shared interrupt, most likely */ map 2783 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(map & F_ERRINTR)) map 2786 drivers/net/ethernet/chelsio/cxgb3/sge.c if (likely(map & 1)) map 2789 drivers/net/ethernet/chelsio/cxgb3/sge.c if (map & 2) map 2805 drivers/net/ethernet/chelsio/cxgb3/sge.c u32 map; map 2811 drivers/net/ethernet/chelsio/cxgb3/sge.c map = t3_read_reg(adap, A_SG_DATA_INTR); map 2813 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(!map)) /* shared interrupt, most likely */ map 2818 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(map & F_ERRINTR)) map 2821 drivers/net/ethernet/chelsio/cxgb3/sge.c if (likely(map & 1)) map 2824 drivers/net/ethernet/chelsio/cxgb3/sge.c if (map & 2) map 50 drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h u32 map; map 1245 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A); map 1755 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, map 2265 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c #define G_PFnLKPIDX(map, n) \ map 2266 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c (((map) >> PF1LKPIDX_S*(n)) & PF0LKPIDX_M) map 5276 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c int t4_read_rss(struct adapter *adapter, u16 *map) map 5286 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c *map++ = LKPTBLQUEUE0_G(val); map 5287 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c *map++ = LKPTBLQUEUE1_G(val); map 387 drivers/net/ethernet/faraday/ftgmac100.c dma_addr_t map; map 395 drivers/net/ethernet/faraday/ftgmac100.c map = priv->rx_scratch_dma; map 397 
drivers/net/ethernet/faraday/ftgmac100.c map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE, map 399 drivers/net/ethernet/faraday/ftgmac100.c if (unlikely(dma_mapping_error(priv->dev, map))) { map 403 drivers/net/ethernet/faraday/ftgmac100.c map = priv->rx_scratch_dma; map 413 drivers/net/ethernet/faraday/ftgmac100.c rxdes->rxdes3 = cpu_to_le32(map); map 456 drivers/net/ethernet/faraday/ftgmac100.c dma_addr_t map; map 539 drivers/net/ethernet/faraday/ftgmac100.c map = le32_to_cpu(rxdes->rxdes3); map 546 drivers/net/ethernet/faraday/ftgmac100.c dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE); map 548 drivers/net/ethernet/faraday/ftgmac100.c dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); map 616 drivers/net/ethernet/faraday/ftgmac100.c dma_addr_t map = le32_to_cpu(txdes->txdes3); map 621 drivers/net/ethernet/faraday/ftgmac100.c dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE); map 624 drivers/net/ethernet/faraday/ftgmac100.c dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE); map 710 drivers/net/ethernet/faraday/ftgmac100.c dma_addr_t map; map 746 drivers/net/ethernet/faraday/ftgmac100.c map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); map 747 drivers/net/ethernet/faraday/ftgmac100.c if (dma_mapping_error(priv->dev, map)) { map 767 drivers/net/ethernet/faraday/ftgmac100.c txdes->txdes3 = cpu_to_le32(map); map 780 drivers/net/ethernet/faraday/ftgmac100.c map = skb_frag_dma_map(priv->dev, frag, 0, len, map 782 drivers/net/ethernet/faraday/ftgmac100.c if (dma_mapping_error(priv->dev, map)) map 795 drivers/net/ethernet/faraday/ftgmac100.c txdes->txdes3 = cpu_to_le32(map); map 865 drivers/net/ethernet/faraday/ftgmac100.c dma_addr_t map = le32_to_cpu(rxdes->rxdes3); map 871 drivers/net/ethernet/faraday/ftgmac100.c dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); map 391 drivers/net/ethernet/faraday/ftmac100.c dma_addr_t map; map 424 drivers/net/ethernet/faraday/ftmac100.c map = ftmac100_rxdes_get_dma_addr(rxdes); map 425 drivers/net/ethernet/faraday/ftmac100.c dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); map 578 drivers/net/ethernet/faraday/ftmac100.c dma_addr_t map; map 589 drivers/net/ethernet/faraday/ftmac100.c map = ftmac100_txdes_get_dma_addr(txdes); map 603 drivers/net/ethernet/faraday/ftmac100.c dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); map 625 drivers/net/ethernet/faraday/ftmac100.c dma_addr_t map) map 636 drivers/net/ethernet/faraday/ftmac100.c ftmac100_txdes_set_dma_addr(txdes, map); map 664 drivers/net/ethernet/faraday/ftmac100.c dma_addr_t map; map 673 drivers/net/ethernet/faraday/ftmac100.c map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); map 674 drivers/net/ethernet/faraday/ftmac100.c if (unlikely(dma_mapping_error(priv->dev, map))) { map 682 drivers/net/ethernet/faraday/ftmac100.c ftmac100_rxdes_set_dma_addr(rxdes, map); map 695 drivers/net/ethernet/faraday/ftmac100.c dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes); map 700 drivers/net/ethernet/faraday/ftmac100.c dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); map 707 drivers/net/ethernet/faraday/ftmac100.c dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes); map 712 drivers/net/ethernet/faraday/ftmac100.c dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); map 1008 drivers/net/ethernet/faraday/ftmac100.c dma_addr_t map; map 1019 drivers/net/ethernet/faraday/ftmac100.c map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); map 1020 
drivers/net/ethernet/faraday/ftmac100.c if (unlikely(dma_mapping_error(priv->dev, map))) { map 1030 drivers/net/ethernet/faraday/ftmac100.c return ftmac100_xmit(priv, skb, map); map 65 drivers/net/ethernet/freescale/fsl_pq_mdio.c void __iomem *map; map 447 drivers/net/ethernet/freescale/fsl_pq_mdio.c priv->map = of_iomap(np, 0); map 448 drivers/net/ethernet/freescale/fsl_pq_mdio.c if (!priv->map) { map 464 drivers/net/ethernet/freescale/fsl_pq_mdio.c priv->regs = priv->map + data->mii_offset; map 488 drivers/net/ethernet/freescale/fsl_pq_mdio.c data->get_tbipa, priv->map, &res); map 505 drivers/net/ethernet/freescale/fsl_pq_mdio.c if (priv->map) map 506 drivers/net/ethernet/freescale/fsl_pq_mdio.c iounmap(priv->map); map 522 drivers/net/ethernet/freescale/fsl_pq_mdio.c iounmap(priv->map); map 87 drivers/net/ethernet/fujitsu/fmvj18x_cs.c static int fjn_config(struct net_device *dev, struct ifmap *map); map 1059 drivers/net/ethernet/fujitsu/fmvj18x_cs.c static int fjn_config(struct net_device *dev, struct ifmap *map){ map 244 drivers/net/ethernet/hisilicon/hip04_eth.c struct regmap *map; map 305 drivers/net/ethernet/hisilicon/hip04_eth.c regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val); map 306 drivers/net/ethernet/hisilicon/hip04_eth.c regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp); map 321 drivers/net/ethernet/hisilicon/hip04_eth.c regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val); map 328 drivers/net/ethernet/hisilicon/hip04_eth.c regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val); map 333 drivers/net/ethernet/hisilicon/hip04_eth.c regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val); map 421 drivers/net/ethernet/hisilicon/hip04_eth.c regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val); map 957 drivers/net/ethernet/hisilicon/hip04_eth.c priv->map = syscon_node_to_regmap(arg.np); map 958 drivers/net/ethernet/hisilicon/hip04_eth.c if (IS_ERR(priv->map)) { map 960 drivers/net/ethernet/hisilicon/hip04_eth.c ret = PTR_ERR(priv->map); map 557 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c struct hclge_qs_to_pri_link_cmd *map; map 580 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c map = (struct hclge_qs_to_pri_link_cmd *)desc.data; map 582 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c map->qs_id = cpu_to_le16(qset_id); map 586 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c pri_id = map->priority; map 271 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c struct hclge_pg_to_pri_link_cmd *map; map 276 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map = (struct hclge_pg_to_pri_link_cmd *)desc.data; map 278 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map->pg_id = pg_id; map 279 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map->pri_bit_map = pri_bit_map; map 287 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c struct hclge_qs_to_pri_link_cmd *map; map 292 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map = (struct hclge_qs_to_pri_link_cmd *)desc.data; map 294 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map->qs_id = cpu_to_le16(qs_id); map 295 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map->priority = pri; map 296 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; map 304 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c struct hclge_nq_to_qs_link_cmd *map; map 309 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map = (struct 
hclge_nq_to_qs_link_cmd *)desc.data; map 311 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map->nq_id = cpu_to_le16(q_id); map 312 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK); map 2236 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c struct virtchnl_vector_map *map; map 2253 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c map = &irqmap_info->vecmap[i]; map 2255 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c if (!i40e_vc_isvalid_vector_id(vf, map->vector_id) || map 2256 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c !i40e_vc_isvalid_vsi_id(vf, map->vsi_id)) { map 2260 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c vsi_id = map->vsi_id; map 2262 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) { map 2267 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) { map 2272 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c i40e_config_irq_link_list(vf, vsi_id, map); map 2131 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c struct virtchnl_vector_map *map; map 2161 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c map = &irqmap_info->vecmap[i]; map 2163 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c vector_id = map->vector_id; map 2164 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c vsi_id = map->vsi_id; map 2168 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c (!vector_id && (map->rxq_map || map->txq_map))) { map 2187 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c qmap = map->rxq_map; map 2195 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c q_vector->rx.itr_idx = map->rxitr_idx; map 2201 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c qmap = map->txq_map; map 2209 drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c q_vector->tx.itr_idx = map->txitr_idx; map 235 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) map 240 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); map 367 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) map 373 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c map[i] = IXGBE_RTRUP2TC_UP_MASK & map 377 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) map 385 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c ixgbe_dcb_read_rtrup2tc_82599(hw, map); map 137 drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); map 339 drivers/net/ethernet/marvell/octeontx2/af/rvu.h static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) map 341 drivers/net/ethernet/marvell/octeontx2/af/rvu.h *cgx_id = (map >> 4) & 0xF; map 342 drivers/net/ethernet/marvell/octeontx2/af/rvu.h *lmac_id = (map & 0xF); map 53 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c u32 map; map 57 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c map = pkind->pfchan_map[i]; map 58 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c if (((map >> 16) & 0x3F) == pf) map 1342 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, map 1351 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index = find_next_zero_bit(map, size, start); map 1356 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c next = find_next_bit(map, end, index); map 1373 
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end) map 1382 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index = find_next_zero_bit(map, end, start); map 1386 drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c next = find_next_bit(map, end, index); map 45 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c struct hw_reg_map *map; map 54 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c map = &txsch_reg_map[regblk]; map 60 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c if (map->regblk != regblk) map 63 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c reg &= map->mask; map 65 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c for (idx = 0; idx < map->num_ranges; idx++) { map 66 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c if (reg >= map->range[idx].start && map 67 drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c reg < map->range[idx].end) map 939 drivers/net/ethernet/marvell/skge.c dma_addr_t map; map 941 drivers/net/ethernet/marvell/skge.c map = pci_map_single(skge->hw->pdev, skb->data, bufsize, map 944 drivers/net/ethernet/marvell/skge.c if (pci_dma_mapping_error(skge->hw->pdev, map)) map 947 drivers/net/ethernet/marvell/skge.c rd->dma_lo = lower_32_bits(map); map 948 drivers/net/ethernet/marvell/skge.c rd->dma_hi = upper_32_bits(map); map 958 drivers/net/ethernet/marvell/skge.c dma_unmap_addr_set(e, mapaddr, map); map 2738 drivers/net/ethernet/marvell/skge.c dma_addr_t map; map 2751 drivers/net/ethernet/marvell/skge.c map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); map 2752 drivers/net/ethernet/marvell/skge.c if (pci_dma_mapping_error(hw->pdev, map)) map 2755 drivers/net/ethernet/marvell/skge.c dma_unmap_addr_set(e, mapaddr, map); map 2758 drivers/net/ethernet/marvell/skge.c td->dma_lo = lower_32_bits(map); map 2759 drivers/net/ethernet/marvell/skge.c td->dma_hi = upper_32_bits(map); map 2788 drivers/net/ethernet/marvell/skge.c map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, map 2790 drivers/net/ethernet/marvell/skge.c if (dma_mapping_error(&hw->pdev->dev, map)) map 2798 drivers/net/ethernet/marvell/skge.c tf->dma_lo = lower_32_bits(map); map 2799 drivers/net/ethernet/marvell/skge.c tf->dma_hi = upper_32_bits(map); map 2800 drivers/net/ethernet/marvell/skge.c dma_unmap_addr_set(e, mapaddr, map); map 1177 drivers/net/ethernet/marvell/sky2.c dma_addr_t map, unsigned len) map 1183 drivers/net/ethernet/marvell/sky2.c le->addr = cpu_to_le32(upper_32_bits(map)); map 1188 drivers/net/ethernet/marvell/sky2.c le->addr = cpu_to_le32(lower_32_bits(map)); map 591 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->direct.map = t; map 631 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->page_list[i].map = t; map 648 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->direct.buf, buf->direct.map); map 657 drivers/net/ethernet/mellanox/mlx4/alloc.c buf->page_list[i].map); map 229 drivers/net/ethernet/mellanox/mlx4/catas.c i, swab32(readl(priv->catas_err.map + i))); map 244 drivers/net/ethernet/mellanox/mlx4/catas.c } else if (readl(priv->catas_err.map)) { map 279 drivers/net/ethernet/mellanox/mlx4/catas.c priv->catas_err.map = NULL; map 286 drivers/net/ethernet/mellanox/mlx4/catas.c priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4); map 287 drivers/net/ethernet/mellanox/mlx4/catas.c if (!priv->catas_err.map) { map 305 drivers/net/ethernet/mellanox/mlx4/catas.c if (priv->catas_err.map) { map 306 drivers/net/ethernet/mellanox/mlx4/catas.c iounmap(priv->catas_err.map); map 307 
drivers/net/ethernet/mellanox/mlx4/catas.c priv->catas_err.map = NULL; map 103 drivers/net/ethernet/mellanox/mlx4/en_tx.c (unsigned long long) ring->sp_wqres.buf.direct.map); map 124 drivers/net/ethernet/mellanox/mlx4/en_tx.c ring->bf.uar->map = mdev->uar_map; map 731 drivers/net/ethernet/mellanox/mlx4/en_tx.c ring->bf.uar->map + MLX4_SEND_DOORBELL); map 1015 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].map = t; map 1076 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].map); map 1110 drivers/net/ethernet/mellanox/mlx4/eq.c eq->page_list[i].map); map 736 drivers/net/ethernet/mellanox/mlx4/mlx4.h u32 __iomem *map; map 803 drivers/net/ethernet/mellanox/mlx4/mr.c page_list[i] = buf->direct.map + (i << buf->page_shift); map 805 drivers/net/ethernet/mellanox/mlx4/mr.c page_list[i] = buf->page_list[i].map; map 161 drivers/net/ethernet/mellanox/mlx4/pd.c uar->map = NULL; map 202 drivers/net/ethernet/mellanox/mlx4/pd.c uar->map = ioremap(uar->pfn << PAGE_SHIFT, PAGE_SIZE); map 203 drivers/net/ethernet/mellanox/mlx4/pd.c if (!uar->map) { map 232 drivers/net/ethernet/mellanox/mlx4/pd.c iounmap(uar->map); map 262 drivers/net/ethernet/mellanox/mlx4/pd.c iounmap(bf->uar->map); map 92 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->frags->map = t; map 115 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->frags->map); map 139 drivers/net/ethernet/mellanox/mlx5/core/alloc.c &frag->map, node); map 142 drivers/net/ethernet/mellanox/mlx5/core/alloc.c if (frag->map & ((1 << buf->page_shift) - 1)) { map 144 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->frags[i].buf, buf->frags[i].map); map 146 drivers/net/ethernet/mellanox/mlx5/core/alloc.c &frag->map, buf->page_shift); map 157 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->frags[i].map); map 173 drivers/net/ethernet/mellanox/mlx5/core/alloc.c buf->frags[i].map); map 295 drivers/net/ethernet/mellanox/mlx5/core/alloc.c addr = buf->frags->map + (i << buf->page_shift); map 307 drivers/net/ethernet/mellanox/mlx5/core/alloc.c pas[i] = cpu_to_be64(buf->frags[i].map); map 147 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); map 1003 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->uar_map = mdev->mlx5e_res.bfreg.map; map 1074 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->uar_map = mdev->mlx5e_res.bfreg.map; map 1149 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->uar_map = mdev->mlx5e_res.bfreg.map; map 305 drivers/net/ethernet/mellanox/mlx5/core/eq.c eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET; map 138 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_write64(wqe, conn->fdev->conn_res.uar->map + MLX5_BF_OFFSET); map 362 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->fdev->conn_res.uar->map, conn->cq.wq.cc); map 226 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c map: map 237 drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c goto map; map 221 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET); map 90 drivers/net/ethernet/mellanox/mlx5/core/uar.c iounmap(up->map); map 139 drivers/net/ethernet/mellanox/mlx5/core/uar.c up->map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); map 140 drivers/net/ethernet/mellanox/mlx5/core/uar.c if (!up->map) { map 145 drivers/net/ethernet/mellanox/mlx5/core/uar.c up->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); map 146 drivers/net/ethernet/mellanox/mlx5/core/uar.c if (!up->map) { map 250 
drivers/net/ethernet/mellanox/mlx5/core/uar.c bfreg->map = up->map + map_offset(mdev, dbi); map 285 drivers/net/ethernet/mellanox/mlx5/core/uar.c uar_idx = (bfreg->map - up->map) >> MLX5_ADAPTER_PAGE_SHIFT; map 286 drivers/net/ethernet/mellanox/mlx5/core/uar.c bfreg_idx = (((uintptr_t)bfreg->map % MLX5_ADAPTER_PAGE_SIZE) - MLX5_BF_OFFSET) / bf_reg_size; map 290 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c struct dcb_ieee_app_dscp_map *map) map 294 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c dcb_ieee_getapp_dscp_prio_mask_map(mlxsw_sp_port->dev, map); map 295 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c for (i = 0; i < ARRAY_SIZE(map->map); ++i) { map 296 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c if (map->map[i]) map 297 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c map->map[i] = fls(map->map[i]) - 1; map 299 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c map->map[i] = default_prio; map 305 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c struct dcb_ieee_app_prio_map *map) map 310 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c dcb_ieee_getapp_prio_dscp_mask_map(mlxsw_sp_port->dev, map); map 311 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c for (i = 0; i < ARRAY_SIZE(map->map); ++i) { map 312 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c if (map->map[i]) { map 313 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c map->map[i] = fls64(map->map[i]) - 1; map 373 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c struct dcb_ieee_app_dscp_map *map) map 380 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c for (i = 0; i < ARRAY_SIZE(map->map); ++i) map 381 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c mlxsw_reg_qpdpm_dscp_pack(qpdpm_pl, i, map->map[i]); map 387 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c struct dcb_ieee_app_prio_map *map) map 394 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c for (i = 0; i < ARRAY_SIZE(map->map); ++i) map 395 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c mlxsw_reg_qpdsm_prio_pack(qpdsm_pl, i, map->map[i]); map 442 drivers/net/ethernet/mscc/ocelot.h const u32 *const *map; map 21 drivers/net/ethernet/mscc/ocelot_io.c ocelot->map[target][reg & REG_MASK] + offset, &val); map 33 drivers/net/ethernet/mscc/ocelot_io.c ocelot->map[target][reg & REG_MASK] + offset, val); map 45 drivers/net/ethernet/mscc/ocelot_io.c ocelot->map[target][reg & REG_MASK] + offset, map 77 drivers/net/ethernet/mscc/ocelot_io.c regfield.reg = ocelot->map[target][reg & REG_MASK]; map 430 drivers/net/ethernet/mscc/ocelot_regs.c ocelot->map = ocelot_regmap; map 82 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map) map 95 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c req->key_size = cpu_to_be32(map->key_size); map 96 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c req->value_size = cpu_to_be32(map->value_size); map 97 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c req->max_entries = cpu_to_be32(map->max_entries); map 98 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c req->map_type = cpu_to_be32(map->map_type); map 202 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c struct bpf_map *map = &nfp_map->offmap->map; map 231 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c if (memcmp(cached_key, key, map->key_size)) map 236 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c map->value_size); map 243 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c map->key_size); map 298 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c struct bpf_map *map = &offmap->map; map 328 
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size); map 331 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c map->value_size); map 368 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c map->key_size); map 371 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c map->value_size); map 4401 drivers/net/ethernet/netronome/nfp/bpf/jit.c struct bpf_map *map; map 4413 drivers/net/ethernet/netronome/nfp/bpf/jit.c map = (void *)(unsigned long)((u32)meta1->insn.imm | map 4415 drivers/net/ethernet/netronome/nfp/bpf/jit.c if (bpf_map_offload_neutral(map)) { map 4416 drivers/net/ethernet/netronome/nfp/bpf/jit.c id = map->id; map 4418 drivers/net/ethernet/netronome/nfp/bpf/jit.c nfp_map = map_to_offmap(map)->dev_priv; map 593 drivers/net/ethernet/netronome/nfp/bpf/main.h nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); map 32 drivers/net/ethernet/netronome/nfp/bpf/offload.c struct bpf_map *map) map 38 drivers/net/ethernet/netronome/nfp/bpf/offload.c record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id, map 49 drivers/net/ethernet/netronome/nfp/bpf/offload.c map = bpf_map_inc(map, false); map 50 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (IS_ERR(map)) map 51 drivers/net/ethernet/netronome/nfp/bpf/offload.c return PTR_ERR(map); map 59 drivers/net/ethernet/netronome/nfp/bpf/offload.c record->ptr = map; map 60 drivers/net/ethernet/netronome/nfp/bpf/offload.c record->map_id = map->id; map 75 drivers/net/ethernet/netronome/nfp/bpf/offload.c bpf_map_put(map); map 260 drivers/net/ethernet/netronome/nfp/bpf/offload.c for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++) map 274 drivers/net/ethernet/netronome/nfp/bpf/offload.c for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++) map 315 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) map 337 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (offmap->map.map_flags || map 338 drivers/net/ethernet/netronome/nfp/bpf/offload.c offmap->map.numa_node != NUMA_NO_NODE) { map 343 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (!(bpf->maps.types & 1 << offmap->map.map_type)) { map 352 drivers/net/ethernet/netronome/nfp/bpf/offload.c offmap->map.max_entries) { map 354 drivers/net/ethernet/netronome/nfp/bpf/offload.c offmap->map.max_entries, map 359 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (round_up(offmap->map.key_size, 8) + map 360 drivers/net/ethernet/netronome/nfp/bpf/offload.c round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) { map 362 drivers/net/ethernet/netronome/nfp/bpf/offload.c round_up(offmap->map.key_size, 8) + map 363 drivers/net/ethernet/netronome/nfp/bpf/offload.c round_up(offmap->map.value_size, 8), map 367 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (offmap->map.key_size > bpf->maps.max_key_sz) { map 369 drivers/net/ethernet/netronome/nfp/bpf/offload.c offmap->map.key_size, bpf->maps.max_key_sz); map 372 drivers/net/ethernet/netronome/nfp/bpf/offload.c if (offmap->map.value_size > bpf->maps.max_val_sz) { map 374 drivers/net/ethernet/netronome/nfp/bpf/offload.c offmap->map.value_size, bpf->maps.max_val_sz); map 378 drivers/net/ethernet/netronome/nfp/bpf/offload.c use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) * map 390 drivers/net/ethernet/netronome/nfp/bpf/offload.c res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map); map 399 drivers/net/ethernet/netronome/nfp/bpf/offload.c bpf->map_elems_in_use += offmap->map.max_entries; map 414 
drivers/net/ethernet/netronome/nfp/bpf/offload.c bpf->map_elems_in_use -= offmap->map.max_entries; map 103 drivers/net/ethernet/netronome/nfp/bpf/verifier.c for (i = 0; i < offmap->map.value_size; i++) { map 436 drivers/net/ethernet/netronome/nfp/bpf/verifier.c if (off + size > offmap->map.value_size) { map 1337 drivers/net/ethernet/pasemi/pasemi_mac.c const dma_addr_t *map, map 1359 drivers/net/ethernet/pasemi/pasemi_mac.c cs_dest = map[0] + skb_transport_offset(skb) + 16; map 1364 drivers/net/ethernet/pasemi/pasemi_mac.c cs_dest = map[0] + skb_transport_offset(skb) + 6; map 1379 drivers/net/ethernet/pasemi/pasemi_mac.c CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off); map 1381 drivers/net/ethernet/pasemi/pasemi_mac.c CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); map 1429 drivers/net/ethernet/pasemi/pasemi_mac.c dma_addr_t map[MAX_SKB_FRAGS+1]; map 1443 drivers/net/ethernet/pasemi/pasemi_mac.c map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), map 1446 drivers/net/ethernet/pasemi/pasemi_mac.c if (pci_dma_mapping_error(mac->dma_pdev, map[0])) map 1452 drivers/net/ethernet/pasemi/pasemi_mac.c map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0, map 1455 drivers/net/ethernet/pasemi/pasemi_mac.c if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) { map 1497 drivers/net/ethernet/pasemi/pasemi_mac.c pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring); map 1507 drivers/net/ethernet/pasemi/pasemi_mac.c XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); map 1508 drivers/net/ethernet/pasemi/pasemi_mac.c TX_DESC_INFO(txring, fill+i).dma = map[i]; map 1533 drivers/net/ethernet/pasemi/pasemi_mac.c pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], map 1985 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c dma_addr_t map; map 1990 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c map = pci_map_single(pdev, skb->data, map 1992 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c if (pci_dma_mapping_error(pdev, map)) map 1995 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c nf->dma = map; map 2002 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), map 2004 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c if (dma_mapping_error(&pdev->dev, map)) map 2007 drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c nf->dma = map; map 181 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c #define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, vp_pq_id, rl_id, \ map 184 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c typeof(map) __map; \ map 197 drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c (map) = __map; \ map 297 drivers/net/ethernet/qlogic/qla3xxx.c dma_addr_t map; map 319 drivers/net/ethernet/qlogic/qla3xxx.c map = pci_map_single(qdev->pdev, map 324 drivers/net/ethernet/qlogic/qla3xxx.c err = pci_dma_mapping_error(qdev->pdev, map); map 337 drivers/net/ethernet/qlogic/qla3xxx.c cpu_to_le32(LS_64BITS(map)); map 339 drivers/net/ethernet/qlogic/qla3xxx.c cpu_to_le32(MS_64BITS(map)); map 340 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); map 1788 drivers/net/ethernet/qlogic/qla3xxx.c dma_addr_t map; map 1806 drivers/net/ethernet/qlogic/qla3xxx.c map = pci_map_single(qdev->pdev, map 1812 drivers/net/ethernet/qlogic/qla3xxx.c err = pci_dma_mapping_error(qdev->pdev, map); map 1824 drivers/net/ethernet/qlogic/qla3xxx.c cpu_to_le32(LS_64BITS(map)); map 1826 
drivers/net/ethernet/qlogic/qla3xxx.c cpu_to_le32(MS_64BITS(map)); map 1827 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); map 1948 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[0], mapaddr), map 1949 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[0], maplen), map 1955 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[i], map 1957 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[i], maplen), map 2313 drivers/net/ethernet/qlogic/qla3xxx.c dma_addr_t map; map 2323 drivers/net/ethernet/qlogic/qla3xxx.c map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); map 2325 drivers/net/ethernet/qlogic/qla3xxx.c err = pci_dma_mapping_error(qdev->pdev, map); map 2334 drivers/net/ethernet/qlogic/qla3xxx.c oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); map 2335 drivers/net/ethernet/qlogic/qla3xxx.c oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); map 2337 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); map 2338 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len_set(&tx_cb->map[seg], maplen, len); map 2361 drivers/net/ethernet/qlogic/qla3xxx.c map = pci_map_single(qdev->pdev, oal, map 2365 drivers/net/ethernet/qlogic/qla3xxx.c err = pci_dma_mapping_error(qdev->pdev, map); map 2373 drivers/net/ethernet/qlogic/qla3xxx.c oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); map 2374 drivers/net/ethernet/qlogic/qla3xxx.c oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); map 2377 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); map 2378 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len_set(&tx_cb->map[seg], maplen, map 2385 drivers/net/ethernet/qlogic/qla3xxx.c map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), map 2388 drivers/net/ethernet/qlogic/qla3xxx.c err = dma_mapping_error(&qdev->pdev->dev, map); map 2396 drivers/net/ethernet/qlogic/qla3xxx.c oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); map 2397 drivers/net/ethernet/qlogic/qla3xxx.c oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); map 2399 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); map 2400 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); map 2428 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[seg], mapaddr), map 2429 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[seg], maplen), map 2436 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[seg], mapaddr), map 2437 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[seg], maplen), map 2442 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[0], mapaddr), map 2443 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[0], maplen), map 2755 drivers/net/ethernet/qlogic/qla3xxx.c dma_addr_t map; map 2778 drivers/net/ethernet/qlogic/qla3xxx.c map = pci_map_single(qdev->pdev, map 2784 drivers/net/ethernet/qlogic/qla3xxx.c err = pci_dma_mapping_error(qdev->pdev, map); map 2795 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr_set(lrg_buf_cb, mapaddr, map); map 2800 drivers/net/ethernet/qlogic/qla3xxx.c cpu_to_le32(LS_64BITS(map)); map 2802 drivers/net/ethernet/qlogic/qla3xxx.c cpu_to_le32(MS_64BITS(map)); map 3646 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[0], map 3648 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[0], maplen), map 3652 
drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_addr(&tx_cb->map[j], map 3654 drivers/net/ethernet/qlogic/qla3xxx.c dma_unmap_len(&tx_cb->map[j], map 1041 drivers/net/ethernet/qlogic/qla3xxx.h struct map_list map[MAX_SKB_FRAGS+1]; map 716 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c u8 i, num_app, map, cnt; map 729 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]); map 730 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c cnt = qlcnic_dcb_prio_count(map); map 1070 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c u8 i, j, k, map; map 1086 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c map = peer->tc_cfg[i].up_tc_map; map 1087 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c pg->prio_pg[j++] = map; map 586 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c dma_addr_t map; map 591 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c map = pci_map_single(pdev, skb->data, skb_headlen(skb), map 593 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c if (pci_dma_mapping_error(pdev, map)) map 596 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c nf->dma = map; map 602 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), map 604 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c if (dma_mapping_error(&pdev->dev, map)) map 607 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c nf->dma = map; map 5412 drivers/net/ethernet/sfc/ef10.c static enum efx_ef10_default_filters map[] = { map 5427 drivers/net/ethernet/sfc/ef10.c if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { map 5432 drivers/net/ethernet/sfc/ef10.c id = &vlan->default_filters[map[encap_type]]; map 5466 drivers/net/ethernet/sfc/ef10.c static enum efx_ef10_default_filters map[] = { map 5481 drivers/net/ethernet/sfc/ef10.c if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) { map 5486 drivers/net/ethernet/sfc/ef10.c id = &vlan->default_filters[map[encap_type]]; map 239 drivers/net/ethernet/sis/sis900.c static int sis900_set_config(struct net_device *dev, struct ifmap *map); map 2246 drivers/net/ethernet/sis/sis900.c static int sis900_set_config(struct net_device *dev, struct ifmap *map) map 2253 drivers/net/ethernet/sis/sis900.c if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { map 2260 drivers/net/ethernet/sis/sis900.c switch(map->port){ map 2262 drivers/net/ethernet/sis/sis900.c dev->if_port = map->port; map 2283 drivers/net/ethernet/sis/sis900.c dev->if_port = map->port; map 2304 drivers/net/ethernet/sis/sis900.c dev->if_port = map->port; map 280 drivers/net/ethernet/smsc/smc91c92_cs.c static int s9k_config(struct net_device *dev, struct ifmap *map); map 1587 drivers/net/ethernet/smsc/smc91c92_cs.c static int s9k_config(struct net_device *dev, struct ifmap *map) map 1590 drivers/net/ethernet/smsc/smc91c92_cs.c if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { map 1593 drivers/net/ethernet/smsc/smc91c92_cs.c else if (map->port > 2) map 1595 drivers/net/ethernet/smsc/smc91c92_cs.c dev->if_port = map->port; map 296 drivers/net/ethernet/xircom/xirc2ps_cs.c static int do_config(struct net_device *dev, struct ifmap *map); map 1362 drivers/net/ethernet/xircom/xirc2ps_cs.c do_config(struct net_device *dev, struct ifmap *map) map 1367 drivers/net/ethernet/xircom/xirc2ps_cs.c if (map->port != 255 && map->port != dev->if_port) { map 1368 drivers/net/ethernet/xircom/xirc2ps_cs.c if (map->port > 4) map 1370 drivers/net/ethernet/xircom/xirc2ps_cs.c if (!map->port) { map 1375 drivers/net/ethernet/xircom/xirc2ps_cs.c 
dev->if_port = map->port; map 1261 drivers/net/fddi/skfp/smt.c const u_char *map ; map 1276 drivers/net/fddi/skfp/smt.c for (i = 0, map = ansi_weirdness ; i < 16 ; i++) { map 1278 drivers/net/fddi/skfp/smt.c out |= (1<<*map) ; map 1280 drivers/net/fddi/skfp/smt.c map++ ; map 42 drivers/net/netdevsim/bpf.c struct bpf_offloaded_map *map; map 328 drivers/net/netdevsim/bpf.c nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key) map 330 drivers/net/netdevsim/bpf.c return e->key && !memcmp(key, e->key, map->key_size); map 339 drivers/net/netdevsim/bpf.c if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key)) map 350 drivers/net/netdevsim/bpf.c nmap->entry[idx].key = kmalloc(offmap->map.key_size, GFP_USER); map 353 drivers/net/netdevsim/bpf.c nmap->entry[idx].value = kmalloc(offmap->map.value_size, GFP_USER); map 382 drivers/net/netdevsim/bpf.c offmap->map.key_size); map 404 drivers/net/netdevsim/bpf.c memcpy(value, nmap->entry[idx].value, offmap->map.value_size); map 444 drivers/net/netdevsim/bpf.c memcpy(nmap->entry[idx].key, key, offmap->map.key_size); map 445 drivers/net/netdevsim/bpf.c memcpy(nmap->entry[idx].value, value, offmap->map.value_size); map 457 drivers/net/netdevsim/bpf.c if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) map 487 drivers/net/netdevsim/bpf.c if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY && map 488 drivers/net/netdevsim/bpf.c offmap->map.map_type != BPF_MAP_TYPE_HASH)) map 490 drivers/net/netdevsim/bpf.c if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS) map 492 drivers/net/netdevsim/bpf.c if (offmap->map.map_flags) map 501 drivers/net/netdevsim/bpf.c nmap->map = offmap; map 504 drivers/net/netdevsim/bpf.c if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) { map 1584 drivers/net/phy/mscc.c struct mdio_device **map = phydev->mdio.bus->mdio_map; map 1598 drivers/net/phy/mscc.c phy = container_of(map[addr], struct phy_device, mdio); map 1901 drivers/net/usb/lan78xx.c .map = irq_map, map 1326 drivers/net/wan/sdla.c static int sdla_set_config(struct net_device *dev, struct ifmap *map) map 1340 drivers/net/wan/sdla.c if (valid_port[i] == map->base_addr) map 1346 drivers/net/wan/sdla.c if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ map 1350 drivers/net/wan/sdla.c base = map->base_addr; map 1426 drivers/net/wan/sdla.c switch (map->irq) { map 1485 drivers/net/wan/sdla.c if (valid_mem[i] == map->mem_start) map 1492 drivers/net/wan/sdla.c if (flp->type == SDLA_S502A && (map->mem_start & 0xF000) >> 12 == 0x0E) map 1495 drivers/net/wan/sdla.c if (flp->type != SDLA_S507 && map->mem_start >> 16 == 0x0B) map 1498 drivers/net/wan/sdla.c if (flp->type == SDLA_S507 && map->mem_start >> 16 == 0x0D) map 1502 drivers/net/wan/sdla.c byte |= (map->mem_start & 0xF000) >> (12 + (flp->type == SDLA_S508 ? 
1 : 0)); map 1506 drivers/net/wan/sdla.c switch (map->mem_start >> 16) { map 1522 drivers/net/wan/sdla.c switch (map->mem_start >> 16) { map 1538 drivers/net/wan/sdla.c switch (map->mem_start >> 16) { map 1572 drivers/net/wan/sdla.c dev->irq = map->irq; map 1574 drivers/net/wan/sdla.c dev->mem_start = map->mem_start; map 1580 drivers/net/wan/sdla.c free_irq(map->irq, dev); map 1848 drivers/net/wireless/admtek/adm8211.c priv->map = pci_iomap(pdev, 1, mem_len); map 1849 drivers/net/wireless/admtek/adm8211.c if (!priv->map) map 1850 drivers/net/wireless/admtek/adm8211.c priv->map = pci_iomap(pdev, 0, io_len); map 1852 drivers/net/wireless/admtek/adm8211.c if (!priv->map) { map 1941 drivers/net/wireless/admtek/adm8211.c pci_iounmap(pdev, priv->map); map 1974 drivers/net/wireless/admtek/adm8211.c pci_iounmap(pdev, priv->map); map 11 drivers/net/wireless/admtek/adm8211.h #define ADM8211_CSR_READ(r) ioread32(&priv->map->r) map 12 drivers/net/wireless/admtek/adm8211.h #define ADM8211_CSR_WRITE(r, val) iowrite32((val), &priv->map->r) map 540 drivers/net/wireless/admtek/adm8211.h struct adm8211_csr __iomem *map; map 1645 drivers/net/wireless/ath/ath10k/htt.h u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32]; map 81 drivers/net/wireless/ath/ath10k/htt_tx.c ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit; map 82 drivers/net/wireless/ath/ath10k/htt_tx.c ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0; map 1025 drivers/net/wireless/ath/ath10k/wmi-tlv.c u32 map; map 1041 drivers/net/wireless/ath/ath10k/wmi-tlv.c for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1) map 1042 drivers/net/wireless/ath/ath10k/wmi-tlv.c if (map & BIT(0)) map 3593 drivers/net/wireless/ath/ath10k/wmi.c u32 map; map 3602 drivers/net/wireless/ath/ath10k/wmi.c for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { map 3603 drivers/net/wireless/ath/ath10k/wmi.c if (!(map & BIT(0))) map 3639 drivers/net/wireless/ath/ath10k/wmi.c u32 map; map 3648 drivers/net/wireless/ath/ath10k/wmi.c for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { map 3649 drivers/net/wireless/ath/ath10k/wmi.c if (!(map & BIT(0))) map 3683 drivers/net/wireless/ath/ath10k/wmi.c u32 map, tim_len; map 3692 drivers/net/wireless/ath/ath10k/wmi.c for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { map 3693 drivers/net/wireless/ath/ath10k/wmi.c if (!(map & BIT(0))) map 3743 drivers/net/wireless/ath/ath10k/wmi.c u32 map; map 3758 drivers/net/wireless/ath/ath10k/wmi.c map = __le32_to_cpu(arg.vdev_map); map 3761 drivers/net/wireless/ath/ath10k/wmi.c map); map 3763 drivers/net/wireless/ath/ath10k/wmi.c for (; map; map >>= 1, vdev_id++) { map 3764 drivers/net/wireless/ath/ath10k/wmi.c if (!(map & 0x1)) map 2355 drivers/net/wireless/ath/wil6210/debugfs.c const struct fw_map *map = &fw_mapping[i]; map 2357 drivers/net/wireless/ath/wil6210/debugfs.c if (!map->name) map 2361 drivers/net/wireless/ath/wil6210/debugfs.c blob->data = (void * __force)wil->csr + HOSTADDR(map->host); map 2362 drivers/net/wireless/ath/wil6210/debugfs.c blob->size = map->to - map->from; map 2363 drivers/net/wireless/ath/wil6210/debugfs.c snprintf(name, sizeof(name), "blob_%s", map->name); map 25 drivers/net/wireless/ath/wil6210/wil_crash_dump.c const struct fw_map *map; map 33 drivers/net/wireless/ath/wil6210/wil_crash_dump.c map = &fw_mapping[0]; map 34 drivers/net/wireless/ath/wil6210/wil_crash_dump.c host_min = map->host; map 35 drivers/net/wireless/ath/wil6210/wil_crash_dump.c host_max = map->host + (map->to - 
map->from); map 38 drivers/net/wireless/ath/wil6210/wil_crash_dump.c map = &fw_mapping[i]; map 40 drivers/net/wireless/ath/wil6210/wil_crash_dump.c if (!map->crash_dump) map 43 drivers/net/wireless/ath/wil6210/wil_crash_dump.c if (map->host < host_min) map 44 drivers/net/wireless/ath/wil6210/wil_crash_dump.c host_min = map->host; map 46 drivers/net/wireless/ath/wil6210/wil_crash_dump.c tmp_max = map->host + (map->to - map->from); map 61 drivers/net/wireless/ath/wil6210/wil_crash_dump.c const struct fw_map *map; map 82 drivers/net/wireless/ath/wil6210/wil_crash_dump.c map = &fw_mapping[i]; map 84 drivers/net/wireless/ath/wil6210/wil_crash_dump.c if (!map->crash_dump) map 87 drivers/net/wireless/ath/wil6210/wil_crash_dump.c data = (void * __force)wil->csr + HOSTADDR(map->host); map 88 drivers/net/wireless/ath/wil6210/wil_crash_dump.c len = map->to - map->from; map 89 drivers/net/wireless/ath/wil6210/wil_crash_dump.c offset = map->host - host_min; map 590 drivers/net/wireless/intel/ipw2x00/libipw.h u8 map; map 29 drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h u8 map; map 599 drivers/net/wireless/intersil/p54/p54pci.c priv->map = ioremap(mem_addr, mem_len); map 600 drivers/net/wireless/intersil/p54/p54pci.c if (!priv->map) { map 630 drivers/net/wireless/intersil/p54/p54pci.c iounmap(priv->map); map 658 drivers/net/wireless/intersil/p54/p54pci.c iounmap(priv->map); map 86 drivers/net/wireless/intersil/p54/p54pci.h #define P54P_READ(r) (__force __le32)__raw_readl(&priv->map->r) map 87 drivers/net/wireless/intersil/p54/p54pci.h #define P54P_WRITE(r, val) __raw_writel((__force u32)(__le32)(val), &priv->map->r) map 92 drivers/net/wireless/intersil/p54/p54pci.h struct p54p_csr __iomem *map; map 231 drivers/net/wireless/marvell/mwifiex/11h.c if (rpt->map.radar) { map 2202 drivers/net/wireless/marvell/mwifiex/fw.h struct meas_rpt_map map; map 561 drivers/net/wireless/quantenna/qtnfmac/commands.c const u8 *map = NULL; map 567 drivers/net/wireless/quantenna/qtnfmac/commands.c (qtnf_utils_is_bit_set(map, bitn, map_len) && \ map 576 drivers/net/wireless/quantenna/qtnfmac/commands.c map = tlv->val; map 590 drivers/net/wireless/quantenna/qtnfmac/commands.c if (!map || !stats) map 369 drivers/net/wireless/ralink/rt2x00/rt2800lib.c const unsigned int *map; map 379 drivers/net/wireless/ralink/rt2x00/rt2800lib.c map = rt2800_eeprom_map_ext; map 381 drivers/net/wireless/ralink/rt2x00/rt2800lib.c map = rt2800_eeprom_map; map 383 drivers/net/wireless/ralink/rt2x00/rt2800lib.c index = map[word]; map 63 drivers/net/wireless/ray_cs.c static int ray_dev_config(struct net_device *dev, struct ifmap *map); map 802 drivers/net/wireless/ray_cs.c static int ray_dev_config(struct net_device *dev, struct ifmap *map) map 807 drivers/net/wireless/ray_cs.c dev_dbg(&link->dev, "ray_dev_config(dev=%p,ifmap=%p)\n", dev, map); map 203 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->PHY[0], buf | 0x80); map 205 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->PHY[0], buf); map 206 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (rtl818x_ioread8(priv, &priv->map->PHY[2]) == (data & 0xFF)) map 386 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread32(priv, &priv->map->INT_STATUS_SE); map 392 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->INT_STATUS_SE, reg); map 395 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, 
&priv->map->INT_TIMEOUT, 0); map 432 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread16(priv, &priv->map->INT_STATUS); map 438 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->INT_STATUS, reg); map 582 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, map 586 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, map 596 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 599 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 600 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 603 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->ANAPARAM3, anaparam3); map 605 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 608 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 616 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 619 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 620 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 623 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2); map 625 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 628 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 636 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 637 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 638 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 640 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam); map 641 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 643 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 658 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->PHY_PR); map 659 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->PHY_PR, reg | 0x04); map 685 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00); map 691 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); map 699 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x00); map 705 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); map 720 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->IMR, map 729 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); map 738 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->IMR, 0); map 740 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, 
&priv->map->INT_MASK, 0); map 768 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread16(priv, &priv->map->BRSR); map 771 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->BRSR, reg); map 778 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->BRSR, basic_mask); map 779 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (resp_max << 4) | map 787 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->BRSR_8187SE, resp_mask); map 798 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg8 = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 800 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg8); map 805 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg16 = rtl818x_ioread16(priv, &priv->map->FEMR); map 807 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->FEMR, reg16); map 818 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, 0); map 819 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->CMD); map 824 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->CMD); map 826 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CMD); map 829 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, RTL818X_CMD_RESET); map 830 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->CMD); map 834 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (rtl818x_ioread8(priv, &priv->map->CMD) & RTL818X_CMD_RESET) { map 839 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD); map 840 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->CMD); map 843 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (rtl818x_ioread8(priv, &priv->map->CONFIG3) & (1 << 3)) { map 848 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA); map 850 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->MSR, 0); map 855 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->RDSAR, priv->rx_ring_dma); map 861 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TBDA, map 863 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TLPDA, map 866 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TBDA, map 868 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TVODA, map 870 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TVIDA, map 872 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TBEDA, map 874 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TBKDA, map 879 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 880 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG2); map 881 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg & ~(1 << 
3)); map 883 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG2); map 884 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG2, reg | (1 << 4)); map 886 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 892 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0); map 895 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); map 896 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0); map 898 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->SECURITY, 0); map 900 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->PHY_DELAY, 0x6); map 901 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, 0x4C); map 906 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->GP_ENABLE); map 907 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, reg & ~(1 << 6)); map 908 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 909 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 910 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | (1 << 2)); map 911 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 916 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->PGSELECT); map 917 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1); map 919 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); map 931 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TPPOLL_STOP, map 934 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0x00); map 935 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50); map 937 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0); map 950 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG5, map 951 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->CONFIG5) & 0x7F); map 954 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, map 955 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->PGSELECT) | 0x08); map 957 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); map 958 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1BFF); map 959 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488); map 961 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x4003); map 967 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg32 = rtl818x_ioread32(priv, 
&priv->map->RF_PARA); map 970 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->RF_PARA, reg32); map 973 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, map 1154 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); map 1155 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); map 1178 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); map 1181 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); map 1193 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); map 1195 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); map 1203 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); map 1206 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, (u8 __iomem *)priv->map + 0xec, 0x3f); map 1209 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); map 1226 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); map 1228 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CMD); map 1231 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, reg); map 1252 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CMD); map 1255 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, reg); map 1259 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 1260 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG4); map 1261 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF); map 1262 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 1276 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c return rtl818x_ioread32(priv, &priv->map->TSFT[0]) | map 1277 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32; map 1348 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 1349 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0], map 1351 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4], map 1353 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 1404 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->AC_BK_PARAM, ac_param); map 1407 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->AC_BE_PARAM, ac_param); map 1410 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->AC_VI_PARAM, ac_param); map 1413 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->AC_VO_PARAM, ac_param); map 1436 
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CW_VAL, map 1472 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time); map 1473 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->SIFS, sifs); map 1474 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->DIFS, difs); map 1477 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, hw_eifs); map 1480 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EIFS_8187SE, hw_eifs); map 1488 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EIFS, hw_eifs); map 1505 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->BSSID[0], map 1507 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->BSSID[2], map 1521 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->MSR, reg); map 1597 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf); map 1617 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 1639 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg); map 1640 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 1654 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6)) map 1659 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 1661 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 1717 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 1782 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->map = pci_iomap(pdev, 1, mem_len); map 1783 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (!priv->map) { map 1784 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c priv->map = pci_iomap(pdev, 0, io_len); map 1788 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (!priv->map) { map 1815 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); map 1938 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c pci_iounmap(pdev, priv->map); map 1963 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c pci_iounmap(pdev, priv->map); map 49 drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c (__le32 __iomem *) &priv->map->RFPinsOutput, phy_config); map 168 drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & map 54 drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c (__le32 __iomem *) &priv->map->RFPinsOutput, phy_config); map 145 drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & map 106 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h struct rtl818x_csr __iomem *map; map 31 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3; map 32 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); map 34 
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7); map 36 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); map 37 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7 | 0x400); map 38 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 41 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 42 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 44 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); map 45 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 55 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 57 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); map 58 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); map 61 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 64 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 65 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 68 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 69 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x400); map 70 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 79 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput); map 80 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); map 81 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect) | 0x400; map 85 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F); map 86 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F); map 88 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 89 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 91 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); map 92 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 99 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 100 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 104 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 106 
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 108 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 110 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 114 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 115 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 120 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x000E); map 121 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x040E); map 122 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 123 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 125 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 127 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 129 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 131 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 133 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 138 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 140 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 142 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 144 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 146 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 148 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 150 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 152 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 155 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1)) map 158 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 160 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 164 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 166 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 169 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82); map 170 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); map 171 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0); map 269 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, map 284 
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 285 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 286 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE); map 287 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON); map 288 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); map 289 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 291 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, map 310 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); map 311 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 312 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488); map 313 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); map 314 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 316 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0xFF & ~(1 << 6)); map 318 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008); map 321 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread16(priv, &priv->map->BRSR); map 322 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF); map 323 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044); map 324 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 325 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44); map 326 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 356 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 433 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D); msleep(1); map 441 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ map 443 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, (__le32 __iomem *)((void __iomem *)priv->map + 0x94), 0x15c00002); map 444 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 500 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, cck_power); map 501 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->TX_GAIN_CCK); map 505 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, ofdm_power); map 539 
drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); map 540 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 541 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x0488); map 542 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); map 543 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 545 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0xFF & ~(1 << 6)); map 547 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00088008); map 550 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread16(priv, &priv->map->BRSR); map 551 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF); map 552 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044); map 553 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 554 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44); map 555 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 557 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 604 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 685 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, (u8 __iomem *)((void __iomem *)priv->map + 0x5B), 0x0D); msleep(1); map 693 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ map 695 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, (__le32 __iomem *)((void __iomem *)priv->map + 0x94), 0x15c00002); map 696 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 706 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 707 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 708 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE); map 709 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_OFF); map 710 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_OFF); map 711 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE); map 712 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 750 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); map 751 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 
0x0488); map 752 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 753 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225.c rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 135 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c tmp = rtl818x_ioread8(priv, &priv->map->rf_sw_config) | 0x02; map 136 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c rtl818x_iowrite8(priv, &priv->map->rf_sw_config, tmp); map 217 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, map 225 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, map 405 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x10); map 406 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x1B); map 408 drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8225se.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); map 55 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c (__le32 __iomem *) &priv->map->RFPinsOutput, phy_config); map 174 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c txconf = rtl818x_ioread32(priv, &priv->map->TX_CONF); map 175 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c rtl818x_iowrite32(priv, &priv->map->TX_CONF, map 187 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c rtl818x_iowrite32(priv, &priv->map->TX_CONF, txconf); map 207 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c if (rtl818x_ioread8(priv, &priv->map->CONFIG2) & map 180 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PHY[3], (data >> 24) & 0xFF); map 181 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PHY[2], (data >> 16) & 0xFF); map 182 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PHY[1], (data >> 8) & 0xFF); map 183 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PHY[0], data & 0xFF); map 595 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 597 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 599 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg); map 600 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam); map 601 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2); map 603 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->ANAPARAM3A, anaparam3); map 605 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg); map 606 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 616 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CMD); map 619 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, reg); map 624 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c if (!(rtl818x_ioread8(priv, &priv->map->CMD) & map 635 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD); map 640 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c if 
(!(rtl818x_ioread8(priv, &priv->map->EEPROM_CMD) & map 662 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); map 677 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0); map 678 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->GPIO0, 0); map 680 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8)); map 681 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->GPIO0, 1); map 682 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); map 684 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 687 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG1); map 690 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG1, reg); map 692 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 694 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0); map 695 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); map 696 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0); map 699 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0); map 700 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3); map 703 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0); map 704 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->GPIO0, 0); map 707 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8)); map 708 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x20); map 709 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0); map 710 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x80); map 711 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x80); map 712 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x80); map 715 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008); map 716 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF); map 717 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044); map 718 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 720 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44); map 721 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 723 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FF7); map 728 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->BRSR, 
0x01F3); map 729 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~1; map 730 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1); map 732 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->TALLY_SEL, 0x80); map 734 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); map 795 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); map 797 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); map 805 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 807 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG1); map 808 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG1, (reg & 0x3F) | 0x80); map 809 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, map 812 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0); map 821 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->TID_AC_MAP, 0xFA50); map 822 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->INT_MIG, 0); map 828 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001); map 833 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); map 834 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488); map 835 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); map 841 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, reg); map 842 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); map 866 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->HSSI_PARA, 0x0600321B); map 875 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0); map 878 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA); map 950 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); map 952 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); map 956 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); map 958 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->TX_CONF, map 973 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF); map 975 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->MAR[0], ~0); map 976 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->MAR[1], ~0); map 993 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); map 995 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); map 998 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c 
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); map 1000 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL); map 1004 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg); map 1009 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); map 1011 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CMD); map 1014 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, reg); map 1029 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); map 1031 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CMD); map 1034 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CMD, reg); map 1039 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 1040 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG4); map 1041 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF); map 1042 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 1058 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c return rtl818x_ioread32(priv, &priv->map->TSFT[0]) | map 1059 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32; map 1134 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 1136 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->MAC[i], map 1138 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 1161 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread32(priv, &priv->map->TX_CONF); map 1166 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->TX_CONF, map 1170 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg); map 1172 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2); map 1173 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100); map 1174 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100); map 1175 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100); map 1210 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22); map 1211 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->SLOT, priv->slot_time); map 1212 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->DIFS, difs); map 1218 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, (u8 *)&priv->map->BRSR + 1, eifs); map 1230 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CARRIER_SENSE_COUNTER, map 1238 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22); map 1240 
drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9); map 1241 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14); map 1242 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x14); map 1244 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14); map 1245 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24); map 1246 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x24); map 1266 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->BSSID[i], map 1283 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->MSR, reg); map 1337 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite32_async(priv, &priv->map->RX_CONF, priv->rx_conf); map 1371 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->CW_VAL, map 1397 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD); map 1420 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg); map 1469 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c priv->map = (struct rtl818x_csr *)0xFF00; map 1489 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6)) map 1494 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 1523 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~1; map 1524 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg | 1); map 1529 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); map 1530 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 1534 drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c reg32 = rtl818x_ioread32(priv, &priv->map->TX_CONF); map 42 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01); map 43 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x00); map 46 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 4); map 47 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); map 50 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) & ~(1 << 5); map 51 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); map 80 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->GPIO0, 0x01); map 81 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0x01); map 84 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 4); map 85 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); map 88 drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c reg = rtl818x_ioread8(priv, &priv->map->PGSELECT) | (1 << 5); map 89 
drivers/net/wireless/realtek/rtl818x/rtl8187/leds.c rtl818x_iowrite8(priv, &priv->map->PGSELECT, reg); map 24 drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c gpio = rtl818x_ioread8(priv, &priv->map->GPIO0); map 25 drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c rtl818x_iowrite8(priv, &priv->map->GPIO0, gpio & ~priv->rfkill_mask); map 26 drivers/net/wireless/realtek/rtl818x/rtl8187/rfkill.c gpio = rtl818x_ioread8(priv, &priv->map->GPIO1); map 101 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h struct rtl818x_csr *map; map 124 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3; map 125 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); map 127 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7); map 129 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); map 130 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7); map 133 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 135 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); map 142 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 144 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); map 145 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1)); map 148 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 151 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 154 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 155 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); map 163 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput); map 164 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); map 165 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); map 170 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x0007); map 171 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x0007); map 174 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 177 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); map 190 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 193 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 194 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, 
&priv->map->RFPinsSelect, reg84); map 213 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput); map 214 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable); map 215 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect); map 219 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F); map 220 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F); map 222 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2)); map 224 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80); map 231 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 235 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 238 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 243 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg); map 248 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 251 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 254 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 260 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 263 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 266 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 269 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 273 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1)) map 276 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 281 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, map 285 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82); map 286 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84); map 287 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0); map 388 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, map 402 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 403 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 404 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 406 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, map 408 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, 
&priv->map->CONFIG3, map 410 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 416 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, map 550 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D); map 558 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ map 652 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, map 657 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); map 658 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c reg = rtl818x_ioread8(priv, &priv->map->CONFIG3); map 659 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 661 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, map 663 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->CONFIG3, map 665 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); map 673 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, map 725 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, map 729 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, map 921 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */ map 964 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK, 0x03); map 965 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM, 0x07); map 966 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); map 334 drivers/net/wireless/realtek/rtl818x/rtl818x.h #define REG_ADDR1(addr) ((u8 __iomem *)priv->map + (addr)) map 335 drivers/net/wireless/realtek/rtl818x/rtl818x.h #define REG_ADDR2(addr) ((__le16 __iomem *)priv->map + ((addr) >> 1)) map 336 drivers/net/wireless/realtek/rtl818x/rtl818x.h #define REG_ADDR4(addr) ((__le32 __iomem *)priv->map + ((addr) >> 2)) map 85 drivers/net/wireless/realtek/rtw88/efuse.c static int rtw_dump_physical_efuse_map(struct rtw_dev *rtwdev, u8 *map) map 113 drivers/net/wireless/realtek/rtw88/efuse.c *(map + addr) = (u8)(efuse_ctl & BIT_MASK_EF_DATA); map 625 drivers/net/wireless/realtek/rtw88/main.h int (*read_efuse)(struct rtw_dev *rtwdev, u8 *map); map 21 drivers/net/wireless/realtek/rtw88/rtw8822b.c struct rtw8822b_efuse *map) map 23 drivers/net/wireless/realtek/rtw88/rtw8822b.c ether_addr_copy(efuse->addr, map->e.mac_addr); map 29 drivers/net/wireless/realtek/rtw88/rtw8822b.c struct rtw8822b_efuse *map; map 32 drivers/net/wireless/realtek/rtw88/rtw8822b.c map = (struct rtw8822b_efuse *)log_map; map 34 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->rfe_option = map->rfe_option; map 35 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->rf_board_option = map->rf_board_option; map 36 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->crystal_cap = map->xtal_k; map 37 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->pa_type_2g = 
map->pa_type; map 38 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->pa_type_5g = map->pa_type; map 39 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->lna_type_2g = map->lna_type_2g[0]; map 40 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->lna_type_5g = map->lna_type_5g[0]; map 41 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->channel_plan = map->channel_plan; map 42 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->country_code[0] = map->country_code[0]; map 43 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->country_code[1] = map->country_code[1]; map 44 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->bt_setting = map->rf_bt_setting; map 45 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->regd = map->rf_board_option & 0x7; map 48 drivers/net/wireless/realtek/rtw88/rtw8822b.c efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; map 52 drivers/net/wireless/realtek/rtw88/rtw8822b.c rtw8822be_efuse_parsing(efuse, map); map 22 drivers/net/wireless/realtek/rtw88/rtw8822c.c struct rtw8822c_efuse *map) map 24 drivers/net/wireless/realtek/rtw88/rtw8822c.c ether_addr_copy(efuse->addr, map->e.mac_addr); map 30 drivers/net/wireless/realtek/rtw88/rtw8822c.c struct rtw8822c_efuse *map; map 33 drivers/net/wireless/realtek/rtw88/rtw8822c.c map = (struct rtw8822c_efuse *)log_map; map 35 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->rfe_option = map->rfe_option; map 36 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->rf_board_option = map->rf_board_option; map 37 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->crystal_cap = map->xtal_k; map 38 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->channel_plan = map->channel_plan; map 39 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->country_code[0] = map->country_code[0]; map 40 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->country_code[1] = map->country_code[1]; map 41 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->bt_setting = map->rf_bt_setting; map 42 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->regd = map->rf_board_option & 0x7; map 45 drivers/net/wireless/realtek/rtw88/rtw8822c.c efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i]; map 49 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw8822ce_efuse_parsing(efuse, map); map 89 drivers/net/wireless/st/cw1200/debug.c u32 map) map 94 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, "%s ", (map & BIT(i)) ? 
"**" : ".."); map 1186 drivers/ntb/hw/mscc/ntb_hw_switchtec.c static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl) map 1195 drivers/ntb/hw/mscc/ntb_hw_switchtec.c map[cnt++] = i; map 60 drivers/nubus/nubus.c static inline int not_useful(void *p, int map) map 65 drivers/nubus/nubus.c if (map & (1 << pv)) map 70 drivers/nubus/nubus.c static unsigned long nubus_get_rom(unsigned char **ptr, int len, int map) map 78 drivers/nubus/nubus.c while (not_useful(p, map)) map 87 drivers/nubus/nubus.c static void nubus_rewind(unsigned char **ptr, int len, int map) map 94 drivers/nubus/nubus.c } while (not_useful(p, map)); map 100 drivers/nubus/nubus.c static void nubus_advance(unsigned char **ptr, int len, int map) map 105 drivers/nubus/nubus.c while (not_useful(p, map)) map 113 drivers/nubus/nubus.c static void nubus_move(unsigned char **ptr, int len, int map) map 118 drivers/nubus/nubus.c nubus_advance(ptr, len, map); map 120 drivers/nubus/nubus.c nubus_rewind(ptr, -len, map); map 426 drivers/nvme/host/pci.c struct blk_mq_queue_map *map = &set->map[i]; map 428 drivers/nvme/host/pci.c map->nr_queues = dev->io_queues[i]; map 429 drivers/nvme/host/pci.c if (!map->nr_queues) { map 438 drivers/nvme/host/pci.c map->queue_offset = qoff; map 440 drivers/nvme/host/pci.c blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); map 442 drivers/nvme/host/pci.c blk_mq_map_queues(map); map 443 drivers/nvme/host/pci.c qoff += map->nr_queues; map 444 drivers/nvme/host/pci.c offset += map->nr_queues; map 1828 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_DEFAULT].nr_queues = map 1830 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; map 1831 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_READ].nr_queues = map 1833 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_READ].queue_offset = map 1837 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_DEFAULT].nr_queues = map 1839 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; map 1840 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_READ].nr_queues = map 1842 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_READ].queue_offset = 0; map 1844 drivers/nvme/host/rdma.c blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT], map 1846 drivers/nvme/host/rdma.c blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ], map 1851 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_POLL].nr_queues = map 1853 drivers/nvme/host/rdma.c set->map[HCTX_TYPE_POLL].queue_offset = map 1856 drivers/nvme/host/rdma.c blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); map 2185 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_DEFAULT].nr_queues = map 2187 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; map 2188 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_READ].nr_queues = map 2190 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_READ].queue_offset = map 2194 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_DEFAULT].nr_queues = map 2196 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_DEFAULT].queue_offset = 0; map 2197 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_READ].nr_queues = map 2199 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_READ].queue_offset = 0; map 2201 drivers/nvme/host/tcp.c blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); map 2202 drivers/nvme/host/tcp.c blk_mq_map_queues(&set->map[HCTX_TYPE_READ]); map 2206 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_POLL].nr_queues = map 2208 drivers/nvme/host/tcp.c set->map[HCTX_TYPE_POLL].queue_offset = map 2211 drivers/nvme/host/tcp.c blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]); map 84 drivers/nvmem/bcm-ocotp.c const struct otpc_map *map; map 181 
drivers/nvmem/bcm-ocotp.c for (i = 0; i < priv->map->otpc_row_size; i++) { map 183 drivers/nvmem/bcm-ocotp.c priv->map->data_r_offset[i]); map 212 drivers/nvmem/bcm-ocotp.c for (i = 0; i < priv->map->otpc_row_size; i++) { map 213 drivers/nvmem/bcm-ocotp.c writel(*buf, priv->base + priv->map->data_w_offset[i]); map 267 drivers/nvmem/bcm-ocotp.c priv->map = device_get_match_data(dev); map 268 drivers/nvmem/bcm-ocotp.c if (!priv->map) map 299 drivers/nvmem/bcm-ocotp.c if (priv->map == &otp_map_v2) { map 47 drivers/of/address.c u64 (*map)(__be32 *addr, const __be32 *range, map 438 drivers/of/address.c .map = of_bus_pci_map, map 449 drivers/of/address.c .map = of_bus_isa_map, map 459 drivers/of/address.c .map = of_bus_default_map, map 539 drivers/of/address.c offset = bus->map(addr, ranges, na, ns, pna); map 1586 drivers/of/base.c const __be32 *map, *mask, *pass; map 1632 drivers/of/base.c map = of_get_property(cur, map_name, &map_len); map 1633 drivers/of/base.c if (!map) { map 1649 drivers/of/base.c match &= !((match_array[i] ^ *map++) & mask[i]); map 1652 drivers/of/base.c new = of_find_node_by_phandle(be32_to_cpup(map)); map 1653 drivers/of/base.c map++; map 1674 drivers/of/base.c map += new_size; map 1690 drivers/of/base.c match_array = map - new_size; map 1692 drivers/of/base.c __be32 val = *(map - new_size + i); map 2266 drivers/of/base.c const __be32 *map = NULL; map 2271 drivers/of/base.c map = of_get_property(np, map_name, &map_len); map 2272 drivers/of/base.c if (!map) { map 2280 drivers/of/base.c if (!map_len || map_len % (4 * sizeof(*map))) { map 2297 drivers/of/base.c for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) { map 2299 drivers/of/base.c u32 rid_base = be32_to_cpup(map + 0); map 2300 drivers/of/base.c u32 phandle = be32_to_cpup(map + 1); map 2301 drivers/of/base.c u32 out_base = be32_to_cpup(map + 2); map 2302 drivers/of/base.c u32 rid_len = be32_to_cpup(map + 3); map 42 drivers/of/fdt_address.c u64 (*map)(__be32 *addr, const __be32 *range, map 104 drivers/of/fdt_address.c .map = fdt_bus_default_map, map 135 drivers/of/fdt_address.c offset = bus->map(addr, ranges, na, ns, pna); map 77 drivers/of/of_numa.c static int __init of_numa_parse_distance_map_v1(struct device_node *map) map 85 drivers/of/of_numa.c matrix = of_get_property(map, "distance-matrix", NULL); map 91 drivers/of/of_numa.c entry_count = of_property_count_u32_elems(map, "distance-matrix"); map 232 drivers/pci/controller/dwc/pci-dra7xx.c .map = dra7xx_pcie_intx_map, map 354 drivers/pci/controller/dwc/pci-keystone.c .map = ks_pcie_init_legacy_irq_map, map 227 drivers/pci/controller/dwc/pcie-uniphier.c .map = uniphier_pcie_intx_map, map 765 drivers/pci/controller/pci-aardvark.c .map = advk_pcie_irq_map, map 340 drivers/pci/controller/pci-ftpci100.c .map = faraday_pci_irq_map, map 1745 drivers/pci/controller/pci-tegra.c .map = tegra_msi_map, map 250 drivers/pci/controller/pci-v3-semi.c struct regmap *map; map 477 drivers/pci/controller/pci-v3-semi.c if (v3->map) map 478 drivers/pci/controller/pci-v3-semi.c regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, map 489 drivers/pci/controller/pci-v3-semi.c v3->map = map 491 drivers/pci/controller/pci-v3-semi.c if (IS_ERR(v3->map)) { map 496 drivers/pci/controller/pci-v3-semi.c regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val); map 498 drivers/pci/controller/pci-v3-semi.c regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, map 639 drivers/pci/controller/pcie-altera.c .map = altera_pcie_intx_map, map 567 drivers/pci/controller/pcie-mediatek.c .map = 
mtk_pcie_intx_map, map 718 drivers/pci/controller/pcie-mobiveil.c .map = mobiveil_pcie_intx_map, map 879 drivers/pci/controller/pcie-rcar.c .map = rcar_msi_map, map 91 drivers/pci/controller/pcie-rockchip-host.c u8 map; map 97 drivers/pci/controller/pcie-rockchip-host.c map = val & PCIE_CORE_LANE_MAP_MASK; map 101 drivers/pci/controller/pcie-rockchip-host.c map = bitrev8(map) >> 4; map 103 drivers/pci/controller/pcie-rockchip-host.c return map; map 712 drivers/pci/controller/pcie-rockchip-host.c .map = rockchip_pcie_intx_map, map 434 drivers/pci/controller/pcie-xilinx-nwl.c .map = nwl_legacy_map, map 332 drivers/pci/controller/pcie-xilinx.c .map = xilinx_pcie_msi_map, map 375 drivers/pci/controller/pcie-xilinx.c .map = xilinx_pcie_intx_map, map 5503 drivers/pci/quirks.c void __iomem *map; map 5518 drivers/pci/quirks.c map = pci_iomap(pdev, 0, 0x23000); map 5519 drivers/pci/quirks.c if (!map) { map 5528 drivers/pci/quirks.c if (ioread32(map + 0x2240c) & 0x2) { map 5535 drivers/pci/quirks.c iounmap(map); map 1345 drivers/pci/switch/switchtec.c void __iomem *map; map 1370 drivers/pci/switch/switchtec.c map = devm_ioremap(&pdev->dev, map 1373 drivers/pci/switch/switchtec.c if (!map) map 1376 drivers/pci/switch/switchtec.c stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET; map 183 drivers/pcmcia/at91_cf.c at91_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map) map 187 drivers/pcmcia/at91_cf.c if (map->card_start) map 192 drivers/pcmcia/at91_cf.c map->flags &= (MAP_ACTIVE | MAP_ATTRIB | MAP_16BIT); map 193 drivers/pcmcia/at91_cf.c if (map->flags & MAP_ATTRIB) map 194 drivers/pcmcia/at91_cf.c map->static_start = cf->phys_baseaddr + CF_ATTR_PHYS; map 196 drivers/pcmcia/at91_cf.c map->static_start = cf->phys_baseaddr + CF_MEM_PHYS; map 291 drivers/pcmcia/bcm63xx_pcmcia.c struct pccard_io_map *map) map 299 drivers/pcmcia/bcm63xx_pcmcia.c struct pccard_mem_map *map) map 305 drivers/pcmcia/bcm63xx_pcmcia.c if (map->flags & MAP_ATTRIB) map 310 drivers/pcmcia/bcm63xx_pcmcia.c map->static_start = res->start + map->card_start; map 391 drivers/pcmcia/db1xxx_ss.c struct pccard_io_map *map) map 395 drivers/pcmcia/db1xxx_ss.c map->start = (u32)sock->virt_io; map 396 drivers/pcmcia/db1xxx_ss.c map->stop = map->start + IO_MAP_SIZE; map 402 drivers/pcmcia/db1xxx_ss.c struct pccard_mem_map *map) map 406 drivers/pcmcia/db1xxx_ss.c if (map->flags & MAP_ATTRIB) map 407 drivers/pcmcia/db1xxx_ss.c map->static_start = sock->phys_attr + map->card_start; map 409 drivers/pcmcia/db1xxx_ss.c map->static_start = sock->phys_mem + map->card_start; map 153 drivers/pcmcia/electra_cf.c struct pccard_mem_map *map) map 157 drivers/pcmcia/electra_cf.c if (map->card_start) map 160 drivers/pcmcia/electra_cf.c map->static_start = cf->mem_phys; map 161 drivers/pcmcia/electra_cf.c map->flags &= MAP_ACTIVE|MAP_ATTRIB; map 162 drivers/pcmcia/electra_cf.c if (!(map->flags & MAP_ATTRIB)) map 163 drivers/pcmcia/electra_cf.c map->static_start += 0x800; map 406 drivers/pcmcia/i82092.c io.map = i; map 410 drivers/pcmcia/i82092.c mem.map = i; map 561 drivers/pcmcia/i82092.c unsigned char map, ioctl; map 565 drivers/pcmcia/i82092.c map = io->map; map 568 drivers/pcmcia/i82092.c if (map > 1) { map 578 drivers/pcmcia/i82092.c if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_IO(map)) map 579 drivers/pcmcia/i82092.c indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_IO(map)); map 584 drivers/pcmcia/i82092.c indirect_write16(sock,I365_IO(map)+I365_W_START,io->start); map 585 drivers/pcmcia/i82092.c 
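
The drivers/of/base.c entries above walk a property of 4-cell tuples (an input ID base, a phandle, an output base, a length). A simplified standalone sketch of how such a table generally translates one ID follows; it ignores the phandle cell and models the arithmetic only, not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

struct map_entry {
    uint32_t rid_base;   /* first input ID covered by this entry */
    uint32_t out_base;   /* output ID corresponding to rid_base */
    uint32_t rid_len;    /* number of IDs covered */
};

/* Returns 0 and fills *out if id falls inside an entry, -1 otherwise. */
static int translate_id(const struct map_entry *map, int n,
                        uint32_t id, uint32_t *out)
{
    for (int i = 0; i < n; i++) {
        if (id >= map[i].rid_base &&
            id < map[i].rid_base + map[i].rid_len) {
            *out = map[i].out_base + (id - map[i].rid_base);
            return 0;
        }
    }
    return -1;
}

int main(void)
{
    const struct map_entry map[] = {
        { .rid_base = 0x0000, .out_base = 0x100, .rid_len = 0x80 },
        { .rid_base = 0x0100, .out_base = 0x200, .rid_len = 0x10 },
    };
    uint32_t out;

    if (!translate_id(map, 2, 0x0105, &out))
        printf("0x0105 -> 0x%x\n", out);   /* prints 0x205 */
    return 0;
}
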
indirect_write16(sock,I365_IO(map)+I365_W_STOP,io->stop); map 587 drivers/pcmcia/i82092.c ioctl = indirect_read(sock,I365_IOCTL) & ~I365_IOCTL_MASK(map); map 590 drivers/pcmcia/i82092.c ioctl |= I365_IOCTL_16BIT(map); map 596 drivers/pcmcia/i82092.c indirect_setbit(sock,I365_ADDRWIN,I365_ENA_IO(map)); map 608 drivers/pcmcia/i82092.c unsigned char map; map 614 drivers/pcmcia/i82092.c map = mem->map; map 615 drivers/pcmcia/i82092.c if (map > 4) { map 634 drivers/pcmcia/i82092.c if (indirect_read(sock, I365_ADDRWIN) & I365_ENA_MEM(map)) map 635 drivers/pcmcia/i82092.c indirect_resetbit(sock, I365_ADDRWIN, I365_ENA_MEM(map)); map 641 drivers/pcmcia/i82092.c base = I365_MEM(map); map 683 drivers/pcmcia/i82092.c indirect_setbit(sock, I365_ADDRWIN, I365_ENA_MEM(map)); map 1034 drivers/pcmcia/i82365.c u_char map, ioctl; map 1037 drivers/pcmcia/i82365.c "%#llx-%#llx)\n", sock, io->map, io->flags, io->speed, map 1039 drivers/pcmcia/i82365.c map = io->map; map 1040 drivers/pcmcia/i82365.c if ((map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || map 1043 drivers/pcmcia/i82365.c if (i365_get(sock, I365_ADDRWIN) & I365_ENA_IO(map)) map 1044 drivers/pcmcia/i82365.c i365_bclr(sock, I365_ADDRWIN, I365_ENA_IO(map)); map 1045 drivers/pcmcia/i82365.c i365_set_pair(sock, I365_IO(map)+I365_W_START, io->start); map 1046 drivers/pcmcia/i82365.c i365_set_pair(sock, I365_IO(map)+I365_W_STOP, io->stop); map 1047 drivers/pcmcia/i82365.c ioctl = i365_get(sock, I365_IOCTL) & ~I365_IOCTL_MASK(map); map 1048 drivers/pcmcia/i82365.c if (io->speed) ioctl |= I365_IOCTL_WAIT(map); map 1049 drivers/pcmcia/i82365.c if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map); map 1050 drivers/pcmcia/i82365.c if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map); map 1051 drivers/pcmcia/i82365.c if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map); map 1055 drivers/pcmcia/i82365.c i365_bset(sock, I365_ADDRWIN, I365_ENA_IO(map)); map 1064 drivers/pcmcia/i82365.c u_char map; map 1067 drivers/pcmcia/i82365.c "%#x)\n", sock, mem->map, mem->flags, mem->speed, map 1071 drivers/pcmcia/i82365.c map = mem->map; map 1072 drivers/pcmcia/i82365.c if ((map > 4) || (mem->card_start > 0x3ffffff) || map 1079 drivers/pcmcia/i82365.c if (i365_get(sock, I365_ADDRWIN) & I365_ENA_MEM(map)) map 1080 drivers/pcmcia/i82365.c i365_bclr(sock, I365_ADDRWIN, I365_ENA_MEM(map)); map 1082 drivers/pcmcia/i82365.c base = I365_MEM(map); map 1104 drivers/pcmcia/i82365.c i365_bset(sock, I365_ADDRWIN, I365_ENA_MEM(map)); map 1212 drivers/pcmcia/i82365.c io.map = i; map 1216 drivers/pcmcia/i82365.c mem.map = i; map 48 drivers/pcmcia/i82365.h #define I365_IO(map) (0x08+((map)<<2)) map 49 drivers/pcmcia/i82365.h #define I365_MEM(map) (0x10+((map)<<3)) map 101 drivers/pcmcia/i82365.h #define I365_ENA_IO(map) (0x40 << (map)) map 102 drivers/pcmcia/i82365.h #define I365_ENA_MEM(map) (0x01 << (map)) map 105 drivers/pcmcia/i82365.h #define I365_IOCTL_MASK(map) (0x0F << (map<<2)) map 106 drivers/pcmcia/i82365.h #define I365_IOCTL_WAIT(map) (0x08 << (map<<2)) map 107 drivers/pcmcia/i82365.h #define I365_IOCTL_0WS(map) (0x04 << (map<<2)) map 108 drivers/pcmcia/i82365.h #define I365_IOCTL_IOCS16(map) (0x02 << (map<<2)) map 109 drivers/pcmcia/i82365.h #define I365_IOCTL_16BIT(map) (0x01 << (map<<2)) map 172 drivers/pcmcia/omap_cf.c omap_cf_set_mem_map(struct pcmcia_socket *s, struct pccard_mem_map *map) map 176 drivers/pcmcia/omap_cf.c if (map->card_start) map 179 drivers/pcmcia/omap_cf.c map->static_start = cf->phys_cf; map 180 drivers/pcmcia/omap_cf.c map->flags 
&= MAP_ACTIVE|MAP_ATTRIB|MAP_16BIT; map 181 drivers/pcmcia/omap_cf.c if (map->flags & MAP_ATTRIB) map 182 drivers/pcmcia/omap_cf.c map->static_start += SZ_2K; map 275 drivers/pcmcia/pcmcia_resource.c io_off.map = i; map 276 drivers/pcmcia/pcmcia_resource.c io_on.map = i; map 373 drivers/pcmcia/pcmcia_resource.c io.map = i; map 591 drivers/pcmcia/pcmcia_resource.c iomap.map = i; map 891 drivers/pcmcia/pcmcia_resource.c win->map = w+1; map 912 drivers/pcmcia/pcmcia_resource.c res->flags |= (win->map << 2) | IORESOURCE_MEM; map 427 drivers/pcmcia/pd6729.c unsigned char map, ioctl; map 429 drivers/pcmcia/pd6729.c map = io->map; map 432 drivers/pcmcia/pd6729.c if (map > 1) { map 438 drivers/pcmcia/pd6729.c if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_IO(map)) map 439 drivers/pcmcia/pd6729.c indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); map 445 drivers/pcmcia/pd6729.c indirect_write16(socket, I365_IO(map)+I365_W_START, io->start); map 446 drivers/pcmcia/pd6729.c indirect_write16(socket, I365_IO(map)+I365_W_STOP, io->stop); map 448 drivers/pcmcia/pd6729.c ioctl = indirect_read(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map); map 451 drivers/pcmcia/pd6729.c ioctl |= I365_IOCTL_0WS(map); map 453 drivers/pcmcia/pd6729.c ioctl |= I365_IOCTL_16BIT(map); map 455 drivers/pcmcia/pd6729.c ioctl |= I365_IOCTL_IOCS16(map); map 461 drivers/pcmcia/pd6729.c indirect_setbit(socket, I365_ADDRWIN, I365_ENA_IO(map)); map 472 drivers/pcmcia/pd6729.c unsigned char map; map 474 drivers/pcmcia/pd6729.c map = mem->map; map 475 drivers/pcmcia/pd6729.c if (map > 4) { map 486 drivers/pcmcia/pd6729.c if (indirect_read(socket, I365_ADDRWIN) & I365_ENA_MEM(map)) map 487 drivers/pcmcia/pd6729.c indirect_resetbit(socket, I365_ADDRWIN, I365_ENA_MEM(map)); map 490 drivers/pcmcia/pd6729.c base = I365_MEM(map); map 518 drivers/pcmcia/pd6729.c indirect_write(socket, PD67_EXT_INDEX, PD67_MEM_PAGE(map)); map 538 drivers/pcmcia/pd6729.c indirect_setbit(socket, I365_ADDRWIN, I365_ENA_MEM(map)); map 552 drivers/pcmcia/pd6729.c io.map = i; map 556 drivers/pcmcia/pd6729.c mem.map = i; map 108 drivers/pcmcia/rsrc_nonstatic.c static int add_interval(struct resource_map *map, u_long base, u_long num) map 112 drivers/pcmcia/rsrc_nonstatic.c for (p = map; ; p = p->next) { map 113 drivers/pcmcia/rsrc_nonstatic.c if ((p != map) && (p->base+p->num >= base)) { map 117 drivers/pcmcia/rsrc_nonstatic.c if ((p->next == map) || (p->next->base > base+num-1)) map 132 drivers/pcmcia/rsrc_nonstatic.c static int sub_interval(struct resource_map *map, u_long base, u_long num) map 136 drivers/pcmcia/rsrc_nonstatic.c for (p = map; ; p = q) { map 138 drivers/pcmcia/rsrc_nonstatic.c if (q == map) map 297 drivers/pcmcia/rsrc_nonstatic.c pccard_mem_map map; map 303 drivers/pcmcia/rsrc_nonstatic.c map.map = 0; map 304 drivers/pcmcia/rsrc_nonstatic.c map.flags = MAP_ACTIVE; map 305 drivers/pcmcia/rsrc_nonstatic.c map.speed = 0; map 306 drivers/pcmcia/rsrc_nonstatic.c map.res = res; map 307 drivers/pcmcia/rsrc_nonstatic.c map.card_start = 0; map 308 drivers/pcmcia/rsrc_nonstatic.c s->ops->set_mem_map(s, &map); map 317 drivers/pcmcia/rsrc_nonstatic.c map.flags = 0; map 318 drivers/pcmcia/rsrc_nonstatic.c s->ops->set_mem_map(s, &map); map 586 drivers/pcmcia/rsrc_nonstatic.c struct resource_map *map; map 612 drivers/pcmcia/rsrc_nonstatic.c for (m = data->map->next; m != data->map; m = m->next) { map 638 drivers/pcmcia/rsrc_nonstatic.c if (m == data->map) map 695 drivers/pcmcia/rsrc_nonstatic.c data.map = &s_data->io_db; map 818 
drivers/pcmcia/rsrc_nonstatic.c data.map = &s_data->mem_db_valid; map 842 drivers/pcmcia/rsrc_nonstatic.c data.map = &s_data->mem_db; map 562 drivers/pcmcia/soc_common.c struct pcmcia_socket *sock, struct pccard_io_map *map) map 565 drivers/pcmcia/soc_common.c unsigned short speed = map->speed; map 568 drivers/pcmcia/soc_common.c map->map, map->speed, (unsigned long long)map->start, map 569 drivers/pcmcia/soc_common.c (unsigned long long)map->stop); map 571 drivers/pcmcia/soc_common.c (map->flags == 0) ? "<NONE>" : "", map 572 drivers/pcmcia/soc_common.c (map->flags & MAP_ACTIVE) ? "ACTIVE " : "", map 573 drivers/pcmcia/soc_common.c (map->flags & MAP_16BIT) ? "16BIT " : "", map 574 drivers/pcmcia/soc_common.c (map->flags & MAP_AUTOSZ) ? "AUTOSZ " : "", map 575 drivers/pcmcia/soc_common.c (map->flags & MAP_0WS) ? "0WS " : "", map 576 drivers/pcmcia/soc_common.c (map->flags & MAP_WRPROT) ? "WRPROT " : "", map 577 drivers/pcmcia/soc_common.c (map->flags & MAP_USE_WAIT) ? "USE_WAIT " : "", map 578 drivers/pcmcia/soc_common.c (map->flags & MAP_PREFETCH) ? "PREFETCH " : ""); map 580 drivers/pcmcia/soc_common.c if (map->map >= MAX_IO_WIN) { map 582 drivers/pcmcia/soc_common.c map->map); map 586 drivers/pcmcia/soc_common.c if (map->flags & MAP_ACTIVE) { map 593 drivers/pcmcia/soc_common.c skt->spd_io[map->map] = speed; map 596 drivers/pcmcia/soc_common.c if (map->stop == 1) map 597 drivers/pcmcia/soc_common.c map->stop = PAGE_SIZE-1; map 599 drivers/pcmcia/soc_common.c map->stop -= map->start; map 600 drivers/pcmcia/soc_common.c map->stop += skt->socket.io_offset; map 601 drivers/pcmcia/soc_common.c map->start = skt->socket.io_offset; map 616 drivers/pcmcia/soc_common.c struct pcmcia_socket *sock, struct pccard_mem_map *map) map 620 drivers/pcmcia/soc_common.c unsigned short speed = map->speed; map 623 drivers/pcmcia/soc_common.c map->map, map->speed, map->card_start); map 625 drivers/pcmcia/soc_common.c (map->flags == 0) ? "<NONE>" : "", map 626 drivers/pcmcia/soc_common.c (map->flags & MAP_ACTIVE) ? "ACTIVE " : "", map 627 drivers/pcmcia/soc_common.c (map->flags & MAP_16BIT) ? "16BIT " : "", map 628 drivers/pcmcia/soc_common.c (map->flags & MAP_AUTOSZ) ? "AUTOSZ " : "", map 629 drivers/pcmcia/soc_common.c (map->flags & MAP_0WS) ? "0WS " : "", map 630 drivers/pcmcia/soc_common.c (map->flags & MAP_WRPROT) ? "WRPROT " : "", map 631 drivers/pcmcia/soc_common.c (map->flags & MAP_ATTRIB) ? "ATTRIB " : "", map 632 drivers/pcmcia/soc_common.c (map->flags & MAP_USE_WAIT) ? 
"USE_WAIT " : ""); map 634 drivers/pcmcia/soc_common.c if (map->map >= MAX_WIN) map 637 drivers/pcmcia/soc_common.c if (map->flags & MAP_ACTIVE) { map 644 drivers/pcmcia/soc_common.c if (map->flags & MAP_ATTRIB) { map 646 drivers/pcmcia/soc_common.c skt->spd_attr[map->map] = speed; map 647 drivers/pcmcia/soc_common.c skt->spd_mem[map->map] = 0; map 650 drivers/pcmcia/soc_common.c skt->spd_attr[map->map] = 0; map 651 drivers/pcmcia/soc_common.c skt->spd_mem[map->map] = speed; map 656 drivers/pcmcia/soc_common.c map->static_start = res->start + map->card_start; map 701 drivers/pcmcia/tcic.c "%#llx-%#llx)\n", psock, io->map, io->flags, io->speed, map 703 drivers/pcmcia/tcic.c if ((io->map > 1) || (io->start > 0xffff) || (io->stop > 0xffff) || map 706 drivers/pcmcia/tcic.c addr = TCIC_IWIN(psock, io->map); map 738 drivers/pcmcia/tcic.c "%#llx-%#llx, %#x)\n", psock, mem->map, mem->flags, map 741 drivers/pcmcia/tcic.c if ((mem->map > 3) || (mem->card_start > 0x3ffffff) || map 746 drivers/pcmcia/tcic.c addr = TCIC_MWIN(psock, mem->map); map 784 drivers/pcmcia/tcic.c io.map = i; map 788 drivers/pcmcia/tcic.c mem.map = i; map 217 drivers/pcmcia/tcic.h #define TCIC_MWIN(sock,map) (0x100+(((map)+((sock)<<2))<<3)) map 245 drivers/pcmcia/tcic.h #define TCIC_IWIN(sock,map) (0x200+(((map)+((sock)<<1))<<2)) map 151 drivers/pcmcia/ti113x.h #define TI113X_IO_OFFSET(map) (0x36+((map)<<1)) map 371 drivers/pcmcia/vrc4171_card.c u_char map; map 374 drivers/pcmcia/vrc4171_card.c io == NULL || io->map >= IO_MAX_MAPS || map 379 drivers/pcmcia/vrc4171_card.c map = io->map; map 382 drivers/pcmcia/vrc4171_card.c if (addrwin & I365_ENA_IO(map)) { map 383 drivers/pcmcia/vrc4171_card.c addrwin &= ~I365_ENA_IO(map); map 387 drivers/pcmcia/vrc4171_card.c exca_write_word(slot, I365_IO(map)+I365_W_START, io->start); map 388 drivers/pcmcia/vrc4171_card.c exca_write_word(slot, I365_IO(map)+I365_W_STOP, io->stop); map 392 drivers/pcmcia/vrc4171_card.c ioctl |= I365_IOCTL_WAIT(map); map 394 drivers/pcmcia/vrc4171_card.c ioctl |= I365_IOCTL_16BIT(map); map 396 drivers/pcmcia/vrc4171_card.c ioctl |= I365_IOCTL_IOCS16(map); map 398 drivers/pcmcia/vrc4171_card.c ioctl |= I365_IOCTL_0WS(map); map 402 drivers/pcmcia/vrc4171_card.c addrwin |= I365_ENA_IO(map); map 414 drivers/pcmcia/vrc4171_card.c u_char map; map 417 drivers/pcmcia/vrc4171_card.c mem == NULL || mem->map >= MEM_MAX_MAPS || map 426 drivers/pcmcia/vrc4171_card.c map = mem->map; map 429 drivers/pcmcia/vrc4171_card.c if (addrwin & I365_ENA_MEM(map)) { map 430 drivers/pcmcia/vrc4171_card.c addrwin &= ~I365_ENA_MEM(map); map 437 drivers/pcmcia/vrc4171_card.c exca_write_word(slot, I365_MEM(map)+I365_W_START, start); map 453 drivers/pcmcia/vrc4171_card.c exca_write_word(slot, I365_MEM(map)+I365_W_STOP, stop); map 460 drivers/pcmcia/vrc4171_card.c exca_write_word(slot, I365_MEM(map)+I365_W_OFF, offset); map 463 drivers/pcmcia/vrc4171_card.c addrwin |= I365_ENA_MEM(map); map 252 drivers/pcmcia/vrc4173_cardu.c u_char map; map 254 drivers/pcmcia/vrc4173_cardu.c map = io->map; map 255 drivers/pcmcia/vrc4173_cardu.c if (map > 1) map 258 drivers/pcmcia/vrc4173_cardu.c io->start = exca_readw(socket, IO_WIN_SA(map)); map 259 drivers/pcmcia/vrc4173_cardu.c io->stop = exca_readw(socket, IO_WIN_EA(map)); map 263 drivers/pcmcia/vrc4173_cardu.c io->flags = (window & IO_WIN_EN(map)) ? 
MAP_ACTIVE : 0; map 264 drivers/pcmcia/vrc4173_cardu.c if (ioctl & IO_WIN_DATA_AUTOSZ(map)) map 266 drivers/pcmcia/vrc4173_cardu.c else if (ioctl & IO_WIN_DATA_16BIT(map)) map 277 drivers/pcmcia/vrc4173_cardu.c u_char map; map 279 drivers/pcmcia/vrc4173_cardu.c map = io->map; map 280 drivers/pcmcia/vrc4173_cardu.c if (map > 1) map 284 drivers/pcmcia/vrc4173_cardu.c enable = IO_WIN_EN(map); map 291 drivers/pcmcia/vrc4173_cardu.c exca_writew(socket, IO_WIN_SA(map), io->start); map 292 drivers/pcmcia/vrc4173_cardu.c exca_writew(socket, IO_WIN_EA(map), io->stop); map 294 drivers/pcmcia/vrc4173_cardu.c ioctl = exca_readb(socket, IO_WIN_CNT) & ~IO_WIN_CNT_MASK(map); map 295 drivers/pcmcia/vrc4173_cardu.c if (io->flags & MAP_AUTOSZ) ioctl |= IO_WIN_DATA_AUTOSZ(map); map 296 drivers/pcmcia/vrc4173_cardu.c else if (io->flags & MAP_16BIT) ioctl |= IO_WIN_DATA_16BIT(map); map 310 drivers/pcmcia/vrc4173_cardu.c u_char map; map 312 drivers/pcmcia/vrc4173_cardu.c map = mem->map; map 313 drivers/pcmcia/vrc4173_cardu.c if (map > 4) map 317 drivers/pcmcia/vrc4173_cardu.c mem->flags = (window & MEM_WIN_EN(map)) ? MAP_ACTIVE : 0; map 319 drivers/pcmcia/vrc4173_cardu.c start = exca_readw(socket, MEM_WIN_SA(map)); map 323 drivers/pcmcia/vrc4173_cardu.c stop = exca_readw(socket, MEM_WIN_EA(map)); map 326 drivers/pcmcia/vrc4173_cardu.c offset = exca_readw(socket, MEM_WIN_OA(map)); map 332 drivers/pcmcia/vrc4173_cardu.c page = exca_readb(socket, MEM_WIN_SAU(map)) << 24; map 345 drivers/pcmcia/vrc4173_cardu.c u_char map; map 347 drivers/pcmcia/vrc4173_cardu.c map = mem->map; map 352 drivers/pcmcia/vrc4173_cardu.c if (map > 4 || sys_start > sys_stop || ((sys_start ^ sys_stop) >> 24) || map 357 drivers/pcmcia/vrc4173_cardu.c enable = MEM_WIN_EN(map); map 363 drivers/pcmcia/vrc4173_cardu.c exca_writeb(socket, MEM_WIN_SAU(map), sys_start >> 24); map 367 drivers/pcmcia/vrc4173_cardu.c exca_writew(socket, MEM_WIN_SA(map), value); map 370 drivers/pcmcia/vrc4173_cardu.c exca_writew(socket, MEM_WIN_EA(map), value); map 375 drivers/pcmcia/vrc4173_cardu.c exca_writew(socket, MEM_WIN_OA(map), value); map 177 drivers/pcmcia/xxs1500_ss.c struct pccard_io_map *map) map 181 drivers/pcmcia/xxs1500_ss.c map->start = (u32)sock->virt_io; map 182 drivers/pcmcia/xxs1500_ss.c map->stop = map->start + IO_MAP_SIZE; map 188 drivers/pcmcia/xxs1500_ss.c struct pccard_mem_map *map) map 192 drivers/pcmcia/xxs1500_ss.c if (map->flags & MAP_ATTRIB) map 193 drivers/pcmcia/xxs1500_ss.c map->static_start = sock->phys_attr + map->card_start; map 195 drivers/pcmcia/xxs1500_ss.c map->static_start = sock->phys_mem + map->card_start; map 407 drivers/pcmcia/yenta_socket.c int map; map 410 drivers/pcmcia/yenta_socket.c map = io->map; map 412 drivers/pcmcia/yenta_socket.c if (map > 1) map 415 drivers/pcmcia/yenta_socket.c enable = I365_ENA_IO(map); map 424 drivers/pcmcia/yenta_socket.c exca_writew(socket, I365_IO(map)+I365_W_START, io->start); map 425 drivers/pcmcia/yenta_socket.c exca_writew(socket, I365_IO(map)+I365_W_STOP, io->stop); map 427 drivers/pcmcia/yenta_socket.c ioctl = exca_readb(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map); map 429 drivers/pcmcia/yenta_socket.c ioctl |= I365_IOCTL_0WS(map); map 431 drivers/pcmcia/yenta_socket.c ioctl |= I365_IOCTL_16BIT(map); map 433 drivers/pcmcia/yenta_socket.c ioctl |= I365_IOCTL_IOCS16(map); map 445 drivers/pcmcia/yenta_socket.c int map; map 452 drivers/pcmcia/yenta_socket.c map = mem->map; map 457 drivers/pcmcia/yenta_socket.c if (map > 4 || start > stop || ((start ^ stop) >> 24) || map 461 
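
The i82365.h macros quoted above give each PCMCIA I/O window its own enable bit in ADDRWIN and its own nibble of the IOCTL register, and the i82092/i82365/pd6729/vrc4171 entries above (and the yenta_socket lines just below) show the same clear-mask-then-set sequence whenever a window is programmed. The short standalone demo below reuses those quoted macro definitions on plain local variables standing in for the registers.

#include <stdio.h>

/* Copied from the drivers/pcmcia/i82365.h lines quoted above. */
#define I365_ENA_IO(map) (0x40 << (map))
#define I365_IOCTL_MASK(map) (0x0F << (map<<2))
#define I365_IOCTL_WAIT(map) (0x08 << (map<<2))
#define I365_IOCTL_16BIT(map) (0x01 << (map<<2))

int main(void)
{
    unsigned char ioctl = 0xFF;   /* pretend current IOCTL register value */
    unsigned char addrwin = 0x00; /* pretend current ADDRWIN register value */
    int map = 1;                  /* program I/O window 1 */

    /* Clear window 1's nibble, then set only the bits this window needs. */
    ioctl &= ~I365_IOCTL_MASK(map);
    ioctl |= I365_IOCTL_WAIT(map) | I365_IOCTL_16BIT(map);

    /* Finally enable the window. */
    addrwin |= I365_ENA_IO(map);

    printf("IOCTL=0x%02x ADDRWIN=0x%02x\n", ioctl, addrwin);
    return 0;
}
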
drivers/pcmcia/yenta_socket.c enable = I365_ENA_MEM(map); map 468 drivers/pcmcia/yenta_socket.c exca_writeb(socket, CB_MEM_PAGE(map), start >> 24); map 475 drivers/pcmcia/yenta_socket.c exca_writew(socket, I365_MEM(map) + I365_W_START, word); map 491 drivers/pcmcia/yenta_socket.c exca_writew(socket, I365_MEM(map) + I365_W_STOP, word); map 498 drivers/pcmcia/yenta_socket.c exca_writew(socket, I365_MEM(map) + I365_W_OFF, word); map 557 drivers/pcmcia/yenta_socket.c io.map = i; map 561 drivers/pcmcia/yenta_socket.c mem.map = i; map 97 drivers/pcmcia/yenta_socket.h #define CB_MEM_PAGE(map) (0x40 + (map)) map 322 drivers/phy/motorola/phy-mapphone-mdm6600.c const struct phy_mdm6600_map *map = map 326 drivers/phy/motorola/phy-mapphone-mdm6600.c *gpio = devm_gpiod_get(dev, map->name, map->direction); map 329 drivers/phy/motorola/phy-mapphone-mdm6600.c map->name, PTR_ERR(*gpio)); map 1427 drivers/phy/tegra/xusb-tegra124.c .map = tegra124_usb2_port_map, map 1448 drivers/phy/tegra/xusb-tegra124.c .map = tegra124_ulpi_port_map, map 1469 drivers/phy/tegra/xusb-tegra124.c .map = tegra124_hsic_port_map, map 1652 drivers/phy/tegra/xusb-tegra124.c .map = tegra124_usb3_port_map, map 537 drivers/phy/tegra/xusb-tegra186.c .map = tegra186_usb2_port_map, map 596 drivers/phy/tegra/xusb-tegra186.c .map = tegra186_usb3_port_map, map 1779 drivers/phy/tegra/xusb-tegra210.c .map = tegra210_usb2_port_map, map 1800 drivers/phy/tegra/xusb-tegra210.c .map = tegra210_hsic_port_map, map 1945 drivers/phy/tegra/xusb-tegra210.c .map = tegra210_usb3_port_map, map 405 drivers/phy/tegra/xusb.c const struct tegra_xusb_lane_map *map, map 410 drivers/phy/tegra/xusb.c for (; map->type; map++) { map 411 drivers/phy/tegra/xusb.c if (port->index != map->port) map 414 drivers/phy/tegra/xusb.c lane = tegra_xusb_find_lane(port->padctl, map->type, map 415 drivers/phy/tegra/xusb.c map->index); map 424 drivers/phy/tegra/xusb.c map->type, map->index, match->soc->name); map 606 drivers/phy/tegra/xusb.c usb2->base.lane = usb2->base.ops->map(&usb2->base); map 658 drivers/phy/tegra/xusb.c ulpi->base.lane = ulpi->base.ops->map(&ulpi->base); map 706 drivers/phy/tegra/xusb.c hsic->base.lane = hsic->base.ops->map(&hsic->base); map 774 drivers/phy/tegra/xusb.c usb3->base.lane = usb3->base.ops->map(&usb3->base); map 281 drivers/phy/tegra/xusb.h const struct tegra_xusb_lane_map *map, map 355 drivers/phy/tegra/xusb.h struct tegra_xusb_lane *(*map)(struct tegra_xusb_port *port); map 892 drivers/pinctrl/actions/pinctrl-owl.c gpio_irq->map = devm_kcalloc(pctrl->dev, chip->ngpio, map 893 drivers/pinctrl/actions/pinctrl-owl.c sizeof(*gpio_irq->map), GFP_KERNEL); map 894 drivers/pinctrl/actions/pinctrl-owl.c if (!gpio_irq->map) map 901 drivers/pinctrl/actions/pinctrl-owl.c gpio_irq->map[offset + j] = gpio_irq->parents[i]; map 2633 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c struct regmap *map; map 2638 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c map = syscon_node_to_regmap(node); map 2640 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c if (IS_ERR(map)) map 2641 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c return map; map 2645 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c ctx->maps[ASPEED_IP_GFX] = map; map 2647 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c return map; map 2652 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c struct regmap *map; map 2657 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c map = syscon_node_to_regmap(node->parent); map 2659 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c if (IS_ERR(map)) map 2660 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c return map; map 2664 
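
The drivers/phy/tegra/xusb.c entries above iterate a lane map table with for (; map->type; map++), i.e. a sentinel-terminated array searched by port number. Here is a small self-contained sketch of that table-walk pattern; the table contents and field names are invented for illustration.

#include <stdio.h>

struct lane_map {
    int type;          /* 0 terminates the table */
    int port;
    int index;
};

static const struct lane_map *find_lane(const struct lane_map *map, int port)
{
    for (; map->type; map++) {
        if (map->port != port)
            continue;
        return map;    /* first entry matching this port */
    }
    return NULL;
}

int main(void)
{
    static const struct lane_map table[] = {
        { .type = 1, .port = 0, .index = 0 },
        { .type = 1, .port = 1, .index = 2 },
        { 0 },         /* sentinel */
    };
    const struct lane_map *m = find_lane(table, 1);

    if (m)
        printf("port 1 -> lane type %d index %d\n", m->type, m->index);
    return 0;
}
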
drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c ctx->maps[ASPEED_IP_LPC] = map; map 2666 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c return map; map 2681 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c struct regmap *map; map 2683 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c map = aspeed_g5_acquire_regmap(ctx, desc->ip); map 2684 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c if (IS_ERR(map)) { map 2688 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c return PTR_ERR(map); map 2723 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c struct regmap *map; map 2725 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c map = aspeed_g5_acquire_regmap(ctx, desc->ip); map 2726 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c if (IS_ERR(map)) { map 2730 drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c return PTR_ERR(map); map 41 drivers/pinctrl/aspeed/pinmux-aspeed.c bool enabled, struct regmap *map) map 47 drivers/pinctrl/aspeed/pinmux-aspeed.c if (!map) map 50 drivers/pinctrl/aspeed/pinmux-aspeed.c ret = regmap_read(map, desc->reg, &raw); map 799 drivers/pinctrl/aspeed/pinmux-aspeed.h struct regmap *map); map 675 drivers/pinctrl/bcm/pinctrl-bcm2835.c struct pinctrl_map *map = *maps; map 682 drivers/pinctrl/bcm/pinctrl-bcm2835.c map->type = PIN_MAP_TYPE_MUX_GROUP; map 683 drivers/pinctrl/bcm/pinctrl-bcm2835.c map->data.mux.group = bcm2835_gpio_groups[pin]; map 684 drivers/pinctrl/bcm/pinctrl-bcm2835.c map->data.mux.function = bcm2835_functions[fnum]; map 694 drivers/pinctrl/bcm/pinctrl-bcm2835.c struct pinctrl_map *map = *maps; map 707 drivers/pinctrl/bcm/pinctrl-bcm2835.c map->type = PIN_MAP_TYPE_CONFIGS_PIN; map 708 drivers/pinctrl/bcm/pinctrl-bcm2835.c map->data.configs.group_or_pin = bcm2835_gpio_pins[pin].name; map 709 drivers/pinctrl/bcm/pinctrl-bcm2835.c map->data.configs.configs = configs; map 710 drivers/pinctrl/bcm/pinctrl-bcm2835.c map->data.configs.num_configs = 1; map 718 drivers/pinctrl/bcm/pinctrl-bcm2835.c struct pinctrl_map **map, unsigned int *num_maps) map 728 drivers/pinctrl/bcm/pinctrl-bcm2835.c err = pinconf_generic_dt_node_to_map_all(pctldev, np, map, num_maps); map 810 drivers/pinctrl/bcm/pinctrl-bcm2835.c *map = maps; map 52 drivers/pinctrl/berlin/berlin.c struct pinctrl_map **map, map 61 drivers/pinctrl/berlin/berlin.c *map = NULL; map 78 drivers/pinctrl/berlin/berlin.c ret = pinctrl_utils_reserve_map(pctrl_dev, map, &reserved_maps, map 86 drivers/pinctrl/berlin/berlin.c ret = pinctrl_utils_add_map_mux(pctrl_dev, map, &reserved_maps, map 942 drivers/pinctrl/core.c const struct pinctrl_map *map) map 948 drivers/pinctrl/core.c state = find_state(p, map->name); map 950 drivers/pinctrl/core.c state = create_state(p, map->name); map 954 drivers/pinctrl/core.c if (map->type == PIN_MAP_TYPE_DUMMY_STATE) map 961 drivers/pinctrl/core.c setting->type = map->type; map 967 drivers/pinctrl/core.c get_pinctrl_dev_from_devname(map->ctrl_dev_name); map 971 drivers/pinctrl/core.c if (!strcmp(map->ctrl_dev_name, map->dev_name)) map 978 drivers/pinctrl/core.c map->ctrl_dev_name); map 982 drivers/pinctrl/core.c setting->dev_name = map->dev_name; map 984 drivers/pinctrl/core.c switch (map->type) { map 986 drivers/pinctrl/core.c ret = pinmux_map_to_setting(map, setting); map 990 drivers/pinctrl/core.c ret = pinconf_map_to_setting(map, setting); map 1030 drivers/pinctrl/core.c const struct pinctrl_map *map; map 1055 drivers/pinctrl/core.c for_each_maps(maps_node, i, map) { map 1057 drivers/pinctrl/core.c if (strcmp(map->dev_name, devname)) map 1067 drivers/pinctrl/core.c strcmp(dev_name(pctldev->dev), map->ctrl_dev_name)) map 1070 
drivers/pinctrl/core.c ret = add_setting(p, pctldev, map); map 1466 drivers/pinctrl/core.c void pinctrl_unregister_map(const struct pinctrl_map *map) map 1472 drivers/pinctrl/core.c if (maps_node->maps == map) { map 1764 drivers/pinctrl/core.c const struct pinctrl_map *map; map 1769 drivers/pinctrl/core.c for_each_maps(maps_node, i, map) { map 1771 drivers/pinctrl/core.c map->dev_name, map->name, map_type(map->type), map 1772 drivers/pinctrl/core.c map->type); map 1774 drivers/pinctrl/core.c if (map->type != PIN_MAP_TYPE_DUMMY_STATE) map 1776 drivers/pinctrl/core.c map->ctrl_dev_name); map 1778 drivers/pinctrl/core.c switch (map->type) { map 1780 drivers/pinctrl/core.c pinmux_show_map(s, map); map 1784 drivers/pinctrl/core.c pinconf_show_map(s, map); map 241 drivers/pinctrl/core.h void pinctrl_unregister_map(const struct pinctrl_map *map); map 25 drivers/pinctrl/devicetree.c struct pinctrl_map *map; map 30 drivers/pinctrl/devicetree.c struct pinctrl_map *map, unsigned num_maps) map 35 drivers/pinctrl/devicetree.c kfree_const(map[i].dev_name); map 36 drivers/pinctrl/devicetree.c map[i].dev_name = NULL; map 42 drivers/pinctrl/devicetree.c ops->dt_free_map(pctldev, map, num_maps); map 45 drivers/pinctrl/devicetree.c kfree(map); map 54 drivers/pinctrl/devicetree.c pinctrl_unregister_map(dt_map->map); map 56 drivers/pinctrl/devicetree.c dt_free_map(dt_map->pctldev, dt_map->map, map 66 drivers/pinctrl/devicetree.c struct pinctrl_map *map, unsigned num_maps) map 79 drivers/pinctrl/devicetree.c map[i].dev_name = devname; map 80 drivers/pinctrl/devicetree.c map[i].name = statename; map 82 drivers/pinctrl/devicetree.c map[i].ctrl_dev_name = dev_name(pctldev->dev); map 91 drivers/pinctrl/devicetree.c dt_map->map = map; map 95 drivers/pinctrl/devicetree.c return pinctrl_register_map(map, num_maps, false); map 98 drivers/pinctrl/devicetree.c dt_free_map(pctldev, map, num_maps); map 116 drivers/pinctrl/devicetree.c struct pinctrl_map *map; map 162 drivers/pinctrl/devicetree.c ret = ops->dt_node_to_map(pctldev, np_config, &map, &num_maps); map 167 drivers/pinctrl/devicetree.c return dt_remember_or_free_map(p, statename, pctldev, map, num_maps); map 172 drivers/pinctrl/devicetree.c struct pinctrl_map *map; map 174 drivers/pinctrl/devicetree.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 175 drivers/pinctrl/devicetree.c if (!map) map 179 drivers/pinctrl/devicetree.c map->type = PIN_MAP_TYPE_DUMMY_STATE; map 181 drivers/pinctrl/devicetree.c return dt_remember_or_free_map(p, statename, NULL, map, 1); map 57 drivers/pinctrl/freescale/pinctrl-imx.c struct pinctrl_map **map, unsigned *num_maps) map 93 drivers/pinctrl/freescale/pinctrl-imx.c *map = new_map; map 142 drivers/pinctrl/freescale/pinctrl-imx.c (*map)->data.mux.function, (*map)->data.mux.group, map_num); map 148 drivers/pinctrl/freescale/pinctrl-imx.c struct pinctrl_map *map, unsigned num_maps) map 150 drivers/pinctrl/freescale/pinctrl-imx.c kfree(map); map 220 drivers/pinctrl/freescale/pinctrl-imx1-core.c struct pinctrl_map **map, unsigned *num_maps) map 249 drivers/pinctrl/freescale/pinctrl-imx1-core.c *map = new_map; map 275 drivers/pinctrl/freescale/pinctrl-imx1-core.c (*map)->data.mux.function, (*map)->data.mux.group, map_num); map 281 drivers/pinctrl/freescale/pinctrl-imx1-core.c struct pinctrl_map *map, unsigned num_maps) map 283 drivers/pinctrl/freescale/pinctrl-imx1-core.c kfree(map); map 62 drivers/pinctrl/freescale/pinctrl-mxs.c struct pinctrl_map **map, unsigned *num_maps) map 125 drivers/pinctrl/freescale/pinctrl-mxs.c *map = new_map; map 
139 drivers/pinctrl/freescale/pinctrl-mxs.c struct pinctrl_map *map, unsigned num_maps) map 144 drivers/pinctrl/freescale/pinctrl-mxs.c if (map[i].type == PIN_MAP_TYPE_MUX_GROUP) map 145 drivers/pinctrl/freescale/pinctrl-mxs.c kfree(map[i].data.mux.group); map 146 drivers/pinctrl/freescale/pinctrl-mxs.c if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) map 147 drivers/pinctrl/freescale/pinctrl-mxs.c kfree(map[i].data.configs.configs); map 150 drivers/pinctrl/freescale/pinctrl-mxs.c kfree(map); map 101 drivers/pinctrl/intel/pinctrl-baytrail.c #define COMMUNITY(p, n, map) \ map 105 drivers/pinctrl/intel/pinctrl-baytrail.c .pad_map = (map),\ map 466 drivers/pinctrl/mediatek/pinctrl-mtk-common.c struct pinctrl_map **map, unsigned *reserved_maps, map 474 drivers/pinctrl/mediatek/pinctrl-mtk-common.c (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; map 475 drivers/pinctrl/mediatek/pinctrl-mtk-common.c (*map)[*num_maps].data.mux.group = grp->name; map 484 drivers/pinctrl/mediatek/pinctrl-mtk-common.c (*map)[*num_maps].data.mux.function = mtk_gpio_functions[fnum]; map 492 drivers/pinctrl/mediatek/pinctrl-mtk-common.c struct pinctrl_map **map, map 537 drivers/pinctrl/mediatek/pinctrl-mtk-common.c err = pinctrl_utils_reserve_map(pctldev, map, map 566 drivers/pinctrl/mediatek/pinctrl-mtk-common.c err = mtk_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map, map 572 drivers/pinctrl/mediatek/pinctrl-mtk-common.c err = pinctrl_utils_add_map_configs(pctldev, map, map 590 drivers/pinctrl/mediatek/pinctrl-mtk-common.c struct pinctrl_map **map, unsigned *num_maps) map 596 drivers/pinctrl/mediatek/pinctrl-mtk-common.c *map = NULL; map 601 drivers/pinctrl/mediatek/pinctrl-mtk-common.c ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map, map 604 drivers/pinctrl/mediatek/pinctrl-mtk-common.c pinctrl_utils_free_map(pctldev, *map, *num_maps); map 400 drivers/pinctrl/mediatek/pinctrl-paris.c struct pinctrl_map **map, map 409 drivers/pinctrl/mediatek/pinctrl-paris.c (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; map 410 drivers/pinctrl/mediatek/pinctrl-paris.c (*map)[*num_maps].data.mux.group = grp->name; map 419 drivers/pinctrl/mediatek/pinctrl-paris.c (*map)[*num_maps].data.mux.function = mtk_gpio_functions[fnum]; map 427 drivers/pinctrl/mediatek/pinctrl-paris.c struct pinctrl_map **map, map 471 drivers/pinctrl/mediatek/pinctrl-paris.c err = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, map 499 drivers/pinctrl/mediatek/pinctrl-paris.c err = mtk_pctrl_dt_node_to_map_func(hw, pin, func, grp, map, map 505 drivers/pinctrl/mediatek/pinctrl-paris.c err = pinctrl_utils_add_map_configs(pctldev, map, map 526 drivers/pinctrl/mediatek/pinctrl-paris.c struct pinctrl_map **map, map 533 drivers/pinctrl/mediatek/pinctrl-paris.c *map = NULL; map 538 drivers/pinctrl/mediatek/pinctrl-paris.c ret = mtk_pctrl_dt_subnode_to_map(pctldev, np, map, map 542 drivers/pinctrl/mediatek/pinctrl-paris.c pinctrl_utils_free_map(pctldev, *map, *num_maps); map 397 drivers/pinctrl/mvebu/pinctrl-mvebu.c struct pinctrl_map **map, map 406 drivers/pinctrl/mvebu/pinctrl-mvebu.c *map = NULL; map 423 drivers/pinctrl/mvebu/pinctrl-mvebu.c *map = kmalloc_array(nmaps, sizeof(**map), GFP_KERNEL); map 424 drivers/pinctrl/mvebu/pinctrl-mvebu.c if (!*map) map 443 drivers/pinctrl/mvebu/pinctrl-mvebu.c (*map)[n].type = PIN_MAP_TYPE_MUX_GROUP; map 444 drivers/pinctrl/mvebu/pinctrl-mvebu.c (*map)[n].data.mux.group = group; map 445 drivers/pinctrl/mvebu/pinctrl-mvebu.c (*map)[n].data.mux.function = function; map 455 
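
The pinctrl dt_node_to_map entries above (imx, mxs, mediatek, mvebu, and the abx500/nomadik helpers just below) all build an array of map records whose MUX_GROUP entries carry a group name and a function name. The standalone sketch below models that append-to-array step with a hypothetical struct; the kernel variants reserve space up front via pinctrl_utils_reserve_map, whereas this model simply grows the array per entry.

#include <stdio.h>
#include <stdlib.h>

enum map_type { MAP_TYPE_MUX_GROUP = 1 };

/* Illustrative record; it imitates, but is not, struct pinctrl_map. */
struct pmap {
    enum map_type type;
    const char *group;
    const char *function;
};

static int add_map_mux(struct pmap **map, unsigned *num_maps,
                       const char *group, const char *function)
{
    struct pmap *tmp = realloc(*map, (*num_maps + 1) * sizeof(**map));

    if (!tmp)
        return -1;
    *map = tmp;
    (*map)[*num_maps].type = MAP_TYPE_MUX_GROUP;
    (*map)[*num_maps].group = group;
    (*map)[*num_maps].function = function;
    (*num_maps)++;
    return 0;
}

int main(void)
{
    struct pmap *map = NULL;
    unsigned num_maps = 0;

    add_map_mux(&map, &num_maps, "uart0_grp", "uart0");
    add_map_mux(&map, &num_maps, "i2c1_grp", "i2c1");
    for (unsigned i = 0; i < num_maps; i++)
        printf("%s -> %s\n", map[i].group, map[i].function);
    free(map);
    return 0;
}
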
drivers/pinctrl/mvebu/pinctrl-mvebu.c struct pinctrl_map *map, unsigned num_maps) map 457 drivers/pinctrl/mvebu/pinctrl-mvebu.c kfree(map); map 792 drivers/pinctrl/mvebu/pinctrl-mvebu.c err = regmap_read(data->regmap.map, data->regmap.offset + off, &val); map 807 drivers/pinctrl/mvebu/pinctrl-mvebu.c return regmap_update_bits(data->regmap.map, data->regmap.offset + off, map 829 drivers/pinctrl/mvebu/pinctrl-mvebu.c mpp_data[i].regmap.map = regmap; map 22 drivers/pinctrl/mvebu/pinctrl-mvebu.h struct regmap *map; map 685 drivers/pinctrl/nomadik/pinctrl-abx500.c static int abx500_dt_add_map_mux(struct pinctrl_map **map, map 693 drivers/pinctrl/nomadik/pinctrl-abx500.c (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; map 694 drivers/pinctrl/nomadik/pinctrl-abx500.c (*map)[*num_maps].data.mux.group = group; map 695 drivers/pinctrl/nomadik/pinctrl-abx500.c (*map)[*num_maps].data.mux.function = function; map 701 drivers/pinctrl/nomadik/pinctrl-abx500.c static int abx500_dt_add_map_configs(struct pinctrl_map **map, map 716 drivers/pinctrl/nomadik/pinctrl-abx500.c (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN; map 718 drivers/pinctrl/nomadik/pinctrl-abx500.c (*map)[*num_maps].data.configs.group_or_pin = group; map 719 drivers/pinctrl/nomadik/pinctrl-abx500.c (*map)[*num_maps].data.configs.configs = dup_configs; map 720 drivers/pinctrl/nomadik/pinctrl-abx500.c (*map)[*num_maps].data.configs.num_configs = num_configs; map 741 drivers/pinctrl/nomadik/pinctrl-abx500.c struct pinctrl_map **map, map 759 drivers/pinctrl/nomadik/pinctrl-abx500.c ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, map 765 drivers/pinctrl/nomadik/pinctrl-abx500.c ret = abx500_dt_add_map_mux(map, reserved_maps, map 781 drivers/pinctrl/nomadik/pinctrl-abx500.c ret = pinctrl_utils_reserve_map(pctldev, map, map 790 drivers/pinctrl/nomadik/pinctrl-abx500.c ret = abx500_dt_add_map_configs(map, reserved_maps, map 803 drivers/pinctrl/nomadik/pinctrl-abx500.c struct pinctrl_map **map, unsigned *num_maps) map 810 drivers/pinctrl/nomadik/pinctrl-abx500.c *map = NULL; map 814 drivers/pinctrl/nomadik/pinctrl-abx500.c ret = abx500_dt_subnode_to_map(pctldev, np, map, map 817 drivers/pinctrl/nomadik/pinctrl-abx500.c pinctrl_utils_free_map(pctldev, *map, *num_maps); map 1271 drivers/pinctrl/nomadik/pinctrl-nomadik.c static int nmk_dt_add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps, map 1278 drivers/pinctrl/nomadik/pinctrl-nomadik.c (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; map 1279 drivers/pinctrl/nomadik/pinctrl-nomadik.c (*map)[*num_maps].data.mux.group = group; map 1280 drivers/pinctrl/nomadik/pinctrl-nomadik.c (*map)[*num_maps].data.mux.function = function; map 1286 drivers/pinctrl/nomadik/pinctrl-nomadik.c static int nmk_dt_add_map_configs(struct pinctrl_map **map, map 1301 drivers/pinctrl/nomadik/pinctrl-nomadik.c (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_PIN; map 1303 drivers/pinctrl/nomadik/pinctrl-nomadik.c (*map)[*num_maps].data.configs.group_or_pin = group; map 1304 drivers/pinctrl/nomadik/pinctrl-nomadik.c (*map)[*num_maps].data.configs.configs = dup_configs; map 1305 drivers/pinctrl/nomadik/pinctrl-nomadik.c (*map)[*num_maps].data.configs.num_configs = num_configs; map 1429 drivers/pinctrl/nomadik/pinctrl-nomadik.c struct pinctrl_map **map, map 1448 drivers/pinctrl/nomadik/pinctrl-nomadik.c ret = pinctrl_utils_reserve_map(pctldev, map, map 1455 drivers/pinctrl/nomadik/pinctrl-nomadik.c ret = nmk_dt_add_map_mux(map, reserved_maps, num_maps, map 1473 drivers/pinctrl/nomadik/pinctrl-nomadik.c 
ret = pinctrl_utils_reserve_map(pctldev, map, map 1482 drivers/pinctrl/nomadik/pinctrl-nomadik.c ret = nmk_dt_add_map_configs(map, reserved_maps, map 1496 drivers/pinctrl/nomadik/pinctrl-nomadik.c struct pinctrl_map **map, unsigned *num_maps) map 1503 drivers/pinctrl/nomadik/pinctrl-nomadik.c *map = NULL; map 1507 drivers/pinctrl/nomadik/pinctrl-nomadik.c ret = nmk_pinctrl_dt_subnode_to_map(pctldev, np, map, map 1510 drivers/pinctrl/nomadik/pinctrl-nomadik.c pinctrl_utils_free_map(pctldev, *map, *num_maps); map 1587 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c struct pinctrl_map **map, map 1594 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c map, num_maps, map 1599 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c struct pinctrl_map *map, u32 num_maps) map 1601 drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c kfree(map); map 291 drivers/pinctrl/pinconf-generic.c struct device_node *np, struct pinctrl_map **map, map 344 drivers/pinctrl/pinconf-generic.c ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, map 351 drivers/pinctrl/pinconf-generic.c ret = pinctrl_utils_add_map_mux(pctldev, map, map 359 drivers/pinctrl/pinconf-generic.c ret = pinctrl_utils_add_map_configs(pctldev, map, map 375 drivers/pinctrl/pinconf-generic.c struct device_node *np_config, struct pinctrl_map **map, map 383 drivers/pinctrl/pinconf-generic.c *map = NULL; map 386 drivers/pinctrl/pinconf-generic.c ret = pinconf_generic_dt_subnode_to_map(pctldev, np_config, map, map 392 drivers/pinctrl/pinconf-generic.c ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map, map 400 drivers/pinctrl/pinconf-generic.c pinctrl_utils_free_map(pctldev, *map, *num_maps); map 406 drivers/pinctrl/pinconf-generic.c struct pinctrl_map *map, map 409 drivers/pinctrl/pinconf-generic.c pinctrl_utils_free_map(pctldev, map, num_maps); map 38 drivers/pinctrl/pinconf.c int pinconf_validate_map(const struct pinctrl_map *map, int i) map 40 drivers/pinctrl/pinconf.c if (!map->data.configs.group_or_pin) { map 42 drivers/pinctrl/pinconf.c map->name, i); map 46 drivers/pinctrl/pinconf.c if (!map->data.configs.num_configs || map 47 drivers/pinctrl/pinconf.c !map->data.configs.configs) { map 49 drivers/pinctrl/pinconf.c map->name, i); map 107 drivers/pinctrl/pinconf.c int pinconf_map_to_setting(const struct pinctrl_map *map, map 116 drivers/pinctrl/pinconf.c map->data.configs.group_or_pin); map 119 drivers/pinctrl/pinconf.c map->data.configs.group_or_pin); map 126 drivers/pinctrl/pinconf.c map->data.configs.group_or_pin); map 129 drivers/pinctrl/pinconf.c map->data.configs.group_or_pin); map 138 drivers/pinctrl/pinconf.c setting->data.configs.num_configs = map->data.configs.num_configs; map 139 drivers/pinctrl/pinconf.c setting->data.configs.configs = map->data.configs.configs; map 236 drivers/pinctrl/pinconf.c void pinconf_show_map(struct seq_file *s, const struct pinctrl_map *map) map 240 drivers/pinctrl/pinconf.c pctldev = get_pinctrl_dev_from_devname(map->ctrl_dev_name); map 242 drivers/pinctrl/pinconf.c switch (map->type) { map 253 drivers/pinctrl/pinconf.c seq_printf(s, "%s\n", map->data.configs.group_or_pin); map 255 drivers/pinctrl/pinconf.c pinconf_show_config(s, pctldev, map->data.configs.configs, map 256 drivers/pinctrl/pinconf.c map->data.configs.num_configs); map 16 drivers/pinctrl/pinconf.h int pinconf_validate_map(const struct pinctrl_map *map, int i); map 17 drivers/pinctrl/pinconf.h int pinconf_map_to_setting(const struct pinctrl_map *map, map 41 drivers/pinctrl/pinconf.h static inline int pinconf_validate_map(const struct pinctrl_map *map, int i) map 46 
drivers/pinctrl/pinconf.h static inline int pinconf_map_to_setting(const struct pinctrl_map *map, map 71 drivers/pinctrl/pinconf.h void pinconf_show_map(struct seq_file *s, const struct pinctrl_map *map); map 80 drivers/pinctrl/pinconf.h const struct pinctrl_map *map) map 480 drivers/pinctrl/pinctrl-at91-pio4.c struct pinctrl_map **map, map 517 drivers/pinctrl/pinctrl-at91-pio4.c ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, num_maps, map 534 drivers/pinctrl/pinctrl-at91-pio4.c pinctrl_utils_add_map_mux(pctldev, map, reserved_maps, num_maps, map 538 drivers/pinctrl/pinctrl-at91-pio4.c ret = pinctrl_utils_add_map_configs(pctldev, map, map 554 drivers/pinctrl/pinctrl-at91-pio4.c struct pinctrl_map **map, map 561 drivers/pinctrl/pinctrl-at91-pio4.c *map = NULL; map 570 drivers/pinctrl/pinctrl-at91-pio4.c ret = atmel_pctl_dt_subnode_to_map(pctldev, np_config, map, map 574 drivers/pinctrl/pinctrl-at91-pio4.c ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map, map 584 drivers/pinctrl/pinctrl-at91-pio4.c pinctrl_utils_free_map(pctldev, *map, *num_maps); map 267 drivers/pinctrl/pinctrl-at91.c struct pinctrl_map **map, unsigned *num_maps) map 293 drivers/pinctrl/pinctrl-at91.c *map = new_map; map 318 drivers/pinctrl/pinctrl-at91.c (*map)->data.mux.function, (*map)->data.mux.group, map_num); map 324 drivers/pinctrl/pinctrl-at91.c struct pinctrl_map *map, unsigned num_maps) map 53 drivers/pinctrl/pinctrl-gemini.c struct regmap *map; map 2214 drivers/pinctrl/pinctrl-gemini.c regmap_read(pmx->map, GLOBAL_MISC_CTRL, &before); map 2215 drivers/pinctrl/pinctrl-gemini.c regmap_update_bits(pmx->map, GLOBAL_MISC_CTRL, map 2218 drivers/pinctrl/pinctrl-gemini.c regmap_read(pmx->map, GLOBAL_MISC_CTRL, &after); map 2390 drivers/pinctrl/pinctrl-gemini.c regmap_read(pmx->map, conf->reg, &val); map 2430 drivers/pinctrl/pinctrl-gemini.c regmap_update_bits(pmx->map, conf->reg, conf->mask, arg); map 2492 drivers/pinctrl/pinctrl-gemini.c regmap_update_bits(pmx->map, GLOBAL_IODRIVE, map 2526 drivers/pinctrl/pinctrl-gemini.c struct regmap *map; map 2545 drivers/pinctrl/pinctrl-gemini.c map = syscon_node_to_regmap(parent->of_node); map 2546 drivers/pinctrl/pinctrl-gemini.c if (IS_ERR(map)) { map 2548 drivers/pinctrl/pinctrl-gemini.c return PTR_ERR(map); map 2550 drivers/pinctrl/pinctrl-gemini.c pmx->map = map; map 2553 drivers/pinctrl/pinctrl-gemini.c ret = regmap_read(map, GLOBAL_WORD_ID, &val); map 2579 drivers/pinctrl/pinctrl-gemini.c ret = regmap_read(map, GLOBAL_MISC_CTRL, &val); map 2593 drivers/pinctrl/pinctrl-gemini.c regmap_read(map, GLOBAL_STATUS, &val); map 79 drivers/pinctrl/pinctrl-ingenic.c struct regmap *map; map 1337 drivers/pinctrl/pinctrl-ingenic.c regmap_read(jzgc->jzpc->map, jzgc->reg_base + reg, &val); map 1350 drivers/pinctrl/pinctrl-ingenic.c regmap_write(jzgc->jzpc->map, jzgc->reg_base + reg, BIT(offset)); map 1361 drivers/pinctrl/pinctrl-ingenic.c regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_BASE + reg, BIT(offset)); map 1366 drivers/pinctrl/pinctrl-ingenic.c regmap_write(jzgc->jzpc->map, X1000_GPIO_PZ_GID2LD, map 1609 drivers/pinctrl/pinctrl-ingenic.c regmap_write(jzpc->map, offt * 0x100 + map 1618 drivers/pinctrl/pinctrl-ingenic.c regmap_write(jzpc->map, X1000_GPIO_PZ_BASE + map 1625 drivers/pinctrl/pinctrl-ingenic.c regmap_write(jzpc->map, X1000_GPIO_PZ_GID2LD, pin / PINS_PER_GPIO_CHIP); map 1635 drivers/pinctrl/pinctrl-ingenic.c regmap_read(jzpc->map, offt * 0x100 + reg, &val); map 2036 drivers/pinctrl/pinctrl-ingenic.c jzpc->map = devm_regmap_init_mmio(dev, base, map 2038 
drivers/pinctrl/pinctrl-ingenic.c if (IS_ERR(jzpc->map)) { map 2040 drivers/pinctrl/pinctrl-ingenic.c return PTR_ERR(jzpc->map); map 2124 drivers/pinctrl/pinctrl-ingenic.c dev_set_drvdata(dev, jzpc->map); map 47 drivers/pinctrl/pinctrl-lantiq.c struct pinctrl_map *map, unsigned num_maps) map 52 drivers/pinctrl/pinctrl-lantiq.c if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || map 53 drivers/pinctrl/pinctrl-lantiq.c map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) map 54 drivers/pinctrl/pinctrl-lantiq.c kfree(map[i].data.configs.configs); map 55 drivers/pinctrl/pinctrl-lantiq.c kfree(map); map 67 drivers/pinctrl/pinctrl-lantiq.c struct pinctrl_map **map) map 94 drivers/pinctrl/pinctrl-lantiq.c (*map)->type = PIN_MAP_TYPE_MUX_GROUP; map 95 drivers/pinctrl/pinctrl-lantiq.c (*map)->name = function; map 96 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.mux.group = group; map 97 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.mux.function = function; map 98 drivers/pinctrl/pinctrl-lantiq.c (*map)++; map 116 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.configs.configs = kmemdup(configs, map 119 drivers/pinctrl/pinctrl-lantiq.c (*map)->type = PIN_MAP_TYPE_CONFIGS_PIN; map 120 drivers/pinctrl/pinctrl-lantiq.c (*map)->name = pin; map 121 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.configs.group_or_pin = pin; map 122 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.configs.num_configs = num_configs; map 123 drivers/pinctrl/pinctrl-lantiq.c (*map)++; map 126 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.configs.configs = kmemdup(configs, map 129 drivers/pinctrl/pinctrl-lantiq.c (*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP; map 130 drivers/pinctrl/pinctrl-lantiq.c (*map)->name = group; map 131 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.configs.group_or_pin = group; map 132 drivers/pinctrl/pinctrl-lantiq.c (*map)->data.configs.num_configs = num_configs; map 133 drivers/pinctrl/pinctrl-lantiq.c (*map)++; map 149 drivers/pinctrl/pinctrl-lantiq.c struct pinctrl_map **map, map 158 drivers/pinctrl/pinctrl-lantiq.c *map = kzalloc(array3_size(max_maps, sizeof(struct pinctrl_map), 2), map 160 drivers/pinctrl/pinctrl-lantiq.c if (!*map) map 162 drivers/pinctrl/pinctrl-lantiq.c tmp = *map; map 166 drivers/pinctrl/pinctrl-lantiq.c *num_maps = ((int)(tmp - *map)); map 147 drivers/pinctrl/pinctrl-ocelot.c struct regmap *map; map 420 drivers/pinctrl/pinctrl-ocelot.c regmap_update_bits(info->map, REG_ALT(0, info, pin->pin), map 422 drivers/pinctrl/pinctrl-ocelot.c regmap_update_bits(info->map, REG_ALT(1, info, pin->pin), map 437 drivers/pinctrl/pinctrl-ocelot.c regmap_update_bits(info->map, REG(OCELOT_GPIO_OE, info, pin), BIT(p), map 450 drivers/pinctrl/pinctrl-ocelot.c regmap_update_bits(info->map, REG_ALT(0, info, offset), map 452 drivers/pinctrl/pinctrl-ocelot.c regmap_update_bits(info->map, REG_ALT(1, info, offset), map 581 drivers/pinctrl/pinctrl-ocelot.c regmap_read(info->map, REG(OCELOT_GPIO_IN, info, offset), &val); map 592 drivers/pinctrl/pinctrl-ocelot.c regmap_write(info->map, REG(OCELOT_GPIO_OUT_SET, info, offset), map 595 drivers/pinctrl/pinctrl-ocelot.c regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset), map 605 drivers/pinctrl/pinctrl-ocelot.c regmap_read(info->map, REG(OCELOT_GPIO_OE, info, offset), &val); map 623 drivers/pinctrl/pinctrl-ocelot.c regmap_write(info->map, REG(OCELOT_GPIO_OUT_SET, info, offset), map 626 drivers/pinctrl/pinctrl-ocelot.c regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset), map 649 drivers/pinctrl/pinctrl-ocelot.c regmap_update_bits(info->map, 
REG(OCELOT_GPIO_INTR_ENA, info, gpio), map 659 drivers/pinctrl/pinctrl-ocelot.c regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), map 669 drivers/pinctrl/pinctrl-ocelot.c regmap_write_bits(info->map, REG(OCELOT_GPIO_INTR, info, gpio), map 718 drivers/pinctrl/pinctrl-ocelot.c regmap_read(info->map, OCELOT_GPIO_INTR_IDENT + 4 * i, &reg); map 803 drivers/pinctrl/pinctrl-ocelot.c info->map = devm_regmap_init_mmio(dev, base, &regmap_config); map 804 drivers/pinctrl/pinctrl-ocelot.c if (IS_ERR(info->map)) { map 806 drivers/pinctrl/pinctrl-ocelot.c return PTR_ERR(info->map); map 808 drivers/pinctrl/pinctrl-ocelot.c dev_set_drvdata(dev, info->map); map 488 drivers/pinctrl/pinctrl-rockchip.c struct pinctrl_map **map, unsigned *num_maps) map 514 drivers/pinctrl/pinctrl-rockchip.c *map = new_map; map 539 drivers/pinctrl/pinctrl-rockchip.c (*map)->data.mux.function, (*map)->data.mux.group, map_num); map 545 drivers/pinctrl/pinctrl-rockchip.c struct pinctrl_map *map, unsigned num_maps) map 984 drivers/pinctrl/pinctrl-rza1.c struct pinctrl_map **map, map 1065 drivers/pinctrl/pinctrl-rza1.c *map = kzalloc(sizeof(**map), GFP_KERNEL); map 1066 drivers/pinctrl/pinctrl-rza1.c if (!*map) { map 1071 drivers/pinctrl/pinctrl-rza1.c (*map)->type = PIN_MAP_TYPE_MUX_GROUP; map 1072 drivers/pinctrl/pinctrl-rza1.c (*map)->data.mux.group = np->name; map 1073 drivers/pinctrl/pinctrl-rza1.c (*map)->data.mux.function = np->name; map 1093 drivers/pinctrl/pinctrl-rza1.c struct pinctrl_map *map, unsigned int num_maps) map 1095 drivers/pinctrl/pinctrl-rza1.c kfree(map); map 327 drivers/pinctrl/pinctrl-rza2.c struct pinctrl_map **map, map 383 drivers/pinctrl/pinctrl-rza2.c *map = kzalloc(sizeof(**map), GFP_KERNEL); map 384 drivers/pinctrl/pinctrl-rza2.c if (!*map) { map 389 drivers/pinctrl/pinctrl-rza2.c (*map)->type = PIN_MAP_TYPE_MUX_GROUP; map 390 drivers/pinctrl/pinctrl-rza2.c (*map)->data.mux.group = np->name; map 391 drivers/pinctrl/pinctrl-rza2.c (*map)->data.mux.function = np->name; map 408 drivers/pinctrl/pinctrl-rza2.c struct pinctrl_map *map, unsigned int num_maps) map 410 drivers/pinctrl/pinctrl-rza2.c kfree(map); map 335 drivers/pinctrl/pinctrl-rzn1.c struct pinctrl_map **map, map 368 drivers/pinctrl/pinctrl-rzn1.c ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps, map 374 drivers/pinctrl/pinctrl-rzn1.c ret = pinctrl_utils_add_map_mux(pctldev, map, &reserved_maps, num_maps, map 381 drivers/pinctrl/pinctrl-rzn1.c ret = pinctrl_utils_add_map_configs(pctldev, map, map 400 drivers/pinctrl/pinctrl-rzn1.c struct pinctrl_map **map, map 406 drivers/pinctrl/pinctrl-rzn1.c *map = NULL; map 409 drivers/pinctrl/pinctrl-rzn1.c ret = rzn1_dt_node_to_map_one(pctldev, np, map, num_maps); map 414 drivers/pinctrl/pinctrl-rzn1.c ret = rzn1_dt_node_to_map_one(pctldev, child, map, num_maps); map 290 drivers/pinctrl/pinctrl-single.c struct pinctrl_map *map, unsigned num_maps) map 295 drivers/pinctrl/pinctrl-single.c devm_kfree(pcs->dev, map); map 300 drivers/pinctrl/pinctrl-single.c struct pinctrl_map **map, unsigned *num_maps); map 897 drivers/pinctrl/pinctrl-single.c struct pinctrl_map **map) map 900 drivers/pinctrl/pinctrl-single.c struct pinctrl_map *m = *map; map 982 drivers/pinctrl/pinctrl-single.c struct pinctrl_map **map, map 1053 drivers/pinctrl/pinctrl-single.c (*map)->type = PIN_MAP_TYPE_MUX_GROUP; map 1054 drivers/pinctrl/pinctrl-single.c (*map)->data.mux.group = np->name; map 1055 drivers/pinctrl/pinctrl-single.c (*map)->data.mux.function = np->name; map 1058 
drivers/pinctrl/pinctrl-single.c res = pcs_parse_pinconf(pcs, np, function, map); map 1086 drivers/pinctrl/pinctrl-single.c struct pinctrl_map **map, map 1195 drivers/pinctrl/pinctrl-single.c (*map)->type = PIN_MAP_TYPE_MUX_GROUP; map 1196 drivers/pinctrl/pinctrl-single.c (*map)->data.mux.group = np->name; map 1197 drivers/pinctrl/pinctrl-single.c (*map)->data.mux.function = np->name; map 1232 drivers/pinctrl/pinctrl-single.c struct pinctrl_map **map, unsigned *num_maps) map 1241 drivers/pinctrl/pinctrl-single.c *map = devm_kcalloc(pcs->dev, 2, sizeof(**map), GFP_KERNEL); map 1242 drivers/pinctrl/pinctrl-single.c if (!*map) map 1254 drivers/pinctrl/pinctrl-single.c ret = pcs_parse_bits_in_pinctrl_entry(pcs, np_config, map, map 1262 drivers/pinctrl/pinctrl-single.c ret = pcs_parse_one_pinctrl_entry(pcs, np_config, map, map 1276 drivers/pinctrl/pinctrl-single.c devm_kfree(pcs->dev, *map); map 1537 drivers/pinctrl/pinctrl-single.c .map = pcs_irqdomain_map, map 808 drivers/pinctrl/pinctrl-st.c struct device_node *np, struct pinctrl_map **map, unsigned *num_maps) map 835 drivers/pinctrl/pinctrl-st.c *map = new_map; map 852 drivers/pinctrl/pinctrl-st.c (*map)->data.mux.function, grp->name, map_num); map 858 drivers/pinctrl/pinctrl-st.c struct pinctrl_map *map, unsigned num_maps) map 111 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_read(pctl->stmfx->map, reg, &value); map 122 drivers/pinctrl/pinctrl-stmfx.c regmap_write_bits(pctl->stmfx->map, reg + get_reg(offset), map 134 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_read(pctl->stmfx->map, reg, &val); map 149 drivers/pinctrl/pinctrl-stmfx.c return regmap_write_bits(pctl->stmfx->map, reg, mask, 0); map 161 drivers/pinctrl/pinctrl-stmfx.c return regmap_write_bits(pctl->stmfx->map, reg, mask, mask); map 171 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_read(pctl->stmfx->map, reg, &pupd); map 184 drivers/pinctrl/pinctrl-stmfx.c return regmap_write_bits(pctl->stmfx->map, reg, mask, pupd ? mask : 0); map 194 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_read(pctl->stmfx->map, reg, &type); map 207 drivers/pinctrl/pinctrl-stmfx.c return regmap_write_bits(pctl->stmfx->map, reg, mask, type ? 
mask : 0); map 498 drivers/pinctrl/pinctrl-stmfx.c regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT, map 500 drivers/pinctrl/pinctrl-stmfx.c regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE, map 502 drivers/pinctrl/pinctrl-stmfx.c regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC, map 524 drivers/pinctrl/pinctrl-stmfx.c regmap_write_bits(pctl->stmfx->map, map 530 drivers/pinctrl/pinctrl-stmfx.c regmap_write_bits(pctl->stmfx->map, map 545 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_IRQ_GPI_PENDING, map 550 drivers/pinctrl/pinctrl-stmfx.c regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC, map 559 drivers/pinctrl/pinctrl-stmfx.c regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC, map 707 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_STATE, map 711 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_DIR, map 715 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_TYPE, map 719 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_read(pctl->stmfx->map, STMFX_REG_GPIO_PUPD, map 731 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_DIR, map 735 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_TYPE, map 739 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPIO_PUPD, map 743 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_GPO_SET, map 747 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_EVT, map 751 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_TYPE, map 755 drivers/pinctrl/pinctrl-stmfx.c ret = regmap_bulk_write(pctl->stmfx->map, STMFX_REG_IRQ_GPI_SRC, map 541 drivers/pinctrl/pinctrl-tb10x.c struct pinctrl_map **map, unsigned *num_maps) map 553 drivers/pinctrl/pinctrl-tb10x.c *map = NULL; map 556 drivers/pinctrl/pinctrl-tb10x.c ret = pinctrl_utils_reserve_map(pctl, map, &reserved_maps, map 561 drivers/pinctrl/pinctrl-tb10x.c ret = pinctrl_utils_add_map_mux(pctl, map, &reserved_maps, map 32 drivers/pinctrl/pinctrl-utils.c struct pinctrl_map **map, unsigned *reserved_maps, map 42 drivers/pinctrl/pinctrl-utils.c new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL); map 50 drivers/pinctrl/pinctrl-utils.c *map = new_map; map 57 drivers/pinctrl/pinctrl-utils.c struct pinctrl_map **map, unsigned *reserved_maps, map 64 drivers/pinctrl/pinctrl-utils.c (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; map 65 drivers/pinctrl/pinctrl-utils.c (*map)[*num_maps].data.mux.group = group; map 66 drivers/pinctrl/pinctrl-utils.c (*map)[*num_maps].data.mux.function = function; map 74 drivers/pinctrl/pinctrl-utils.c struct pinctrl_map **map, unsigned *reserved_maps, map 89 drivers/pinctrl/pinctrl-utils.c (*map)[*num_maps].type = type; map 90 drivers/pinctrl/pinctrl-utils.c (*map)[*num_maps].data.configs.group_or_pin = group; map 91 drivers/pinctrl/pinctrl-utils.c (*map)[*num_maps].data.configs.configs = dup_configs; map 92 drivers/pinctrl/pinctrl-utils.c (*map)[*num_maps].data.configs.num_configs = num_configs; map 124 drivers/pinctrl/pinctrl-utils.c struct pinctrl_map *map, unsigned num_maps) map 129 drivers/pinctrl/pinctrl-utils.c switch (map[i].type) { map 132 drivers/pinctrl/pinctrl-utils.c kfree(map[i].data.configs.configs); map 138 drivers/pinctrl/pinctrl-utils.c kfree(map); map 
26 drivers/pinctrl/pinctrl-utils.h struct pinctrl_map **map, unsigned *reserved_maps, map 29 drivers/pinctrl/pinctrl-utils.h struct pinctrl_map **map, unsigned *reserved_maps, map 33 drivers/pinctrl/pinctrl-utils.h struct pinctrl_map **map, unsigned *reserved_maps, map 41 drivers/pinctrl/pinctrl-utils.h struct pinctrl_map *map, unsigned num_maps); map 62 drivers/pinctrl/pinmux.c int pinmux_validate_map(const struct pinctrl_map *map, int i) map 64 drivers/pinctrl/pinmux.c if (!map->data.mux.function) { map 66 drivers/pinctrl/pinmux.c map->name, i); map 336 drivers/pinctrl/pinmux.c int pinmux_map_to_setting(const struct pinctrl_map *map, map 351 drivers/pinctrl/pinmux.c ret = pinmux_func_name_to_selector(pctldev, map->data.mux.function); map 354 drivers/pinctrl/pinmux.c map->data.mux.function); map 363 drivers/pinctrl/pinmux.c map->data.mux.function); map 369 drivers/pinctrl/pinmux.c map->data.mux.function); map 372 drivers/pinctrl/pinmux.c if (map->data.mux.group) { map 373 drivers/pinctrl/pinmux.c group = map->data.mux.group; map 378 drivers/pinctrl/pinmux.c group, map->data.mux.function); map 388 drivers/pinctrl/pinmux.c map->data.mux.group); map 649 drivers/pinctrl/pinmux.c void pinmux_show_map(struct seq_file *s, const struct pinctrl_map *map) map 652 drivers/pinctrl/pinmux.c map->data.mux.group ? map->data.mux.group : "(default)", map 653 drivers/pinctrl/pinmux.c map->data.mux.function); map 16 drivers/pinctrl/pinmux.h int pinmux_validate_map(const struct pinctrl_map *map, int i); map 29 drivers/pinctrl/pinmux.h int pinmux_map_to_setting(const struct pinctrl_map *map, map 42 drivers/pinctrl/pinmux.h static inline int pinmux_validate_map(const struct pinctrl_map *map, int i) map 73 drivers/pinctrl/pinmux.h static inline int pinmux_map_to_setting(const struct pinctrl_map *map, map 96 drivers/pinctrl/pinmux.h void pinmux_show_map(struct seq_file *s, const struct pinctrl_map *map); map 105 drivers/pinctrl/pinmux.h const struct pinctrl_map *map) map 170 drivers/pinctrl/qcom/pinctrl-spmi-gpio.c struct regmap *map; map 220 drivers/pinctrl/qcom/pinctrl-spmi-gpio.c ret = regmap_read(state->map, pad->base + addr, &val); map 235 drivers/pinctrl/qcom/pinctrl-spmi-gpio.c ret = regmap_write(state->map, pad->base + addr, val); map 995 drivers/pinctrl/qcom/pinctrl-spmi-gpio.c state->map = dev_get_regmap(dev->parent, NULL); map 143 drivers/pinctrl/qcom/pinctrl-spmi-mpp.c struct regmap *map; map 182 drivers/pinctrl/qcom/pinctrl-spmi-mpp.c ret = regmap_read(state->map, pad->base + addr, &val); map 197 drivers/pinctrl/qcom/pinctrl-spmi-mpp.c ret = regmap_write(state->map, pad->base + addr, val); map 830 drivers/pinctrl/qcom/pinctrl-spmi-mpp.c state->map = dev_get_regmap(dev->parent, NULL); map 239 drivers/pinctrl/samsung/pinctrl-exynos.c .map = exynos_eint_irq_map, map 439 drivers/pinctrl/samsung/pinctrl-s3c24xx.c .map = s3c24xx_gpf_irq_map, map 458 drivers/pinctrl/samsung/pinctrl-s3c24xx.c .map = s3c24xx_gpg_irq_map, map 401 drivers/pinctrl/samsung/pinctrl-s3c64xx.c .map = s3c64xx_gpio_irq_map, map 674 drivers/pinctrl/samsung/pinctrl-s3c64xx.c .map = s3c64xx_eint0_irq_map, map 78 drivers/pinctrl/samsung/pinctrl-samsung.c static int reserve_map(struct device *dev, struct pinctrl_map **map, map 89 drivers/pinctrl/samsung/pinctrl-samsung.c new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL); map 95 drivers/pinctrl/samsung/pinctrl-samsung.c *map = new_map; map 101 drivers/pinctrl/samsung/pinctrl-samsung.c static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps, map 108 
drivers/pinctrl/samsung/pinctrl-samsung.c (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; map 109 drivers/pinctrl/samsung/pinctrl-samsung.c (*map)[*num_maps].data.mux.group = group; map 110 drivers/pinctrl/samsung/pinctrl-samsung.c (*map)[*num_maps].data.mux.function = function; map 116 drivers/pinctrl/samsung/pinctrl-samsung.c static int add_map_configs(struct device *dev, struct pinctrl_map **map, map 131 drivers/pinctrl/samsung/pinctrl-samsung.c (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP; map 132 drivers/pinctrl/samsung/pinctrl-samsung.c (*map)[*num_maps].data.configs.group_or_pin = group; map 133 drivers/pinctrl/samsung/pinctrl-samsung.c (*map)[*num_maps].data.configs.configs = dup_configs; map 134 drivers/pinctrl/samsung/pinctrl-samsung.c (*map)[*num_maps].data.configs.num_configs = num_configs; map 161 drivers/pinctrl/samsung/pinctrl-samsung.c struct pinctrl_map *map, map 167 drivers/pinctrl/samsung/pinctrl-samsung.c if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) map 168 drivers/pinctrl/samsung/pinctrl-samsung.c kfree(map[i].data.configs.configs); map 170 drivers/pinctrl/samsung/pinctrl-samsung.c kfree(map); map 176 drivers/pinctrl/samsung/pinctrl-samsung.c struct pinctrl_map **map, map 220 drivers/pinctrl/samsung/pinctrl-samsung.c ret = reserve_map(dev, map, reserved_maps, num_maps, reserve); map 226 drivers/pinctrl/samsung/pinctrl-samsung.c ret = add_map_mux(map, reserved_maps, map 233 drivers/pinctrl/samsung/pinctrl-samsung.c ret = add_map_configs(dev, map, reserved_maps, map 250 drivers/pinctrl/samsung/pinctrl-samsung.c struct pinctrl_map **map, map 261 drivers/pinctrl/samsung/pinctrl-samsung.c *map = NULL; map 266 drivers/pinctrl/samsung/pinctrl-samsung.c np_config, map, map 271 drivers/pinctrl/samsung/pinctrl-samsung.c ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map, map 274 drivers/pinctrl/samsung/pinctrl-samsung.c samsung_dt_free_map(pctldev, *map, *num_maps); map 81 drivers/pinctrl/sh-pfc/pinctrl.c static int sh_pfc_map_add_config(struct pinctrl_map *map, map 94 drivers/pinctrl/sh-pfc/pinctrl.c map->type = type; map 95 drivers/pinctrl/sh-pfc/pinctrl.c map->data.configs.group_or_pin = group_or_pin; map 96 drivers/pinctrl/sh-pfc/pinctrl.c map->data.configs.configs = cfgs; map 97 drivers/pinctrl/sh-pfc/pinctrl.c map->data.configs.num_configs = num_configs; map 104 drivers/pinctrl/sh-pfc/pinctrl.c struct pinctrl_map **map, map 109 drivers/pinctrl/sh-pfc/pinctrl.c struct pinctrl_map *maps = *map; map 197 drivers/pinctrl/sh-pfc/pinctrl.c *map = maps; map 242 drivers/pinctrl/sh-pfc/pinctrl.c struct pinctrl_map *map, unsigned num_maps) map 246 drivers/pinctrl/sh-pfc/pinctrl.c if (map == NULL) map 250 drivers/pinctrl/sh-pfc/pinctrl.c if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP || map 251 drivers/pinctrl/sh-pfc/pinctrl.c map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) map 252 drivers/pinctrl/sh-pfc/pinctrl.c kfree(map[i].data.configs.configs); map 255 drivers/pinctrl/sh-pfc/pinctrl.c kfree(map); map 260 drivers/pinctrl/sh-pfc/pinctrl.c struct pinctrl_map **map, unsigned *num_maps) map 268 drivers/pinctrl/sh-pfc/pinctrl.c *map = NULL; map 273 drivers/pinctrl/sh-pfc/pinctrl.c ret = sh_pfc_dt_subnode_to_map(pctldev, child, map, num_maps, map 283 drivers/pinctrl/sh-pfc/pinctrl.c ret = sh_pfc_dt_subnode_to_map(pctldev, np, map, num_maps, map 297 drivers/pinctrl/sh-pfc/pinctrl.c sh_pfc_dt_free_map(pctldev, *map, *num_maps); map 5298 drivers/pinctrl/sirf/pinctrl-atlas7.c struct pinctrl_map **map, map 5301 drivers/pinctrl/sirf/pinctrl-atlas7.c return 
pinconf_generic_dt_node_to_map(pctldev, np_config, map, map 5306 drivers/pinctrl/sirf/pinctrl-atlas7.c struct pinctrl_map *map, u32 num_maps) map 5308 drivers/pinctrl/sirf/pinctrl-atlas7.c kfree(map); map 80 drivers/pinctrl/sirf/pinctrl-sirf.c struct pinctrl_map **map, unsigned *num_maps) map 110 drivers/pinctrl/sirf/pinctrl-sirf.c *map = kcalloc(count, sizeof(**map), GFP_KERNEL); map 111 drivers/pinctrl/sirf/pinctrl-sirf.c if (!*map) map 117 drivers/pinctrl/sirf/pinctrl-sirf.c (*map)[index].type = PIN_MAP_TYPE_MUX_GROUP; map 118 drivers/pinctrl/sirf/pinctrl-sirf.c (*map)[index].data.mux.group = group; map 119 drivers/pinctrl/sirf/pinctrl-sirf.c (*map)[index].data.mux.function = function; map 130 drivers/pinctrl/sirf/pinctrl-sirf.c struct pinctrl_map *map, unsigned num_maps) map 132 drivers/pinctrl/sirf/pinctrl-sirf.c kfree(map); map 148 drivers/pinctrl/spear/pinctrl-spear.c struct pinctrl_map **map, map 179 drivers/pinctrl/spear/pinctrl-spear.c *map = kcalloc(count, sizeof(**map), GFP_KERNEL); map 180 drivers/pinctrl/spear/pinctrl-spear.c if (!*map) map 186 drivers/pinctrl/spear/pinctrl-spear.c (*map)[index].type = PIN_MAP_TYPE_MUX_GROUP; map 187 drivers/pinctrl/spear/pinctrl-spear.c (*map)[index].data.mux.group = group; map 188 drivers/pinctrl/spear/pinctrl-spear.c (*map)[index].data.mux.function = function; map 199 drivers/pinctrl/spear/pinctrl-spear.c struct pinctrl_map *map, map 202 drivers/pinctrl/spear/pinctrl-spear.c kfree(map); map 242 drivers/pinctrl/sprd/pinctrl-sprd.c struct pinctrl_map **map, map 288 drivers/pinctrl/sprd/pinctrl-sprd.c *map = NULL; map 296 drivers/pinctrl/sprd/pinctrl-sprd.c ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, map 302 drivers/pinctrl/sprd/pinctrl-sprd.c ret = pinctrl_utils_add_map_mux(pctldev, map, map 320 drivers/pinctrl/sprd/pinctrl-sprd.c ret = pinctrl_utils_add_map_configs(pctldev, map, map 484 drivers/pinctrl/stm32/pinctrl-stm32.c struct pinctrl_map **map, unsigned *reserved_maps, map 490 drivers/pinctrl/stm32/pinctrl-stm32.c (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP; map 491 drivers/pinctrl/stm32/pinctrl-stm32.c (*map)[*num_maps].data.mux.group = grp->name; map 499 drivers/pinctrl/stm32/pinctrl-stm32.c (*map)[*num_maps].data.mux.function = stm32_gpio_functions[fnum]; map 507 drivers/pinctrl/stm32/pinctrl-stm32.c struct pinctrl_map **map, map 553 drivers/pinctrl/stm32/pinctrl-stm32.c err = pinctrl_utils_reserve_map(pctldev, map, map 581 drivers/pinctrl/stm32/pinctrl-stm32.c err = stm32_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map, map 587 drivers/pinctrl/stm32/pinctrl-stm32.c err = pinctrl_utils_add_map_configs(pctldev, map, map 603 drivers/pinctrl/stm32/pinctrl-stm32.c struct pinctrl_map **map, unsigned *num_maps) map 609 drivers/pinctrl/stm32/pinctrl-stm32.c *map = NULL; map 614 drivers/pinctrl/stm32/pinctrl-stm32.c ret = stm32_pctrl_dt_subnode_to_map(pctldev, np, map, map 617 drivers/pinctrl/stm32/pinctrl-stm32.c pinctrl_utils_free_map(pctldev, *map, *num_maps); map 321 drivers/pinctrl/sunxi/pinctrl-sunxi.c struct pinctrl_map **map, map 331 drivers/pinctrl/sunxi/pinctrl-sunxi.c *map = NULL; map 356 drivers/pinctrl/sunxi/pinctrl-sunxi.c *map = kmalloc_array(nmaps, sizeof(struct pinctrl_map), GFP_KERNEL); map 357 drivers/pinctrl/sunxi/pinctrl-sunxi.c if (!*map) map 383 drivers/pinctrl/sunxi/pinctrl-sunxi.c (*map)[i].type = PIN_MAP_TYPE_MUX_GROUP; map 384 drivers/pinctrl/sunxi/pinctrl-sunxi.c (*map)[i].data.mux.group = group; map 385 drivers/pinctrl/sunxi/pinctrl-sunxi.c (*map)[i].data.mux.function = function; map 
390 drivers/pinctrl/sunxi/pinctrl-sunxi.c (*map)[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; map 391 drivers/pinctrl/sunxi/pinctrl-sunxi.c (*map)[i].data.configs.group_or_pin = group; map 392 drivers/pinctrl/sunxi/pinctrl-sunxi.c (*map)[i].data.configs.configs = pinconfig; map 393 drivers/pinctrl/sunxi/pinctrl-sunxi.c (*map)[i].data.configs.num_configs = configlen; map 404 drivers/pinctrl/sunxi/pinctrl-sunxi.c *map = krealloc(*map, i * sizeof(struct pinctrl_map), GFP_KERNEL); map 405 drivers/pinctrl/sunxi/pinctrl-sunxi.c if (!*map) map 411 drivers/pinctrl/sunxi/pinctrl-sunxi.c kfree(*map); map 412 drivers/pinctrl/sunxi/pinctrl-sunxi.c *map = NULL; map 417 drivers/pinctrl/sunxi/pinctrl-sunxi.c struct pinctrl_map *map, map 424 drivers/pinctrl/sunxi/pinctrl-sunxi.c if (map[i].type != PIN_MAP_TYPE_CONFIGS_GROUP) map 431 drivers/pinctrl/sunxi/pinctrl-sunxi.c kfree(map[i].data.configs.configs); map 435 drivers/pinctrl/sunxi/pinctrl-sunxi.c kfree(map); map 101 drivers/pinctrl/tegra/pinctrl-tegra.c struct pinctrl_map **map, map 152 drivers/pinctrl/tegra/pinctrl-tegra.c ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps, map 159 drivers/pinctrl/tegra/pinctrl-tegra.c ret = pinctrl_utils_add_map_mux(pctldev, map, map 167 drivers/pinctrl/tegra/pinctrl-tegra.c ret = pinctrl_utils_add_map_configs(pctldev, map, map 185 drivers/pinctrl/tegra/pinctrl-tegra.c struct pinctrl_map **map, map 193 drivers/pinctrl/tegra/pinctrl-tegra.c *map = NULL; map 197 drivers/pinctrl/tegra/pinctrl-tegra.c ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map, map 200 drivers/pinctrl/tegra/pinctrl-tegra.c pinctrl_utils_free_map(pctldev, *map, map 485 drivers/pinctrl/ti/pinctrl-ti-iodelay.c struct pinctrl_map **map, map 502 drivers/pinctrl/ti/pinctrl-ti-iodelay.c *map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL); map 503 drivers/pinctrl/ti/pinctrl-ti-iodelay.c if (!*map) map 545 drivers/pinctrl/ti/pinctrl-ti-iodelay.c (*map)->type = PIN_MAP_TYPE_CONFIGS_GROUP; map 546 drivers/pinctrl/ti/pinctrl-ti-iodelay.c (*map)->data.configs.group_or_pin = np->name; map 547 drivers/pinctrl/ti/pinctrl-ti-iodelay.c (*map)->data.configs.configs = &g->config; map 548 drivers/pinctrl/ti/pinctrl-ti-iodelay.c (*map)->data.configs.num_configs = 1; map 560 drivers/pinctrl/ti/pinctrl-ti-iodelay.c devm_kfree(iod->dev, *map); map 213 drivers/pinctrl/vt8500/pinctrl-wmt.c struct pinctrl_map *map = *maps; map 226 drivers/pinctrl/vt8500/pinctrl-wmt.c map->type = PIN_MAP_TYPE_MUX_GROUP; map 227 drivers/pinctrl/vt8500/pinctrl-wmt.c map->data.mux.group = data->groups[group]; map 228 drivers/pinctrl/vt8500/pinctrl-wmt.c map->data.mux.function = wmt_functions[fnum]; map 241 drivers/pinctrl/vt8500/pinctrl-wmt.c struct pinctrl_map *map = *maps; map 273 drivers/pinctrl/vt8500/pinctrl-wmt.c map->type = PIN_MAP_TYPE_CONFIGS_PIN; map 274 drivers/pinctrl/vt8500/pinctrl-wmt.c map->data.configs.group_or_pin = data->groups[group]; map 275 drivers/pinctrl/vt8500/pinctrl-wmt.c map->data.configs.configs = configs; map 276 drivers/pinctrl/vt8500/pinctrl-wmt.c map->data.configs.num_configs = 1; map 297 drivers/pinctrl/vt8500/pinctrl-wmt.c struct pinctrl_map **map, map 387 drivers/pinctrl/vt8500/pinctrl-wmt.c *map = maps; map 41 drivers/pinctrl/zte/pinctrl-zx.c struct pinctrl_map **map, u32 *num_maps) map 43 drivers/pinctrl/zte/pinctrl-zx.c return pinconf_generic_dt_node_to_map(pctldev, np_config, map, map 392 drivers/platform/x86/intel_pmc_core.c const struct pmc_reg_map *map = pmcdev->map; map 395 drivers/platform/x86/intel_pmc_core.c value = 
pmc_core_reg_read(pmcdev, map->slp_s0_offset); map 408 drivers/platform/x86/intel_pmc_core.c value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset); map 409 drivers/platform/x86/intel_pmc_core.c return value & BIT(pmcdev->map->pm_read_disable_bit); map 426 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map *map = pmcdev->map->pfear_sts; map 430 drivers/platform/x86/intel_pmc_core.c iter = pmcdev->map->ppfear0_offset; map 432 drivers/platform/x86/intel_pmc_core.c for (index = 0; index < pmcdev->map->ppfear_buckets && map 436 drivers/platform/x86/intel_pmc_core.c for (index = 0; map[index].name && map 437 drivers/platform/x86/intel_pmc_core.c index < pmcdev->map->ppfear_buckets * 8; index++) map 438 drivers/platform/x86/intel_pmc_core.c pmc_core_display_map(s, index, pf_regs[index / 8], map); map 477 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map *map = pmcdev->map->mphy_sts; map 508 drivers/platform/x86/intel_pmc_core.c for (index = 0; map[index].name && index < 8; index++) { map 510 drivers/platform/x86/intel_pmc_core.c map[index].name, map 511 drivers/platform/x86/intel_pmc_core.c map[index].bit_mask & val_low ? "Not power gated" : map 515 drivers/platform/x86/intel_pmc_core.c for (index = 8; map[index].name; index++) { map 517 drivers/platform/x86/intel_pmc_core.c map[index].name, map 518 drivers/platform/x86/intel_pmc_core.c map[index].bit_mask & val_high ? "Not power gated" : map 531 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map *map = pmcdev->map->pll_sts; map 552 drivers/platform/x86/intel_pmc_core.c for (index = 0; map[index].name ; index++) { map 554 drivers/platform/x86/intel_pmc_core.c map[index].name, map 555 drivers/platform/x86/intel_pmc_core.c map[index].bit_mask & val ? "Active" : "Idle"); map 568 drivers/platform/x86/intel_pmc_core.c const struct pmc_reg_map *map = pmcdev->map; map 580 drivers/platform/x86/intel_pmc_core.c if (val > map->ltr_ignore_max) { map 585 drivers/platform/x86/intel_pmc_core.c fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset); map 587 drivers/platform/x86/intel_pmc_core.c pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd); map 614 drivers/platform/x86/intel_pmc_core.c const struct pmc_reg_map *map = pmcdev->map; map 622 drivers/platform/x86/intel_pmc_core.c fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset); map 627 drivers/platform/x86/intel_pmc_core.c pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd); map 638 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps; map 639 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map *map; map 644 drivers/platform/x86/intel_pmc_core.c offset = pmcdev->map->slps0_dbg_offset; map 646 drivers/platform/x86/intel_pmc_core.c map = *maps; map 649 drivers/platform/x86/intel_pmc_core.c while (map->name) { map 651 drivers/platform/x86/intel_pmc_core.c map->name, map 652 drivers/platform/x86/intel_pmc_core.c data & map->bit_mask ? 
map 654 drivers/platform/x86/intel_pmc_core.c ++map; map 700 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts; map 706 drivers/platform/x86/intel_pmc_core.c for (index = 0; map[index].name ; index++) { map 709 drivers/platform/x86/intel_pmc_core.c map[index].bit_mask); map 726 drivers/platform/x86/intel_pmc_core.c map[index].name, ltr_raw_data, map 737 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map *map = pmcdev->map->msr_sts; map 741 drivers/platform/x86/intel_pmc_core.c for (index = 0; map[index].name ; index++) { map 742 drivers/platform/x86/intel_pmc_core.c if (rdmsrl_safe(map[index].bit_mask, &pcstate_count)) map 747 drivers/platform/x86/intel_pmc_core.c seq_printf(s, "%-8s : %llu\n", map[index].name, map 781 drivers/platform/x86/intel_pmc_core.c if (pmcdev->map->pll_sts) map 785 drivers/platform/x86/intel_pmc_core.c if (pmcdev->map->mphy_sts) map 790 drivers/platform/x86/intel_pmc_core.c if (pmcdev->map->slps0_dbg_maps) { map 839 drivers/platform/x86/intel_pmc_core.c value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset); map 844 drivers/platform/x86/intel_pmc_core.c pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value); map 874 drivers/platform/x86/intel_pmc_core.c pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data; map 881 drivers/platform/x86/intel_pmc_core.c if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids)) map 882 drivers/platform/x86/intel_pmc_core.c pmcdev->map = &cnp_reg_map; map 890 drivers/platform/x86/intel_pmc_core.c pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset; map 894 drivers/platform/x86/intel_pmc_core.c pmcdev->map->regmap_length); map 983 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps; map 984 drivers/platform/x86/intel_pmc_core.c int offset = pmcdev->map->slps0_dbg_offset; map 985 drivers/platform/x86/intel_pmc_core.c const struct pmc_bit_map *map; map 1005 drivers/platform/x86/intel_pmc_core.c map = *maps; map 1008 drivers/platform/x86/intel_pmc_core.c while (map->name) { map 1010 drivers/platform/x86/intel_pmc_core.c map->name, map 1011 drivers/platform/x86/intel_pmc_core.c data & map->bit_mask ? "Yes" : "No"); map 1012 drivers/platform/x86/intel_pmc_core.c map++; map 253 drivers/platform/x86/intel_pmc_core.h const struct pmc_reg_map *map; map 34 drivers/platform/x86/pmc_atom.c const struct pmc_reg_map *map; map 41 drivers/platform/x86/pmc_atom.c const struct pmc_reg_map *map; map 194 drivers/platform/x86/pmc_atom.c .map = &byt_reg_map, map 199 drivers/platform/x86/pmc_atom.c .map = &cht_reg_map, map 286 drivers/platform/x86/pmc_atom.c const struct pmc_reg_map *m = pmc->map; map 309 drivers/platform/x86/pmc_atom.c const struct pmc_bit_map *map = pmc->map->pss; map 313 drivers/platform/x86/pmc_atom.c for (index = 0; map[index].name; index++) { map 315 drivers/platform/x86/pmc_atom.c index, map[index].name, map 316 drivers/platform/x86/pmc_atom.c map[index].bit_mask & pss ? 
"Off" : "On"); map 486 drivers/platform/x86/pmc_atom.c const struct pmc_reg_map *map = data->map; map 506 drivers/platform/x86/pmc_atom.c pmc->map = map; map 40 drivers/pnp/base.h pnp_irq_mask_t map; /* bitmap for IRQ lines */ map 45 drivers/pnp/base.h unsigned char map; /* bitmask for DMA channels */ map 82 drivers/pnp/base.h pnp_irq_mask_t *map, unsigned char flags); map 84 drivers/pnp/base.h unsigned char map, unsigned char flags); map 74 drivers/pnp/interface.c if (test_bit(i, irq->map.bits)) { map 85 drivers/pnp/interface.c if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) map 108 drivers/pnp/interface.c if (dma->map & (1 << i)) { map 116 drivers/pnp/interface.c if (!dma->map) map 415 drivers/pnp/isapnp/core.c pnp_irq_mask_t map; map 421 drivers/pnp/isapnp/core.c bitmap_zero(map.bits, PNP_IRQ_NR); map 422 drivers/pnp/isapnp/core.c bitmap_copy(map.bits, &bits, 16); map 427 drivers/pnp/isapnp/core.c pnp_register_irq_resource(dev, option_flags, &map, flags); map 154 drivers/pnp/manager.c if (bitmap_empty(rule->map.bits, PNP_IRQ_NR)) { map 161 drivers/pnp/manager.c res->start = find_next_bit(rule->map.bits, PNP_IRQ_NR, 16); map 167 drivers/pnp/manager.c if (test_bit(xtab[i], rule->map.bits)) { map 213 drivers/pnp/manager.c if (!rule->map) { map 220 drivers/pnp/manager.c if (rule->map & (1 << xtab[i])) { map 296 drivers/pnp/pnpacpi/rsparser.c unsigned char map = 0, flags; map 299 drivers/pnp/pnpacpi/rsparser.c map |= 1 << p->channels[i]; map 302 drivers/pnp/pnpacpi/rsparser.c pnp_register_dma_resource(dev, option_flags, map, flags); map 310 drivers/pnp/pnpacpi/rsparser.c pnp_irq_mask_t map; map 313 drivers/pnp/pnpacpi/rsparser.c bitmap_zero(map.bits, PNP_IRQ_NR); map 316 drivers/pnp/pnpacpi/rsparser.c __set_bit(p->interrupts[i], map.bits); map 319 drivers/pnp/pnpacpi/rsparser.c pnp_register_irq_resource(dev, option_flags, &map, flags); map 327 drivers/pnp/pnpacpi/rsparser.c pnp_irq_mask_t map; map 330 drivers/pnp/pnpacpi/rsparser.c bitmap_zero(map.bits, PNP_IRQ_NR); map 334 drivers/pnp/pnpacpi/rsparser.c __set_bit(p->interrupts[i], map.bits); map 343 drivers/pnp/pnpacpi/rsparser.c pnp_register_irq_resource(dev, option_flags, &map, flags); map 267 drivers/pnp/pnpbios/rsparser.c pnp_irq_mask_t map; map 272 drivers/pnp/pnpbios/rsparser.c bitmap_zero(map.bits, PNP_IRQ_NR); map 273 drivers/pnp/pnpbios/rsparser.c bitmap_copy(map.bits, &bits, 16); map 278 drivers/pnp/pnpbios/rsparser.c pnp_register_irq_resource(dev, option_flags, &map, flags); map 585 drivers/pnp/pnpbios/rsparser.c unsigned long map; map 588 drivers/pnp/pnpbios/rsparser.c map = 1 << res->start; map 590 drivers/pnp/pnpbios/rsparser.c map = 0; map 592 drivers/pnp/pnpbios/rsparser.c p[1] = map & 0xff; map 593 drivers/pnp/pnpbios/rsparser.c p[2] = (map >> 8) & 0xff; map 595 drivers/pnp/pnpbios/rsparser.c pnp_dbg(&dev->dev, " encode irq mask %#lx\n", map); map 601 drivers/pnp/pnpbios/rsparser.c unsigned long map; map 604 drivers/pnp/pnpbios/rsparser.c map = 1 << res->start; map 606 drivers/pnp/pnpbios/rsparser.c map = 0; map 608 drivers/pnp/pnpbios/rsparser.c p[1] = map & 0xff; map 610 drivers/pnp/pnpbios/rsparser.c pnp_dbg(&dev->dev, " encode dma mask %#lx\n", map); map 81 drivers/pnp/quirks.c bitmap_zero(irq->map.bits, PNP_IRQ_NR); map 82 drivers/pnp/quirks.c __set_bit(5, irq->map.bits); map 83 drivers/pnp/quirks.c __set_bit(7, irq->map.bits); map 84 drivers/pnp/quirks.c __set_bit(10, irq->map.bits); map 92 drivers/pnp/quirks.c dma->map != 0x0A) { map 96 drivers/pnp/quirks.c pnp_option_set(option), dma->map); map 97 drivers/pnp/quirks.c 
dma->map = 0x0A; map 52 drivers/pnp/resource.c pnp_irq_mask_t *map, unsigned char flags) map 62 drivers/pnp/resource.c irq->map = *map; map 70 drivers/pnp/resource.c if (test_bit(i, irq->map.bits)) map 80 drivers/pnp/resource.c unsigned char map, unsigned char flags) map 90 drivers/pnp/resource.c dma->map = map; map 671 drivers/pnp/resource.c test_bit(start, irq->map.bits)) map 676 drivers/pnp/resource.c if (dma->map & (1 << start)) map 145 drivers/pnp/support.c if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) map 150 drivers/pnp/support.c if (test_bit(i, irq->map.bits)) map 164 drivers/pnp/support.c if (!dma->map) map 169 drivers/pnp/support.c if (dma->map & (1 << i)) map 175 drivers/pnp/support.c "flags %#x", dma->map, dma->flags); map 20 drivers/power/reset/syscon-poweroff.c static struct regmap *map; map 28 drivers/power/reset/syscon-poweroff.c regmap_update_bits(map, offset, mask, value); map 40 drivers/power/reset/syscon-poweroff.c map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap"); map 41 drivers/power/reset/syscon-poweroff.c if (IS_ERR(map)) { map 43 drivers/power/reset/syscon-poweroff.c return PTR_ERR(map); map 17 drivers/power/reset/syscon-reboot-mode.c struct regmap *map; map 31 drivers/power/reset/syscon-reboot-mode.c ret = regmap_update_bits(syscon_rbm->map, syscon_rbm->offset, map 52 drivers/power/reset/syscon-reboot-mode.c syscon_rbm->map = syscon_node_to_regmap(pdev->dev.parent->of_node); map 53 drivers/power/reset/syscon-reboot-mode.c if (IS_ERR(syscon_rbm->map)) map 54 drivers/power/reset/syscon-reboot-mode.c return PTR_ERR(syscon_rbm->map); map 19 drivers/power/reset/syscon-reboot.c struct regmap *map; map 34 drivers/power/reset/syscon-reboot.c regmap_update_bits(ctx->map, ctx->offset, ctx->mask, ctx->value); map 53 drivers/power/reset/syscon-reboot.c ctx->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap"); map 54 drivers/power/reset/syscon-reboot.c if (IS_ERR(ctx->map)) map 55 drivers/power/reset/syscon-reboot.c return PTR_ERR(ctx->map); map 229 drivers/power/supply/bq24257_charger.c static u8 bq24257_find_idx(u32 value, const u32 *map, u8 map_size) map 234 drivers/power/supply/bq24257_charger.c if (value < map[idx]) map 96 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 98 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_TEMP, &data); map 233 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 248 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_STATUS, &data); map 261 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_Cycles, &data); map 268 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_MinMaxVolt, &data); map 276 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_MinMaxVolt, &data); map 284 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_V_empty, &data); map 286 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17047_V_empty, &data); map 294 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_VCELL, &data); map 301 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_AvgVCELL, &data); map 308 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_OCVInternal, &data); map 315 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_RepSOC, &data); map 322 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_DesignCap, &data); map 331 
drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_FullCAP, &data); map 340 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_RepCap, &data); map 349 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_QH, &data); map 361 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_TALRT_Th, &data); map 368 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_TALRT_Th, &data); map 390 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_Current, &data); map 402 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_AvgCurrent, &data); map 423 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 430 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_TALRT_Th, &data); map 441 drivers/power/supply/max17042_battery.c ret = regmap_write(map, MAX17042_TALRT_Th, data); map 444 drivers/power/supply/max17042_battery.c ret = regmap_read(map, MAX17042_TALRT_Th, &data); map 455 drivers/power/supply/max17042_battery.c ret = regmap_write(map, MAX17042_TALRT_Th, data); map 486 drivers/power/supply/max17042_battery.c static int max17042_write_verify_reg(struct regmap *map, u8 reg, u32 value) map 493 drivers/power/supply/max17042_battery.c ret = regmap_write(map, reg, value); map 494 drivers/power/supply/max17042_battery.c regmap_read(map, reg, &read_value); map 507 drivers/power/supply/max17042_battery.c static inline void max17042_override_por(struct regmap *map, map 511 drivers/power/supply/max17042_battery.c regmap_write(map, reg, value); map 516 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 518 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_MLOCKReg1, MODEL_UNLOCK1); map 519 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_MLOCKReg2, MODEL_UNLOCK2); map 524 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 526 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_MLOCKReg1, MODEL_LOCK1); map 527 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_MLOCKReg2, MODEL_LOCK2); map 533 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 537 drivers/power/supply/max17042_battery.c regmap_write(map, addr + i, map 544 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 549 drivers/power/supply/max17042_battery.c regmap_read(map, addr + i, &tmp); map 622 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 624 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_CONFIG, config->config); map 625 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_LearnCFG, config->learn_cfg); map 626 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_FilterCFG, map 628 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_RelaxCFG, config->relax_cfg); map 631 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17047_FullSOCThr, map 638 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 640 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_RCOMP0, config->rcomp0); map 641 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_TempCo, config->tcompc0); map 642 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_ICHGTerm, config->ichgt_term); map 644 
drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_EmptyTempCo, config->empty_tempco); map 645 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_K_empty0, map 648 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17047_QRTbl00, map 650 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17047_QRTbl10, map 652 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17047_QRTbl20, map 654 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17047_QRTbl30, map 662 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 664 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_FullCAP, map 666 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_DesignCap, config->design_cap); map 667 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_FullCAPNom, map 674 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 676 drivers/power/supply/max17042_battery.c regmap_read(map, MAX17042_VFSOC, &vfSoc); map 677 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_UNLOCK); map 678 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_VFSOC0, vfSoc); map 679 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_VFSOC0Enable, VFSOC0_LOCK); map 688 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 690 drivers/power/supply/max17042_battery.c regmap_read(map, MAX17042_FullCAP0, &full_cap0); map 691 drivers/power/supply/max17042_battery.c regmap_read(map, MAX17042_VFSOC, &vfSoc); map 698 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_RemCap, rem_cap); map 701 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_RepCap, rep_cap); map 705 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_dQacc, dq_acc); map 706 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_dPacc, dP_ACC_200); map 708 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_FullCAP, map 710 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_DesignCap, map 712 drivers/power/supply/max17042_battery.c max17042_write_verify_reg(map, MAX17042_FullCAPNom, map 715 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_RepSOC, vfSoc); map 725 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 728 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_TGAIN, config->tgain); map 729 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAx17042_TOFF, config->toff); map 730 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_CGAIN, config->cgain); map 731 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_COFF, config->coff); map 733 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_VALRT_Th, config->valrt_thresh); map 734 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_TALRT_Th, config->talrt_thresh); map 735 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_SALRT_Th, map 737 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_CONFIG, config->config); map 738 drivers/power/supply/max17042_battery.c max17042_override_por(map, 
MAX17042_SHDNTIMER, config->shdntimer); map 740 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_DesignCap, config->design_cap); map 741 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_ICHGTerm, config->ichgt_term); map 743 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_AtRate, config->at_rate); map 744 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_LearnCFG, config->learn_cfg); map 745 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_FilterCFG, config->filter_cfg); map 746 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_RelaxCFG, config->relax_cfg); map 747 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_MiscCFG, config->misc_cfg); map 748 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_MaskSOC, config->masksoc); map 750 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_FullCAP, config->fullcap); map 751 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_FullCAPNom, config->fullcapnom); map 753 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_SOC_empty, map 755 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_LAvg_empty, config->lavg_empty); map 756 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_dQacc, config->dqacc); map 757 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_dPacc, config->dpacc); map 760 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_V_empty, config->vempty); map 762 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17047_V_empty, config->vempty); map 763 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_TempNom, config->temp_nom); map 764 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_TempLim, config->temp_lim); map 765 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_FCTC, config->fctc); map 766 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_RCOMP0, config->rcomp0); map 767 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_TempCo, config->tcompc0); map 769 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_EmptyTempCo, map 771 drivers/power/supply/max17042_battery.c max17042_override_por(map, MAX17042_K_empty0, map 778 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 822 drivers/power/supply/max17042_battery.c regmap_update_bits(map, MAX17042_STATUS, STATUS_POR_BIT, 0x0); map 828 drivers/power/supply/max17042_battery.c struct regmap *map = chip->regmap; map 834 drivers/power/supply/max17042_battery.c regmap_read(map, MAX17042_RepSOC, &soc); map 838 drivers/power/supply/max17042_battery.c regmap_write(map, MAX17042_SALRT_Th, soc_tr); map 70 drivers/power/supply/max77650-charger.c struct regmap *map; map 95 drivers/power/supply/max77650-charger.c rv = regmap_update_bits(chg->map, map 116 drivers/power/supply/max77650-charger.c rv = regmap_update_bits(chg->map, map 134 drivers/power/supply/max77650-charger.c rv = regmap_update_bits(chg->map, map 148 drivers/power/supply/max77650-charger.c rv = regmap_update_bits(chg->map, map 163 drivers/power/supply/max77650-charger.c rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg); map 199
drivers/power/supply/max77650-charger.c rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg); map 232 drivers/power/supply/max77650-charger.c rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg); map 239 drivers/power/supply/max77650-charger.c rv = regmap_read(chg->map, MAX77650_REG_STAT_CHG_B, &reg); map 297 drivers/power/supply/max77650-charger.c chg->map = dev_get_regmap(parent, NULL); map 298 drivers/power/supply/max77650-charger.c if (!chg->map) map 291 drivers/power/supply/test_power.c static int map_get_value(struct battery_property_map *map, const char *key, map 306 drivers/power/supply/test_power.c while (map->key) { map 307 drivers/power/supply/test_power.c if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0) map 308 drivers/power/supply/test_power.c return map->value; map 309 drivers/power/supply/test_power.c map++; map 316 drivers/power/supply/test_power.c static const char *map_get_key(struct battery_property_map *map, int value, map 319 drivers/power/supply/test_power.c while (map->key) { map 320 drivers/power/supply/test_power.c if (map->value == value) map 321 drivers/power/supply/test_power.c return map->key; map 322 drivers/power/supply/test_power.c map++; map 238 drivers/power/supply/wm831x_power.c struct chg_map *map, int count, int val, map 245 drivers/power/supply/wm831x_power.c if (val == map[i].val) map 251 drivers/power/supply/wm831x_power.c *reg |= map[i].reg_val; map 563 drivers/ps3/ps3av_cmd.c static u8 ps3av_cnv_fifomap(const u8 *map) map 567 drivers/ps3/ps3av_cmd.c ret = map[0] + (map[1] << 2) + (map[2] << 4) + (map[3] << 6); map 364 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 369 drivers/rapidio/devices/rio_mport_cdev.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 370 drivers/rapidio/devices/rio_mport_cdev.c if (map == NULL) map 377 drivers/rapidio/devices/rio_mport_cdev.c map->dir = MAP_OUTBOUND; map 378 drivers/rapidio/devices/rio_mport_cdev.c map->rioid = rioid; map 379 drivers/rapidio/devices/rio_mport_cdev.c map->rio_addr = raddr; map 380 drivers/rapidio/devices/rio_mport_cdev.c map->size = size; map 381 drivers/rapidio/devices/rio_mport_cdev.c map->phys_addr = *paddr; map 382 drivers/rapidio/devices/rio_mport_cdev.c map->filp = filp; map 383 drivers/rapidio/devices/rio_mport_cdev.c map->md = md; map 384 drivers/rapidio/devices/rio_mport_cdev.c kref_init(&map->ref); map 385 drivers/rapidio/devices/rio_mport_cdev.c list_add_tail(&map->node, &md->mappings); map 388 drivers/rapidio/devices/rio_mport_cdev.c kfree(map); map 397 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 401 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry(map, &md->mappings, node) { map 402 drivers/rapidio/devices/rio_mport_cdev.c if (map->dir != MAP_OUTBOUND) map 404 drivers/rapidio/devices/rio_mport_cdev.c if (rioid == map->rioid && map 405 drivers/rapidio/devices/rio_mport_cdev.c raddr == map->rio_addr && size == map->size) { map 406 drivers/rapidio/devices/rio_mport_cdev.c *paddr = map->phys_addr; map 409 drivers/rapidio/devices/rio_mport_cdev.c } else if (rioid == map->rioid && map 410 drivers/rapidio/devices/rio_mport_cdev.c raddr < (map->rio_addr + map->size - 1) && map 411 drivers/rapidio/devices/rio_mport_cdev.c (raddr + size) > map->rio_addr) { map 429 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mmap map; map 433 drivers/rapidio/devices/rio_mport_cdev.c if (unlikely(copy_from_user(&map, arg, sizeof(map)))) map 437 drivers/rapidio/devices/rio_mport_cdev.c map.rioid, map.rio_addr, map.length); map 439
drivers/rapidio/devices/rio_mport_cdev.c ret = rio_mport_get_outbound_mapping(data, filp, map.rioid, map 440 drivers/rapidio/devices/rio_mport_cdev.c map.rio_addr, map.length, &paddr); map 446 drivers/rapidio/devices/rio_mport_cdev.c map.handle = paddr; map 448 drivers/rapidio/devices/rio_mport_cdev.c if (unlikely(copy_to_user(arg, &map, sizeof(map)))) map 464 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map, *_map; map 475 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry_safe(map, _map, &md->mappings, node) { map 476 drivers/rapidio/devices/rio_mport_cdev.c if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) { map 477 drivers/rapidio/devices/rio_mport_cdev.c if (map->filp == filp) { map 479 drivers/rapidio/devices/rio_mport_cdev.c map->filp = NULL; map 480 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&map->ref, mport_release_mapping); map 543 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 586 drivers/rapidio/devices/rio_mport_cdev.c if (req->map) { map 587 drivers/rapidio/devices/rio_mport_cdev.c mutex_lock(&req->map->md->buf_mutex); map 588 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&req->map->ref, mport_release_mapping); map 589 drivers/rapidio/devices/rio_mport_cdev.c mutex_unlock(&req->map->md->buf_mutex); map 899 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 904 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry(map, &md->mappings, node) { map 905 drivers/rapidio/devices/rio_mport_cdev.c if (baddr >= map->phys_addr && map 906 drivers/rapidio/devices/rio_mport_cdev.c baddr < (map->phys_addr + map->size)) { map 907 drivers/rapidio/devices/rio_mport_cdev.c kref_get(&map->ref); map 908 drivers/rapidio/devices/rio_mport_cdev.c req->map = map; map 914 drivers/rapidio/devices/rio_mport_cdev.c if (req->map == NULL) { map 919 drivers/rapidio/devices/rio_mport_cdev.c if (xfer->length + xfer->offset > map->size) { map 931 drivers/rapidio/devices/rio_mport_cdev.c map->virt_addr + (baddr - map->phys_addr) + map 1090 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 1092 drivers/rapidio/devices/rio_mport_cdev.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 1093 drivers/rapidio/devices/rio_mport_cdev.c if (map == NULL) map 1096 drivers/rapidio/devices/rio_mport_cdev.c map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size, map 1097 drivers/rapidio/devices/rio_mport_cdev.c &map->phys_addr, GFP_KERNEL); map 1098 drivers/rapidio/devices/rio_mport_cdev.c if (map->virt_addr == NULL) { map 1099 drivers/rapidio/devices/rio_mport_cdev.c kfree(map); map 1103 drivers/rapidio/devices/rio_mport_cdev.c map->dir = MAP_DMA; map 1104 drivers/rapidio/devices/rio_mport_cdev.c map->size = size; map 1105 drivers/rapidio/devices/rio_mport_cdev.c map->filp = filp; map 1106 drivers/rapidio/devices/rio_mport_cdev.c map->md = md; map 1107 drivers/rapidio/devices/rio_mport_cdev.c kref_init(&map->ref); map 1109 drivers/rapidio/devices/rio_mport_cdev.c list_add_tail(&map->node, &md->mappings); map 1111 drivers/rapidio/devices/rio_mport_cdev.c *mapping = map; map 1120 drivers/rapidio/devices/rio_mport_cdev.c struct rio_dma_mem map; map 1124 drivers/rapidio/devices/rio_mport_cdev.c if (unlikely(copy_from_user(&map, arg, sizeof(map)))) map 1127 drivers/rapidio/devices/rio_mport_cdev.c ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); map 1131 drivers/rapidio/devices/rio_mport_cdev.c map.dma_handle = mapping->phys_addr; map 1133 
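
The rio_mport_cdev.c entries above (map 1090-1111 and the surrounding kref/list calls) follow a common lifecycle: allocate a tracking struct, obtain a coherent DMA buffer, take an initial kref, and chain the struct onto a per-device list. A cut-down sketch of that lifecycle, with a simplified struct and names invented for the example (the real driver also records direction, owning file and RapidIO address, and guards the list with md->buf_mutex):

#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Simplified stand-in for struct rio_mport_mapping. */
struct example_mapping {
        struct list_head node;
        struct kref ref;
        struct device *dev;
        size_t size;
        void *virt_addr;
        dma_addr_t phys_addr;
};

/* Runs when the last reference is dropped; caller holds the list lock. */
static void example_mapping_release(struct kref *ref)
{
        struct example_mapping *map =
                container_of(ref, struct example_mapping, ref);

        list_del(&map->node);
        dma_free_coherent(map->dev, map->size, map->virt_addr,
                          map->phys_addr);
        kfree(map);
}

static struct example_mapping *example_mapping_create(struct device *dev,
                                                      struct list_head *mappings,
                                                      size_t size)
{
        struct example_mapping *map;

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                return NULL;

        map->virt_addr = dma_alloc_coherent(dev, size, &map->phys_addr,
                                            GFP_KERNEL);
        if (!map->virt_addr) {
                kfree(map);
                return NULL;
        }

        map->dev = dev;
        map->size = size;
        kref_init(&map->ref);
        list_add_tail(&map->node, mappings);
        return map;
}

Every user of the mapping (ioctl caller, mmap, DMA request) then pairs kref_get(&map->ref) with kref_put(&map->ref, example_mapping_release), which is the shape of the mport_release_mapping entries further down.
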
drivers/rapidio/devices/rio_mport_cdev.c if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { map 1149 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map, *_map; map 1156 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry_safe(map, _map, &md->mappings, node) { map 1157 drivers/rapidio/devices/rio_mport_cdev.c if (map->dir == MAP_DMA && map->phys_addr == handle && map 1158 drivers/rapidio/devices/rio_mport_cdev.c map->filp == filp) { map 1159 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&map->ref, mport_release_mapping); map 1205 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 1212 drivers/rapidio/devices/rio_mport_cdev.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 1213 drivers/rapidio/devices/rio_mport_cdev.c if (map == NULL) map 1216 drivers/rapidio/devices/rio_mport_cdev.c map->virt_addr = dma_alloc_coherent(mport->dev.parent, size, map 1217 drivers/rapidio/devices/rio_mport_cdev.c &map->phys_addr, GFP_KERNEL); map 1218 drivers/rapidio/devices/rio_mport_cdev.c if (map->virt_addr == NULL) { map 1224 drivers/rapidio/devices/rio_mport_cdev.c raddr = map->phys_addr; map 1225 drivers/rapidio/devices/rio_mport_cdev.c ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0); map 1229 drivers/rapidio/devices/rio_mport_cdev.c map->dir = MAP_INBOUND; map 1230 drivers/rapidio/devices/rio_mport_cdev.c map->rio_addr = raddr; map 1231 drivers/rapidio/devices/rio_mport_cdev.c map->size = size; map 1232 drivers/rapidio/devices/rio_mport_cdev.c map->filp = filp; map 1233 drivers/rapidio/devices/rio_mport_cdev.c map->md = md; map 1234 drivers/rapidio/devices/rio_mport_cdev.c kref_init(&map->ref); map 1236 drivers/rapidio/devices/rio_mport_cdev.c list_add_tail(&map->node, &md->mappings); map 1238 drivers/rapidio/devices/rio_mport_cdev.c *mapping = map; map 1243 drivers/rapidio/devices/rio_mport_cdev.c map->virt_addr, map->phys_addr); map 1245 drivers/rapidio/devices/rio_mport_cdev.c kfree(map); map 1254 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 1261 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry(map, &md->mappings, node) { map 1262 drivers/rapidio/devices/rio_mport_cdev.c if (map->dir != MAP_INBOUND) map 1264 drivers/rapidio/devices/rio_mport_cdev.c if (raddr == map->rio_addr && size == map->size) { map 1266 drivers/rapidio/devices/rio_mport_cdev.c *mapping = map; map 1269 drivers/rapidio/devices/rio_mport_cdev.c } else if (raddr < (map->rio_addr + map->size - 1) && map 1270 drivers/rapidio/devices/rio_mport_cdev.c (raddr + size) > map->rio_addr) { map 1288 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mmap map; map 1294 drivers/rapidio/devices/rio_mport_cdev.c if (unlikely(copy_from_user(&map, arg, sizeof(map)))) map 1299 drivers/rapidio/devices/rio_mport_cdev.c ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr, map 1300 drivers/rapidio/devices/rio_mport_cdev.c map.length, &mapping); map 1304 drivers/rapidio/devices/rio_mport_cdev.c map.handle = mapping->phys_addr; map 1305 drivers/rapidio/devices/rio_mport_cdev.c map.rio_addr = mapping->rio_addr; map 1307 drivers/rapidio/devices/rio_mport_cdev.c if (unlikely(copy_to_user(arg, &map, sizeof(map)))) { map 1331 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map, *_map; map 1342 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry_safe(map, _map, &md->mappings, node) { map 1343 drivers/rapidio/devices/rio_mport_cdev.c if (map->dir == MAP_INBOUND && map->phys_addr == handle) { map 1344 
drivers/rapidio/devices/rio_mport_cdev.c if (map->filp == filp) { map 1345 drivers/rapidio/devices/rio_mport_cdev.c map->filp = NULL; map 1346 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&map->ref, mport_release_mapping); map 2012 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map, *_map; map 2040 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry_safe(map, _map, &chdev->mappings, node) { map 2041 drivers/rapidio/devices/rio_mport_cdev.c if (map->filp == filp) { map 2043 drivers/rapidio/devices/rio_mport_cdev.c map->virt_addr, filp); map 2044 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&map->ref, mport_release_mapping); map 2144 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map = map 2146 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport *mport = map->md->mport; map 2149 drivers/rapidio/devices/rio_mport_cdev.c map->dir, map->virt_addr, map 2150 drivers/rapidio/devices/rio_mport_cdev.c &map->phys_addr, mport->name); map 2152 drivers/rapidio/devices/rio_mport_cdev.c list_del(&map->node); map 2154 drivers/rapidio/devices/rio_mport_cdev.c switch (map->dir) { map 2156 drivers/rapidio/devices/rio_mport_cdev.c rio_unmap_inb_region(mport, map->phys_addr); map 2159 drivers/rapidio/devices/rio_mport_cdev.c dma_free_coherent(mport->dev.parent, map->size, map 2160 drivers/rapidio/devices/rio_mport_cdev.c map->virt_addr, map->phys_addr); map 2163 drivers/rapidio/devices/rio_mport_cdev.c rio_unmap_outb_region(mport, map->rioid, map->rio_addr); map 2166 drivers/rapidio/devices/rio_mport_cdev.c kfree(map); map 2171 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map = vma->vm_private_data; map 2173 drivers/rapidio/devices/rio_mport_cdev.c rmcd_debug(MMAP, "%pad", &map->phys_addr); map 2174 drivers/rapidio/devices/rio_mport_cdev.c kref_get(&map->ref); map 2179 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map = vma->vm_private_data; map 2181 drivers/rapidio/devices/rio_mport_cdev.c rmcd_debug(MMAP, "%pad", &map->phys_addr); map 2182 drivers/rapidio/devices/rio_mport_cdev.c mutex_lock(&map->md->buf_mutex); map 2183 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&map->ref, mport_release_mapping); map 2184 drivers/rapidio/devices/rio_mport_cdev.c mutex_unlock(&map->md->buf_mutex); map 2200 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map; map 2209 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry(map, &md->mappings, node) { map 2210 drivers/rapidio/devices/rio_mport_cdev.c if (baddr >= map->phys_addr && map 2211 drivers/rapidio/devices/rio_mport_cdev.c baddr < (map->phys_addr + map->size)) { map 2221 drivers/rapidio/devices/rio_mport_cdev.c offset = baddr - map->phys_addr; map 2223 drivers/rapidio/devices/rio_mport_cdev.c if (size + offset > map->size) map 2229 drivers/rapidio/devices/rio_mport_cdev.c if (map->dir == MAP_INBOUND || map->dir == MAP_DMA) map 2231 drivers/rapidio/devices/rio_mport_cdev.c map->virt_addr, map->phys_addr, map->size); map 2232 drivers/rapidio/devices/rio_mport_cdev.c else if (map->dir == MAP_OUTBOUND) { map 2234 drivers/rapidio/devices/rio_mport_cdev.c ret = vm_iomap_memory(vma, map->phys_addr, map->size); map 2241 drivers/rapidio/devices/rio_mport_cdev.c vma->vm_private_data = map; map 2496 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *map, *_map; map 2514 drivers/rapidio/devices/rio_mport_cdev.c list_for_each_entry_safe(map, _map, &md->mappings, node) { map 2515 drivers/rapidio/devices/rio_mport_cdev.c 
kref_put(&map->ref, mport_release_mapping); map 1097 drivers/rapidio/devices/tsi721.c struct tsi721_ib_win_mapping *map = NULL; map 1124 drivers/rapidio/devices/tsi721.c map = kzalloc(sizeof(struct tsi721_ib_win_mapping), GFP_ATOMIC); map 1125 drivers/rapidio/devices/tsi721.c if (map == NULL) map 1172 drivers/rapidio/devices/tsi721.c map->lstart = lstart; map 1173 drivers/rapidio/devices/tsi721.c list_add_tail(&map->node, &ib_win->mappings); map 1207 drivers/rapidio/devices/tsi721.c map->lstart = lstart; map 1208 drivers/rapidio/devices/tsi721.c list_add_tail(&map->node, &ib_win->mappings); map 1230 drivers/rapidio/devices/tsi721.c kfree(map); map 1262 drivers/rapidio/devices/tsi721.c struct tsi721_ib_win_mapping *map; map 1265 drivers/rapidio/devices/tsi721.c list_for_each_entry(map, map 1267 drivers/rapidio/devices/tsi721.c if (map->lstart == lstart) { map 1268 drivers/rapidio/devices/tsi721.c list_del(&map->node); map 1269 drivers/rapidio/devices/tsi721.c kfree(map); map 585 drivers/regulator/act8865-regulator.c static int act8600_charger_get_status(struct regmap *map) map 591 drivers/regulator/act8865-regulator.c ret = regmap_read(map, ACT8600_APCH_STAT, &val); map 611 drivers/regulator/act8865-regulator.c struct regmap *map = power_supply_get_drvdata(psy); map 616 drivers/regulator/act8865-regulator.c ret = act8600_charger_get_status(map); map 1661 drivers/regulator/core.c struct regulator_supply_alias *map; map 1663 drivers/regulator/core.c list_for_each_entry(map, &regulator_supply_alias_list, list) map 1664 drivers/regulator/core.c if (map->src_dev == dev && strcmp(map->src_supply, supply) == 0) map 1665 drivers/regulator/core.c return map; map 1672 drivers/regulator/core.c struct regulator_supply_alias *map; map 1674 drivers/regulator/core.c map = regulator_find_supply_alias(*dev, *supply); map 1675 drivers/regulator/core.c if (map) { map 1677 drivers/regulator/core.c *supply, map->alias_supply, map 1678 drivers/regulator/core.c dev_name(map->alias_dev)); map 1679 drivers/regulator/core.c *dev = map->alias_dev; map 1680 drivers/regulator/core.c *supply = map->alias_supply; map 1717 drivers/regulator/core.c struct regulator_map *map; map 1743 drivers/regulator/core.c list_for_each_entry(map, &regulator_map_list, list) { map 1745 drivers/regulator/core.c if (map->dev_name && map 1746 drivers/regulator/core.c (!devname || strcmp(map->dev_name, devname))) map 1749 drivers/regulator/core.c if (strcmp(map->supply, supply) == 0 && map 1750 drivers/regulator/core.c get_device(&map->regulator->dev)) { map 1751 drivers/regulator/core.c r = map->regulator; map 2103 drivers/regulator/core.c struct regulator_supply_alias *map; map 2105 drivers/regulator/core.c map = regulator_find_supply_alias(dev, id); map 2106 drivers/regulator/core.c if (map) map 2109 drivers/regulator/core.c map = kzalloc(sizeof(struct regulator_supply_alias), GFP_KERNEL); map 2110 drivers/regulator/core.c if (!map) map 2113 drivers/regulator/core.c map->src_dev = dev; map 2114 drivers/regulator/core.c map->src_supply = id; map 2115 drivers/regulator/core.c map->alias_dev = alias_dev; map 2116 drivers/regulator/core.c map->alias_supply = alias_id; map 2118 drivers/regulator/core.c list_add(&map->list, &regulator_supply_alias_list); map 2137 drivers/regulator/core.c struct regulator_supply_alias *map; map 2139 drivers/regulator/core.c map = regulator_find_supply_alias(dev, id); map 2140 drivers/regulator/core.c if (map) { map 2141 drivers/regulator/core.c list_del(&map->list); map 2142 drivers/regulator/core.c kfree(map); map 2922
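
The drivers/regulator/core.c entries above (map 1661-2142) implement supply aliasing as a linear walk over a global list keyed by (source device, supply name). A stripped-down sketch of that lookup-and-redirect pattern, with the struct and list renamed for the example and the real code's locking and error reporting left out:

#include <linux/device.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Simplified form of struct regulator_supply_alias. */
struct example_supply_alias {
        struct list_head list;
        struct device *src_dev;
        const char *src_supply;
        struct device *alias_dev;
        const char *alias_supply;
};

static LIST_HEAD(example_alias_list);

static struct example_supply_alias *example_find_alias(struct device *dev,
                                                       const char *supply)
{
        struct example_supply_alias *map;

        list_for_each_entry(map, &example_alias_list, list)
                if (map->src_dev == dev &&
                    strcmp(map->src_supply, supply) == 0)
                        return map;

        return NULL;
}

/* Redirect a (dev, supply) request to its alias before the real lookup. */
static void example_apply_alias(struct device **dev, const char **supply)
{
        struct example_supply_alias *map = example_find_alias(*dev, *supply);

        if (map) {
                *dev = map->alias_dev;
                *supply = map->alias_supply;
        }
}

Registration is just kzalloc() plus list_add(), and unregistration list_del() plus kfree(), exactly as the map 2103-2142 entries read.
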
drivers/regulator/core.c struct regmap *map = regulator->rdev->regmap; map 2924 drivers/regulator/core.c return map ? map : ERR_PTR(-EOPNOTSUPP); map 5408 drivers/regulator/core.c struct regulator_map *map; map 5410 drivers/regulator/core.c list_for_each_entry(map, &regulator_map_list, list) { map 5412 drivers/regulator/core.c rdev_get_name(map->regulator), map->dev_name, map 5413 drivers/regulator/core.c map->supply); map 70 drivers/regulator/max77650-regulator.c struct regmap *map; map 74 drivers/regulator/max77650-regulator.c map = rdev_get_regmap(rdev); map 76 drivers/regulator/max77650-regulator.c rv = regmap_read(map, rdesc->regB, &val); map 88 drivers/regulator/max77650-regulator.c struct regmap *map; map 91 drivers/regulator/max77650-regulator.c map = rdev_get_regmap(rdev); map 93 drivers/regulator/max77650-regulator.c return regmap_update_bits(map, rdesc->regB, map 101 drivers/regulator/max77650-regulator.c struct regmap *map; map 104 drivers/regulator/max77650-regulator.c map = rdev_get_regmap(rdev); map 106 drivers/regulator/max77650-regulator.c return regmap_update_bits(map, rdesc->regB, map 334 drivers/regulator/max77650-regulator.c struct regmap *map; map 349 drivers/regulator/max77650-regulator.c map = dev_get_regmap(parent, NULL); map 350 drivers/regulator/max77650-regulator.c if (!map) map 353 drivers/regulator/max77650-regulator.c rv = regmap_read(map, MAX77650_REG_CID, &val); map 1279 drivers/regulator/rk808-regulator.c struct regmap *map, map 1305 drivers/regulator/rk808-regulator.c ret = regmap_update_bits(map, RK808_IO_POL_REG, tmp, map 63 drivers/regulator/stw481x-vmmc.c ret = regmap_update_bits(stw481x->map, STW_CONF2, map 73 drivers/regulator/stw481x-vmmc.c config.regmap = stw481x->map; map 38 drivers/remoteproc/stm32_rproc.c struct regmap *map; map 352 drivers/remoteproc/stm32_rproc.c err = regmap_update_bits(hold_boot.map, hold_boot.reg, map 392 drivers/remoteproc/stm32_rproc.c if (ddata->pdds.map) { map 393 drivers/remoteproc/stm32_rproc.c err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg, map 436 drivers/remoteproc/stm32_rproc.c if (ddata->pdds.map) { map 437 drivers/remoteproc/stm32_rproc.c err = regmap_update_bits(ddata->pdds.map, ddata->pdds.reg, map 492 drivers/remoteproc/stm32_rproc.c syscon->map = syscon_regmap_lookup_by_phandle(np, prop); map 493 drivers/remoteproc/stm32_rproc.c if (IS_ERR(syscon->map)) { map 494 drivers/remoteproc/stm32_rproc.c err = PTR_ERR(syscon->map); map 495 drivers/remoteproc/stm32_rproc.c syscon->map = NULL; map 551 drivers/remoteproc/stm32_rproc.c err = regmap_read(tz.map, tz.reg, &tzen); map 16 drivers/reset/hisilicon/reset-hi3660.c struct regmap *map; map 30 drivers/reset/hisilicon/reset-hi3660.c return regmap_write(rc->map, offset, mask); map 32 drivers/reset/hisilicon/reset-hi3660.c return regmap_write(rc->map, offset + 4, mask); map 86 drivers/reset/hisilicon/reset-hi3660.c rc->map = syscon_regmap_lookup_by_phandle(np, "hisi,rst-syscon"); map 87 drivers/reset/hisilicon/reset-hi3660.c if (IS_ERR(rc->map)) { map 89 drivers/reset/hisilicon/reset-hi3660.c return PTR_ERR(rc->map); map 54 drivers/reset/reset-qcom-aoss.c const struct qcom_aoss_reset_map *map = &data->desc->resets[idx]; map 56 drivers/reset/reset-qcom-aoss.c writel(1, data->base + map->reg); map 66 drivers/reset/reset-qcom-aoss.c const struct qcom_aoss_reset_map *map = &data->desc->resets[idx]; map 68 drivers/reset/reset-qcom-aoss.c writel(0, data->base + map->reg); map 161 drivers/reset/sti/reset-syscfg.c struct regmap *map; map 165
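
Several of the entries above (stm32_rproc.c, reset-hi3660.c) resolve a regmap from a device-tree syscon phandle and then only touch it through regmap_write()/regmap_update_bits(). A minimal sketch of that access path; the property name, register offset and bit below are placeholders, not values taken from those drivers:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

/* Look up a shared system controller and set one control bit. */
static int example_syscon_set_bit(struct device_node *np)
{
        struct regmap *map;

        map = syscon_regmap_lookup_by_phandle(np, "example,syscon");
        if (IS_ERR(map))
                return PTR_ERR(map);

        return regmap_update_bits(map, 0x04, BIT(0), BIT(0));
}

Keeping only the struct regmap pointer, as the rc->map and syscon->map fields above do, lets several drivers share the same system-controller block without mapping it twice.
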
drivers/reset/sti/reset-syscfg.c map = syscon_regmap_lookup_by_compatible(compatible); map 166 drivers/reset/sti/reset-syscfg.c if (IS_ERR(map)) map 167 drivers/reset/sti/reset-syscfg.c return PTR_ERR(map); map 169 drivers/reset/sti/reset-syscfg.c f = devm_regmap_field_alloc(dev, map, data->channels[i].reset); map 178 drivers/reset/sti/reset-syscfg.c f = devm_regmap_field_alloc(dev, map, data->channels[i].ack); map 40 drivers/rtc/rtc-88pm80x.c struct regmap *map; map 53 drivers/rtc/rtc-88pm80x.c regmap_update_bits(info->map, PM800_RTC_CONTROL, mask | PM800_ALARM1_EN, map 64 drivers/rtc/rtc-88pm80x.c regmap_update_bits(info->map, PM800_RTC_CONTROL, map 67 drivers/rtc/rtc-88pm80x.c regmap_update_bits(info->map, PM800_RTC_CONTROL, map 104 drivers/rtc/rtc-88pm80x.c regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); map 110 drivers/rtc/rtc-88pm80x.c regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); map 129 drivers/rtc/rtc-88pm80x.c regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); map 139 drivers/rtc/rtc-88pm80x.c regmap_raw_write(info->map, PM800_RTC_EXPIRE2_1, buf, 4); map 151 drivers/rtc/rtc-88pm80x.c regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); map 156 drivers/rtc/rtc-88pm80x.c regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4); map 164 drivers/rtc/rtc-88pm80x.c regmap_read(info->map, PM800_RTC_CONTROL, &ret); map 178 drivers/rtc/rtc-88pm80x.c regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0); map 180 drivers/rtc/rtc-88pm80x.c regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4); map 186 drivers/rtc/rtc-88pm80x.c regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4); map 205 drivers/rtc/rtc-88pm80x.c regmap_raw_write(info->map, PM800_RTC_EXPIRE1_1, buf, 4); map 208 drivers/rtc/rtc-88pm80x.c regmap_update_bits(info->map, PM800_RTC_CONTROL, mask, mask); map 211 drivers/rtc/rtc-88pm80x.c regmap_update_bits(info->map, PM800_RTC_CONTROL, mask, map 272 drivers/rtc/rtc-88pm80x.c info->map = chip->regmap; map 273 drivers/rtc/rtc-88pm80x.c if (!info->map) { map 305 drivers/rtc/rtc-88pm80x.c regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_RTC1_USE_XO, map 80 drivers/rtc/rtc-ds1343.c struct regmap *map; map 101 drivers/rtc/rtc-ds1343.c return regmap_write(priv->map, DS1343_TRICKLE_REG, val); map 116 drivers/rtc/rtc-ds1343.c regmap_read(priv->map, DS1343_CONTROL_REG, &data); map 133 drivers/rtc/rtc-ds1343.c regmap_read(priv->map, DS1343_CONTROL_REG, &data); map 144 drivers/rtc/rtc-ds1343.c regmap_write(priv->map, DS1343_CONTROL_REG, data); map 157 drivers/rtc/rtc-ds1343.c return regmap_bulk_write(ds1343->map, DS1343_NVRAM + off, val, bytes); map 165 drivers/rtc/rtc-ds1343.c return regmap_bulk_read(ds1343->map, DS1343_NVRAM + off, val, bytes); map 175 drivers/rtc/rtc-ds1343.c regmap_read(priv->map, DS1343_TRICKLE_REG, &data); map 241 drivers/rtc/rtc-ds1343.c res = regmap_bulk_read(priv->map, DS1343_SECONDS_REG, buf, 7); map 261 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_SECONDS_REG, map 266 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_MINUTES_REG, map 271 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_HOURS_REG, map 276 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_DAY_REG, map 281 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_DATE_REG, map 286 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_MONTH_REG, map 293 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_YEAR_REG, map 308 drivers/rtc/rtc-ds1343.c res = regmap_read(priv->map, 
DS1343_CONTROL_REG, &control); map 312 drivers/rtc/rtc-ds1343.c res = regmap_read(priv->map, DS1343_STATUS_REG, &stat); map 319 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_CONTROL_REG, control); map 323 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_STATUS_REG, stat); map 336 drivers/rtc/rtc-ds1343.c res = regmap_bulk_write(priv->map, DS1343_ALM0_SEC_REG, buf, 4); map 342 drivers/rtc/rtc-ds1343.c res = regmap_write(priv->map, DS1343_CONTROL_REG, control); map 359 drivers/rtc/rtc-ds1343.c res = regmap_read(priv->map, DS1343_STATUS_REG, &stat); map 431 drivers/rtc/rtc-ds1343.c res = regmap_read(priv->map, DS1343_STATUS_REG, &stat); map 437 drivers/rtc/rtc-ds1343.c regmap_write(priv->map, DS1343_STATUS_REG, stat); map 439 drivers/rtc/rtc-ds1343.c res = regmap_read(priv->map, DS1343_CONTROL_REG, &control); map 444 drivers/rtc/rtc-ds1343.c regmap_write(priv->map, DS1343_CONTROL_REG, control); map 497 drivers/rtc/rtc-ds1343.c priv->map = devm_regmap_init_spi(spi, &config); map 499 drivers/rtc/rtc-ds1343.c if (IS_ERR(priv->map)) { map 501 drivers/rtc/rtc-ds1343.c return PTR_ERR(priv->map); map 504 drivers/rtc/rtc-ds1343.c res = regmap_read(priv->map, DS1343_SECONDS_REG, &data); map 508 drivers/rtc/rtc-ds1343.c regmap_read(priv->map, DS1343_CONTROL_REG, &data); map 511 drivers/rtc/rtc-ds1343.c regmap_write(priv->map, DS1343_CONTROL_REG, data); map 513 drivers/rtc/rtc-ds1343.c regmap_read(priv->map, DS1343_STATUS_REG, &data); map 515 drivers/rtc/rtc-ds1343.c regmap_write(priv->map, DS1343_STATUS_REG, data); map 47 drivers/rtc/rtc-ds1347.c struct regmap *map; map 51 drivers/rtc/rtc-ds1347.c map = spi_get_drvdata(spi); map 53 drivers/rtc/rtc-ds1347.c err = regmap_bulk_read(map, DS1347_CLOCK_BURST, buf, 8); map 71 drivers/rtc/rtc-ds1347.c struct regmap *map; map 74 drivers/rtc/rtc-ds1347.c map = spi_get_drvdata(spi); map 91 drivers/rtc/rtc-ds1347.c return regmap_bulk_write(map, DS1347_CLOCK_BURST, buf, 8); map 103 drivers/rtc/rtc-ds1347.c struct regmap *map; map 119 drivers/rtc/rtc-ds1347.c map = devm_regmap_init_spi(spi, &config); map 121 drivers/rtc/rtc-ds1347.c if (IS_ERR(map)) { map 123 drivers/rtc/rtc-ds1347.c return PTR_ERR(map); map 126 drivers/rtc/rtc-ds1347.c spi_set_drvdata(spi, map); map 129 drivers/rtc/rtc-ds1347.c res = regmap_read(map, DS1347_SECONDS_REG, &data); map 134 drivers/rtc/rtc-ds1347.c regmap_read(map, DS1347_CONTROL_REG, &data); map 136 drivers/rtc/rtc-ds1347.c regmap_write(map, DS1347_CONTROL_REG, data); map 140 drivers/rtc/rtc-ds1347.c regmap_read(map, DS1347_STATUS_REG, &data); map 142 drivers/rtc/rtc-ds1347.c regmap_write(map, DS1347_STATUS_REG, data); map 145 drivers/rtc/rtc-ds1347.c regmap_read(map, DS1347_CONTROL_REG, &data); map 148 drivers/rtc/rtc-ds1347.c regmap_read(map, DS1347_STATUS_REG, &data); map 70 drivers/rtc/rtc-max77686.c const unsigned int *map; map 188 drivers/rtc/rtc-max77686.c .map = max77686_map, map 199 drivers/rtc/rtc-max77686.c .map = max77686_map, map 248 drivers/rtc/rtc-max77686.c .map = max77802_map, map 326 drivers/rtc/rtc-max77686.c info->drv_data->map[REG_RTC_UPDATE0], map 352 drivers/rtc/rtc-max77686.c info->drv_data->map[REG_RTC_SEC], map 379 drivers/rtc/rtc-max77686.c info->drv_data->map[REG_RTC_SEC], map 398 drivers/rtc/rtc-max77686.c const unsigned int *map = info->drv_data->map; map 407 drivers/rtc/rtc-max77686.c ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC], map 419 drivers/rtc/rtc-max77686.c if (map[REG_RTC_AE1] == REG_RTC_NONE) { map 426 drivers/rtc/rtc-max77686.c ret = 
regmap_read(info->rtc_regmap, map[REG_RTC_AE1], &val); map 470 drivers/rtc/rtc-max77686.c const unsigned int *map = info->drv_data->map; map 480 drivers/rtc/rtc-max77686.c if (map[REG_RTC_AE1] == REG_RTC_NONE) { map 487 drivers/rtc/rtc-max77686.c ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1], 0); map 489 drivers/rtc/rtc-max77686.c ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC], map 501 drivers/rtc/rtc-max77686.c ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC], map 520 drivers/rtc/rtc-max77686.c const unsigned int *map = info->drv_data->map; map 530 drivers/rtc/rtc-max77686.c ret = regmap_write(info->rtc_regmap, map[REG_RTC_AE1], map 533 drivers/rtc/rtc-max77686.c ret = regmap_bulk_read(info->rtc_regmap, map[REG_ALARM1_SEC], map 553 drivers/rtc/rtc-max77686.c ret = regmap_bulk_write(info->rtc_regmap, map[REG_ALARM1_SEC], map 584 drivers/rtc/rtc-max77686.c info->drv_data->map[REG_ALARM1_SEC], map 650 drivers/rtc/rtc-max77686.c info->drv_data->map[REG_RTC_CONTROLM], map 109 drivers/rtc/rtc-pcf2123.c struct regmap *map; map 126 drivers/rtc/rtc-pcf2123.c ret = regmap_read(pcf2123->map, PCF2123_REG_OFFSET, &reg); map 172 drivers/rtc/rtc-pcf2123.c return regmap_write(pcf2123->map, PCF2123_REG_OFFSET, (unsigned int)reg); map 181 drivers/rtc/rtc-pcf2123.c ret = regmap_bulk_read(pcf2123->map, PCF2123_REG_SC, rxbuf, map 213 drivers/rtc/rtc-pcf2123.c ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_STOP); map 226 drivers/rtc/rtc-pcf2123.c ret = regmap_bulk_write(pcf2123->map, PCF2123_REG_SC, txbuf, map 232 drivers/rtc/rtc-pcf2123.c ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_CLEAR); map 243 drivers/rtc/rtc-pcf2123.c return regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AIE, map 254 drivers/rtc/rtc-pcf2123.c ret = regmap_bulk_read(pcf2123->map, PCF2123_REG_ALRM_MN, rxbuf, map 266 drivers/rtc/rtc-pcf2123.c ret = regmap_read(pcf2123->map, PCF2123_REG_CTRL2, &val); map 284 drivers/rtc/rtc-pcf2123.c ret = regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AIE, 0); map 289 drivers/rtc/rtc-pcf2123.c ret = regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AF, 0); map 299 drivers/rtc/rtc-pcf2123.c ret = regmap_bulk_write(pcf2123->map, PCF2123_REG_ALRM_MN, txbuf, map 315 drivers/rtc/rtc-pcf2123.c regmap_read(pcf2123->map, PCF2123_REG_CTRL2, &val); map 322 drivers/rtc/rtc-pcf2123.c regmap_update_bits(pcf2123->map, PCF2123_REG_CTRL2, CTRL2_AF, 0); map 338 drivers/rtc/rtc-pcf2123.c ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_SW_RESET); map 344 drivers/rtc/rtc-pcf2123.c ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_STOP); map 350 drivers/rtc/rtc-pcf2123.c ret = regmap_read(pcf2123->map, PCF2123_REG_CTRL1, &val); map 359 drivers/rtc/rtc-pcf2123.c ret = regmap_write(pcf2123->map, PCF2123_REG_CTRL1, CTRL1_CLEAR); map 390 drivers/rtc/rtc-pcf2123.c pcf2123->map = devm_regmap_init_spi(spi, &pcf2123_regmap_config); map 391 drivers/rtc/rtc-pcf2123.c if (IS_ERR(pcf2123->map)) { map 393 drivers/rtc/rtc-pcf2123.c return PTR_ERR(pcf2123->map); map 361 drivers/s390/char/fs3270.c iocb.map = 0; map 90 drivers/s390/char/raw3270.h short map; map 495 drivers/s390/cio/chsc.c u8 map[32]; map 513 drivers/s390/cio/chsc.c if (!chp_test_bit(data->map, num)) map 422 drivers/scsi/aacraid/commsup.c int map = 0; map 433 drivers/scsi/aacraid/commsup.c map = 1; map 445 drivers/scsi/aacraid/commsup.c map = 0; map 451 drivers/scsi/aacraid/commsup.c if (map) map 977 drivers/scsi/aic7xxx/aic79xx_osm.c void* vaddr, bus_dmamap_t map) map 980
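
The SPI RTC entries above (rtc-pcf2123.c here, rtc-ds1343.c and rtc-ds1347.c just before) all create their regmap with devm_regmap_init_spi() at probe time and then go through regmap for every register access. A bare-bones sketch of that probe path; the register widths and the probed register address are example values only:

#include <linux/err.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>

static const struct regmap_config example_regmap_config = {
        .reg_bits = 8,
        .val_bits = 8,
};

static int example_spi_probe(struct spi_device *spi)
{
        struct regmap *map;
        unsigned int data;
        int ret;

        map = devm_regmap_init_spi(spi, &example_regmap_config);
        if (IS_ERR(map))
                return PTR_ERR(map);

        spi_set_drvdata(spi, map);

        /* A first read doubles as a basic presence check for the chip. */
        ret = regmap_read(map, 0x00, &data);
        if (ret)
                return ret;

        return 0;
}

Because the regmap is devm-managed, no explicit teardown is needed in the remove path, which matches how the drivers listed above are structured.
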
drivers/scsi/aic7xxx/aic79xx_osm.c vaddr, map); map 984 drivers/scsi/aic7xxx/aic79xx_osm.c ahd_dmamap_load(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map, map 994 drivers/scsi/aic7xxx/aic79xx_osm.c stack_sg.ds_addr = map; map 1001 drivers/scsi/aic7xxx/aic79xx_osm.c ahd_dmamap_destroy(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) map 1006 drivers/scsi/aic7xxx/aic79xx_osm.c ahd_dmamap_unload(struct ahd_softc *ahd, bus_dma_tag_t dmat, bus_dmamap_t map) map 872 drivers/scsi/aic7xxx/aic7xxx_osm.c void* vaddr, bus_dmamap_t map) map 874 drivers/scsi/aic7xxx/aic7xxx_osm.c dma_free_coherent(ahc->dev, dmat->maxsize, vaddr, map); map 878 drivers/scsi/aic7xxx/aic7xxx_osm.c ahc_dmamap_load(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map, map 888 drivers/scsi/aic7xxx/aic7xxx_osm.c stack_sg.ds_addr = map; map 895 drivers/scsi/aic7xxx/aic7xxx_osm.c ahc_dmamap_destroy(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) map 900 drivers/scsi/aic7xxx/aic7xxx_osm.c ahc_dmamap_unload(struct ahc_softc *ahc, bus_dma_tag_t dmat, bus_dmamap_t map) map 1688 drivers/scsi/hpsa.c struct raid_map_data *map = &logical_drive->raid_map; map 1689 drivers/scsi/hpsa.c struct raid_map_disk_data *dd = &map->data[0]; map 1691 drivers/scsi/hpsa.c int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + map 1692 drivers/scsi/hpsa.c le16_to_cpu(map->metadata_disks_per_row); map 1693 drivers/scsi/hpsa.c int nraid_map_entries = le16_to_cpu(map->row_cnt) * map 1694 drivers/scsi/hpsa.c le16_to_cpu(map->layout_map_count) * map 1696 drivers/scsi/hpsa.c int nphys_disk = le16_to_cpu(map->layout_map_count) * map 3219 drivers/scsi/hpsa.c int map, row, col; map 3261 drivers/scsi/hpsa.c for (map = 0; map < map_cnt; map++) { map 3262 drivers/scsi/hpsa.c dev_info(&h->pdev->dev, "Map%u:\n", map); map 4824 drivers/scsi/hpsa.c struct raid_map_data *map = &dev->raid_map; map 4828 drivers/scsi/hpsa.c if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON)) map 4831 drivers/scsi/hpsa.c cp->dekindex = map->dekindex; map 4867 drivers/scsi/hpsa.c if (le32_to_cpu(map->volume_blk_size) != 512) map 4869 drivers/scsi/hpsa.c le32_to_cpu(map->volume_blk_size)/512; map 5043 drivers/scsi/hpsa.c static void raid_map_helper(struct raid_map_data *map, map 5048 drivers/scsi/hpsa.c *map_index %= le16_to_cpu(map->data_disks_per_row); map 5054 drivers/scsi/hpsa.c le16_to_cpu(map->data_disks_per_row); map 5057 drivers/scsi/hpsa.c if (*current_group < le16_to_cpu(map->layout_map_count) - 1) { map 5059 drivers/scsi/hpsa.c *map_index += le16_to_cpu(map->data_disks_per_row); map 5063 drivers/scsi/hpsa.c *map_index %= le16_to_cpu(map->data_disks_per_row); map 5077 drivers/scsi/hpsa.c struct raid_map_data *map = &dev->raid_map; map 5078 drivers/scsi/hpsa.c struct raid_map_disk_data *dd = &map->data[0]; map 5183 drivers/scsi/hpsa.c if (last_block >= le64_to_cpu(map->volume_blk_cnt) || map 5188 drivers/scsi/hpsa.c blocks_per_row = le16_to_cpu(map->data_disks_per_row) * map 5189 drivers/scsi/hpsa.c le16_to_cpu(map->strip_size); map 5190 drivers/scsi/hpsa.c strip_size = le16_to_cpu(map->strip_size); map 5220 drivers/scsi/hpsa.c total_disks_per_row = le16_to_cpu(map->data_disks_per_row) + map 5221 drivers/scsi/hpsa.c le16_to_cpu(map->metadata_disks_per_row); map 5222 drivers/scsi/hpsa.c map_row = ((u32)(first_row >> map->parity_rotation_shift)) % map 5223 drivers/scsi/hpsa.c le16_to_cpu(map->row_cnt); map 5234 drivers/scsi/hpsa.c BUG_ON(le16_to_cpu(map->layout_map_count) != 2); map 5236 drivers/scsi/hpsa.c map_index += 
le16_to_cpu(map->data_disks_per_row); map 5243 drivers/scsi/hpsa.c BUG_ON(le16_to_cpu(map->layout_map_count) != 3); map 5246 drivers/scsi/hpsa.c raid_map_helper(map, offload_to_mirror, map 5251 drivers/scsi/hpsa.c le16_to_cpu(map->layout_map_count) - 1) map 5261 drivers/scsi/hpsa.c if (le16_to_cpu(map->layout_map_count) <= 1) map 5266 drivers/scsi/hpsa.c le16_to_cpu(map->strip_size) * map 5267 drivers/scsi/hpsa.c le16_to_cpu(map->data_disks_per_row); map 5270 drivers/scsi/hpsa.c le16_to_cpu(map->layout_map_count); map 5318 drivers/scsi/hpsa.c (void) do_div(tmpdiv, map->strip_size); map 5321 drivers/scsi/hpsa.c (void) do_div(tmpdiv, map->strip_size); map 5333 drivers/scsi/hpsa.c r5or6_first_row_offset / le16_to_cpu(map->strip_size); map 5335 drivers/scsi/hpsa.c r5or6_last_row_offset / le16_to_cpu(map->strip_size); map 5341 drivers/scsi/hpsa.c map_row = ((u32)(first_row >> map->parity_rotation_shift)) % map 5342 drivers/scsi/hpsa.c le16_to_cpu(map->row_cnt); map 5345 drivers/scsi/hpsa.c (le16_to_cpu(map->row_cnt) * total_disks_per_row)) + map 5360 drivers/scsi/hpsa.c disk_block = le64_to_cpu(map->disk_starting_blk) + map 5361 drivers/scsi/hpsa.c first_row * le16_to_cpu(map->strip_size) + map 5363 drivers/scsi/hpsa.c le16_to_cpu(map->strip_size)); map 5367 drivers/scsi/hpsa.c if (map->phys_blk_shift) { map 5368 drivers/scsi/hpsa.c disk_block <<= map->phys_blk_shift; map 5369 drivers/scsi/hpsa.c disk_block_cnt <<= map->phys_blk_shift; map 10986 drivers/scsi/lpfc/lpfc_init.c struct lpfc_vector_map_info *map; map 11020 drivers/scsi/lpfc/lpfc_init.c map = &phba->sli4_hba.cpu_map[i]; map 11021 drivers/scsi/lpfc/lpfc_init.c if (!(map->irq == pci_irq_vector(phba->pcidev, idx))) map 11023 drivers/scsi/lpfc/lpfc_init.c eq = phba->sli4_hba.hdwq[map->hdwq].hba_eq; map 2462 drivers/scsi/megaraid/megaraid_sas.h struct MR_LD_VF_MAP map[1]; map 2488 drivers/scsi/megaraid/megaraid_sas.h struct MR_LD_VF_MAP_111 map[MAX_LOGICAL_DRIVES]; map 2650 drivers/scsi/megaraid/megaraid_sas.h struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN); map 2651 drivers/scsi/megaraid/megaraid_sas.h u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map); map 2652 drivers/scsi/megaraid/megaraid_sas.h struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); map 2653 drivers/scsi/megaraid/megaraid_sas.h u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map); map 2654 drivers/scsi/megaraid/megaraid_sas.h u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map); map 2655 drivers/scsi/megaraid/megaraid_sas.h __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map); map 2656 drivers/scsi/megaraid/megaraid_sas.h u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map); map 2662 drivers/scsi/megaraid/megaraid_sas.h void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map, map 2371 drivers/scsi/megaraid/megaraid_sas_base.c if (instance->vf_affiliation_111->map[ld].policy[thisVf] != map 2372 drivers/scsi/megaraid/megaraid_sas_base.c new_affiliation_111->map[ld].policy[thisVf]) { map 2484 drivers/scsi/megaraid/megaraid_sas_base.c newmap = new_affiliation->map; map 2485 drivers/scsi/megaraid/megaraid_sas_base.c savedmap = instance->vf_affiliation->map; map 2513 drivers/scsi/megaraid/megaraid_sas_base.c newmap = new_affiliation->map; map 2514 drivers/scsi/megaraid/megaraid_sas_base.c savedmap = instance->vf_affiliation->map; map 57 drivers/scsi/megaraid/megaraid_sas_fp.c #define SPAN_ROW_SIZE(map, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowSize) map 58 
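
The megaraid_sas.h prototypes above (map 2650-2656) and the megaraid_sas_fp.c definitions that follow are thin accessors over a firmware-supplied RAID map whose multi-byte fields are little-endian; each getter converts with le16_to_cpu()/le32_to_cpu() so callers only ever see CPU-endian values. A cut-down illustration with a made-up layout (the real struct MR_DRV_RAID_MAP_ALL is far larger):

#include <asm/byteorder.h>
#include <linux/types.h>

/* Hypothetical two-level map, a stand-in for the firmware structure. */
struct example_span_map {
        __le16 array_ref;
        __le16 strip_size;
};

struct example_raid_map {
        __le16 ld_count;
        struct example_span_map span[8];
};

/* Accessors convert on the way out, like MR_GetLDTgtId()/MR_ArPdGet(). */
static u16 example_ld_count(const struct example_raid_map *map)
{
        return le16_to_cpu(map->ld_count);
}

static u16 example_span_array_ref(const struct example_raid_map *map,
                                  u32 span)
{
        return le16_to_cpu(map->span[span].array_ref);
}

Keeping the endian conversion inside the accessors is what lets the rest of the fast-path code (the row/strip arithmetic further down) work on plain CPU-endian integers.
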
drivers/scsi/megaraid/megaraid_sas_fp.c #define SPAN_ROW_DATA_SIZE(map_, ld, index_) (MR_LdSpanPtrGet(ld, index_, map)->spanRowDataSize) map 62 drivers/scsi/megaraid/megaraid_sas_fp.c static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, map 66 drivers/scsi/megaraid/megaraid_sas_fp.c struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map); map 68 drivers/scsi/megaraid/megaraid_sas_fp.c u64 strip, struct MR_DRV_RAID_MAP_ALL *map); map 102 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) map 104 drivers/scsi/megaraid/megaraid_sas_fp.c return &map->raidMap.ldSpanMap[ld].ldRaid; map 109 drivers/scsi/megaraid/megaraid_sas_fp.c *map) map 111 drivers/scsi/megaraid/megaraid_sas_fp.c return &map->raidMap.ldSpanMap[ld].spanBlock[0]; map 114 drivers/scsi/megaraid/megaraid_sas_fp.c static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map) map 116 drivers/scsi/megaraid/megaraid_sas_fp.c return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; map 119 drivers/scsi/megaraid/megaraid_sas_fp.c u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map) map 121 drivers/scsi/megaraid/megaraid_sas_fp.c return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); map 124 drivers/scsi/megaraid/megaraid_sas_fp.c u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map) map 126 drivers/scsi/megaraid/megaraid_sas_fp.c return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); map 129 drivers/scsi/megaraid/megaraid_sas_fp.c __le16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) map 131 drivers/scsi/megaraid/megaraid_sas_fp.c return map->raidMap.devHndlInfo[pd].curDevHdl; map 134 drivers/scsi/megaraid/megaraid_sas_fp.c static u8 MR_PdInterfaceTypeGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map) map 136 drivers/scsi/megaraid/megaraid_sas_fp.c return map->raidMap.devHndlInfo[pd].interfaceType; map 139 drivers/scsi/megaraid/megaraid_sas_fp.c u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map) map 141 drivers/scsi/megaraid/megaraid_sas_fp.c return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId); map 144 drivers/scsi/megaraid/megaraid_sas_fp.c u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map) map 146 drivers/scsi/megaraid/megaraid_sas_fp.c return map->raidMap.ldTgtIdToLd[ldTgtId]; map 150 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_DRV_RAID_MAP_ALL *map) map 152 drivers/scsi/megaraid/megaraid_sas_fp.c return &map->raidMap.ldSpanMap[ld].spanBlock[span].span; map 371 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_DRV_RAID_MAP_ALL *map) map 373 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map); map 375 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 422 drivers/scsi/megaraid/megaraid_sas_fp.c u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map) map 425 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 441 drivers/scsi/megaraid/megaraid_sas_fp.c if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. map 443 drivers/scsi/megaraid/megaraid_sas_fp.c quad = &map->raidMap.ldSpanMap[ld]. map 485 drivers/scsi/megaraid/megaraid_sas_fp.c u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) map 488 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 508 drivers/scsi/megaraid/megaraid_sas_fp.c if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. 
map 542 drivers/scsi/megaraid/megaraid_sas_fp.c u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map) map 545 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 561 drivers/scsi/megaraid/megaraid_sas_fp.c if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. map 563 drivers/scsi/megaraid/megaraid_sas_fp.c quad = &map->raidMap.ldSpanMap[ld]. map 603 drivers/scsi/megaraid/megaraid_sas_fp.c u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map) map 606 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 624 drivers/scsi/megaraid/megaraid_sas_fp.c if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. map 647 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_DRV_RAID_MAP_ALL *map) map 649 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 657 drivers/scsi/megaraid/megaraid_sas_fp.c arm = mega_mod64(stripe, SPAN_ROW_SIZE(map, ld, span)); map 661 drivers/scsi/megaraid/megaraid_sas_fp.c arm = get_arm_from_strip(instance, ld, stripe, map); map 691 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_DRV_RAID_MAP_ALL *map) map 693 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 713 drivers/scsi/megaraid/megaraid_sas_fp.c logArm = get_arm_from_strip(instance, ld, stripRow, map); map 716 drivers/scsi/megaraid/megaraid_sas_fp.c rowMod = mega_mod64(row, SPAN_ROW_SIZE(map, ld, span)); map 717 drivers/scsi/megaraid/megaraid_sas_fp.c armQ = SPAN_ROW_SIZE(map, ld, span) - 1 - rowMod; map 719 drivers/scsi/megaraid/megaraid_sas_fp.c if (arm >= SPAN_ROW_SIZE(map, ld, span)) map 720 drivers/scsi/megaraid/megaraid_sas_fp.c arm -= SPAN_ROW_SIZE(map, ld, span); map 724 drivers/scsi/megaraid/megaraid_sas_fp.c physArm = get_arm(instance, ld, span, stripRow, map); map 728 drivers/scsi/megaraid/megaraid_sas_fp.c arRef = MR_LdSpanArrayGet(ld, span, map); map 729 drivers/scsi/megaraid/megaraid_sas_fp.c pd = MR_ArPdGet(arRef, physArm, map); map 732 drivers/scsi/megaraid/megaraid_sas_fp.c *pDevHandle = MR_PdDevHandleGet(pd, map); map 733 drivers/scsi/megaraid/megaraid_sas_fp.c *pPdInterface = MR_PdInterfaceTypeGet(pd, map); map 738 drivers/scsi/megaraid/megaraid_sas_fp.c r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map); map 741 drivers/scsi/megaraid/megaraid_sas_fp.c MR_PdDevHandleGet(r1_alt_pd, map); map 751 drivers/scsi/megaraid/megaraid_sas_fp.c pd = MR_ArPdGet(arRef, physArm, map); map 753 drivers/scsi/megaraid/megaraid_sas_fp.c *pDevHandle = MR_PdDevHandleGet(pd, map); map 754 drivers/scsi/megaraid/megaraid_sas_fp.c *pPdInterface = MR_PdInterfaceTypeGet(pd, map); map 759 drivers/scsi/megaraid/megaraid_sas_fp.c *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); map 794 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_DRV_RAID_MAP_ALL *map) map 796 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 830 drivers/scsi/megaraid/megaraid_sas_fp.c map); map 837 drivers/scsi/megaraid/megaraid_sas_fp.c span = (u8)MR_GetSpanBlock(ld, row, pdBlock, map); map 843 drivers/scsi/megaraid/megaraid_sas_fp.c arRef = MR_LdSpanArrayGet(ld, span, map); map 844 drivers/scsi/megaraid/megaraid_sas_fp.c pd = MR_ArPdGet(arRef, physArm, map); /* Get the pd */ map 848 drivers/scsi/megaraid/megaraid_sas_fp.c *pDevHandle = MR_PdDevHandleGet(pd, map); map 849 drivers/scsi/megaraid/megaraid_sas_fp.c *pPdInterface = MR_PdInterfaceTypeGet(pd, map); map 854 drivers/scsi/megaraid/megaraid_sas_fp.c r1_alt_pd = 
MR_ArPdGet(arRef, physArm + 1, map); map 857 drivers/scsi/megaraid/megaraid_sas_fp.c MR_PdDevHandleGet(r1_alt_pd, map); map 868 drivers/scsi/megaraid/megaraid_sas_fp.c pd = MR_ArPdGet(arRef, physArm, map); map 871 drivers/scsi/megaraid/megaraid_sas_fp.c *pDevHandle = MR_PdDevHandleGet(pd, map); map 872 drivers/scsi/megaraid/megaraid_sas_fp.c *pPdInterface = MR_PdInterfaceTypeGet(pd, map); map 877 drivers/scsi/megaraid/megaraid_sas_fp.c *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); map 908 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_DRV_RAID_MAP_ALL *map) map 910 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); map 931 drivers/scsi/megaraid/megaraid_sas_fp.c span = (u8)MR_GetSpanBlock(ld, rowNum, pdBlock, map); map 976 drivers/scsi/megaraid/megaraid_sas_fp.c struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN) map 1002 drivers/scsi/megaraid/megaraid_sas_fp.c ld = MR_TargetIdToLdGet(ldTgtId, map); map 1003 drivers/scsi/megaraid/megaraid_sas_fp.c raid = MR_LdRaidGet(ld, map); map 1012 drivers/scsi/megaraid/megaraid_sas_fp.c if (MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize == 0) map 1021 drivers/scsi/megaraid/megaraid_sas_fp.c MR_LdSpanPtrGet(ld, 0, map)->spanRowDataSize); map 1042 drivers/scsi/megaraid/megaraid_sas_fp.c start_row = get_row_from_strip(instance, ld, start_strip, map); map 1043 drivers/scsi/megaraid/megaraid_sas_fp.c endRow = get_row_from_strip(instance, ld, endStrip, map); map 1056 drivers/scsi/megaraid/megaraid_sas_fp.c ld, start_row, pdBlock, map); map 1133 drivers/scsi/megaraid/megaraid_sas_fp.c if (start_strip == (get_strip_from_row(instance, ld, start_row, map) + map 1134 drivers/scsi/megaraid/megaraid_sas_fp.c SPAN_ROW_DATA_SIZE(map, ld, startlba_span) - 1)) { map 1148 drivers/scsi/megaraid/megaraid_sas_fp.c if (endStrip == get_strip_from_row(instance, ld, endRow, map)) map 1157 drivers/scsi/megaraid/megaraid_sas_fp.c map->raidMap.fpPdIoTimeoutSec); map 1175 drivers/scsi/megaraid/megaraid_sas_fp.c map); map 1185 drivers/scsi/megaraid/megaraid_sas_fp.c io_info, pRAID_Context, map) : map 1188 drivers/scsi/megaraid/megaraid_sas_fp.c pRAID_Context, map); map 1200 drivers/scsi/megaraid/megaraid_sas_fp.c pRAID_Context, map) : map 1203 drivers/scsi/megaraid/megaraid_sas_fp.c io_info, pRAID_Context, map); map 1222 drivers/scsi/megaraid/megaraid_sas_fp.c void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map, map 1236 drivers/scsi/megaraid/megaraid_sas_fp.c ld = MR_TargetIdToLdGet(ldCount, map); map 1239 drivers/scsi/megaraid/megaraid_sas_fp.c raid = MR_LdRaidGet(ld, map); map 1242 drivers/scsi/megaraid/megaraid_sas_fp.c if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. map 1247 drivers/scsi/megaraid/megaraid_sas_fp.c quad = &map->raidMap.ldSpanMap[ld]. map 1255 drivers/scsi/megaraid/megaraid_sas_fp.c if (le32_to_cpu(map->raidMap.ldSpanMap[ld]. 
map 1263 drivers/scsi/megaraid/megaraid_sas_fp.c (ld, count, map)->spanRowDataSize; map 1437 drivers/scsi/megaraid/megaraid_sas_fusion.c struct MR_DRV_RAID_MAP_ALL *map; map 1457 drivers/scsi/megaraid/megaraid_sas_fusion.c map = fusion->ld_drv_map[instance->map_id & 1]; map 1459 drivers/scsi/megaraid/megaraid_sas_fusion.c num_lds = le16_to_cpu(map->raidMap.ldCount); map 1474 drivers/scsi/megaraid/megaraid_sas_fusion.c raid = MR_LdRaidGet(i, map); map 1475 drivers/scsi/megaraid/megaraid_sas_fusion.c ld_sync->targetId = MR_GetLDTgtId(i, map); map 3508 drivers/scsi/qla2xxx/qla_iocb.c int map, pos; map 3520 drivers/scsi/qla2xxx/qla_iocb.c map = (sp->u.iocb_cmd.u.ctrlvp.vp_index - 1) / 8; map 3522 drivers/scsi/qla2xxx/qla_iocb.c vce->vp_idx_map[map] |= 1 << pos; map 7123 drivers/scsi/qla2xxx/qla_os.c struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; map 5102 drivers/scsi/scsi_debug.c static DRIVER_ATTR_RO(map); map 1779 drivers/scsi/scsi_lib.c return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); map 5814 drivers/scsi/smartpqi/smartpqi_init.c return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT], map 706 drivers/scsi/virtio_scsi.c struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; map 469 drivers/sh/clk/cpg.c struct clk_mapping *map; map 474 drivers/sh/clk/cpg.c map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL); map 475 drivers/sh/clk/cpg.c if (!map) { map 481 drivers/sh/clk/cpg.c map->phys = (phys_addr_t)clks[i].enable_reg; map 482 drivers/sh/clk/cpg.c map->len = 8; map 486 drivers/sh/clk/cpg.c clks[i].mapping = map; map 69 drivers/soc/aspeed/aspeed-lpc-ctrl.c struct aspeed_lpc_ctrl_mapping map; map 74 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (copy_from_user(&map, p, sizeof(map))) map 77 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (map.flags != 0) map 83 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (map.window_type != ASPEED_LPC_CTRL_WINDOW_MEMORY) map 87 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (map.window_id != 0) map 96 drivers/soc/aspeed/aspeed-lpc-ctrl.c map.size = lpc_ctrl->mem_size; map 98 drivers/soc/aspeed/aspeed-lpc-ctrl.c return copy_to_user(p, &map, sizeof(map)) ? 
-EFAULT : 0; map 121 drivers/soc/aspeed/aspeed-lpc-ctrl.c if ((map.size & 0x0000ffff) || (map.offset & 0x0000ffff)) map 128 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (map.offset & (map.size - 1)) map 131 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) { map 138 drivers/soc/aspeed/aspeed-lpc-ctrl.c } else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) { map 151 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (map.offset + map.size < map.offset || map 152 drivers/soc/aspeed/aspeed-lpc-ctrl.c map.offset + map.size > size) map 155 drivers/soc/aspeed/aspeed-lpc-ctrl.c if (map.size == 0 || map.size > size) map 158 drivers/soc/aspeed/aspeed-lpc-ctrl.c addr += map.offset; map 170 drivers/soc/aspeed/aspeed-lpc-ctrl.c (addr | (map.addr >> 16))); map 175 drivers/soc/aspeed/aspeed-lpc-ctrl.c (~(map.size - 1)) | ((map.size >> 16) - 1)); map 129 drivers/soc/aspeed/aspeed-p2a-ctrl.c struct aspeed_p2a_ctrl_mapping *map) map 135 drivers/soc/aspeed/aspeed-p2a-ctrl.c base = map->addr; map 136 drivers/soc/aspeed/aspeed-p2a-ctrl.c end = map->addr + (map->length - 1); map 178 drivers/soc/aspeed/aspeed-p2a-ctrl.c struct aspeed_p2a_ctrl_mapping map; map 180 drivers/soc/aspeed/aspeed-p2a-ctrl.c if (copy_from_user(&map, arg, sizeof(map))) map 190 drivers/soc/aspeed/aspeed-p2a-ctrl.c if (map.flags == ASPEED_P2A_CTRL_READ_ONLY) { map 199 drivers/soc/aspeed/aspeed-p2a-ctrl.c } else if (map.flags == ASPEED_P2A_CTRL_READWRITE) { map 201 drivers/soc/aspeed/aspeed-p2a-ctrl.c if (!aspeed_p2a_region_acquire(priv, ctrl, &map)) { map 216 drivers/soc/aspeed/aspeed-p2a-ctrl.c map.flags = 0; map 217 drivers/soc/aspeed/aspeed-p2a-ctrl.c map.addr = ctrl->mem_base; map 218 drivers/soc/aspeed/aspeed-p2a-ctrl.c map.length = ctrl->mem_size; map 220 drivers/soc/aspeed/aspeed-p2a-ctrl.c return copy_to_user(arg, &map, sizeof(map)) ? 
-EFAULT : 0; map 280 drivers/soc/fsl/qe/qe_ic.c .map = qe_ic_host_map, map 41 drivers/soc/gemini/soc-gemini.c struct regmap *map; map 50 drivers/soc/gemini/soc-gemini.c map = syscon_regmap_lookup_by_compatible("cortina,gemini-syscon"); map 51 drivers/soc/gemini/soc-gemini.c if (IS_ERR(map)) map 52 drivers/soc/gemini/soc-gemini.c return PTR_ERR(map); map 53 drivers/soc/gemini/soc-gemini.c ret = regmap_read(map, GLOBAL_WORD_ID, &rev); map 61 drivers/soc/gemini/soc-gemini.c regmap_update_bits(map, map 115 drivers/soc/imx/gpcv2.c u32 map; map 143 drivers/soc/imx/gpcv2.c domain->bits.map, domain->bits.map); map 207 drivers/soc/imx/gpcv2.c domain->bits.map, 0); map 228 drivers/soc/imx/gpcv2.c .map = IMX7_MIPI_PHY_A_CORE_DOMAIN, map 240 drivers/soc/imx/gpcv2.c .map = IMX7_PCIE_PHY_A_CORE_DOMAIN, map 252 drivers/soc/imx/gpcv2.c .map = IMX7_USB_HSIC_PHY_A_CORE_DOMAIN, map 288 drivers/soc/imx/gpcv2.c .map = IMX8M_MIPI_A53_DOMAIN, map 299 drivers/soc/imx/gpcv2.c .map = IMX8M_PCIE1_A53_DOMAIN, map 310 drivers/soc/imx/gpcv2.c .map = IMX8M_OTG1_A53_DOMAIN, map 321 drivers/soc/imx/gpcv2.c .map = IMX8M_OTG2_A53_DOMAIN, map 332 drivers/soc/imx/gpcv2.c .map = IMX8M_DDR2_A53_DOMAIN, map 343 drivers/soc/imx/gpcv2.c .map = IMX8M_GPU_A53_DOMAIN, map 355 drivers/soc/imx/gpcv2.c .map = IMX8M_VPU_A53_DOMAIN, map 367 drivers/soc/imx/gpcv2.c .map = IMX8M_DISP_A53_DOMAIN, map 379 drivers/soc/imx/gpcv2.c .map = IMX8M_MIPI_CSI1_A53_DOMAIN, map 390 drivers/soc/imx/gpcv2.c .map = IMX8M_MIPI_CSI2_A53_DOMAIN, map 401 drivers/soc/imx/gpcv2.c .map = IMX8M_PCIE2_A53_DOMAIN, map 301 drivers/soc/qcom/smp2p.c .map = smp2p_irq_map, map 331 drivers/soc/qcom/smsm.c .map = smsm_irq_map, map 352 drivers/staging/android/ion/ion.c .map = ion_dma_buf_kmap, map 62 drivers/staging/gdm724x/gdm_lte.c static int gdm_lte_set_config(struct net_device *dev, struct ifmap *map) map 11 drivers/staging/media/ipu3/ipu3-css-pool.c struct imgu_css_map *map, size_t size) map 13 drivers/staging/media/ipu3/ipu3-css-pool.c if (map->size < size && map->vaddr) { map 15 drivers/staging/media/ipu3/ipu3-css-pool.c map->size, size); map 17 drivers/staging/media/ipu3/ipu3-css-pool.c imgu_dmamap_free(imgu, map); map 18 drivers/staging/media/ipu3/ipu3-css-pool.c if (!imgu_dmamap_alloc(imgu, map, size)) map 45 drivers/staging/media/ipu3/ipu3-css-pool.h struct imgu_css_map *map, size_t size); map 2157 drivers/staging/media/ipu3/ipu3-css.c const struct imgu_css_map *map; map 2178 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.acc, 0); map 2179 drivers/staging/media/ipu3/ipu3-css.c if (set_params || !map->vaddr) { map 2181 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.acc, 0); map 2182 drivers/staging/media/ipu3/ipu3-css.c acc = map->vaddr; map 2187 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0); map 2188 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr || (set_params && (set_params->use.lin_vmem_params || map 2192 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0); map 2193 drivers/staging/media/ipu3/ipu3-css.c vmem0 = map->vaddr; map 2198 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0); map 2199 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr || (set_params && (set_params->use.tnr3_dmem_params || map 2202 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0); map 2203 
drivers/staging/media/ipu3/ipu3-css.c dmem0 = map->vaddr; map 2209 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.acc, 1); map 2211 drivers/staging/media/ipu3/ipu3-css.c r = imgu_css_cfg_acc(css, pipe, use, acc, map->vaddr, map 2220 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1); map 2222 drivers/staging/media/ipu3/ipu3-css.c map->vaddr, set_params); map 2229 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 1); map 2231 drivers/staging/media/ipu3/ipu3-css.c map->vaddr, set_params); map 2242 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.gdc, 0); map 2243 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr) { map 2245 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.gdc, 0); map 2246 drivers/staging/media/ipu3/ipu3-css.c gdc = map->vaddr; map 2247 drivers/staging/media/ipu3/ipu3-css.c imgu_css_cfg_gdc_table(map->vaddr, map 2259 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0); map 2260 drivers/staging/media/ipu3/ipu3-css.c if (!map->vaddr || (set_params && set_params->use.obgrid_param)) { map 2262 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0); map 2263 drivers/staging/media/ipu3/ipu3-css.c obgrid = map->vaddr; map 2276 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.acc, 0); map 2277 drivers/staging/media/ipu3/ipu3-css.c param_set->mem_map.acc_cluster_params_for_sp = map->daddr; map 2279 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.gdc, 0); map 2280 drivers/staging/media/ipu3/ipu3-css.c param_set->mem_map.dvs_6axis_params_y = map->daddr; map 2283 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.obgrid, 0); map 2285 drivers/staging/media/ipu3/ipu3-css.c map->daddr + (obgrid_size / stripes) * i; map 2289 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.binary_params_p[m], 0); map 2290 drivers/staging/media/ipu3/ipu3-css.c param_set->mem_map.isp_mem_param[stage][m] = map->daddr; map 2294 drivers/staging/media/ipu3/ipu3-css.c map = imgu_css_pool_last(&css_pipe->pool.parameter_set_info, 0); map 2295 drivers/staging/media/ipu3/ipu3-css.c r = imgu_css_queue_data(css, queue_id, pipe, map->daddr); map 93 drivers/staging/media/ipu3/ipu3-dmamap.c void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map, map 127 drivers/staging/media/ipu3/ipu3-dmamap.c map->vma = __get_vm_area(size, VM_USERMAP, VMALLOC_START, VMALLOC_END); map 128 drivers/staging/media/ipu3/ipu3-dmamap.c if (!map->vma) map 131 drivers/staging/media/ipu3/ipu3-dmamap.c map->vma->pages = pages; map 133 drivers/staging/media/ipu3/ipu3-dmamap.c if (map_vm_area(map->vma, PAGE_KERNEL, pages)) map 136 drivers/staging/media/ipu3/ipu3-dmamap.c map->size = size; map 137 drivers/staging/media/ipu3/ipu3-dmamap.c map->daddr = iova_dma_addr(&imgu->iova_domain, iova); map 138 drivers/staging/media/ipu3/ipu3-dmamap.c map->vaddr = map->vma->addr; map 141 drivers/staging/media/ipu3/ipu3-dmamap.c size, &map->daddr, map->vma->addr); map 143 drivers/staging/media/ipu3/ipu3-dmamap.c return map->vma->addr; map 146 drivers/staging/media/ipu3/ipu3-dmamap.c vunmap(map->vma->addr); map 152 drivers/staging/media/ipu3/ipu3-dmamap.c map->vma = NULL; map 160 drivers/staging/media/ipu3/ipu3-dmamap.c void imgu_dmamap_unmap(struct imgu_device *imgu, 
struct imgu_css_map *map) map 165 drivers/staging/media/ipu3/ipu3-dmamap.c iova_pfn(&imgu->iova_domain, map->daddr)); map 178 drivers/staging/media/ipu3/ipu3-dmamap.c void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map) map 180 drivers/staging/media/ipu3/ipu3-dmamap.c struct vm_struct *area = map->vma; map 183 drivers/staging/media/ipu3/ipu3-dmamap.c __func__, map->size, &map->daddr, map->vaddr); map 185 drivers/staging/media/ipu3/ipu3-dmamap.c if (!map->vaddr) map 188 drivers/staging/media/ipu3/ipu3-dmamap.c imgu_dmamap_unmap(imgu, map); map 193 drivers/staging/media/ipu3/ipu3-dmamap.c imgu_dmamap_free_buffer(area->pages, map->size); map 194 drivers/staging/media/ipu3/ipu3-dmamap.c vunmap(map->vaddr); map 195 drivers/staging/media/ipu3/ipu3-dmamap.c map->vaddr = NULL; map 199 drivers/staging/media/ipu3/ipu3-dmamap.c int nents, struct imgu_css_map *map) map 233 drivers/staging/media/ipu3/ipu3-dmamap.c memset(map, 0, sizeof(*map)); map 234 drivers/staging/media/ipu3/ipu3-dmamap.c map->daddr = iova_dma_addr(&imgu->iova_domain, iova); map 235 drivers/staging/media/ipu3/ipu3-dmamap.c map->size = size; map 11 drivers/staging/media/ipu3/ipu3-dmamap.h void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map, map 13 drivers/staging/media/ipu3/ipu3-dmamap.h void imgu_dmamap_free(struct imgu_device *imgu, struct imgu_css_map *map); map 16 drivers/staging/media/ipu3/ipu3-dmamap.h int nents, struct imgu_css_map *map); map 17 drivers/staging/media/ipu3/ipu3-dmamap.h void imgu_dmamap_unmap(struct imgu_device *imgu, struct imgu_css_map *map); map 318 drivers/staging/media/ipu3/ipu3-v4l2.c return imgu_dmamap_map_sg(imgu, sg->sgl, sg->nents, &buf->map); map 334 drivers/staging/media/ipu3/ipu3-v4l2.c imgu_dmamap_unmap(imgu, &buf->map); map 363 drivers/staging/media/ipu3/ipu3-v4l2.c imgu_css_buf_init(&buf->css_buf, queue, buf->map.daddr); map 58 drivers/staging/media/ipu3/ipu3.h struct imgu_css_map map; map 1356 drivers/staging/qlge/qlge.h struct map_list map[MAX_SKB_FRAGS + 2]; map 1364 drivers/staging/qlge/qlge.h u64 map; /* mapping for master */ map 218 drivers/staging/qlge/qlge_main.c u64 map; map 228 drivers/staging/qlge/qlge_main.c map = pci_map_single(qdev->pdev, ptr, size, direction); map 229 drivers/staging/qlge/qlge_main.c if (pci_dma_mapping_error(qdev->pdev, map)) { map 245 drivers/staging/qlge/qlge_main.c ql_write32(qdev, ICB_L, (u32) map); map 246 drivers/staging/qlge/qlge_main.c ql_write32(qdev, ICB_H, (u32) (map >> 32)); map 258 drivers/staging/qlge/qlge_main.c pci_unmap_single(qdev->pdev, map, size, direction); map 1057 drivers/staging/qlge/qlge_main.c lbq_desc->p.pg_chunk.map, map 1094 drivers/staging/qlge/qlge_main.c u64 map; map 1103 drivers/staging/qlge/qlge_main.c map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, map 1106 drivers/staging/qlge/qlge_main.c if (pci_dma_mapping_error(qdev->pdev, map)) { map 1114 drivers/staging/qlge/qlge_main.c rx_ring->pg_chunk.map = map; map 1143 drivers/staging/qlge/qlge_main.c u64 map; map 1160 drivers/staging/qlge/qlge_main.c map = lbq_desc->p.pg_chunk.map + map 1162 drivers/staging/qlge/qlge_main.c dma_unmap_addr_set(lbq_desc, mapaddr, map); map 1165 drivers/staging/qlge/qlge_main.c *lbq_desc->addr = cpu_to_le64(map); map 1167 drivers/staging/qlge/qlge_main.c pci_dma_sync_single_for_device(qdev->pdev, map, map 1197 drivers/staging/qlge/qlge_main.c u64 map; map 1219 drivers/staging/qlge/qlge_main.c map = pci_map_single(qdev->pdev, map 1223 drivers/staging/qlge/qlge_main.c if (pci_dma_mapping_error(qdev->pdev, 
map)) { map 1231 drivers/staging/qlge/qlge_main.c dma_unmap_addr_set(sbq_desc, mapaddr, map); map 1234 drivers/staging/qlge/qlge_main.c *sbq_desc->addr = cpu_to_le64(map); map 1288 drivers/staging/qlge/qlge_main.c dma_unmap_addr(&tx_ring_desc->map[i], map 1290 drivers/staging/qlge/qlge_main.c dma_unmap_len(&tx_ring_desc->map[i], map 1297 drivers/staging/qlge/qlge_main.c dma_unmap_addr(&tx_ring_desc->map[i], map 1299 drivers/staging/qlge/qlge_main.c dma_unmap_len(&tx_ring_desc->map[i], map 1314 drivers/staging/qlge/qlge_main.c dma_addr_t map; map 1326 drivers/staging/qlge/qlge_main.c map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE); map 1328 drivers/staging/qlge/qlge_main.c err = pci_dma_mapping_error(qdev->pdev, map); map 1337 drivers/staging/qlge/qlge_main.c tbd->addr = cpu_to_le64(map); map 1338 drivers/staging/qlge/qlge_main.c dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); map 1339 drivers/staging/qlge/qlge_main.c dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); map 1372 drivers/staging/qlge/qlge_main.c map = pci_map_single(qdev->pdev, &tx_ring_desc->oal, map 1375 drivers/staging/qlge/qlge_main.c err = pci_dma_mapping_error(qdev->pdev, map); map 1383 drivers/staging/qlge/qlge_main.c tbd->addr = cpu_to_le64(map); map 1392 drivers/staging/qlge/qlge_main.c dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map 1393 drivers/staging/qlge/qlge_main.c map); map 1394 drivers/staging/qlge/qlge_main.c dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, map 1400 drivers/staging/qlge/qlge_main.c map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), map 1403 drivers/staging/qlge/qlge_main.c err = dma_mapping_error(&qdev->pdev->dev, map); map 1411 drivers/staging/qlge/qlge_main.c tbd->addr = cpu_to_le64(map); map 1413 drivers/staging/qlge/qlge_main.c dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); map 1414 drivers/staging/qlge/qlge_main.c dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, map 2834 drivers/staging/qlge/qlge_main.c lbq_desc->p.pg_chunk.map, map 2848 drivers/staging/qlge/qlge_main.c pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, map 2375 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c u8 map[95] = {0}; map 2382 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c map[i] = (u8) (94 - i); map 2397 drivers/staging/rtl8723bs/hal/hal_com_phycfg.c BufOfLines[j] = map[currentChar - 32] + 32; map 59 drivers/target/target_core_alua.c struct t10_alua_lba_map *map; map 83 drivers/target/target_core_alua.c list_for_each_entry(map, &dev->t10_alua.lba_map_list, map 90 drivers/target/target_core_alua.c put_unaligned_be64(map->lba_map_first_lba, &buf[off]); map 93 drivers/target/target_core_alua.c put_unaligned_be64(map->lba_map_last_lba, &buf[off]); map 97 drivers/target/target_core_alua.c list_for_each_entry(map_mem, &map->lba_map_mem_list, map 479 drivers/target/target_core_alua.c struct t10_alua_lba_map *cur_map = NULL, *map; map 482 drivers/target/target_core_alua.c list_for_each_entry(map, &dev->t10_alua.lba_map_list, map 485 drivers/target/target_core_alua.c u64 first_lba = map->lba_map_first_lba; map 495 drivers/target/target_core_alua.c cur_map = map; map 499 drivers/target/target_core_alua.c last_lba = map->lba_map_last_lba; map 502 drivers/target/target_core_alua.c cur_map = map; map 2299 drivers/target/target_core_configfs.c struct t10_alua_lba_map *map; map 2310 drivers/target/target_core_configfs.c list_for_each_entry(map, &dev->t10_alua.lba_map_list, lba_map_list) { map 2312 
drivers/target/target_core_configfs.c map->lba_map_first_lba, map->lba_map_last_lba); map 2313 drivers/target/target_core_configfs.c list_for_each_entry(mem, &map->lba_map_mem_list, map 96 drivers/tee/tee_shm.c .map = tee_shm_op_map, map 226 drivers/thermal/imx_thermal.c struct regmap *map = data->tempmon; map 231 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->panic_alarm_ctrl + REG_CLR, map 233 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->panic_alarm_ctrl + REG_SET, map 240 drivers/thermal/imx_thermal.c struct regmap *map = data->tempmon; map 251 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->high_alarm_ctrl + REG_CLR, map 253 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->high_alarm_ctrl + REG_SET, map 261 drivers/thermal/imx_thermal.c struct regmap *map = data->tempmon; map 268 drivers/thermal/imx_thermal.c regmap_read(map, soc_data->temp_data, &val); map 276 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_CLR, map 278 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_SET, map 291 drivers/thermal/imx_thermal.c regmap_read(map, soc_data->temp_data, &val); map 294 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_CLR, map 296 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_SET, map 355 drivers/thermal/imx_thermal.c struct regmap *map = data->tempmon; map 362 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_CLR, map 364 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_SET, map 372 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_CLR, map 374 drivers/thermal/imx_thermal.c regmap_write(map, soc_data->sensor_ctrl + REG_SET, map 569 drivers/thermal/imx_thermal.c struct regmap *map; map 573 drivers/thermal/imx_thermal.c map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, map 575 drivers/thermal/imx_thermal.c if (IS_ERR(map)) { map 576 drivers/thermal/imx_thermal.c ret = PTR_ERR(map); map 581 drivers/thermal/imx_thermal.c ret = regmap_read(map, OCOTP_ANA1, &val); map 590 drivers/thermal/imx_thermal.c ret = regmap_read(map, OCOTP_MEM0, &val); map 702 drivers/thermal/imx_thermal.c struct regmap *map; map 710 drivers/thermal/imx_thermal.c map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "fsl,tempmon"); map 711 drivers/thermal/imx_thermal.c if (IS_ERR(map)) { map 712 drivers/thermal/imx_thermal.c ret = PTR_ERR(map); map 716 drivers/thermal/imx_thermal.c data->tempmon = map; map 726 drivers/thermal/imx_thermal.c regmap_write(map, IMX6_MISC1 + REG_CLR, map 733 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->low_alarm_ctrl + REG_SET, map 762 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, map 764 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, map 766 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->measure_freq_ctrl + REG_CLR, map 769 drivers/thermal/imx_thermal.c regmap_write(map, IMX6_MISC0 + REG_SET, map 771 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_SET, map 825 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->measure_freq_ctrl + REG_CLR, map 828 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->measure_freq_ctrl + REG_SET, map 835 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, map 837 drivers/thermal/imx_thermal.c regmap_write(map, 
data->socdata->sensor_ctrl + REG_SET, map 866 drivers/thermal/imx_thermal.c struct regmap *map = data->tempmon; map 869 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_SET, map 885 drivers/thermal/imx_thermal.c struct regmap *map = data->tempmon; map 893 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, map 895 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_SET, map 906 drivers/thermal/imx_thermal.c struct regmap *map = data->tempmon; map 913 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_CLR, map 915 drivers/thermal/imx_thermal.c regmap_write(map, data->socdata->sensor_ctrl + REG_SET, map 66 drivers/thermal/qcom/qcom-spmi-temp-alarm.c struct regmap *map; map 90 drivers/thermal/qcom/qcom-spmi-temp-alarm.c ret = regmap_read(chip->map, chip->base + addr, &val); map 100 drivers/thermal/qcom/qcom-spmi-temp-alarm.c return regmap_write(chip->map, chip->base + addr, data); map 368 drivers/thermal/qcom/qcom-spmi-temp-alarm.c chip->map = dev_get_regmap(pdev->dev.parent, NULL); map 369 drivers/thermal/qcom/qcom-spmi-temp-alarm.c if (!chip->map) map 63 drivers/thermal/qcom/tsens-8960.c struct regmap *map = priv->tm_map; map 65 drivers/thermal/qcom/tsens-8960.c ret = regmap_read(map, THRESHOLD_ADDR, &priv->ctx.threshold); map 69 drivers/thermal/qcom/tsens-8960.c ret = regmap_read(map, CNTL_ADDR, &priv->ctx.control); map 78 drivers/thermal/qcom/tsens-8960.c ret = regmap_update_bits(map, CNTL_ADDR, mask, 0); map 88 drivers/thermal/qcom/tsens-8960.c struct regmap *map = priv->tm_map; map 90 drivers/thermal/qcom/tsens-8960.c ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST); map 99 drivers/thermal/qcom/tsens-8960.c ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG); map 104 drivers/thermal/qcom/tsens-8960.c ret = regmap_write(map, THRESHOLD_ADDR, priv->ctx.threshold); map 108 drivers/thermal/qcom/tsens-8960.c ret = regmap_write(map, CNTL_ADDR, priv->ctx.control); map 1238 drivers/thermal/tegra/soctherm.c .map = soctherm_oc_irq_map, map 93 drivers/thermal/uniphier_thermal.c struct regmap *map = tdev->regmap; map 99 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, map 109 drivers/thermal/uniphier_thermal.c ret = regmap_read(map, tdev->data->map_base + TMODCOEF, &val); map 121 drivers/thermal/uniphier_thermal.c regmap_write(map, tdev->data->tmod_setup_addr, map 127 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->block_base + PVTCTLMODE, map 131 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->block_base + EMONREPEAT, map 136 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->map_base + PVTCTLSEL, map 145 drivers/thermal/uniphier_thermal.c struct regmap *map = tdev->regmap; map 148 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->map_base + SETALERT0 + (ch << 2), map 156 drivers/thermal/uniphier_thermal.c struct regmap *map = tdev->regmap; map 165 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->map_base + PMALERTINTCTL, map 169 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, map 177 drivers/thermal/uniphier_thermal.c struct regmap *map = tdev->regmap; map 180 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->map_base + PMALERTINTCTL, map 184 drivers/thermal/uniphier_thermal.c regmap_write_bits(map, tdev->data->block_base + PVTCTLEN, map 193 
drivers/thermal/uniphier_thermal.c struct regmap *map = tdev->regmap; map 197 drivers/thermal/uniphier_thermal.c ret = regmap_read(map, tdev->data->map_base + TMOD, &temp); map 46 drivers/uio/uio.c #define to_map(map) container_of(map, struct uio_map, kobj) map 96 drivers/uio/uio.c struct uio_map *map = to_map(kobj); map 97 drivers/uio/uio.c kfree(map); map 103 drivers/uio/uio.c struct uio_map *map = to_map(kobj); map 104 drivers/uio/uio.c struct uio_mem *mem = map->mem; map 289 drivers/uio/uio.c struct uio_map *map; map 306 drivers/uio/uio.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 307 drivers/uio/uio.c if (!map) { map 311 drivers/uio/uio.c kobject_init(&map->kobj, &map_attr_type); map 312 drivers/uio/uio.c map->mem = mem; map 313 drivers/uio/uio.c mem->map = map; map 314 drivers/uio/uio.c ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); map 317 drivers/uio/uio.c ret = kobject_uevent(&map->kobj, KOBJ_ADD); map 368 drivers/uio/uio.c map = mem->map; map 369 drivers/uio/uio.c kobject_put(&map->kobj); map 386 drivers/uio/uio.c kobject_put(&mem->map->kobj); map 232 drivers/usb/dwc2/hcd_queue.c static int pmap_schedule(unsigned long *map, int bits_per_period, map 274 drivers/usb/dwc2/hcd_queue.c start = bitmap_find_next_zero_area(map, end, start, num_bits, map 295 drivers/usb/dwc2/hcd_queue.c map, ith_start + num_bits, ith_start, num_bits, map 304 drivers/usb/dwc2/hcd_queue.c map, ith_end, ith_start, num_bits, 0); map 324 drivers/usb/dwc2/hcd_queue.c bitmap_set(map, ith_start, num_bits); map 340 drivers/usb/dwc2/hcd_queue.c static void pmap_unschedule(unsigned long *map, int bits_per_period, map 357 drivers/usb/dwc2/hcd_queue.c bitmap_clear(map, ith_start, num_bits); map 377 drivers/usb/dwc2/hcd_queue.c unsigned long *map; map 384 drivers/usb/dwc2/hcd_queue.c map = qh->dwc_tt->periodic_bitmaps; map 386 drivers/usb/dwc2/hcd_queue.c map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1); map 388 drivers/usb/dwc2/hcd_queue.c return map; map 440 drivers/usb/dwc2/hcd_queue.c static void pmap_print(unsigned long *map, int bits_per_period, map 462 drivers/usb/dwc2/hcd_queue.c bitmap_find_next_zero_area(map, i + 1, map 528 drivers/usb/dwc2/hcd_queue.c unsigned long *map = dwc2_get_ls_map(hsotg, qh); map 535 drivers/usb/dwc2/hcd_queue.c if (map) { map 538 drivers/usb/dwc2/hcd_queue.c qh, map); map 539 drivers/usb/dwc2/hcd_queue.c pmap_print(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, map 588 drivers/usb/dwc2/hcd_queue.c unsigned long *map = dwc2_get_ls_map(hsotg, qh); map 591 drivers/usb/dwc2/hcd_queue.c if (!map) map 606 drivers/usb/dwc2/hcd_queue.c slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, map 627 drivers/usb/dwc2/hcd_queue.c unsigned long *map = dwc2_get_ls_map(hsotg, qh); map 630 drivers/usb/dwc2/hcd_queue.c if (!map) map 633 drivers/usb/dwc2/hcd_queue.c pmap_unschedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME, map 2022 drivers/usb/host/r8a66597-hcd.c static void collect_usb_address_map(struct usb_device *udev, unsigned long *map) map 2030 drivers/usb/host/r8a66597-hcd.c map[udev->devnum/32] |= (1 << (udev->devnum % 32)); map 2033 drivers/usb/host/r8a66597-hcd.c collect_usb_address_map(childdev, map); map 2056 drivers/usb/host/r8a66597-hcd.c unsigned long *map) map 2063 drivers/usb/host/r8a66597-hcd.c diff = r8a66597->child_connect_map[i] ^ map[i]; map 2072 drivers/usb/host/r8a66597-hcd.c if (map[i] & (1 << j)) map 223 drivers/usb/mon/mon_bin.c static int mon_alloc_buff(struct mon_pgmap *map, int npages); map 224 drivers/usb/mon/mon_bin.c static void mon_free_buff(struct 
mon_pgmap *map, int npages); map 1325 drivers/usb/mon/mon_bin.c static int mon_alloc_buff(struct mon_pgmap *map, int npages) map 1334 drivers/usb/mon/mon_bin.c free_page((unsigned long) map[n].ptr); map 1337 drivers/usb/mon/mon_bin.c map[n].ptr = (unsigned char *) vaddr; map 1338 drivers/usb/mon/mon_bin.c map[n].pg = virt_to_page((void *) vaddr); map 1343 drivers/usb/mon/mon_bin.c static void mon_free_buff(struct mon_pgmap *map, int npages) map 1348 drivers/usb/mon/mon_bin.c free_page((unsigned long) map[n].ptr); map 103 drivers/usb/renesas_usbhs/fifo.c static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); map 795 drivers/usb/renesas_usbhs/fifo.c static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) map 803 drivers/usb/renesas_usbhs/fifo.c return info->dma_map_ctrl(chan->device->dev, pkt, map); map 188 drivers/usb/renesas_usbhs/mod_gadget.c int map) map 198 drivers/usb/renesas_usbhs/mod_gadget.c if (map) { map 918 drivers/usb/renesas_usbhs/mod_host.c int map) map 920 drivers/usb/renesas_usbhs/mod_host.c if (map) { map 673 drivers/usb/renesas_usbhs/pipe.c struct usbhs_pkt *pkt, int map)) map 42 drivers/usb/renesas_usbhs/pipe.h int map); map 80 drivers/usb/renesas_usbhs/pipe.h struct usbhs_pkt *pkt, int map)); map 1428 drivers/vfio/pci/vfio_pci_config.c u8 *map = vdev->pci_config_map; map 1482 drivers/vfio/pci/vfio_pci_config.c if (likely(map[pos + i] == PCI_CAP_ID_INVALID)) map 1486 drivers/vfio/pci/vfio_pci_config.c __func__, pos + i, map[pos + i], cap); map 1491 drivers/vfio/pci/vfio_pci_config.c memset(map + pos, cap, len); map 1513 drivers/vfio/pci/vfio_pci_config.c u8 *map = vdev->pci_config_map; map 1567 drivers/vfio/pci/vfio_pci_config.c if (likely(map[epos + i] == PCI_CAP_ID_INVALID)) map 1571 drivers/vfio/pci/vfio_pci_config.c __func__, epos + i, map[epos + i], ecap); map 1581 drivers/vfio/pci/vfio_pci_config.c memset(map + epos, ecap, len); map 1633 drivers/vfio/pci/vfio_pci_config.c u8 *map, *vconfig; map 1642 drivers/vfio/pci/vfio_pci_config.c map = kmalloc(pdev->cfg_size, GFP_KERNEL); map 1643 drivers/vfio/pci/vfio_pci_config.c if (!map) map 1648 drivers/vfio/pci/vfio_pci_config.c kfree(map); map 1652 drivers/vfio/pci/vfio_pci_config.c vdev->pci_config_map = map; map 1655 drivers/vfio/pci/vfio_pci_config.c memset(map, PCI_CAP_ID_BASIC, PCI_STD_HEADER_SIZEOF); map 1656 drivers/vfio/pci/vfio_pci_config.c memset(map + PCI_STD_HEADER_SIZEOF, PCI_CAP_ID_INVALID, map 1716 drivers/vfio/pci/vfio_pci_config.c kfree(map); map 1072 drivers/vfio/vfio_iommu_type1.c struct vfio_iommu_type1_dma_map *map) map 1074 drivers/vfio/vfio_iommu_type1.c dma_addr_t iova = map->iova; map 1075 drivers/vfio/vfio_iommu_type1.c unsigned long vaddr = map->vaddr; map 1076 drivers/vfio/vfio_iommu_type1.c size_t size = map->size; map 1082 drivers/vfio/vfio_iommu_type1.c if (map->size != size || map->vaddr != vaddr || map->iova != iova) map 1090 drivers/vfio/vfio_iommu_type1.c if (map->flags & VFIO_DMA_MAP_FLAG_WRITE) map 1092 drivers/vfio/vfio_iommu_type1.c if (map->flags & VFIO_DMA_MAP_FLAG_READ) map 2286 drivers/vfio/vfio_iommu_type1.c struct vfio_iommu_type1_dma_map map; map 2292 drivers/vfio/vfio_iommu_type1.c if (copy_from_user(&map, (void __user *)arg, minsz)) map 2295 drivers/vfio/vfio_iommu_type1.c if (map.argsz < minsz || map.flags & ~mask) map 2298 drivers/vfio/vfio_iommu_type1.c return vfio_dma_do_map(iommu, &map); map 119 drivers/video/fbdev/arkfb.c static void arkfb_settile(struct fb_info *info, struct fb_tilemap *map) map 121 drivers/video/fbdev/arkfb.c const u8 *font = 
map->data; map 125 drivers/video/fbdev/arkfb.c if ((map->width != 8) || (map->height != 16) || map 126 drivers/video/fbdev/arkfb.c (map->depth != 1) || (map->length != 256)) { map 128 drivers/video/fbdev/arkfb.c map->width, map->height, map->depth, map->length); map 133 drivers/video/fbdev/arkfb.c for (c = 0; c < map->length; c++) { map 134 drivers/video/fbdev/arkfb.c for (i = 0; i < map->height; i++) { map 143 drivers/video/fbdev/arkfb.c font += map->height; map 1048 drivers/video/fbdev/atmel_lcdfb.c struct resource *map = NULL; map 1123 drivers/video/fbdev/atmel_lcdfb.c map = platform_get_resource(pdev, IORESOURCE_MEM, 1); map 1124 drivers/video/fbdev/atmel_lcdfb.c if (map) { map 1126 drivers/video/fbdev/atmel_lcdfb.c info->fix.smem_start = map->start; map 1127 drivers/video/fbdev/atmel_lcdfb.c info->fix.smem_len = resource_size(map); map 1228 drivers/video/fbdev/atmel_lcdfb.c if (map) map 1234 drivers/video/fbdev/atmel_lcdfb.c if (map) map 520 drivers/video/fbdev/cg14.c struct sbus_mmap_map *map = &par->mmap_map[i]; map 522 drivers/video/fbdev/cg14.c if (!map->size) map 524 drivers/video/fbdev/cg14.c if (map->poff & 0x80000000) map 525 drivers/video/fbdev/cg14.c map->poff = (map->poff & 0x7fffffff) + map 529 drivers/video/fbdev/cg14.c map->size >= 0x100000 && map 530 drivers/video/fbdev/cg14.c map->size <= 0x400000) map 531 drivers/video/fbdev/cg14.c map->size *= 2; map 193 drivers/video/fbdev/core/svgalib.c void svga_settile(struct fb_info *info, struct fb_tilemap *map) map 195 drivers/video/fbdev/core/svgalib.c const u8 *font = map->data; map 199 drivers/video/fbdev/core/svgalib.c if ((map->width != 8) || (map->height != 16) || map 200 drivers/video/fbdev/core/svgalib.c (map->depth != 1) || (map->length != 256)) { map 202 drivers/video/fbdev/core/svgalib.c map->width, map->height, map->depth, map->length); map 207 drivers/video/fbdev/core/svgalib.c for (c = 0; c < map->length; c++) { map 208 drivers/video/fbdev/core/svgalib.c for (i = 0; i < map->height; i++) { map 213 drivers/video/fbdev/core/svgalib.c font += map->height; map 133 drivers/video/fbdev/core/tileblit.c struct fb_tilemap map; map 144 drivers/video/fbdev/core/tileblit.c map.width = vc->vc_font.width; map 145 drivers/video/fbdev/core/tileblit.c map.height = vc->vc_font.height; map 146 drivers/video/fbdev/core/tileblit.c map.depth = 1; map 147 drivers/video/fbdev/core/tileblit.c map.length = (ops->p->userfont) ? 
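The fbdev entries above (arkfb_settile, svga_settile, tileblit) share one pattern: validate the fb_tilemap geometry (8x16, depth 1, 256 glyphs) and then walk the font data glyph by glyph. A minimal sketch of that validate-then-iterate idiom follows; the struct fields mirror the snippets above, while write_glyph_row() is a hypothetical stand-in for the MMIO/font-RAM access the real drivers perform.

```c
/*
 * Sketch of the fb_tilemap font-upload pattern seen in arkfb_settile()
 * and svga_settile() above: reject anything but an 8x16, depth-1,
 * 256-glyph map, then walk the font glyph by glyph.
 * write_glyph_row() is a hypothetical placeholder for the hardware write.
 */
#include <stdio.h>
#include <stdint.h>

struct fb_tilemap_sketch {
	unsigned int width, height, depth, length;
	const uint8_t *data;
};

static void write_glyph_row(unsigned int glyph, unsigned int row, uint8_t bits)
{
	/* the real drivers write this row into the adapter's font RAM */
	(void)glyph; (void)row; (void)bits;
}

static int settile_sketch(const struct fb_tilemap_sketch *map)
{
	const uint8_t *font = map->data;
	unsigned int c, i;

	if (map->width != 8 || map->height != 16 ||
	    map->depth != 1 || map->length != 256) {
		fprintf(stderr, "unsupported font: %ux%u, depth %u, %u glyphs\n",
			map->width, map->height, map->depth, map->length);
		return -1;
	}

	for (c = 0; c < map->length; c++) {
		for (i = 0; i < map->height; i++)
			write_glyph_row(c, i, font[i]);
		font += map->height;	/* advance to the next glyph */
	}
	return 0;
}
```

The drivers differ only in how each row reaches the adapter (for example, the s3fb variant below writes with fb_writeb() at a stride of 4 per glyph).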
map 149 drivers/video/fbdev/core/tileblit.c map.data = ops->p->fontdata; map 150 drivers/video/fbdev/core/tileblit.c info->tileops->fb_settile(info, &map); map 224 drivers/video/fbdev/macmodes.c const struct mode_map *map; map 226 drivers/video/fbdev/macmodes.c for (map = mac_modes; map->vmode != -1; map++) map 227 drivers/video/fbdev/macmodes.c if (map->vmode == vmode) { map 228 drivers/video/fbdev/macmodes.c mode = map->mode; map 306 drivers/video/fbdev/macmodes.c const struct mode_map *map; map 321 drivers/video/fbdev/macmodes.c for (map = mac_modes; map->vmode != -1; map++) { map 322 drivers/video/fbdev/macmodes.c const struct fb_videomode *mode = map->mode; map 332 drivers/video/fbdev/macmodes.c *vmode = map->vmode; map 338 drivers/video/fbdev/macmodes.c map++; map 339 drivers/video/fbdev/macmodes.c while (map->vmode != -1) { map 340 drivers/video/fbdev/macmodes.c const struct fb_videomode *clk_mode = map->mode; map 348 drivers/video/fbdev/macmodes.c *vmode = map->vmode; map 349 drivers/video/fbdev/macmodes.c map++; map 369 drivers/video/fbdev/macmodes.c const struct monitor_map *map; map 371 drivers/video/fbdev/macmodes.c for (map = mac_monitors; map->sense != -1; map++) map 372 drivers/video/fbdev/macmodes.c if (map->sense == sense) map 374 drivers/video/fbdev/macmodes.c return map->vmode; map 519 drivers/video/fbdev/mx3fb.c const struct di_mapping *map; map 607 drivers/video/fbdev/mx3fb.c map = &di_mappings[mx3fb->disp_data_fmt]; map 608 drivers/video/fbdev/mx3fb.c mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP); map 609 drivers/video/fbdev/mx3fb.c mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP); map 610 drivers/video/fbdev/mx3fb.c mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP); map 50 drivers/video/fbdev/omap/omapfb.h unsigned map:1; /* kernel mapped by the driver */ map 1375 drivers/video/fbdev/omap2/omapfb/omapfb-main.c rg->map = false; map 51 drivers/video/fbdev/omap2/omapfb/omapfb.h bool map; /* kernel mapped by the driver */ map 116 drivers/video/fbdev/omap2/omapfb/vrfb.c unsigned long map = ctx_map; map 118 drivers/video/fbdev/omap2/omapfb/vrfb.c for (i = ffs(map); i; i = ffs(map)) { map 121 drivers/video/fbdev/omap2/omapfb/vrfb.c map &= ~(1 << i); map 290 drivers/video/fbdev/s3fb.c static void s3fb_settile_fast(struct fb_info *info, struct fb_tilemap *map) map 292 drivers/video/fbdev/s3fb.c const u8 *font = map->data; map 296 drivers/video/fbdev/s3fb.c if ((map->width != 8) || (map->height != 16) || map 297 drivers/video/fbdev/s3fb.c (map->depth != 1) || (map->length != 256)) { map 299 drivers/video/fbdev/s3fb.c map->width, map->height, map->depth, map->length); map 304 drivers/video/fbdev/s3fb.c for (i = 0; i < map->height; i++) { map 305 drivers/video/fbdev/s3fb.c for (c = 0; c < map->length; c++) { map 306 drivers/video/fbdev/s3fb.c fb_writeb(font[c * map->height + i], fb + c * 4); map 41 drivers/video/fbdev/sbuslib.c int sbusfb_mmap_helper(struct sbus_mmap_map *map, map 68 drivers/video/fbdev/sbuslib.c for (i = 0; map[i].size; i++) map 69 drivers/video/fbdev/sbuslib.c if (map[i].voff == off+page) { map 70 drivers/video/fbdev/sbuslib.c map_size = sbusfb_mmapsize(map[i].size, fbsize); map 76 drivers/video/fbdev/sbuslib.c map_offset = (physbase + map[i].poff) & POFF_MASK; map 18 drivers/video/fbdev/sbuslib.h extern int sbusfb_mmap_helper(struct sbus_mmap_map *map, map 638 drivers/video/fbdev/vga16fb.c static const unsigned char map[] = { 000, 001, 010, 011 }; map 643 drivers/video/fbdev/vga16fb.c val = map[red>>14] | ((map[green>>14]) << 1) | ((map[blue>>14]) << 2); 
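The macmodes.c entries above walk sentinel-terminated lookup tables (map->vmode != -1, map->sense != -1) to translate between Mac video-mode numbers and fb_videomode timings. A small, self-contained sketch of that table-walk idiom is below; the table contents are made up for illustration, only the scan-until-sentinel shape mirrors the driver.

```c
/*
 * Sketch of the sentinel-terminated mapping-table walk used by the
 * mode_map/monitor_map lookups in macmodes.c above.  The entries here
 * are hypothetical; only the idiom (scan until the -1 sentinel, return
 * the paired value) follows the driver.
 */
#include <stdio.h>

struct mode_map_sketch {
	int vmode;		/* key: Mac video-mode number       */
	const char *mode;	/* value: matching timing entry     */
};

static const struct mode_map_sketch mac_modes_sketch[] = {
	{ 5,  "640x480@60"  },
	{ 10, "800x600@75"  },
	{ 14, "1024x768@60" },
	{ -1, NULL },		/* sentinel terminates the scan     */
};

static const char *lookup_vmode(int vmode)
{
	const struct mode_map_sketch *map;

	for (map = mac_modes_sketch; map->vmode != -1; map++)
		if (map->vmode == vmode)
			return map->mode;
	return NULL;		/* not found */
}

int main(void)
{
	const char *hit = lookup_vmode(10);
	const char *miss = lookup_vmode(99);

	printf("vmode 10 -> %s\n", hit ? hit : "(none)");
	printf("vmode 99 -> %s\n", miss ? miss : "(none)");
	return 0;
}
```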
map 97 drivers/w1/masters/ds1wm.c void __iomem *map; map 122 drivers/w1/masters/ds1wm.c iowrite8(val, ds1wm_data->map + (reg << 0)); map 125 drivers/w1/masters/ds1wm.c iowrite16be((u16)val, ds1wm_data->map + (reg << 1)); map 128 drivers/w1/masters/ds1wm.c iowrite32be((u32)val, ds1wm_data->map + (reg << 2)); map 134 drivers/w1/masters/ds1wm.c iowrite8(val, ds1wm_data->map + (reg << 0)); map 137 drivers/w1/masters/ds1wm.c iowrite16((u16)val, ds1wm_data->map + (reg << 1)); map 140 drivers/w1/masters/ds1wm.c iowrite32((u32)val, ds1wm_data->map + (reg << 2)); map 153 drivers/w1/masters/ds1wm.c val = ioread8(ds1wm_data->map + (reg << 0)); map 156 drivers/w1/masters/ds1wm.c val = ioread16be(ds1wm_data->map + (reg << 1)); map 159 drivers/w1/masters/ds1wm.c val = ioread32be(ds1wm_data->map + (reg << 2)); map 165 drivers/w1/masters/ds1wm.c val = ioread8(ds1wm_data->map + (reg << 0)); map 168 drivers/w1/masters/ds1wm.c val = ioread16(ds1wm_data->map + (reg << 1)); map 171 drivers/w1/masters/ds1wm.c val = ioread32(ds1wm_data->map + (reg << 2)); map 526 drivers/w1/masters/ds1wm.c ds1wm_data->map = devm_ioremap(&pdev->dev, res->start, map 528 drivers/w1/masters/ds1wm.c if (!ds1wm_data->map) map 88 drivers/xen/gntdev-common.h void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map); map 92 drivers/xen/gntdev-common.h int gntdev_map_grant_pages(struct gntdev_grant_map *map); map 45 drivers/xen/gntdev-dmabuf.c struct gntdev_grant_map *map; map 325 drivers/xen/gntdev-dmabuf.c struct gntdev_grant_map *map) map 328 drivers/xen/gntdev-dmabuf.c list_del(&map->next); map 329 drivers/xen/gntdev-dmabuf.c gntdev_put_map(NULL /* already removed */, map); map 339 drivers/xen/gntdev-dmabuf.c gntdev_dmabuf->u.exp.map); map 371 drivers/xen/gntdev-dmabuf.c .map = dmabuf_exp_ops_kmap, map 378 drivers/xen/gntdev-dmabuf.c struct gntdev_grant_map *map; map 402 drivers/xen/gntdev-dmabuf.c gntdev_dmabuf->u.exp.map = args->map; map 447 drivers/xen/gntdev-dmabuf.c struct gntdev_grant_map *map; map 458 drivers/xen/gntdev-dmabuf.c map = gntdev_alloc_map(priv, count, dmabuf_flags); map 459 drivers/xen/gntdev-dmabuf.c if (!map) map 464 drivers/xen/gntdev-dmabuf.c gntdev_put_map(NULL, map); map 467 drivers/xen/gntdev-dmabuf.c return map; map 473 drivers/xen/gntdev-dmabuf.c struct gntdev_grant_map *map; map 477 drivers/xen/gntdev-dmabuf.c map = dmabuf_exp_alloc_backing_storage(priv, flags, count); map 478 drivers/xen/gntdev-dmabuf.c if (IS_ERR(map)) map 479 drivers/xen/gntdev-dmabuf.c return PTR_ERR(map); map 482 drivers/xen/gntdev-dmabuf.c map->grants[i].domid = domid; map 483 drivers/xen/gntdev-dmabuf.c map->grants[i].ref = refs[i]; map 487 drivers/xen/gntdev-dmabuf.c gntdev_add_map(priv, map); map 490 drivers/xen/gntdev-dmabuf.c map->flags |= GNTMAP_host_map; map 492 drivers/xen/gntdev-dmabuf.c map->flags |= GNTMAP_device_map; map 495 drivers/xen/gntdev-dmabuf.c ret = gntdev_map_grant_pages(map); map 500 drivers/xen/gntdev-dmabuf.c args.map = map; map 503 drivers/xen/gntdev-dmabuf.c args.count = map->count; map 504 drivers/xen/gntdev-dmabuf.c args.pages = map->pages; map 515 drivers/xen/gntdev-dmabuf.c dmabuf_exp_remove_map(priv, map); map 68 drivers/xen/gntdev.c static int unmap_grant_pages(struct gntdev_grant_map *map, map 84 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 87 drivers/xen/gntdev.c list_for_each_entry(map, &priv->maps, next) map 89 drivers/xen/gntdev.c map->index, map->count, map 90 drivers/xen/gntdev.c map->index == text_index && text ? 
text : ""); map 94 drivers/xen/gntdev.c static void gntdev_free_map(struct gntdev_grant_map *map) map 96 drivers/xen/gntdev.c if (map == NULL) map 100 drivers/xen/gntdev.c if (map->dma_vaddr) { map 103 drivers/xen/gntdev.c args.dev = map->dma_dev; map 104 drivers/xen/gntdev.c args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT); map 105 drivers/xen/gntdev.c args.nr_pages = map->count; map 106 drivers/xen/gntdev.c args.pages = map->pages; map 107 drivers/xen/gntdev.c args.frames = map->frames; map 108 drivers/xen/gntdev.c args.vaddr = map->dma_vaddr; map 109 drivers/xen/gntdev.c args.dev_bus_addr = map->dma_bus_addr; map 114 drivers/xen/gntdev.c if (map->pages) map 115 drivers/xen/gntdev.c gnttab_free_pages(map->count, map->pages); map 118 drivers/xen/gntdev.c kfree(map->frames); map 120 drivers/xen/gntdev.c kfree(map->pages); map 121 drivers/xen/gntdev.c kfree(map->grants); map 122 drivers/xen/gntdev.c kfree(map->map_ops); map 123 drivers/xen/gntdev.c kfree(map->unmap_ops); map 124 drivers/xen/gntdev.c kfree(map->kmap_ops); map 125 drivers/xen/gntdev.c kfree(map->kunmap_ops); map 126 drivers/xen/gntdev.c kfree(map); map 207 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 209 drivers/xen/gntdev.c list_for_each_entry(map, &priv->maps, next) { map 210 drivers/xen/gntdev.c if (add->index + add->count < map->index) { map 211 drivers/xen/gntdev.c list_add_tail(&add->next, &map->next); map 214 drivers/xen/gntdev.c add->index = map->index + map->count; map 225 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 227 drivers/xen/gntdev.c list_for_each_entry(map, &priv->maps, next) { map 228 drivers/xen/gntdev.c if (map->index != index) map 230 drivers/xen/gntdev.c if (count && map->count != count) map 232 drivers/xen/gntdev.c return map; map 237 drivers/xen/gntdev.c void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) map 239 drivers/xen/gntdev.c if (!map) map 242 drivers/xen/gntdev.c if (!refcount_dec_and_test(&map->users)) map 245 drivers/xen/gntdev.c atomic_sub(map->count, &pages_mapped); map 247 drivers/xen/gntdev.c if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { map 248 drivers/xen/gntdev.c notify_remote_via_evtchn(map->notify.event); map 249 drivers/xen/gntdev.c evtchn_put(map->notify.event); map 254 drivers/xen/gntdev.c list_del(&map->next); map 258 drivers/xen/gntdev.c if (map->pages && !use_ptemod) map 259 drivers/xen/gntdev.c unmap_grant_pages(map, 0, map->count); map 260 drivers/xen/gntdev.c gntdev_free_map(map); map 267 drivers/xen/gntdev.c struct gntdev_grant_map *map = data; map 268 drivers/xen/gntdev.c unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT; map 269 drivers/xen/gntdev.c int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte; map 272 drivers/xen/gntdev.c BUG_ON(pgnr >= map->count); map 283 drivers/xen/gntdev.c gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags, map 284 drivers/xen/gntdev.c map->grants[pgnr].ref, map 285 drivers/xen/gntdev.c map->grants[pgnr].domid); map 286 drivers/xen/gntdev.c gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags, map 299 drivers/xen/gntdev.c int gntdev_map_grant_pages(struct gntdev_grant_map *map) map 305 drivers/xen/gntdev.c if (map->map_ops[0].handle != -1) map 307 drivers/xen/gntdev.c for (i = 0; i < map->count; i++) { map 309 drivers/xen/gntdev.c pfn_to_kaddr(page_to_pfn(map->pages[i])); map 310 drivers/xen/gntdev.c gnttab_set_map_op(&map->map_ops[i], addr, map->flags, map 311 drivers/xen/gntdev.c map->grants[i].ref, map 312 drivers/xen/gntdev.c 
map->grants[i].domid); map 313 drivers/xen/gntdev.c gnttab_set_unmap_op(&map->unmap_ops[i], addr, map 314 drivers/xen/gntdev.c map->flags, -1 /* handle */); map 323 drivers/xen/gntdev.c for (i = 0; i < map->count; i++) { map 325 drivers/xen/gntdev.c pfn_to_kaddr(page_to_pfn(map->pages[i])); map 326 drivers/xen/gntdev.c BUG_ON(PageHighMem(map->pages[i])); map 328 drivers/xen/gntdev.c gnttab_set_map_op(&map->kmap_ops[i], address, map 329 drivers/xen/gntdev.c map->flags | GNTMAP_host_map, map 330 drivers/xen/gntdev.c map->grants[i].ref, map 331 drivers/xen/gntdev.c map->grants[i].domid); map 332 drivers/xen/gntdev.c gnttab_set_unmap_op(&map->kunmap_ops[i], address, map 333 drivers/xen/gntdev.c map->flags | GNTMAP_host_map, -1); map 337 drivers/xen/gntdev.c pr_debug("map %d+%d\n", map->index, map->count); map 338 drivers/xen/gntdev.c err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL, map 339 drivers/xen/gntdev.c map->pages, map->count); map 343 drivers/xen/gntdev.c for (i = 0; i < map->count; i++) { map 344 drivers/xen/gntdev.c if (map->map_ops[i].status) { map 349 drivers/xen/gntdev.c map->unmap_ops[i].handle = map->map_ops[i].handle; map 351 drivers/xen/gntdev.c map->kunmap_ops[i].handle = map->kmap_ops[i].handle; map 353 drivers/xen/gntdev.c else if (map->dma_vaddr) { map 356 drivers/xen/gntdev.c bfn = pfn_to_bfn(page_to_pfn(map->pages[i])); map 357 drivers/xen/gntdev.c map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn); map 364 drivers/xen/gntdev.c static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, map 370 drivers/xen/gntdev.c if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { map 371 drivers/xen/gntdev.c int pgno = (map->notify.addr >> PAGE_SHIFT); map 374 drivers/xen/gntdev.c uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); map 375 drivers/xen/gntdev.c tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; map 376 drivers/xen/gntdev.c map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; map 380 drivers/xen/gntdev.c unmap_data.unmap_ops = map->unmap_ops + offset; map 381 drivers/xen/gntdev.c unmap_data.kunmap_ops = use_ptemod ? 
map->kunmap_ops + offset : NULL; map 382 drivers/xen/gntdev.c unmap_data.pages = map->pages + offset; map 390 drivers/xen/gntdev.c if (map->unmap_ops[offset+i].status) map 393 drivers/xen/gntdev.c map->unmap_ops[offset+i].handle, map 394 drivers/xen/gntdev.c map->unmap_ops[offset+i].status); map 395 drivers/xen/gntdev.c map->unmap_ops[offset+i].handle = -1; map 400 drivers/xen/gntdev.c static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, map 405 drivers/xen/gntdev.c pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); map 411 drivers/xen/gntdev.c while (pages && map->unmap_ops[offset].handle == -1) { map 417 drivers/xen/gntdev.c if (map->unmap_ops[offset+range].handle == -1) map 421 drivers/xen/gntdev.c err = __unmap_grant_pages(map, offset, range); map 433 drivers/xen/gntdev.c struct gntdev_grant_map *map = vma->vm_private_data; map 436 drivers/xen/gntdev.c refcount_inc(&map->users); map 441 drivers/xen/gntdev.c struct gntdev_grant_map *map = vma->vm_private_data; map 455 drivers/xen/gntdev.c map->vma = NULL; map 459 drivers/xen/gntdev.c gntdev_put_map(priv, map); map 465 drivers/xen/gntdev.c struct gntdev_grant_map *map = vma->vm_private_data; map 467 drivers/xen/gntdev.c return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT]; map 478 drivers/xen/gntdev.c static bool in_range(struct gntdev_grant_map *map, map 481 drivers/xen/gntdev.c if (!map->vma) map 483 drivers/xen/gntdev.c if (map->vma->vm_start >= end) map 485 drivers/xen/gntdev.c if (map->vma->vm_end <= start) map 491 drivers/xen/gntdev.c static int unmap_if_in_range(struct gntdev_grant_map *map, map 498 drivers/xen/gntdev.c if (!in_range(map, start, end)) map 504 drivers/xen/gntdev.c mstart = max(start, map->vma->vm_start); map 505 drivers/xen/gntdev.c mend = min(end, map->vma->vm_end); map 507 drivers/xen/gntdev.c map->index, map->count, map 508 drivers/xen/gntdev.c map->vma->vm_start, map->vma->vm_end, map 510 drivers/xen/gntdev.c err = unmap_grant_pages(map, map 511 drivers/xen/gntdev.c (mstart - map->vma->vm_start) >> PAGE_SHIFT, map 522 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 530 drivers/xen/gntdev.c list_for_each_entry(map, &priv->maps, next) { map 531 drivers/xen/gntdev.c ret = unmap_if_in_range(map, range->start, range->end, map 536 drivers/xen/gntdev.c list_for_each_entry(map, &priv->freeable_maps, next) { map 537 drivers/xen/gntdev.c ret = unmap_if_in_range(map, range->start, range->end, map 553 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 557 drivers/xen/gntdev.c list_for_each_entry(map, &priv->maps, next) { map 558 drivers/xen/gntdev.c if (!map->vma) map 561 drivers/xen/gntdev.c map->index, map->count, map 562 drivers/xen/gntdev.c map->vma->vm_start, map->vma->vm_end); map 563 drivers/xen/gntdev.c err = unmap_grant_pages(map, /* offset */ 0, map->count); map 566 drivers/xen/gntdev.c list_for_each_entry(map, &priv->freeable_maps, next) { map 567 drivers/xen/gntdev.c if (!map->vma) map 570 drivers/xen/gntdev.c map->index, map->count, map 571 drivers/xen/gntdev.c map->vma->vm_start, map->vma->vm_end); map 572 drivers/xen/gntdev.c err = unmap_grant_pages(map, /* offset */ 0, map->count); map 636 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 642 drivers/xen/gntdev.c map = list_entry(priv->maps.next, map 644 drivers/xen/gntdev.c list_del(&map->next); map 645 drivers/xen/gntdev.c gntdev_put_map(NULL /* already removed */, map); map 665 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 675 drivers/xen/gntdev.c map = gntdev_alloc_map(priv, 
op.count, 0 /* This is not a dma-buf. */); map 676 drivers/xen/gntdev.c if (!map) map 681 drivers/xen/gntdev.c gntdev_put_map(NULL, map); map 685 drivers/xen/gntdev.c if (copy_from_user(map->grants, &u->refs, map 686 drivers/xen/gntdev.c sizeof(map->grants[0]) * op.count) != 0) { map 687 drivers/xen/gntdev.c gntdev_put_map(NULL, map); map 692 drivers/xen/gntdev.c gntdev_add_map(priv, map); map 693 drivers/xen/gntdev.c op.index = map->index << PAGE_SHIFT; map 706 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 714 drivers/xen/gntdev.c map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count); map 715 drivers/xen/gntdev.c if (map) { map 716 drivers/xen/gntdev.c list_del(&map->next); map 718 drivers/xen/gntdev.c list_add_tail(&map->next, &priv->freeable_maps); map 722 drivers/xen/gntdev.c if (map) map 723 drivers/xen/gntdev.c gntdev_put_map(priv, map); map 732 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 744 drivers/xen/gntdev.c map = vma->vm_private_data; map 745 drivers/xen/gntdev.c if (!map) map 748 drivers/xen/gntdev.c op.offset = map->index << PAGE_SHIFT; map 749 drivers/xen/gntdev.c op.count = map->count; map 763 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 791 drivers/xen/gntdev.c list_for_each_entry(map, &priv->maps, next) { map 792 drivers/xen/gntdev.c uint64_t begin = map->index << PAGE_SHIFT; map 793 drivers/xen/gntdev.c uint64_t end = (map->index + map->count) << PAGE_SHIFT; map 802 drivers/xen/gntdev.c (map->flags & GNTMAP_readonly)) { map 807 drivers/xen/gntdev.c out_flags = map->notify.flags; map 808 drivers/xen/gntdev.c out_event = map->notify.event; map 810 drivers/xen/gntdev.c map->notify.flags = op.action; map 811 drivers/xen/gntdev.c map->notify.addr = op.index - (map->index << PAGE_SHIFT); map 812 drivers/xen/gntdev.c map->notify.event = op.event_channel_port; map 1075 drivers/xen/gntdev.c struct gntdev_grant_map *map; map 1085 drivers/xen/gntdev.c map = gntdev_find_map_index(priv, index, count); map 1086 drivers/xen/gntdev.c if (!map) map 1088 drivers/xen/gntdev.c if (use_ptemod && map->vma) map 1095 drivers/xen/gntdev.c refcount_inc(&map->users); map 1104 drivers/xen/gntdev.c vma->vm_private_data = map; map 1107 drivers/xen/gntdev.c map->vma = vma; map 1109 drivers/xen/gntdev.c if (map->flags) { map 1111 drivers/xen/gntdev.c (map->flags & GNTMAP_readonly)) map 1114 drivers/xen/gntdev.c map->flags = GNTMAP_host_map; map 1116 drivers/xen/gntdev.c map->flags |= GNTMAP_readonly; map 1122 drivers/xen/gntdev.c map->pages_vm_start = vma->vm_start; map 1125 drivers/xen/gntdev.c find_grant_ptes, map); map 1132 drivers/xen/gntdev.c err = gntdev_map_grant_pages(map); map 1137 drivers/xen/gntdev.c err = vm_map_pages_zero(vma, map->pages, map->count); map 1169 drivers/xen/gntdev.c map->vma = NULL; map 1170 drivers/xen/gntdev.c unmap_grant_pages(map, 0, map->count); map 1172 drivers/xen/gntdev.c gntdev_put_map(priv, map); map 88 drivers/xen/pvcalls-back.c struct sock_mapping *map); map 92 drivers/xen/pvcalls-back.c struct sock_mapping *map = (struct sock_mapping *)opaque; map 97 drivers/xen/pvcalls-back.c struct pvcalls_data_intf *intf = map->ring; map 98 drivers/xen/pvcalls-back.c struct pvcalls_data *data = &map->data; map 102 drivers/xen/pvcalls-back.c array_size = XEN_FLEX_RING_SIZE(map->ring_order); map 115 drivers/xen/pvcalls-back.c spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); map 116 drivers/xen/pvcalls-back.c if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) { map 117 drivers/xen/pvcalls-back.c 
atomic_set(&map->read, 0); map 118 drivers/xen/pvcalls-back.c spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, map 122 drivers/xen/pvcalls-back.c spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags); map 140 drivers/xen/pvcalls-back.c atomic_set(&map->read, 0); map 141 drivers/xen/pvcalls-back.c ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT); map 147 drivers/xen/pvcalls-back.c spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); map 148 drivers/xen/pvcalls-back.c if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue)) map 149 drivers/xen/pvcalls-back.c atomic_inc(&map->read); map 150 drivers/xen/pvcalls-back.c spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags); map 155 drivers/xen/pvcalls-back.c atomic_set(&map->read, 0); map 161 drivers/xen/pvcalls-back.c notify_remote_via_irq(map->irq); map 166 drivers/xen/pvcalls-back.c static void pvcalls_conn_back_write(struct sock_mapping *map) map 168 drivers/xen/pvcalls-back.c struct pvcalls_data_intf *intf = map->ring; map 169 drivers/xen/pvcalls-back.c struct pvcalls_data *data = &map->data; map 180 drivers/xen/pvcalls-back.c array_size = XEN_FLEX_RING_SIZE(map->ring_order); map 199 drivers/xen/pvcalls-back.c atomic_set(&map->write, 0); map 200 drivers/xen/pvcalls-back.c ret = inet_sendmsg(map->sock, &msg, size); map 202 drivers/xen/pvcalls-back.c atomic_inc(&map->write); map 203 drivers/xen/pvcalls-back.c atomic_inc(&map->io); map 220 drivers/xen/pvcalls-back.c atomic_inc(&map->write); map 221 drivers/xen/pvcalls-back.c notify_remote_via_irq(map->irq); map 228 drivers/xen/pvcalls-back.c struct sock_mapping *map = container_of(ioworker, struct sock_mapping, map 231 drivers/xen/pvcalls-back.c while (atomic_read(&map->io) > 0) { map 232 drivers/xen/pvcalls-back.c if (atomic_read(&map->release) > 0) { map 233 drivers/xen/pvcalls-back.c atomic_set(&map->release, 0); map 237 drivers/xen/pvcalls-back.c if (atomic_read(&map->read) > 0) map 238 drivers/xen/pvcalls-back.c pvcalls_conn_back_read(map); map 239 drivers/xen/pvcalls-back.c if (atomic_read(&map->write) > 0) map 240 drivers/xen/pvcalls-back.c pvcalls_conn_back_write(map); map 242 drivers/xen/pvcalls-back.c atomic_dec(&map->io); map 276 drivers/xen/pvcalls-back.c struct sock_mapping *map = sock->sk_user_data; map 278 drivers/xen/pvcalls-back.c if (map == NULL) map 281 drivers/xen/pvcalls-back.c atomic_inc(&map->read); map 282 drivers/xen/pvcalls-back.c notify_remote_via_irq(map->irq); map 287 drivers/xen/pvcalls-back.c struct sock_mapping *map = sock->sk_user_data; map 290 drivers/xen/pvcalls-back.c if (map == NULL) map 293 drivers/xen/pvcalls-back.c iow = &map->ioworker; map 294 drivers/xen/pvcalls-back.c atomic_inc(&map->read); map 295 drivers/xen/pvcalls-back.c atomic_inc(&map->io); map 307 drivers/xen/pvcalls-back.c struct sock_mapping *map; map 310 drivers/xen/pvcalls-back.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 311 drivers/xen/pvcalls-back.c if (map == NULL) map 314 drivers/xen/pvcalls-back.c map->fedata = fedata; map 315 drivers/xen/pvcalls-back.c map->sock = sock; map 316 drivers/xen/pvcalls-back.c map->id = id; map 317 drivers/xen/pvcalls-back.c map->ref = ref; map 322 drivers/xen/pvcalls-back.c map->ring = page; map 323 drivers/xen/pvcalls-back.c map->ring_order = map->ring->ring_order; map 326 drivers/xen/pvcalls-back.c if (map->ring_order > MAX_RING_ORDER) { map 328 drivers/xen/pvcalls-back.c __func__, map->ring_order, MAX_RING_ORDER); map 331 drivers/xen/pvcalls-back.c ret = 
xenbus_map_ring_valloc(fedata->dev, map->ring->ref, map 332 drivers/xen/pvcalls-back.c (1 << map->ring_order), &page); map 335 drivers/xen/pvcalls-back.c map->bytes = page; map 342 drivers/xen/pvcalls-back.c map); map 345 drivers/xen/pvcalls-back.c map->irq = ret; map 347 drivers/xen/pvcalls-back.c map->data.in = map->bytes; map 348 drivers/xen/pvcalls-back.c map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order); map 350 drivers/xen/pvcalls-back.c map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); map 351 drivers/xen/pvcalls-back.c if (!map->ioworker.wq) map 353 drivers/xen/pvcalls-back.c atomic_set(&map->io, 1); map 354 drivers/xen/pvcalls-back.c INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker); map 357 drivers/xen/pvcalls-back.c list_add_tail(&map->list, &fedata->socket_mappings); map 360 drivers/xen/pvcalls-back.c write_lock_bh(&map->sock->sk->sk_callback_lock); map 361 drivers/xen/pvcalls-back.c map->saved_data_ready = map->sock->sk->sk_data_ready; map 362 drivers/xen/pvcalls-back.c map->sock->sk->sk_user_data = map; map 363 drivers/xen/pvcalls-back.c map->sock->sk->sk_data_ready = pvcalls_sk_data_ready; map 364 drivers/xen/pvcalls-back.c map->sock->sk->sk_state_change = pvcalls_sk_state_change; map 365 drivers/xen/pvcalls-back.c write_unlock_bh(&map->sock->sk->sk_callback_lock); map 367 drivers/xen/pvcalls-back.c return map; map 370 drivers/xen/pvcalls-back.c list_del(&map->list); map 371 drivers/xen/pvcalls-back.c pvcalls_back_release_active(fedata->dev, fedata, map); map 382 drivers/xen/pvcalls-back.c struct sock_mapping *map; map 402 drivers/xen/pvcalls-back.c map = pvcalls_new_active_socket(fedata, map 407 drivers/xen/pvcalls-back.c if (!map) { map 424 drivers/xen/pvcalls-back.c struct sock_mapping *map) map 426 drivers/xen/pvcalls-back.c disable_irq(map->irq); map 427 drivers/xen/pvcalls-back.c if (map->sock->sk != NULL) { map 428 drivers/xen/pvcalls-back.c write_lock_bh(&map->sock->sk->sk_callback_lock); map 429 drivers/xen/pvcalls-back.c map->sock->sk->sk_user_data = NULL; map 430 drivers/xen/pvcalls-back.c map->sock->sk->sk_data_ready = map->saved_data_ready; map 431 drivers/xen/pvcalls-back.c write_unlock_bh(&map->sock->sk->sk_callback_lock); map 434 drivers/xen/pvcalls-back.c atomic_set(&map->release, 1); map 435 drivers/xen/pvcalls-back.c flush_work(&map->ioworker.register_work); map 437 drivers/xen/pvcalls-back.c xenbus_unmap_ring_vfree(dev, map->bytes); map 438 drivers/xen/pvcalls-back.c xenbus_unmap_ring_vfree(dev, (void *)map->ring); map 439 drivers/xen/pvcalls-back.c unbind_from_irqhandler(map->irq, map); map 441 drivers/xen/pvcalls-back.c sock_release(map->sock); map 442 drivers/xen/pvcalls-back.c kfree(map); map 469 drivers/xen/pvcalls-back.c struct sock_mapping *map, *n; map 477 drivers/xen/pvcalls-back.c list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) { map 478 drivers/xen/pvcalls-back.c if (map->id == req->u.release.id) { map 479 drivers/xen/pvcalls-back.c list_del(&map->list); map 481 drivers/xen/pvcalls-back.c ret = pvcalls_back_release_active(dev, fedata, map); map 507 drivers/xen/pvcalls-back.c struct sock_mapping *map; map 544 drivers/xen/pvcalls-back.c map = pvcalls_new_active_socket(fedata, map 549 drivers/xen/pvcalls-back.c if (!map) { map 555 drivers/xen/pvcalls-back.c map->sockpass = mappass; map 556 drivers/xen/pvcalls-back.c iow = &map->ioworker; map 557 drivers/xen/pvcalls-back.c atomic_inc(&map->read); map 558 drivers/xen/pvcalls-back.c atomic_inc(&map->io); map 612 drivers/xen/pvcalls-back.c 
struct sockpass_mapping *map; map 617 drivers/xen/pvcalls-back.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 618 drivers/xen/pvcalls-back.c if (map == NULL) { map 623 drivers/xen/pvcalls-back.c INIT_WORK(&map->register_work, __pvcalls_back_accept); map 624 drivers/xen/pvcalls-back.c spin_lock_init(&map->copy_lock); map 625 drivers/xen/pvcalls-back.c map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1); map 626 drivers/xen/pvcalls-back.c if (!map->wq) { map 631 drivers/xen/pvcalls-back.c ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock); map 635 drivers/xen/pvcalls-back.c ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr, map 640 drivers/xen/pvcalls-back.c map->fedata = fedata; map 641 drivers/xen/pvcalls-back.c map->id = req->u.bind.id; map 644 drivers/xen/pvcalls-back.c ret = radix_tree_insert(&fedata->socketpass_mappings, map->id, map 645 drivers/xen/pvcalls-back.c map); map 650 drivers/xen/pvcalls-back.c write_lock_bh(&map->sock->sk->sk_callback_lock); map 651 drivers/xen/pvcalls-back.c map->saved_data_ready = map->sock->sk->sk_data_ready; map 652 drivers/xen/pvcalls-back.c map->sock->sk->sk_user_data = map; map 653 drivers/xen/pvcalls-back.c map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready; map 654 drivers/xen/pvcalls-back.c write_unlock_bh(&map->sock->sk->sk_callback_lock); map 658 drivers/xen/pvcalls-back.c if (map && map->sock) map 659 drivers/xen/pvcalls-back.c sock_release(map->sock); map 660 drivers/xen/pvcalls-back.c if (map && map->wq) map 661 drivers/xen/pvcalls-back.c destroy_workqueue(map->wq); map 662 drivers/xen/pvcalls-back.c kfree(map); map 677 drivers/xen/pvcalls-back.c struct sockpass_mapping *map; map 683 drivers/xen/pvcalls-back.c map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id); map 685 drivers/xen/pvcalls-back.c if (map == NULL) map 688 drivers/xen/pvcalls-back.c ret = inet_listen(map->sock, req->u.listen.backlog); map 890 drivers/xen/pvcalls-back.c struct sock_mapping *map = sock_map; map 893 drivers/xen/pvcalls-back.c if (map == NULL || map->sock == NULL || map->sock->sk == NULL || map 894 drivers/xen/pvcalls-back.c map->sock->sk->sk_user_data != map) map 897 drivers/xen/pvcalls-back.c iow = &map->ioworker; map 899 drivers/xen/pvcalls-back.c atomic_inc(&map->write); map 900 drivers/xen/pvcalls-back.c atomic_inc(&map->io); map 975 drivers/xen/pvcalls-back.c struct sock_mapping *map, *n; map 984 drivers/xen/pvcalls-back.c list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) { map 985 drivers/xen/pvcalls-back.c list_del(&map->list); map 986 drivers/xen/pvcalls-back.c pvcalls_back_release_active(dev, fedata, map); map 101 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 107 drivers/xen/pvcalls-front.c map = (struct sock_mapping *)sock->sk->sk_send_head; map 108 drivers/xen/pvcalls-front.c if (map == NULL) map 112 drivers/xen/pvcalls-front.c atomic_inc(&map->refcount); map 113 drivers/xen/pvcalls-front.c return map; map 118 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 120 drivers/xen/pvcalls-front.c map = (struct sock_mapping *)sock->sk->sk_send_head; map 121 drivers/xen/pvcalls-front.c atomic_dec(&map->refcount); map 134 drivers/xen/pvcalls-front.c static bool pvcalls_front_write_todo(struct sock_mapping *map) map 136 drivers/xen/pvcalls-front.c struct pvcalls_data_intf *intf = map->active.ring; map 151 drivers/xen/pvcalls-front.c static bool pvcalls_front_read_todo(struct sock_mapping *map) map 153 drivers/xen/pvcalls-front.c struct pvcalls_data_intf *intf = map->active.ring; map 
189 drivers/xen/pvcalls-front.c struct sock_mapping *map = (struct sock_mapping *)(uintptr_t) map 193 drivers/xen/pvcalls-front.c (void *)&map->passive.flags); map 201 drivers/xen/pvcalls-front.c (void *)&map->passive.flags); map 229 drivers/xen/pvcalls-front.c struct sock_mapping *map) map 233 drivers/xen/pvcalls-front.c unbind_from_irqhandler(map->active.irq, map); map 236 drivers/xen/pvcalls-front.c if (!list_empty(&map->list)) map 237 drivers/xen/pvcalls-front.c list_del_init(&map->list); map 241 drivers/xen/pvcalls-front.c gnttab_end_foreign_access(map->active.ring->ref[i], 0, 0); map 242 drivers/xen/pvcalls-front.c gnttab_end_foreign_access(map->active.ref, 0, 0); map 243 drivers/xen/pvcalls-front.c free_page((unsigned long)map->active.ring); map 245 drivers/xen/pvcalls-front.c kfree(map); map 250 drivers/xen/pvcalls-front.c struct sock_mapping *map = sock_map; map 252 drivers/xen/pvcalls-front.c if (map == NULL) map 255 drivers/xen/pvcalls-front.c wake_up_interruptible(&map->active.inflight_conn_req); map 263 drivers/xen/pvcalls-front.c struct sock_mapping *map = NULL; map 284 drivers/xen/pvcalls-front.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 285 drivers/xen/pvcalls-front.c if (map == NULL) { map 294 drivers/xen/pvcalls-front.c kfree(map); map 306 drivers/xen/pvcalls-front.c sock->sk->sk_send_head = (void *)map; map 307 drivers/xen/pvcalls-front.c list_add_tail(&map->list, &bedata->socket_mappings); map 312 drivers/xen/pvcalls-front.c req->u.socket.id = (uintptr_t) map; map 335 drivers/xen/pvcalls-front.c static void free_active_ring(struct sock_mapping *map) map 337 drivers/xen/pvcalls-front.c if (!map->active.ring) map 340 drivers/xen/pvcalls-front.c free_pages((unsigned long)map->active.data.in, map 341 drivers/xen/pvcalls-front.c map->active.ring->ring_order); map 342 drivers/xen/pvcalls-front.c free_page((unsigned long)map->active.ring); map 345 drivers/xen/pvcalls-front.c static int alloc_active_ring(struct sock_mapping *map) map 349 drivers/xen/pvcalls-front.c map->active.ring = (struct pvcalls_data_intf *) map 351 drivers/xen/pvcalls-front.c if (!map->active.ring) map 354 drivers/xen/pvcalls-front.c map->active.ring->ring_order = PVCALLS_RING_ORDER; map 360 drivers/xen/pvcalls-front.c map->active.data.in = bytes; map 361 drivers/xen/pvcalls-front.c map->active.data.out = bytes + map 367 drivers/xen/pvcalls-front.c free_active_ring(map); map 371 drivers/xen/pvcalls-front.c static int create_active(struct sock_mapping *map, int *evtchn) map 377 drivers/xen/pvcalls-front.c init_waitqueue_head(&map->active.inflight_conn_req); map 379 drivers/xen/pvcalls-front.c bytes = map->active.data.in; map 381 drivers/xen/pvcalls-front.c map->active.ring->ref[i] = gnttab_grant_foreign_access( map 385 drivers/xen/pvcalls-front.c map->active.ref = gnttab_grant_foreign_access( map 387 drivers/xen/pvcalls-front.c pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0); map 393 drivers/xen/pvcalls-front.c 0, "pvcalls-frontend", map); map 399 drivers/xen/pvcalls-front.c map->active.irq = irq; map 400 drivers/xen/pvcalls-front.c map->active_socket = true; map 401 drivers/xen/pvcalls-front.c mutex_init(&map->active.in_mutex); map 402 drivers/xen/pvcalls-front.c mutex_init(&map->active.out_mutex); map 416 drivers/xen/pvcalls-front.c struct sock_mapping *map = NULL; map 423 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 424 drivers/xen/pvcalls-front.c if (IS_ERR(map)) map 425 drivers/xen/pvcalls-front.c return PTR_ERR(map); map 428 drivers/xen/pvcalls-front.c ret = 
alloc_active_ring(map); map 438 drivers/xen/pvcalls-front.c free_active_ring(map); map 442 drivers/xen/pvcalls-front.c ret = create_active(map, &evtchn); map 445 drivers/xen/pvcalls-front.c free_active_ring(map); map 453 drivers/xen/pvcalls-front.c req->u.connect.id = (uintptr_t)map; map 456 drivers/xen/pvcalls-front.c req->u.connect.ref = map->active.ref; map 460 drivers/xen/pvcalls-front.c map->sock = sock; map 534 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 542 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 543 drivers/xen/pvcalls-front.c if (IS_ERR(map)) map 544 drivers/xen/pvcalls-front.c return PTR_ERR(map); map 546 drivers/xen/pvcalls-front.c mutex_lock(&map->active.out_mutex); map 547 drivers/xen/pvcalls-front.c if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) { map 548 drivers/xen/pvcalls-front.c mutex_unlock(&map->active.out_mutex); map 557 drivers/xen/pvcalls-front.c sent = __write_ring(map->active.ring, map 558 drivers/xen/pvcalls-front.c &map->active.data, &msg->msg_iter, map 563 drivers/xen/pvcalls-front.c notify_remote_via_irq(map->active.irq); map 570 drivers/xen/pvcalls-front.c mutex_unlock(&map->active.out_mutex); map 628 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 633 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 634 drivers/xen/pvcalls-front.c if (IS_ERR(map)) map 635 drivers/xen/pvcalls-front.c return PTR_ERR(map); map 637 drivers/xen/pvcalls-front.c mutex_lock(&map->active.in_mutex); map 641 drivers/xen/pvcalls-front.c while (!(flags & MSG_DONTWAIT) && !pvcalls_front_read_todo(map)) { map 642 drivers/xen/pvcalls-front.c wait_event_interruptible(map->active.inflight_conn_req, map 643 drivers/xen/pvcalls-front.c pvcalls_front_read_todo(map)); map 645 drivers/xen/pvcalls-front.c ret = __read_ring(map->active.ring, &map->active.data, map 649 drivers/xen/pvcalls-front.c notify_remote_via_irq(map->active.irq); map 655 drivers/xen/pvcalls-front.c mutex_unlock(&map->active.in_mutex); map 663 drivers/xen/pvcalls-front.c struct sock_mapping *map = NULL; map 670 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 671 drivers/xen/pvcalls-front.c if (IS_ERR(map)) map 672 drivers/xen/pvcalls-front.c return PTR_ERR(map); map 684 drivers/xen/pvcalls-front.c map->sock = sock; map 686 drivers/xen/pvcalls-front.c req->u.bind.id = (uintptr_t)map; map 690 drivers/xen/pvcalls-front.c init_waitqueue_head(&map->passive.inflight_accept_req); map 692 drivers/xen/pvcalls-front.c map->active_socket = false; map 708 drivers/xen/pvcalls-front.c map->passive.status = PVCALLS_STATUS_BIND; map 716 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 720 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 721 drivers/xen/pvcalls-front.c if (IS_ERR(map)) map 722 drivers/xen/pvcalls-front.c return PTR_ERR(map); map 725 drivers/xen/pvcalls-front.c if (map->passive.status != PVCALLS_STATUS_BIND) { map 740 drivers/xen/pvcalls-front.c req->u.listen.id = (uintptr_t) map; map 757 drivers/xen/pvcalls-front.c map->passive.status = PVCALLS_STATUS_LISTEN; map 765 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 770 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 771 drivers/xen/pvcalls-front.c if (IS_ERR(map)) map 772 drivers/xen/pvcalls-front.c return PTR_ERR(map); map 775 drivers/xen/pvcalls-front.c if (map->passive.status != PVCALLS_STATUS_LISTEN) { map 786 drivers/xen/pvcalls-front.c (void *)&map->passive.flags)) { map 787 drivers/xen/pvcalls-front.c req_id = 
READ_ONCE(map->passive.inflight_req_id); map 790 drivers/xen/pvcalls-front.c map2 = map->passive.accept_map; map 797 drivers/xen/pvcalls-front.c if (wait_event_interruptible(map->passive.inflight_accept_req, map 799 drivers/xen/pvcalls-front.c (void *)&map->passive.flags))) { map 808 drivers/xen/pvcalls-front.c (void *)&map->passive.flags); map 815 drivers/xen/pvcalls-front.c (void *)&map->passive.flags); map 824 drivers/xen/pvcalls-front.c (void *)&map->passive.flags); map 837 drivers/xen/pvcalls-front.c (void *)&map->passive.flags); map 847 drivers/xen/pvcalls-front.c req->u.accept.id = (uintptr_t) map; map 851 drivers/xen/pvcalls-front.c map->passive.accept_map = map2; map 860 drivers/xen/pvcalls-front.c WRITE_ONCE(map->passive.inflight_req_id, req_id); map 878 drivers/xen/pvcalls-front.c map->passive.inflight_req_id = PVCALLS_INVALID_ID; map 880 drivers/xen/pvcalls-front.c (void *)&map->passive.flags); map 889 drivers/xen/pvcalls-front.c map->passive.inflight_req_id = PVCALLS_INVALID_ID; map 891 drivers/xen/pvcalls-front.c clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags); map 892 drivers/xen/pvcalls-front.c wake_up(&map->passive.inflight_accept_req); map 900 drivers/xen/pvcalls-front.c struct sock_mapping *map, map 907 drivers/xen/pvcalls-front.c (void *)&map->passive.flags)) { map 908 drivers/xen/pvcalls-front.c uint32_t req_id = READ_ONCE(map->passive.inflight_req_id); map 914 drivers/xen/pvcalls-front.c poll_wait(file, &map->passive.inflight_accept_req, wait); map 919 drivers/xen/pvcalls-front.c (void *)&map->passive.flags)) map 929 drivers/xen/pvcalls-front.c (void *)&map->passive.flags)) { map 943 drivers/xen/pvcalls-front.c req->u.poll.id = (uintptr_t) map; map 957 drivers/xen/pvcalls-front.c struct sock_mapping *map, map 962 drivers/xen/pvcalls-front.c struct pvcalls_data_intf *intf = map->active.ring; map 967 drivers/xen/pvcalls-front.c poll_wait(file, &map->active.inflight_conn_req, wait); map 968 drivers/xen/pvcalls-front.c if (pvcalls_front_write_todo(map)) map 970 drivers/xen/pvcalls-front.c if (pvcalls_front_read_todo(map)) map 982 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 985 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 986 drivers/xen/pvcalls-front.c if (IS_ERR(map)) map 990 drivers/xen/pvcalls-front.c if (map->active_socket) map 991 drivers/xen/pvcalls-front.c ret = pvcalls_front_poll_active(file, bedata, map, wait); map 993 drivers/xen/pvcalls-front.c ret = pvcalls_front_poll_passive(file, bedata, map, wait); map 1001 drivers/xen/pvcalls-front.c struct sock_mapping *map; map 1008 drivers/xen/pvcalls-front.c map = pvcalls_enter_sock(sock); map 1009 drivers/xen/pvcalls-front.c if (IS_ERR(map)) { map 1010 drivers/xen/pvcalls-front.c if (PTR_ERR(map) == -ENOTCONN) map 1029 drivers/xen/pvcalls-front.c req->u.release.id = (uintptr_t)map; map 1040 drivers/xen/pvcalls-front.c if (map->active_socket) { map 1045 drivers/xen/pvcalls-front.c map->active.ring->in_error = -EBADF; map 1046 drivers/xen/pvcalls-front.c wake_up_interruptible(&map->active.inflight_conn_req); map 1054 drivers/xen/pvcalls-front.c while (atomic_read(&map->refcount) > 1) map 1057 drivers/xen/pvcalls-front.c pvcalls_front_free_map(bedata, map); map 1060 drivers/xen/pvcalls-front.c wake_up(&map->passive.inflight_accept_req); map 1062 drivers/xen/pvcalls-front.c while (atomic_read(&map->refcount) > 1) map 1066 drivers/xen/pvcalls-front.c list_del(&map->list); map 1068 drivers/xen/pvcalls-front.c if (READ_ONCE(map->passive.inflight_req_id) != 
PVCALLS_INVALID_ID && map 1069 drivers/xen/pvcalls-front.c READ_ONCE(map->passive.inflight_req_id) != 0) { map 1071 drivers/xen/pvcalls-front.c map->passive.accept_map); map 1073 drivers/xen/pvcalls-front.c kfree(map); map 1089 drivers/xen/pvcalls-front.c struct sock_mapping *map = NULL, *n; map 1097 drivers/xen/pvcalls-front.c list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) { map 1098 drivers/xen/pvcalls-front.c map->sock->sk->sk_send_head = NULL; map 1099 drivers/xen/pvcalls-front.c if (map->active_socket) { map 1100 drivers/xen/pvcalls-front.c map->active.ring->in_error = -EBADF; map 1101 drivers/xen/pvcalls-front.c wake_up_interruptible(&map->active.inflight_conn_req); map 1108 drivers/xen/pvcalls-front.c list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) { map 1109 drivers/xen/pvcalls-front.c if (map->active_socket) { map 1111 drivers/xen/pvcalls-front.c pvcalls_front_free_map(bedata, map); map 1113 drivers/xen/pvcalls-front.c list_del(&map->list); map 1114 drivers/xen/pvcalls-front.c kfree(map); map 369 drivers/xen/swiotlb-xen.c phys_addr_t map, phys = page_to_phys(page) + offset; map 389 drivers/xen/swiotlb-xen.c map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, map 391 drivers/xen/swiotlb-xen.c if (map == (phys_addr_t)DMA_MAPPING_ERROR) map 394 drivers/xen/swiotlb-xen.c phys = map; map 395 drivers/xen/swiotlb-xen.c dev_addr = xen_phys_to_bus(map); map 401 drivers/xen/swiotlb-xen.c swiotlb_tbl_unmap_single(dev, map, size, size, dir, map 66 drivers/xen/xen-front-pgdir-shbuf.c int (*map)(struct xen_front_pgdir_shbuf *buf); map 105 drivers/xen/xen-front-pgdir-shbuf.c if (buf->ops && buf->ops->map) map 106 drivers/xen/xen-front-pgdir-shbuf.c return buf->ops->map(buf); map 497 drivers/xen/xen-front-pgdir-shbuf.c .map = backend_map, map 417 drivers/xen/xen-scsiback.c static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map, map 425 drivers/xen/xen-scsiback.c err = gnttab_map_refs(map, NULL, pg, cnt); map 428 drivers/xen/xen-scsiback.c if (unlikely(map[i].status != GNTST_okay)) { map 430 drivers/xen/xen-scsiback.c map[i].handle = SCSIBACK_INVALID_HANDLE; map 435 drivers/xen/xen-scsiback.c grant[i] = map[i].handle; map 445 drivers/xen/xen-scsiback.c struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH]; map 454 drivers/xen/xen-scsiback.c gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]), map 459 drivers/xen/xen-scsiback.c err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount); map 467 drivers/xen/xen-scsiback.c err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount); map 76 drivers/xen/xenbus/xenbus_client.c int (*map)(struct xenbus_device *dev, map 453 drivers/xen/xenbus/xenbus_client.c err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr); map 473 drivers/xen/xenbus/xenbus_client.c struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; map 482 drivers/xen/xenbus/xenbus_client.c memset(&map[i], 0, sizeof(map[i])); map 483 drivers/xen/xenbus/xenbus_client.c gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i], map 488 drivers/xen/xenbus/xenbus_client.c gnttab_batch_map(map, i); map 491 drivers/xen/xenbus/xenbus_client.c if (map[i].status != GNTST_okay) { map 492 drivers/xen/xenbus/xenbus_client.c err = map[i].status; map 493 drivers/xen/xenbus/xenbus_client.c xenbus_dev_fatal(dev, map[i].status, map 498 drivers/xen/xenbus/xenbus_client.c handles[i] = map[i].handle; map 798 drivers/xen/xenbus/xenbus_client.c .map = xenbus_map_ring_valloc_pv, map 932 drivers/xen/xenbus/xenbus_client.c .map = 
xenbus_map_ring_valloc_hvm, map 71 fs/adfs/map.c unsigned char *map = dm->dm_bh->b_data + 4; map 77 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask); map 84 fs/adfs/map.c __le32 *_map = (__le32 *)map; map 132 fs/adfs/map.c unsigned char *map = dm->dm_bh->b_data; map 140 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask); map 155 fs/adfs/map.c frag = GET_FRAG_ID(map, start, idmask); map 162 fs/adfs/map.c __le32 *_map = (__le32 *)map; map 91 fs/adfs/super.c static unsigned char adfs_calczonecheck(struct super_block *sb, unsigned char *map) map 98 fs/adfs/super.c v0 += map[i] + (v3 >> 8); map 100 fs/adfs/super.c v1 += map[i + 1] + (v0 >> 8); map 102 fs/adfs/super.c v2 += map[i + 2] + (v1 >> 8); map 104 fs/adfs/super.c v3 += map[i + 3] + (v2 >> 8); map 108 fs/adfs/super.c v1 += map[1] + (v0 >> 8); map 109 fs/adfs/super.c v2 += map[2] + (v1 >> 8); map 110 fs/adfs/super.c v3 += map[3] + (v2 >> 8); map 121 fs/adfs/super.c unsigned char *map; map 123 fs/adfs/super.c map = dm[i].dm_bh->b_data; map 125 fs/adfs/super.c if (adfs_calczonecheck(sb, map) != map[0]) { map 129 fs/adfs/super.c crosscheck ^= map[3]; map 1138 fs/btrfs/block-group.c struct map_lookup *map; map 1165 fs/btrfs/block-group.c map = em->map_lookup; map 1166 fs/btrfs/block-group.c num_items = 3 + map->num_stripes; map 729 fs/btrfs/dev-replace.c struct map_lookup *map; map 738 fs/btrfs/dev-replace.c map = em->map_lookup; map 739 fs/btrfs/dev-replace.c for (i = 0; i < map->num_stripes; i++) map 740 fs/btrfs/dev-replace.c if (srcdev == map->stripes[i].dev) map 741 fs/btrfs/dev-replace.c map->stripes[i].dev = tgtdev; map 4450 fs/btrfs/extent_io.c struct extent_map_tree *map = &btrfs_inode->extent_tree; map 4457 fs/btrfs/extent_io.c write_lock(&map->lock); map 4458 fs/btrfs/extent_io.c em = lookup_extent_mapping(map, start, len); map 4460 fs/btrfs/extent_io.c write_unlock(&map->lock); map 4465 fs/btrfs/extent_io.c write_unlock(&map->lock); map 4474 fs/btrfs/extent_io.c remove_extent_mapping(map, em); map 4479 fs/btrfs/extent_io.c write_unlock(&map->lock); map 5633 fs/btrfs/extent_io.c char **map, unsigned long *map_start, map 5663 fs/btrfs/extent_io.c *map = kaddr + offset; map 492 fs/btrfs/extent_io.h char **map, unsigned long *map_start, map 38 fs/btrfs/extent_map.c tree->map = RB_ROOT_CACHED; map 261 fs/btrfs/extent_map.c rb_erase_cached(&merge->rb_node, &tree->map); map 273 fs/btrfs/extent_map.c rb_erase_cached(&merge->rb_node, &tree->map); map 354 fs/btrfs/extent_map.c struct map_lookup *map = em->map_lookup; map 358 fs/btrfs/extent_map.c for (i = 0; i < map->num_stripes; i++) { map 359 fs/btrfs/extent_map.c struct btrfs_bio_stripe *stripe = &map->stripes[i]; map 369 fs/btrfs/extent_map.c struct map_lookup *map = em->map_lookup; map 373 fs/btrfs/extent_map.c for (i = 0; i < map->num_stripes; i++) { map 374 fs/btrfs/extent_map.c struct btrfs_bio_stripe *stripe = &map->stripes[i]; map 400 fs/btrfs/extent_map.c ret = tree_insert(&tree->map, em); map 423 fs/btrfs/extent_map.c rb_node = __tree_search(&tree->map.rb_root, start, &prev, &next); map 487 fs/btrfs/extent_map.c rb_erase_cached(&em->rb_node, &tree->map); map 504 fs/btrfs/extent_map.c rb_replace_node_cached(&cur->rb_node, &new->rb_node, &tree->map); map 60 fs/btrfs/extent_map.h struct rb_root_cached map; map 3604 fs/btrfs/free-space-cache.c void *map = NULL; map 3627 fs/btrfs/free-space-cache.c if (!map) { map 3628 fs/btrfs/free-space-cache.c map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS); map 3629 fs/btrfs/free-space-cache.c if (!map) { map 3639 
fs/btrfs/free-space-cache.c info->bitmap = map; map 3640 fs/btrfs/free-space-cache.c map = NULL; map 3657 fs/btrfs/free-space-cache.c if (map) map 3658 fs/btrfs/free-space-cache.c kmem_cache_free(btrfs_free_space_bitmap_cachep, map); map 161 fs/btrfs/free-space-tree.c static void le_bitmap_set(unsigned long *map, unsigned int start, int len) map 163 fs/btrfs/free-space-tree.c u8 *p = ((u8 *)map) + BIT_BYTE(start); map 5434 fs/btrfs/inode.c while (!RB_EMPTY_ROOT(&map_tree->map.rb_root)) { map 5437 fs/btrfs/inode.c node = rb_first_cached(&map_tree->map); map 7001 fs/btrfs/inode.c char *map = kmap(page); map 7002 fs/btrfs/inode.c memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); map 7171 fs/btrfs/inode.c char *map; map 7200 fs/btrfs/inode.c map = kmap(page); map 7201 fs/btrfs/inode.c read_extent_buffer(leaf, map + pg_offset, ptr, map 7204 fs/btrfs/inode.c memset(map + pg_offset + copy_size, 0, map 7749 fs/btrfs/inode.c static int btrfs_get_blocks_direct_write(struct extent_map **map, map 7756 fs/btrfs/inode.c struct extent_map *em = *map; map 7793 fs/btrfs/inode.c *map = em = em2; map 7814 fs/btrfs/inode.c *map = em = btrfs_new_extent_direct(inode, start, len); map 8474 fs/btrfs/inode.c goto map; map 8495 fs/btrfs/inode.c map: map 10 fs/btrfs/raid56.h static inline int nr_parity_stripes(const struct map_lookup *map) map 12 fs/btrfs/raid56.h if (map->type & BTRFS_BLOCK_GROUP_RAID5) map 14 fs/btrfs/raid56.h else if (map->type & BTRFS_BLOCK_GROUP_RAID6) map 20 fs/btrfs/raid56.h static inline int nr_data_stripes(const struct map_lookup *map) map 22 fs/btrfs/raid56.h return map->num_stripes - nr_parity_stripes(map); map 2470 fs/btrfs/scrub.c static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map, map 2480 fs/btrfs/scrub.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) map 2481 fs/btrfs/scrub.c blocksize = map->stripe_len; map 2489 fs/btrfs/scrub.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) map 2490 fs/btrfs/scrub.c blocksize = map->stripe_len; map 2664 fs/btrfs/scrub.c struct map_lookup *map, u64 *offset, map 2673 fs/btrfs/scrub.c const int data_stripes = nr_data_stripes(map); map 2675 fs/btrfs/scrub.c last_offset = (physical - map->stripes[num].physical) * data_stripes; map 2681 fs/btrfs/scrub.c *offset = last_offset + i * map->stripe_len; map 2683 fs/btrfs/scrub.c stripe_nr = div64_u64(*offset, map->stripe_len); map 2687 fs/btrfs/scrub.c stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, &rot); map 2690 fs/btrfs/scrub.c stripe_index = rot % map->num_stripes; map 2696 fs/btrfs/scrub.c *offset = last_offset + j * map->stripe_len; map 2819 fs/btrfs/scrub.c struct map_lookup *map, map 2847 fs/btrfs/scrub.c nsectors = div_u64(map->stripe_len, fs_info->sectorsize); map 2858 fs/btrfs/scrub.c sparity->stripe_len = map->stripe_len; map 2930 fs/btrfs/scrub.c while (key.objectid >= logic_start + map->stripe_len) map 2931 fs/btrfs/scrub.c logic_start += map->stripe_len; map 2941 fs/btrfs/scrub.c logic_start + map->stripe_len)) { map 2960 fs/btrfs/scrub.c logic_start + map->stripe_len) map 2961 fs/btrfs/scrub.c extent_len = logic_start + map->stripe_len - map 3006 fs/btrfs/scrub.c logic_start += map->stripe_len; map 3027 fs/btrfs/scrub.c logic_start += map->stripe_len; map 3044 fs/btrfs/scrub.c struct map_lookup *map, map 3069 fs/btrfs/scrub.c u64 increment = map->stripe_len; map 3080 fs/btrfs/scrub.c physical = map->stripes[num].physical; map 3082 fs/btrfs/scrub.c nstripes = div64_u64(length, map->stripe_len); map 3083 fs/btrfs/scrub.c if (map->type & 
BTRFS_BLOCK_GROUP_RAID0) { map 3084 fs/btrfs/scrub.c offset = map->stripe_len * num; map 3085 fs/btrfs/scrub.c increment = map->stripe_len * map->num_stripes; map 3087 fs/btrfs/scrub.c } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { map 3088 fs/btrfs/scrub.c int factor = map->num_stripes / map->sub_stripes; map 3089 fs/btrfs/scrub.c offset = map->stripe_len * (num / map->sub_stripes); map 3090 fs/btrfs/scrub.c increment = map->stripe_len * factor; map 3091 fs/btrfs/scrub.c mirror_num = num % map->sub_stripes + 1; map 3092 fs/btrfs/scrub.c } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { map 3093 fs/btrfs/scrub.c increment = map->stripe_len; map 3094 fs/btrfs/scrub.c mirror_num = num % map->num_stripes + 1; map 3095 fs/btrfs/scrub.c } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { map 3096 fs/btrfs/scrub.c increment = map->stripe_len; map 3097 fs/btrfs/scrub.c mirror_num = num % map->num_stripes + 1; map 3098 fs/btrfs/scrub.c } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 3099 fs/btrfs/scrub.c get_raid56_logic_offset(physical, num, map, &offset, NULL); map 3100 fs/btrfs/scrub.c increment = map->stripe_len * nr_data_stripes(map); map 3103 fs/btrfs/scrub.c increment = map->stripe_len; map 3133 fs/btrfs/scrub.c physical_end = physical + nstripes * map->stripe_len; map 3134 fs/btrfs/scrub.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 3136 fs/btrfs/scrub.c map, &logic_end, NULL); map 3203 fs/btrfs/scrub.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 3204 fs/btrfs/scrub.c ret = get_raid56_logic_offset(physical, num, map, map 3212 fs/btrfs/scrub.c ret = scrub_raid56_parity(sctx, map, scrub_dev, map 3277 fs/btrfs/scrub.c if (key.objectid >= logical + map->stripe_len) { map 3292 fs/btrfs/scrub.c logical + map->stripe_len)) { map 3314 fs/btrfs/scrub.c logical + map->stripe_len) { map 3315 fs/btrfs/scrub.c extent_len = logical + map->stripe_len - map 3336 fs/btrfs/scrub.c ret = scrub_extent(sctx, map, extent_logical, extent_len, map 3348 fs/btrfs/scrub.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 3354 fs/btrfs/scrub.c physical += map->stripe_len; map 3356 fs/btrfs/scrub.c num, map, &logical, map 3365 fs/btrfs/scrub.c map, scrub_dev, ppath, map 3373 fs/btrfs/scrub.c physical += map->stripe_len; map 3392 fs/btrfs/scrub.c physical += map->stripe_len; map 3395 fs/btrfs/scrub.c sctx->stat.last_physical = map->stripes[num].physical + map 3424 fs/btrfs/scrub.c struct map_lookup *map; map 3446 fs/btrfs/scrub.c map = em->map_lookup; map 3453 fs/btrfs/scrub.c for (i = 0; i < map->num_stripes; ++i) { map 3454 fs/btrfs/scrub.c if (map->stripes[i].dev->bdev == scrub_dev->bdev && map 3455 fs/btrfs/scrub.c map->stripes[i].physical == dev_offset) { map 3456 fs/btrfs/scrub.c ret = scrub_stripe(sctx, map, scrub_dev, i, map 15 fs/btrfs/tests/extent-map-tests.c while (!RB_EMPTY_ROOT(&em_tree->map.rb_root)) { map 16 fs/btrfs/tests/extent-map-tests.c node = rb_first_cached(&em_tree->map); map 1871 fs/btrfs/volumes.c n = rb_last(&em_tree->map.rb_root); map 3016 fs/btrfs/volumes.c struct map_lookup *map; map 3031 fs/btrfs/volumes.c map = em->map_lookup; map 3033 fs/btrfs/volumes.c check_system_chunk(trans, map->type); map 3042 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 3043 fs/btrfs/volumes.c struct btrfs_device *device = map->stripes[i].dev; map 3045 fs/btrfs/volumes.c map->stripes[i].physical, map 3077 fs/btrfs/volumes.c trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len); map 3079 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) { map 
4960 fs/btrfs/volumes.c struct map_lookup *map = NULL; map 5151 fs/btrfs/volumes.c map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); map 5152 fs/btrfs/volumes.c if (!map) { map 5156 fs/btrfs/volumes.c map->num_stripes = num_stripes; map 5161 fs/btrfs/volumes.c map->stripes[s].dev = devices_info[i].dev; map 5162 fs/btrfs/volumes.c map->stripes[s].physical = devices_info[i].dev_offset + map 5166 fs/btrfs/volumes.c map->stripe_len = BTRFS_STRIPE_LEN; map 5167 fs/btrfs/volumes.c map->io_align = BTRFS_STRIPE_LEN; map 5168 fs/btrfs/volumes.c map->io_width = BTRFS_STRIPE_LEN; map 5169 fs/btrfs/volumes.c map->type = type; map 5170 fs/btrfs/volumes.c map->sub_stripes = sub_stripes; map 5174 fs/btrfs/volumes.c trace_btrfs_chunk_alloc(info, map, start, chunk_size); map 5178 fs/btrfs/volumes.c kfree(map); map 5183 fs/btrfs/volumes.c em->map_lookup = map; map 5204 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 5205 fs/btrfs/volumes.c struct btrfs_device *dev = map->stripes[i].dev; map 5213 fs/btrfs/volumes.c atomic64_sub(stripe_size * map->num_stripes, &info->free_chunk_space); map 5246 fs/btrfs/volumes.c struct map_lookup *map; map 5257 fs/btrfs/volumes.c map = em->map_lookup; map 5258 fs/btrfs/volumes.c item_size = btrfs_chunk_item_size(map->num_stripes); map 5275 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 5276 fs/btrfs/volumes.c device = map->stripes[i].dev; map 5277 fs/btrfs/volumes.c dev_offset = map->stripes[i].physical; map 5293 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 5294 fs/btrfs/volumes.c device = map->stripes[i].dev; map 5295 fs/btrfs/volumes.c dev_offset = map->stripes[i].physical; map 5306 fs/btrfs/volumes.c btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len); map 5307 fs/btrfs/volumes.c btrfs_set_stack_chunk_type(chunk, map->type); map 5308 fs/btrfs/volumes.c btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes); map 5309 fs/btrfs/volumes.c btrfs_set_stack_chunk_io_align(chunk, map->stripe_len); map 5310 fs/btrfs/volumes.c btrfs_set_stack_chunk_io_width(chunk, map->stripe_len); map 5312 fs/btrfs/volumes.c btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes); map 5319 fs/btrfs/volumes.c if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) { map 5369 fs/btrfs/volumes.c static inline int btrfs_chunk_max_errors(struct map_lookup *map) map 5371 fs/btrfs/volumes.c const int index = btrfs_bg_flags_to_raid_index(map->type); map 5379 fs/btrfs/volumes.c struct map_lookup *map; map 5388 fs/btrfs/volumes.c map = em->map_lookup; map 5389 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 5391 fs/btrfs/volumes.c &map->stripes[i].dev->dev_state)) { map 5396 fs/btrfs/volumes.c &map->stripes[i].dev->dev_state)) { map 5407 fs/btrfs/volumes.c if (miss_ndevs > btrfs_chunk_max_errors(map)) map 5436 fs/btrfs/volumes.c struct map_lookup *map; map 5449 fs/btrfs/volumes.c map = em->map_lookup; map 5450 fs/btrfs/volumes.c if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK)) map 5451 fs/btrfs/volumes.c ret = map->num_stripes; map 5452 fs/btrfs/volumes.c else if (map->type & BTRFS_BLOCK_GROUP_RAID10) map 5453 fs/btrfs/volumes.c ret = map->sub_stripes; map 5454 fs/btrfs/volumes.c else if (map->type & BTRFS_BLOCK_GROUP_RAID5) map 5456 fs/btrfs/volumes.c else if (map->type & BTRFS_BLOCK_GROUP_RAID6) map 5464 fs/btrfs/volumes.c ret = map->num_stripes; map 5482 fs/btrfs/volumes.c struct map_lookup *map; map 5488 fs/btrfs/volumes.c map = em->map_lookup; map 5489 fs/btrfs/volumes.c if (map->type & 
BTRFS_BLOCK_GROUP_RAID56_MASK) map 5490 fs/btrfs/volumes.c len = map->stripe_len * nr_data_stripes(map); map 5499 fs/btrfs/volumes.c struct map_lookup *map; map 5505 fs/btrfs/volumes.c map = em->map_lookup; map 5506 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) map 5514 fs/btrfs/volumes.c struct map_lookup *map, int first, map 5523 fs/btrfs/volumes.c ASSERT((map->type & map 5526 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID10) map 5527 fs/btrfs/volumes.c num_stripes = map->sub_stripes; map 5529 fs/btrfs/volumes.c num_stripes = map->num_stripes; map 5546 fs/btrfs/volumes.c if (map->stripes[preferred_mirror].dev->bdev && map 5547 fs/btrfs/volumes.c (tolerance || map->stripes[preferred_mirror].dev != srcdev)) map 5550 fs/btrfs/volumes.c if (map->stripes[i].dev->bdev && map 5551 fs/btrfs/volumes.c (tolerance || map->stripes[i].dev != srcdev)) map 5639 fs/btrfs/volumes.c struct map_lookup *map; map 5665 fs/btrfs/volumes.c map = em->map_lookup; map 5667 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 5675 fs/btrfs/volumes.c stripe_len = map->stripe_len; map 5685 fs/btrfs/volumes.c stripe_nr_end = round_up(offset + length, map->stripe_len); map 5686 fs/btrfs/volumes.c stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len); map 5688 fs/btrfs/volumes.c stripe_end_offset = stripe_nr_end * map->stripe_len - map 5697 fs/btrfs/volumes.c if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | map 5699 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID0) map 5702 fs/btrfs/volumes.c sub_stripes = map->sub_stripes; map 5704 fs/btrfs/volumes.c factor = map->num_stripes / sub_stripes; map 5705 fs/btrfs/volumes.c num_stripes = min_t(u64, map->num_stripes, map 5713 fs/btrfs/volumes.c } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK | map 5715 fs/btrfs/volumes.c num_stripes = map->num_stripes; map 5717 fs/btrfs/volumes.c stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, map 5729 fs/btrfs/volumes.c map->stripes[stripe_index].physical + map 5730 fs/btrfs/volumes.c stripe_offset + stripe_nr * map->stripe_len; map 5731 fs/btrfs/volumes.c bbio->stripes[i].dev = map->stripes[stripe_index].dev; map 5733 fs/btrfs/volumes.c if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | map 5736 fs/btrfs/volumes.c map->stripe_len; map 5740 fs/btrfs/volumes.c map->stripe_len; map 5767 fs/btrfs/volumes.c if (stripe_index == map->num_stripes) { map 5774 fs/btrfs/volumes.c bbio->map_type = map->type; map 5975 fs/btrfs/volumes.c struct map_lookup *map; map 5990 fs/btrfs/volumes.c map = em->map_lookup; map 5994 fs/btrfs/volumes.c stripe_len = map->stripe_len; map 6009 fs/btrfs/volumes.c data_stripes = nr_data_stripes(map); map 6011 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) { map 6017 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 6064 fs/btrfs/volumes.c struct map_lookup *map; map 6096 fs/btrfs/volumes.c map = em->map_lookup; map 6103 fs/btrfs/volumes.c data_stripes = nr_data_stripes(map); map 6114 fs/btrfs/volumes.c if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 && map 6124 fs/btrfs/volumes.c } else if (mirror_num > map->num_stripes) { map 6130 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID0) { map 6131 fs/btrfs/volumes.c stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, map 6135 fs/btrfs/volumes.c } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) { map 6137 fs/btrfs/volumes.c num_stripes = map->num_stripes; map 6141 fs/btrfs/volumes.c stripe_index = find_live_mirror(fs_info, map, 0, map 6146 
fs/btrfs/volumes.c } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { map 6148 fs/btrfs/volumes.c num_stripes = map->num_stripes; map 6155 fs/btrfs/volumes.c } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { map 6156 fs/btrfs/volumes.c u32 factor = map->num_stripes / map->sub_stripes; map 6159 fs/btrfs/volumes.c stripe_index *= map->sub_stripes; map 6162 fs/btrfs/volumes.c num_stripes = map->sub_stripes; map 6167 fs/btrfs/volumes.c stripe_index = find_live_mirror(fs_info, map, map 6173 fs/btrfs/volumes.c } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 6180 fs/btrfs/volumes.c num_stripes = map->num_stripes; map 6181 fs/btrfs/volumes.c max_errors = nr_parity_stripes(map); map 6183 fs/btrfs/volumes.c *length = map->stripe_len; map 6198 fs/btrfs/volumes.c div_u64_rem(stripe_nr + stripe_index, map->num_stripes, map 6209 fs/btrfs/volumes.c stripe_nr = div_u64_rem(stripe_nr, map->num_stripes, map 6213 fs/btrfs/volumes.c if (stripe_index >= map->num_stripes) { map 6216 fs/btrfs/volumes.c stripe_index, map->num_stripes); map 6239 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && map 6256 fs/btrfs/volumes.c em->start + (tmp + i) * map->stripe_len; map 6258 fs/btrfs/volumes.c bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; map 6259 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID6) map 6267 fs/btrfs/volumes.c map->stripes[stripe_index].physical + map 6269 fs/btrfs/volumes.c stripe_nr * map->stripe_len; map 6271 fs/btrfs/volumes.c map->stripes[stripe_index].dev; map 6276 fs/btrfs/volumes.c max_errors = btrfs_chunk_max_errors(map); map 6288 fs/btrfs/volumes.c bbio->map_type = map->type; map 6302 fs/btrfs/volumes.c bbio->mirror_num = map->num_stripes + 1; map 6334 fs/btrfs/volumes.c struct map_lookup *map; map 6346 fs/btrfs/volumes.c map = em->map_lookup; map 6348 fs/btrfs/volumes.c rmap_len = map->stripe_len; map 6350 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID10) map 6351 fs/btrfs/volumes.c length = div_u64(length, map->num_stripes / map->sub_stripes); map 6352 fs/btrfs/volumes.c else if (map->type & BTRFS_BLOCK_GROUP_RAID0) map 6353 fs/btrfs/volumes.c length = div_u64(length, map->num_stripes); map 6354 fs/btrfs/volumes.c else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { map 6355 fs/btrfs/volumes.c length = div_u64(length, nr_data_stripes(map)); map 6356 fs/btrfs/volumes.c rmap_len = map->stripe_len * nr_data_stripes(map); map 6359 fs/btrfs/volumes.c buf = kcalloc(map->num_stripes, sizeof(u64), GFP_NOFS); map 6362 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 6363 fs/btrfs/volumes.c if (map->stripes[i].physical > physical || map 6364 fs/btrfs/volumes.c map->stripes[i].physical + length <= physical) map 6367 fs/btrfs/volumes.c stripe_nr = physical - map->stripes[i].physical; map 6368 fs/btrfs/volumes.c stripe_nr = div64_u64(stripe_nr, map->stripe_len); map 6370 fs/btrfs/volumes.c if (map->type & BTRFS_BLOCK_GROUP_RAID10) { map 6371 fs/btrfs/volumes.c stripe_nr = stripe_nr * map->num_stripes + i; map 6372 fs/btrfs/volumes.c stripe_nr = div_u64(stripe_nr, map->sub_stripes); map 6373 fs/btrfs/volumes.c } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) { map 6374 fs/btrfs/volumes.c stripe_nr = stripe_nr * map->num_stripes + i; map 6380 fs/btrfs/volumes.c WARN_ON(nr >= map->num_stripes); map 6386 fs/btrfs/volumes.c WARN_ON(nr >= map->num_stripes); map 6769 fs/btrfs/volumes.c struct map_lookup *map; map 6808 fs/btrfs/volumes.c map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); map 6809 fs/btrfs/volumes.c 
if (!map) { map 6815 fs/btrfs/volumes.c em->map_lookup = map; map 6822 fs/btrfs/volumes.c map->num_stripes = num_stripes; map 6823 fs/btrfs/volumes.c map->io_width = btrfs_chunk_io_width(leaf, chunk); map 6824 fs/btrfs/volumes.c map->io_align = btrfs_chunk_io_align(leaf, chunk); map 6825 fs/btrfs/volumes.c map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk); map 6826 fs/btrfs/volumes.c map->type = btrfs_chunk_type(leaf, chunk); map 6827 fs/btrfs/volumes.c map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); map 6828 fs/btrfs/volumes.c map->verified_stripes = 0; map 6829 fs/btrfs/volumes.c em->orig_block_len = calc_stripe_length(map->type, em->len, map 6830 fs/btrfs/volumes.c map->num_stripes); map 6832 fs/btrfs/volumes.c map->stripes[i].physical = map 6838 fs/btrfs/volumes.c map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices, map 6840 fs/btrfs/volumes.c if (!map->stripes[i].dev && map 6846 fs/btrfs/volumes.c if (!map->stripes[i].dev) { map 6847 fs/btrfs/volumes.c map->stripes[i].dev = map 6850 fs/btrfs/volumes.c if (IS_ERR(map->stripes[i].dev)) { map 6854 fs/btrfs/volumes.c devid, PTR_ERR(map->stripes[i].dev)); map 6855 fs/btrfs/volumes.c return PTR_ERR(map->stripes[i].dev); map 6860 fs/btrfs/volumes.c &(map->stripes[i].dev->dev_state)); map 7198 fs/btrfs/volumes.c struct map_lookup *map; map 7203 fs/btrfs/volumes.c map = em->map_lookup; map 7206 fs/btrfs/volumes.c map->type); map 7207 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 7208 fs/btrfs/volumes.c struct btrfs_device *dev = map->stripes[i].dev; map 7707 fs/btrfs/volumes.c struct map_lookup *map; map 7726 fs/btrfs/volumes.c map = em->map_lookup; map 7727 fs/btrfs/volumes.c stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes); map 7737 fs/btrfs/volumes.c for (i = 0; i < map->num_stripes; i++) { map 7738 fs/btrfs/volumes.c if (map->stripes[i].dev->devid == devid && map 7739 fs/btrfs/volumes.c map->stripes[i].physical == physical_offset) { map 7741 fs/btrfs/volumes.c if (map->verified_stripes >= map->num_stripes) { map 7748 fs/btrfs/volumes.c map->verified_stripes++; map 7800 fs/btrfs/volumes.c for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) { map 950 fs/ecryptfs/crypto.c struct ecryptfs_cipher_code_str_map_elem *map = map 966 fs/ecryptfs/crypto.c if (strcmp(cipher_name, map[i].cipher_str) == 0) { map 967 fs/ecryptfs/crypto.c code = map[i].cipher_code; map 49 fs/erofs/data.c struct erofs_map_blocks *map, map 54 fs/erofs/data.c u64 offset = map->m_la; map 58 fs/erofs/data.c trace_erofs_map_blocks_flatmode_enter(inode, map, flags); map 65 fs/erofs/data.c map->m_flags = 0; map 66 fs/erofs/data.c map->m_plen = 0; map 71 fs/erofs/data.c map->m_flags = EROFS_MAP_MAPPED; map 74 fs/erofs/data.c map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la; map 75 fs/erofs/data.c map->m_plen = blknr_to_addr(lastblk) - offset; map 80 fs/erofs/data.c map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize + map 81 fs/erofs/data.c vi->xattr_isize + erofs_blkoff(map->m_la); map 82 fs/erofs/data.c map->m_plen = inode->i_size - offset; map 85 fs/erofs/data.c if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) { map 94 fs/erofs/data.c map->m_flags |= EROFS_MAP_META; map 98 fs/erofs/data.c vi->nid, inode->i_size, map->m_la); map 105 fs/erofs/data.c map->m_llen = map->m_plen; map 108 fs/erofs/data.c trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0); map 113 fs/erofs/data.c struct erofs_map_blocks *map, int flags) map 116 fs/erofs/data.c int err = z_erofs_map_blocks_iter(inode, map, 
flags); map 118 fs/erofs/data.c if (map->mpage) { map 119 fs/erofs/data.c put_page(map->mpage); map 120 fs/erofs/data.c map->mpage = NULL; map 124 fs/erofs/data.c return erofs_map_blocks_flatmode(inode, map, flags); map 156 fs/erofs/data.c struct erofs_map_blocks map = { map 162 fs/erofs/data.c err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); map 167 fs/erofs/data.c if (!(map.m_flags & EROFS_MAP_MAPPED)) { map 176 fs/erofs/data.c DBG_BUGON(map.m_plen != map.m_llen); map 178 fs/erofs/data.c blknr = erofs_blknr(map.m_pa); map 179 fs/erofs/data.c blkoff = erofs_blkoff(map.m_pa); map 182 fs/erofs/data.c if (map.m_flags & EROFS_MAP_META) { map 186 fs/erofs/data.c DBG_BUGON(map.m_plen > PAGE_SIZE); map 197 fs/erofs/data.c memcpy(vto, vsrc + blkoff, map.m_plen); map 198 fs/erofs/data.c memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen); map 213 fs/erofs/data.c DBG_BUGON(erofs_blkoff(map.m_pa)); map 216 fs/erofs/data.c if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE)) map 217 fs/erofs/data.c nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE); map 329 fs/erofs/data.c struct erofs_map_blocks map = { map 334 fs/erofs/data.c err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW); map 338 fs/erofs/data.c if (map.m_flags & EROFS_MAP_MAPPED) map 339 fs/erofs/data.c bh->b_blocknr = erofs_blknr(map.m_pa); map 340 fs/erofs/internal.h struct erofs_map_blocks *map, map 345 fs/erofs/internal.h struct erofs_map_blocks *map, map 146 fs/erofs/zdata.c struct erofs_map_blocks map; map 342 fs/erofs/zdata.c struct erofs_map_blocks *map) map 350 fs/erofs/zdata.c grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag); map 362 fs/erofs/zdata.c if (cl->pageofs != (map->m_la & ~PAGE_MASK)) { map 370 fs/erofs/zdata.c if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) { map 376 fs/erofs/zdata.c unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT; map 378 fs/erofs/zdata.c if (map->m_flags & EROFS_MAP_FULL_MAPPED) map 402 fs/erofs/zdata.c struct erofs_map_blocks *map) map 414 fs/erofs/zdata.c pcl->obj.index = map->m_pa >> PAGE_SHIFT; map 416 fs/erofs/zdata.c pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) | map 417 fs/erofs/zdata.c (map->m_flags & EROFS_MAP_FULL_MAPPED ? 
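The erofs entries just above (fs/erofs/data.c) repeatedly show the flat-mode case: an uncompressed file maps 1:1 onto the block run starting at raw_blkaddr, and only a final partial block may be packed inline next to the on-disk inode. Below is a minimal standalone sketch of that arithmetic, assuming a 4096-byte block size; BLKSIZ, struct flat_inode and inline_off are illustrative stand-ins (inline_off plays the role of iloc() + inode_isize + xattr_isize), not erofs's own definitions.

#include <stdint.h>

#define BLKSIZ 4096u  /* assumed erofs block size (== PAGE_SIZE here) */

struct flat_inode {
	uint64_t raw_blkaddr;   /* first block of the plain data area */
	uint64_t inline_off;    /* byte offset of the inline tail, if any */
	uint64_t size;          /* file size in bytes */
	int      tail_inline;   /* is the last partial block packed inline? */
};

struct flat_map { uint64_t la, pa, plen; };  /* logical, physical, length */

static int map_flatmode(const struct flat_inode *vi, struct flat_map *m)
{
	uint64_t nblocks = (vi->size + BLKSIZ - 1) / BLKSIZ;
	uint64_t lastblk = nblocks - (vi->tail_inline ? 1 : 0);

	if (m->la >= vi->size)
		return -1;                      /* request past EOF */

	if (m->la < lastblk * BLKSIZ) {
		/* plain region: a 1:1 shift onto the raw block area */
		m->pa = vi->raw_blkaddr * BLKSIZ + m->la;
		m->plen = lastblk * BLKSIZ - m->la;
	} else {
		/* tail-packed region: data sits inline after the inode,
		 * so it must not cross a block boundary */
		m->pa = vi->inline_off + (m->la % BLKSIZ);
		m->plen = vi->size - m->la;
		if (m->pa % BLKSIZ + m->plen > BLKSIZ)
			return -1;
	}
	return 0;
}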
map 420 fs/erofs/zdata.c if (map->m_flags & EROFS_MAP_ZIPPED) map 433 fs/erofs/zdata.c cl->pageofs = map->m_la & ~PAGE_MASK; map 458 fs/erofs/zdata.c struct erofs_map_blocks *map) map 468 fs/erofs/zdata.c if (!PAGE_ALIGNED(map->m_pa)) { map 474 fs/erofs/zdata.c cl = cllookup(clt, inode, map); map 476 fs/erofs/zdata.c cl = clregister(clt, inode, map); map 575 fs/erofs/zdata.c struct erofs_map_blocks *const map = &fe->map; map 594 fs/erofs/zdata.c if (offset + cur >= map->m_la && map 595 fs/erofs/zdata.c offset + cur < map->m_la + map->m_llen) { map 608 fs/erofs/zdata.c map->m_la = offset + cur; map 609 fs/erofs/zdata.c map->m_llen = 0; map 610 fs/erofs/zdata.c err = z_erofs_map_blocks_iter(inode, map, 0); map 615 fs/erofs/zdata.c if (!(map->m_flags & EROFS_MAP_MAPPED)) map 618 fs/erofs/zdata.c err = z_erofs_collector_begin(clt, inode, map); map 623 fs/erofs/zdata.c if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la)) map 641 fs/erofs/zdata.c cur = end - min_t(unsigned int, offset + end - map->m_la, end); map 642 fs/erofs/zdata.c if (!(map->m_flags & EROFS_MAP_MAPPED)) { map 672 fs/erofs/zdata.c index = page->index - (map->m_la >> PAGE_SHIFT); map 682 fs/erofs/zdata.c map->m_llen = offset + cur - map->m_la; map 692 fs/erofs/zdata.c __func__, page, spiltted, map->m_llen); map 1353 fs/erofs/zdata.c if (f.map.mpage) map 1354 fs/erofs/zdata.c put_page(f.map.mpage); map 1427 fs/erofs/zdata.c if (f.map.mpage) map 1428 fs/erofs/zdata.c put_page(f.map.mpage); map 98 fs/erofs/zmap.c struct erofs_map_blocks *map; map 113 fs/erofs/zmap.c struct erofs_map_blocks *const map = m->map; map 114 fs/erofs/zmap.c struct page *mpage = map->mpage; map 132 fs/erofs/zmap.c map->mpage = NULL; map 137 fs/erofs/zmap.c map->mpage = mpage; map 332 fs/erofs/zmap.c struct erofs_map_blocks *const map = m->map; map 361 fs/erofs/zmap.c map->m_flags &= ~EROFS_MAP_ZIPPED; map 364 fs/erofs/zmap.c map->m_la = (lcn << lclusterbits) | m->clusterofs; map 377 fs/erofs/zmap.c struct erofs_map_blocks *map, map 383 fs/erofs/zmap.c .map = map, map 389 fs/erofs/zmap.c trace_z_erofs_map_blocks_iter_enter(inode, map, flags); map 392 fs/erofs/zmap.c if (map->m_la >= inode->i_size) { map 393 fs/erofs/zmap.c map->m_llen = map->m_la + 1 - inode->i_size; map 394 fs/erofs/zmap.c map->m_la = inode->i_size; map 395 fs/erofs/zmap.c map->m_flags = 0; map 404 fs/erofs/zmap.c ofs = map->m_la; map 412 fs/erofs/zmap.c map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */ map 418 fs/erofs/zmap.c map->m_flags &= ~EROFS_MAP_ZIPPED; map 422 fs/erofs/zmap.c map->m_la = (m.lcn << lclusterbits) | m.clusterofs; map 434 fs/erofs/zmap.c map->m_flags |= EROFS_MAP_FULL_MAPPED; map 451 fs/erofs/zmap.c map->m_llen = end - map->m_la; map 452 fs/erofs/zmap.c map->m_plen = 1 << lclusterbits; map 453 fs/erofs/zmap.c map->m_pa = blknr_to_addr(m.pblk); map 454 fs/erofs/zmap.c map->m_flags |= EROFS_MAP_MAPPED; map 462 fs/erofs/zmap.c __func__, map->m_la, map->m_pa, map 463 fs/erofs/zmap.c map->m_llen, map->m_plen, map->m_flags); map 465 fs/erofs/zmap.c trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err); map 1459 fs/ext2/balloc.c unsigned long ext2_count_free(struct buffer_head *map, unsigned int numchars) map 1461 fs/ext2/balloc.c return numchars * BITS_PER_BYTE - memweight(map->b_data, numchars); map 194 fs/ext4/block_validity.c struct ext4_map_blocks map; map 207 fs/ext4/block_validity.c map.m_lblk = i; map 208 fs/ext4/block_validity.c map.m_len = num - i; map 209 fs/ext4/block_validity.c n = ext4_map_blocks(NULL, inode, &map, 0); map 218 
fs/ext4/block_validity.c map.m_pblk, n)) { map 220 fs/ext4/block_validity.c "overlap system zone", map.m_pblk, map 221 fs/ext4/block_validity.c map.m_pblk + map.m_len - 1, ino); map 225 fs/ext4/block_validity.c err = add_system_zone(system_blks, map.m_pblk, n); map 158 fs/ext4/dir.c struct ext4_map_blocks map; map 166 fs/ext4/dir.c map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb); map 167 fs/ext4/dir.c map.m_len = 1; map 168 fs/ext4/dir.c err = ext4_map_blocks(NULL, inode, &map, 0); map 172 fs/ext4/dir.c if (map.m_len == 0) map 173 fs/ext4/dir.c map.m_len = 1; map 174 fs/ext4/dir.c ctx->pos += map.m_len * sb->s_blocksize; map 178 fs/ext4/dir.c pgoff_t index = map.m_pblk >> map 186 fs/ext4/dir.c bh = ext4_bread(NULL, inode, map.m_lblk, 0); map 2650 fs/ext4/ext4.h struct ext4_map_blocks *map, int flags); map 3281 fs/ext4/ext4.h struct ext4_map_blocks *map, int flags); map 3292 fs/ext4/ext4.h struct ext4_map_blocks *map, int flags); map 89 fs/ext4/extents.c struct ext4_map_blocks *map, map 3447 fs/ext4/extents.c struct ext4_map_blocks *map, map 3458 fs/ext4/extents.c int allocated = map->m_len; map 3466 fs/ext4/extents.c if (map->m_lblk + map->m_len < ee_block + ee_len) { map 3475 fs/ext4/extents.c map->m_lblk + map->m_len, split_flag1, flags1); map 3479 fs/ext4/extents.c allocated = ee_len - (map->m_lblk - ee_block); map 3485 fs/ext4/extents.c path = ext4_find_extent(inode, map->m_lblk, ppath, 0); map 3492 fs/ext4/extents.c (unsigned long) map->m_lblk); map 3498 fs/ext4/extents.c if (map->m_lblk >= ee_block) { map 3506 fs/ext4/extents.c map->m_lblk, split_flag1, flags); map 3538 fs/ext4/extents.c struct ext4_map_blocks *map, map 3549 fs/ext4/extents.c unsigned int ee_len, depth, map_len = map->m_len; map 3556 fs/ext4/extents.c (unsigned long long)map->m_lblk, map_len); map 3561 fs/ext4/extents.c if (eof_block < map->m_lblk + map_len) map 3562 fs/ext4/extents.c eof_block = map->m_lblk + map_len; map 3572 fs/ext4/extents.c trace_ext4_ext_convert_to_initialized_enter(inode, map, ex); map 3576 fs/ext4/extents.c BUG_ON(!in_range(map->m_lblk, ee_block, ee_len)); map 3593 fs/ext4/extents.c if ((map->m_lblk == ee_block) && map 3625 fs/ext4/extents.c map, ex, abut_ex); map 3639 fs/ext4/extents.c } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) && map 3663 fs/ext4/extents.c ((map->m_lblk + map_len) == next_lblk) && /*C2*/ map 3671 fs/ext4/extents.c map, ex, abut_ex); map 3694 fs/ext4/extents.c allocated = ee_len - (map->m_lblk - ee_block); map 3696 fs/ext4/extents.c WARN_ON(map->m_lblk < ee_block); map 3721 fs/ext4/extents.c split_map.m_lblk = map->m_lblk; map 3722 fs/ext4/extents.c split_map.m_len = map->m_len; map 3756 fs/ext4/extents.c allocated = map->m_len; map 3800 fs/ext4/extents.c struct ext4_map_blocks *map, map 3813 fs/ext4/extents.c (unsigned long long)map->m_lblk, map->m_len); map 3817 fs/ext4/extents.c if (eof_block < map->m_lblk + map->m_len) map 3818 fs/ext4/extents.c eof_block = map->m_lblk + map->m_len; map 3838 fs/ext4/extents.c return ext4_split_extent(handle, inode, ppath, map, split_flag, flags); map 3843 fs/ext4/extents.c struct ext4_map_blocks *map, map 3868 fs/ext4/extents.c if (ee_block != map->m_lblk || ee_len > map->m_len) { map 3873 fs/ext4/extents.c (unsigned long long)map->m_lblk, map->m_len); map 3875 fs/ext4/extents.c err = ext4_split_convert_extents(handle, inode, map, ppath, map 3879 fs/ext4/extents.c path = ext4_find_extent(inode, map->m_lblk, ppath, 0); map 3959 fs/ext4/extents.c struct ext4_map_blocks *map, map 3974 fs/ext4/extents.c if (map->m_len > 
EXT_UNWRITTEN_MAX_LEN) map 3975 fs/ext4/extents.c map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; map 3986 fs/ext4/extents.c if (ee_block != map->m_lblk || ee_len > map->m_len) { map 3987 fs/ext4/extents.c err = ext4_split_convert_extents(handle, inode, map, ppath, map 3991 fs/ext4/extents.c path = ext4_find_extent(inode, map->m_lblk, ppath, 0); map 3998 fs/ext4/extents.c (unsigned long) map->m_lblk); map 4021 fs/ext4/extents.c err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len); map 4024 fs/ext4/extents.c map->m_flags |= EXT4_MAP_UNWRITTEN; map 4025 fs/ext4/extents.c if (allocated > map->m_len) map 4026 fs/ext4/extents.c allocated = map->m_len; map 4027 fs/ext4/extents.c map->m_len = allocated; map 4033 fs/ext4/extents.c struct ext4_map_blocks *map, map 4043 fs/ext4/extents.c inode->i_ino, (unsigned long long)map->m_lblk, map->m_len, map 4053 fs/ext4/extents.c trace_ext4_ext_handle_unwritten_extents(inode, map, flags, map 4058 fs/ext4/extents.c ret = ext4_split_convert_extents(handle, inode, map, ppath, map 4062 fs/ext4/extents.c map->m_flags |= EXT4_MAP_UNWRITTEN; map 4068 fs/ext4/extents.c if (allocated > map->m_len) map 4069 fs/ext4/extents.c allocated = map->m_len; map 4070 fs/ext4/extents.c err = ext4_issue_zeroout(inode, map->m_lblk, newblock, map 4075 fs/ext4/extents.c ret = ext4_convert_unwritten_extents_endio(handle, inode, map, map 4079 fs/ext4/extents.c err = check_eofblocks_fl(handle, inode, map->m_lblk, map 4080 fs/ext4/extents.c path, map->m_len); map 4083 fs/ext4/extents.c map->m_flags |= EXT4_MAP_MAPPED; map 4084 fs/ext4/extents.c map->m_pblk = newblock; map 4085 fs/ext4/extents.c if (allocated > map->m_len) map 4086 fs/ext4/extents.c allocated = map->m_len; map 4087 fs/ext4/extents.c map->m_len = allocated; map 4096 fs/ext4/extents.c map->m_flags |= EXT4_MAP_UNWRITTEN; map 4109 fs/ext4/extents.c map->m_flags |= EXT4_MAP_UNWRITTEN; map 4114 fs/ext4/extents.c ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags); map 4123 fs/ext4/extents.c map->m_flags |= EXT4_MAP_NEW; map 4124 fs/ext4/extents.c if (allocated > map->m_len) map 4125 fs/ext4/extents.c allocated = map->m_len; map 4126 fs/ext4/extents.c map->m_len = allocated; map 4129 fs/ext4/extents.c map->m_flags |= EXT4_MAP_MAPPED; map 4131 fs/ext4/extents.c err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map 4132 fs/ext4/extents.c map->m_len); map 4137 fs/ext4/extents.c if (allocated > map->m_len) map 4138 fs/ext4/extents.c allocated = map->m_len; map 4140 fs/ext4/extents.c map->m_pblk = newblock; map 4141 fs/ext4/extents.c map->m_len = allocated; map 4188 fs/ext4/extents.c struct ext4_map_blocks *map, map 4193 fs/ext4/extents.c ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); map 4205 fs/ext4/extents.c rr_cluster_start = EXT4_B2C(sbi, map->m_lblk); map 4211 fs/ext4/extents.c map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; map 4212 fs/ext4/extents.c map->m_len = min(map->m_len, map 4223 fs/ext4/extents.c if (map->m_lblk < ee_block) map 4224 fs/ext4/extents.c map->m_len = min(map->m_len, ee_block - map->m_lblk); map 4235 fs/ext4/extents.c if (map->m_lblk > ee_block) { map 4237 fs/ext4/extents.c map->m_len = min(map->m_len, next - map->m_lblk); map 4240 fs/ext4/extents.c trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1); map 4244 fs/ext4/extents.c trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0); map 4268 fs/ext4/extents.c struct ext4_map_blocks *map, int flags) map 4282 fs/ext4/extents.c map->m_lblk, map->m_len, inode->i_ino); map 4283 fs/ext4/extents.c 
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); map 4286 fs/ext4/extents.c path = ext4_find_extent(inode, map->m_lblk, NULL, 0); map 4303 fs/ext4/extents.c (unsigned long) map->m_lblk, depth, map 4325 fs/ext4/extents.c if (in_range(map->m_lblk, ee_block, ee_len)) { map 4326 fs/ext4/extents.c newblock = map->m_lblk - ee_block + ee_start; map 4328 fs/ext4/extents.c allocated = ee_len - (map->m_lblk - ee_block); map 4329 fs/ext4/extents.c ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, map 4339 fs/ext4/extents.c handle, inode, map, &path, map 4346 fs/ext4/extents.c handle, inode, map, &path, flags, map 4363 fs/ext4/extents.c hole_start = map->m_lblk; map 4372 fs/ext4/extents.c if (hole_start != map->m_lblk) map 4373 fs/ext4/extents.c hole_len -= map->m_lblk - hole_start; map 4374 fs/ext4/extents.c map->m_pblk = 0; map 4375 fs/ext4/extents.c map->m_len = min_t(unsigned int, map->m_len, hole_len); map 4383 fs/ext4/extents.c newex.ee_block = cpu_to_le32(map->m_lblk); map 4384 fs/ext4/extents.c cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); map 4391 fs/ext4/extents.c get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { map 4392 fs/ext4/extents.c ar.len = allocated = map->m_len; map 4393 fs/ext4/extents.c newblock = map->m_pblk; map 4399 fs/ext4/extents.c ar.lleft = map->m_lblk; map 4403 fs/ext4/extents.c ar.lright = map->m_lblk; map 4412 fs/ext4/extents.c get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { map 4413 fs/ext4/extents.c ar.len = allocated = map->m_len; map 4414 fs/ext4/extents.c newblock = map->m_pblk; map 4425 fs/ext4/extents.c if (map->m_len > EXT_INIT_MAX_LEN && map 4427 fs/ext4/extents.c map->m_len = EXT_INIT_MAX_LEN; map 4428 fs/ext4/extents.c else if (map->m_len > EXT_UNWRITTEN_MAX_LEN && map 4430 fs/ext4/extents.c map->m_len = EXT_UNWRITTEN_MAX_LEN; map 4433 fs/ext4/extents.c newex.ee_len = cpu_to_le16(map->m_len); map 4438 fs/ext4/extents.c allocated = map->m_len; map 4442 fs/ext4/extents.c ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk); map 4443 fs/ext4/extents.c ar.logical = map->m_lblk; map 4452 fs/ext4/extents.c offset = EXT4_LBLK_COFF(sbi, map->m_lblk); map 4485 fs/ext4/extents.c map->m_flags |= EXT4_MAP_UNWRITTEN; map 4490 fs/ext4/extents.c err = check_eofblocks_fl(handle, inode, map->m_lblk, map 4511 fs/ext4/extents.c if (allocated > map->m_len) map 4512 fs/ext4/extents.c allocated = map->m_len; map 4513 fs/ext4/extents.c map->m_flags |= EXT4_MAP_NEW; map 4544 fs/ext4/extents.c lblk = EXT4_LBLK_CMASK(sbi, map->m_lblk); map 4561 fs/ext4/extents.c if (allocated > map->m_len) map 4562 fs/ext4/extents.c allocated = map->m_len; map 4564 fs/ext4/extents.c map->m_flags |= EXT4_MAP_MAPPED; map 4565 fs/ext4/extents.c map->m_pblk = newblock; map 4566 fs/ext4/extents.c map->m_len = allocated; map 4571 fs/ext4/extents.c trace_ext4_ext_map_blocks_exit(inode, flags, map, map 4619 fs/ext4/extents.c struct ext4_map_blocks map; map 4624 fs/ext4/extents.c map.m_lblk = offset; map 4625 fs/ext4/extents.c map.m_len = len; map 4656 fs/ext4/extents.c ret = ext4_map_blocks(handle, inode, &map, flags); map 4660 fs/ext4/extents.c inode->i_ino, map.m_lblk, map 4661 fs/ext4/extents.c map.m_len, ret); map 4666 fs/ext4/extents.c map.m_lblk += ret; map 4667 fs/ext4/extents.c map.m_len = len = len - ret; map 4668 fs/ext4/extents.c epos = (loff_t)map.m_lblk << inode->i_blkbits; map 4971 fs/ext4/extents.c struct ext4_map_blocks map; map 4974 fs/ext4/extents.c map.m_lblk = offset >> blkbits; map 4995 fs/ext4/extents.c map.m_lblk += ret; map 4996 
fs/ext4/extents.c map.m_len = (max_blocks -= ret); map 5005 fs/ext4/extents.c ret = ext4_map_blocks(handle, inode, &map, map 5011 fs/ext4/extents.c inode->i_ino, map.m_lblk, map 5012 fs/ext4/extents.c map.m_len, ret); map 668 fs/ext4/extents_status.c struct ext4_map_blocks map; map 678 fs/ext4/extents_status.c map.m_lblk = es->es_lblk; map 679 fs/ext4/extents_status.c map.m_len = es->es_len; map 681 fs/ext4/extents_status.c retval = ext4_ind_map_blocks(NULL, inode, &map, 0); map 701 fs/ext4/extents_status.c if (map.m_pblk != ext4_es_pblock(es)) { map 705 fs/ext4/extents_status.c inode->i_ino, map.m_pblk, map 141 fs/ext4/file.c struct ext4_map_blocks map; map 148 fs/ext4/file.c map.m_lblk = pos >> blkbits; map 149 fs/ext4/file.c map.m_len = EXT4_MAX_BLOCKS(len, pos, blkbits); map 150 fs/ext4/file.c blklen = map.m_len; map 152 fs/ext4/file.c err = ext4_map_blocks(NULL, inode, &map, 0); map 158 fs/ext4/file.c return err == blklen && (map.m_flags & EXT4_MAP_MAPPED); map 510 fs/ext4/indirect.c struct ext4_map_blocks *map, map 524 fs/ext4/indirect.c trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); map 527 fs/ext4/indirect.c depth = ext4_block_to_path(inode, map->m_lblk, offsets, map 540 fs/ext4/indirect.c while (count < map->m_len && count <= blocks_to_boundary) { map 569 fs/ext4/indirect.c map->m_pblk = 0; map 570 fs/ext4/indirect.c map->m_len = min_t(unsigned int, map->m_len, count); map 590 fs/ext4/indirect.c ar.logical = map->m_lblk; map 598 fs/ext4/indirect.c ar.goal = ext4_find_goal(inode, map->m_lblk, partial); map 608 fs/ext4/indirect.c map->m_len, blocks_to_boundary); map 628 fs/ext4/indirect.c map->m_flags |= EXT4_MAP_NEW; map 633 fs/ext4/indirect.c map->m_flags |= EXT4_MAP_MAPPED; map 634 fs/ext4/indirect.c map->m_pblk = le32_to_cpu(chain[depth-1].key); map 635 fs/ext4/indirect.c map->m_len = count; map 637 fs/ext4/indirect.c map->m_flags |= EXT4_MAP_BOUNDARY; map 648 fs/ext4/indirect.c trace_ext4_ind_map_blocks_exit(inode, flags, map, err); map 1178 fs/ext4/inline.c struct ext4_map_blocks map; map 1208 fs/ext4/inline.c map.m_lblk = 0; map 1209 fs/ext4/inline.c map.m_len = 1; map 1210 fs/ext4/inline.c map.m_flags = 0; map 1211 fs/ext4/inline.c error = ext4_map_blocks(handle, inode, &map, EXT4_GET_BLOCKS_CREATE); map 1214 fs/ext4/inline.c if (!(map.m_flags & EXT4_MAP_MAPPED)) { map 1219 fs/ext4/inline.c data_bh = sb_getblk(inode->i_sb, map.m_pblk); map 409 fs/ext4/inode.c struct ext4_map_blocks *map) map 415 fs/ext4/inode.c if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, map 416 fs/ext4/inode.c map->m_len)) { map 417 fs/ext4/inode.c ext4_error_inode(inode, func, line, map->m_pblk, map 419 fs/ext4/inode.c "(length %d)", (unsigned long) map->m_lblk, map 420 fs/ext4/inode.c map->m_pblk, map->m_len); map 441 fs/ext4/inode.c #define check_block_validity(inode, map) \ map 442 fs/ext4/inode.c __check_block_validity((inode), __func__, __LINE__, (map)) map 448 fs/ext4/inode.c struct ext4_map_blocks *map, map 453 fs/ext4/inode.c map->m_flags = 0; map 463 fs/ext4/inode.c retval = ext4_ext_map_blocks(handle, inode, map, flags & map 466 fs/ext4/inode.c retval = ext4_ind_map_blocks(handle, inode, map, flags & map 475 fs/ext4/inode.c if (es_map->m_lblk != map->m_lblk || map 476 fs/ext4/inode.c es_map->m_flags != map->m_flags || map 477 fs/ext4/inode.c es_map->m_pblk != map->m_pblk) { map 482 fs/ext4/inode.c es_map->m_pblk, es_map->m_flags, map->m_lblk, map 483 fs/ext4/inode.c map->m_len, map->m_pblk, map->m_flags, map 512 fs/ext4/inode.c struct ext4_map_blocks 
*map, int flags) map 520 fs/ext4/inode.c memcpy(&orig_map, map, sizeof(*map)); map 523 fs/ext4/inode.c map->m_flags = 0; map 525 fs/ext4/inode.c "logical block %lu\n", inode->i_ino, flags, map->m_len, map 526 fs/ext4/inode.c (unsigned long) map->m_lblk); map 531 fs/ext4/inode.c if (unlikely(map->m_len > INT_MAX)) map 532 fs/ext4/inode.c map->m_len = INT_MAX; map 535 fs/ext4/inode.c if (unlikely(map->m_lblk >= EXT_MAX_BLOCKS)) map 539 fs/ext4/inode.c if (ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) { map 541 fs/ext4/inode.c map->m_pblk = ext4_es_pblock(&es) + map 542 fs/ext4/inode.c map->m_lblk - es.es_lblk; map 543 fs/ext4/inode.c map->m_flags |= ext4_es_is_written(&es) ? map 545 fs/ext4/inode.c retval = es.es_len - (map->m_lblk - es.es_lblk); map 546 fs/ext4/inode.c if (retval > map->m_len) map 547 fs/ext4/inode.c retval = map->m_len; map 548 fs/ext4/inode.c map->m_len = retval; map 550 fs/ext4/inode.c map->m_pblk = 0; map 551 fs/ext4/inode.c retval = es.es_len - (map->m_lblk - es.es_lblk); map 552 fs/ext4/inode.c if (retval > map->m_len) map 553 fs/ext4/inode.c retval = map->m_len; map 554 fs/ext4/inode.c map->m_len = retval; map 560 fs/ext4/inode.c ext4_map_blocks_es_recheck(handle, inode, map, map 572 fs/ext4/inode.c retval = ext4_ext_map_blocks(handle, inode, map, flags & map 575 fs/ext4/inode.c retval = ext4_ind_map_blocks(handle, inode, map, flags & map 581 fs/ext4/inode.c if (unlikely(retval != map->m_len)) { map 585 fs/ext4/inode.c inode->i_ino, retval, map->m_len); map 589 fs/ext4/inode.c status = map->m_flags & EXT4_MAP_UNWRITTEN ? map 593 fs/ext4/inode.c ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk, map 594 fs/ext4/inode.c map->m_lblk + map->m_len - 1)) map 596 fs/ext4/inode.c ret = ext4_es_insert_extent(inode, map->m_lblk, map 597 fs/ext4/inode.c map->m_len, map->m_pblk, status); map 604 fs/ext4/inode.c if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { map 605 fs/ext4/inode.c ret = check_block_validity(inode, map); map 621 fs/ext4/inode.c if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) map 634 fs/ext4/inode.c map->m_flags &= ~EXT4_MAP_FLAGS; map 649 fs/ext4/inode.c retval = ext4_ext_map_blocks(handle, inode, map, flags); map 651 fs/ext4/inode.c retval = ext4_ind_map_blocks(handle, inode, map, flags); map 653 fs/ext4/inode.c if (retval > 0 && map->m_flags & EXT4_MAP_NEW) { map 676 fs/ext4/inode.c if (unlikely(retval != map->m_len)) { map 680 fs/ext4/inode.c inode->i_ino, retval, map->m_len); map 692 fs/ext4/inode.c map->m_flags & EXT4_MAP_MAPPED && map 693 fs/ext4/inode.c map->m_flags & EXT4_MAP_NEW) { map 694 fs/ext4/inode.c ret = ext4_issue_zeroout(inode, map->m_lblk, map 695 fs/ext4/inode.c map->m_pblk, map->m_len); map 707 fs/ext4/inode.c ext4_es_lookup_extent(inode, map->m_lblk, NULL, &es)) { map 711 fs/ext4/inode.c status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
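The fs/ext4 hits above cluster around one calling convention: a caller fills m_lblk and m_len in a struct ext4_map_blocks, passes it to ext4_map_blocks(), and reads back m_pblk, m_len and m_flags from the same structure. A minimal sketch of that pattern, modeled on the fs/ext4/inline.c 1208-1219 and fs/ext4/inode.c 791-809 snippets listed here; the helper name is invented for illustration and error handling is reduced to the essentials:

        /* Illustrative sketch only; assumes the fs/ext4/ext4.h definitions
         * (struct ext4_map_blocks, EXT4_MAP_MAPPED, ext4_map_blocks()). */
        static int ext4_lookup_one_block(handle_t *handle, struct inode *inode,
                                         ext4_lblk_t lblk, ext4_fsblk_t *pblk)
        {
                struct ext4_map_blocks map;
                int ret;

                map.m_lblk = lblk;      /* first logical block to map */
                map.m_len = 1;          /* request a single block */
                map.m_flags = 0;

                /* flags == 0 means lookup only; EXT4_GET_BLOCKS_CREATE would allocate */
                ret = ext4_map_blocks(handle, inode, &map, 0);
                if (ret <= 0)
                        return ret;     /* error, or nothing mapped (hole) */

                if (!(map.m_flags & EXT4_MAP_MAPPED))
                        return 0;       /* unwritten or delayed: no usable pblk yet */

                *pblk = map.m_pblk;     /* physical block backing lblk */
                return ret;             /* number of contiguous blocks mapped */
        }

The same shape recurs throughout the listing: fill the request, call the mapper, then either map_bh() the result onto a buffer head or sb_getblk() the returned m_pblk.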
map 715 fs/ext4/inode.c ext4_es_scan_range(inode, &ext4_es_is_delayed, map->m_lblk, map 716 fs/ext4/inode.c map->m_lblk + map->m_len - 1)) map 718 fs/ext4/inode.c ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map 719 fs/ext4/inode.c map->m_pblk, status); map 728 fs/ext4/inode.c if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) { map 729 fs/ext4/inode.c ret = check_block_validity(inode, map); map 738 fs/ext4/inode.c if (map->m_flags & EXT4_MAP_NEW && map 739 fs/ext4/inode.c !(map->m_flags & EXT4_MAP_UNWRITTEN) && map 744 fs/ext4/inode.c (loff_t)map->m_lblk << inode->i_blkbits; map 745 fs/ext4/inode.c loff_t length = (loff_t)map->m_len << inode->i_blkbits; map 791 fs/ext4/inode.c struct ext4_map_blocks map; map 797 fs/ext4/inode.c map.m_lblk = iblock; map 798 fs/ext4/inode.c map.m_len = bh->b_size >> inode->i_blkbits; map 800 fs/ext4/inode.c ret = ext4_map_blocks(ext4_journal_current_handle(), inode, &map, map 803 fs/ext4/inode.c map_bh(bh, inode->i_sb, map.m_pblk); map 804 fs/ext4/inode.c ext4_update_bh_state(bh, map.m_flags); map 805 fs/ext4/inode.c bh->b_size = inode->i_sb->s_blocksize * map.m_len; map 809 fs/ext4/inode.c bh->b_size = inode->i_sb->s_blocksize * map.m_len; map 974 fs/ext4/inode.c struct ext4_map_blocks map; map 981 fs/ext4/inode.c map.m_lblk = block; map 982 fs/ext4/inode.c map.m_len = 1; map 983 fs/ext4/inode.c err = ext4_map_blocks(handle, inode, &map, map_flags); map 990 fs/ext4/inode.c bh = sb_getblk(inode->i_sb, map.m_pblk); map 993 fs/ext4/inode.c if (map.m_flags & EXT4_MAP_NEW) { map 1684 fs/ext4/inode.c struct ext4_map_blocks map; map 1826 fs/ext4/inode.c struct ext4_map_blocks *map, map 1835 fs/ext4/inode.c memcpy(&orig_map, map, sizeof(*map)); map 1841 fs/ext4/inode.c map->m_flags = 0; map 1843 fs/ext4/inode.c "logical block %lu\n", inode->i_ino, map->m_len, map 1844 fs/ext4/inode.c (unsigned long) map->m_lblk); map 1865 fs/ext4/inode.c map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk; map 1867 fs/ext4/inode.c if (retval > map->m_len) map 1868 fs/ext4/inode.c retval = map->m_len; map 1869 fs/ext4/inode.c map->m_len = retval; map 1871 fs/ext4/inode.c map->m_flags |= EXT4_MAP_MAPPED; map 1873 fs/ext4/inode.c map->m_flags |= EXT4_MAP_UNWRITTEN; map 1878 fs/ext4/inode.c ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0); map 1891 fs/ext4/inode.c retval = ext4_ext_map_blocks(NULL, inode, map, 0); map 1893 fs/ext4/inode.c retval = ext4_ind_map_blocks(NULL, inode, map, 0); map 1904 fs/ext4/inode.c ret = ext4_insert_delayed_block(inode, map->m_lblk); map 1917 fs/ext4/inode.c if (unlikely(retval != map->m_len)) { map 1921 fs/ext4/inode.c inode->i_ino, retval, map->m_len); map 1925 fs/ext4/inode.c status = map->m_flags & EXT4_MAP_UNWRITTEN ? 
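Several of the fs/ext4/inode.c lines listed here (539-554, and again 1865-1869 in the delayed-allocation path) are the extent-status-cache fast path: when a cached extent es already covers map->m_lblk, the physical block and usable length follow from offset arithmetic alone, with no extent-tree lookup. A sketch of just that calculation, assuming the fs/ext4/extents_status.h helpers; the function name is illustrative and locking, tracing and flag bookkeeping beyond m_flags are omitted:

        /* Sketch: es is a cached extent known to cover map->m_lblk. */
        static unsigned int ext4_map_from_es(struct ext4_map_blocks *map,
                                             struct extent_status *es)
        {
                ext4_lblk_t offset = map->m_lblk - es->es_lblk;
                unsigned int retval;

                if (ext4_es_is_written(es) || ext4_es_is_unwritten(es)) {
                        map->m_pblk = ext4_es_pblock(es) + offset;
                        map->m_flags |= ext4_es_is_written(es) ?
                                        EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
                } else {
                        map->m_pblk = 0;        /* delayed or hole: nothing on disk yet */
                }

                /* usable length: rest of the cached extent, clamped to the request */
                retval = es->es_len - offset;
                if (retval > map->m_len)
                        retval = map->m_len;
                map->m_len = retval;
                return retval;
        }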
map 1927 fs/ext4/inode.c ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len, map 1928 fs/ext4/inode.c map->m_pblk, status); map 1954 fs/ext4/inode.c struct ext4_map_blocks map; map 1960 fs/ext4/inode.c map.m_lblk = iblock; map 1961 fs/ext4/inode.c map.m_len = 1; map 1968 fs/ext4/inode.c ret = ext4_da_map_blocks(inode, iblock, &map, bh); map 1972 fs/ext4/inode.c map_bh(bh, inode->i_sb, map.m_pblk); map 1973 fs/ext4/inode.c ext4_update_bh_state(bh, map.m_flags); map 2265 fs/ext4/inode.c struct ext4_map_blocks *map = &mpd->map; map 2271 fs/ext4/inode.c if (map->m_len == 0) map 2277 fs/ext4/inode.c if (map->m_len == 0) { map 2281 fs/ext4/inode.c map->m_lblk = lblk; map 2282 fs/ext4/inode.c map->m_len = 1; map 2283 fs/ext4/inode.c map->m_flags = bh->b_state & BH_FLAGS; map 2288 fs/ext4/inode.c if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN) map 2292 fs/ext4/inode.c if (lblk == map->m_lblk + map->m_len && map 2293 fs/ext4/inode.c (bh->b_state & BH_FLAGS) == map->m_flags) { map 2294 fs/ext4/inode.c map->m_len++; map 2334 fs/ext4/inode.c if (mpd->map.m_len) map 2344 fs/ext4/inode.c if (mpd->map.m_len == 0) { map 2378 fs/ext4/inode.c start = mpd->map.m_lblk >> bpp_bits; map 2379 fs/ext4/inode.c end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits; map 2381 fs/ext4/inode.c pblock = mpd->map.m_pblk; map 2394 fs/ext4/inode.c if (lblk < mpd->map.m_lblk) map 2396 fs/ext4/inode.c if (lblk >= mpd->map.m_lblk + mpd->map.m_len) { map 2401 fs/ext4/inode.c mpd->map.m_len = 0; map 2402 fs/ext4/inode.c mpd->map.m_flags = 0; map 2440 fs/ext4/inode.c mpd->map.m_len = 0; map 2441 fs/ext4/inode.c mpd->map.m_flags = 0; map 2448 fs/ext4/inode.c struct ext4_map_blocks *map = &mpd->map; map 2452 fs/ext4/inode.c trace_ext4_da_write_pages_extent(inode, map); map 2474 fs/ext4/inode.c if (map->m_flags & (1 << BH_Delay)) map 2477 fs/ext4/inode.c err = ext4_map_blocks(handle, inode, map, get_blocks_flags); map 2480 fs/ext4/inode.c if (dioread_nolock && (map->m_flags & EXT4_MAP_UNWRITTEN)) { map 2489 fs/ext4/inode.c BUG_ON(map->m_len == 0); map 2518 fs/ext4/inode.c struct ext4_map_blocks *map = &mpd->map; map 2524 fs/ext4/inode.c ((loff_t)map->m_lblk) << inode->i_blkbits; map 2549 fs/ext4/inode.c (unsigned long long)map->m_lblk, map 2550 fs/ext4/inode.c (unsigned)map->m_len, -err); map 2568 fs/ext4/inode.c } while (map->m_len); map 2651 fs/ext4/inode.c mpd->map.m_len = 0; map 2674 fs/ext4/inode.c if (mpd->map.m_len > 0 && mpd->next_page != page->index) map 2696 fs/ext4/inode.c if (mpd->map.m_len == 0) map 2877 fs/ext4/inode.c if (mpd.map.m_len) map 3424 fs/ext4/inode.c struct ext4_map_blocks map; map 3448 fs/ext4/inode.c map.m_lblk = first_block; map 3449 fs/ext4/inode.c map.m_len = last_block - first_block + 1; map 3452 fs/ext4/inode.c ret = ext4_map_blocks(NULL, inode, &map, 0); map 3457 fs/ext4/inode.c ext4_lblk_t end = map.m_lblk + map.m_len - 1; map 3461 fs/ext4/inode.c map.m_lblk, end, &es); map 3465 fs/ext4/inode.c } else if (es.es_lblk > map.m_lblk) { map 3467 fs/ext4/inode.c map.m_len = es.es_lblk - map.m_lblk; map 3471 fs/ext4/inode.c if (es.es_lblk < map.m_lblk) map 3472 fs/ext4/inode.c offs = map.m_lblk - es.es_lblk; map 3473 fs/ext4/inode.c map.m_lblk = es.es_lblk + offs; map 3474 fs/ext4/inode.c map.m_len = es.es_len - offs; map 3484 fs/ext4/inode.c if (map.m_len > DIO_MAX_BLOCKS) map 3485 fs/ext4/inode.c map.m_len = DIO_MAX_BLOCKS; map 3486 fs/ext4/inode.c dio_credits = ext4_chunk_trans_blocks(inode, map.m_len); map 3499 fs/ext4/inode.c ret = ext4_map_blocks(handle, inode, &map, map 3518 
fs/ext4/inode.c if (!(flags & IOMAP_FAULT) && first_block + map.m_len > map 3530 fs/ext4/inode.c ret = ext4_map_blocks(NULL, inode, &map, 0); map 3547 fs/ext4/inode.c iomap->length = (u64)map.m_len << blkbits; map 3553 fs/ext4/inode.c if (map.m_flags & EXT4_MAP_MAPPED) { map 3555 fs/ext4/inode.c } else if (map.m_flags & EXT4_MAP_UNWRITTEN) { map 3561 fs/ext4/inode.c iomap->addr = (u64)map.m_pblk << blkbits; map 3564 fs/ext4/inode.c if (map.m_flags & EXT4_MAP_NEW) map 278 fs/ext4/namei.c struct dx_map_entry map[]); map 279 fs/ext4/namei.c static void dx_sort_map(struct dx_map_entry *map, unsigned count); map 1233 fs/ext4/namei.c static void dx_sort_map (struct dx_map_entry *map, unsigned count) map 1235 fs/ext4/namei.c struct dx_map_entry *p, *q, *top = map + count - 1; map 1242 fs/ext4/namei.c for (p = top, q = p - count; q >= map; p--, q--) map 1250 fs/ext4/namei.c while (q-- > map) { map 1762 fs/ext4/namei.c dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count, map 1769 fs/ext4/namei.c (from + (map->offs<<2)); map 1775 fs/ext4/namei.c map++; map 1820 fs/ext4/namei.c struct dx_map_entry *map; map 1850 fs/ext4/namei.c map = (struct dx_map_entry *) (data2 + blocksize); map 1852 fs/ext4/namei.c blocksize, hinfo, map); map 1853 fs/ext4/namei.c map -= count; map 1854 fs/ext4/namei.c dx_sort_map(map, count); map 1860 fs/ext4/namei.c if (size + map[i].size/2 > blocksize/2) map 1862 fs/ext4/namei.c size += map[i].size; map 1867 fs/ext4/namei.c hash2 = map[split].hash; map 1868 fs/ext4/namei.c continued = hash2 == map[split - 1].hash; map 1874 fs/ext4/namei.c de2 = dx_move_dirents(data1, data2, map + split, count - split, map 244 fs/ext4/readpage.c struct ext4_map_blocks map; map 246 fs/ext4/readpage.c map.m_pblk = 0; map 247 fs/ext4/readpage.c map.m_lblk = 0; map 248 fs/ext4/readpage.c map.m_len = 0; map 249 fs/ext4/readpage.c map.m_flags = 0; map 279 fs/ext4/readpage.c if ((map.m_flags & EXT4_MAP_MAPPED) && map 280 fs/ext4/readpage.c block_in_file > map.m_lblk && map 281 fs/ext4/readpage.c block_in_file < (map.m_lblk + map.m_len)) { map 282 fs/ext4/readpage.c unsigned map_offset = block_in_file - map.m_lblk; map 283 fs/ext4/readpage.c unsigned last = map.m_len - map_offset; map 288 fs/ext4/readpage.c map.m_flags &= ~EXT4_MAP_MAPPED; map 293 fs/ext4/readpage.c blocks[page_block] = map.m_pblk + map_offset + map 306 fs/ext4/readpage.c map.m_lblk = block_in_file; map 307 fs/ext4/readpage.c map.m_len = last_block - block_in_file; map 309 fs/ext4/readpage.c if (ext4_map_blocks(NULL, inode, &map, 0) < 0) { map 318 fs/ext4/readpage.c if ((map.m_flags & EXT4_MAP_MAPPED) == 0) { map 330 fs/ext4/readpage.c if (page_block && blocks[page_block-1] != map.m_pblk-1) map 333 fs/ext4/readpage.c if (relative_block == map.m_len) { map 335 fs/ext4/readpage.c map.m_flags &= ~EXT4_MAP_MAPPED; map 339 fs/ext4/readpage.c blocks[page_block] = map.m_pblk+relative_block; map 398 fs/ext4/readpage.c if (((map.m_flags & EXT4_MAP_BOUNDARY) && map 399 fs/ext4/readpage.c (relative_block == map.m_len)) || map 1355 fs/ext4/xattr.c struct ext4_map_blocks map; map 1356 fs/ext4/xattr.c map.m_lblk = block += ret; map 1357 fs/ext4/xattr.c map.m_len = max_blocks -= ret; map 1359 fs/ext4/xattr.c ret = ext4_map_blocks(handle, ea_inode, &map, map 1072 fs/f2fs/data.c struct f2fs_map_blocks map; map 1077 fs/f2fs/data.c map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); map 1078 fs/f2fs/data.c map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from)); map 1079 fs/f2fs/data.c if (map.m_len > map.m_lblk) map 1080 
fs/f2fs/data.c map.m_len -= map.m_lblk; map 1082 fs/f2fs/data.c map.m_len = 0; map 1084 fs/f2fs/data.c map.m_next_pgofs = NULL; map 1085 fs/f2fs/data.c map.m_next_extent = NULL; map 1086 fs/f2fs/data.c map.m_seg_type = NO_CHECK_TYPE; map 1087 fs/f2fs/data.c map.m_may_create = true; map 1090 fs/f2fs/data.c map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint); map 1107 fs/f2fs/data.c err = f2fs_map_blocks(inode, &map, 1, flag); map 1108 fs/f2fs/data.c if (map.m_len > 0 && err == -ENOSPC) { map 1140 fs/f2fs/data.c int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, map 1143 fs/f2fs/data.c unsigned int maxblocks = map->m_len; map 1146 fs/f2fs/data.c int mode = map->m_may_create ? ALLOC_NODE : LOOKUP_NODE; map 1158 fs/f2fs/data.c map->m_len = 0; map 1159 fs/f2fs/data.c map->m_flags = 0; map 1162 fs/f2fs/data.c pgofs = (pgoff_t)map->m_lblk; map 1167 fs/f2fs/data.c map->m_may_create) map 1170 fs/f2fs/data.c map->m_pblk = ei.blk + pgofs - ei.fofs; map 1171 fs/f2fs/data.c map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); map 1172 fs/f2fs/data.c map->m_flags = F2FS_MAP_MAPPED; map 1173 fs/f2fs/data.c if (map->m_next_extent) map 1174 fs/f2fs/data.c *map->m_next_extent = pgofs + map->m_len; map 1179 fs/f2fs/data.c map->m_pblk, map->m_len); map 1184 fs/f2fs/data.c if (map->m_may_create) map 1192 fs/f2fs/data.c map->m_pblk = 0; map 1195 fs/f2fs/data.c if (map->m_next_pgofs) map 1196 fs/f2fs/data.c *map->m_next_pgofs = map 1198 fs/f2fs/data.c if (map->m_next_extent) map 1199 fs/f2fs/data.c *map->m_next_extent = map 1222 fs/f2fs/data.c map->m_may_create) { map 1223 fs/f2fs/data.c err = __allocate_data_block(&dn, map->m_seg_type); map 1244 fs/f2fs/data.c map->m_seg_type); map 1250 fs/f2fs/data.c map->m_flags |= F2FS_MAP_NEW; map 1254 fs/f2fs/data.c map->m_pblk = 0; map 1261 fs/f2fs/data.c if (map->m_next_pgofs) map 1262 fs/f2fs/data.c *map->m_next_pgofs = pgofs + 1; map 1267 fs/f2fs/data.c if (map->m_next_pgofs) map 1268 fs/f2fs/data.c *map->m_next_pgofs = pgofs + 1; map 1277 fs/f2fs/data.c if (map->m_len == 0) { map 1280 fs/f2fs/data.c map->m_flags |= F2FS_MAP_UNWRITTEN; map 1281 fs/f2fs/data.c map->m_flags |= F2FS_MAP_MAPPED; map 1283 fs/f2fs/data.c map->m_pblk = blkaddr; map 1284 fs/f2fs/data.c map->m_len = 1; map 1285 fs/f2fs/data.c } else if ((map->m_pblk != NEW_ADDR && map 1286 fs/f2fs/data.c blkaddr == (map->m_pblk + ofs)) || map 1287 fs/f2fs/data.c (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) || map 1290 fs/f2fs/data.c map->m_len++; map 1308 fs/f2fs/data.c map->m_len += dn.ofs_in_node - ofs_in_node; map 1322 fs/f2fs/data.c if (map->m_flags & F2FS_MAP_MAPPED) { map 1323 fs/f2fs/data.c unsigned int ofs = start_pgofs - map->m_lblk; map 1326 fs/f2fs/data.c start_pgofs, map->m_pblk + ofs, map 1327 fs/f2fs/data.c map->m_len - ofs); map 1333 fs/f2fs/data.c if (map->m_may_create) { map 1342 fs/f2fs/data.c if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) map 1344 fs/f2fs/data.c map->m_pblk, map->m_len); map 1347 fs/f2fs/data.c if (map->m_flags & F2FS_MAP_MAPPED) { map 1348 fs/f2fs/data.c unsigned int ofs = start_pgofs - map->m_lblk; map 1351 fs/f2fs/data.c start_pgofs, map->m_pblk + ofs, map 1352 fs/f2fs/data.c map->m_len - ofs); map 1354 fs/f2fs/data.c if (map->m_next_extent) map 1355 fs/f2fs/data.c *map->m_next_extent = pgofs + 1; map 1359 fs/f2fs/data.c if (map->m_may_create) { map 1364 fs/f2fs/data.c trace_f2fs_map_blocks(inode, map, err); map 1370 fs/f2fs/data.c struct f2fs_map_blocks map; map 1377 fs/f2fs/data.c map.m_lblk = 
F2FS_BYTES_TO_BLK(pos); map 1378 fs/f2fs/data.c map.m_next_pgofs = NULL; map 1379 fs/f2fs/data.c map.m_next_extent = NULL; map 1380 fs/f2fs/data.c map.m_seg_type = NO_CHECK_TYPE; map 1381 fs/f2fs/data.c map.m_may_create = false; map 1384 fs/f2fs/data.c while (map.m_lblk < last_lblk) { map 1385 fs/f2fs/data.c map.m_len = last_lblk - map.m_lblk; map 1386 fs/f2fs/data.c err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); map 1387 fs/f2fs/data.c if (err || map.m_len == 0) map 1389 fs/f2fs/data.c map.m_lblk += map.m_len; map 1398 fs/f2fs/data.c struct f2fs_map_blocks map; map 1401 fs/f2fs/data.c map.m_lblk = iblock; map 1402 fs/f2fs/data.c map.m_len = bh->b_size >> inode->i_blkbits; map 1403 fs/f2fs/data.c map.m_next_pgofs = next_pgofs; map 1404 fs/f2fs/data.c map.m_next_extent = NULL; map 1405 fs/f2fs/data.c map.m_seg_type = seg_type; map 1406 fs/f2fs/data.c map.m_may_create = may_write; map 1408 fs/f2fs/data.c err = f2fs_map_blocks(inode, &map, create, flag); map 1410 fs/f2fs/data.c map_bh(bh, inode->i_sb, map.m_pblk); map 1411 fs/f2fs/data.c bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags; map 1412 fs/f2fs/data.c bh->b_size = (u64)map.m_len << inode->i_blkbits; map 1640 fs/f2fs/data.c struct f2fs_map_blocks *map, map 1667 fs/f2fs/data.c if ((map->m_flags & F2FS_MAP_MAPPED) && map 1668 fs/f2fs/data.c block_in_file > map->m_lblk && map 1669 fs/f2fs/data.c block_in_file < (map->m_lblk + map->m_len)) map 1676 fs/f2fs/data.c map->m_lblk = block_in_file; map 1677 fs/f2fs/data.c map->m_len = last_block - block_in_file; map 1679 fs/f2fs/data.c ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT); map 1683 fs/f2fs/data.c if ((map->m_flags & F2FS_MAP_MAPPED)) { map 1684 fs/f2fs/data.c block_nr = map->m_pblk + block_in_file - map->m_lblk; map 1772 fs/f2fs/data.c struct f2fs_map_blocks map; map 1775 fs/f2fs/data.c map.m_pblk = 0; map 1776 fs/f2fs/data.c map.m_lblk = 0; map 1777 fs/f2fs/data.c map.m_len = 0; map 1778 fs/f2fs/data.c map.m_flags = 0; map 1779 fs/f2fs/data.c map.m_next_pgofs = NULL; map 1780 fs/f2fs/data.c map.m_next_extent = NULL; map 1781 fs/f2fs/data.c map.m_seg_type = NO_CHECK_TYPE; map 1782 fs/f2fs/data.c map.m_may_create = false; map 1796 fs/f2fs/data.c ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio, map 3227 fs/f2fs/f2fs.h int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, map 1517 fs/f2fs/file.c struct f2fs_map_blocks map = { .m_next_pgofs = NULL, map 1538 fs/f2fs/file.c map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT; map 1539 fs/f2fs/file.c map.m_len = pg_end - map.m_lblk; map 1541 fs/f2fs/file.c map.m_len++; map 1544 fs/f2fs/file.c map.m_seg_type = CURSEG_COLD_DATA; map 1546 fs/f2fs/file.c err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ? 
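f2fs mirrors the same idea with struct f2fs_map_blocks, and the fs/f2fs/data.c lines around 1377-1389 (plus fs/f2fs/file.c 2424-2443) show it being driven in a loop over a logical range, advancing m_lblk by whatever m_len each call returns. A condensed sketch of that range walk, checking whether a byte range is already backed by mapped blocks; the wrapper name and the explicit F2FS_MAP_MAPPED test are illustrative additions, and the real callers add their own policy around holes:

        /* Sketch of the range-walk pattern shown above; assumes fs/f2fs/f2fs.h. */
        static bool f2fs_range_is_mapped(struct inode *inode, loff_t pos, size_t len)
        {
                struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
                                               .m_next_extent = NULL,
                                               .m_seg_type = NO_CHECK_TYPE,
                                               .m_may_create = false };
                pgoff_t last_lblk = F2FS_BLK_ALIGN(pos + len);
                int err;

                map.m_lblk = F2FS_BYTES_TO_BLK(pos);
                while (map.m_lblk < last_lblk) {
                        map.m_len = last_lblk - map.m_lblk;
                        /* create == 0: lookup only; the flag selects the lookup policy */
                        err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
                        if (err || map.m_len == 0 ||
                            !(map.m_flags & F2FS_MAP_MAPPED))
                                return false;           /* error or unmapped gap */
                        map.m_lblk += map.m_len;        /* skip past the mapped chunk */
                }
                return true;
        }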
map 1552 fs/f2fs/file.c if (!map.m_len) map 1555 fs/f2fs/file.c last_off = map.m_lblk + map.m_len - 1; map 2379 fs/f2fs/file.c struct f2fs_map_blocks map = { .m_next_extent = NULL, map 2416 fs/f2fs/file.c map.m_lblk = pg_start; map 2417 fs/f2fs/file.c map.m_next_pgofs = &next_pgofs; map 2424 fs/f2fs/file.c while (map.m_lblk < pg_end) { map 2425 fs/f2fs/file.c map.m_len = pg_end - map.m_lblk; map 2426 fs/f2fs/file.c err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); map 2430 fs/f2fs/file.c if (!(map.m_flags & F2FS_MAP_FLAGS)) { map 2431 fs/f2fs/file.c map.m_lblk = next_pgofs; map 2435 fs/f2fs/file.c if (blk_end && blk_end != map.m_pblk) map 2439 fs/f2fs/file.c total += map.m_len; map 2441 fs/f2fs/file.c blk_end = map.m_pblk + map.m_len; map 2443 fs/f2fs/file.c map.m_lblk += map.m_len; map 2463 fs/f2fs/file.c map.m_lblk = pg_start; map 2464 fs/f2fs/file.c map.m_len = pg_end - pg_start; map 2467 fs/f2fs/file.c while (map.m_lblk < pg_end) { map 2472 fs/f2fs/file.c map.m_len = pg_end - map.m_lblk; map 2473 fs/f2fs/file.c err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT); map 2477 fs/f2fs/file.c if (!(map.m_flags & F2FS_MAP_FLAGS)) { map 2478 fs/f2fs/file.c map.m_lblk = next_pgofs; map 2484 fs/f2fs/file.c idx = map.m_lblk; map 2485 fs/f2fs/file.c while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) { map 2502 fs/f2fs/file.c map.m_lblk = idx; map 2504 fs/f2fs/file.c if (map.m_lblk < pg_end && cnt < blk_per_seg) map 3088 fs/f2fs/file.c struct f2fs_map_blocks map; map 3096 fs/f2fs/file.c map.m_lblk = 0; map 3097 fs/f2fs/file.c map.m_next_pgofs = NULL; map 3098 fs/f2fs/file.c map.m_next_extent = &m_next_extent; map 3099 fs/f2fs/file.c map.m_seg_type = NO_CHECK_TYPE; map 3100 fs/f2fs/file.c map.m_may_create = false; map 3103 fs/f2fs/file.c while (map.m_lblk < end) { map 3104 fs/f2fs/file.c map.m_len = end - map.m_lblk; map 3107 fs/f2fs/file.c err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE); map 3112 fs/f2fs/file.c map.m_lblk = m_next_extent; map 1050 fs/f2fs/segment.c unsigned long *map; map 1061 fs/f2fs/segment.c map = (unsigned long *)(sentry->cur_valid_map); map 1062 fs/f2fs/segment.c offset = __find_rev_next_bit(map, size, offset); map 239 fs/hpfs/hpfs.h u8 map[128]; /* upcase table for chars 80..ff */ map 92 fs/isofs/dir.c int map; map 205 fs/isofs/dir.c map = 1; map 210 fs/isofs/dir.c map = 0; map 213 fs/isofs/dir.c if (map) { map 159 fs/isofs/inode.c unsigned char map; map 343 fs/isofs/inode.c popt->map = 'n'; map 403 fs/isofs/inode.c popt->map = 'a'; map 406 fs/isofs/inode.c popt->map = 'o'; map 409 fs/isofs/inode.c popt->map = 'n'; map 883 fs/isofs/inode.c sbi->s_mapping = opt.map; map 27 fs/minix/bitmap.c static __u32 count_free(struct buffer_head *map[], unsigned blocksize, __u32 numbits) map 34 fs/minix/bitmap.c __u16 *p = (__u16 *)(*map++)->b_data; map 156 fs/minix/inode.c struct buffer_head **map; map 237 fs/minix/inode.c map = kzalloc(i, GFP_KERNEL); map 238 fs/minix/inode.c if (!map) map 240 fs/minix/inode.c sbi->s_imap = &map[0]; map 241 fs/minix/inode.c sbi->s_zmap = &map[sbi->s_imap_blocks]; map 140 fs/nfs/blocklayout/blocklayout.c static bool offset_in_map(u64 offset, struct pnfs_block_dev_map *map) map 142 fs/nfs/blocklayout/blocklayout.c return offset >= map->start && offset < map->start + map->len; map 147 fs/nfs/blocklayout/blocklayout.c struct page *page, struct pnfs_block_dev_map *map, map 164 fs/nfs/blocklayout/blocklayout.c if (!offset_in_map(disk_addr, map)) { map 165 fs/nfs/blocklayout/blocklayout.c if (!dev->map(dev, 
disk_addr, map) || !offset_in_map(disk_addr, map)) map 169 fs/nfs/blocklayout/blocklayout.c disk_addr += map->disk_offset; map 170 fs/nfs/blocklayout/blocklayout.c disk_addr -= map->start; map 174 fs/nfs/blocklayout/blocklayout.c if (end >= map->start + map->len) map 175 fs/nfs/blocklayout/blocklayout.c *len = map->start + map->len - disk_addr; map 179 fs/nfs/blocklayout/blocklayout.c bio = bl_alloc_init_bio(npg, map->bdev, map 256 fs/nfs/blocklayout/blocklayout.c struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; map 313 fs/nfs/blocklayout/blocklayout.c map.start = NFS4_MAX_UINT64; map 318 fs/nfs/blocklayout/blocklayout.c isect, pages[i], &map, &be, map 401 fs/nfs/blocklayout/blocklayout.c struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 }; map 446 fs/nfs/blocklayout/blocklayout.c WRITE, isect, pages[i], &map, &be, map 117 fs/nfs/blocklayout/blocklayout.h bool (*map)(struct pnfs_block_dev *dev, u64 offset, map 118 fs/nfs/blocklayout/blocklayout.h struct pnfs_block_dev_map *map); map 167 fs/nfs/blocklayout/dev.c struct pnfs_block_dev_map *map) map 169 fs/nfs/blocklayout/dev.c map->start = dev->start; map 170 fs/nfs/blocklayout/dev.c map->len = dev->len; map 171 fs/nfs/blocklayout/dev.c map->disk_offset = dev->disk_offset; map 172 fs/nfs/blocklayout/dev.c map->bdev = dev->bdev; map 177 fs/nfs/blocklayout/dev.c struct pnfs_block_dev_map *map) map 188 fs/nfs/blocklayout/dev.c child->map(child, offset - child->start, map); map 197 fs/nfs/blocklayout/dev.c struct pnfs_block_dev_map *map) map 221 fs/nfs/blocklayout/dev.c child->map(child, disk_offset, map); map 223 fs/nfs/blocklayout/dev.c map->start += offset; map 224 fs/nfs/blocklayout/dev.c map->disk_offset += disk_offset; map 225 fs/nfs/blocklayout/dev.c map->len = dev->chunk_size; map 256 fs/nfs/blocklayout/dev.c d->map = bl_map_simple; map 371 fs/nfs/blocklayout/dev.c d->map = bl_map_simple; map 441 fs/nfs/blocklayout/dev.c d->map = bl_map_concat; map 470 fs/nfs/blocklayout/dev.c d->map = bl_map_stripe; map 92 fs/nfsd/nfs4idmap.c struct ent *map = container_of(ref, struct ent, h.ref); map 93 fs/nfsd/nfs4idmap.c kfree_rcu(map, rcu_head); map 60 fs/nfsd/nfs4layouts.c struct nfsd4_deviceid_map *map, *old; map 63 fs/nfsd/nfs4layouts.c map = kzalloc(sizeof(*map) + fsid_len, GFP_KERNEL); map 64 fs/nfsd/nfs4layouts.c if (!map) map 67 fs/nfsd/nfs4layouts.c map->fsid_type = fh->fh_fsid_type; map 68 fs/nfsd/nfs4layouts.c memcpy(&map->fsid, fh->fh_fsid, fsid_len); map 87 fs/nfsd/nfs4layouts.c map->idx = nfsd_devid_seq++; map 88 fs/nfsd/nfs4layouts.c list_add_tail_rcu(&map->hash, &nfsd_devid_hash[devid_hashfn(map->idx)]); map 89 fs/nfsd/nfs4layouts.c fhp->fh_export->ex_devid_map = map; map 90 fs/nfsd/nfs4layouts.c map = NULL; map 94 fs/nfsd/nfs4layouts.c kfree(map); map 100 fs/nfsd/nfs4layouts.c struct nfsd4_deviceid_map *map, *ret = NULL; map 103 fs/nfsd/nfs4layouts.c list_for_each_entry_rcu(map, &nfsd_devid_hash[devid_hashfn(idx)], hash) map 104 fs/nfsd/nfs4layouts.c if (map->idx == idx) map 105 fs/nfsd/nfs4layouts.c ret = map; map 781 fs/nfsd/nfs4layouts.c struct nfsd4_deviceid_map *map, *n; map 783 fs/nfsd/nfs4layouts.c list_for_each_entry_safe(map, n, &nfsd_devid_hash[i], hash) map 784 fs/nfsd/nfs4layouts.c kfree(map); map 1551 fs/nfsd/nfs4proc.c struct nfsd4_deviceid_map *map; map 1561 fs/nfsd/nfs4proc.c map = nfsd4_find_devid_map(gdp->gd_devid.fsid_idx); map 1562 fs/nfsd/nfs4proc.c if (!map) { map 1568 fs/nfsd/nfs4proc.c exp = rqst_exp_find(rqstp, map->fsid_type, map->fsid); map 634 fs/nfsd/vfs.c struct accessmap *map; map 
648 fs/nfsd/vfs.c map = nfs3_regaccess; map 650 fs/nfsd/vfs.c map = nfs3_diraccess; map 652 fs/nfsd/vfs.c map = nfs3_anyaccess; map 656 fs/nfsd/vfs.c for (; map->access; map++) { map 657 fs/nfsd/vfs.c if (map->access & query) { map 660 fs/nfsd/vfs.c sresult |= map->access; map 662 fs/nfsd/vfs.c err2 = nfsd_permission(rqstp, export, dentry, map->how); map 665 fs/nfsd/vfs.c result |= map->access; map 1280 fs/ocfs2/cluster/heartbeat.c unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; map 1287 fs/ocfs2/cluster/heartbeat.c BUG_ON(sizeof(map) < db->db_size); map 1299 fs/ocfs2/cluster/heartbeat.c memcpy(map, db->db_data, db->db_size); map 1306 fs/ocfs2/cluster/heartbeat.c memcpy(map, reg->hr_live_node_bitmap, db->db_size); map 1335 fs/ocfs2/cluster/heartbeat.c while ((i = find_next_bit(map, db->db_len, i + 1)) < db->db_len) map 1463 fs/ocfs2/cluster/heartbeat.c static void o2hb_fill_node_map_from_callback(unsigned long *map, map 1468 fs/ocfs2/cluster/heartbeat.c memcpy(map, &o2hb_live_node_bitmap, bytes); map 1474 fs/ocfs2/cluster/heartbeat.c void o2hb_fill_node_map(unsigned long *map, unsigned bytes) map 1480 fs/ocfs2/cluster/heartbeat.c o2hb_fill_node_map_from_callback(map, bytes); map 63 fs/ocfs2/cluster/heartbeat.h void o2hb_fill_node_map(unsigned long *map, map 440 fs/ocfs2/cluster/netdebug.c unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)]; map 443 fs/ocfs2/cluster/netdebug.c o2net_fill_node_map(map, sizeof(map)); map 445 fs/ocfs2/cluster/netdebug.c while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES) map 49 fs/ocfs2/cluster/nodemanager.c int o2nm_configured_node_map(unsigned long *map, unsigned bytes) map 59 fs/ocfs2/cluster/nodemanager.c memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap)); map 62 fs/ocfs2/cluster/nodemanager.h int o2nm_configured_node_map(unsigned long *map, unsigned bytes); map 995 fs/ocfs2/cluster/tcp.c void o2net_fill_node_map(unsigned long *map, unsigned bytes) map 1002 fs/ocfs2/cluster/tcp.c memset(map, 0, bytes); map 1007 fs/ocfs2/cluster/tcp.c set_bit(node, map); map 95 fs/ocfs2/cluster/tcp.h void o2net_fill_node_map(unsigned long *map, unsigned bytes); map 1104 fs/ocfs2/dlm/dlmcommon.h static inline void dlm_node_iter_init(unsigned long *map, map 1107 fs/ocfs2/dlm/dlmcommon.h memcpy(iter->node_map, map, sizeof(iter->node_map)); map 41 fs/ocfs2/dlm/dlmdomain.c static inline void byte_set_bit(u8 nr, u8 map[]) map 43 fs/ocfs2/dlm/dlmdomain.c map[nr >> 3] |= (1UL << (nr & 7)); map 46 fs/ocfs2/dlm/dlmdomain.c static inline int byte_test_bit(u8 nr, u8 map[]) map 48 fs/ocfs2/dlm/dlmdomain.c return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0; map 29 fs/ocfs2/heartbeat.c static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, map 31 fs/ocfs2/heartbeat.c static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map, map 36 fs/ocfs2/heartbeat.c static void ocfs2_node_map_init(struct ocfs2_node_map *map) map 38 fs/ocfs2/heartbeat.c map->num_nodes = OCFS2_NODE_MAP_MAX_NODES; map 39 fs/ocfs2/heartbeat.c memset(map->map, 0, BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES) * map 70 fs/ocfs2/heartbeat.c static inline void __ocfs2_node_map_set_bit(struct ocfs2_node_map *map, map 73 fs/ocfs2/heartbeat.c set_bit(bit, map->map); map 77 fs/ocfs2/heartbeat.c struct ocfs2_node_map *map, map 82 fs/ocfs2/heartbeat.c BUG_ON(bit >= map->num_nodes); map 84 fs/ocfs2/heartbeat.c __ocfs2_node_map_set_bit(map, bit); map 88 fs/ocfs2/heartbeat.c static inline void __ocfs2_node_map_clear_bit(struct ocfs2_node_map *map, map 91 
fs/ocfs2/heartbeat.c clear_bit(bit, map->map); map 95 fs/ocfs2/heartbeat.c struct ocfs2_node_map *map, map 100 fs/ocfs2/heartbeat.c BUG_ON(bit >= map->num_nodes); map 102 fs/ocfs2/heartbeat.c __ocfs2_node_map_clear_bit(map, bit); map 107 fs/ocfs2/heartbeat.c struct ocfs2_node_map *map, map 111 fs/ocfs2/heartbeat.c if (bit >= map->num_nodes) { map 112 fs/ocfs2/heartbeat.c mlog(ML_ERROR, "bit=%d map->num_nodes=%d\n", bit, map->num_nodes); map 116 fs/ocfs2/heartbeat.c ret = test_bit(bit, map->map); map 22 fs/ocfs2/heartbeat.h struct ocfs2_node_map *map, map 25 fs/ocfs2/heartbeat.h struct ocfs2_node_map *map, map 28 fs/ocfs2/heartbeat.h struct ocfs2_node_map *map, map 91 fs/ocfs2/ocfs2.h unsigned long map[BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES)]; map 48 fs/omfs/bitmap.c static int set_run(struct super_block *sb, int map, map 57 fs/omfs/bitmap.c bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); map 64 fs/omfs/bitmap.c map++; map 69 fs/omfs/bitmap.c clus_to_blk(sbi, sbi->s_bitmap_ino) + map); map 74 fs/omfs/bitmap.c set_bit(bit, sbi->s_imap[map]); map 77 fs/omfs/bitmap.c clear_bit(bit, sbi->s_imap[map]); map 96 fs/omfs/bitmap.c unsigned int map, bit; map 102 fs/omfs/bitmap.c map = tmp; map 105 fs/omfs/bitmap.c if (map >= sbi->s_imap_size || test_and_set_bit(bit, sbi->s_imap[map])) map 109 fs/omfs/bitmap.c bh = sb_bread(sb, clus_to_blk(sbi, sbi->s_bitmap_ino) + map); map 180 fs/omfs/bitmap.c unsigned int map, bit; map 185 fs/omfs/bitmap.c map = tmp; map 187 fs/omfs/bitmap.c if (map >= sbi->s_imap_size) map 191 fs/omfs/bitmap.c ret = set_run(sb, map, bits_per_entry, bit, count, 0); map 15 fs/orangefs/orangefs-bufmap.c unsigned long *map; map 28 fs/orangefs/orangefs-bufmap.c static void install(struct slot_map *m, int count, unsigned long *map) map 32 fs/orangefs/orangefs-bufmap.c m->map = map; map 64 fs/orangefs/orangefs-bufmap.c m->map = NULL; map 72 fs/orangefs/orangefs-bufmap.c __clear_bit(slot, m->map); map 132 fs/orangefs/orangefs-bufmap.c res = find_first_zero_bit(m->map, m->count); map 133 fs/orangefs/orangefs-bufmap.c __set_bit(res, m->map); map 17 fs/reiserfs/objectid.c static void check_objectid_map(struct super_block *s, __le32 * map) map 19 fs/reiserfs/objectid.c if (le32_to_cpu(map[0]) != 1) map 21 fs/reiserfs/objectid.c (long unsigned int)le32_to_cpu(map[0])); map 27 fs/reiserfs/objectid.c static void check_objectid_map(struct super_block *s, __le32 * map) map 53 fs/reiserfs/objectid.c __le32 *map = objectid_map(s, rs); map 58 fs/reiserfs/objectid.c check_objectid_map(s, map); map 62 fs/reiserfs/objectid.c unused_objectid = le32_to_cpu(map[1]); map 76 fs/reiserfs/objectid.c map[1] = cpu_to_le32(unused_objectid + 1); map 86 fs/reiserfs/objectid.c if (sb_oid_cursize(rs) > 2 && map[1] == map[2]) { map 87 fs/reiserfs/objectid.c memmove(map + 1, map + 3, map 102 fs/reiserfs/objectid.c __le32 *map = objectid_map(s, rs); map 107 fs/reiserfs/objectid.c check_objectid_map(s, map); map 120 fs/reiserfs/objectid.c if (objectid_to_release == le32_to_cpu(map[i])) { map 122 fs/reiserfs/objectid.c le32_add_cpu(&map[i], 1); map 128 fs/reiserfs/objectid.c if (map[i] == map[i + 1]) { map 130 fs/reiserfs/objectid.c memmove(map + i, map + i + 2, map 143 fs/reiserfs/objectid.c if (objectid_to_release > le32_to_cpu(map[i]) && map 144 fs/reiserfs/objectid.c objectid_to_release < le32_to_cpu(map[i + 1])) { map 146 fs/reiserfs/objectid.c if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) { map 147 fs/reiserfs/objectid.c le32_add_cpu(&map[i + 1], -1); map 165 fs/reiserfs/objectid.c memmove(map 
+ i + 3, map + i + 1, map 167 fs/reiserfs/objectid.c map[i + 1] = cpu_to_le32(objectid_to_release); map 168 fs/reiserfs/objectid.c map[i + 2] = cpu_to_le32(objectid_to_release + 1); map 656 fs/udf/balloc.c struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; map 658 fs/udf/balloc.c if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { map 659 fs/udf/balloc.c udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap, map 661 fs/udf/balloc.c } else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { map 662 fs/udf/balloc.c udf_table_free_blocks(sb, map->s_uspace.s_table, map 677 fs/udf/balloc.c struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; map 680 fs/udf/balloc.c if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) map 682 fs/udf/balloc.c map->s_uspace.s_bitmap, map 685 fs/udf/balloc.c else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) map 687 fs/udf/balloc.c map->s_uspace.s_table, map 702 fs/udf/balloc.c struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; map 705 fs/udf/balloc.c if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) map 707 fs/udf/balloc.c map->s_uspace.s_bitmap, map 709 fs/udf/balloc.c else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) map 711 fs/udf/balloc.c map->s_uspace.s_table, map 33 fs/udf/partition.c struct udf_part_map *map; map 39 fs/udf/partition.c map = &sbi->s_partmaps[partition]; map 40 fs/udf/partition.c if (map->s_partition_func) map 41 fs/udf/partition.c return map->s_partition_func(sb, block, partition, offset); map 43 fs/udf/partition.c return map->s_partition_root + block + offset; map 54 fs/udf/partition.c struct udf_part_map *map; map 58 fs/udf/partition.c map = &sbi->s_partmaps[partition]; map 59 fs/udf/partition.c vdata = &map->s_type_specific.s_virtual; map 118 fs/udf/partition.c struct udf_part_map *map; map 122 fs/udf/partition.c map = &sbi->s_partmaps[partition]; map 123 fs/udf/partition.c sdata = &map->s_type_specific.s_sparing; map 149 fs/udf/partition.c return map->s_partition_root + block + offset; map 166 fs/udf/partition.c struct udf_part_map *map = &sbi->s_partmaps[i]; map 167 fs/udf/partition.c if (old_block > map->s_partition_root && map 168 fs/udf/partition.c old_block < map->s_partition_root + map->s_partition_len) { map 169 fs/udf/partition.c sdata = &map->s_type_specific.s_sparing; map 170 fs/udf/partition.c packet = (old_block - map->s_partition_root) & map 212 fs/udf/partition.c map->s_partition_root) & map 220 fs/udf/partition.c map->s_partition_root) & map 258 fs/udf/partition.c ((old_block - map->s_partition_root) & map 284 fs/udf/partition.c struct udf_part_map *map; map 295 fs/udf/partition.c map = &UDF_SB(sb)->s_partmaps[partition]; map 298 fs/udf/partition.c map->s_type_specific.s_metadata.s_phys_partition_ref, map 310 fs/udf/partition.c struct udf_part_map *map; map 317 fs/udf/partition.c map = &sbi->s_partmaps[partition]; map 318 fs/udf/partition.c mdata = &map->s_type_specific.s_metadata; map 277 fs/udf/super.c static void udf_free_partition(struct udf_part_map *map) map 282 fs/udf/super.c if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) map 283 fs/udf/super.c iput(map->s_uspace.s_table); map 284 fs/udf/super.c if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) map 285 fs/udf/super.c udf_sb_free_bitmap(map->s_uspace.s_bitmap); map 286 fs/udf/super.c if (map->s_partition_type == UDF_SPARABLE_MAP15) map 288 fs/udf/super.c brelse(map->s_type_specific.s_sparing.s_spar_map[i]); map 289 fs/udf/super.c else if (map->s_partition_type 
== UDF_METADATA_MAP25) { map 290 fs/udf/super.c mdata = &map->s_type_specific.s_metadata; map 938 fs/udf/super.c struct udf_part_map *map; map 943 fs/udf/super.c map = &sbi->s_partmaps[partition]; map 944 fs/udf/super.c mdata = &map->s_type_specific.s_metadata; map 1000 fs/udf/super.c struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition]; map 1001 fs/udf/super.c return DIV_ROUND_UP(map->s_partition_len + map 1030 fs/udf/super.c struct udf_part_map *map) map 1064 fs/udf/super.c if (map->s_partition_type == UDF_VIRTUAL_MAP15 || map 1065 fs/udf/super.c map->s_partition_type == UDF_VIRTUAL_MAP20) map 1079 fs/udf/super.c struct udf_part_map *map; map 1084 fs/udf/super.c map = &sbi->s_partmaps[p_index]; map 1086 fs/udf/super.c map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */ map 1087 fs/udf/super.c map->s_partition_root = le32_to_cpu(p->partitionStartingLocation); map 1090 fs/udf/super.c map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY; map 1092 fs/udf/super.c map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE; map 1094 fs/udf/super.c map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE; map 1096 fs/udf/super.c map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE; map 1099 fs/udf/super.c p_index, map->s_partition_type, map 1100 fs/udf/super.c map->s_partition_root, map->s_partition_len); map 1102 fs/udf/super.c err = check_partition_desc(sb, p, map); map 1129 fs/udf/super.c map->s_uspace.s_table = inode; map 1130 fs/udf/super.c map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; map 1132 fs/udf/super.c p_index, map->s_uspace.s_table->i_ino); map 1139 fs/udf/super.c map->s_uspace.s_bitmap = bitmap; map 1142 fs/udf/super.c map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP; map 1154 fs/udf/super.c struct udf_part_map *map = &sbi->s_partmaps[p_index]; map 1165 fs/udf/super.c vat_block >= map->s_partition_root && map 1167 fs/udf/super.c ino.logicalBlockNum = vat_block - map->s_partition_root; map 1179 fs/udf/super.c struct udf_part_map *map = &sbi->s_partmaps[p_index]; map 1198 fs/udf/super.c if (map->s_partition_type == UDF_VIRTUAL_MAP15) { map 1199 fs/udf/super.c map->s_type_specific.s_virtual.s_start_offset = 0; map 1200 fs/udf/super.c map->s_type_specific.s_virtual.s_num_entries = map 1202 fs/udf/super.c } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) { map 1215 fs/udf/super.c map->s_type_specific.s_virtual.s_start_offset = map 1217 fs/udf/super.c map->s_type_specific.s_virtual.s_num_entries = map 1219 fs/udf/super.c map->s_type_specific.s_virtual. 
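The fs/udf entries above (partition.c 33-43 together with the super.c map-loading code) show how a block address is resolved through a per-partition udf_part_map: map types that need translation (sparable, virtual, metadata) install an s_partition_func hook at mount time, while a plain type-1 map is resolved by adding s_partition_root. A condensed sketch of that dispatch, with the partition bounds check omitted and the helper renamed to mark it as illustrative:

        /* Sketch of the per-partition dispatch visible in fs/udf/partition.c above;
         * assumes the udf_sb_info/udf_part_map definitions from fs/udf/udf_sb.h. */
        static uint32_t udf_resolve_pblock(struct super_block *sb, uint32_t block,
                                           uint16_t partition, uint32_t offset)
        {
                struct udf_sb_info *sbi = UDF_SB(sb);
                struct udf_part_map *map = &sbi->s_partmaps[partition];

                /* Sparable/virtual/metadata maps carry a translation hook ... */
                if (map->s_partition_func)
                        return map->s_partition_func(sb, block, partition, offset);

                /* ... a plain type-1 map is just a linear offset from the partition root. */
                return map->s_partition_root + block + offset;
        }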
map 1236 fs/udf/super.c struct udf_part_map *map; map 1256 fs/udf/super.c map = &sbi->s_partmaps[i]; map 1258 fs/udf/super.c map->s_partition_num, partitionNumber); map 1259 fs/udf/super.c if (map->s_partition_num == partitionNumber && map 1260 fs/udf/super.c (map->s_partition_type == UDF_TYPE1_MAP15 || map 1261 fs/udf/super.c map->s_partition_type == UDF_SPARABLE_MAP15)) map 1281 fs/udf/super.c map = NULL; /* supress 'maybe used uninitialized' warning */ map 1283 fs/udf/super.c map = &sbi->s_partmaps[i]; map 1285 fs/udf/super.c if (map->s_partition_num == partitionNumber && map 1286 fs/udf/super.c (map->s_partition_type == UDF_VIRTUAL_MAP15 || map 1287 fs/udf/super.c map->s_partition_type == UDF_VIRTUAL_MAP20 || map 1288 fs/udf/super.c map->s_partition_type == UDF_METADATA_MAP25)) map 1301 fs/udf/super.c if (map->s_partition_type == UDF_METADATA_MAP25) { map 1331 fs/udf/super.c struct udf_part_map *map, map 1337 fs/udf/super.c struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing; map 1341 fs/udf/super.c map->s_partition_type = UDF_SPARABLE_MAP15; map 1374 fs/udf/super.c map->s_partition_func = udf_get_pblock_spar15; map 1416 fs/udf/super.c struct udf_part_map *map = &sbi->s_partmaps[i]; map 1423 fs/udf/super.c map->s_partition_type = UDF_TYPE1_MAP15; map 1424 fs/udf/super.c map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum); map 1425 fs/udf/super.c map->s_partition_num = le16_to_cpu(gpm1->partitionNum); map 1426 fs/udf/super.c map->s_partition_func = NULL; map 1436 fs/udf/super.c map->s_partition_type = map 1438 fs/udf/super.c map->s_partition_func = map 1441 fs/udf/super.c map->s_partition_type = map 1443 fs/udf/super.c map->s_partition_func = map 1449 fs/udf/super.c ret = udf_load_sparable_map(sb, map, map 1457 fs/udf/super.c &map->s_type_specific.s_metadata; map 1464 fs/udf/super.c map->s_partition_type = UDF_METADATA_MAP25; map 1465 fs/udf/super.c map->s_partition_func = udf_get_pblock_meta25; map 1500 fs/udf/super.c map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum); map 1501 fs/udf/super.c map->s_partition_num = le16_to_cpu(upm2->partitionNum); map 1504 fs/udf/super.c i, map->s_partition_num, type, map->s_volumeseqnum); map 2495 fs/udf/super.c struct udf_part_map *map; map 2525 fs/udf/super.c map = &sbi->s_partmaps[part]; map 2526 fs/udf/super.c if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) { map 2528 fs/udf/super.c map->s_uspace.s_bitmap); map 2533 fs/udf/super.c if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) { map 2535 fs/udf/super.c map->s_uspace.s_table); map 421 fs/ufs/util.h unsigned char map; map 424 fs/ufs/util.h map = *mapp--; map 427 fs/ufs/util.h if ((map & bit) == 0) map 432 fs/ufs/util.h map = *mapp--; map 368 fs/xfs/libxfs/xfs_attr_remote.c struct xfs_bmbt_irec map[ATTR_RMTVALUE_MAPSIZE]; map 389 fs/xfs/libxfs/xfs_attr_remote.c blkcnt, map, &nmap, map 399 fs/xfs/libxfs/xfs_attr_remote.c ASSERT((map[i].br_startblock != DELAYSTARTBLOCK) && map 400 fs/xfs/libxfs/xfs_attr_remote.c (map[i].br_startblock != HOLESTARTBLOCK)); map 401 fs/xfs/libxfs/xfs_attr_remote.c dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock); map 402 fs/xfs/libxfs/xfs_attr_remote.c dblkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount); map 418 fs/xfs/libxfs/xfs_attr_remote.c lblkno += map[i].br_blockcount; map 419 fs/xfs/libxfs/xfs_attr_remote.c blkcnt -= map[i].br_blockcount; map 436 fs/xfs/libxfs/xfs_attr_remote.c struct xfs_bmbt_irec map; map 481 fs/xfs/libxfs/xfs_attr_remote.c blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map, map 490 fs/xfs/libxfs/xfs_attr_remote.c 
ASSERT((map.br_startblock != DELAYSTARTBLOCK) && map 491 fs/xfs/libxfs/xfs_attr_remote.c (map.br_startblock != HOLESTARTBLOCK)); map 492 fs/xfs/libxfs/xfs_attr_remote.c lblkno += map.br_blockcount; map 493 fs/xfs/libxfs/xfs_attr_remote.c blkcnt -= map.br_blockcount; map 521 fs/xfs/libxfs/xfs_attr_remote.c blkcnt, &map, &nmap, map 526 fs/xfs/libxfs/xfs_attr_remote.c ASSERT((map.br_startblock != DELAYSTARTBLOCK) && map 527 fs/xfs/libxfs/xfs_attr_remote.c (map.br_startblock != HOLESTARTBLOCK)); map 529 fs/xfs/libxfs/xfs_attr_remote.c dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), map 530 fs/xfs/libxfs/xfs_attr_remote.c dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); map 547 fs/xfs/libxfs/xfs_attr_remote.c lblkno += map.br_blockcount; map 548 fs/xfs/libxfs/xfs_attr_remote.c blkcnt -= map.br_blockcount; map 576 fs/xfs/libxfs/xfs_attr_remote.c struct xfs_bmbt_irec map; map 587 fs/xfs/libxfs/xfs_attr_remote.c blkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); map 591 fs/xfs/libxfs/xfs_attr_remote.c ASSERT((map.br_startblock != DELAYSTARTBLOCK) && map 592 fs/xfs/libxfs/xfs_attr_remote.c (map.br_startblock != HOLESTARTBLOCK)); map 594 fs/xfs/libxfs/xfs_attr_remote.c dblkno = XFS_FSB_TO_DADDR(mp, map.br_startblock), map 595 fs/xfs/libxfs/xfs_attr_remote.c dblkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount); map 607 fs/xfs/libxfs/xfs_attr_remote.c lblkno += map.br_blockcount; map 608 fs/xfs/libxfs/xfs_attr_remote.c blkcnt -= map.br_blockcount; map 19 fs/xfs/libxfs/xfs_bit.c xfs_bitmap_empty(uint *map, uint size) map 24 fs/xfs/libxfs/xfs_bit.c if (map[i] != 0) map 36 fs/xfs/libxfs/xfs_bit.c xfs_contig_bits(uint *map, uint size, uint start_bit) map 38 fs/xfs/libxfs/xfs_bit.c uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT); map 75 fs/xfs/libxfs/xfs_bit.c int xfs_next_bit(uint *map, uint size, uint start_bit) map 77 fs/xfs/libxfs/xfs_bit.c uint * p = ((unsigned int *) map) + (start_bit >> BIT_TO_WORD_SHIFT); map 67 fs/xfs/libxfs/xfs_bit.h extern int xfs_bitmap_empty(uint *map, uint size); map 70 fs/xfs/libxfs/xfs_bit.h extern int xfs_contig_bits(uint *map, uint size, uint start_bit); map 73 fs/xfs/libxfs/xfs_bit.h extern int xfs_next_bit(uint *map, uint size, uint start_bit); map 3741 fs/xfs/libxfs/xfs_bmap.c struct xfs_bmbt_irec **map, map 3749 fs/xfs/libxfs/xfs_bmap.c xfs_bmbt_irec_t *mval = *map; map 3787 fs/xfs/libxfs/xfs_bmap.c *map = mval; map 2069 fs/xfs/libxfs/xfs_da_btree.c struct xfs_bmbt_irec map, *mapp; map 2085 fs/xfs/libxfs/xfs_da_btree.c args->total, &map, &nmap); map 2091 fs/xfs/libxfs/xfs_da_btree.c mapp = ↦ map 2137 fs/xfs/libxfs/xfs_da_btree.c if (mapp != &map) map 2475 fs/xfs/libxfs/xfs_da_btree.c struct xfs_buf_map *map; map 2482 fs/xfs/libxfs/xfs_da_btree.c map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), map 2484 fs/xfs/libxfs/xfs_da_btree.c if (!map) map 2486 fs/xfs/libxfs/xfs_da_btree.c *mapp = map; map 2490 fs/xfs/libxfs/xfs_da_btree.c map = *mapp; map 2494 fs/xfs/libxfs/xfs_da_btree.c map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock); map 2495 fs/xfs/libxfs/xfs_da_btree.c map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount); map 2514 fs/xfs/libxfs/xfs_da_btree.c struct xfs_buf_map **map, map 2524 fs/xfs/libxfs/xfs_da_btree.c ASSERT(map && *map); map 2580 fs/xfs/libxfs/xfs_da_btree.c error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs); map 2600 fs/xfs/libxfs/xfs_da_btree.c struct xfs_buf_map map; map 2606 fs/xfs/libxfs/xfs_da_btree.c mapp = ↦ map 2629 fs/xfs/libxfs/xfs_da_btree.c if (mapp != &map) map 2649 fs/xfs/libxfs/xfs_da_btree.c 
struct xfs_buf_map map; map 2655 fs/xfs/libxfs/xfs_da_btree.c mapp = ↦ map 2678 fs/xfs/libxfs/xfs_da_btree.c if (mapp != &map) map 2695 fs/xfs/libxfs/xfs_da_btree.c struct xfs_buf_map map; map 2700 fs/xfs/libxfs/xfs_da_btree.c mapp = ↦ map 2715 fs/xfs/libxfs/xfs_da_btree.c if (mapp != &map) map 63 fs/xfs/libxfs/xfs_rtbitmap.c xfs_bmbt_irec_t map; map 69 fs/xfs/libxfs/xfs_rtbitmap.c error = xfs_bmapi_read(ip, block, 1, &map, &nmap, XFS_DATA_FORK); map 73 fs/xfs/libxfs/xfs_rtbitmap.c if (nmap == 0 || !xfs_bmap_is_real_extent(&map)) map 76 fs/xfs/libxfs/xfs_rtbitmap.c ASSERT(map.br_startblock != NULLFSBLOCK); map 78 fs/xfs/libxfs/xfs_rtbitmap.c XFS_FSB_TO_DADDR(mp, map.br_startblock), map 188 fs/xfs/scrub/attr.c unsigned long *map, map 202 fs/xfs/scrub/attr.c if (find_next_bit(map, mapsize, start) < start + len) map 204 fs/xfs/scrub/attr.c bitmap_set(map, start, len); map 216 fs/xfs/scrub/attr.c unsigned long *map, map 234 fs/xfs/scrub/attr.c return bitmap_and(dstmap, freemap, map, mapsize) == 0; map 37 fs/xfs/xfs_attr_inactive.c struct xfs_bmbt_irec map; map 58 fs/xfs/xfs_attr_inactive.c &map, &nmap, XFS_BMAPI_ATTRFORK); map 63 fs/xfs/xfs_attr_inactive.c ASSERT(map.br_startblock != DELAYSTARTBLOCK); map 69 fs/xfs/xfs_attr_inactive.c if (map.br_startblock != HOLESTARTBLOCK) { map 72 fs/xfs/xfs_attr_inactive.c map.br_startblock); map 74 fs/xfs/xfs_attr_inactive.c map.br_blockcount); map 89 fs/xfs/xfs_attr_inactive.c tblkno += map.br_blockcount; map 90 fs/xfs/xfs_attr_inactive.c tblkcnt -= map.br_blockcount; map 335 fs/xfs/xfs_bmap_item.c struct xfs_map_extent *map; map 349 fs/xfs/xfs_bmap_item.c map = &buip->bui_format.bui_extents[next_extent]; map 350 fs/xfs/xfs_bmap_item.c map->me_owner = bmap->bi_owner->i_ino; map 351 fs/xfs/xfs_bmap_item.c map->me_startblock = bmap->bi_bmap.br_startblock; map 352 fs/xfs/xfs_bmap_item.c map->me_startoff = bmap->bi_bmap.br_startoff; map 353 fs/xfs/xfs_bmap_item.c map->me_len = bmap->bi_bmap.br_blockcount; map 354 fs/xfs/xfs_bmap_item.c xfs_trans_set_bmap_flags(map, bmap->bi_type, bmap->bi_whichfork, map 204 fs/xfs/xfs_buf.c struct xfs_buf_map *map, map 245 fs/xfs/xfs_buf.c bp->b_bn = map[0].bm_bn; map 248 fs/xfs/xfs_buf.c bp->b_maps[i].bm_bn = map[i].bm_bn; map 249 fs/xfs/xfs_buf.c bp->b_maps[i].bm_len = map[i].bm_len; map 250 fs/xfs/xfs_buf.c bp->b_length += map[i].bm_len; map 497 fs/xfs/xfs_buf.c const struct xfs_buf_map *map = arg->key; map 506 fs/xfs/xfs_buf.c if (bp->b_bn != map->bm_bn) map 509 fs/xfs/xfs_buf.c if (unlikely(bp->b_length != map->bm_len)) { map 570 fs/xfs/xfs_buf.c struct xfs_buf_map *map, map 578 fs/xfs/xfs_buf.c struct xfs_buf_map cmap = { .bm_bn = map[0].bm_bn }; map 585 fs/xfs/xfs_buf.c cmap.bm_len += map[i].bm_len; map 672 fs/xfs/xfs_buf.c DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); map 674 fs/xfs/xfs_buf.c error = xfs_buf_find(target, &map, 1, flags, NULL, &bp); map 688 fs/xfs/xfs_buf.c struct xfs_buf_map *map, map 696 fs/xfs/xfs_buf.c error = xfs_buf_find(target, map, nmaps, flags, NULL, &bp); map 718 fs/xfs/xfs_buf.c new_bp = _xfs_buf_alloc(target, map, nmaps, flags); map 728 fs/xfs/xfs_buf.c error = xfs_buf_find(target, map, nmaps, flags, new_bp, &bp); map 812 fs/xfs/xfs_buf.c struct xfs_buf_map *map, map 821 fs/xfs/xfs_buf.c bp = xfs_buf_get_map(target, map, nmaps, flags); map 858 fs/xfs/xfs_buf.c struct xfs_buf_map *map, map 865 fs/xfs/xfs_buf.c xfs_buf_read_map(target, map, nmaps, map 917 fs/xfs/xfs_buf.c DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks); map 920 fs/xfs/xfs_buf.c bp = _xfs_buf_alloc(target, &map, 1, 
flags & XBF_NO_IOACCT); map 1261 fs/xfs/xfs_buf.c int map, map 1271 fs/xfs/xfs_buf.c sector_t sector = bp->b_maps[map].bm_bn; map 1287 fs/xfs/xfs_buf.c size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count); map 107 fs/xfs/xfs_buf.h #define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \ map 108 fs/xfs/xfs_buf.h struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) }; map 196 fs/xfs/xfs_buf.h struct xfs_buf_map *map, int nmaps, map 199 fs/xfs/xfs_buf.h struct xfs_buf_map *map, int nmaps, map 203 fs/xfs/xfs_buf.h struct xfs_buf_map *map, int nmaps, map 212 fs/xfs/xfs_buf.h DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); map 213 fs/xfs/xfs_buf.h return xfs_buf_get_map(target, &map, 1, 0); map 224 fs/xfs/xfs_buf.h DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); map 225 fs/xfs/xfs_buf.h return xfs_buf_read_map(target, &map, 1, flags, ops); map 235 fs/xfs/xfs_buf.h DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); map 236 fs/xfs/xfs_buf.h return xfs_buf_readahead_map(target, &map, 1, ops); map 796 fs/xfs/xfs_buf_item.c uint *map) map 824 fs/xfs/xfs_buf_item.c wordp = &map[word_num]; map 246 fs/xfs/xfs_dir2_readdir.c struct xfs_bmbt_irec map; map 269 fs/xfs/xfs_dir2_readdir.c if (!xfs_iext_lookup_extent(dp, ifp, map_off, &icur, &map)) map 271 fs/xfs/xfs_dir2_readdir.c if (map.br_startoff >= last_da) map 273 fs/xfs/xfs_dir2_readdir.c xfs_trim_extent(&map, map_off, last_da - map_off); map 276 fs/xfs/xfs_dir2_readdir.c new_off = xfs_dir2_da_to_byte(geo, map.br_startoff); map 279 fs/xfs/xfs_dir2_readdir.c error = xfs_dir3_data_read(args->trans, dp, map.br_startoff, -1, &bp); map 292 fs/xfs/xfs_dir2_readdir.c *ra_blk = map.br_startoff; map 293 fs/xfs/xfs_dir2_readdir.c next_ra = map.br_startoff + geo->fsbcount; map 296 fs/xfs/xfs_dir2_readdir.c if (map.br_blockcount < geo->fsbcount && map 297 fs/xfs/xfs_dir2_readdir.c !xfs_iext_next_extent(ifp, &icur, &map)) map 299 fs/xfs/xfs_dir2_readdir.c if (map.br_startoff >= last_da) map 301 fs/xfs/xfs_dir2_readdir.c xfs_trim_extent(&map, next_ra, last_da - next_ra); map 306 fs/xfs/xfs_dir2_readdir.c next_ra = roundup((xfs_dablk_t)map.br_startoff, geo->fsbcount); map 308 fs/xfs/xfs_dir2_readdir.c next_ra < map.br_startoff + map.br_blockcount) { map 320 fs/xfs/xfs_dir2_readdir.c if (!xfs_iext_next_extent(ifp, &icur, &map)) { map 285 fs/xfs/xfs_dquot.c struct xfs_bmbt_irec map; map 309 fs/xfs/xfs_dquot.c XFS_QM_DQALLOC_SPACE_RES(mp), &map, &nmaps); map 312 fs/xfs/xfs_dquot.c ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB); map 314 fs/xfs/xfs_dquot.c ASSERT((map.br_startblock != DELAYSTARTBLOCK) && map 315 fs/xfs/xfs_dquot.c (map.br_startblock != HOLESTARTBLOCK)); map 320 fs/xfs/xfs_dquot.c dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); map 379 fs/xfs/xfs_dquot.c struct xfs_bmbt_irec map; map 400 fs/xfs/xfs_dquot.c XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0); map 406 fs/xfs/xfs_dquot.c ASSERT(map.br_blockcount >= 1); map 407 fs/xfs/xfs_dquot.c ASSERT(map.br_startblock != DELAYSTARTBLOCK); map 408 fs/xfs/xfs_dquot.c if (map.br_startblock == HOLESTARTBLOCK) map 417 fs/xfs/xfs_dquot.c dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock); map 963 fs/xfs/xfs_qm.c struct xfs_bmbt_irec *map; map 981 fs/xfs/xfs_qm.c map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), 0); map 996 fs/xfs/xfs_qm.c map, &nmaps, 0); map 1003 fs/xfs/xfs_qm.c ASSERT(map[i].br_startblock != DELAYSTARTBLOCK); map 1004 fs/xfs/xfs_qm.c ASSERT(map[i].br_blockcount); map 1007 fs/xfs/xfs_qm.c lblkno += map[i].br_blockcount; map 1009 fs/xfs/xfs_qm.c if (map[i].br_startblock == 
HOLESTARTBLOCK) map 1012 fs/xfs/xfs_qm.c firstid = (xfs_dqid_t) map[i].br_startoff * map 1018 fs/xfs/xfs_qm.c (map[i+1].br_startblock != HOLESTARTBLOCK)) { map 1019 fs/xfs/xfs_qm.c rablkcnt = map[i+1].br_blockcount; map 1020 fs/xfs/xfs_qm.c rablkno = map[i+1].br_startblock; map 1034 fs/xfs/xfs_qm.c map[i].br_startblock, map 1035 fs/xfs/xfs_qm.c map[i].br_blockcount, map 1043 fs/xfs/xfs_qm.c kmem_free(map); map 1408 fs/xfs/xfs_reflink.c struct xfs_bmbt_irec map[2]; map 1418 fs/xfs/xfs_reflink.c error = xfs_bmapi_read(ip, fbno, end - fbno, map, &nmaps, 0); map 1423 fs/xfs/xfs_reflink.c if (!xfs_bmap_is_real_extent(&map[0])) map 1426 fs/xfs/xfs_reflink.c map[1] = map[0]; map 1427 fs/xfs/xfs_reflink.c while (map[1].br_blockcount) { map 1428 fs/xfs/xfs_reflink.c agno = XFS_FSB_TO_AGNO(mp, map[1].br_startblock); map 1429 fs/xfs/xfs_reflink.c agbno = XFS_FSB_TO_AGBNO(mp, map[1].br_startblock); map 1430 fs/xfs/xfs_reflink.c aglen = map[1].br_blockcount; map 1441 fs/xfs/xfs_reflink.c fpos = XFS_FSB_TO_B(mp, map[1].br_startoff + map 1452 fs/xfs/xfs_reflink.c map[1].br_blockcount -= (rbno - agbno + rlen); map 1453 fs/xfs/xfs_reflink.c map[1].br_startoff += (rbno - agbno + rlen); map 1454 fs/xfs/xfs_reflink.c map[1].br_startblock += (rbno - agbno + rlen); map 1458 fs/xfs/xfs_reflink.c fbno = map[0].br_startoff + map[0].br_blockcount; map 384 fs/xfs/xfs_rmap_item.c struct xfs_map_extent *map; map 398 fs/xfs/xfs_rmap_item.c map = &ruip->rui_format.rui_extents[next_extent]; map 399 fs/xfs/xfs_rmap_item.c map->me_owner = rmap->ri_owner; map 400 fs/xfs/xfs_rmap_item.c map->me_startblock = rmap->ri_bmap.br_startblock; map 401 fs/xfs/xfs_rmap_item.c map->me_startoff = rmap->ri_bmap.br_startoff; map 402 fs/xfs/xfs_rmap_item.c map->me_len = rmap->ri_bmap.br_blockcount; map 403 fs/xfs/xfs_rmap_item.c xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork, map 767 fs/xfs/xfs_rtalloc.c struct xfs_bmbt_irec map; /* block map output */ map 795 fs/xfs/xfs_rtalloc.c XFS_BMAPI_METADATA, resblks, &map, map 811 fs/xfs/xfs_rtalloc.c for (bno = map.br_startoff, fsbno = map.br_startblock; map 812 fs/xfs/xfs_rtalloc.c bno < map.br_startoff + map.br_blockcount; map 848 fs/xfs/xfs_rtalloc.c oblocks = map.br_startoff + map.br_blockcount; map 174 fs/xfs/xfs_trans.h struct xfs_buf_map *map, int nmaps, map 185 fs/xfs/xfs_trans.h DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); map 186 fs/xfs/xfs_trans.h return xfs_trans_get_buf_map(tp, target, &map, 1, flags); map 192 fs/xfs/xfs_trans.h struct xfs_buf_map *map, int nmaps, map 208 fs/xfs/xfs_trans.h DEFINE_SINGLE_BUF_MAP(map, blkno, numblks); map 209 fs/xfs/xfs_trans.h return xfs_trans_read_buf_map(mp, tp, target, &map, 1, map 26 fs/xfs/xfs_trans_buf.c struct xfs_buf_map *map, map 35 fs/xfs/xfs_trans_buf.c len += map[i].bm_len; map 41 fs/xfs/xfs_trans_buf.c XFS_BUF_ADDR(blip->bli_buf) == map[0].bm_bn && map 119 fs/xfs/xfs_trans_buf.c struct xfs_buf_map *map, map 127 fs/xfs/xfs_trans_buf.c return xfs_buf_get_map(target, map, nmaps, flags); map 135 fs/xfs/xfs_trans_buf.c bp = xfs_trans_buf_item_match(tp, target, map, nmaps); map 152 fs/xfs/xfs_trans_buf.c bp = xfs_buf_get_map(target, map, nmaps, flags); map 227 fs/xfs/xfs_trans_buf.c struct xfs_buf_map *map, map 247 fs/xfs/xfs_trans_buf.c bp = xfs_trans_buf_item_match(tp, target, map, nmaps); map 301 fs/xfs/xfs_trans_buf.c bp = xfs_buf_read_map(target, map, nmaps, flags, ops); map 83 include/drm/drm_gem_vram_helper.h void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, map 154 include/drm/drm_legacy.h struct 
drm_local_map *map; /**< mapping */ map 163 include/drm/drm_legacy.h void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map); map 164 include/drm/drm_legacy.h int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map); map 195 include/drm/drm_legacy.h void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev); map 196 include/drm/drm_legacy.h void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); map 197 include/drm/drm_legacy.h void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev); map 16 include/drm/drm_os_linux.h #define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset)) map 18 include/drm/drm_os_linux.h #define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset)) map 20 include/drm/drm_os_linux.h #define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset)) map 22 include/drm/drm_os_linux.h #define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset)) map 24 include/drm/drm_os_linux.h #define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset)) map 26 include/drm/drm_os_linux.h #define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset)) map 29 include/drm/drm_os_linux.h #define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset)) map 31 include/drm/drm_os_linux.h #define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset)) map 677 include/drm/ttm/ttm_bo_api.h static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map, map 680 include/drm/ttm/ttm_bo_api.h *is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK); map 681 include/drm/ttm/ttm_bo_api.h return map->virtual; map 701 include/drm/ttm/ttm_bo_api.h unsigned long num_pages, struct ttm_bo_kmap_obj *map); map 710 include/drm/ttm/ttm_bo_api.h void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map); map 222 include/linux/acpi.h void __acpi_unmap_table(void __iomem *map, unsigned long size); map 146 include/linux/bitmap.h extern void __bitmap_set(unsigned long *map, unsigned int start, int len); map 147 include/linux/bitmap.h extern void __bitmap_clear(unsigned long *map, unsigned int start, int len); map 149 include/linux/bitmap.h extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map, map 169 include/linux/bitmap.h bitmap_find_next_zero_area(unsigned long *map, map 175 include/linux/bitmap.h return bitmap_find_next_zero_area_off(map, size, start, nr, map 389 include/linux/bitmap.h static __always_inline void bitmap_set(unsigned long *map, unsigned int start, map 393 include/linux/bitmap.h __set_bit(start, map); map 398 include/linux/bitmap.h memset((char *)map + start / 8, 0xff, nbits / 8); map 400 include/linux/bitmap.h __bitmap_set(map, start, nbits); map 403 include/linux/bitmap.h static __always_inline void bitmap_clear(unsigned long *map, unsigned int start, map 407 include/linux/bitmap.h __clear_bit(start, map); map 412 include/linux/bitmap.h memset((char *)map + start / 8, 0, nbits / 8); map 414 include/linux/bitmap.h __bitmap_clear(map, start, nbits); map 8 include/linux/blk-mq-rdma.h int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map, map 100 include/linux/blk-mq.h struct blk_mq_queue_map map[HCTX_MAX_TYPES]; map 47 include/linux/bpf-cgroup.h struct bpf_cgroup_storage_map *map; map 136 include/linux/bpf-cgroup.h struct bpf_map *map) map 138 include/linux/bpf-cgroup.h if (map->map_type == 
BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) map 160 include/linux/bpf-cgroup.h int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); map 161 include/linux/bpf-cgroup.h void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); map 163 include/linux/bpf-cgroup.h int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); map 164 include/linux/bpf-cgroup.h int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, map 364 include/linux/bpf-cgroup.h struct bpf_map *map) { return 0; } map 366 include/linux/bpf-cgroup.h struct bpf_map *map) {} map 371 include/linux/bpf-cgroup.h static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, map 375 include/linux/bpf-cgroup.h static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, map 35 include/linux/bpf.h void (*map_release)(struct bpf_map *map, struct file *map_file); map 36 include/linux/bpf.h void (*map_free)(struct bpf_map *map); map 37 include/linux/bpf.h int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); map 38 include/linux/bpf.h void (*map_release_uref)(struct bpf_map *map); map 39 include/linux/bpf.h void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); map 42 include/linux/bpf.h void *(*map_lookup_elem)(struct bpf_map *map, void *key); map 43 include/linux/bpf.h int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); map 44 include/linux/bpf.h int (*map_delete_elem)(struct bpf_map *map, void *key); map 45 include/linux/bpf.h int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); map 46 include/linux/bpf.h int (*map_pop_elem)(struct bpf_map *map, void *value); map 47 include/linux/bpf.h int (*map_peek_elem)(struct bpf_map *map, void *value); map 50 include/linux/bpf.h void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, map 53 include/linux/bpf.h u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); map 55 include/linux/bpf.h void (*map_seq_show_elem)(struct bpf_map *map, void *key, map 57 include/linux/bpf.h int (*map_check_btf)(const struct bpf_map *map, map 63 include/linux/bpf.h int (*map_direct_value_addr)(const struct bpf_map *map, map 65 include/linux/bpf.h int (*map_direct_value_meta)(const struct bpf_map *map, map 108 include/linux/bpf.h static inline bool map_value_has_spin_lock(const struct bpf_map *map) map 110 include/linux/bpf.h return map->spin_lock_off >= 0; map 113 include/linux/bpf.h static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) map 115 include/linux/bpf.h if (likely(!map_value_has_spin_lock(map))) map 117 include/linux/bpf.h *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = map 122 include/linux/bpf.h static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) map 124 include/linux/bpf.h if (unlikely(map_value_has_spin_lock(map))) { map 125 include/linux/bpf.h u32 off = map->spin_lock_off; map 130 include/linux/bpf.h map->value_size - off - sizeof(struct bpf_spin_lock)); map 132 include/linux/bpf.h memcpy(dst, src, map->value_size); map 135 include/linux/bpf.h void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, map 142 include/linux/bpf.h int (*map_get_next_key)(struct bpf_offloaded_map *map, map 144 include/linux/bpf.h int (*map_lookup_elem)(struct bpf_offloaded_map *map, map 146 include/linux/bpf.h int (*map_update_elem)(struct bpf_offloaded_map *map, map 148 include/linux/bpf.h int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); map 152 
include/linux/bpf.h struct bpf_map map; map 159 include/linux/bpf.h static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) map 161 include/linux/bpf.h return container_of(map, struct bpf_offloaded_map, map); map 164 include/linux/bpf.h static inline bool bpf_map_offload_neutral(const struct bpf_map *map) map 166 include/linux/bpf.h return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; map 169 include/linux/bpf.h static inline bool bpf_map_support_seq_show(const struct bpf_map *map) map 171 include/linux/bpf.h return map->btf && map->ops->map_seq_show_elem; map 174 include/linux/bpf.h int map_check_no_btf(const struct bpf_map *map, map 427 include/linux/bpf.h struct bpf_map map; map 455 include/linux/bpf.h static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) map 457 include/linux/bpf.h u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); map 496 include/linux/bpf.h u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, map 648 include/linux/bpf.h void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); map 652 include/linux/bpf.h struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); map 653 include/linux/bpf.h struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map, map 655 include/linux/bpf.h void bpf_map_put_with_uref(struct bpf_map *map); map 656 include/linux/bpf.h void bpf_map_put(struct bpf_map *map); map 657 include/linux/bpf.h int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); map 658 include/linux/bpf.h void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); map 665 include/linux/bpf.h void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); map 669 include/linux/bpf.h int bpf_map_new_fd(struct bpf_map *map, int flags); map 675 include/linux/bpf.h int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); map 676 include/linux/bpf.h int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); map 677 include/linux/bpf.h int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, map 679 include/linux/bpf.h int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, map 682 include/linux/bpf.h int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); map 684 include/linux/bpf.h int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, map 686 include/linux/bpf.h int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); map 687 include/linux/bpf.h int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, map 689 include/linux/bpf.h int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); map 720 include/linux/bpf.h struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); map 721 include/linux/bpf.h struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); map 722 include/linux/bpf.h void __dev_map_flush(struct bpf_map *map); map 728 include/linux/bpf.h struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); map 729 include/linux/bpf.h void __cpu_map_flush(struct bpf_map *map); map 802 include/linux/bpf.h static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, map 808 include/linux/bpf.h static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, map 814 include/linux/bpf.h static inline void __dev_map_flush(struct bpf_map *map) map 838 include/linux/bpf.h struct bpf_cpu_map_entry 
*__cpu_map_lookup_elem(struct bpf_map *map, u32 key) map 843 include/linux/bpf.h static inline void __cpu_map_flush(struct bpf_map *map) map 895 include/linux/bpf.h int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); map 897 include/linux/bpf.h int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); map 898 include/linux/bpf.h int bpf_map_offload_update_elem(struct bpf_map *map, map 900 include/linux/bpf.h int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); map 901 include/linux/bpf.h int bpf_map_offload_get_next_key(struct bpf_map *map, map 904 include/linux/bpf.h bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); map 924 include/linux/bpf.h static inline bool bpf_map_is_dev_bound(struct bpf_map *map) map 926 include/linux/bpf.h return unlikely(map->ops == &bpf_map_offload_ops); map 930 include/linux/bpf.h void bpf_map_offload_map_free(struct bpf_map *map); map 943 include/linux/bpf.h static inline bool bpf_map_is_dev_bound(struct bpf_map *map) map 953 include/linux/bpf.h static inline void bpf_map_offload_map_free(struct bpf_map *map) map 959 include/linux/bpf.h int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, u32 which); map 962 include/linux/bpf.h static inline int sock_map_prog_update(struct bpf_map *map, map 977 include/linux/bpf.h struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key); map 978 include/linux/bpf.h int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, map 980 include/linux/bpf.h void __xsk_map_flush(struct bpf_map *map); map 983 include/linux/bpf.h static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, map 989 include/linux/bpf.h static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, map 995 include/linux/bpf.h static inline void __xsk_map_flush(struct bpf_map *map) map 1002 include/linux/bpf.h int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, map 1004 include/linux/bpf.h int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, map 1012 include/linux/bpf.h static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, map 1018 include/linux/bpf.h static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, map 191 include/linux/ceph/osdmap.h static inline bool ceph_osd_exists(struct ceph_osdmap *map, int osd) map 193 include/linux/ceph/osdmap.h return osd >= 0 && osd < map->max_osd && map 194 include/linux/ceph/osdmap.h (map->osd_state[osd] & CEPH_OSD_EXISTS); map 197 include/linux/ceph/osdmap.h static inline bool ceph_osd_is_up(struct ceph_osdmap *map, int osd) map 199 include/linux/ceph/osdmap.h return ceph_osd_exists(map, osd) && map 200 include/linux/ceph/osdmap.h (map->osd_state[osd] & CEPH_OSD_UP); map 203 include/linux/ceph/osdmap.h static inline bool ceph_osd_is_down(struct ceph_osdmap *map, int osd) map 205 include/linux/ceph/osdmap.h return !ceph_osd_is_up(map, osd); map 209 include/linux/ceph/osdmap.h extern u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd); map 211 include/linux/ceph/osdmap.h static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, map 214 include/linux/ceph/osdmap.h if (osd >= map->max_osd) map 216 include/linux/ceph/osdmap.h return &map->osd_addr[osd]; map 246 include/linux/ceph/osdmap.h struct ceph_osdmap *map); map 247 include/linux/ceph/osdmap.h extern void ceph_osdmap_destroy(struct ceph_osdmap *map); map 305 include/linux/ceph/osdmap.h extern struct 
ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, map 308 include/linux/ceph/osdmap.h extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id); map 309 include/linux/ceph/osdmap.h extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); map 310 include/linux/ceph/osdmap.h u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id); map 39 include/linux/completion.h #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ map 40 include/linux/completion.h (*({ init_completion_map(&(work), &(map)); &(work); })) map 71 include/linux/completion.h # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ map 72 include/linux/completion.h struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) map 75 include/linux/completion.h # define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) map 319 include/linux/crush/crush.h extern void crush_destroy(struct crush_map *map); map 14 include/linux/crush/mapper.h extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); map 15 include/linux/crush/mapper.h int crush_do_rule(const struct crush_map *map, map 26 include/linux/crush/mapper.h static inline size_t crush_work_size(const struct crush_map *map, map 29 include/linux/crush/mapper.h return map->working_size + result_max * 3 * sizeof(__u32); map 32 include/linux/crush/mapper.h void crush_init_workspace(const struct crush_map *map, void *v); map 175 include/linux/device-mapper.h dm_map_fn map; map 250 include/linux/dma-buf.h void *(*map)(struct dma_buf *, unsigned long); map 658 include/linux/dmaengine.h const struct dma_slave_map *map; map 139 include/linux/efi.h efi_memory_desc_t **map; map 402 include/linux/efi.h u32 map; map 422 include/linux/efi.h u64 map; map 442 include/linux/efi.h void *map; map 792 include/linux/efi.h void *map; map 1132 include/linux/efi.h #define efi_early_memdesc_ptr(map, desc_size, n) \ map 1133 include/linux/efi.h (efi_memory_desc_t *)((void *)(map) + ((n) * (desc_size))) map 1137 include/linux/efi.h for ((md) = (m)->map; \ map 1580 include/linux/efi.h struct efi_boot_memmap *map); map 1701 include/linux/efi.h struct efi_boot_memmap *map, map 1706 include/linux/efi.h struct efi_boot_memmap *map, map 357 include/linux/fb.h void (*fb_settile)(struct fb_info *info, struct fb_tilemap *map); map 585 include/linux/filter.h struct bpf_map *map; map 868 include/linux/filter.h void bpf_clear_redirect_map(struct bpf_map *map); map 34 include/linux/frontswap.h extern void __frontswap_init(unsigned type, unsigned long *map); map 54 include/linux/frontswap.h unsigned long *map) map 56 include/linux/frontswap.h p->frontswap_map = map; map 77 include/linux/frontswap.h unsigned long *map) map 115 include/linux/frontswap.h static inline void frontswap_init(unsigned type, unsigned long *map) map 118 include/linux/frontswap.h __frontswap_init(type, map); map 48 include/linux/genalloc.h typedef unsigned long (*genpool_algo_t)(unsigned long *map, map 183 include/linux/genalloc.h extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, map 187 include/linux/genalloc.h extern unsigned long gen_pool_fixed_alloc(unsigned long *map, map 191 include/linux/genalloc.h extern unsigned long gen_pool_first_fit_align(unsigned long *map, map 196 include/linux/genalloc.h extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, map 200 include/linux/genalloc.h extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, map 195 
include/linux/gpio/driver.h unsigned int *map; map 20 include/linux/iio/driver.h struct iio_map *map); map 138 include/linux/io-pgtable.h int (*map)(struct io_pgtable_ops *ops, unsigned long iova, map 27 include/linux/iommu-helper.h extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, map 258 include/linux/iommu.h int (*map)(struct iommu_domain *domain, unsigned long iova, map 87 include/linux/irqchip/arm-gic-v4.h struct its_vlpi_map *map; map 96 include/linux/irqchip/arm-gic-v4.h int its_map_vlpi(int irq, struct its_vlpi_map *map); map 97 include/linux/irqchip/arm-gic-v4.h int its_get_vlpi(int irq, struct its_vlpi_map *map); map 108 include/linux/irqdomain.h int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); map 248 include/linux/kvm_host.h static inline bool kvm_vcpu_mapped(struct kvm_host_map *map) map 250 include/linux/kvm_host.h return !!map->hva; map 410 include/linux/kvm_host.h struct hlist_head map[0]; map 761 include/linux/kvm_host.h int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); map 762 include/linux/kvm_host.h int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map, map 765 include/linux/kvm_host.h void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); map 766 include/linux/kvm_host.h int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, map 410 include/linux/lightnvm.h void *map; map 1813 include/linux/lsm_hooks.h int (*bpf_map)(struct bpf_map *map, fmode_t fmode); map 1815 include/linux/lsm_hooks.h int (*bpf_map_alloc_security)(struct bpf_map *map); map 1816 include/linux/lsm_hooks.h void (*bpf_map_free_security)(struct bpf_map *map); map 343 include/linux/mISDNif.h test_channelmap(u_int nr, u_char *map) map 346 include/linux/mISDNif.h return map[nr >> 3] & (1 << (nr & 7)); map 352 include/linux/mISDNif.h set_channelmap(u_int nr, u_char *map) map 354 include/linux/mISDNif.h map[nr >> 3] |= (1 << (nr & 7)); map 358 include/linux/mISDNif.h clear_channelmap(u_int nr, u_char *map) map 360 include/linux/mISDNif.h map[nr >> 3] &= ~(1 << (nr & 7)); map 111 include/linux/memcontrol.h unsigned long map[0]; map 442 include/linux/mfd/max14577-private.h static inline int max14577_read_reg(struct regmap *map, u8 reg, u8 *dest) map 447 include/linux/mfd/max14577-private.h ret = regmap_read(map, reg, &val); map 453 include/linux/mfd/max14577-private.h static inline int max14577_bulk_read(struct regmap *map, u8 reg, u8 *buf, map 456 include/linux/mfd/max14577-private.h return regmap_bulk_read(map, reg, buf, count); map 459 include/linux/mfd/max14577-private.h static inline int max14577_write_reg(struct regmap *map, u8 reg, u8 value) map 461 include/linux/mfd/max14577-private.h return regmap_write(map, reg, value); map 464 include/linux/mfd/max14577-private.h static inline int max14577_bulk_write(struct regmap *map, u8 reg, u8 *buf, map 467 include/linux/mfd/max14577-private.h return regmap_bulk_write(map, reg, buf, count); map 470 include/linux/mfd/max14577-private.h static inline int max14577_update_reg(struct regmap *map, u8 reg, u8 mask, map 473 include/linux/mfd/max14577-private.h return regmap_update_bits(map, reg, mask, val); map 110 include/linux/mfd/stmfx.h struct regmap *map; map 48 include/linux/mfd/stw481x.h struct regmap *map; map 639 include/linux/mlx4/device.h dma_addr_t map; map 726 include/linux/mlx4/device.h void __iomem *map; map 325 include/linux/mlx5/driver.h dma_addr_t map; map 399 include/linux/mlx5/driver.h void __iomem *map; map 424 
include/linux/mlx5/driver.h void __iomem *map; map 2794 include/linux/mm.h void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, map 109 include/linux/mmiotrace.h extern void mmio_trace_mapping(struct mmiotrace_map *map); map 1271 include/linux/mmzone.h unsigned long map = section->section_mem_map; map 1272 include/linux/mmzone.h map &= SECTION_MAP_MASK; map 1273 include/linux/mmzone.h return (struct page *)map; map 293 include/linux/mtd/cfi.h struct map_info *map, struct cfi_private *cfi); map 295 include/linux/mtd/cfi.h map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi); map 296 include/linux/mtd/cfi.h #define CMD(x) cfi_build_cmd((x), map, cfi) map 298 include/linux/mtd/cfi.h unsigned long cfi_merge_status(map_word val, struct map_info *map, map 300 include/linux/mtd/cfi.h #define MERGESTATUS(x) cfi_merge_status((x), map, cfi) map 303 include/linux/mtd/cfi.h struct map_info *map, struct cfi_private *cfi, map 306 include/linux/mtd/cfi.h static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr) map 308 include/linux/mtd/cfi.h map_word val = map_read(map, addr); map 310 include/linux/mtd/cfi.h if (map_bankwidth_is_1(map)) { map 312 include/linux/mtd/cfi.h } else if (map_bankwidth_is_2(map)) { map 313 include/linux/mtd/cfi.h return cfi16_to_cpu(map, val.x[0]); map 318 include/linux/mtd/cfi.h return cfi32_to_cpu(map, val.x[0]); map 322 include/linux/mtd/cfi.h static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr) map 324 include/linux/mtd/cfi.h map_word val = map_read(map, addr); map 326 include/linux/mtd/cfi.h if (map_bankwidth_is_1(map)) { map 328 include/linux/mtd/cfi.h } else if (map_bankwidth_is_2(map)) { map 329 include/linux/mtd/cfi.h return cfi16_to_cpu(map, val.x[0]); map 334 include/linux/mtd/cfi.h return cfi32_to_cpu(map, val.x[0]); map 340 include/linux/mtd/cfi.h int __xipram cfi_qry_present(struct map_info *map, __u32 base, map 342 include/linux/mtd/cfi.h int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map, map 344 include/linux/mtd/cfi.h void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map, map 347 include/linux/mtd/cfi.h struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size, map 379 include/linux/mtd/cfi.h typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip, map 27 include/linux/mtd/cfi_endian.h #define cpu_to_cfi8(map, x) (x) map 28 include/linux/mtd/cfi_endian.h #define cfi8_to_cpu(map, x) (x) map 29 include/linux/mtd/cfi_endian.h #define cpu_to_cfi16(map, x) _cpu_to_cfi(16, (map)->swap, (x)) map 30 include/linux/mtd/cfi_endian.h #define cpu_to_cfi32(map, x) _cpu_to_cfi(32, (map)->swap, (x)) map 31 include/linux/mtd/cfi_endian.h #define cpu_to_cfi64(map, x) _cpu_to_cfi(64, (map)->swap, (x)) map 32 include/linux/mtd/cfi_endian.h #define cfi16_to_cpu(map, x) _cfi_to_cpu(16, (map)->swap, (x)) map 33 include/linux/mtd/cfi_endian.h #define cfi32_to_cpu(map, x) _cfi_to_cpu(32, (map)->swap, (x)) map 34 include/linux/mtd/cfi_endian.h #define cfi64_to_cpu(map, x) _cfi_to_cpu(64, (map)->swap, (x)) map 17 include/linux/mtd/gen_probe.h int (*probe_chip)(struct map_info *map, __u32 base, map 21 include/linux/mtd/gen_probe.h struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp); map 26 include/linux/mtd/hyperbus.h struct map_info map; map 22 include/linux/mtd/map.h #define map_bankwidth(map) 1 map 23 include/linux/mtd/map.h #define map_bankwidth_is_1(map) (map_bankwidth(map) == 1) map 24 
include/linux/mtd/map.h #define map_bankwidth_is_large(map) (0) map 25 include/linux/mtd/map.h #define map_words(map) (1) map 28 include/linux/mtd/map.h #define map_bankwidth_is_1(map) (0) map 34 include/linux/mtd/map.h # define map_bankwidth(map) ((map)->bankwidth) map 36 include/linux/mtd/map.h # define map_bankwidth(map) 2 map 37 include/linux/mtd/map.h # define map_bankwidth_is_large(map) (0) map 38 include/linux/mtd/map.h # define map_words(map) (1) map 40 include/linux/mtd/map.h #define map_bankwidth_is_2(map) (map_bankwidth(map) == 2) map 44 include/linux/mtd/map.h #define map_bankwidth_is_2(map) (0) map 50 include/linux/mtd/map.h # define map_bankwidth(map) ((map)->bankwidth) map 52 include/linux/mtd/map.h # define map_bankwidth(map) 4 map 53 include/linux/mtd/map.h # define map_bankwidth_is_large(map) (0) map 54 include/linux/mtd/map.h # define map_words(map) (1) map 56 include/linux/mtd/map.h #define map_bankwidth_is_4(map) (map_bankwidth(map) == 4) map 60 include/linux/mtd/map.h #define map_bankwidth_is_4(map) (0) map 66 include/linux/mtd/map.h #define map_calc_words(map) ((map_bankwidth(map) + (sizeof(unsigned long)-1)) / sizeof(unsigned long)) map 71 include/linux/mtd/map.h # define map_bankwidth(map) ((map)->bankwidth) map 74 include/linux/mtd/map.h # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) map 76 include/linux/mtd/map.h # define map_words(map) map_calc_words(map) map 79 include/linux/mtd/map.h # define map_bankwidth(map) 8 map 80 include/linux/mtd/map.h # define map_bankwidth_is_large(map) (BITS_PER_LONG < 64) map 81 include/linux/mtd/map.h # define map_words(map) map_calc_words(map) map 83 include/linux/mtd/map.h #define map_bankwidth_is_8(map) (map_bankwidth(map) == 8) map 87 include/linux/mtd/map.h #define map_bankwidth_is_8(map) (0) map 93 include/linux/mtd/map.h # define map_bankwidth(map) ((map)->bankwidth) map 95 include/linux/mtd/map.h # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) map 97 include/linux/mtd/map.h # define map_words(map) map_calc_words(map) map 99 include/linux/mtd/map.h # define map_bankwidth(map) 16 map 100 include/linux/mtd/map.h # define map_bankwidth_is_large(map) (1) map 101 include/linux/mtd/map.h # define map_words(map) map_calc_words(map) map 103 include/linux/mtd/map.h #define map_bankwidth_is_16(map) (map_bankwidth(map) == 16) map 107 include/linux/mtd/map.h #define map_bankwidth_is_16(map) (0) map 113 include/linux/mtd/map.h # define map_bankwidth(map) ((map)->bankwidth) map 115 include/linux/mtd/map.h # define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8) map 117 include/linux/mtd/map.h # define map_words(map) map_calc_words(map) map 118 include/linux/mtd/map.h #define map_bankwidth_is_32(map) (map_bankwidth(map) == 32) map 122 include/linux/mtd/map.h #define map_bankwidth_is_32(map) (0) map 129 include/linux/mtd/map.h static inline int map_bankwidth(void *map) map 134 include/linux/mtd/map.h #define map_bankwidth_is_large(map) (0) map 135 include/linux/mtd/map.h #define map_words(map) (0) map 240 include/linux/mtd/map.h struct mtd_info *(*probe)(struct map_info *map); map 250 include/linux/mtd/map.h struct mtd_info *do_map_probe(const char *name, struct map_info *map); map 253 include/linux/mtd/map.h #define ENABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 1); } while (0) map 254 include/linux/mtd/map.h #define DISABLE_VPP(map) do { if (map->set_vpp) map->set_vpp(map, 0); } while (0) map 256 include/linux/mtd/map.h #define 
INVALIDATE_CACHED_RANGE(map, from, size) \ map 257 include/linux/mtd/map.h do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) map 259 include/linux/mtd/map.h #define map_word_equal(map, val1, val2) \ map 262 include/linux/mtd/map.h for (i = 0; i < map_words(map); i++) \ map 270 include/linux/mtd/map.h #define map_word_and(map, val1, val2) \ map 274 include/linux/mtd/map.h for (i = 0; i < map_words(map); i++) \ map 279 include/linux/mtd/map.h #define map_word_clr(map, val1, val2) \ map 283 include/linux/mtd/map.h for (i = 0; i < map_words(map); i++) \ map 288 include/linux/mtd/map.h #define map_word_or(map, val1, val2) \ map 292 include/linux/mtd/map.h for (i = 0; i < map_words(map); i++) \ map 297 include/linux/mtd/map.h #define map_word_andequal(map, val1, val2, val3) \ map 300 include/linux/mtd/map.h for (i = 0; i < map_words(map); i++) { \ map 309 include/linux/mtd/map.h #define map_word_bitsset(map, val1, val2) \ map 312 include/linux/mtd/map.h for (i = 0; i < map_words(map); i++) { \ map 321 include/linux/mtd/map.h static inline map_word map_word_load(struct map_info *map, const void *ptr) map 325 include/linux/mtd/map.h if (map_bankwidth_is_1(map)) map 327 include/linux/mtd/map.h else if (map_bankwidth_is_2(map)) map 329 include/linux/mtd/map.h else if (map_bankwidth_is_4(map)) map 332 include/linux/mtd/map.h else if (map_bankwidth_is_8(map)) map 335 include/linux/mtd/map.h else if (map_bankwidth_is_large(map)) map 336 include/linux/mtd/map.h memcpy(r.x, ptr, map->bankwidth); map 343 include/linux/mtd/map.h static inline map_word map_word_load_partial(struct map_info *map, map_word orig, const unsigned char *buf, int start, int len) map 347 include/linux/mtd/map.h if (map_bankwidth_is_large(map)) { map 358 include/linux/mtd/map.h bitpos = (map_bankwidth(map) - 1 - i) * 8; map 373 include/linux/mtd/map.h static inline map_word map_word_ff(struct map_info *map) map 378 include/linux/mtd/map.h if (map_bankwidth(map) < MAP_FF_LIMIT) { map 379 include/linux/mtd/map.h int bw = 8 * map_bankwidth(map); map 383 include/linux/mtd/map.h for (i = 0; i < map_words(map); i++) map 389 include/linux/mtd/map.h static inline map_word inline_map_read(struct map_info *map, unsigned long ofs) map 393 include/linux/mtd/map.h if (map_bankwidth_is_1(map)) map 394 include/linux/mtd/map.h r.x[0] = __raw_readb(map->virt + ofs); map 395 include/linux/mtd/map.h else if (map_bankwidth_is_2(map)) map 396 include/linux/mtd/map.h r.x[0] = __raw_readw(map->virt + ofs); map 397 include/linux/mtd/map.h else if (map_bankwidth_is_4(map)) map 398 include/linux/mtd/map.h r.x[0] = __raw_readl(map->virt + ofs); map 400 include/linux/mtd/map.h else if (map_bankwidth_is_8(map)) map 401 include/linux/mtd/map.h r.x[0] = __raw_readq(map->virt + ofs); map 403 include/linux/mtd/map.h else if (map_bankwidth_is_large(map)) map 404 include/linux/mtd/map.h memcpy_fromio(r.x, map->virt + ofs, map->bankwidth); map 411 include/linux/mtd/map.h static inline void inline_map_write(struct map_info *map, const map_word datum, unsigned long ofs) map 413 include/linux/mtd/map.h if (map_bankwidth_is_1(map)) map 414 include/linux/mtd/map.h __raw_writeb(datum.x[0], map->virt + ofs); map 415 include/linux/mtd/map.h else if (map_bankwidth_is_2(map)) map 416 include/linux/mtd/map.h __raw_writew(datum.x[0], map->virt + ofs); map 417 include/linux/mtd/map.h else if (map_bankwidth_is_4(map)) map 418 include/linux/mtd/map.h __raw_writel(datum.x[0], map->virt + ofs); map 420 include/linux/mtd/map.h else if (map_bankwidth_is_8(map)) 
map 421 include/linux/mtd/map.h __raw_writeq(datum.x[0], map->virt + ofs); map 423 include/linux/mtd/map.h else if (map_bankwidth_is_large(map)) map 424 include/linux/mtd/map.h memcpy_toio(map->virt+ofs, datum.x, map->bankwidth); map 430 include/linux/mtd/map.h static inline void inline_map_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) map 432 include/linux/mtd/map.h if (map->cached) map 433 include/linux/mtd/map.h memcpy(to, (char *)map->cached + from, len); map 435 include/linux/mtd/map.h memcpy_fromio(to, map->virt + from, len); map 438 include/linux/mtd/map.h static inline void inline_map_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) map 440 include/linux/mtd/map.h memcpy_toio(map->virt + to, from, len); map 444 include/linux/mtd/map.h #define map_read(map, ofs) (map)->read(map, ofs) map 445 include/linux/mtd/map.h #define map_copy_from(map, to, from, len) (map)->copy_from(map, to, from, len) map 446 include/linux/mtd/map.h #define map_write(map, datum, ofs) (map)->write(map, datum, ofs) map 447 include/linux/mtd/map.h #define map_copy_to(map, to, from, len) (map)->copy_to(map, to, from, len) map 450 include/linux/mtd/map.h #define map_is_linear(map) (map->phys != NO_XIP) map 453 include/linux/mtd/map.h #define map_read(map, ofs) inline_map_read(map, ofs) map 454 include/linux/mtd/map.h #define map_copy_from(map, to, from, len) inline_map_copy_from(map, to, from, len) map 455 include/linux/mtd/map.h #define map_write(map, datum, ofs) inline_map_write(map, datum, ofs) map 456 include/linux/mtd/map.h #define map_copy_to(map, to, from, len) inline_map_copy_to(map, to, from, len) map 459 include/linux/mtd/map.h #define simple_map_init(map) BUG_ON(!map_bankwidth_supported((map)->bankwidth)) map 460 include/linux/mtd/map.h #define map_is_linear(map) ({ (void)(map); 1; }) map 100 include/linux/mtd/pfow.h static inline void send_pfow_command(struct map_info *map, map 104 include/linux/mtd/pfow.h int bits_per_chip = map_bankwidth(map) * 8; map 106 include/linux/mtd/pfow.h map_write(map, CMD(cmd_code), map->pfow_base + PFOW_COMMAND_CODE); map 107 include/linux/mtd/pfow.h map_write(map, CMD(adr & ((1<<bits_per_chip) - 1)), map 108 include/linux/mtd/pfow.h map->pfow_base + PFOW_COMMAND_ADDRESS_L); map 109 include/linux/mtd/pfow.h map_write(map, CMD(adr>>bits_per_chip), map 110 include/linux/mtd/pfow.h map->pfow_base + PFOW_COMMAND_ADDRESS_H); map 112 include/linux/mtd/pfow.h map_write(map, CMD(len & ((1<<bits_per_chip) - 1)), map 113 include/linux/mtd/pfow.h map->pfow_base + PFOW_DATA_COUNT_L); map 114 include/linux/mtd/pfow.h map_write(map, CMD(len>>bits_per_chip), map 115 include/linux/mtd/pfow.h map->pfow_base + PFOW_DATA_COUNT_H); map 118 include/linux/mtd/pfow.h map_write(map, *datum, map->pfow_base + PFOW_COMMAND_DATA); map 121 include/linux/mtd/pfow.h map_write(map, CMD(LPDDR_START_EXECUTION), map 122 include/linux/mtd/pfow.h map->pfow_base + PFOW_COMMAND_EXECUTE); map 79 include/linux/mtd/qinfo.h static inline map_word lpddr_build_cmd(u_long cmd, struct map_info *map) map 86 include/linux/mtd/qinfo.h #define CMD(x) lpddr_build_cmd(x, map) map 298 include/linux/netdevice.h struct ifmap map; map 1271 include/linux/netdevice.h struct ifmap *map); map 154 include/linux/pinctrl/machine.h extern int pinctrl_register_mappings(const struct pinctrl_map *map, map 159 include/linux/pinctrl/machine.h static inline int pinctrl_register_mappings(const struct pinctrl_map *map, map 185 include/linux/pinctrl/pinconf-generic.h struct device_node 
*np, struct pinctrl_map **map, map 189 include/linux/pinctrl/pinconf-generic.h struct device_node *np_config, struct pinctrl_map **map, map 192 include/linux/pinctrl/pinconf-generic.h struct pinctrl_map *map, unsigned num_maps); map 196 include/linux/pinctrl/pinconf-generic.h struct pinctrl_map **map, unsigned *num_maps) map 198 include/linux/pinctrl/pinconf-generic.h return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, map 204 include/linux/pinctrl/pinconf-generic.h struct pinctrl_map **map, unsigned *num_maps) map 206 include/linux/pinctrl/pinconf-generic.h return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, map 212 include/linux/pinctrl/pinconf-generic.h struct pinctrl_map **map, unsigned *num_maps) map 218 include/linux/pinctrl/pinconf-generic.h return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps, map 100 include/linux/pinctrl/pinctrl.h struct pinctrl_map **map, unsigned *num_maps); map 102 include/linux/pinctrl/pinctrl.h struct pinctrl_map *map, unsigned num_maps); map 206 include/linux/rcupdate.h static inline void rcu_lock_acquire(struct lockdep_map *map) map 208 include/linux/rcupdate.h lock_acquire(map, 0, 0, 2, 0, NULL, _THIS_IP_); map 211 include/linux/rcupdate.h static inline void rcu_lock_release(struct lockdep_map *map) map 213 include/linux/rcupdate.h lock_release(map, 1, _THIS_IP_); map 74 include/linux/regmap.h #define regmap_update_bits(map, reg, mask, val) \ map 75 include/linux/regmap.h regmap_update_bits_base(map, reg, mask, val, NULL, false, false) map 76 include/linux/regmap.h #define regmap_update_bits_async(map, reg, mask, val)\ map 77 include/linux/regmap.h regmap_update_bits_base(map, reg, mask, val, NULL, true, false) map 78 include/linux/regmap.h #define regmap_update_bits_check(map, reg, mask, val, change)\ map 79 include/linux/regmap.h regmap_update_bits_base(map, reg, mask, val, change, false, false) map 80 include/linux/regmap.h #define regmap_update_bits_check_async(map, reg, mask, val, change)\ map 81 include/linux/regmap.h regmap_update_bits_base(map, reg, mask, val, change, true, false) map 83 include/linux/regmap.h #define regmap_write_bits(map, reg, mask, val) \ map 84 include/linux/regmap.h regmap_update_bits_base(map, reg, mask, val, NULL, false, true) map 123 include/linux/regmap.h #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ map 131 include/linux/regmap.h __ret = regmap_read((map), (addr), &(val)); \ map 138 include/linux/regmap.h __ret = regmap_read((map), (addr), &(val)); \ map 666 include/linux/regmap.h int regmap_attach_dev(struct device *dev, struct regmap *map, map 1002 include/linux/regmap.h int regmap_mmio_attach_clk(struct regmap *map, struct clk *clk); map 1003 include/linux/regmap.h void regmap_mmio_detach_clk(struct regmap *map); map 1004 include/linux/regmap.h void regmap_exit(struct regmap *map); map 1005 include/linux/regmap.h int regmap_reinit_cache(struct regmap *map, map 1008 include/linux/regmap.h struct device *regmap_get_device(struct regmap *map); map 1009 include/linux/regmap.h int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); map 1010 include/linux/regmap.h int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val); map 1011 include/linux/regmap.h int regmap_raw_write(struct regmap *map, unsigned int reg, map 1013 include/linux/regmap.h int regmap_noinc_write(struct regmap *map, unsigned int reg, map 1015 include/linux/regmap.h int regmap_bulk_write(struct regmap *map, unsigned int reg, 
const void *val, map 1017 include/linux/regmap.h int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs, map 1019 include/linux/regmap.h int regmap_multi_reg_write_bypassed(struct regmap *map, map 1022 include/linux/regmap.h int regmap_raw_write_async(struct regmap *map, unsigned int reg, map 1024 include/linux/regmap.h int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); map 1025 include/linux/regmap.h int regmap_raw_read(struct regmap *map, unsigned int reg, map 1027 include/linux/regmap.h int regmap_noinc_read(struct regmap *map, unsigned int reg, map 1029 include/linux/regmap.h int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, map 1031 include/linux/regmap.h int regmap_update_bits_base(struct regmap *map, unsigned int reg, map 1034 include/linux/regmap.h int regmap_get_val_bytes(struct regmap *map); map 1035 include/linux/regmap.h int regmap_get_max_register(struct regmap *map); map 1036 include/linux/regmap.h int regmap_get_reg_stride(struct regmap *map); map 1037 include/linux/regmap.h int regmap_async_complete(struct regmap *map); map 1038 include/linux/regmap.h bool regmap_can_raw_write(struct regmap *map); map 1039 include/linux/regmap.h size_t regmap_get_raw_read_max(struct regmap *map); map 1040 include/linux/regmap.h size_t regmap_get_raw_write_max(struct regmap *map); map 1042 include/linux/regmap.h int regcache_sync(struct regmap *map); map 1043 include/linux/regmap.h int regcache_sync_region(struct regmap *map, unsigned int min, map 1045 include/linux/regmap.h int regcache_drop_region(struct regmap *map, unsigned int min, map 1047 include/linux/regmap.h void regcache_cache_only(struct regmap *map, bool enable); map 1048 include/linux/regmap.h void regcache_cache_bypass(struct regmap *map, bool enable); map 1049 include/linux/regmap.h void regcache_mark_dirty(struct regmap *map); map 1051 include/linux/regmap.h bool regmap_check_range_table(struct regmap *map, unsigned int reg, map 1054 include/linux/regmap.h int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs, map 1056 include/linux/regmap.h int regmap_parse_val(struct regmap *map, const void *buf, map 1265 include/linux/regmap.h int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, map 1270 include/linux/regmap.h int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq, map 1290 include/linux/regmap.h static inline int regmap_write(struct regmap *map, unsigned int reg, map 1297 include/linux/regmap.h static inline int regmap_write_async(struct regmap *map, unsigned int reg, map 1304 include/linux/regmap.h static inline int regmap_raw_write(struct regmap *map, unsigned int reg, map 1311 include/linux/regmap.h static inline int regmap_raw_write_async(struct regmap *map, unsigned int reg, map 1318 include/linux/regmap.h static inline int regmap_noinc_write(struct regmap *map, unsigned int reg, map 1325 include/linux/regmap.h static inline int regmap_bulk_write(struct regmap *map, unsigned int reg, map 1332 include/linux/regmap.h static inline int regmap_read(struct regmap *map, unsigned int reg, map 1339 include/linux/regmap.h static inline int regmap_raw_read(struct regmap *map, unsigned int reg, map 1346 include/linux/regmap.h static inline int regmap_noinc_read(struct regmap *map, unsigned int reg, map 1353 include/linux/regmap.h static inline int regmap_bulk_read(struct regmap *map, unsigned int reg, map 1360 include/linux/regmap.h static inline int regmap_update_bits_base(struct regmap *map, 
unsigned int reg, map 1385 include/linux/regmap.h static inline int regmap_get_val_bytes(struct regmap *map) map 1391 include/linux/regmap.h static inline int regmap_get_max_register(struct regmap *map) map 1397 include/linux/regmap.h static inline int regmap_get_reg_stride(struct regmap *map) map 1403 include/linux/regmap.h static inline int regcache_sync(struct regmap *map) map 1409 include/linux/regmap.h static inline int regcache_sync_region(struct regmap *map, unsigned int min, map 1416 include/linux/regmap.h static inline int regcache_drop_region(struct regmap *map, unsigned int min, map 1423 include/linux/regmap.h static inline void regcache_cache_only(struct regmap *map, bool enable) map 1428 include/linux/regmap.h static inline void regcache_cache_bypass(struct regmap *map, bool enable) map 1433 include/linux/regmap.h static inline void regcache_mark_dirty(struct regmap *map) map 1438 include/linux/regmap.h static inline void regmap_async_complete(struct regmap *map) map 1443 include/linux/regmap.h static inline int regmap_register_patch(struct regmap *map, map 1451 include/linux/regmap.h static inline int regmap_parse_val(struct regmap *map, const void *buf, map 1464 include/linux/regmap.h static inline struct device *regmap_get_device(struct regmap *map) map 67 include/linux/sbitmap.h struct sbitmap_word *map; map 164 include/linux/sbitmap.h kfree(sb->map); map 165 include/linux/sbitmap.h sb->map = NULL; map 259 include/linux/sbitmap.h sb->map[index].depth - nr, map 263 include/linux/sbitmap.h word = sb->map[index].word & ~sb->map[index].cleared; map 304 include/linux/sbitmap.h return &sb->map[SB_NR_TO_INDEX(sb, bitnr)].word; map 327 include/linux/sbitmap.h unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared; map 1859 include/linux/security.h extern int security_bpf_map(struct bpf_map *map, fmode_t fmode); map 1861 include/linux/security.h extern int security_bpf_map_alloc(struct bpf_map *map); map 1862 include/linux/security.h extern void security_bpf_map_free(struct bpf_map *map); map 1872 include/linux/security.h static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode) map 1882 include/linux/security.h static inline int security_bpf_map_alloc(struct bpf_map *map) map 1887 include/linux/security.h static inline void security_bpf_map_free(struct bpf_map *map) map 68 include/linux/skmsg.h struct bpf_map *map; map 26 include/linux/spinlock_api_smp.h _raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) map 109 include/linux/svga.h void svga_settile(struct fb_info *info, struct fb_tilemap *map); map 44 include/linux/uio_driver.h struct uio_map *map; map 103 include/linux/zpool.h void *(*map)(void *pool, unsigned long handle, map 126 include/media/rc-map.h struct rc_map map; map 136 include/media/rc-map.h int rc_map_register(struct rc_map_list *map); map 143 include/media/rc-map.h void rc_map_unregister(struct rc_map_list *map); map 95 include/media/videobuf-core.h struct videobuf_mapping *map; map 72 include/net/cipso_ipv4.h } map; map 27 include/net/dcbnl.h u64 map[IEEE_8021QAZ_MAX_TCS]; map 33 include/net/dcbnl.h u8 map[64]; map 87 include/net/sctp/tsnmap.h void sctp_tsnmap_free(struct sctp_tsnmap *map); map 102 include/net/sctp/tsnmap.h void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn); map 105 include/net/sctp/tsnmap.h static inline __u32 sctp_tsnmap_get_ctsn(const struct sctp_tsnmap *map) map 107 include/net/sctp/tsnmap.h return map->cumulative_tsn_ack_point; map 111 include/net/sctp/tsnmap.h static inline __u32 
sctp_tsnmap_get_max_tsn_seen(const struct sctp_tsnmap *map) map 113 include/net/sctp/tsnmap.h return map->max_tsn_seen; map 117 include/net/sctp/tsnmap.h static inline __u16 sctp_tsnmap_num_dups(struct sctp_tsnmap *map) map 119 include/net/sctp/tsnmap.h return map->num_dup_tsns; map 123 include/net/sctp/tsnmap.h static inline __be32 *sctp_tsnmap_get_dups(struct sctp_tsnmap *map) map 125 include/net/sctp/tsnmap.h map->num_dup_tsns = 0; map 126 include/net/sctp/tsnmap.h return map->dup_tsns; map 130 include/net/sctp/tsnmap.h __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map, map 134 include/net/sctp/tsnmap.h __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map); map 137 include/net/sctp/tsnmap.h static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map) map 139 include/net/sctp/tsnmap.h return map->cumulative_tsn_ack_point != map->max_tsn_seen; map 145 include/net/sctp/tsnmap.h static inline void sctp_tsnmap_mark_dup(struct sctp_tsnmap *map, __u32 tsn) map 147 include/net/sctp/tsnmap.h if (map->num_dup_tsns < SCTP_MAX_DUP_TSNS) map 148 include/net/sctp/tsnmap.h map->dup_tsns[map->num_dup_tsns++] = htonl(tsn); map 75 include/net/xdp_sock.h struct xsk_map *map; map 133 include/net/xdp_sock.h void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, map 135 include/net/xdp_sock.h int xsk_map_inc(struct xsk_map *map); map 136 include/net/xdp_sock.h void xsk_map_put(struct xsk_map *map); map 85 include/pcmcia/ss.h u_char map; map 92 include/pcmcia/ss.h u_char map; map 88 include/rdma/rdmavt_mr.h struct rvt_segarray *map[0]; /* the segments */ map 175 include/rdma/rdmavt_mr.h sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; map 176 include/rdma/rdmavt_mr.h sge->length = sge->mr->map[sge->m]->segs[sge->n].length; map 488 include/rdma/rdmavt_qp.h struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES]; map 32 include/rdma/rw.h } map; map 67 include/sound/hda_chmap.h bool non_pcm, unsigned char *map); map 71 include/sound/hda_chmap.h int channels, unsigned char *map, map 1363 include/sound/pcm.h unsigned char map[15]; map 246 include/trace/events/btrfs.h const struct extent_map *map), map 248 include/trace/events/btrfs.h TP_ARGS(root, inode, map), map 250 include/trace/events/btrfs.h TP_CONDITION(map), map 268 include/trace/events/btrfs.h __entry->start = map->start; map 269 include/trace/events/btrfs.h __entry->len = map->len; map 270 include/trace/events/btrfs.h __entry->orig_start = map->orig_start; map 271 include/trace/events/btrfs.h __entry->block_start = map->block_start; map 272 include/trace/events/btrfs.h __entry->block_len = map->block_len; map 273 include/trace/events/btrfs.h __entry->flags = map->flags; map 274 include/trace/events/btrfs.h __entry->refs = refcount_read(&map->refs); map 275 include/trace/events/btrfs.h __entry->compress_type = map->compress_type; map 296 include/trace/events/btrfs.h const struct extent_map *existing, const struct extent_map *map, map 299 include/trace/events/btrfs.h TP_ARGS(fs_info, existing, map, start, len), map 313 include/trace/events/btrfs.h __entry->map_start = map->start; map 314 include/trace/events/btrfs.h __entry->map_len = map->len; map 937 include/trace/events/btrfs.h const struct map_lookup *map, u64 offset, u64 size), map 939 include/trace/events/btrfs.h TP_ARGS(fs_info, map, offset, size), map 951 include/trace/events/btrfs.h __entry->num_stripes = map->num_stripes; map 952 include/trace/events/btrfs.h __entry->type = map->type; map 953 include/trace/events/btrfs.h __entry->sub_stripes = map->sub_stripes; map 971 
include/trace/events/btrfs.h const struct map_lookup *map, u64 offset, u64 size), map 973 include/trace/events/btrfs.h TP_ARGS(fs_info, map, offset, size) map 979 include/trace/events/btrfs.h const struct map_lookup *map, u64 offset, u64 size), map 981 include/trace/events/btrfs.h TP_ARGS(fs_info, map, offset, size) map 145 include/trace/events/erofs.h TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, map 148 include/trace/events/erofs.h TP_ARGS(inode, map, flags), map 161 include/trace/events/erofs.h __entry->la = map->m_la; map 162 include/trace/events/erofs.h __entry->llen = map->m_llen; map 173 include/trace/events/erofs.h TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, map 176 include/trace/events/erofs.h TP_ARGS(inode, map, flags) map 180 include/trace/events/erofs.h TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, map 183 include/trace/events/erofs.h TP_ARGS(inode, map, flags) map 187 include/trace/events/erofs.h TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, map 190 include/trace/events/erofs.h TP_ARGS(inode, map, flags, ret), map 208 include/trace/events/erofs.h __entry->la = map->m_la; map 209 include/trace/events/erofs.h __entry->pa = map->m_pa; map 210 include/trace/events/erofs.h __entry->llen = map->m_llen; map 211 include/trace/events/erofs.h __entry->plen = map->m_plen; map 212 include/trace/events/erofs.h __entry->mflags = map->m_flags; map 225 include/trace/events/erofs.h TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, map 228 include/trace/events/erofs.h TP_ARGS(inode, map, flags, ret) map 232 include/trace/events/erofs.h TP_PROTO(struct inode *inode, struct erofs_map_blocks *map, map 235 include/trace/events/erofs.h TP_ARGS(inode, map, flags, ret) map 463 include/trace/events/ext4.h TP_PROTO(struct inode *inode, struct ext4_map_blocks *map), map 465 include/trace/events/ext4.h TP_ARGS(inode, map), map 478 include/trace/events/ext4.h __entry->lblk = map->m_lblk; map 479 include/trace/events/ext4.h __entry->len = map->m_len; map 480 include/trace/events/ext4.h __entry->flags = map->m_flags; map 1531 include/trace/events/ext4.h TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, map 1534 include/trace/events/ext4.h TP_ARGS(inode, map, ux), map 1549 include/trace/events/ext4.h __entry->m_lblk = map->m_lblk; map 1550 include/trace/events/ext4.h __entry->m_len = map->m_len; map 1569 include/trace/events/ext4.h TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, map 1572 include/trace/events/ext4.h TP_ARGS(inode, map, ux, ix), map 1590 include/trace/events/ext4.h __entry->m_lblk = map->m_lblk; map 1591 include/trace/events/ext4.h __entry->m_len = map->m_len; map 1653 include/trace/events/ext4.h TP_PROTO(struct inode *inode, unsigned flags, struct ext4_map_blocks *map, map 1656 include/trace/events/ext4.h TP_ARGS(inode, flags, map, ret), map 1673 include/trace/events/ext4.h __entry->pblk = map->m_pblk; map 1674 include/trace/events/ext4.h __entry->lblk = map->m_lblk; map 1675 include/trace/events/ext4.h __entry->len = map->m_len; map 1676 include/trace/events/ext4.h __entry->mflags = map->m_flags; map 1690 include/trace/events/ext4.h struct ext4_map_blocks *map, int ret), map 1692 include/trace/events/ext4.h TP_ARGS(inode, flags, map, ret) map 1697 include/trace/events/ext4.h struct ext4_map_blocks *map, int ret), map 1699 include/trace/events/ext4.h TP_ARGS(inode, flags, map, ret) map 1844 include/trace/events/ext4.h TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags, map 1847 
include/trace/events/ext4.h TP_ARGS(inode, map, flags, allocated, newblock), map 1864 include/trace/events/ext4.h __entry->lblk = map->m_lblk; map 1865 include/trace/events/ext4.h __entry->pblk = map->m_pblk; map 1866 include/trace/events/ext4.h __entry->len = map->m_len; map 1882 include/trace/events/ext4.h TP_PROTO(struct super_block *sb, struct ext4_map_blocks *map, int ret), map 1884 include/trace/events/ext4.h TP_ARGS(sb, map, ret), map 1897 include/trace/events/ext4.h __entry->flags = map->m_flags; map 1898 include/trace/events/ext4.h __entry->lblk = map->m_lblk; map 1899 include/trace/events/ext4.h __entry->pblk = map->m_pblk; map 1900 include/trace/events/ext4.h __entry->len = map->m_len; map 559 include/trace/events/f2fs.h TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int ret), map 561 include/trace/events/f2fs.h TP_ARGS(inode, map, ret), map 578 include/trace/events/f2fs.h __entry->m_lblk = map->m_lblk; map 579 include/trace/events/f2fs.h __entry->m_pblk = map->m_pblk; map 580 include/trace/events/f2fs.h __entry->m_len = map->m_len; map 581 include/trace/events/f2fs.h __entry->m_flags = map->m_flags; map 582 include/trace/events/f2fs.h __entry->m_seg_type = map->m_seg_type; map 583 include/trace/events/f2fs.h __entry->m_may_create = map->m_may_create; map 254 include/trace/events/hswadsp.h __field( uint32_t, map ) map 265 include/trace/events/hswadsp.h __entry->map = req->map; map 274 include/trace/events/hswadsp.h (uint32_t)__entry->bitdepth, (uint32_t)__entry->map, map 86 include/trace/events/iommu.h TRACE_EVENT(map, map 953 include/trace/events/rpcrdma.h DEFINE_MR_EVENT(map); map 87 include/trace/events/xdp.h const struct bpf_map *map, u32 map_index), map 89 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index), map 107 include/trace/events/xdp.h __entry->map_id = map ? map->id : 0; map 122 include/trace/events/xdp.h const struct bpf_map *map, u32 map_index), map 123 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index) map 130 include/trace/events/xdp.h const struct bpf_map *map, u32 map_index), map 131 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index) map 144 include/trace/events/xdp.h const struct bpf_map *map, u32 map_index), map 145 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index), map 159 include/trace/events/xdp.h const struct bpf_map *map, u32 map_index), map 160 include/trace/events/xdp.h TP_ARGS(dev, xdp, to_ifindex, err, map, map_index), map 177 include/trace/events/xdp.h #define devmap_ifindex(fwd, map) \ map 178 include/trace/events/xdp.h ((map->map_type == BPF_MAP_TYPE_DEVMAP || \ map 179 include/trace/events/xdp.h map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? 
\ map 182 include/trace/events/xdp.h #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \ map 183 include/trace/events/xdp.h trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \ map 184 include/trace/events/xdp.h 0, map, idx) map 186 include/trace/events/xdp.h #define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \ map 187 include/trace/events/xdp.h trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \ map 188 include/trace/events/xdp.h err, map, idx) map 262 include/trace/events/xdp.h TP_PROTO(const struct bpf_map *map, u32 map_index, map 267 include/trace/events/xdp.h TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err), map 281 include/trace/events/xdp.h __entry->map_id = map->id; map 79 include/uapi/linux/map_to_7segment.h static __inline__ int map_to_seg7(struct seg7_conversion_map *map, int c) map 81 include/uapi/linux/map_to_7segment.h return c >= 0 && c < sizeof(map->table) ? map->table[c] : -EINVAL; map 147 include/xen/grant_table.h gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr, map 151 include/xen/grant_table.h map->host_addr = addr; map 153 include/xen/grant_table.h map->host_addr = __pa(addr); map 155 include/xen/grant_table.h map->host_addr = addr; map 157 include/xen/grant_table.h map->flags = flags; map 158 include/xen/grant_table.h map->ref = ref; map 159 include/xen/grant_table.h map->dom = domid; map 196 include/xen/grant_table.h #define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr)) map 23 kernel/bpf/arraymap.c for (i = 0; i < array->map.max_entries; i++) { map 34 kernel/bpf/arraymap.c for (i = 0; i < array->map.max_entries; i++) { map 126 kernel/bpf/arraymap.c array->map.unpriv_array = unpriv; map 129 kernel/bpf/arraymap.c bpf_map_init_from_attr(&array->map, attr); map 130 kernel/bpf/arraymap.c bpf_map_charge_move(&array->map.memory, &mem); map 134 kernel/bpf/arraymap.c bpf_map_charge_finish(&array->map.memory); map 139 kernel/bpf/arraymap.c return &array->map; map 143 kernel/bpf/arraymap.c static void *array_map_lookup_elem(struct bpf_map *map, void *key) map 145 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 148 kernel/bpf/arraymap.c if (unlikely(index >= array->map.max_entries)) map 154 kernel/bpf/arraymap.c static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, map 157 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 159 kernel/bpf/arraymap.c if (map->max_entries != 1) map 161 kernel/bpf/arraymap.c if (off >= map->value_size) map 168 kernel/bpf/arraymap.c static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, map 171 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 175 kernel/bpf/arraymap.c if (map->max_entries != 1) map 185 kernel/bpf/arraymap.c static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) map 187 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 189 kernel/bpf/arraymap.c u32 elem_size = round_up(map->value_size, 8); map 196 kernel/bpf/arraymap.c if (map->unpriv_array) { map 197 kernel/bpf/arraymap.c *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); map 200 kernel/bpf/arraymap.c *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); map 215 kernel/bpf/arraymap.c static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key) map 217 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); 
map 220 kernel/bpf/arraymap.c if (unlikely(index >= array->map.max_entries)) map 226 kernel/bpf/arraymap.c int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) map 228 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 234 kernel/bpf/arraymap.c if (unlikely(index >= array->map.max_entries)) map 241 kernel/bpf/arraymap.c size = round_up(map->value_size, 8); map 253 kernel/bpf/arraymap.c static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) map 255 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 259 kernel/bpf/arraymap.c if (index >= array->map.max_entries) { map 264 kernel/bpf/arraymap.c if (index == array->map.max_entries - 1) map 272 kernel/bpf/arraymap.c static int array_map_update_elem(struct bpf_map *map, void *key, void *value, map 275 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 283 kernel/bpf/arraymap.c if (unlikely(index >= array->map.max_entries)) map 292 kernel/bpf/arraymap.c !map_value_has_spin_lock(map))) map 295 kernel/bpf/arraymap.c if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { map 297 kernel/bpf/arraymap.c value, map->value_size); map 302 kernel/bpf/arraymap.c copy_map_value_locked(map, val, value, false); map 304 kernel/bpf/arraymap.c copy_map_value(map, val, value); map 309 kernel/bpf/arraymap.c int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, map 312 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 322 kernel/bpf/arraymap.c if (unlikely(index >= array->map.max_entries)) map 336 kernel/bpf/arraymap.c size = round_up(map->value_size, 8); map 348 kernel/bpf/arraymap.c static int array_map_delete_elem(struct bpf_map *map, void *key) map 354 kernel/bpf/arraymap.c static void array_map_free(struct bpf_map *map) map 356 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 365 kernel/bpf/arraymap.c if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) map 371 kernel/bpf/arraymap.c static void array_map_seq_show_elem(struct bpf_map *map, void *key, map 378 kernel/bpf/arraymap.c value = array_map_lookup_elem(map, key); map 384 kernel/bpf/arraymap.c if (map->btf_key_type_id) map 386 kernel/bpf/arraymap.c btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); map 392 kernel/bpf/arraymap.c static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, map 395 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 406 kernel/bpf/arraymap.c btf_type_seq_show(map->btf, map->btf_value_type_id, map 415 kernel/bpf/arraymap.c static int array_map_check_btf(const struct bpf_map *map, map 424 kernel/bpf/arraymap.c if (map->map_type != BPF_MAP_TYPE_ARRAY || map 425 kernel/bpf/arraymap.c map->max_entries != 1) map 485 kernel/bpf/arraymap.c static void fd_array_map_free(struct bpf_map *map) map 487 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 493 kernel/bpf/arraymap.c for (i = 0; i < array->map.max_entries; i++) map 499 kernel/bpf/arraymap.c static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) map 505 kernel/bpf/arraymap.c int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) map 510 kernel/bpf/arraymap.c if (!map->ops->map_fd_sys_lookup_elem) map 514 kernel/bpf/arraymap.c elem = array_map_lookup_elem(map, key); map 516 kernel/bpf/arraymap.c *value = 
map->ops->map_fd_sys_lookup_elem(ptr); map 525 kernel/bpf/arraymap.c int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, map 528 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 535 kernel/bpf/arraymap.c if (index >= array->map.max_entries) map 539 kernel/bpf/arraymap.c new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); map 545 kernel/bpf/arraymap.c map->ops->map_fd_put_ptr(old_ptr); map 550 kernel/bpf/arraymap.c static int fd_array_map_delete_elem(struct bpf_map *map, void *key) map 552 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 556 kernel/bpf/arraymap.c if (index >= array->map.max_entries) map 561 kernel/bpf/arraymap.c map->ops->map_fd_put_ptr(old_ptr); map 568 kernel/bpf/arraymap.c static void *prog_fd_array_get_ptr(struct bpf_map *map, map 571 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 596 kernel/bpf/arraymap.c static void bpf_fd_array_map_clear(struct bpf_map *map) map 598 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 601 kernel/bpf/arraymap.c for (i = 0; i < array->map.max_entries; i++) map 602 kernel/bpf/arraymap.c fd_array_map_delete_elem(map, &i); map 605 kernel/bpf/arraymap.c static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, map 613 kernel/bpf/arraymap.c elem = array_map_lookup_elem(map, key); map 619 kernel/bpf/arraymap.c btf_type_seq_show(map->btf, map->btf_value_type_id, map 671 kernel/bpf/arraymap.c static void *perf_event_fd_array_get_ptr(struct bpf_map *map, map 702 kernel/bpf/arraymap.c static void perf_event_fd_array_release(struct bpf_map *map, map 705 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 710 kernel/bpf/arraymap.c for (i = 0; i < array->map.max_entries; i++) { map 713 kernel/bpf/arraymap.c fd_array_map_delete_elem(map, &i); map 732 kernel/bpf/arraymap.c static void *cgroup_fd_array_get_ptr(struct bpf_map *map, map 745 kernel/bpf/arraymap.c static void cgroup_fd_array_free(struct bpf_map *map) map 747 kernel/bpf/arraymap.c bpf_fd_array_map_clear(map); map 748 kernel/bpf/arraymap.c fd_array_map_free(map); map 766 kernel/bpf/arraymap.c struct bpf_map *map, *inner_map_meta; map 772 kernel/bpf/arraymap.c map = array_map_alloc(attr); map 773 kernel/bpf/arraymap.c if (IS_ERR(map)) { map 775 kernel/bpf/arraymap.c return map; map 778 kernel/bpf/arraymap.c map->inner_map_meta = inner_map_meta; map 780 kernel/bpf/arraymap.c return map; map 783 kernel/bpf/arraymap.c static void array_of_map_free(struct bpf_map *map) map 788 kernel/bpf/arraymap.c bpf_map_meta_free(map->inner_map_meta); map 789 kernel/bpf/arraymap.c bpf_fd_array_map_clear(map); map 790 kernel/bpf/arraymap.c fd_array_map_free(map); map 793 kernel/bpf/arraymap.c static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) map 795 kernel/bpf/arraymap.c struct bpf_map **inner_map = array_map_lookup_elem(map, key); map 803 kernel/bpf/arraymap.c static u32 array_of_map_gen_lookup(struct bpf_map *map, map 806 kernel/bpf/arraymap.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 807 kernel/bpf/arraymap.c u32 elem_size = round_up(map->value_size, 8); map 815 kernel/bpf/arraymap.c if (map->unpriv_array) { map 816 kernel/bpf/arraymap.c *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); map 819 kernel/bpf/arraymap.c *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); map 1459 
kernel/bpf/core.c struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; map 1460 kernel/bpf/core.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 1464 kernel/bpf/core.c if (unlikely(index >= array->map.max_entries)) map 1675 kernel/bpf/core.c struct bpf_map *map = aux->used_maps[i]; map 1678 kernel/bpf/core.c if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) map 1681 kernel/bpf/core.c array = container_of(map, struct bpf_array, map); map 2052 kernel/bpf/core.c bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, map 72 kernel/bpf/cpumap.c struct bpf_map map; map 99 kernel/bpf/cpumap.c bpf_map_init_from_attr(&cmap->map, attr); map 102 kernel/bpf/cpumap.c if (cmap->map.max_entries > NR_CPUS) { map 108 kernel/bpf/cpumap.c cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *); map 112 kernel/bpf/cpumap.c ret = bpf_map_charge_init(&cmap->map.memory, cost); map 126 kernel/bpf/cpumap.c cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries * map 128 kernel/bpf/cpumap.c cmap->map.numa_node); map 132 kernel/bpf/cpumap.c return &cmap->map; map 136 kernel/bpf/cpumap.c bpf_map_charge_finish(&cmap->map.memory); map 455 kernel/bpf/cpumap.c static int cpu_map_delete_elem(struct bpf_map *map, void *key) map 457 kernel/bpf/cpumap.c struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); map 460 kernel/bpf/cpumap.c if (key_cpu >= map->max_entries) map 468 kernel/bpf/cpumap.c static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value, map 471 kernel/bpf/cpumap.c struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); map 481 kernel/bpf/cpumap.c if (unlikely(key_cpu >= cmap->map.max_entries)) map 496 kernel/bpf/cpumap.c rcpu = __cpu_map_entry_alloc(qsize, key_cpu, map->id); map 507 kernel/bpf/cpumap.c static void cpu_map_free(struct bpf_map *map) map 509 kernel/bpf/cpumap.c struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); map 522 kernel/bpf/cpumap.c bpf_clear_redirect_map(map); map 540 kernel/bpf/cpumap.c for (i = 0; i < cmap->map.max_entries; i++) { map 555 kernel/bpf/cpumap.c struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) map 557 kernel/bpf/cpumap.c struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); map 560 kernel/bpf/cpumap.c if (key >= map->max_entries) map 567 kernel/bpf/cpumap.c static void *cpu_map_lookup_elem(struct bpf_map *map, void *key) map 570 kernel/bpf/cpumap.c __cpu_map_lookup_elem(map, *(u32 *)key); map 575 kernel/bpf/cpumap.c static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key) map 577 kernel/bpf/cpumap.c struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); map 581 kernel/bpf/cpumap.c if (index >= cmap->map.max_entries) { map 586 kernel/bpf/cpumap.c if (index == cmap->map.max_entries - 1) map 684 kernel/bpf/cpumap.c void __cpu_map_flush(struct bpf_map *map) map 686 kernel/bpf/cpumap.c struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map); map 76 kernel/bpf/devmap.c struct bpf_map map; map 126 kernel/bpf/devmap.c bpf_map_init_from_attr(&dtab->map, attr); map 132 kernel/bpf/devmap.c dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); map 138 kernel/bpf/devmap.c cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); map 142 kernel/bpf/devmap.c err = bpf_map_charge_init(&dtab->map.memory, cost); map 160 kernel/bpf/devmap.c dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * map 162 kernel/bpf/devmap.c 
dtab->map.numa_node); map 172 kernel/bpf/devmap.c bpf_map_charge_finish(&dtab->map.memory); map 198 kernel/bpf/devmap.c return &dtab->map; map 201 kernel/bpf/devmap.c static void dev_map_free(struct bpf_map *map) map 203 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 218 kernel/bpf/devmap.c bpf_clear_redirect_map(map); map 236 kernel/bpf/devmap.c if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { map 254 kernel/bpf/devmap.c for (i = 0; i < dtab->map.max_entries; i++) { map 273 kernel/bpf/devmap.c static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) map 275 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 279 kernel/bpf/devmap.c if (index >= dtab->map.max_entries) { map 284 kernel/bpf/devmap.c if (index == dtab->map.max_entries - 1) map 290 kernel/bpf/devmap.c struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) map 292 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 304 kernel/bpf/devmap.c static int dev_map_hash_get_next_key(struct bpf_map *map, void *key, map 307 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 318 kernel/bpf/devmap.c dev = __dev_map_hash_lookup_elem(map, idx); map 376 kernel/bpf/devmap.c trace_xdp_devmap_xmit(&obj->dtab->map, obj->idx, map 405 kernel/bpf/devmap.c void __dev_map_flush(struct bpf_map *map) map 407 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 421 kernel/bpf/devmap.c struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key) map 423 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 426 kernel/bpf/devmap.c if (key >= map->max_entries) map 496 kernel/bpf/devmap.c static void *dev_map_lookup_elem(struct bpf_map *map, void *key) map 498 kernel/bpf/devmap.c struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key); map 504 kernel/bpf/devmap.c static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key) map 506 kernel/bpf/devmap.c struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map, map 539 kernel/bpf/devmap.c static int dev_map_delete_elem(struct bpf_map *map, void *key) map 541 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 545 kernel/bpf/devmap.c if (k >= map->max_entries) map 562 kernel/bpf/devmap.c static int dev_map_hash_delete_elem(struct bpf_map *map, void *key) map 564 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 572 kernel/bpf/devmap.c old_dev = __dev_map_hash_lookup_elem(map, k); map 594 kernel/bpf/devmap.c dev = kmalloc_node(sizeof(*dev), gfp, dtab->map.numa_node); map 623 kernel/bpf/devmap.c static int __dev_map_update_elem(struct net *net, struct bpf_map *map, map 626 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 633 kernel/bpf/devmap.c if (unlikely(i >= dtab->map.max_entries)) map 657 kernel/bpf/devmap.c static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, map 661 kernel/bpf/devmap.c map, key, value, map_flags); map 664 kernel/bpf/devmap.c static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map, map 667 kernel/bpf/devmap.c struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); map 679 kernel/bpf/devmap.c old_dev = __dev_map_hash_lookup_elem(map, idx); map 692 kernel/bpf/devmap.c if (dtab->items >= dtab->map.max_entries) { map 
714 kernel/bpf/devmap.c static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, map 718 kernel/bpf/devmap.c map, key, value, map_flags); map 783 kernel/bpf/devmap.c if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { map 788 kernel/bpf/devmap.c for (i = 0; i < dtab->map.max_entries; i++) { map 26 kernel/bpf/hashtab.c struct bpf_map map; map 64 kernel/bpf/hashtab.c return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || map 65 kernel/bpf/hashtab.c htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; map 70 kernel/bpf/hashtab.c return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || map 71 kernel/bpf/hashtab.c htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; map 76 kernel/bpf/hashtab.c return !(htab->map.map_flags & BPF_F_NO_PREALLOC); map 90 kernel/bpf/hashtab.c static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) map 92 kernel/bpf/hashtab.c return *(void **)(l->key + roundup(map->key_size, 8)); map 107 kernel/bpf/hashtab.c for (i = 0; i < htab->map.max_entries; i++) { map 111 kernel/bpf/hashtab.c htab->map.key_size); map 127 kernel/bpf/hashtab.c memcpy(l->key, key, htab->map.key_size); map 136 kernel/bpf/hashtab.c u32 num_entries = htab->map.max_entries; map 143 kernel/bpf/hashtab.c htab->map.numa_node); map 151 kernel/bpf/hashtab.c u32 size = round_up(htab->map.value_size, 8); map 157 kernel/bpf/hashtab.c htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, map 165 kernel/bpf/hashtab.c htab->map.map_flags & BPF_F_NO_COMMON_LRU, map 316 kernel/bpf/hashtab.c bpf_map_init_from_attr(&htab->map, attr); map 323 kernel/bpf/hashtab.c htab->map.max_entries = roundup(attr->max_entries, map 325 kernel/bpf/hashtab.c if (htab->map.max_entries < attr->max_entries) map 326 kernel/bpf/hashtab.c htab->map.max_entries = rounddown(attr->max_entries, map 331 kernel/bpf/hashtab.c htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); map 334 kernel/bpf/hashtab.c round_up(htab->map.key_size, 8); map 338 kernel/bpf/hashtab.c htab->elem_size += round_up(htab->map.value_size, 8); map 347 kernel/bpf/hashtab.c (u64) htab->elem_size * htab->map.max_entries; map 350 kernel/bpf/hashtab.c cost += (u64) round_up(htab->map.value_size, 8) * map 351 kernel/bpf/hashtab.c num_possible_cpus() * htab->map.max_entries; map 356 kernel/bpf/hashtab.c err = bpf_map_charge_init(&htab->map.memory, cost); map 363 kernel/bpf/hashtab.c htab->map.numa_node); map 367 kernel/bpf/hashtab.c if (htab->map.map_flags & BPF_F_ZERO_SEED) map 392 kernel/bpf/hashtab.c return &htab->map; map 399 kernel/bpf/hashtab.c bpf_map_charge_finish(&htab->map.memory); map 461 kernel/bpf/hashtab.c static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) map 463 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 471 kernel/bpf/hashtab.c key_size = map->key_size; map 482 kernel/bpf/hashtab.c static void *htab_map_lookup_elem(struct bpf_map *map, void *key) map 484 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); map 487 kernel/bpf/hashtab.c return l->key + round_up(map->key_size, 8); map 503 kernel/bpf/hashtab.c static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) map 509 kernel/bpf/hashtab.c (void *(*)(struct bpf_map *map, void *key))NULL)); map 514 kernel/bpf/hashtab.c round_up(map->key_size, 8)); map 518 kernel/bpf/hashtab.c static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, map 521 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); map 
526 kernel/bpf/hashtab.c return l->key + round_up(map->key_size, 8); map 532 kernel/bpf/hashtab.c static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) map 534 kernel/bpf/hashtab.c return __htab_lru_map_lookup_elem(map, key, true); map 537 kernel/bpf/hashtab.c static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) map 539 kernel/bpf/hashtab.c return __htab_lru_map_lookup_elem(map, key, false); map 542 kernel/bpf/hashtab.c static u32 htab_lru_map_gen_lookup(struct bpf_map *map, map 550 kernel/bpf/hashtab.c (void *(*)(struct bpf_map *map, void *key))NULL)); map 563 kernel/bpf/hashtab.c round_up(map->key_size, 8)); map 597 kernel/bpf/hashtab.c static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) map 599 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 607 kernel/bpf/hashtab.c key_size = map->key_size; map 657 kernel/bpf/hashtab.c if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) map 658 kernel/bpf/hashtab.c free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); map 680 kernel/bpf/hashtab.c struct bpf_map *map = &htab->map; map 682 kernel/bpf/hashtab.c if (map->ops->map_fd_put_ptr) { map 683 kernel/bpf/hashtab.c void *ptr = fd_htab_map_get_ptr(map, l); map 685 kernel/bpf/hashtab.c map->ops->map_fd_put_ptr(ptr); map 702 kernel/bpf/hashtab.c memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); map 704 kernel/bpf/hashtab.c u32 size = round_up(htab->map.value_size, 8); map 717 kernel/bpf/hashtab.c return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && map 726 kernel/bpf/hashtab.c u32 size = htab->map.value_size; map 748 kernel/bpf/hashtab.c if (atomic_inc_return(&htab->count) > htab->map.max_entries) map 759 kernel/bpf/hashtab.c htab->map.numa_node); map 764 kernel/bpf/hashtab.c check_and_init_map_lock(&htab->map, map 792 kernel/bpf/hashtab.c copy_map_value(&htab->map, map 819 kernel/bpf/hashtab.c static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, map 822 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 836 kernel/bpf/hashtab.c key_size = map->key_size; map 844 kernel/bpf/hashtab.c if (unlikely(!map_value_has_spin_lock(map))) map 854 kernel/bpf/hashtab.c copy_map_value_locked(map, map 881 kernel/bpf/hashtab.c copy_map_value_locked(map, map 911 kernel/bpf/hashtab.c static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, map 914 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 928 kernel/bpf/hashtab.c key_size = map->key_size; map 943 kernel/bpf/hashtab.c memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size); map 975 kernel/bpf/hashtab.c static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, map 979 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 993 kernel/bpf/hashtab.c key_size = map->key_size; map 1028 kernel/bpf/hashtab.c static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, map 1032 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 1046 kernel/bpf/hashtab.c key_size = map->key_size; map 1093 kernel/bpf/hashtab.c static int htab_percpu_map_update_elem(struct bpf_map *map, void *key, map 1096 kernel/bpf/hashtab.c return __htab_percpu_map_update_elem(map, key, value, map_flags, false); map 1099 kernel/bpf/hashtab.c static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, map 1102 kernel/bpf/hashtab.c 
return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, map 1107 kernel/bpf/hashtab.c static int htab_map_delete_elem(struct bpf_map *map, void *key) map 1109 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 1119 kernel/bpf/hashtab.c key_size = map->key_size; map 1139 kernel/bpf/hashtab.c static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) map 1141 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 1151 kernel/bpf/hashtab.c key_size = map->key_size; map 1189 kernel/bpf/hashtab.c static void htab_map_free(struct bpf_map *map) map 1191 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 1214 kernel/bpf/hashtab.c static void htab_map_seq_show_elem(struct bpf_map *map, void *key, map 1221 kernel/bpf/hashtab.c value = htab_map_lookup_elem(map, key); map 1227 kernel/bpf/hashtab.c btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); map 1229 kernel/bpf/hashtab.c btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); map 1261 kernel/bpf/hashtab.c static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) map 1263 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); map 1266 kernel/bpf/hashtab.c return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); map 1271 kernel/bpf/hashtab.c static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) map 1273 kernel/bpf/hashtab.c struct htab_elem *l = __htab_map_lookup_elem(map, key); map 1277 kernel/bpf/hashtab.c return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); map 1283 kernel/bpf/hashtab.c int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) map 1295 kernel/bpf/hashtab.c size = round_up(map->value_size, 8); map 1297 kernel/bpf/hashtab.c l = __htab_map_lookup_elem(map, key); map 1303 kernel/bpf/hashtab.c pptr = htab_elem_get_ptr(l, map->key_size); map 1315 kernel/bpf/hashtab.c int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, map 1318 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 1323 kernel/bpf/hashtab.c ret = __htab_lru_percpu_map_update_elem(map, key, value, map 1326 kernel/bpf/hashtab.c ret = __htab_percpu_map_update_elem(map, key, value, map_flags, map 1333 kernel/bpf/hashtab.c static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, map 1342 kernel/bpf/hashtab.c l = __htab_map_lookup_elem(map, key); map 1348 kernel/bpf/hashtab.c btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); map 1350 kernel/bpf/hashtab.c pptr = htab_elem_get_ptr(l, map->key_size); map 1353 kernel/bpf/hashtab.c btf_type_seq_show(map->btf, map->btf_value_type_id, map 1391 kernel/bpf/hashtab.c static void fd_htab_map_free(struct bpf_map *map) map 1393 kernel/bpf/hashtab.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 1403 kernel/bpf/hashtab.c void *ptr = fd_htab_map_get_ptr(map, l); map 1405 kernel/bpf/hashtab.c map->ops->map_fd_put_ptr(ptr); map 1409 kernel/bpf/hashtab.c htab_map_free(map); map 1413 kernel/bpf/hashtab.c int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) map 1418 kernel/bpf/hashtab.c if (!map->ops->map_fd_sys_lookup_elem) map 1422 kernel/bpf/hashtab.c ptr = htab_map_lookup_elem(map, key); map 1424 kernel/bpf/hashtab.c *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr)); map 1433 kernel/bpf/hashtab.c int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, map 1440 
kernel/bpf/hashtab.c ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); map 1444 kernel/bpf/hashtab.c ret = htab_map_update_elem(map, key, &ptr, map_flags); map 1446 kernel/bpf/hashtab.c map->ops->map_fd_put_ptr(ptr); map 1453 kernel/bpf/hashtab.c struct bpf_map *map, *inner_map_meta; map 1459 kernel/bpf/hashtab.c map = htab_map_alloc(attr); map 1460 kernel/bpf/hashtab.c if (IS_ERR(map)) { map 1462 kernel/bpf/hashtab.c return map; map 1465 kernel/bpf/hashtab.c map->inner_map_meta = inner_map_meta; map 1467 kernel/bpf/hashtab.c return map; map 1470 kernel/bpf/hashtab.c static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) map 1472 kernel/bpf/hashtab.c struct bpf_map **inner_map = htab_map_lookup_elem(map, key); map 1480 kernel/bpf/hashtab.c static u32 htab_of_map_gen_lookup(struct bpf_map *map, map 1487 kernel/bpf/hashtab.c (void *(*)(struct bpf_map *map, void *key))NULL)); map 1492 kernel/bpf/hashtab.c round_up(map->key_size, 8)); map 1498 kernel/bpf/hashtab.c static void htab_of_map_free(struct bpf_map *map) map 1500 kernel/bpf/hashtab.c bpf_map_meta_free(map->inner_map_meta); map 1501 kernel/bpf/hashtab.c fd_htab_map_free(map); map 26 kernel/bpf/helpers.c BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) map 29 kernel/bpf/helpers.c return (unsigned long) map->ops->map_lookup_elem(map, key); map 41 kernel/bpf/helpers.c BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, map 45 kernel/bpf/helpers.c return map->ops->map_update_elem(map, key, value, flags); map 59 kernel/bpf/helpers.c BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) map 62 kernel/bpf/helpers.c return map->ops->map_delete_elem(map, key); map 74 kernel/bpf/helpers.c BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) map 76 kernel/bpf/helpers.c return map->ops->map_push_elem(map, value, flags); map 89 kernel/bpf/helpers.c BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) map 91 kernel/bpf/helpers.c return map->ops->map_pop_elem(map, value); map 102 kernel/bpf/helpers.c BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) map 104 kernel/bpf/helpers.c return map->ops->map_peek_elem(map, value); map 299 kernel/bpf/helpers.c void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, map 305 kernel/bpf/helpers.c lock = src + map->spin_lock_off; map 307 kernel/bpf/helpers.c lock = dst + map->spin_lock_off; map 310 kernel/bpf/helpers.c copy_map_value(map, dst, src); map 333 kernel/bpf/helpers.c BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) map 339 kernel/bpf/helpers.c enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); map 174 kernel/bpf/inode.c static struct map_iter *map_iter_alloc(struct bpf_map *map) map 182 kernel/bpf/inode.c iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN); map 195 kernel/bpf/inode.c struct bpf_map *map = seq_file_to_map(m); map 208 kernel/bpf/inode.c if (map->ops->map_get_next_key(map, prev_key, key)) { map 229 kernel/bpf/inode.c struct bpf_map *map = seq_file_to_map(m); map 236 kernel/bpf/inode.c map->ops->map_seq_show_elem(map, key, m); map 251 kernel/bpf/inode.c struct bpf_map *map = inode->i_private; map 256 kernel/bpf/inode.c iter = map_iter_alloc(map); map 331 kernel/bpf/inode.c struct bpf_map *map = arg; map 334 kernel/bpf/inode.c bpf_map_support_seq_show(map) ? 
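
The kernel/bpf/helpers.c and kernel/bpf/inode.c hits show the matching dispatch side: generic code (the bpf_map_lookup_elem/bpf_map_update_elem helpers, the seq_file iterator in kernel/bpf/inode.c) never knows which concrete map type it holds and simply calls through the per-type ops table at map->ops. Below is a rough sketch of that indirection with a heavily cut-down stand-in for struct bpf_map_ops; the real table has many more callbacks and the real helpers take u64 flags, as the listing shows.

struct bpf_map;                 /* forward declaration */

/* Cut-down stand-in for the kernel's per-type ops table. */
struct bpf_map_ops {
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	int (*map_update_elem)(struct bpf_map *map, void *key, void *value,
			       unsigned long flags);
	int (*map_delete_elem)(struct bpf_map *map, void *key);
	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
};

struct bpf_map {
	const struct bpf_map_ops *ops;  /* chosen once, at map creation */
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
};

/* The generic helpers simply forward to the type-specific callback,
 * which is how the bpf_map_lookup_elem()/bpf_map_update_elem() helper
 * bodies read in the listing above. */
static void *generic_lookup(struct bpf_map *map, void *key)
{
	return map->ops->map_lookup_elem(map, key);
}

static int generic_update(struct bpf_map *map, void *key, void *value,
			  unsigned long flags)
{
	return map->ops->map_update_elem(map, key, value, flags);
}

A concrete type then fills in a static ops table once, so every specialized behaviour is selected at map creation time rather than checked per call.
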
map 20 kernel/bpf/local_storage.c struct bpf_map map; map 28 kernel/bpf/local_storage.c static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) map 30 kernel/bpf/local_storage.c return container_of(map, struct bpf_cgroup_storage_map, map); map 49 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key, map 52 kernel/bpf/local_storage.c struct rb_root *root = &map->root; map 56 kernel/bpf/local_storage.c spin_lock_bh(&map->lock); map 73 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 79 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 84 kernel/bpf/local_storage.c static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, map 87 kernel/bpf/local_storage.c struct rb_root *root = &map->root; map 116 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map = map_to_storage(_map); map 120 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, false); map 127 kernel/bpf/local_storage.c static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, map 141 kernel/bpf/local_storage.c !map_value_has_spin_lock(map))) map 144 kernel/bpf/local_storage.c storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, map 150 kernel/bpf/local_storage.c copy_map_value_locked(map, storage->buf->data, value, false); map 155 kernel/bpf/local_storage.c map->value_size, map 157 kernel/bpf/local_storage.c map->numa_node); map 161 kernel/bpf/local_storage.c memcpy(&new->data[0], value, map->value_size); map 162 kernel/bpf/local_storage.c check_and_init_map_lock(map, new->data); map 173 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map = map_to_storage(_map); map 180 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, false); map 203 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map = map_to_storage(_map); map 213 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, false); map 238 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map = map_to_storage(_map); map 243 kernel/bpf/local_storage.c spin_lock_bh(&map->lock); map 245 kernel/bpf/local_storage.c if (list_empty(&map->list)) map 249 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, true); map 257 kernel/bpf/local_storage.c storage = list_first_entry(&map->list, map 261 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 267 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 274 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map; map 299 kernel/bpf/local_storage.c map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map), map 301 kernel/bpf/local_storage.c if (!map) { map 306 kernel/bpf/local_storage.c bpf_map_charge_move(&map->map.memory, &mem); map 309 kernel/bpf/local_storage.c bpf_map_init_from_attr(&map->map, attr); map 311 kernel/bpf/local_storage.c spin_lock_init(&map->lock); map 312 kernel/bpf/local_storage.c map->root = RB_ROOT; map 313 kernel/bpf/local_storage.c INIT_LIST_HEAD(&map->list); map 315 kernel/bpf/local_storage.c return &map->map; map 320 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map = map_to_storage(_map); map 322 kernel/bpf/local_storage.c WARN_ON(!RB_EMPTY_ROOT(&map->root)); map 323 kernel/bpf/local_storage.c WARN_ON(!list_empty(&map->list)); map 325 kernel/bpf/local_storage.c kfree(map); map 328 kernel/bpf/local_storage.c static int cgroup_storage_delete_elem(struct bpf_map *map, void *key) map 333 kernel/bpf/local_storage.c static int cgroup_storage_check_btf(const struct bpf_map *map, 
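
The kernel/bpf/local_storage.c hits above follow a third recurring shape: the map owns its own lock (spin_lock_bh(&map->lock)) plus a private index (an rbtree at map->root and a list at map->list), and cgroup_storage_lookup()/cgroup_storage_insert() only walk or modify that index while holding the lock. The sketch below mirrors just the locking pattern; it substitutes a pthread mutex for the bottom-half spinlock and a singly linked list for the rbtree, and the key layout is invented for illustration rather than taken from the kernel.

#include <pthread.h>

struct storage_key {
	unsigned long long cgroup_id;
	unsigned int attach_type;
};

struct storage_entry {
	struct storage_key key;
	struct storage_entry *next;   /* kernel links entries into an rbtree and a list */
	char data[];
};

struct storage_map {
	pthread_mutex_t lock;         /* stand-in for the map's spinlock; init with pthread_mutex_init() */
	struct storage_entry *head;
	unsigned int value_size;
};

/* Mirrors the shape of cgroup_storage_lookup(): walk the index under the
 * map's own lock, compare keys, drop the lock, return the entry or NULL. */
static struct storage_entry *storage_lookup(struct storage_map *map,
					    const struct storage_key *key)
{
	struct storage_entry *e;

	pthread_mutex_lock(&map->lock);
	for (e = map->head; e; e = e->next)
		if (e->key.cgroup_id == key->cgroup_id &&
		    e->key.attach_type == key->attach_type)
			break;
	pthread_mutex_unlock(&map->lock);
	return e;
}

/* Mirrors cgroup_storage_insert(): modify the index only while locked. */
static void storage_insert(struct storage_map *map, struct storage_entry *e)
{
	pthread_mutex_lock(&map->lock);
	e->next = map->head;
	map->head = e;
	pthread_mutex_unlock(&map->lock);
}
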
map 376 kernel/bpf/local_storage.c static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key, map 379 kernel/bpf/local_storage.c enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); map 385 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map_to_storage(map), key, false); map 391 kernel/bpf/local_storage.c btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); map 392 kernel/bpf/local_storage.c stype = cgroup_storage_type(map); map 395 kernel/bpf/local_storage.c btf_type_seq_show(map->btf, map->btf_value_type_id, map 402 kernel/bpf/local_storage.c btf_type_seq_show(map->btf, map->btf_value_type_id, map 426 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map = map_to_storage(_map); map 429 kernel/bpf/local_storage.c spin_lock_bh(&map->lock); map 431 kernel/bpf/local_storage.c if (map->prog && map->prog != prog) map 437 kernel/bpf/local_storage.c map->prog = prog; map 441 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 449 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map = map_to_storage(_map); map 451 kernel/bpf/local_storage.c spin_lock_bh(&map->lock); map 452 kernel/bpf/local_storage.c if (map->prog == prog) { map 454 kernel/bpf/local_storage.c map->prog = NULL; map 457 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 460 kernel/bpf/local_storage.c static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) map 464 kernel/bpf/local_storage.c if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) { map 465 kernel/bpf/local_storage.c size = sizeof(struct bpf_storage_buffer) + map->value_size; map 469 kernel/bpf/local_storage.c size = map->value_size; map 481 kernel/bpf/local_storage.c struct bpf_map *map; map 486 kernel/bpf/local_storage.c map = prog->aux->cgroup_storage[stype]; map 487 kernel/bpf/local_storage.c if (!map) map 490 kernel/bpf/local_storage.c size = bpf_cgroup_storage_calculate_size(map, &pages); map 492 kernel/bpf/local_storage.c if (bpf_map_charge_memlock(map, pages)) map 496 kernel/bpf/local_storage.c __GFP_ZERO | GFP_USER, map->numa_node); map 503 kernel/bpf/local_storage.c storage->buf = kmalloc_node(size, flags, map->numa_node); map 506 kernel/bpf/local_storage.c check_and_init_map_lock(map, storage->buf->data); map 513 kernel/bpf/local_storage.c storage->map = (struct bpf_cgroup_storage_map *)map; map 518 kernel/bpf/local_storage.c bpf_map_uncharge_memlock(map, pages); map 544 kernel/bpf/local_storage.c struct bpf_map *map; map 550 kernel/bpf/local_storage.c map = &storage->map->map; map 552 kernel/bpf/local_storage.c bpf_cgroup_storage_calculate_size(map, &pages); map 553 kernel/bpf/local_storage.c bpf_map_uncharge_memlock(map, pages); map 555 kernel/bpf/local_storage.c stype = cgroup_storage_type(map); map 566 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map; map 574 kernel/bpf/local_storage.c map = storage->map; map 576 kernel/bpf/local_storage.c spin_lock_bh(&map->lock); map 577 kernel/bpf/local_storage.c WARN_ON(cgroup_storage_insert(map, storage)); map 578 kernel/bpf/local_storage.c list_add(&storage->list, &map->list); map 579 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 584 kernel/bpf/local_storage.c struct bpf_cgroup_storage_map *map; map 590 kernel/bpf/local_storage.c map = storage->map; map 592 kernel/bpf/local_storage.c spin_lock_bh(&map->lock); map 593 kernel/bpf/local_storage.c root = &map->root; map 597 kernel/bpf/local_storage.c spin_unlock_bh(&map->lock); map 32 kernel/bpf/lpm_trie.c struct bpf_map map; map 227 
kernel/bpf/lpm_trie.c static void *trie_lookup_elem(struct bpf_map *map, void *_key) map 229 kernel/bpf/lpm_trie.c struct lpm_trie *trie = container_of(map, struct lpm_trie, map); map 283 kernel/bpf/lpm_trie.c size += trie->map.value_size; map 286 kernel/bpf/lpm_trie.c trie->map.numa_node); map 294 kernel/bpf/lpm_trie.c trie->map.value_size); map 300 kernel/bpf/lpm_trie.c static int trie_update_elem(struct bpf_map *map, map 303 kernel/bpf/lpm_trie.c struct lpm_trie *trie = container_of(map, struct lpm_trie, map); map 322 kernel/bpf/lpm_trie.c if (trie->n_entries == trie->map.max_entries) { map 431 kernel/bpf/lpm_trie.c static int trie_delete_elem(struct bpf_map *map, void *_key) map 433 kernel/bpf/lpm_trie.c struct lpm_trie *trie = container_of(map, struct lpm_trie, map); map 565 kernel/bpf/lpm_trie.c bpf_map_init_from_attr(&trie->map, attr); map 574 kernel/bpf/lpm_trie.c ret = bpf_map_charge_init(&trie->map.memory, cost); map 580 kernel/bpf/lpm_trie.c return &trie->map; map 586 kernel/bpf/lpm_trie.c static void trie_free(struct bpf_map *map) map 588 kernel/bpf/lpm_trie.c struct lpm_trie *trie = container_of(map, struct lpm_trie, map); map 630 kernel/bpf/lpm_trie.c static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) map 633 kernel/bpf/lpm_trie.c struct lpm_trie *trie = container_of(map, struct lpm_trie, map); map 728 kernel/bpf/lpm_trie.c static int trie_check_btf(const struct bpf_map *map, map 64 kernel/bpf/map_in_map.c container_of(inner_map_meta, struct bpf_array, map)->index_mask = map 65 kernel/bpf/map_in_map.c container_of(inner_map, struct bpf_array, map)->index_mask; map 88 kernel/bpf/map_in_map.c void *bpf_map_fd_get_ptr(struct bpf_map *map, map 100 kernel/bpf/map_in_map.c if (bpf_map_meta_equal(map->inner_map_meta, inner_map)) map 16 kernel/bpf/map_in_map.h void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file, map 379 kernel/bpf/offload.c bpf_map_init_from_attr(&offmap->map, attr); map 402 kernel/bpf/offload.c return &offmap->map; map 415 kernel/bpf/offload.c bpf_map_free_id(&offmap->map, true); map 420 kernel/bpf/offload.c void bpf_map_offload_map_free(struct bpf_map *map) map 422 kernel/bpf/offload.c struct bpf_offloaded_map *offmap = map_to_offmap(map); map 434 kernel/bpf/offload.c int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value) map 436 kernel/bpf/offload.c struct bpf_offloaded_map *offmap = map_to_offmap(map); map 447 kernel/bpf/offload.c int bpf_map_offload_update_elem(struct bpf_map *map, map 450 kernel/bpf/offload.c struct bpf_offloaded_map *offmap = map_to_offmap(map); map 465 kernel/bpf/offload.c int bpf_map_offload_delete_elem(struct bpf_map *map, void *key) map 467 kernel/bpf/offload.c struct bpf_offloaded_map *offmap = map_to_offmap(map); map 478 kernel/bpf/offload.c int bpf_map_offload_get_next_key(struct bpf_map *map, void *key, void *next_key) map 480 kernel/bpf/offload.c struct bpf_offloaded_map *offmap = map_to_offmap(map); map 521 kernel/bpf/offload.c int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map) map 524 kernel/bpf/offload.c .offmap = map_to_offmap(map), map 579 kernel/bpf/offload.c bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map) map 584 kernel/bpf/offload.c if (!bpf_map_is_dev_bound(map)) map 585 kernel/bpf/offload.c return bpf_map_offload_neutral(map); map 586 kernel/bpf/offload.c offmap = map_to_offmap(map); map 17 kernel/bpf/queue_stack_maps.c struct bpf_map map; map 25 kernel/bpf/queue_stack_maps.c static struct bpf_queue_stack 
*bpf_queue_stack(struct bpf_map *map) map 27 kernel/bpf/queue_stack_maps.c return container_of(map, struct bpf_queue_stack, map); map 89 kernel/bpf/queue_stack_maps.c bpf_map_init_from_attr(&qs->map, attr); map 91 kernel/bpf/queue_stack_maps.c bpf_map_charge_move(&qs->map.memory, &mem); map 96 kernel/bpf/queue_stack_maps.c return &qs->map; map 100 kernel/bpf/queue_stack_maps.c static void queue_stack_map_free(struct bpf_map *map) map 102 kernel/bpf/queue_stack_maps.c struct bpf_queue_stack *qs = bpf_queue_stack(map); map 114 kernel/bpf/queue_stack_maps.c static int __queue_map_get(struct bpf_map *map, void *value, bool delete) map 116 kernel/bpf/queue_stack_maps.c struct bpf_queue_stack *qs = bpf_queue_stack(map); map 124 kernel/bpf/queue_stack_maps.c memset(value, 0, qs->map.value_size); map 129 kernel/bpf/queue_stack_maps.c ptr = &qs->elements[qs->tail * qs->map.value_size]; map 130 kernel/bpf/queue_stack_maps.c memcpy(value, ptr, qs->map.value_size); map 143 kernel/bpf/queue_stack_maps.c static int __stack_map_get(struct bpf_map *map, void *value, bool delete) map 145 kernel/bpf/queue_stack_maps.c struct bpf_queue_stack *qs = bpf_queue_stack(map); map 154 kernel/bpf/queue_stack_maps.c memset(value, 0, qs->map.value_size); map 163 kernel/bpf/queue_stack_maps.c ptr = &qs->elements[index * qs->map.value_size]; map 164 kernel/bpf/queue_stack_maps.c memcpy(value, ptr, qs->map.value_size); map 175 kernel/bpf/queue_stack_maps.c static int queue_map_peek_elem(struct bpf_map *map, void *value) map 177 kernel/bpf/queue_stack_maps.c return __queue_map_get(map, value, false); map 181 kernel/bpf/queue_stack_maps.c static int stack_map_peek_elem(struct bpf_map *map, void *value) map 183 kernel/bpf/queue_stack_maps.c return __stack_map_get(map, value, false); map 187 kernel/bpf/queue_stack_maps.c static int queue_map_pop_elem(struct bpf_map *map, void *value) map 189 kernel/bpf/queue_stack_maps.c return __queue_map_get(map, value, true); map 193 kernel/bpf/queue_stack_maps.c static int stack_map_pop_elem(struct bpf_map *map, void *value) map 195 kernel/bpf/queue_stack_maps.c return __stack_map_get(map, value, true); map 199 kernel/bpf/queue_stack_maps.c static int queue_stack_map_push_elem(struct bpf_map *map, void *value, map 202 kernel/bpf/queue_stack_maps.c struct bpf_queue_stack *qs = bpf_queue_stack(map); map 228 kernel/bpf/queue_stack_maps.c dst = &qs->elements[qs->head * qs->map.value_size]; map 229 kernel/bpf/queue_stack_maps.c memcpy(dst, value, qs->map.value_size); map 240 kernel/bpf/queue_stack_maps.c static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key) map 246 kernel/bpf/queue_stack_maps.c static int queue_stack_map_update_elem(struct bpf_map *map, void *key, map 253 kernel/bpf/queue_stack_maps.c static int queue_stack_map_delete_elem(struct bpf_map *map, void *key) map 259 kernel/bpf/queue_stack_maps.c static int queue_stack_map_get_next_key(struct bpf_map *map, void *key, map 11 kernel/bpf/reuseport_array.c struct bpf_map map; map 15 kernel/bpf/reuseport_array.c static struct reuseport_array *reuseport_array(struct bpf_map *map) map 17 kernel/bpf/reuseport_array.c return (struct reuseport_array *)map; map 49 kernel/bpf/reuseport_array.c static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key) map 51 kernel/bpf/reuseport_array.c struct reuseport_array *array = reuseport_array(map); map 54 kernel/bpf/reuseport_array.c if (unlikely(index >= array->map.max_entries)) map 61 kernel/bpf/reuseport_array.c static int reuseport_array_delete_elem(struct bpf_map 
*map, void *key) map 63 kernel/bpf/reuseport_array.c struct reuseport_array *array = reuseport_array(map); map 68 kernel/bpf/reuseport_array.c if (index >= map->max_entries) map 93 kernel/bpf/reuseport_array.c static void reuseport_array_free(struct bpf_map *map) map 95 kernel/bpf/reuseport_array.c struct reuseport_array *array = reuseport_array(map); map 127 kernel/bpf/reuseport_array.c for (i = 0; i < map->max_entries; i++) { map 175 kernel/bpf/reuseport_array.c bpf_map_init_from_attr(&array->map, attr); map 176 kernel/bpf/reuseport_array.c bpf_map_charge_move(&array->map.memory, &mem); map 178 kernel/bpf/reuseport_array.c return &array->map; map 181 kernel/bpf/reuseport_array.c int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, map 187 kernel/bpf/reuseport_array.c if (map->value_size != sizeof(u64)) map 191 kernel/bpf/reuseport_array.c sk = reuseport_array_lookup_elem(map, key); map 248 kernel/bpf/reuseport_array.c int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, map 251 kernel/bpf/reuseport_array.c struct reuseport_array *array = reuseport_array(map); map 261 kernel/bpf/reuseport_array.c if (index >= map->max_entries) map 264 kernel/bpf/reuseport_array.c if (map->value_size == sizeof(u64)) { map 334 kernel/bpf/reuseport_array.c static int reuseport_array_get_next_key(struct bpf_map *map, void *key, map 337 kernel/bpf/reuseport_array.c struct reuseport_array *array = reuseport_array(map); map 341 kernel/bpf/reuseport_array.c if (index >= array->map.max_entries) { map 346 kernel/bpf/reuseport_array.c if (index == array->map.max_entries - 1) map 26 kernel/bpf/stackmap.c struct bpf_map map; map 50 kernel/bpf/stackmap.c static inline bool stack_map_use_build_id(struct bpf_map *map) map 52 kernel/bpf/stackmap.c return (map->map_flags & BPF_F_STACK_BUILD_ID); map 55 kernel/bpf/stackmap.c static inline int stack_map_data_size(struct bpf_map *map) map 57 kernel/bpf/stackmap.c return stack_map_use_build_id(map) ? 
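
A few entries up, the kernel/bpf/queue_stack_maps.c hits outline how the queue/stack maps are laid out: a flat elements[] array of fixed value_size slots indexed by head and tail, e.g. __queue_map_get() reading &qs->elements[qs->tail * qs->map.value_size]. Below is a hedged stand-alone approximation of the queue behaviour; the struct, error returns, and the omitted locking and BPF_EXIST "overwrite oldest" handling are simplifications of mine, not the kernel code.

#include <string.h>

/* Rough stand-in for the queue flavour: fixed-size slots in a flat byte
 * array, consumed at tail, produced at head.  One slot is kept free so
 * that head == tail can mean "empty" in this sketch. */
struct queue_stack {
	unsigned int head, tail;
	unsigned int size;            /* number of slots */
	unsigned int value_size;
	char elements[];
};

/* Pop from the tail, as __queue_map_get() does for the queue case. */
static int queue_pop(struct queue_stack *qs, void *value)
{
	if (qs->head == qs->tail)
		return -1;            /* empty */
	memcpy(value, &qs->elements[qs->tail * qs->value_size], qs->value_size);
	if (++qs->tail >= qs->size)
		qs->tail = 0;
	return 0;
}

/* Push at the head, as queue_stack_map_push_elem() does. */
static int queue_push(struct queue_stack *qs, const void *value)
{
	unsigned int next = qs->head + 1 >= qs->size ? 0 : qs->head + 1;

	if (next == qs->tail)
		return -1;            /* full */
	memcpy(&qs->elements[qs->head * qs->value_size], value, qs->value_size);
	qs->head = next;
	return 0;
}
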
map 63 kernel/bpf/stackmap.c u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; map 66 kernel/bpf/stackmap.c smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries, map 67 kernel/bpf/stackmap.c smap->map.numa_node); map 76 kernel/bpf/stackmap.c smap->map.max_entries); map 128 kernel/bpf/stackmap.c bpf_map_init_from_attr(&smap->map, attr); map 129 kernel/bpf/stackmap.c smap->map.value_size = value_size; map 140 kernel/bpf/stackmap.c bpf_map_charge_move(&smap->map.memory, &mem); map 142 kernel/bpf/stackmap.c return &smap->map; map 346 kernel/bpf/stackmap.c BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, map 349 kernel/bpf/stackmap.c struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); map 352 kernel/bpf/stackmap.c u32 max_depth = map->value_size / stack_map_data_size(map); map 394 kernel/bpf/stackmap.c if (stack_map_use_build_id(map)) { map 514 kernel/bpf/stackmap.c static void *stack_map_lookup_elem(struct bpf_map *map, void *key) map 520 kernel/bpf/stackmap.c int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) map 522 kernel/bpf/stackmap.c struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); map 533 kernel/bpf/stackmap.c trace_len = bucket->nr * stack_map_data_size(map); map 535 kernel/bpf/stackmap.c memset(value + trace_len, 0, map->value_size - trace_len); map 543 kernel/bpf/stackmap.c static int stack_map_get_next_key(struct bpf_map *map, void *key, map 546 kernel/bpf/stackmap.c struct bpf_stack_map *smap = container_of(map, map 547 kernel/bpf/stackmap.c struct bpf_stack_map, map); map 572 kernel/bpf/stackmap.c static int stack_map_update_elem(struct bpf_map *map, void *key, void *value, map 579 kernel/bpf/stackmap.c static int stack_map_delete_elem(struct bpf_map *map, void *key) map 581 kernel/bpf/stackmap.c struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); map 598 kernel/bpf/stackmap.c static void stack_map_free(struct bpf_map *map) map 600 kernel/bpf/stackmap.c struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); map 27 kernel/bpf/syscall.c #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ map 28 kernel/bpf/syscall.c (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ map 29 kernel/bpf/syscall.c (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ map 30 kernel/bpf/syscall.c (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) map 31 kernel/bpf/syscall.c #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) map 32 kernel/bpf/syscall.c #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map)) map 104 kernel/bpf/syscall.c struct bpf_map *map; map 121 kernel/bpf/syscall.c map = ops->map_alloc(attr); map 122 kernel/bpf/syscall.c if (IS_ERR(map)) map 123 kernel/bpf/syscall.c return map; map 124 kernel/bpf/syscall.c map->ops = ops; map 125 kernel/bpf/syscall.c map->map_type = type; map 126 kernel/bpf/syscall.c return map; map 176 kernel/bpf/syscall.c void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) map 178 kernel/bpf/syscall.c map->map_type = attr->map_type; map 179 kernel/bpf/syscall.c map->key_size = attr->key_size; map 180 kernel/bpf/syscall.c map->value_size = attr->value_size; map 181 kernel/bpf/syscall.c map->max_entries = attr->max_entries; map 182 kernel/bpf/syscall.c map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); map 183 kernel/bpf/syscall.c map->numa_node = bpf_map_attr_numa_node(attr); map 240 kernel/bpf/syscall.c int 
bpf_map_charge_memlock(struct bpf_map *map, u32 pages) map 244 kernel/bpf/syscall.c ret = bpf_charge_memlock(map->memory.user, pages); map 247 kernel/bpf/syscall.c map->memory.pages += pages; map 251 kernel/bpf/syscall.c void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) map 253 kernel/bpf/syscall.c bpf_uncharge_memlock(map->memory.user, pages); map 254 kernel/bpf/syscall.c map->memory.pages -= pages; map 257 kernel/bpf/syscall.c static int bpf_map_alloc_id(struct bpf_map *map) map 263 kernel/bpf/syscall.c id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); map 265 kernel/bpf/syscall.c map->id = id; map 275 kernel/bpf/syscall.c void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) map 284 kernel/bpf/syscall.c if (!map->id) map 292 kernel/bpf/syscall.c idr_remove(&map_idr, map->id); map 293 kernel/bpf/syscall.c map->id = 0; map 304 kernel/bpf/syscall.c struct bpf_map *map = container_of(work, struct bpf_map, work); map 307 kernel/bpf/syscall.c bpf_map_charge_move(&mem, &map->memory); map 308 kernel/bpf/syscall.c security_bpf_map_free(map); map 310 kernel/bpf/syscall.c map->ops->map_free(map); map 314 kernel/bpf/syscall.c static void bpf_map_put_uref(struct bpf_map *map) map 316 kernel/bpf/syscall.c if (atomic_dec_and_test(&map->usercnt)) { map 317 kernel/bpf/syscall.c if (map->ops->map_release_uref) map 318 kernel/bpf/syscall.c map->ops->map_release_uref(map); map 325 kernel/bpf/syscall.c static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) map 327 kernel/bpf/syscall.c if (atomic_dec_and_test(&map->refcnt)) { map 329 kernel/bpf/syscall.c bpf_map_free_id(map, do_idr_lock); map 330 kernel/bpf/syscall.c btf_put(map->btf); map 331 kernel/bpf/syscall.c INIT_WORK(&map->work, bpf_map_free_deferred); map 332 kernel/bpf/syscall.c schedule_work(&map->work); map 336 kernel/bpf/syscall.c void bpf_map_put(struct bpf_map *map) map 338 kernel/bpf/syscall.c __bpf_map_put(map, true); map 342 kernel/bpf/syscall.c void bpf_map_put_with_uref(struct bpf_map *map) map 344 kernel/bpf/syscall.c bpf_map_put_uref(map); map 345 kernel/bpf/syscall.c bpf_map_put(map); map 350 kernel/bpf/syscall.c struct bpf_map *map = filp->private_data; map 352 kernel/bpf/syscall.c if (map->ops->map_release) map 353 kernel/bpf/syscall.c map->ops->map_release(map, filp); map 355 kernel/bpf/syscall.c bpf_map_put_with_uref(map); map 359 kernel/bpf/syscall.c static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) map 366 kernel/bpf/syscall.c if (READ_ONCE(map->frozen)) map 374 kernel/bpf/syscall.c const struct bpf_map *map = filp->private_data; map 379 kernel/bpf/syscall.c if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { map 380 kernel/bpf/syscall.c array = container_of(map, struct bpf_array, map); map 394 kernel/bpf/syscall.c map->map_type, map 395 kernel/bpf/syscall.c map->key_size, map 396 kernel/bpf/syscall.c map->value_size, map 397 kernel/bpf/syscall.c map->max_entries, map 398 kernel/bpf/syscall.c map->map_flags, map 399 kernel/bpf/syscall.c map->memory.pages * 1ULL << PAGE_SHIFT, map 400 kernel/bpf/syscall.c map->id, map 401 kernel/bpf/syscall.c READ_ONCE(map->frozen)); map 439 kernel/bpf/syscall.c int bpf_map_new_fd(struct bpf_map *map, int flags) map 443 kernel/bpf/syscall.c ret = security_bpf_map(map, OPEN_FMODE(flags)); map 447 kernel/bpf/syscall.c return anon_inode_getfd("bpf-map", &bpf_map_fops, map, map 493 kernel/bpf/syscall.c int map_check_no_btf(const struct bpf_map *map, map 501 kernel/bpf/syscall.c static int map_check_btf(struct bpf_map *map, const struct btf *btf, map 
511 kernel/bpf/syscall.c if (!key_type || key_size != map->key_size) map 515 kernel/bpf/syscall.c if (!map->ops->map_check_btf) map 520 kernel/bpf/syscall.c if (!value_type || value_size != map->value_size) map 523 kernel/bpf/syscall.c map->spin_lock_off = btf_find_spin_lock(btf, value_type); map 525 kernel/bpf/syscall.c if (map_value_has_spin_lock(map)) { map 526 kernel/bpf/syscall.c if (map->map_flags & BPF_F_RDONLY_PROG) map 528 kernel/bpf/syscall.c if (map->map_type != BPF_MAP_TYPE_HASH && map 529 kernel/bpf/syscall.c map->map_type != BPF_MAP_TYPE_ARRAY && map 530 kernel/bpf/syscall.c map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && map 531 kernel/bpf/syscall.c map->map_type != BPF_MAP_TYPE_SK_STORAGE) map 533 kernel/bpf/syscall.c if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > map 534 kernel/bpf/syscall.c map->value_size) { map 537 kernel/bpf/syscall.c map->spin_lock_off, map->value_size); map 542 kernel/bpf/syscall.c if (map->ops->map_check_btf) map 543 kernel/bpf/syscall.c ret = map->ops->map_check_btf(map, btf, key_type, value_type); map 554 kernel/bpf/syscall.c struct bpf_map *map; map 572 kernel/bpf/syscall.c map = find_and_alloc_map(attr); map 573 kernel/bpf/syscall.c if (IS_ERR(map)) map 574 kernel/bpf/syscall.c return PTR_ERR(map); map 576 kernel/bpf/syscall.c err = bpf_obj_name_cpy(map->name, attr->map_name); map 580 kernel/bpf/syscall.c atomic_set(&map->refcnt, 1); map 581 kernel/bpf/syscall.c atomic_set(&map->usercnt, 1); map 597 kernel/bpf/syscall.c err = map_check_btf(map, btf, attr->btf_key_type_id, map 604 kernel/bpf/syscall.c map->btf = btf; map 605 kernel/bpf/syscall.c map->btf_key_type_id = attr->btf_key_type_id; map 606 kernel/bpf/syscall.c map->btf_value_type_id = attr->btf_value_type_id; map 608 kernel/bpf/syscall.c map->spin_lock_off = -EINVAL; map 611 kernel/bpf/syscall.c err = security_bpf_map_alloc(map); map 615 kernel/bpf/syscall.c err = bpf_map_alloc_id(map); map 619 kernel/bpf/syscall.c err = bpf_map_new_fd(map, f_flags); map 627 kernel/bpf/syscall.c bpf_map_put_with_uref(map); map 634 kernel/bpf/syscall.c security_bpf_map_free(map); map 636 kernel/bpf/syscall.c btf_put(map->btf); map 637 kernel/bpf/syscall.c bpf_map_charge_move(&mem, &map->memory); map 638 kernel/bpf/syscall.c map->ops->map_free(map); map 661 kernel/bpf/syscall.c struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref) map 663 kernel/bpf/syscall.c if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) { map 664 kernel/bpf/syscall.c atomic_dec(&map->refcnt); map 668 kernel/bpf/syscall.c atomic_inc(&map->usercnt); map 669 kernel/bpf/syscall.c return map; map 676 kernel/bpf/syscall.c struct bpf_map *map; map 678 kernel/bpf/syscall.c map = __bpf_map_get(f); map 679 kernel/bpf/syscall.c if (IS_ERR(map)) map 680 kernel/bpf/syscall.c return map; map 682 kernel/bpf/syscall.c map = bpf_map_inc(map, true); map 685 kernel/bpf/syscall.c return map; map 689 kernel/bpf/syscall.c static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, map 694 kernel/bpf/syscall.c refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); map 697 kernel/bpf/syscall.c __bpf_map_put(map, false); map 705 kernel/bpf/syscall.c atomic_inc(&map->usercnt); map 707 kernel/bpf/syscall.c return map; map 710 kernel/bpf/syscall.c struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref) map 713 kernel/bpf/syscall.c map = __bpf_map_inc_not_zero(map, uref); map 716 kernel/bpf/syscall.c return map; map 720 kernel/bpf/syscall.c int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) map 744 
kernel/bpf/syscall.c struct bpf_map *map; map 757 kernel/bpf/syscall.c map = __bpf_map_get(f); map 758 kernel/bpf/syscall.c if (IS_ERR(map)) map 759 kernel/bpf/syscall.c return PTR_ERR(map); map 760 kernel/bpf/syscall.c if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { map 766 kernel/bpf/syscall.c !map_value_has_spin_lock(map)) { map 771 kernel/bpf/syscall.c key = __bpf_copy_key(ukey, map->key_size); map 777 kernel/bpf/syscall.c if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map 778 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || map 779 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || map 780 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) map 781 kernel/bpf/syscall.c value_size = round_up(map->value_size, 8) * num_possible_cpus(); map 782 kernel/bpf/syscall.c else if (IS_FD_MAP(map)) map 785 kernel/bpf/syscall.c value_size = map->value_size; map 792 kernel/bpf/syscall.c if (bpf_map_is_dev_bound(map)) { map 793 kernel/bpf/syscall.c err = bpf_map_offload_lookup_elem(map, key, value); map 799 kernel/bpf/syscall.c if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map 800 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { map 801 kernel/bpf/syscall.c err = bpf_percpu_hash_copy(map, key, value); map 802 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { map 803 kernel/bpf/syscall.c err = bpf_percpu_array_copy(map, key, value); map 804 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { map 805 kernel/bpf/syscall.c err = bpf_percpu_cgroup_storage_copy(map, key, value); map 806 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { map 807 kernel/bpf/syscall.c err = bpf_stackmap_copy(map, key, value); map 808 kernel/bpf/syscall.c } else if (IS_FD_ARRAY(map)) { map 809 kernel/bpf/syscall.c err = bpf_fd_array_map_lookup_elem(map, key, value); map 810 kernel/bpf/syscall.c } else if (IS_FD_HASH(map)) { map 811 kernel/bpf/syscall.c err = bpf_fd_htab_map_lookup_elem(map, key, value); map 812 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { map 813 kernel/bpf/syscall.c err = bpf_fd_reuseport_array_lookup_elem(map, key, value); map 814 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_QUEUE || map 815 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_STACK) { map 816 kernel/bpf/syscall.c err = map->ops->map_peek_elem(map, value); map 819 kernel/bpf/syscall.c if (map->ops->map_lookup_elem_sys_only) map 820 kernel/bpf/syscall.c ptr = map->ops->map_lookup_elem_sys_only(map, key); map 822 kernel/bpf/syscall.c ptr = map->ops->map_lookup_elem(map, key); map 831 kernel/bpf/syscall.c copy_map_value_locked(map, value, ptr, true); map 833 kernel/bpf/syscall.c copy_map_value(map, value, ptr); map 835 kernel/bpf/syscall.c check_and_init_map_lock(map, value); map 861 kernel/bpf/syscall.c static void maybe_wait_bpf_programs(struct bpf_map *map) map 867 kernel/bpf/syscall.c if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || map 868 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) map 879 kernel/bpf/syscall.c struct bpf_map *map; map 889 kernel/bpf/syscall.c map = __bpf_map_get(f); map 890 kernel/bpf/syscall.c if (IS_ERR(map)) map 891 kernel/bpf/syscall.c return PTR_ERR(map); map 892 kernel/bpf/syscall.c if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { map 898 kernel/bpf/syscall.c !map_value_has_spin_lock(map)) { map 903 kernel/bpf/syscall.c key = __bpf_copy_key(ukey, 
map->key_size); map 909 kernel/bpf/syscall.c if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map 910 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || map 911 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY || map 912 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) map 913 kernel/bpf/syscall.c value_size = round_up(map->value_size, 8) * num_possible_cpus(); map 915 kernel/bpf/syscall.c value_size = map->value_size; map 927 kernel/bpf/syscall.c if (bpf_map_is_dev_bound(map)) { map 928 kernel/bpf/syscall.c err = bpf_map_offload_update_elem(map, key, value, attr->flags); map 930 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_CPUMAP || map 931 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_SOCKHASH || map 932 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_SOCKMAP) { map 933 kernel/bpf/syscall.c err = map->ops->map_update_elem(map, key, value, attr->flags); map 942 kernel/bpf/syscall.c if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || map 943 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { map 944 kernel/bpf/syscall.c err = bpf_percpu_hash_update(map, key, value, attr->flags); map 945 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { map 946 kernel/bpf/syscall.c err = bpf_percpu_array_update(map, key, value, attr->flags); map 947 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { map 948 kernel/bpf/syscall.c err = bpf_percpu_cgroup_storage_update(map, key, value, map 950 kernel/bpf/syscall.c } else if (IS_FD_ARRAY(map)) { map 952 kernel/bpf/syscall.c err = bpf_fd_array_map_update_elem(map, f.file, key, value, map 955 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { map 957 kernel/bpf/syscall.c err = bpf_fd_htab_map_update_elem(map, f.file, key, value, map 960 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) { map 962 kernel/bpf/syscall.c err = bpf_fd_reuseport_array_update_elem(map, key, value, map 964 kernel/bpf/syscall.c } else if (map->map_type == BPF_MAP_TYPE_QUEUE || map 965 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_STACK) { map 966 kernel/bpf/syscall.c err = map->ops->map_push_elem(map, value, attr->flags); map 969 kernel/bpf/syscall.c err = map->ops->map_update_elem(map, key, value, attr->flags); map 974 kernel/bpf/syscall.c maybe_wait_bpf_programs(map); map 991 kernel/bpf/syscall.c struct bpf_map *map; map 1000 kernel/bpf/syscall.c map = __bpf_map_get(f); map 1001 kernel/bpf/syscall.c if (IS_ERR(map)) map 1002 kernel/bpf/syscall.c return PTR_ERR(map); map 1003 kernel/bpf/syscall.c if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { map 1008 kernel/bpf/syscall.c key = __bpf_copy_key(ukey, map->key_size); map 1014 kernel/bpf/syscall.c if (bpf_map_is_dev_bound(map)) { map 1015 kernel/bpf/syscall.c err = bpf_map_offload_delete_elem(map, key); map 1022 kernel/bpf/syscall.c err = map->ops->map_delete_elem(map, key); map 1026 kernel/bpf/syscall.c maybe_wait_bpf_programs(map); map 1042 kernel/bpf/syscall.c struct bpf_map *map; map 1051 kernel/bpf/syscall.c map = __bpf_map_get(f); map 1052 kernel/bpf/syscall.c if (IS_ERR(map)) map 1053 kernel/bpf/syscall.c return PTR_ERR(map); map 1054 kernel/bpf/syscall.c if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { map 1060 kernel/bpf/syscall.c key = __bpf_copy_key(ukey, map->key_size); map 1070 kernel/bpf/syscall.c next_key = kmalloc(map->key_size, GFP_USER); map 1074 kernel/bpf/syscall.c if 
(bpf_map_is_dev_bound(map)) { map 1075 kernel/bpf/syscall.c err = bpf_map_offload_get_next_key(map, key, next_key); map 1080 kernel/bpf/syscall.c err = map->ops->map_get_next_key(map, key, next_key); map 1087 kernel/bpf/syscall.c if (copy_to_user(unext_key, next_key, map->key_size) != 0) map 1108 kernel/bpf/syscall.c struct bpf_map *map; map 1118 kernel/bpf/syscall.c map = __bpf_map_get(f); map 1119 kernel/bpf/syscall.c if (IS_ERR(map)) map 1120 kernel/bpf/syscall.c return PTR_ERR(map); map 1121 kernel/bpf/syscall.c if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { map 1126 kernel/bpf/syscall.c key = __bpf_copy_key(ukey, map->key_size); map 1132 kernel/bpf/syscall.c value_size = map->value_size; map 1139 kernel/bpf/syscall.c if (map->map_type == BPF_MAP_TYPE_QUEUE || map 1140 kernel/bpf/syscall.c map->map_type == BPF_MAP_TYPE_STACK) { map 1141 kernel/bpf/syscall.c err = map->ops->map_pop_elem(map, value); map 1170 kernel/bpf/syscall.c struct bpf_map *map; map 1177 kernel/bpf/syscall.c map = __bpf_map_get(f); map 1178 kernel/bpf/syscall.c if (IS_ERR(map)) map 1179 kernel/bpf/syscall.c return PTR_ERR(map); map 1180 kernel/bpf/syscall.c if (READ_ONCE(map->frozen)) { map 1189 kernel/bpf/syscall.c WRITE_ONCE(map->frozen, true); map 2192 kernel/bpf/syscall.c struct bpf_map *map; map 2209 kernel/bpf/syscall.c map = idr_find(&map_idr, id); map 2210 kernel/bpf/syscall.c if (map) map 2211 kernel/bpf/syscall.c map = __bpf_map_inc_not_zero(map, true); map 2213 kernel/bpf/syscall.c map = ERR_PTR(-ENOENT); map 2216 kernel/bpf/syscall.c if (IS_ERR(map)) map 2217 kernel/bpf/syscall.c return PTR_ERR(map); map 2219 kernel/bpf/syscall.c fd = bpf_map_new_fd(map, f_flags); map 2221 kernel/bpf/syscall.c bpf_map_put_with_uref(map); map 2230 kernel/bpf/syscall.c const struct bpf_map *map; map 2234 kernel/bpf/syscall.c map = prog->aux->used_maps[i]; map 2235 kernel/bpf/syscall.c if (map == (void *)addr) { map 2237 kernel/bpf/syscall.c return map; map 2239 kernel/bpf/syscall.c if (!map->ops->map_direct_value_meta) map 2241 kernel/bpf/syscall.c if (!map->ops->map_direct_value_meta(map, addr, off)) { map 2243 kernel/bpf/syscall.c return map; map 2252 kernel/bpf/syscall.c const struct bpf_map *map; map 2282 kernel/bpf/syscall.c map = bpf_map_from_imm(prog, imm, &off, &type); map 2283 kernel/bpf/syscall.c if (map) { map 2285 kernel/bpf/syscall.c insns[i].imm = map->id; map 2601 kernel/bpf/syscall.c static int bpf_map_get_info_by_fd(struct bpf_map *map, map 2616 kernel/bpf/syscall.c info.type = map->map_type; map 2617 kernel/bpf/syscall.c info.id = map->id; map 2618 kernel/bpf/syscall.c info.key_size = map->key_size; map 2619 kernel/bpf/syscall.c info.value_size = map->value_size; map 2620 kernel/bpf/syscall.c info.max_entries = map->max_entries; map 2621 kernel/bpf/syscall.c info.map_flags = map->map_flags; map 2622 kernel/bpf/syscall.c memcpy(info.name, map->name, sizeof(map->name)); map 2624 kernel/bpf/syscall.c if (map->btf) { map 2625 kernel/bpf/syscall.c info.btf_id = btf_id(map->btf); map 2626 kernel/bpf/syscall.c info.btf_key_type_id = map->btf_key_type_id; map 2627 kernel/bpf/syscall.c info.btf_value_type_id = map->btf_value_type_id; map 2630 kernel/bpf/syscall.c if (bpf_map_is_dev_bound(map)) { map 2631 kernel/bpf/syscall.c err = bpf_map_offload_info_fill(&info, map); map 190 kernel/bpf/verifier.c const struct bpf_map *map, bool unpriv) map 194 kernel/bpf/verifier.c aux->map_state = (unsigned long)map | map 2151 kernel/bpf/verifier.c struct bpf_map *map = regs[regno].map_ptr; map 2152 kernel/bpf/verifier.c 
u32 cap = bpf_map_flags_to_cap(map); map 2156 kernel/bpf/verifier.c map->value_size, off, size); map 2162 kernel/bpf/verifier.c map->value_size, off, size); map 2174 kernel/bpf/verifier.c struct bpf_map *map = regs[regno].map_ptr; map 2177 kernel/bpf/verifier.c off + size > map->value_size) { map 2179 kernel/bpf/verifier.c map->value_size, off, size); map 3146 kernel/bpf/verifier.c struct bpf_map *map = reg->map_ptr; map 3159 kernel/bpf/verifier.c if (!map->btf) { map 3162 kernel/bpf/verifier.c map->name); map 3165 kernel/bpf/verifier.c if (!map_value_has_spin_lock(map)) { map 3166 kernel/bpf/verifier.c if (map->spin_lock_off == -E2BIG) map 3169 kernel/bpf/verifier.c map->name); map 3170 kernel/bpf/verifier.c else if (map->spin_lock_off == -ENOENT) map 3173 kernel/bpf/verifier.c map->name); map 3177 kernel/bpf/verifier.c map->name); map 3180 kernel/bpf/verifier.c if (map->spin_lock_off != val + reg->off) { map 3443 kernel/bpf/verifier.c struct bpf_map *map, int func_id) map 3445 kernel/bpf/verifier.c if (!map) map 3449 kernel/bpf/verifier.c switch (map->map_type) { map 3534 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) map 3544 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) map 3548 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) map 3553 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) map 3557 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_DEVMAP && map 3558 kernel/bpf/verifier.c map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && map 3559 kernel/bpf/verifier.c map->map_type != BPF_MAP_TYPE_CPUMAP && map 3560 kernel/bpf/verifier.c map->map_type != BPF_MAP_TYPE_XSKMAP) map 3566 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_SOCKMAP) map 3572 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_SOCKHASH) map 3576 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && map 3577 kernel/bpf/verifier.c map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) map 3581 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) map 3587 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_QUEUE && map 3588 kernel/bpf/verifier.c map->map_type != BPF_MAP_TYPE_STACK) map 3593 kernel/bpf/verifier.c if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) map 3603 kernel/bpf/verifier.c map->map_type, func_id_name(func_id), func_id); map 3922 kernel/bpf/verifier.c struct bpf_map *map = meta->map_ptr; map 3933 kernel/bpf/verifier.c if (map == NULL) { map 3942 kernel/bpf/verifier.c if ((map->map_flags & BPF_F_RDONLY_PROG) && map 6026 kernel/bpf/verifier.c struct bpf_map *map; map 6050 kernel/bpf/verifier.c map = env->used_maps[aux->map_index]; map 6052 kernel/bpf/verifier.c regs[insn->dst_reg].map_ptr = map; map 6057 kernel/bpf/verifier.c if (map_value_has_spin_lock(map)) map 7906 kernel/bpf/verifier.c static int check_map_prealloc(struct bpf_map *map) map 7908 kernel/bpf/verifier.c return (map->map_type != BPF_MAP_TYPE_HASH && map 7909 kernel/bpf/verifier.c map->map_type != BPF_MAP_TYPE_PERCPU_HASH && map 7910 kernel/bpf/verifier.c map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || map 7911 kernel/bpf/verifier.c !(map->map_flags & BPF_F_NO_PREALLOC); map 7928 kernel/bpf/verifier.c struct bpf_map *map, map 7938 kernel/bpf/verifier.c if (!check_map_prealloc(map)) { map 7942 kernel/bpf/verifier.c if (map->inner_map_meta && map 7943 kernel/bpf/verifier.c !check_map_prealloc(map->inner_map_meta)) { map 7951 kernel/bpf/verifier.c map_value_has_spin_lock(map)) { map 7956 
kernel/bpf/verifier.c if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) && map 7957 kernel/bpf/verifier.c !bpf_offload_prog_map_match(prog, map)) { map 7965 kernel/bpf/verifier.c static bool bpf_map_is_cgroup_storage(struct bpf_map *map) map 7967 kernel/bpf/verifier.c return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || map 7968 kernel/bpf/verifier.c map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); map 8000 kernel/bpf/verifier.c struct bpf_map *map; map 8028 kernel/bpf/verifier.c map = __bpf_map_get(f); map 8029 kernel/bpf/verifier.c if (IS_ERR(map)) { map 8032 kernel/bpf/verifier.c return PTR_ERR(map); map 8035 kernel/bpf/verifier.c err = check_map_prog_compatibility(env, map, env->prog); map 8043 kernel/bpf/verifier.c addr = (unsigned long)map; map 8053 kernel/bpf/verifier.c if (!map->ops->map_direct_value_addr) { map 8059 kernel/bpf/verifier.c err = map->ops->map_direct_value_addr(map, &addr, off); map 8062 kernel/bpf/verifier.c map->value_size, off); map 8076 kernel/bpf/verifier.c if (env->used_maps[j] == map) { map 8093 kernel/bpf/verifier.c map = bpf_map_inc(map, false); map 8094 kernel/bpf/verifier.c if (IS_ERR(map)) { map 8096 kernel/bpf/verifier.c return PTR_ERR(map); map 8100 kernel/bpf/verifier.c env->used_maps[env->used_map_cnt++] = map; map 8102 kernel/bpf/verifier.c if (bpf_map_is_cgroup_storage(map) && map 8103 kernel/bpf/verifier.c bpf_cgroup_storage_assign(env->prog, map)) { map 9128 kernel/bpf/verifier.c map)->index_mask); map 9178 kernel/bpf/verifier.c (void *(*)(struct bpf_map *map, void *key))NULL)); map 9180 kernel/bpf/verifier.c (int (*)(struct bpf_map *map, void *key))NULL)); map 9182 kernel/bpf/verifier.c (int (*)(struct bpf_map *map, void *key, void *value, map 9185 kernel/bpf/verifier.c (int (*)(struct bpf_map *map, void *value, map 9188 kernel/bpf/verifier.c (int (*)(struct bpf_map *map, void *value))NULL)); map 9190 kernel/bpf/verifier.c (int (*)(struct bpf_map *map, void *value))NULL)); map 13 kernel/bpf/xskmap.c struct bpf_map map; map 19 kernel/bpf/xskmap.c int xsk_map_inc(struct xsk_map *map) map 21 kernel/bpf/xskmap.c struct bpf_map *m = &map->map; map 27 kernel/bpf/xskmap.c void xsk_map_put(struct xsk_map *map) map 29 kernel/bpf/xskmap.c bpf_map_put(&map->map); map 32 kernel/bpf/xskmap.c static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map, map 42 kernel/bpf/xskmap.c err = xsk_map_inc(map); map 48 kernel/bpf/xskmap.c node->map = map; map 55 kernel/bpf/xskmap.c xsk_map_put(node->map); map 99 kernel/bpf/xskmap.c bpf_map_init_from_attr(&m->map, attr); map 102 kernel/bpf/xskmap.c cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *); map 106 kernel/bpf/xskmap.c err = bpf_map_charge_init(&m->map.memory, cost); map 119 kernel/bpf/xskmap.c m->xsk_map = bpf_map_area_alloc(m->map.max_entries * map 121 kernel/bpf/xskmap.c m->map.numa_node); map 124 kernel/bpf/xskmap.c return &m->map; map 129 kernel/bpf/xskmap.c bpf_map_charge_finish(&m->map.memory); map 135 kernel/bpf/xskmap.c static void xsk_map_free(struct bpf_map *map) map 137 kernel/bpf/xskmap.c struct xsk_map *m = container_of(map, struct xsk_map, map); map 139 kernel/bpf/xskmap.c bpf_clear_redirect_map(map); map 146 kernel/bpf/xskmap.c static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key) map 148 kernel/bpf/xskmap.c struct xsk_map *m = container_of(map, struct xsk_map, map); map 152 kernel/bpf/xskmap.c if (index >= m->map.max_entries) { map 157 kernel/bpf/xskmap.c if (index == m->map.max_entries - 1) map 163 kernel/bpf/xskmap.c struct 
xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key) map 165 kernel/bpf/xskmap.c struct xsk_map *m = container_of(map, struct xsk_map, map); map 168 kernel/bpf/xskmap.c if (key >= map->max_entries) map 175 kernel/bpf/xskmap.c int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, map 178 kernel/bpf/xskmap.c struct xsk_map *m = container_of(map, struct xsk_map, map); map 192 kernel/bpf/xskmap.c void __xsk_map_flush(struct bpf_map *map) map 194 kernel/bpf/xskmap.c struct xsk_map *m = container_of(map, struct xsk_map, map); map 204 kernel/bpf/xskmap.c static void *xsk_map_lookup_elem(struct bpf_map *map, void *key) map 207 kernel/bpf/xskmap.c return __xsk_map_lookup_elem(map, *(u32 *)key); map 210 kernel/bpf/xskmap.c static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key) map 215 kernel/bpf/xskmap.c static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value, map 218 kernel/bpf/xskmap.c struct xsk_map *m = container_of(map, struct xsk_map, map); map 227 kernel/bpf/xskmap.c if (unlikely(i >= m->map.max_entries)) map 280 kernel/bpf/xskmap.c static int xsk_map_delete_elem(struct bpf_map *map, void *key) map 282 kernel/bpf/xskmap.c struct xsk_map *m = container_of(map, struct xsk_map, map); map 286 kernel/bpf/xskmap.c if (k >= map->max_entries) map 299 kernel/bpf/xskmap.c void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, map 302 kernel/bpf/xskmap.c spin_lock_bh(&map->lock); map 307 kernel/bpf/xskmap.c spin_unlock_bh(&map->lock); map 448 kernel/irq/generic-chip.c .map = irq_map_generic_chip, map 550 kernel/irq/irqdomain.c if (domain->ops->map) { map 551 kernel/irq/irqdomain.c ret = domain->ops->map(domain, virq, hwirq); map 86 kernel/power/swap.c struct swap_map_page *map; map 952 kernel/power/swap.c if (handle->maps->map) map 953 kernel/power/swap.c free_page((unsigned long)handle->maps->map); map 988 kernel/power/swap.c tmp->map = (struct swap_map_page *) map 990 kernel/power/swap.c if (!tmp->map) { map 995 kernel/power/swap.c error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL); map 1000 kernel/power/swap.c offset = tmp->map->next_swap; map 1003 kernel/power/swap.c handle->cur = handle->maps->map; map 1024 kernel/power/swap.c free_page((unsigned long)handle->maps->map); map 1031 kernel/power/swap.c handle->cur = handle->maps->map; map 347 kernel/trace/bpf_trace.c get_map_perf_counter(struct bpf_map *map, u64 flags, map 350 kernel/trace/bpf_trace.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 359 kernel/trace/bpf_trace.c if (unlikely(index >= array->map.max_entries)) map 369 kernel/trace/bpf_trace.c BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags) map 374 kernel/trace/bpf_trace.c err = get_map_perf_counter(map, flags, &value, NULL, NULL); map 392 kernel/trace/bpf_trace.c BPF_CALL_4(bpf_perf_event_read_value, struct bpf_map *, map, u64, flags, map 399 kernel/trace/bpf_trace.c err = get_map_perf_counter(map, flags, &buf->counter, &buf->enabled, map 420 kernel/trace/bpf_trace.c __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, map 423 kernel/trace/bpf_trace.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 431 kernel/trace/bpf_trace.c if (unlikely(index >= array->map.max_entries)) map 459 kernel/trace/bpf_trace.c BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, map 488 kernel/trace/bpf_trace.c err = __bpf_perf_event_output(regs, map, flags, sd); map 513 kernel/trace/bpf_trace.c u64 
bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, map 546 kernel/trace/bpf_trace.c ret = __bpf_perf_event_output(regs, map, flags, sd); map 563 kernel/trace/bpf_trace.c BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx) map 565 kernel/trace/bpf_trace.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 568 kernel/trace/bpf_trace.c if (unlikely(idx >= array->map.max_entries)) map 791 kernel/trace/bpf_trace.c BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map, map 801 kernel/trace/bpf_trace.c return ____bpf_perf_event_output(regs, map, flags, data, size); map 815 kernel/trace/bpf_trace.c BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, map 825 kernel/trace/bpf_trace.c return bpf_get_stackid((unsigned long) regs, (unsigned long) map, map 970 kernel/trace/bpf_trace.c struct bpf_map *, map, u64, flags, void *, data, u64, size) map 979 kernel/trace/bpf_trace.c ret = ____bpf_perf_event_output(regs, map, flags, data, size); map 997 kernel/trace/bpf_trace.c struct bpf_map *, map, u64, flags) map 1007 kernel/trace/bpf_trace.c ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map, map 4173 kernel/trace/ftrace.c struct ftrace_func_map *map; map 4179 kernel/trace/ftrace.c map = (struct ftrace_func_map *)entry; map 4180 kernel/trace/ftrace.c return &map->data; map 4195 kernel/trace/ftrace.c struct ftrace_func_map *map; map 4201 kernel/trace/ftrace.c map = kmalloc(sizeof(*map), GFP_KERNEL); map 4202 kernel/trace/ftrace.c if (!map) map 4205 kernel/trace/ftrace.c map->entry.ip = ip; map 4206 kernel/trace/ftrace.c map->data = data; map 4208 kernel/trace/ftrace.c __add_hash_entry(&mapper->hash, &map->entry); map 4227 kernel/trace/ftrace.c struct ftrace_func_map *map; map 4234 kernel/trace/ftrace.c map = (struct ftrace_func_map *)entry; map 4235 kernel/trace/ftrace.c data = map->data; map 4255 kernel/trace/ftrace.c struct ftrace_func_map *map; map 4267 kernel/trace/ftrace.c map = (struct ftrace_func_map *)entry; map 4268 kernel/trace/ftrace.c free_func(map); map 154 kernel/trace/trace.c struct trace_eval_map map; map 2189 kernel/trace/trace.c unsigned map; map 2206 kernel/trace/trace.c map = savedcmd->map_pid_to_cmdline[pid]; map 2207 kernel/trace/trace.c if (map != NO_CMDLINE_MAP) map 2208 kernel/trace/trace.c strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN); map 5258 kernel/trace/trace.c if (!ptr->map.eval_string) { map 5318 kernel/trace/trace.c ptr->map.eval_string, ptr->map.eval_value, map 5319 kernel/trace/trace.c ptr->map.system); map 5361 kernel/trace/trace.c struct trace_eval_map **map; map 5397 kernel/trace/trace.c for (map = start; (unsigned long)map < (unsigned long)stop; map++) { map 5398 kernel/trace/trace.c map_array->map = **map; map 5421 kernel/trace/trace.c struct trace_eval_map **map; map 5426 kernel/trace/trace.c map = start; map 5428 kernel/trace/trace.c trace_event_eval_update(map, len); map 8727 kernel/trace/trace.c union trace_eval_map_item *map; map 8735 kernel/trace/trace.c map = trace_eval_maps; map 8737 kernel/trace/trace.c while (map) { map 8738 kernel/trace/trace.c if (map->head.mod == mod) map 8740 kernel/trace/trace.c map = trace_eval_jmp_to_tail(map); map 8741 kernel/trace/trace.c last = &map->tail.next; map 8742 kernel/trace/trace.c map = map->tail.next; map 8744 kernel/trace/trace.c if (!map) map 8747 kernel/trace/trace.c *last = trace_eval_jmp_to_tail(map)->tail.next; map 8748 kernel/trace/trace.c kfree(map); map 1955 kernel/trace/trace.h void 
trace_event_eval_update(struct trace_eval_map **map, int len); map 1958 kernel/trace/trace.h static inline void trace_event_eval_update(struct trace_eval_map **map, int len) { } map 303 kernel/trace/trace_entries.h __field_struct( struct mmiotrace_map, map ) map 304 kernel/trace/trace_entries.h __field_desc( resource_size_t, map, phys ) map 305 kernel/trace/trace_entries.h __field_desc( unsigned long, map, virt ) map 306 kernel/trace/trace_entries.h __field_desc( unsigned long, map, len ) map 307 kernel/trace/trace_entries.h __field_desc( int, map, map_id ) map 308 kernel/trace/trace_entries.h __field_desc( unsigned char, map, opcode ) map 2104 kernel/trace/trace_events.c static char *eval_replace(char *ptr, struct trace_eval_map *map, int len) map 2110 kernel/trace/trace_events.c elen = snprintf(ptr, 0, "%ld", map->eval_value); map 2115 kernel/trace/trace_events.c snprintf(ptr, elen + 1, "%ld", map->eval_value); map 2127 kernel/trace/trace_events.c struct trace_eval_map *map) map 2131 kernel/trace/trace_events.c int len = strlen(map->eval_string); map 2162 kernel/trace/trace_events.c if (strncmp(map->eval_string, ptr, len) == 0 && map 2164 kernel/trace/trace_events.c ptr = eval_replace(ptr, map, len); map 2202 kernel/trace/trace_events.c void trace_event_eval_update(struct trace_eval_map **map, int len) map 2230 kernel/trace/trace_events.c if (call->class->system == map[i]->system) { map 2236 kernel/trace/trace_events.c update_event_printk(call, map[i]); map 358 kernel/trace/trace_events_hist.c struct tracing_map *map; map 1968 kernel/trace/trace_events_hist.c var_elt = tracing_map_lookup(var_data->map, key); map 2284 kernel/trace/trace_events_hist.c struct hist_trigger_data *hist_data = elt->map->private_data; map 3527 kernel/trace/trace_events_hist.c idx = tracing_map_add_var(hist_data->map); map 5167 kernel/trace/trace_events_hist.c tracing_map_destroy(hist_data->map); map 5178 kernel/trace/trace_events_hist.c struct tracing_map *map = hist_data->map; map 5200 kernel/trace/trace_events_hist.c idx = tracing_map_add_key_field(map, map 5204 kernel/trace/trace_events_hist.c idx = tracing_map_add_sum_field(map); map 5210 kernel/trace/trace_events_hist.c idx = tracing_map_add_var(map); map 5253 kernel/trace/trace_events_hist.c hist_data->map = tracing_map_create(map_bits, hist_data->key_size, map 5255 kernel/trace/trace_events_hist.c if (IS_ERR(hist_data->map)) { map 5256 kernel/trace/trace_events_hist.c ret = PTR_ERR(hist_data->map); map 5257 kernel/trace/trace_events_hist.c hist_data->map = NULL; map 5396 kernel/trace/trace_events_hist.c elt = tracing_map_insert(hist_data->map, key); map 5543 kernel/trace/trace_events_hist.c struct tracing_map *map = hist_data->map; map 5546 kernel/trace/trace_events_hist.c n_entries = tracing_map_sort_entries(map, hist_data->sort_keys, map 5583 kernel/trace/trace_events_hist.c (u64)atomic64_read(&hist_data->map->hits), map 5584 kernel/trace/trace_events_hist.c n_entries, (u64)atomic64_read(&hist_data->map->drops)); map 5748 kernel/trace/trace_events_hist.c seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits)); map 5874 kernel/trace/trace_events_hist.c tracing_map_clear(hist_data->map); map 6344 kernel/trace/trace_events_hist.c ret = tracing_map_init(hist_data->map); map 223 kernel/trace/trace_mmiotrace.c m = &field->map; map 327 kernel/trace/trace_mmiotrace.c struct mmiotrace_map *map) map 342 kernel/trace/trace_mmiotrace.c entry->map = *map; map 348 kernel/trace/trace_mmiotrace.c void mmio_trace_mapping(struct mmiotrace_map *map) map 355 
kernel/trace/trace_mmiotrace.c __trace_mmiotrace_map(tr, data, map); map 201 kernel/trace/tracing_map.c static int tracing_map_add_field(struct tracing_map *map, map 206 kernel/trace/tracing_map.c if (map->n_fields < TRACING_MAP_FIELDS_MAX) { map 207 kernel/trace/tracing_map.c ret = map->n_fields; map 208 kernel/trace/tracing_map.c map->fields[map->n_fields++].cmp_fn = cmp_fn; map 226 kernel/trace/tracing_map.c int tracing_map_add_sum_field(struct tracing_map *map) map 228 kernel/trace/tracing_map.c return tracing_map_add_field(map, tracing_map_cmp_atomic64); map 243 kernel/trace/tracing_map.c int tracing_map_add_var(struct tracing_map *map) map 247 kernel/trace/tracing_map.c if (map->n_vars < TRACING_MAP_VARS_MAX) map 248 kernel/trace/tracing_map.c ret = map->n_vars++; map 269 kernel/trace/tracing_map.c int tracing_map_add_key_field(struct tracing_map *map, map 274 kernel/trace/tracing_map.c int idx = tracing_map_add_field(map, cmp_fn); map 279 kernel/trace/tracing_map.c map->fields[idx].offset = offset; map 281 kernel/trace/tracing_map.c map->key_idx[map->n_keys++] = idx; map 359 kernel/trace/tracing_map.c for (i = 0; i < elt->map->n_fields; i++) map 363 kernel/trace/tracing_map.c for (i = 0; i < elt->map->n_vars; i++) { map 368 kernel/trace/tracing_map.c if (elt->map->ops && elt->map->ops->elt_clear) map 369 kernel/trace/tracing_map.c elt->map->ops->elt_clear(elt); map 378 kernel/trace/tracing_map.c for (i = 0; i < elt->map->n_fields; i++) { map 379 kernel/trace/tracing_map.c elt->fields[i].cmp_fn = elt->map->fields[i].cmp_fn; map 382 kernel/trace/tracing_map.c elt->fields[i].offset = elt->map->fields[i].offset; map 391 kernel/trace/tracing_map.c if (elt->map->ops && elt->map->ops->elt_free) map 392 kernel/trace/tracing_map.c elt->map->ops->elt_free(elt); map 400 kernel/trace/tracing_map.c static struct tracing_map_elt *tracing_map_elt_alloc(struct tracing_map *map) map 409 kernel/trace/tracing_map.c elt->map = map; map 411 kernel/trace/tracing_map.c elt->key = kzalloc(map->key_size, GFP_KERNEL); map 417 kernel/trace/tracing_map.c elt->fields = kcalloc(map->n_fields, sizeof(*elt->fields), GFP_KERNEL); map 423 kernel/trace/tracing_map.c elt->vars = kcalloc(map->n_vars, sizeof(*elt->vars), GFP_KERNEL); map 429 kernel/trace/tracing_map.c elt->var_set = kcalloc(map->n_vars, sizeof(*elt->var_set), GFP_KERNEL); map 437 kernel/trace/tracing_map.c if (map->ops && map->ops->elt_alloc) { map 438 kernel/trace/tracing_map.c err = map->ops->elt_alloc(elt); map 449 kernel/trace/tracing_map.c static struct tracing_map_elt *get_free_elt(struct tracing_map *map) map 454 kernel/trace/tracing_map.c idx = atomic_inc_return(&map->next_elt); map 455 kernel/trace/tracing_map.c if (idx < map->max_elts) { map 456 kernel/trace/tracing_map.c elt = *(TRACING_MAP_ELT(map->elts, idx)); map 457 kernel/trace/tracing_map.c if (map->ops && map->ops->elt_init) map 458 kernel/trace/tracing_map.c map->ops->elt_init(elt); map 464 kernel/trace/tracing_map.c static void tracing_map_free_elts(struct tracing_map *map) map 468 kernel/trace/tracing_map.c if (!map->elts) map 471 kernel/trace/tracing_map.c for (i = 0; i < map->max_elts; i++) { map 472 kernel/trace/tracing_map.c tracing_map_elt_free(*(TRACING_MAP_ELT(map->elts, i))); map 473 kernel/trace/tracing_map.c *(TRACING_MAP_ELT(map->elts, i)) = NULL; map 476 kernel/trace/tracing_map.c tracing_map_array_free(map->elts); map 477 kernel/trace/tracing_map.c map->elts = NULL; map 480 kernel/trace/tracing_map.c static int tracing_map_alloc_elts(struct tracing_map *map) map 484 
kernel/trace/tracing_map.c map->elts = tracing_map_array_alloc(map->max_elts, map 486 kernel/trace/tracing_map.c if (!map->elts) map 489 kernel/trace/tracing_map.c for (i = 0; i < map->max_elts; i++) { map 490 kernel/trace/tracing_map.c *(TRACING_MAP_ELT(map->elts, i)) = tracing_map_elt_alloc(map); map 491 kernel/trace/tracing_map.c if (IS_ERR(*(TRACING_MAP_ELT(map->elts, i)))) { map 492 kernel/trace/tracing_map.c *(TRACING_MAP_ELT(map->elts, i)) = NULL; map 493 kernel/trace/tracing_map.c tracing_map_free_elts(map); map 513 kernel/trace/tracing_map.c __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only) map 520 kernel/trace/tracing_map.c key_hash = jhash(key, map->key_size, 0); map 523 kernel/trace/tracing_map.c idx = key_hash >> (32 - (map->map_bits + 1)); map 526 kernel/trace/tracing_map.c idx &= (map->map_size - 1); map 527 kernel/trace/tracing_map.c entry = TRACING_MAP_ENTRY(map->map, idx); map 533 kernel/trace/tracing_map.c keys_match(key, val->key, map->key_size)) { map 535 kernel/trace/tracing_map.c atomic64_inc(&map->hits); map 551 kernel/trace/tracing_map.c if (dup_try > map->map_size) { map 552 kernel/trace/tracing_map.c atomic64_inc(&map->drops); map 566 kernel/trace/tracing_map.c elt = get_free_elt(map); map 568 kernel/trace/tracing_map.c atomic64_inc(&map->drops); map 573 kernel/trace/tracing_map.c memcpy(elt->key, key, map->key_size); map 575 kernel/trace/tracing_map.c atomic64_inc(&map->hits); map 631 kernel/trace/tracing_map.c struct tracing_map_elt *tracing_map_insert(struct tracing_map *map, void *key) map 633 kernel/trace/tracing_map.c return __tracing_map_insert(map, key, false); map 653 kernel/trace/tracing_map.c struct tracing_map_elt *tracing_map_lookup(struct tracing_map *map, void *key) map 655 kernel/trace/tracing_map.c return __tracing_map_insert(map, key, true); map 668 kernel/trace/tracing_map.c void tracing_map_destroy(struct tracing_map *map) map 670 kernel/trace/tracing_map.c if (!map) map 673 kernel/trace/tracing_map.c tracing_map_free_elts(map); map 675 kernel/trace/tracing_map.c tracing_map_array_free(map->map); map 676 kernel/trace/tracing_map.c kfree(map); map 690 kernel/trace/tracing_map.c void tracing_map_clear(struct tracing_map *map) map 694 kernel/trace/tracing_map.c atomic_set(&map->next_elt, -1); map 695 kernel/trace/tracing_map.c atomic64_set(&map->hits, 0); map 696 kernel/trace/tracing_map.c atomic64_set(&map->drops, 0); map 698 kernel/trace/tracing_map.c tracing_map_array_clear(map->map); map 700 kernel/trace/tracing_map.c for (i = 0; i < map->max_elts; i++) map 701 kernel/trace/tracing_map.c tracing_map_elt_clear(*(TRACING_MAP_ELT(map->elts, i))); map 704 kernel/trace/tracing_map.c static void set_sort_key(struct tracing_map *map, map 707 kernel/trace/tracing_map.c map->sort_key = *sort_key; map 765 kernel/trace/tracing_map.c struct tracing_map *map; map 772 kernel/trace/tracing_map.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 773 kernel/trace/tracing_map.c if (!map) map 776 kernel/trace/tracing_map.c map->map_bits = map_bits; map 777 kernel/trace/tracing_map.c map->max_elts = (1 << map_bits); map 778 kernel/trace/tracing_map.c atomic_set(&map->next_elt, -1); map 780 kernel/trace/tracing_map.c map->map_size = (1 << (map_bits + 1)); map 781 kernel/trace/tracing_map.c map->ops = ops; map 783 kernel/trace/tracing_map.c map->private_data = private_data; map 785 kernel/trace/tracing_map.c map->map = tracing_map_array_alloc(map->map_size, map 787 kernel/trace/tracing_map.c if (!map->map) map 790 kernel/trace/tracing_map.c 
map->key_size = key_size; map 792 kernel/trace/tracing_map.c map->key_idx[i] = -1; map 794 kernel/trace/tracing_map.c return map; map 796 kernel/trace/tracing_map.c tracing_map_destroy(map); map 797 kernel/trace/tracing_map.c map = ERR_PTR(-ENOMEM); map 821 kernel/trace/tracing_map.c int tracing_map_init(struct tracing_map *map) map 825 kernel/trace/tracing_map.c if (map->n_fields < 2) map 828 kernel/trace/tracing_map.c err = tracing_map_alloc_elts(map); map 832 kernel/trace/tracing_map.c tracing_map_clear(map); map 842 kernel/trace/tracing_map.c if (memcmp((*a)->key, (*b)->key, (*a)->elt->map->key_size)) map 861 kernel/trace/tracing_map.c sort_key = &elt_a->map->sort_key; map 889 kernel/trace/tracing_map.c sort_key = &elt_a->map->sort_key; map 976 kernel/trace/tracing_map.c static bool is_key(struct tracing_map *map, unsigned int field_idx) map 980 kernel/trace/tracing_map.c for (i = 0; i < map->n_keys; i++) map 981 kernel/trace/tracing_map.c if (map->key_idx[i] == field_idx) map 986 kernel/trace/tracing_map.c static void sort_secondary(struct tracing_map *map, map 998 kernel/trace/tracing_map.c if (is_key(map, primary_key->field_idx)) map 1003 kernel/trace/tracing_map.c if (is_key(map, secondary_key->field_idx)) map 1024 kernel/trace/tracing_map.c set_sort_key(map, secondary_key); map 1028 kernel/trace/tracing_map.c set_sort_key(map, primary_key); map 1059 kernel/trace/tracing_map.c int tracing_map_sort_entries(struct tracing_map *map, map 1069 kernel/trace/tracing_map.c entries = vmalloc(array_size(sizeof(sort_entry), map->max_elts)); map 1073 kernel/trace/tracing_map.c for (i = 0, n_entries = 0; i < map->map_size; i++) { map 1076 kernel/trace/tracing_map.c entry = TRACING_MAP_ENTRY(map->map, i); map 1099 kernel/trace/tracing_map.c detect_dups(entries, n_entries, map->key_size); map 1101 kernel/trace/tracing_map.c if (is_key(map, sort_keys[0].field_idx)) map 1106 kernel/trace/tracing_map.c set_sort_key(map, &sort_keys[0]); map 1112 kernel/trace/tracing_map.c sort_secondary(map, map 139 kernel/trace/tracing_map.h struct tracing_map *map; map 190 kernel/trace/tracing_map.h struct tracing_map_array *map; map 245 kernel/trace/tracing_map.h extern int tracing_map_init(struct tracing_map *map); map 247 kernel/trace/tracing_map.h extern int tracing_map_add_sum_field(struct tracing_map *map); map 248 kernel/trace/tracing_map.h extern int tracing_map_add_var(struct tracing_map *map); map 249 kernel/trace/tracing_map.h extern int tracing_map_add_key_field(struct tracing_map *map, map 253 kernel/trace/tracing_map.h extern void tracing_map_destroy(struct tracing_map *map); map 254 kernel/trace/tracing_map.h extern void tracing_map_clear(struct tracing_map *map); map 257 kernel/trace/tracing_map.h tracing_map_insert(struct tracing_map *map, void *key); map 259 kernel/trace/tracing_map.h tracing_map_lookup(struct tracing_map *map, void *key); map 275 kernel/trace/tracing_map.h extern void tracing_map_set_field_descr(struct tracing_map *map, map 280 kernel/trace/tracing_map.h tracing_map_sort_entries(struct tracing_map *map, map 29 kernel/user_namespace.c struct uid_gid_map *map); map 254 kernel/user_namespace.c map_id_range_down_max(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) map 262 kernel/user_namespace.c return bsearch(&key, map->forward, extents, map 272 kernel/user_namespace.c map_id_range_down_base(unsigned extents, struct uid_gid_map *map, u32 id, u32 count) map 281 kernel/user_namespace.c first = map->extent[idx].first; map 282 kernel/user_namespace.c last = first + 
map->extent[idx].count - 1; map 285 kernel/user_namespace.c return &map->extent[idx]; map 290 kernel/user_namespace.c static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count) map 293 kernel/user_namespace.c unsigned extents = map->nr_extents; map 297 kernel/user_namespace.c extent = map_id_range_down_base(extents, map, id, count); map 299 kernel/user_namespace.c extent = map_id_range_down_max(extents, map, id, count); map 310 kernel/user_namespace.c static u32 map_id_down(struct uid_gid_map *map, u32 id) map 312 kernel/user_namespace.c return map_id_range_down(map, id, 1); map 321 kernel/user_namespace.c map_id_up_base(unsigned extents, struct uid_gid_map *map, u32 id) map 328 kernel/user_namespace.c first = map->extent[idx].lower_first; map 329 kernel/user_namespace.c last = first + map->extent[idx].count - 1; map 331 kernel/user_namespace.c return &map->extent[idx]; map 341 kernel/user_namespace.c map_id_up_max(unsigned extents, struct uid_gid_map *map, u32 id) map 349 kernel/user_namespace.c return bsearch(&key, map->reverse, extents, map 353 kernel/user_namespace.c static u32 map_id_up(struct uid_gid_map *map, u32 id) map 356 kernel/user_namespace.c unsigned extents = map->nr_extents; map 360 kernel/user_namespace.c extent = map_id_up_base(extents, map, id); map 362 kernel/user_namespace.c extent = map_id_up_max(extents, map, id); map 641 kernel/user_namespace.c struct uid_gid_map *map) map 644 kernel/user_namespace.c unsigned extents = map->nr_extents; map 651 kernel/user_namespace.c return &map->extent[pos]; map 653 kernel/user_namespace.c return &map->forward[pos]; map 753 kernel/user_namespace.c static int insert_extent(struct uid_gid_map *map, struct uid_gid_extent *extent) map 757 kernel/user_namespace.c if (map->nr_extents == UID_GID_MAP_MAX_BASE_EXTENTS) { map 770 kernel/user_namespace.c memcpy(forward, map->extent, map 771 kernel/user_namespace.c map->nr_extents * sizeof(map->extent[0])); map 773 kernel/user_namespace.c map->forward = forward; map 774 kernel/user_namespace.c map->reverse = NULL; map 777 kernel/user_namespace.c if (map->nr_extents < UID_GID_MAP_MAX_BASE_EXTENTS) map 778 kernel/user_namespace.c dest = &map->extent[map->nr_extents]; map 780 kernel/user_namespace.c dest = &map->forward[map->nr_extents]; map 783 kernel/user_namespace.c map->nr_extents++; map 821 kernel/user_namespace.c static int sort_idmaps(struct uid_gid_map *map) map 823 kernel/user_namespace.c if (map->nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) map 827 kernel/user_namespace.c sort(map->forward, map->nr_extents, sizeof(struct uid_gid_extent), map 831 kernel/user_namespace.c map->reverse = kmemdup(map->forward, map 832 kernel/user_namespace.c map->nr_extents * sizeof(struct uid_gid_extent), map 834 kernel/user_namespace.c if (!map->reverse) map 838 kernel/user_namespace.c sort(map->reverse, map->nr_extents, sizeof(struct uid_gid_extent), map 847 kernel/user_namespace.c struct uid_gid_map *map, map 892 kernel/user_namespace.c if (map->nr_extents != 0) map 1007 kernel/user_namespace.c memcpy(map->extent, new_map.extent, map 1010 kernel/user_namespace.c map->forward = new_map.forward; map 1011 kernel/user_namespace.c map->reverse = new_map.reverse; map 1014 kernel/user_namespace.c map->nr_extents = new_map.nr_extents; map 1022 kernel/user_namespace.c map->forward = NULL; map 1023 kernel/user_namespace.c map->reverse = NULL; map 1024 kernel/user_namespace.c map->nr_extents = 0; map 270 lib/bitmap.c void __bitmap_set(unsigned long *map, unsigned int start, int len) map 272 
lib/bitmap.c unsigned long *p = map + BIT_WORD(start); map 291 lib/bitmap.c void __bitmap_clear(unsigned long *map, unsigned int start, int len) map 293 lib/bitmap.c unsigned long *p = map + BIT_WORD(start); map 325 lib/bitmap.c unsigned long bitmap_find_next_zero_area_off(unsigned long *map, map 334 lib/bitmap.c index = find_next_zero_bit(map, size, start); map 342 lib/bitmap.c i = find_next_bit(map, end, index); map 84 lib/genalloc.c static int bitmap_set_ll(unsigned long *map, int start, int nr) map 86 lib/genalloc.c unsigned long *p = map + BIT_WORD(start); map 119 lib/genalloc.c static int bitmap_clear_ll(unsigned long *map, int start, int nr) map 121 lib/genalloc.c unsigned long *p = map + BIT_WORD(start); map 643 lib/genalloc.c unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, map 647 lib/genalloc.c return bitmap_find_next_zero_area(map, size, start, nr, 0); map 661 lib/genalloc.c unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size, map 674 lib/genalloc.c return bitmap_find_next_zero_area_off(map, size, start, nr, map 688 lib/genalloc.c unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size, map 703 lib/genalloc.c start_bit = bitmap_find_next_zero_area(map, size, map 722 lib/genalloc.c unsigned long gen_pool_first_fit_order_align(unsigned long *map, map 729 lib/genalloc.c return bitmap_find_next_zero_area(map, size, start, nr, align_mask); map 746 lib/genalloc.c unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, map 754 lib/genalloc.c index = bitmap_find_next_zero_area(map, size, start, nr, 0); map 757 lib/genalloc.c int next_bit = find_next_bit(map, size, index + nr); map 764 lib/genalloc.c index = bitmap_find_next_zero_area(map, size, map 9 lib/iommu-helper.c unsigned long iommu_area_alloc(unsigned long *map, unsigned long size, map 19 lib/iommu-helper.c index = bitmap_find_next_zero_area(map, size, start, nr, align_mask); map 25 lib/iommu-helper.c bitmap_set(map, index, nr); map 21 lib/sbitmap.c spin_lock_irqsave(&sb->map[index].swap_lock, flags); map 23 lib/sbitmap.c if (!sb->map[index].cleared) map 29 lib/sbitmap.c mask = xchg(&sb->map[index].cleared, 0); map 35 lib/sbitmap.c val = sb->map[index].word; map 36 lib/sbitmap.c } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val); map 40 lib/sbitmap.c spin_unlock_irqrestore(&sb->map[index].swap_lock, flags); map 72 lib/sbitmap.c sb->map = NULL; map 76 lib/sbitmap.c sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node); map 77 lib/sbitmap.c if (!sb->map) map 81 lib/sbitmap.c sb->map[i].depth = min(depth, bits_per_word); map 82 lib/sbitmap.c depth -= sb->map[i].depth; map 83 lib/sbitmap.c spin_lock_init(&sb->map[i].swap_lock); map 101 lib/sbitmap.c sb->map[i].depth = min(depth, bits_per_word); map 102 lib/sbitmap.c depth -= sb->map[i].depth; map 145 lib/sbitmap.c nr = __sbitmap_get_word(&sb->map[index].word, map 146 lib/sbitmap.c sb->map[index].depth, alloc_hint, map 202 lib/sbitmap.c nr = __sbitmap_get_word(&sb->map[index].word, map 203 lib/sbitmap.c min(sb->map[index].depth, shallow_depth), map 232 lib/sbitmap.c if (sb->map[i].word & ~sb->map[i].cleared) map 244 lib/sbitmap.c const struct sbitmap_word *word = &sb->map[i]; map 261 lib/sbitmap.c const struct sbitmap_word *word = &sb->map[i]; map 311 lib/sbitmap.c unsigned long word = READ_ONCE(sb->map[i].word); map 312 lib/sbitmap.c unsigned int word_bits = READ_ONCE(sb->map[i].depth); map 190 mm/frontswap.c void __frontswap_init(unsigned type, unsigned long *map) map 201 
mm/frontswap.c if (WARN_ON(!map)) map 208 mm/frontswap.c frontswap_map_set(sis, map); map 771 mm/hugetlb.c static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) map 777 mm/hugetlb.c HPAGE_RESV_MASK) | (unsigned long)map); map 351 mm/memcontrol.c memset(new->map, (int)0xff, old_size); map 352 mm/memcontrol.c memset((void *)new->map + old_size, 0, size - old_size); map 364 mm/memcontrol.c struct memcg_shrinker_map *map; map 372 mm/memcontrol.c map = rcu_dereference_protected(pn->shrinker_map, true); map 373 mm/memcontrol.c if (map) map 374 mm/memcontrol.c kvfree(map); map 381 mm/memcontrol.c struct memcg_shrinker_map *map; map 390 mm/memcontrol.c map = kvzalloc(sizeof(*map) + size, GFP_KERNEL); map 391 mm/memcontrol.c if (!map) { map 396 mm/memcontrol.c rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_map, map); map 436 mm/memcontrol.c struct memcg_shrinker_map *map; map 439 mm/memcontrol.c map = rcu_dereference(memcg->nodeinfo[nid]->shrinker_map); map 442 mm/memcontrol.c set_bit(shrinker_id, map->map); map 76 mm/memremap.c #define for_each_device_pfn(pfn, map) \ map 77 mm/memremap.c for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn)) map 6835 mm/page_alloc.c struct page *map; map 6845 mm/page_alloc.c map = memblock_alloc_node(size, SMP_CACHE_BYTES, map 6847 mm/page_alloc.c if (!map) map 6850 mm/page_alloc.c pgdat->node_mem_map = map + offset; map 1131 mm/percpu.c static unsigned long pcpu_find_zero_area(unsigned long *map, map 1141 mm/percpu.c index = find_next_zero_bit(map, size, start); map 1150 mm/percpu.c i = find_next_bit(map, end, index); map 1426 mm/slab.c static void slab_kernel_map(struct kmem_cache *cachep, void *objp, int map) map 1431 mm/slab.c kernel_map_pages(virt_to_page(objp), cachep->size / PAGE_SIZE, map); map 1436 mm/slab.c int map) {} map 450 mm/slub.c static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map) map 456 mm/slub.c set_bit(slab_index(p, s, addr), map); map 3696 mm/slub.c unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC); map 3697 mm/slub.c if (!map) map 3702 mm/slub.c get_map(s, page, map); map 3705 mm/slub.c if (!test_bit(slab_index(p, s, addr), map)) { map 3711 mm/slub.c bitmap_free(map); map 4406 mm/slub.c unsigned long *map) map 4416 mm/slub.c bitmap_zero(map, page->objects); map 4418 mm/slub.c get_map(s, page, map); map 4420 mm/slub.c if (test_bit(slab_index(p, s, addr), map)) map 4426 mm/slub.c if (!test_bit(slab_index(p, s, addr), map)) map 4433 mm/slub.c unsigned long *map) map 4436 mm/slub.c validate_slab(s, page, map); map 4441 mm/slub.c struct kmem_cache_node *n, unsigned long *map) map 4450 mm/slub.c validate_slab_slab(s, page, map); map 4461 mm/slub.c validate_slab_slab(s, page, map); map 4478 mm/slub.c unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL); map 4480 mm/slub.c if (!map) map 4485 mm/slub.c count += validate_slab_node(s, n, map); map 4486 mm/slub.c bitmap_free(map); map 4617 mm/slub.c unsigned long *map) map 4622 mm/slub.c bitmap_zero(map, page->objects); map 4623 mm/slub.c get_map(s, page, map); map 4626 mm/slub.c if (!test_bit(slab_index(p, s, addr), map)) map 4638 mm/slub.c unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL); map 4640 mm/slub.c if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location), map 4642 mm/slub.c bitmap_free(map); map 4657 mm/slub.c process_slab(&t, s, page, alloc, map); map 4659 mm/slub.c process_slab(&t, s, page, alloc, map); map 4708 mm/slub.c bitmap_free(map); map 222 mm/sparse.c static 
void subsection_mask_set(unsigned long *map, unsigned long pfn, map 228 mm/sparse.c bitmap_set(map, idx, end - idx + 1); map 455 mm/sparse.c struct page *map = sparse_buffer_alloc(size); map 458 mm/sparse.c if (map) map 459 mm/sparse.c return map; map 461 mm/sparse.c map = memblock_alloc_try_nid(size, map 464 mm/sparse.c if (!map) map 468 mm/sparse.c return map; map 533 mm/sparse.c struct page *map; map 548 mm/sparse.c map = __populate_section_memmap(pfn, PAGES_PER_SECTION, map 550 mm/sparse.c if (!map) { map 557 mm/sparse.c sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage, map 740 mm/sparse.c DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 }; map 749 mm/sparse.c subsection_mask_set(map, pfn, nr_pages); map 751 mm/sparse.c bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION); map 753 mm/sparse.c if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION), map 775 mm/sparse.c bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION); map 812 mm/sparse.c DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 }; map 819 mm/sparse.c subsection_mask_set(map, pfn, nr_pages); map 829 mm/sparse.c if (bitmap_empty(map, SUBSECTIONS_PER_SECTION)) map 831 mm/sparse.c else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION)) map 834 mm/sparse.c bitmap_or(subsection_map, map, subsection_map, map 10 mm/swap_cgroup.c struct page **map; map 51 mm/swap_cgroup.c ctrl->map[idx] = page; map 60 mm/swap_cgroup.c __free_page(ctrl->map[idx]); map 71 mm/swap_cgroup.c mappage = ctrl->map[offset / SC_PER_PAGE]; map 187 mm/swap_cgroup.c ctrl->map = array; map 191 mm/swap_cgroup.c ctrl->map = NULL; map 208 mm/swap_cgroup.c struct page **map; map 217 mm/swap_cgroup.c map = ctrl->map; map 219 mm/swap_cgroup.c ctrl->map = NULL; map 223 mm/swap_cgroup.c if (map) { map 225 mm/swap_cgroup.c struct page *page = map[i]; map 231 mm/swap_cgroup.c vfree(map); map 934 mm/swapfile.c unsigned char *map; map 954 mm/swapfile.c map = si->swap_map + offset; map 956 mm/swapfile.c map[i] = SWAP_HAS_CACHE; map 1331 mm/swapfile.c unsigned char *map; map 1343 mm/swapfile.c map = si->swap_map + offset; map 1345 mm/swapfile.c val = map[i]; map 1503 mm/swapfile.c unsigned char *map; map 1526 mm/swapfile.c map = kmap_atomic(page); map 1527 mm/swapfile.c tmp_count = map[offset]; map 1528 mm/swapfile.c kunmap_atomic(map); map 1542 mm/swapfile.c unsigned char *map = si->swap_map; map 1550 mm/swapfile.c if (swap_count(map[roffset])) map 1555 mm/swapfile.c if (swap_count(map[offset + i])) { map 1588 mm/swapfile.c unsigned char *map = NULL; map 1612 mm/swapfile.c map = si->swap_map; map 1616 mm/swapfile.c if (map) map 1621 mm/swapfile.c if (map) { map 1622 mm/swapfile.c swapcount = swap_count(map[offset + i]); map 3589 mm/swapfile.c unsigned char *map; map 3598 mm/swapfile.c map = kmap_atomic(list_page) + offset; map 3599 mm/swapfile.c count = *map; map 3600 mm/swapfile.c kunmap_atomic(map); map 3638 mm/swapfile.c unsigned char *map; map 3650 mm/swapfile.c map = kmap_atomic(page) + offset; map 3659 mm/swapfile.c while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { map 3660 mm/swapfile.c kunmap_atomic(map); map 3663 mm/swapfile.c map = kmap_atomic(page) + offset; map 3665 mm/swapfile.c if (*map == SWAP_CONT_MAX) { map 3666 mm/swapfile.c kunmap_atomic(map); map 3672 mm/swapfile.c map = kmap_atomic(page) + offset; map 3673 mm/swapfile.c init_map: *map = 0; /* we didn't zero the page */ map 3675 mm/swapfile.c *map += 1; map 3676 mm/swapfile.c kunmap_atomic(map); map 3679 mm/swapfile.c 
map = kmap_atomic(page) + offset; map 3680 mm/swapfile.c *map = COUNT_CONTINUED; map 3681 mm/swapfile.c kunmap_atomic(map); map 3691 mm/swapfile.c while (*map == COUNT_CONTINUED) { map 3692 mm/swapfile.c kunmap_atomic(map); map 3695 mm/swapfile.c map = kmap_atomic(page) + offset; map 3697 mm/swapfile.c BUG_ON(*map == 0); map 3698 mm/swapfile.c *map -= 1; map 3699 mm/swapfile.c if (*map == 0) map 3701 mm/swapfile.c kunmap_atomic(map); map 3704 mm/swapfile.c map = kmap_atomic(page) + offset; map 3705 mm/swapfile.c *map = SWAP_CONT_MAX | count; map 3707 mm/swapfile.c kunmap_atomic(map); map 2771 mm/vmalloc.c void *map = kmap_atomic(p); map 2772 mm/vmalloc.c memcpy(buf, map + offset, length); map 2773 mm/vmalloc.c kunmap_atomic(map); map 2810 mm/vmalloc.c void *map = kmap_atomic(p); map 2811 mm/vmalloc.c memcpy(map + offset, buf, length); map 2812 mm/vmalloc.c kunmap_atomic(map); map 596 mm/vmscan.c struct memcg_shrinker_map *map; map 606 mm/vmscan.c map = rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_map, map 608 mm/vmscan.c if (unlikely(!map)) map 611 mm/vmscan.c for_each_set_bit(i, map->map, shrinker_nr_max) { map 622 mm/vmscan.c clear_bit(i, map->map); map 633 mm/vmscan.c clear_bit(i, map->map); map 1553 mm/z3fold.c .map = z3fold_zpool_map, map 212 mm/zbud.c .map = zbud_zpool_map, map 347 mm/zpool.c return zpool->driver->map(zpool->pool, handle, mapmode); map 453 mm/zsmalloc.c .map = zs_zpool_map, map 173 net/atm/proc.c static const char *const map[] = { ATM_VS2TXT_MAP }; map 175 net/atm/proc.c return map[ATM_VF2VS(vcc->flags)]; map 109 net/ceph/crush/crush.c void crush_destroy(struct crush_map *map) map 112 net/ceph/crush/crush.c if (map->buckets) { map 114 net/ceph/crush/crush.c for (b = 0; b < map->max_buckets; b++) { map 115 net/ceph/crush/crush.c if (map->buckets[b] == NULL) map 117 net/ceph/crush/crush.c crush_destroy_bucket(map->buckets[b]); map 119 net/ceph/crush/crush.c kfree(map->buckets); map 123 net/ceph/crush/crush.c if (map->rules) { map 125 net/ceph/crush/crush.c for (b = 0; b < map->max_rules; b++) map 126 net/ceph/crush/crush.c crush_destroy_rule(map->rules[b]); map 127 net/ceph/crush/crush.c kfree(map->rules); map 131 net/ceph/crush/crush.c kfree(map->choose_tries); map 133 net/ceph/crush/crush.c clear_choose_args(map); map 135 net/ceph/crush/crush.c kfree(map); map 42 net/ceph/crush/mapper.c int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size) map 46 net/ceph/crush/mapper.c for (i = 0; i < map->max_rules; i++) { map 47 net/ceph/crush/mapper.c if (map->rules[i] && map 48 net/ceph/crush/mapper.c map->rules[i]->mask.ruleset == ruleset && map 49 net/ceph/crush/mapper.c map->rules[i]->mask.type == type && map 50 net/ceph/crush/mapper.c map->rules[i]->mask.min_size <= size && map 51 net/ceph/crush/mapper.c map->rules[i]->mask.max_size >= size) map 413 net/ceph/crush/mapper.c static int is_out(const struct crush_map *map, map 449 net/ceph/crush/mapper.c static int crush_choose_firstn(const struct crush_map *map, map 519 net/ceph/crush/mapper.c if (item >= map->max_devices) { map 527 net/ceph/crush/mapper.c itemtype = map->buckets[-1-item]->type; map 535 net/ceph/crush/mapper.c (-1-item) >= map->max_buckets) { map 540 net/ceph/crush/mapper.c in = map->buckets[-1-item]; map 562 net/ceph/crush/mapper.c map, map 564 net/ceph/crush/mapper.c map->buckets[-1-item], map 588 net/ceph/crush/mapper.c reject = is_out(map, weight, map 629 net/ceph/crush/mapper.c if (map->choose_tries && ftotal <= map->choose_total_tries) map 630 
net/ceph/crush/mapper.c map->choose_tries[ftotal]++; map 643 net/ceph/crush/mapper.c static void crush_choose_indep(const struct crush_map *map, map 730 net/ceph/crush/mapper.c if (item >= map->max_devices) { map 741 net/ceph/crush/mapper.c itemtype = map->buckets[-1-item]->type; map 749 net/ceph/crush/mapper.c (-1-item) >= map->max_buckets) { map 758 net/ceph/crush/mapper.c in = map->buckets[-1-item]; map 776 net/ceph/crush/mapper.c map, map 778 net/ceph/crush/mapper.c map->buckets[-1-item], map 797 net/ceph/crush/mapper.c is_out(map, weight, weight_max, item, x)) map 816 net/ceph/crush/mapper.c if (map->choose_tries && ftotal <= map->choose_total_tries) map 817 net/ceph/crush/mapper.c map->choose_tries[ftotal]++; map 848 net/ceph/crush/mapper.c void crush_init_workspace(const struct crush_map *map, void *v) map 863 net/ceph/crush/mapper.c v += map->max_buckets * sizeof(struct crush_work_bucket *); map 864 net/ceph/crush/mapper.c for (b = 0; b < map->max_buckets; ++b) { map 865 net/ceph/crush/mapper.c if (!map->buckets[b]) map 869 net/ceph/crush/mapper.c switch (map->buckets[b]->alg) { map 877 net/ceph/crush/mapper.c v += map->buckets[b]->size * sizeof(__u32); map 879 net/ceph/crush/mapper.c BUG_ON(v - (void *)w != map->working_size); map 894 net/ceph/crush/mapper.c int crush_do_rule(const struct crush_map *map, map 901 net/ceph/crush/mapper.c int *a = cwin + map->working_size; map 919 net/ceph/crush/mapper.c int choose_tries = map->choose_total_tries + 1; map 925 net/ceph/crush/mapper.c int choose_local_retries = map->choose_local_tries; map 926 net/ceph/crush/mapper.c int choose_local_fallback_retries = map->choose_local_fallback_tries; map 928 net/ceph/crush/mapper.c int vary_r = map->chooseleaf_vary_r; map 929 net/ceph/crush/mapper.c int stable = map->chooseleaf_stable; map 931 net/ceph/crush/mapper.c if ((__u32)ruleno >= map->max_rules) { map 936 net/ceph/crush/mapper.c rule = map->rules[ruleno]; map 946 net/ceph/crush/mapper.c curstep->arg1 < map->max_devices) || map 948 net/ceph/crush/mapper.c -1-curstep->arg1 < map->max_buckets && map 949 net/ceph/crush/mapper.c map->buckets[-1-curstep->arg1])) { map 1016 net/ceph/crush/mapper.c if (bno < 0 || bno >= map->max_buckets) { map 1026 net/ceph/crush/mapper.c else if (map->chooseleaf_descend_once) map 1031 net/ceph/crush/mapper.c map, map 1033 net/ceph/crush/mapper.c map->buckets[bno], map 1053 net/ceph/crush/mapper.c map, map 1055 net/ceph/crush/mapper.c map->buckets[bno], map 59 net/ceph/debugfs.c struct ceph_osdmap *map = osdc->osdmap; map 62 net/ceph/debugfs.c if (map == NULL) map 66 net/ceph/debugfs.c seq_printf(s, "epoch %u barrier %u flags 0x%x\n", map->epoch, map 67 net/ceph/debugfs.c osdc->epoch_barrier, map->flags); map 69 net/ceph/debugfs.c for (n = rb_first(&map->pg_pools); n; n = rb_next(n)) { map 79 net/ceph/debugfs.c for (i = 0; i < map->max_osd; i++) { map 80 net/ceph/debugfs.c struct ceph_entity_addr *addr = &map->osd_addr[i]; map 81 net/ceph/debugfs.c u32 state = map->osd_state[i]; map 86 net/ceph/debugfs.c ((map->osd_weight[i]*100) >> 16), map 88 net/ceph/debugfs.c ((ceph_get_primary_affinity(map, i)*100) >> 16)); map 90 net/ceph/debugfs.c for (n = rb_first(&map->pg_temp); n; n = rb_next(n)) { map 101 net/ceph/debugfs.c for (n = rb_first(&map->primary_temp); n; n = rb_next(n)) { map 108 net/ceph/debugfs.c for (n = rb_first(&map->pg_upmap); n; n = rb_next(n)) { map 119 net/ceph/debugfs.c for (n = rb_first(&map->pg_upmap_items); n; n = rb_next(n)) { map 2571 net/ceph/osd_client.c struct ceph_osdmap *map = osdc->osdmap; 
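The net/ceph/crush/mapper.c entries above cover the CRUSH rule-selection path (crush_find_rule, crush_init_workspace, crush_do_rule). Below is a minimal sketch of how a caller drives that lookup; it uses only the signatures visible in the listing, and the helper name pick_crush_rule plus the error handling are illustrative assumptions, not code from the kernel tree:

#include <linux/errno.h>
#include <linux/crush/crush.h>
#include <linux/crush/mapper.h>

/*
 * Illustrative helper (not part of the kernel): pick the CRUSH rule that
 * matches a ruleset/type/size triple.  crush_find_rule() scans map->rules
 * for a rule whose mask matches the ruleset and type and whose
 * min_size/max_size bracket "size", returning its rule number, or a
 * negative value when nothing matches.
 */
static int pick_crush_rule(const struct crush_map *map,
			   int ruleset, int type, int size)
{
	int ruleno = crush_find_rule(map, ruleset, type, size);

	if (ruleno < 0)
		return -ENOENT;	/* caller falls back or reports an error */

	/*
	 * The returned ruleno is what crush_do_rule() expects, together
	 * with a scratch buffer of map->working_size bytes prepared once by
	 * crush_init_workspace(map, buf); the osdmap code in
	 * net/ceph/osdmap.c keeps that buffer in map->crush_workspace,
	 * serialized by crush_workspace_mutex.
	 */
	return ruleno;
}
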
map 2574 net/ceph/osd_client.c WARN_ON(!map->epoch); map 2582 net/ceph/osd_client.c req->r_map_dne_bound = map->epoch; map 2587 net/ceph/osd_client.c req, req->r_tid, req->r_map_dne_bound, map->epoch); map 2591 net/ceph/osd_client.c if (map->epoch >= req->r_map_dne_bound) { map 3168 net/ceph/osd_client.c struct ceph_osdmap *map = osdc->osdmap; map 3171 net/ceph/osd_client.c WARN_ON(!map->epoch); map 3174 net/ceph/osd_client.c lreq->map_dne_bound = map->epoch; map 3180 net/ceph/osd_client.c map->epoch); map 3184 net/ceph/osd_client.c if (map->epoch >= lreq->map_dne_bound) { map 678 net/ceph/osdmap.c struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id) map 680 net/ceph/osdmap.c return __lookup_pg_pool(&map->pg_pools, id); map 683 net/ceph/osdmap.c const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id) map 693 net/ceph/osdmap.c pi = __lookup_pg_pool(&map->pg_pools, (int) id); map 699 net/ceph/osdmap.c int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name) map 703 net/ceph/osdmap.c for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) { map 713 net/ceph/osdmap.c u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id) map 717 net/ceph/osdmap.c pi = __lookup_pg_pool(&map->pg_pools, id); map 893 net/ceph/osdmap.c static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map) map 906 net/ceph/osdmap.c pi = __lookup_pg_pool(&map->pg_pools, pool); map 929 net/ceph/osdmap.c struct ceph_osdmap *map; map 931 net/ceph/osdmap.c map = kzalloc(sizeof(*map), GFP_NOIO); map 932 net/ceph/osdmap.c if (!map) map 935 net/ceph/osdmap.c map->pg_pools = RB_ROOT; map 936 net/ceph/osdmap.c map->pool_max = -1; map 937 net/ceph/osdmap.c map->pg_temp = RB_ROOT; map 938 net/ceph/osdmap.c map->primary_temp = RB_ROOT; map 939 net/ceph/osdmap.c map->pg_upmap = RB_ROOT; map 940 net/ceph/osdmap.c map->pg_upmap_items = RB_ROOT; map 941 net/ceph/osdmap.c mutex_init(&map->crush_workspace_mutex); map 943 net/ceph/osdmap.c return map; map 946 net/ceph/osdmap.c void ceph_osdmap_destroy(struct ceph_osdmap *map) map 948 net/ceph/osdmap.c dout("osdmap_destroy %p\n", map); map 949 net/ceph/osdmap.c if (map->crush) map 950 net/ceph/osdmap.c crush_destroy(map->crush); map 951 net/ceph/osdmap.c while (!RB_EMPTY_ROOT(&map->pg_temp)) { map 953 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_temp), map 955 net/ceph/osdmap.c erase_pg_mapping(&map->pg_temp, pg); map 958 net/ceph/osdmap.c while (!RB_EMPTY_ROOT(&map->primary_temp)) { map 960 net/ceph/osdmap.c rb_entry(rb_first(&map->primary_temp), map 962 net/ceph/osdmap.c erase_pg_mapping(&map->primary_temp, pg); map 965 net/ceph/osdmap.c while (!RB_EMPTY_ROOT(&map->pg_upmap)) { map 967 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_upmap), map 969 net/ceph/osdmap.c rb_erase(&pg->node, &map->pg_upmap); map 972 net/ceph/osdmap.c while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) { map 974 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_upmap_items), map 976 net/ceph/osdmap.c rb_erase(&pg->node, &map->pg_upmap_items); map 979 net/ceph/osdmap.c while (!RB_EMPTY_ROOT(&map->pg_pools)) { map 981 net/ceph/osdmap.c rb_entry(rb_first(&map->pg_pools), map 983 net/ceph/osdmap.c __remove_pg_pool(&map->pg_pools, pi); map 985 net/ceph/osdmap.c kvfree(map->osd_state); map 986 net/ceph/osdmap.c kvfree(map->osd_weight); map 987 net/ceph/osdmap.c kvfree(map->osd_addr); map 988 net/ceph/osdmap.c kvfree(map->osd_primary_affinity); map 989 net/ceph/osdmap.c kvfree(map->crush_workspace); map 990 net/ceph/osdmap.c kfree(map); map 998 net/ceph/osdmap.c static 
int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max) map 1006 net/ceph/osdmap.c dout("%s old %u new %u\n", __func__, map->max_osd, max); map 1007 net/ceph/osdmap.c if (max == map->max_osd) map 1020 net/ceph/osdmap.c to_copy = min(map->max_osd, max); map 1021 net/ceph/osdmap.c if (map->osd_state) { map 1022 net/ceph/osdmap.c memcpy(state, map->osd_state, to_copy * sizeof(*state)); map 1023 net/ceph/osdmap.c memcpy(weight, map->osd_weight, to_copy * sizeof(*weight)); map 1024 net/ceph/osdmap.c memcpy(addr, map->osd_addr, to_copy * sizeof(*addr)); map 1025 net/ceph/osdmap.c kvfree(map->osd_state); map 1026 net/ceph/osdmap.c kvfree(map->osd_weight); map 1027 net/ceph/osdmap.c kvfree(map->osd_addr); map 1030 net/ceph/osdmap.c map->osd_state = state; map 1031 net/ceph/osdmap.c map->osd_weight = weight; map 1032 net/ceph/osdmap.c map->osd_addr = addr; map 1033 net/ceph/osdmap.c for (i = map->max_osd; i < max; i++) { map 1034 net/ceph/osdmap.c map->osd_state[i] = 0; map 1035 net/ceph/osdmap.c map->osd_weight[i] = CEPH_OSD_OUT; map 1036 net/ceph/osdmap.c memset(map->osd_addr + i, 0, sizeof(*map->osd_addr)); map 1039 net/ceph/osdmap.c if (map->osd_primary_affinity) { map 1047 net/ceph/osdmap.c memcpy(affinity, map->osd_primary_affinity, map 1049 net/ceph/osdmap.c kvfree(map->osd_primary_affinity); map 1051 net/ceph/osdmap.c map->osd_primary_affinity = affinity; map 1052 net/ceph/osdmap.c for (i = map->max_osd; i < max; i++) map 1053 net/ceph/osdmap.c map->osd_primary_affinity[i] = map 1057 net/ceph/osdmap.c map->max_osd = max; map 1062 net/ceph/osdmap.c static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush) map 1079 net/ceph/osdmap.c if (map->crush) map 1080 net/ceph/osdmap.c crush_destroy(map->crush); map 1081 net/ceph/osdmap.c kvfree(map->crush_workspace); map 1082 net/ceph/osdmap.c map->crush = crush; map 1083 net/ceph/osdmap.c map->crush_workspace = workspace; map 1144 net/ceph/osdmap.c static int __decode_pools(void **p, void *end, struct ceph_osdmap *map, map 1157 net/ceph/osdmap.c pi = __lookup_pg_pool(&map->pg_pools, pool); map 1165 net/ceph/osdmap.c ret = __insert_pg_pool(&map->pg_pools, pi); map 1183 net/ceph/osdmap.c static int decode_pools(void **p, void *end, struct ceph_osdmap *map) map 1185 net/ceph/osdmap.c return __decode_pools(p, end, map, false); map 1188 net/ceph/osdmap.c static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map) map 1190 net/ceph/osdmap.c return __decode_pools(p, end, map, true); map 1264 net/ceph/osdmap.c static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map) map 1266 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, map 1270 net/ceph/osdmap.c static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map) map 1272 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp, map 1297 net/ceph/osdmap.c static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map) map 1299 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->primary_temp, map 1304 net/ceph/osdmap.c struct ceph_osdmap *map) map 1306 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->primary_temp, map 1310 net/ceph/osdmap.c u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd) map 1312 net/ceph/osdmap.c BUG_ON(osd >= map->max_osd); map 1314 net/ceph/osdmap.c if (!map->osd_primary_affinity) map 1317 net/ceph/osdmap.c return map->osd_primary_affinity[osd]; map 1320 net/ceph/osdmap.c static int set_primary_affinity(struct ceph_osdmap *map, 
int osd, u32 aff) map 1322 net/ceph/osdmap.c BUG_ON(osd >= map->max_osd); map 1324 net/ceph/osdmap.c if (!map->osd_primary_affinity) { map 1327 net/ceph/osdmap.c map->osd_primary_affinity = ceph_kvmalloc( map 1328 net/ceph/osdmap.c array_size(map->max_osd, sizeof(*map->osd_primary_affinity)), map 1330 net/ceph/osdmap.c if (!map->osd_primary_affinity) map 1333 net/ceph/osdmap.c for (i = 0; i < map->max_osd; i++) map 1334 net/ceph/osdmap.c map->osd_primary_affinity[i] = map 1338 net/ceph/osdmap.c map->osd_primary_affinity[osd] = aff; map 1344 net/ceph/osdmap.c struct ceph_osdmap *map) map 1350 net/ceph/osdmap.c kvfree(map->osd_primary_affinity); map 1351 net/ceph/osdmap.c map->osd_primary_affinity = NULL; map 1354 net/ceph/osdmap.c if (len != map->max_osd) map 1357 net/ceph/osdmap.c ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval); map 1359 net/ceph/osdmap.c for (i = 0; i < map->max_osd; i++) { map 1362 net/ceph/osdmap.c ret = set_primary_affinity(map, i, ceph_decode_32(p)); map 1374 net/ceph/osdmap.c struct ceph_osdmap *map) map 1386 net/ceph/osdmap.c ret = set_primary_affinity(map, osd, aff); map 1405 net/ceph/osdmap.c static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map) map 1407 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, map 1411 net/ceph/osdmap.c static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map) map 1413 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap, map 1417 net/ceph/osdmap.c static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map) map 1419 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true); map 1449 net/ceph/osdmap.c static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map) map 1451 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_upmap_items, map 1456 net/ceph/osdmap.c struct ceph_osdmap *map) map 1458 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_upmap_items, map 1463 net/ceph/osdmap.c struct ceph_osdmap *map) map 1465 net/ceph/osdmap.c return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true); map 1471 net/ceph/osdmap.c static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map) map 1487 net/ceph/osdmap.c ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) + map 1488 net/ceph/osdmap.c sizeof(map->created) + sizeof(map->modified), e_inval); map 1489 net/ceph/osdmap.c ceph_decode_copy(p, &map->fsid, sizeof(map->fsid)); map 1490 net/ceph/osdmap.c epoch = map->epoch = ceph_decode_32(p); map 1491 net/ceph/osdmap.c ceph_decode_copy(p, &map->created, sizeof(map->created)); map 1492 net/ceph/osdmap.c ceph_decode_copy(p, &map->modified, sizeof(map->modified)); map 1495 net/ceph/osdmap.c err = decode_pools(p, end, map); map 1500 net/ceph/osdmap.c err = decode_pool_names(p, end, map); map 1504 net/ceph/osdmap.c ceph_decode_32_safe(p, end, map->pool_max, e_inval); map 1506 net/ceph/osdmap.c ceph_decode_32_safe(p, end, map->flags, e_inval); map 1512 net/ceph/osdmap.c err = osdmap_set_max_osd(map, max); map 1518 net/ceph/osdmap.c map->max_osd*(struct_v >= 5 ? 
sizeof(u32) : map 1520 net/ceph/osdmap.c sizeof(*map->osd_weight), e_inval); map 1521 net/ceph/osdmap.c if (ceph_decode_32(p) != map->max_osd) map 1525 net/ceph/osdmap.c for (i = 0; i < map->max_osd; i++) map 1526 net/ceph/osdmap.c map->osd_state[i] = ceph_decode_32(p); map 1528 net/ceph/osdmap.c for (i = 0; i < map->max_osd; i++) map 1529 net/ceph/osdmap.c map->osd_state[i] = ceph_decode_8(p); map 1532 net/ceph/osdmap.c if (ceph_decode_32(p) != map->max_osd) map 1535 net/ceph/osdmap.c for (i = 0; i < map->max_osd; i++) map 1536 net/ceph/osdmap.c map->osd_weight[i] = ceph_decode_32(p); map 1538 net/ceph/osdmap.c if (ceph_decode_32(p) != map->max_osd) map 1541 net/ceph/osdmap.c for (i = 0; i < map->max_osd; i++) { map 1542 net/ceph/osdmap.c err = ceph_decode_entity_addr(p, end, &map->osd_addr[i]); map 1548 net/ceph/osdmap.c err = decode_pg_temp(p, end, map); map 1554 net/ceph/osdmap.c err = decode_primary_temp(p, end, map); map 1561 net/ceph/osdmap.c err = decode_primary_affinity(p, end, map); map 1565 net/ceph/osdmap.c WARN_ON(map->osd_primary_affinity); map 1570 net/ceph/osdmap.c err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end))); map 1582 net/ceph/osdmap.c err = decode_pg_upmap(p, end, map); map 1586 net/ceph/osdmap.c err = decode_pg_upmap_items(p, end, map); map 1590 net/ceph/osdmap.c WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap)); map 1591 net/ceph/osdmap.c WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items)); map 1597 net/ceph/osdmap.c dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); map 1616 net/ceph/osdmap.c struct ceph_osdmap *map; map 1619 net/ceph/osdmap.c map = ceph_osdmap_alloc(); map 1620 net/ceph/osdmap.c if (!map) map 1623 net/ceph/osdmap.c ret = osdmap_decode(p, end, map); map 1625 net/ceph/osdmap.c ceph_osdmap_destroy(map); map 1629 net/ceph/osdmap.c return map; map 1641 net/ceph/osdmap.c struct ceph_osdmap *map) map 1674 net/ceph/osdmap.c BUG_ON(osd >= map->max_osd); map 1678 net/ceph/osdmap.c map->osd_weight[osd] = w; map 1685 net/ceph/osdmap.c map->osd_state[osd] |= CEPH_OSD_EXISTS; map 1686 net/ceph/osdmap.c map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT | map 1707 net/ceph/osdmap.c BUG_ON(osd >= map->max_osd); map 1708 net/ceph/osdmap.c if ((map->osd_state[osd] & CEPH_OSD_UP) && map 1711 net/ceph/osdmap.c if ((map->osd_state[osd] & CEPH_OSD_EXISTS) && map 1714 net/ceph/osdmap.c ret = set_primary_affinity(map, osd, map 1718 net/ceph/osdmap.c memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr)); map 1719 net/ceph/osdmap.c map->osd_state[osd] = 0; map 1721 net/ceph/osdmap.c map->osd_state[osd] ^= xorstate; map 1733 net/ceph/osdmap.c BUG_ON(osd >= map->max_osd); map 1737 net/ceph/osdmap.c map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP; map 1738 net/ceph/osdmap.c map->osd_addr[osd] = addr; map 1752 net/ceph/osdmap.c struct ceph_osdmap *map) map 1776 net/ceph/osdmap.c BUG_ON(epoch != map->epoch+1); map 1792 net/ceph/osdmap.c err = osdmap_set_crush(map, map 1801 net/ceph/osdmap.c map->flags = new_flags; map 1803 net/ceph/osdmap.c map->pool_max = new_pool_max; map 1808 net/ceph/osdmap.c err = osdmap_set_max_osd(map, max); map 1813 net/ceph/osdmap.c map->epoch++; map 1814 net/ceph/osdmap.c map->modified = modified; map 1817 net/ceph/osdmap.c err = decode_new_pools(p, end, map); map 1822 net/ceph/osdmap.c err = decode_pool_names(p, end, map); map 1832 net/ceph/osdmap.c pi = __lookup_pg_pool(&map->pg_pools, pool); map 1834 net/ceph/osdmap.c __remove_pg_pool(&map->pg_pools, pi); map 1838 net/ceph/osdmap.c err = decode_new_up_state_weight(p, end, struct_v, 
map); map 1843 net/ceph/osdmap.c err = decode_new_pg_temp(p, end, map); map 1849 net/ceph/osdmap.c err = decode_new_primary_temp(p, end, map); map 1856 net/ceph/osdmap.c err = decode_new_primary_affinity(p, end, map); map 1870 net/ceph/osdmap.c err = decode_new_pg_upmap(p, end, map); map 1874 net/ceph/osdmap.c err = decode_old_pg_upmap(p, end, map); map 1878 net/ceph/osdmap.c err = decode_new_pg_upmap_items(p, end, map); map 1882 net/ceph/osdmap.c err = decode_old_pg_upmap_items(p, end, map); map 1890 net/ceph/osdmap.c dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd); map 1891 net/ceph/osdmap.c return map; map 2276 net/ceph/osdmap.c static int do_crush(struct ceph_osdmap *map, int ruleno, int x, map 2286 net/ceph/osdmap.c arg_map = lookup_choose_arg_map(&map->crush->choose_args, map 2289 net/ceph/osdmap.c arg_map = lookup_choose_arg_map(&map->crush->choose_args, map 2292 net/ceph/osdmap.c mutex_lock(&map->crush_workspace_mutex); map 2293 net/ceph/osdmap.c r = crush_do_rule(map->crush, ruleno, x, result, result_max, map 2294 net/ceph/osdmap.c weight, weight_max, map->crush_workspace, map 2296 net/ceph/osdmap.c mutex_unlock(&map->crush_workspace_mutex); map 42 net/core/bpf_sk_storage.c struct bpf_map map; map 133 net/core/bpf_sk_storage.c memcpy(SDATA(selem)->data, value, smap->map.value_size); map 299 net/core/bpf_sk_storage.c sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit) map 308 net/core/bpf_sk_storage.c smap = (struct bpf_sk_storage_map *)map; map 388 net/core/bpf_sk_storage.c struct bpf_map *map, map 401 net/core/bpf_sk_storage.c unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map))) map 404 net/core/bpf_sk_storage.c smap = (struct bpf_sk_storage_map *)map; map 436 net/core/bpf_sk_storage.c copy_map_value_locked(map, old_sdata->data, map 461 net/core/bpf_sk_storage.c copy_map_value_locked(map, old_sdata->data, value, false); map 501 net/core/bpf_sk_storage.c static int sk_storage_delete(struct sock *sk, struct bpf_map *map) map 505 net/core/bpf_sk_storage.c sdata = sk_storage_lookup(sk, map, false); map 553 net/core/bpf_sk_storage.c static void bpf_sk_storage_map_free(struct bpf_map *map) map 560 net/core/bpf_sk_storage.c smap = (struct bpf_sk_storage_map *)map; map 606 net/core/bpf_sk_storage.c kfree(map); map 644 net/core/bpf_sk_storage.c bpf_map_init_from_attr(&smap->map, attr); map 652 net/core/bpf_sk_storage.c ret = bpf_map_charge_init(&smap->map.memory, cost); map 661 net/core/bpf_sk_storage.c bpf_map_charge_finish(&smap->map.memory); map 675 net/core/bpf_sk_storage.c return &smap->map; map 678 net/core/bpf_sk_storage.c static int notsupp_get_next_key(struct bpf_map *map, void *key, map 684 net/core/bpf_sk_storage.c static int bpf_sk_storage_map_check_btf(const struct bpf_map *map, map 701 net/core/bpf_sk_storage.c static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key) map 710 net/core/bpf_sk_storage.c sdata = sk_storage_lookup(sock->sk, map, true); map 718 net/core/bpf_sk_storage.c static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key, map 728 net/core/bpf_sk_storage.c sdata = sk_storage_update(sock->sk, map, value, map_flags); map 736 net/core/bpf_sk_storage.c static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key) map 744 net/core/bpf_sk_storage.c err = sk_storage_delete(sock->sk, map); map 763 net/core/bpf_sk_storage.c if (map_value_has_spin_lock(&smap->map)) map 764 net/core/bpf_sk_storage.c copy_map_value_locked(&smap->map, SDATA(copy_selem)->data, map 767 
net/core/bpf_sk_storage.c copy_map_value(&smap->map, SDATA(copy_selem)->data, map 791 net/core/bpf_sk_storage.c struct bpf_map *map; map 794 net/core/bpf_sk_storage.c if (!(smap->map.map_flags & BPF_F_CLONE)) map 802 net/core/bpf_sk_storage.c map = bpf_map_inc_not_zero(&smap->map, false); map 803 net/core/bpf_sk_storage.c if (IS_ERR(map)) map 809 net/core/bpf_sk_storage.c bpf_map_put(map); map 822 net/core/bpf_sk_storage.c bpf_map_put(map); map 828 net/core/bpf_sk_storage.c bpf_map_put(map); map 841 net/core/bpf_sk_storage.c BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk, map 849 net/core/bpf_sk_storage.c sdata = sk_storage_lookup(sk, map, true); map 860 net/core/bpf_sk_storage.c sdata = sk_storage_update(sk, map, value, BPF_NOEXIST); map 872 net/core/bpf_sk_storage.c BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk) map 877 net/core/bpf_sk_storage.c err = sk_storage_delete(sk, map); map 484 net/core/dev.c static int netdev_boot_setup_add(char *name, struct ifmap *map) map 494 net/core/dev.c memcpy(&s[i].map, map, sizeof(s[i].map)); map 519 net/core/dev.c dev->irq = s[i].map.irq; map 520 net/core/dev.c dev->base_addr = s[i].map.base_addr; map 521 net/core/dev.c dev->mem_start = s[i].map.mem_start; map 522 net/core/dev.c dev->mem_end = s[i].map.mem_end; map 558 net/core/dev.c return s[i].map.base_addr; map 568 net/core/dev.c struct ifmap map; map 575 net/core/dev.c memset(&map, 0, sizeof(map)); map 577 net/core/dev.c map.irq = ints[1]; map 579 net/core/dev.c map.base_addr = ints[2]; map 581 net/core/dev.c map.mem_start = ints[3]; map 583 net/core/dev.c map.mem_end = ints[4]; map 586 net/core/dev.c return netdev_boot_setup_add(str, &map); map 2073 net/core/dev.c struct xps_map *map = NULL; map 2077 net/core/dev.c map = xmap_dereference(dev_maps->attr_map[tci]); map 2078 net/core/dev.c if (!map) map 2081 net/core/dev.c for (pos = map->len; pos--;) { map 2082 net/core/dev.c if (map->queues[pos] != index) map 2085 net/core/dev.c if (map->len > 1) { map 2086 net/core/dev.c map->queues[pos] = map->queues[--map->len]; map 2091 net/core/dev.c kfree_rcu(map, rcu); map 2199 net/core/dev.c static struct xps_map *expand_xps_map(struct xps_map *map, int attr_index, map 2206 net/core/dev.c for (pos = 0; map && pos < map->len; pos++) { map 2207 net/core/dev.c if (map->queues[pos] != index) map 2209 net/core/dev.c return map; map 2213 net/core/dev.c if (map) { map 2214 net/core/dev.c if (pos < map->alloc_len) map 2215 net/core/dev.c return map; map 2217 net/core/dev.c alloc_len = map->alloc_len * 2; map 2232 net/core/dev.c new_map->queues[i] = map->queues[i]; map 2247 net/core/dev.c struct xps_map *map, *new_map; map 2294 net/core/dev.c map = dev_maps ? 
xmap_dereference(dev_maps->attr_map[tci]) : map 2297 net/core/dev.c map = expand_xps_map(map, j, index, is_rxqs_map); map 2298 net/core/dev.c if (!map) map 2301 net/core/dev.c RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); map 2319 net/core/dev.c map = xmap_dereference(dev_maps->attr_map[tci]); map 2320 net/core/dev.c RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); map 2333 net/core/dev.c map = xmap_dereference(new_dev_maps->attr_map[tci]); map 2334 net/core/dev.c while ((pos < map->len) && (map->queues[pos] != index)) map 2337 net/core/dev.c if (pos == map->len) map 2338 net/core/dev.c map->queues[map->len++] = index; map 2349 net/core/dev.c map = xmap_dereference(dev_maps->attr_map[tci]); map 2350 net/core/dev.c RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); map 2356 net/core/dev.c map = xmap_dereference(dev_maps->attr_map[tci]); map 2357 net/core/dev.c RCU_INIT_POINTER(new_dev_maps->attr_map[tci], map); map 2374 net/core/dev.c map = xmap_dereference(dev_maps->attr_map[tci]); map 2375 net/core/dev.c if (map && map != new_map) map 2376 net/core/dev.c kfree_rcu(map, rcu); map 2423 net/core/dev.c map = dev_maps ? map 2426 net/core/dev.c if (new_map && new_map != map) map 3454 net/core/dev.c const struct netprio_map *map; map 3460 net/core/dev.c map = rcu_dereference_bh(skb->dev->priomap); map 3461 net/core/dev.c if (!map) map 3469 net/core/dev.c if (prioidx < map->priomap_len) map 3470 net/core/dev.c skb->priority = map->priomap[prioidx]; map 3541 net/core/dev.c struct xps_map *map; map 3549 net/core/dev.c map = rcu_dereference(dev_maps->attr_map[tci]); map 3550 net/core/dev.c if (map) { map 3551 net/core/dev.c if (map->len == 1) map 3552 net/core/dev.c queue_index = map->queues[0]; map 3554 net/core/dev.c queue_index = map->queues[reciprocal_scale( map 3555 net/core/dev.c skb_get_hash(skb), map->len)]; map 3953 net/core/dev.c struct rps_map *map; map 3974 net/core/dev.c map = rcu_dereference(rxqueue->rps_map); map 3975 net/core/dev.c if (!flow_table && !map) map 4030 net/core/dev.c if (map) { map 4031 net/core/dev.c tcpu = map->cpus[reciprocal_scale(hash, map->len)]; map 3463 net/core/filter.c struct bpf_map *map, map 3515 net/core/filter.c struct bpf_map *map, map 3521 net/core/filter.c switch (map->map_type) { map 3542 net/core/filter.c err = __xsk_map_redirect(map, xdp, xs); map 3554 net/core/filter.c struct bpf_map *map = ri->map_to_flush; map 3557 net/core/filter.c if (map) { map 3558 net/core/filter.c switch (map->map_type) { map 3561 net/core/filter.c __dev_map_flush(map); map 3564 net/core/filter.c __cpu_map_flush(map); map 3567 net/core/filter.c __xsk_map_flush(map); map 3576 net/core/filter.c static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) map 3578 net/core/filter.c switch (map->map_type) { map 3580 net/core/filter.c return __dev_map_lookup_elem(map, index); map 3582 net/core/filter.c return __dev_map_hash_lookup_elem(map, index); map 3584 net/core/filter.c return __cpu_map_lookup_elem(map, index); map 3586 net/core/filter.c return __xsk_map_lookup_elem(map, index); map 3592 net/core/filter.c void bpf_clear_redirect_map(struct bpf_map *map) map 3604 net/core/filter.c if (unlikely(READ_ONCE(ri->map) == map)) map 3605 net/core/filter.c cmpxchg(&ri->map, map, NULL); map 3610 net/core/filter.c struct bpf_prog *xdp_prog, struct bpf_map *map, map 3619 net/core/filter.c WRITE_ONCE(ri->map, NULL); map 3621 net/core/filter.c if (ri->map_to_flush && unlikely(ri->map_to_flush != map)) map 3624 net/core/filter.c err = __bpf_tx_xdp_map(dev, fwd, map, 
xdp, index); map 3628 net/core/filter.c ri->map_to_flush = map; map 3629 net/core/filter.c _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); map 3632 net/core/filter.c _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); map 3640 net/core/filter.c struct bpf_map *map = READ_ONCE(ri->map); map 3642 net/core/filter.c if (likely(map)) map 3643 net/core/filter.c return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri); map 3653 net/core/filter.c struct bpf_map *map) map 3662 net/core/filter.c WRITE_ONCE(ri->map, NULL); map 3664 net/core/filter.c if (map->map_type == BPF_MAP_TYPE_DEVMAP || map 3665 net/core/filter.c map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { map 3671 net/core/filter.c } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { map 3684 net/core/filter.c _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); map 3687 net/core/filter.c _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); map 3695 net/core/filter.c struct bpf_map *map = READ_ONCE(ri->map); map 3700 net/core/filter.c if (map) map 3702 net/core/filter.c map); map 3734 net/core/filter.c WRITE_ONCE(ri->map, NULL); map 3747 net/core/filter.c BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, map 3756 net/core/filter.c ri->tgt_value = __xdp_map_lookup_elem(map, ifindex); map 3763 net/core/filter.c WRITE_ONCE(ri->map, NULL); map 3769 net/core/filter.c WRITE_ONCE(ri->map, map); map 3796 net/core/filter.c BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, map 3806 net/core/filter.c return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, map 4055 net/core/filter.c BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map, map 4058 net/core/filter.c struct bpf_array *array = container_of(map, struct bpf_array, map); map 4065 net/core/filter.c if (unlikely(idx >= array->map.max_entries)) map 4138 net/core/filter.c BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, map 4148 net/core/filter.c return bpf_event_output(map, flags, meta, meta_size, xdp->data, map 4218 net/core/filter.c struct bpf_map *, map, u64, flags, void *, data, u64, size) map 4223 net/core/filter.c return bpf_event_output(map, flags, data, size, NULL, 0, NULL); map 8716 net/core/filter.c struct bpf_map *, map, void *, key, u32, flags) map 8721 net/core/filter.c selected_sk = map->ops->map_lookup_elem(map, key); map 688 net/core/net-sysfs.c struct rps_map *map; map 696 net/core/net-sysfs.c map = rcu_dereference(queue->rps_map); map 697 net/core/net-sysfs.c if (map) map 698 net/core/net-sysfs.c for (i = 0; i < map->len; i++) map 699 net/core/net-sysfs.c cpumask_set_cpu(map->cpus[i], mask); map 711 net/core/net-sysfs.c struct rps_map *old_map, *map; map 728 net/core/net-sysfs.c map = kzalloc(max_t(unsigned int, map 731 net/core/net-sysfs.c if (!map) { map 738 net/core/net-sysfs.c map->cpus[i++] = cpu; map 741 net/core/net-sysfs.c map->len = i; map 743 net/core/net-sysfs.c kfree(map); map 744 net/core/net-sysfs.c map = NULL; map 750 net/core/net-sysfs.c rcu_assign_pointer(queue->rps_map, map); map 752 net/core/net-sysfs.c if (map) map 868 net/core/net-sysfs.c struct rps_map *map; map 871 net/core/net-sysfs.c map = rcu_dereference_protected(queue->rps_map, 1); map 872 net/core/net-sysfs.c if (map) { map 874 net/core/net-sysfs.c kfree_rcu(map, rcu); map 1271 net/core/net-sysfs.c struct xps_map *map; map 1273 net/core/net-sysfs.c map = rcu_dereference(dev_maps->attr_map[tci]); map 1274 net/core/net-sysfs.c if (!map) map 1277 
net/core/net-sysfs.c for (i = map->len; i--;) { map 1278 net/core/net-sysfs.c if (map->queues[i] == index) { map 1354 net/core/net-sysfs.c struct xps_map *map; map 1356 net/core/net-sysfs.c map = rcu_dereference(dev_maps->attr_map[tci]); map 1357 net/core/net-sysfs.c if (!map) map 1360 net/core/net-sysfs.c for (i = map->len; i--;) { map 1361 net/core/net-sysfs.c if (map->queues[i] == index) { map 95 net/core/netprio_cgroup.c struct netprio_map *map = rcu_dereference_rtnl(dev->priomap); map 98 net/core/netprio_cgroup.c if (map && id < map->priomap_len) map 99 net/core/netprio_cgroup.c return map->priomap[id]; map 115 net/core/netprio_cgroup.c struct netprio_map *map; map 120 net/core/netprio_cgroup.c map = rtnl_dereference(dev->priomap); map 121 net/core/netprio_cgroup.c if (!prio && (!map || map->priomap_len <= id)) map 128 net/core/netprio_cgroup.c map = rtnl_dereference(dev->priomap); map 129 net/core/netprio_cgroup.c map->priomap[id] = prio; map 1351 net/core/rtnetlink.c struct rtnl_link_ifmap map; map 1353 net/core/rtnetlink.c memset(&map, 0, sizeof(map)); map 1354 net/core/rtnetlink.c map.mem_start = dev->mem_start; map 1355 net/core/rtnetlink.c map.mem_end = dev->mem_end; map 1356 net/core/rtnetlink.c map.base_addr = dev->base_addr; map 1357 net/core/rtnetlink.c map.irq = dev->irq; map 1358 net/core/rtnetlink.c map.dma = dev->dma; map 1359 net/core/rtnetlink.c map.port = dev->if_port; map 1361 net/core/rtnetlink.c if (nla_put_64bit(skb, IFLA_MAP, sizeof(map), &map, IFLA_PAD)) map 15 net/core/sock_map.c struct bpf_map map; map 42 net/core/sock_map.c bpf_map_init_from_attr(&stab->map, attr); map 46 net/core/sock_map.c cost = (u64) stab->map.max_entries * sizeof(struct sock *); map 47 net/core/sock_map.c err = bpf_map_charge_init(&stab->map.memory, cost); map 51 net/core/sock_map.c stab->sks = bpf_map_area_alloc(stab->map.max_entries * map 53 net/core/sock_map.c stab->map.numa_node); map 55 net/core/sock_map.c return &stab->map; map 57 net/core/sock_map.c bpf_map_charge_finish(&stab->map.memory); map 66 net/core/sock_map.c struct bpf_map *map; map 71 net/core/sock_map.c map = __bpf_map_get(f); map 72 net/core/sock_map.c if (IS_ERR(map)) map 73 net/core/sock_map.c return PTR_ERR(map); map 74 net/core/sock_map.c ret = sock_map_prog_update(map, prog, attr->attach_type); map 97 net/core/sock_map.c struct bpf_map *map, void *link_raw) map 100 net/core/sock_map.c link->map = map; map 115 net/core/sock_map.c struct bpf_map *map = link->map; map 116 net/core/sock_map.c struct bpf_stab *stab = container_of(map, struct bpf_stab, map 117 net/core/sock_map.c map); map 142 net/core/sock_map.c static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs, map 187 net/core/sock_map.c psock = sk_psock_init(sk, map->numa_node); map 231 net/core/sock_map.c static void sock_map_free(struct bpf_map *map) map 233 net/core/sock_map.c struct bpf_stab *stab = container_of(map, struct bpf_stab, map); map 241 net/core/sock_map.c for (i = 0; i < stab->map.max_entries; i++) { map 262 net/core/sock_map.c static void sock_map_release_progs(struct bpf_map *map) map 264 net/core/sock_map.c psock_progs_drop(&container_of(map, struct bpf_stab, map)->progs); map 267 net/core/sock_map.c static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key) map 269 net/core/sock_map.c struct bpf_stab *stab = container_of(map, struct bpf_stab, map); map 273 net/core/sock_map.c if (unlikely(key >= map->max_entries)) map 278 net/core/sock_map.c static void *sock_map_lookup(struct bpf_map *map, void *key) map 
303 net/core/sock_map.c static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk, map 306 net/core/sock_map.c struct bpf_stab *stab = container_of(map, struct bpf_stab, map); map 311 net/core/sock_map.c static int sock_map_delete_elem(struct bpf_map *map, void *key) map 313 net/core/sock_map.c struct bpf_stab *stab = container_of(map, struct bpf_stab, map); map 317 net/core/sock_map.c if (unlikely(i >= map->max_entries)) map 324 net/core/sock_map.c static int sock_map_get_next_key(struct bpf_map *map, void *key, void *next) map 326 net/core/sock_map.c struct bpf_stab *stab = container_of(map, struct bpf_stab, map); map 330 net/core/sock_map.c if (i == stab->map.max_entries - 1) map 332 net/core/sock_map.c if (i >= stab->map.max_entries) map 339 net/core/sock_map.c static int sock_map_update_common(struct bpf_map *map, u32 idx, map 342 net/core/sock_map.c struct bpf_stab *stab = container_of(map, struct bpf_stab, map); map 352 net/core/sock_map.c if (unlikely(idx >= map->max_entries)) map 361 net/core/sock_map.c ret = sock_map_link(map, &stab->progs, sk); map 378 net/core/sock_map.c sock_map_add_link(psock, link, map, &stab->sks[idx]); map 405 net/core/sock_map.c static int sock_map_update_elem(struct bpf_map *map, void *key, map 431 net/core/sock_map.c ret = sock_map_update_common(map, idx, sk, flags); map 439 net/core/sock_map.c struct bpf_map *, map, void *, key, u64, flags) map 445 net/core/sock_map.c return sock_map_update_common(map, *(u32 *)key, sops->sk, map 462 net/core/sock_map.c struct bpf_map *, map, u32, key, u64, flags) map 469 net/core/sock_map.c tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key); map 486 net/core/sock_map.c struct bpf_map *, map, u32, key, u64, flags) map 491 net/core/sock_map.c msg->sk_redir = __sock_map_lookup_elem(map, key); map 532 net/core/sock_map.c struct bpf_map map; map 566 net/core/sock_map.c static struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key) map 568 net/core/sock_map.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 569 net/core/sock_map.c u32 key_size = map->key_size, hash; map 589 net/core/sock_map.c static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk, map 592 net/core/sock_map.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 605 net/core/sock_map.c elem->key, map->key_size); map 614 net/core/sock_map.c static int sock_hash_delete_elem(struct bpf_map *map, void *key) map 616 net/core/sock_map.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 617 net/core/sock_map.c u32 hash, key_size = map->key_size; map 644 net/core/sock_map.c if (atomic_inc_return(&htab->count) > htab->map.max_entries) { map 652 net/core/sock_map.c htab->map.numa_node); map 663 net/core/sock_map.c static int sock_hash_update_common(struct bpf_map *map, void *key, map 666 net/core/sock_map.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 668 net/core/sock_map.c u32 key_size = map->key_size, hash; map 685 net/core/sock_map.c ret = sock_map_link(map, &htab->progs, sk); map 711 net/core/sock_map.c sock_map_add_link(psock, link, map, elem_new); map 731 net/core/sock_map.c static int sock_hash_update_elem(struct bpf_map *map, void *key, map 756 net/core/sock_map.c ret = sock_hash_update_common(map, key, sk, flags); map 763 net/core/sock_map.c static int sock_hash_get_next_key(struct bpf_map *map, void *key, map 766 net/core/sock_map.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 768 net/core/sock_map.c u32 
hash, key_size = map->key_size; map 823 net/core/sock_map.c bpf_map_init_from_attr(&htab->map, attr); map 825 net/core/sock_map.c htab->buckets_num = roundup_pow_of_two(htab->map.max_entries); map 827 net/core/sock_map.c round_up(htab->map.key_size, 8); map 835 net/core/sock_map.c (u64) htab->elem_size * htab->map.max_entries; map 843 net/core/sock_map.c htab->map.numa_node); map 854 net/core/sock_map.c return &htab->map; map 860 net/core/sock_map.c static void sock_hash_free(struct bpf_map *map) map 862 net/core/sock_map.c struct bpf_htab *htab = container_of(map, struct bpf_htab, map); map 895 net/core/sock_map.c static void sock_hash_release_progs(struct bpf_map *map) map 897 net/core/sock_map.c psock_progs_drop(&container_of(map, struct bpf_htab, map)->progs); map 901 net/core/sock_map.c struct bpf_map *, map, void *, key, u64, flags) map 907 net/core/sock_map.c return sock_hash_update_common(map, key, sops->sk, flags); map 923 net/core/sock_map.c struct bpf_map *, map, void *, key, u64, flags) map 930 net/core/sock_map.c tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key); map 947 net/core/sock_map.c struct bpf_map *, map, void *, key, u64, flags) map 952 net/core/sock_map.c msg->sk_redir = __sock_hash_lookup_elem(map, key); map 979 net/core/sock_map.c static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) map 981 net/core/sock_map.c switch (map->map_type) { map 983 net/core/sock_map.c return &container_of(map, struct bpf_stab, map)->progs; map 985 net/core/sock_map.c return &container_of(map, struct bpf_htab, map)->progs; map 993 net/core/sock_map.c int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, map 996 net/core/sock_map.c struct sk_psock_progs *progs = sock_map_progs(map); map 1020 net/core/sock_map.c switch (link->map->map_type) { map 1022 net/core/sock_map.c return sock_map_delete_from_link(link->map, sk, map 1025 net/core/sock_map.c return sock_hash_delete_from_link(link->map, sk, map 1983 net/dcb/dcbnl.c memset(p_map->map, 0, sizeof(p_map->map)); map 1992 net/dcb/dcbnl.c p_map->map[prio] |= 1ULL << itr->app.protocol; map 2012 net/dcb/dcbnl.c memset(p_map->map, 0, sizeof(p_map->map)); map 2020 net/dcb/dcbnl.c p_map->map[itr->app.protocol] |= 1 << itr->app.priority; map 471 net/ipv4/cipso_ipv4.c kfree(doi_def->map.std->lvl.cipso); map 472 net/ipv4/cipso_ipv4.c kfree(doi_def->map.std->lvl.local); map 473 net/ipv4/cipso_ipv4.c kfree(doi_def->map.std->cat.cipso); map 474 net/ipv4/cipso_ipv4.c kfree(doi_def->map.std->cat.local); map 656 net/ipv4/cipso_ipv4.c if ((level < doi_def->map.std->lvl.cipso_size) && map 657 net/ipv4/cipso_ipv4.c (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)) map 686 net/ipv4/cipso_ipv4.c if (host_lvl < doi_def->map.std->lvl.local_size && map 687 net/ipv4/cipso_ipv4.c doi_def->map.std->lvl.local[host_lvl] < CIPSO_V4_INV_LVL) { map 688 net/ipv4/cipso_ipv4.c *net_lvl = doi_def->map.std->lvl.local[host_lvl]; map 720 net/ipv4/cipso_ipv4.c map_tbl = doi_def->map.std; map 723 net/ipv4/cipso_ipv4.c *host_lvl = doi_def->map.std->lvl.cipso[net_lvl]; map 757 net/ipv4/cipso_ipv4.c cipso_cat_size = doi_def->map.std->cat.cipso_size; map 758 net/ipv4/cipso_ipv4.c cipso_array = doi_def->map.std->cat.cipso; map 805 net/ipv4/cipso_ipv4.c host_cat_size = doi_def->map.std->cat.local_size; map 806 net/ipv4/cipso_ipv4.c host_cat_array = doi_def->map.std->cat.local; map 866 net/ipv4/cipso_ipv4.c net_cat_size = doi_def->map.std->cat.cipso_size; map 867 net/ipv4/cipso_ipv4.c net_cat_array = doi_def->map.std->cat.cipso; map 947 
net/ipv6/mcast.c struct ifmcaddr6 *ma, **map; map 952 net/ipv6/mcast.c for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) { map 955 net/ipv6/mcast.c *map = ma->next; map 33 net/netfilter/ipset/ip_set_bitmap_gen.h #define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id))) map 38 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 40 net/netfilter/ipset/ip_set_bitmap_gen.h timer_setup(&map->gc, gc, 0); map 41 net/netfilter/ipset/ip_set_bitmap_gen.h mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ); map 47 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 50 net/netfilter/ipset/ip_set_bitmap_gen.h for (id = 0; id < map->elements; id++) map 51 net/netfilter/ipset/ip_set_bitmap_gen.h if (test_bit(id, map->members)) map 52 net/netfilter/ipset/ip_set_bitmap_gen.h ip_set_ext_destroy(set, get_ext(set, map, id)); map 58 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 61 net/netfilter/ipset/ip_set_bitmap_gen.h del_timer_sync(&map->gc); map 65 net/netfilter/ipset/ip_set_bitmap_gen.h ip_set_free(map->members); map 66 net/netfilter/ipset/ip_set_bitmap_gen.h ip_set_free(map); map 74 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 78 net/netfilter/ipset/ip_set_bitmap_gen.h bitmap_zero(map->members, map->elements); map 85 net/netfilter/ipset/ip_set_bitmap_gen.h mtype_memsize(const struct mtype *map, size_t dsize) map 87 net/netfilter/ipset/ip_set_bitmap_gen.h return sizeof(*map) + map->memsize + map 88 net/netfilter/ipset/ip_set_bitmap_gen.h map->elements * dsize; map 94 net/netfilter/ipset/ip_set_bitmap_gen.h const struct mtype *map = set->data; map 96 net/netfilter/ipset/ip_set_bitmap_gen.h size_t memsize = mtype_memsize(map, set->dsize) + set->ext_size; map 101 net/netfilter/ipset/ip_set_bitmap_gen.h if (mtype_do_head(skb, map) || map 119 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 121 net/netfilter/ipset/ip_set_bitmap_gen.h void *x = get_ext(set, map, e->id); map 122 net/netfilter/ipset/ip_set_bitmap_gen.h int ret = mtype_do_test(e, map, set->dsize); map 133 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 135 net/netfilter/ipset/ip_set_bitmap_gen.h void *x = get_ext(set, map, e->id); map 136 net/netfilter/ipset/ip_set_bitmap_gen.h int ret = mtype_do_add(e, map, flags, set->dsize); map 144 net/netfilter/ipset/ip_set_bitmap_gen.h set_bit(e->id, map->members); map 155 net/netfilter/ipset/ip_set_bitmap_gen.h mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret); map 168 net/netfilter/ipset/ip_set_bitmap_gen.h set_bit(e->id, map->members); map 178 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 180 net/netfilter/ipset/ip_set_bitmap_gen.h void *x = get_ext(set, map, e->id); map 182 net/netfilter/ipset/ip_set_bitmap_gen.h if (mtype_do_del(e, map)) map 206 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = set->data; map 217 net/netfilter/ipset/ip_set_bitmap_gen.h for (; cb->args[IPSET_CB_ARG0] < map->elements; map 221 net/netfilter/ipset/ip_set_bitmap_gen.h x = get_ext(set, map, id); map 222 net/netfilter/ipset/ip_set_bitmap_gen.h if (!test_bit(id, map->members) || map 239 net/netfilter/ipset/ip_set_bitmap_gen.h if (mtype_do_list(skb, map, id, set->dsize)) map 267 net/netfilter/ipset/ip_set_bitmap_gen.h struct mtype *map = from_timer(map, t, gc); map 268 net/netfilter/ipset/ip_set_bitmap_gen.h struct ip_set *set = map->set; map 276 
net/netfilter/ipset/ip_set_bitmap_gen.h for (id = 0; id < map->elements; id++) map 277 net/netfilter/ipset/ip_set_bitmap_gen.h if (mtype_gc_test(id, map, set->dsize)) { map 278 net/netfilter/ipset/ip_set_bitmap_gen.h x = get_ext(set, map, id); map 280 net/netfilter/ipset/ip_set_bitmap_gen.h clear_bit(id, map->members); map 287 net/netfilter/ipset/ip_set_bitmap_gen.h map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; map 288 net/netfilter/ipset/ip_set_bitmap_gen.h add_timer(&map->gc); map 68 net/netfilter/ipset/ip_set_bitmap_ip.c struct bitmap_ip *map, size_t dsize) map 70 net/netfilter/ipset/ip_set_bitmap_ip.c return !!test_bit(e->id, map->members); map 74 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_gc_test(u16 id, const struct bitmap_ip *map, size_t dsize) map 76 net/netfilter/ipset/ip_set_bitmap_ip.c return !!test_bit(id, map->members); map 80 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_add(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map, map 83 net/netfilter/ipset/ip_set_bitmap_ip.c return !!test_bit(e->id, map->members); map 87 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_del(const struct bitmap_ip_adt_elem *e, struct bitmap_ip *map) map 89 net/netfilter/ipset/ip_set_bitmap_ip.c return !test_and_clear_bit(e->id, map->members); map 93 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_list(struct sk_buff *skb, const struct bitmap_ip *map, u32 id, map 97 net/netfilter/ipset/ip_set_bitmap_ip.c htonl(map->first_ip + id * map->hosts)); map 101 net/netfilter/ipset/ip_set_bitmap_ip.c bitmap_ip_do_head(struct sk_buff *skb, const struct bitmap_ip *map) map 103 net/netfilter/ipset/ip_set_bitmap_ip.c return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) || map 104 net/netfilter/ipset/ip_set_bitmap_ip.c nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) || map 105 net/netfilter/ipset/ip_set_bitmap_ip.c (map->netmask != 32 && map 106 net/netfilter/ipset/ip_set_bitmap_ip.c nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)); map 114 net/netfilter/ipset/ip_set_bitmap_ip.c struct bitmap_ip *map = set->data; map 121 net/netfilter/ipset/ip_set_bitmap_ip.c if (ip < map->first_ip || ip > map->last_ip) map 124 net/netfilter/ipset/ip_set_bitmap_ip.c e.id = ip_to_id(map, ip); map 133 net/netfilter/ipset/ip_set_bitmap_ip.c struct bitmap_ip *map = set->data; map 154 net/netfilter/ipset/ip_set_bitmap_ip.c if (ip < map->first_ip || ip > map->last_ip) map 158 net/netfilter/ipset/ip_set_bitmap_ip.c e.id = ip_to_id(map, ip); map 168 net/netfilter/ipset/ip_set_bitmap_ip.c if (ip < map->first_ip) map 181 net/netfilter/ipset/ip_set_bitmap_ip.c if (ip_to > map->last_ip) map 184 net/netfilter/ipset/ip_set_bitmap_ip.c for (; !before(ip_to, ip); ip += map->hosts) { map 185 net/netfilter/ipset/ip_set_bitmap_ip.c e.id = ip_to_id(map, ip); map 219 net/netfilter/ipset/ip_set_bitmap_ip.c init_map_ip(struct ip_set *set, struct bitmap_ip *map, map 223 net/netfilter/ipset/ip_set_bitmap_ip.c map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN); map 224 net/netfilter/ipset/ip_set_bitmap_ip.c if (!map->members) map 226 net/netfilter/ipset/ip_set_bitmap_ip.c map->first_ip = first_ip; map 227 net/netfilter/ipset/ip_set_bitmap_ip.c map->last_ip = last_ip; map 228 net/netfilter/ipset/ip_set_bitmap_ip.c map->elements = elements; map 229 net/netfilter/ipset/ip_set_bitmap_ip.c map->hosts = hosts; map 230 net/netfilter/ipset/ip_set_bitmap_ip.c map->netmask = netmask; map 233 net/netfilter/ipset/ip_set_bitmap_ip.c map->set = set; map 234 
net/netfilter/ipset/ip_set_bitmap_ip.c set->data = map; map 244 net/netfilter/ipset/ip_set_bitmap_ip.c struct bitmap_ip *map; map 309 net/netfilter/ipset/ip_set_bitmap_ip.c map = ip_set_alloc(sizeof(*map) + elements * set->dsize); map 310 net/netfilter/ipset/ip_set_bitmap_ip.c if (!map) map 313 net/netfilter/ipset/ip_set_bitmap_ip.c map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); map 315 net/netfilter/ipset/ip_set_bitmap_ip.c if (!init_map_ip(set, map, first_ip, last_ip, map 317 net/netfilter/ipset/ip_set_bitmap_ip.c kfree(map); map 84 net/netfilter/ipset/ip_set_bitmap_ipmac.c const struct bitmap_ipmac *map, size_t dsize) map 88 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (!test_bit(e->id, map->members)) map 90 net/netfilter/ipset/ip_set_bitmap_ipmac.c elem = get_const_elem(map->extensions, e->id, dsize); map 98 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_gc_test(u16 id, const struct bitmap_ipmac *map, size_t dsize) map 102 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (!test_bit(id, map->members)) map 104 net/netfilter/ipset/ip_set_bitmap_ipmac.c elem = get_const_elem(map->extensions, id, dsize); map 119 net/netfilter/ipset/ip_set_bitmap_ipmac.c struct bitmap_ipmac *map, int mode) map 144 net/netfilter/ipset/ip_set_bitmap_ipmac.c struct bitmap_ipmac *map, u32 flags, size_t dsize) map 148 net/netfilter/ipset/ip_set_bitmap_ipmac.c elem = get_elem(map->extensions, e->id, dsize); map 149 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (test_bit(e->id, map->members)) { map 155 net/netfilter/ipset/ip_set_bitmap_ipmac.c clear_bit(e->id, map->members); map 164 net/netfilter/ipset/ip_set_bitmap_ipmac.c clear_bit(e->id, map->members); map 182 net/netfilter/ipset/ip_set_bitmap_ipmac.c struct bitmap_ipmac *map) map 184 net/netfilter/ipset/ip_set_bitmap_ipmac.c return !test_and_clear_bit(e->id, map->members); map 188 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_do_list(struct sk_buff *skb, const struct bitmap_ipmac *map, map 192 net/netfilter/ipset/ip_set_bitmap_ipmac.c get_const_elem(map->extensions, id, dsize); map 195 net/netfilter/ipset/ip_set_bitmap_ipmac.c htonl(map->first_ip + id)) || map 201 net/netfilter/ipset/ip_set_bitmap_ipmac.c bitmap_ipmac_do_head(struct sk_buff *skb, const struct bitmap_ipmac *map) map 203 net/netfilter/ipset/ip_set_bitmap_ipmac.c return nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) || map 204 net/netfilter/ipset/ip_set_bitmap_ipmac.c nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); map 212 net/netfilter/ipset/ip_set_bitmap_ipmac.c struct bitmap_ipmac *map = set->data; map 219 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (ip < map->first_ip || ip > map->last_ip) map 227 net/netfilter/ipset/ip_set_bitmap_ipmac.c e.id = ip_to_id(map, ip); map 244 net/netfilter/ipset/ip_set_bitmap_ipmac.c const struct bitmap_ipmac *map = set->data; map 265 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (ip < map->first_ip || ip > map->last_ip) map 268 net/netfilter/ipset/ip_set_bitmap_ipmac.c e.id = ip_to_id(map, ip); map 299 net/netfilter/ipset/ip_set_bitmap_ipmac.c init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map, map 302 net/netfilter/ipset/ip_set_bitmap_ipmac.c map->members = bitmap_zalloc(elements, GFP_KERNEL | __GFP_NOWARN); map 303 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (!map->members) map 305 net/netfilter/ipset/ip_set_bitmap_ipmac.c map->first_ip = first_ip; map 306 net/netfilter/ipset/ip_set_bitmap_ipmac.c map->last_ip = last_ip; map 307 net/netfilter/ipset/ip_set_bitmap_ipmac.c map->elements = elements; map 
310 net/netfilter/ipset/ip_set_bitmap_ipmac.c map->set = set; map 311 net/netfilter/ipset/ip_set_bitmap_ipmac.c set->data = map; map 323 net/netfilter/ipset/ip_set_bitmap_ipmac.c struct bitmap_ipmac *map; map 359 net/netfilter/ipset/ip_set_bitmap_ipmac.c map = ip_set_alloc(sizeof(*map) + elements * set->dsize); map 360 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (!map) map 363 net/netfilter/ipset/ip_set_bitmap_ipmac.c map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); map 365 net/netfilter/ipset/ip_set_bitmap_ipmac.c if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) { map 366 net/netfilter/ipset/ip_set_bitmap_ipmac.c kfree(map); map 59 net/netfilter/ipset/ip_set_bitmap_port.c const struct bitmap_port *map, size_t dsize) map 61 net/netfilter/ipset/ip_set_bitmap_port.c return !!test_bit(e->id, map->members); map 65 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_gc_test(u16 id, const struct bitmap_port *map, size_t dsize) map 67 net/netfilter/ipset/ip_set_bitmap_port.c return !!test_bit(id, map->members); map 72 net/netfilter/ipset/ip_set_bitmap_port.c struct bitmap_port *map, u32 flags, size_t dsize) map 74 net/netfilter/ipset/ip_set_bitmap_port.c return !!test_bit(e->id, map->members); map 79 net/netfilter/ipset/ip_set_bitmap_port.c struct bitmap_port *map) map 81 net/netfilter/ipset/ip_set_bitmap_port.c return !test_and_clear_bit(e->id, map->members); map 85 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_do_list(struct sk_buff *skb, const struct bitmap_port *map, u32 id, map 89 net/netfilter/ipset/ip_set_bitmap_port.c htons(map->first_port + id)); map 93 net/netfilter/ipset/ip_set_bitmap_port.c bitmap_port_do_head(struct sk_buff *skb, const struct bitmap_port *map) map 95 net/netfilter/ipset/ip_set_bitmap_port.c return nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) || map 96 net/netfilter/ipset/ip_set_bitmap_port.c nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); map 104 net/netfilter/ipset/ip_set_bitmap_port.c struct bitmap_port *map = set->data; map 117 net/netfilter/ipset/ip_set_bitmap_port.c if (port < map->first_port || port > map->last_port) map 120 net/netfilter/ipset/ip_set_bitmap_port.c e.id = port_to_id(map, port); map 129 net/netfilter/ipset/ip_set_bitmap_port.c struct bitmap_port *map = set->data; map 145 net/netfilter/ipset/ip_set_bitmap_port.c if (port < map->first_port || port > map->last_port) map 152 net/netfilter/ipset/ip_set_bitmap_port.c e.id = port_to_id(map, port); map 160 net/netfilter/ipset/ip_set_bitmap_port.c if (port < map->first_port) map 167 net/netfilter/ipset/ip_set_bitmap_port.c if (port_to > map->last_port) map 171 net/netfilter/ipset/ip_set_bitmap_port.c e.id = port_to_id(map, port); map 204 net/netfilter/ipset/ip_set_bitmap_port.c init_map_port(struct ip_set *set, struct bitmap_port *map, map 207 net/netfilter/ipset/ip_set_bitmap_port.c map->members = bitmap_zalloc(map->elements, GFP_KERNEL | __GFP_NOWARN); map 208 net/netfilter/ipset/ip_set_bitmap_port.c if (!map->members) map 210 net/netfilter/ipset/ip_set_bitmap_port.c map->first_port = first_port; map 211 net/netfilter/ipset/ip_set_bitmap_port.c map->last_port = last_port; map 214 net/netfilter/ipset/ip_set_bitmap_port.c map->set = set; map 215 net/netfilter/ipset/ip_set_bitmap_port.c set->data = map; map 225 net/netfilter/ipset/ip_set_bitmap_port.c struct bitmap_port *map; map 242 net/netfilter/ipset/ip_set_bitmap_port.c map = ip_set_alloc(sizeof(*map) + elements * set->dsize); map 243 net/netfilter/ipset/ip_set_bitmap_port.c if (!map) 
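The bitmap set entries above and immediately below (ip_set_bitmap_gen.h together with the bitmap:ip, bitmap:ipmac and bitmap:port types) all share one shape: the set keeps a plain bitmap in map->members, an element is reduced to a small integer id (for ports, port_to_id() is effectively port - first_port), and add/test/del become set_bit/test_bit/test_and_clear_bit on that id. A minimal userspace sketch of that pattern follows; it is illustrative only, not the kernel code, and the names bitmap_port_model, test_id, set_id and clear_id are invented for the example (only port_to_id mirrors the helper of the same name listed above, under the assumption that it subtracts first_port).

/*
 * Simplified userspace model of the bitmap:port pattern listed above:
 * membership is a plain bitmap indexed by port - first_port.
 * Names and helpers are illustrative, not the kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct bitmap_port_model {
	uint16_t first_port;	/* inclusive range start */
	uint16_t last_port;	/* inclusive range end */
	uint32_t elements;	/* last_port - first_port + 1 */
	unsigned long *members;	/* one bit per possible port */
};

static uint32_t port_to_id(const struct bitmap_port_model *map, uint16_t port)
{
	return port - map->first_port;
}

static bool test_id(const struct bitmap_port_model *map, uint32_t id)
{
	return map->members[id / BITS_PER_LONG] & (1UL << (id % BITS_PER_LONG));
}

static void set_id(struct bitmap_port_model *map, uint32_t id)
{
	map->members[id / BITS_PER_LONG] |= 1UL << (id % BITS_PER_LONG);
}

static bool clear_id(struct bitmap_port_model *map, uint32_t id)
{
	bool was_set = test_id(map, id);

	map->members[id / BITS_PER_LONG] &= ~(1UL << (id % BITS_PER_LONG));
	return was_set;
}

int main(void)
{
	struct bitmap_port_model map = { .first_port = 8000, .last_port = 8999 };
	size_t nlongs;

	map.elements = map.last_port - map.first_port + 1;
	nlongs = (map.elements + BITS_PER_LONG - 1) / BITS_PER_LONG;
	map.members = calloc(nlongs, sizeof(unsigned long));	/* zeroed, like bitmap_zalloc() */
	if (!map.members)
		return 1;

	set_id(&map, port_to_id(&map, 8080));	/* "add" */
	printf("8080 in set: %d\n", test_id(&map, port_to_id(&map, 8080)));
	printf("8443 in set: %d\n", test_id(&map, port_to_id(&map, 8443)));
	clear_id(&map, port_to_id(&map, 8080));	/* "del" */
	printf("8080 after del: %d\n", test_id(&map, port_to_id(&map, 8080)));

	free(map.members);
	return 0;
}

Built as plain C it prints 1, 0, 0 for the three lookups, i.e. the same present/absent answers the bitmap membership test would give for that add/test/del sequence.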
map 246 net/netfilter/ipset/ip_set_bitmap_port.c map->elements = elements; map 247 net/netfilter/ipset/ip_set_bitmap_port.c map->memsize = BITS_TO_LONGS(elements) * sizeof(unsigned long); map 249 net/netfilter/ipset/ip_set_bitmap_port.c if (!init_map_port(set, map, first_port, last_port)) { map 250 net/netfilter/ipset/ip_set_bitmap_port.c kfree(map); map 53 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 63 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_rcu(e, &map->members, list) { map 78 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 82 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry(e, &map->members, list) { map 98 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 102 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry(e, &map->members, list) { map 155 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 159 net/netfilter/ipset/ip_set_list_set.c ip_set_put_byindex(map->net, e->id); map 166 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 169 net/netfilter/ipset/ip_set_list_set.c ip_set_put_byindex(map->net, old->id); map 176 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 179 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_safe(e, n, &map->members, list) map 188 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 193 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry(e, &map->members, list) { map 206 net/netfilter/ipset/ip_set_list_set.c ret = !list_is_last(&e->list, &map->members) && map 235 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 242 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry(e, &map->members, list) { map 270 net/netfilter/ipset/ip_set_list_set.c ip_set_put_byindex(map->net, d->id); map 276 net/netfilter/ipset/ip_set_list_set.c n = list_empty(&map->members) ? 
NULL : map 277 net/netfilter/ipset/ip_set_list_set.c list_last_entry(&map->members, struct set_elem, list); map 280 net/netfilter/ipset/ip_set_list_set.c if (!list_is_last(&next->list, &map->members)) map 284 net/netfilter/ipset/ip_set_list_set.c if (prev->list.prev != &map->members) map 307 net/netfilter/ipset/ip_set_list_set.c list_add_tail_rcu(&e->list, &map->members); map 317 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 321 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry(e, &map->members, list) { map 332 net/netfilter/ipset/ip_set_list_set.c if (list_is_last(&e->list, &map->members) || map 349 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 366 net/netfilter/ipset/ip_set_list_set.c e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s); map 387 net/netfilter/ipset/ip_set_list_set.c e.refid = ip_set_get_byname(map->net, map 404 net/netfilter/ipset/ip_set_list_set.c ip_set_put_byindex(map->net, e.refid); map 406 net/netfilter/ipset/ip_set_list_set.c ip_set_put_byindex(map->net, e.id); map 414 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 417 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_safe(e, n, &map->members, list) map 426 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 430 net/netfilter/ipset/ip_set_list_set.c del_timer_sync(&map->gc); map 432 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_safe(e, n, &map->members, list) { map 434 net/netfilter/ipset/ip_set_list_set.c ip_set_put_byindex(map->net, e->id); map 438 net/netfilter/ipset/ip_set_list_set.c kfree(map); map 445 net/netfilter/ipset/ip_set_list_set.c list_set_memsize(const struct list_set *map, size_t dsize) map 451 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_rcu(e, &map->members, list) map 455 net/netfilter/ipset/ip_set_list_set.c return (sizeof(*map) + n * dsize); map 461 net/netfilter/ipset/ip_set_list_set.c const struct list_set *map = set->data; map 463 net/netfilter/ipset/ip_set_list_set.c size_t memsize = list_set_memsize(map, set->dsize) + set->ext_size; map 468 net/netfilter/ipset/ip_set_list_set.c if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) || map 486 net/netfilter/ipset/ip_set_list_set.c const struct list_set *map = set->data; map 498 net/netfilter/ipset/ip_set_list_set.c list_for_each_entry_rcu(e, &map->members, list) { map 508 net/netfilter/ipset/ip_set_list_set.c ip_set_name_byindex(map->net, e->id, name); map 566 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = from_timer(map, t, gc); map 567 net/netfilter/ipset/ip_set_list_set.c struct ip_set *set = map->set; map 573 net/netfilter/ipset/ip_set_list_set.c map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ; map 574 net/netfilter/ipset/ip_set_list_set.c add_timer(&map->gc); map 580 net/netfilter/ipset/ip_set_list_set.c struct list_set *map = set->data; map 582 net/netfilter/ipset/ip_set_list_set.c timer_setup(&map->gc, gc, 0); map 583 net/netfilter/ipset/ip_set_list_set.c mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ); map 591 net/netfilter/ipset/ip_set_list_set.c struct list_set *map; map 593 net/netfilter/ipset/ip_set_list_set.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 594 net/netfilter/ipset/ip_set_list_set.c if (!map) map 597 net/netfilter/ipset/ip_set_list_set.c map->size = size; map 598 net/netfilter/ipset/ip_set_list_set.c map->net = net; map 599 net/netfilter/ipset/ip_set_list_set.c map->set = set; map 600 
net/netfilter/ipset/ip_set_list_set.c INIT_LIST_HEAD(&map->members); map 601 net/netfilter/ipset/ip_set_list_set.c set->data = map; map 164 net/netfilter/nf_conntrack_proto_sctp.c unsigned long *map) map 193 net/netfilter/nf_conntrack_proto_sctp.c if (map) map 194 net/netfilter/nf_conntrack_proto_sctp.c set_bit(sch->type, map); map 371 net/netfilter/nf_conntrack_proto_sctp.c unsigned long map[256 / sizeof(unsigned long)] = { 0 }; map 380 net/netfilter/nf_conntrack_proto_sctp.c if (do_basic_checks(ct, skb, dataoff, map) != 0) map 385 net/netfilter/nf_conntrack_proto_sctp.c if (test_bit(SCTP_CID_ABORT, map) || map 386 net/netfilter/nf_conntrack_proto_sctp.c test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) || map 387 net/netfilter/nf_conntrack_proto_sctp.c test_bit(SCTP_CID_COOKIE_ACK, map)) map 395 net/netfilter/nf_conntrack_proto_sctp.c if (!test_bit(SCTP_CID_INIT, map) && map 396 net/netfilter/nf_conntrack_proto_sctp.c !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) && map 397 net/netfilter/nf_conntrack_proto_sctp.c !test_bit(SCTP_CID_COOKIE_ECHO, map) && map 398 net/netfilter/nf_conntrack_proto_sctp.c !test_bit(SCTP_CID_ABORT, map) && map 399 net/netfilter/nf_conntrack_proto_sctp.c !test_bit(SCTP_CID_SHUTDOWN_ACK, map) && map 400 net/netfilter/nf_conntrack_proto_sctp.c !test_bit(SCTP_CID_HEARTBEAT, map) && map 401 net/netfilter/nf_conntrack_proto_sctp.c !test_bit(SCTP_CID_HEARTBEAT_ACK, map) && map 145 net/netlabel/netlabel_cipso_v4.c doi_def->map.std = kzalloc(sizeof(*doi_def->map.std), GFP_KERNEL); map 146 net/netlabel/netlabel_cipso_v4.c if (doi_def->map.std == NULL) { map 173 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.local_size) map 174 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.local_size = map 182 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.cipso_size) map 183 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.cipso_size = map 188 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size, map 191 net/netlabel/netlabel_cipso_v4.c if (doi_def->map.std->lvl.local == NULL) { map 195 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size, map 198 net/netlabel/netlabel_cipso_v4.c if (doi_def->map.std->lvl.cipso == NULL) { map 202 net/netlabel/netlabel_cipso_v4.c for (iter = 0; iter < doi_def->map.std->lvl.local_size; iter++) map 203 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.local[iter] = CIPSO_V4_INV_LVL; map 204 net/netlabel/netlabel_cipso_v4.c for (iter = 0; iter < doi_def->map.std->lvl.cipso_size; iter++) map 205 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.cipso[iter] = CIPSO_V4_INV_LVL; map 219 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.local[nla_get_u32(lvl_loc)] = map 221 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.cipso[nla_get_u32(lvl_rem)] = map 248 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.local_size) map 249 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.local_size = map 257 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.cipso_size) map 258 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.cipso_size = map 263 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.local = kcalloc( map 264 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.local_size, map 267 net/netlabel/netlabel_cipso_v4.c if (doi_def->map.std->cat.local == NULL) { map 271 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.cipso = kcalloc( map 272 net/netlabel/netlabel_cipso_v4.c 
doi_def->map.std->cat.cipso_size, map 275 net/netlabel/netlabel_cipso_v4.c if (doi_def->map.std->cat.cipso == NULL) { map 279 net/netlabel/netlabel_cipso_v4.c for (iter = 0; iter < doi_def->map.std->cat.local_size; iter++) map 280 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.local[iter] = CIPSO_V4_INV_CAT; map 281 net/netlabel/netlabel_cipso_v4.c for (iter = 0; iter < doi_def->map.std->cat.cipso_size; iter++) map 282 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.cipso[iter] = CIPSO_V4_INV_CAT; map 296 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.local[ map 299 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.cipso[ map 517 net/netlabel/netlabel_cipso_v4.c iter < doi_def->map.std->lvl.local_size; map 519 net/netlabel/netlabel_cipso_v4.c if (doi_def->map.std->lvl.local[iter] == map 536 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->lvl.local[iter]); map 550 net/netlabel/netlabel_cipso_v4.c iter < doi_def->map.std->cat.local_size; map 552 net/netlabel/netlabel_cipso_v4.c if (doi_def->map.std->cat.local[iter] == map 569 net/netlabel/netlabel_cipso_v4.c doi_def->map.std->cat.local[iter]); map 150 net/netlabel/netlabel_mgmt.c struct netlbl_domaddr4_map *map; map 173 net/netlabel/netlabel_mgmt.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 174 net/netlabel/netlabel_mgmt.c if (map == NULL) { map 178 net/netlabel/netlabel_mgmt.c map->list.addr = addr->s_addr & mask->s_addr; map 179 net/netlabel/netlabel_mgmt.c map->list.mask = mask->s_addr; map 180 net/netlabel/netlabel_mgmt.c map->list.valid = 1; map 181 net/netlabel/netlabel_mgmt.c map->def.type = entry->def.type; map 183 net/netlabel/netlabel_mgmt.c map->def.cipso = cipsov4; map 185 net/netlabel/netlabel_mgmt.c ret_val = netlbl_af4list_add(&map->list, &addrmap->list4); map 187 net/netlabel/netlabel_mgmt.c kfree(map); map 198 net/netlabel/netlabel_mgmt.c struct netlbl_domaddr6_map *map; map 221 net/netlabel/netlabel_mgmt.c map = kzalloc(sizeof(*map), GFP_KERNEL); map 222 net/netlabel/netlabel_mgmt.c if (map == NULL) { map 226 net/netlabel/netlabel_mgmt.c map->list.addr = *addr; map 227 net/netlabel/netlabel_mgmt.c map->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; map 228 net/netlabel/netlabel_mgmt.c map->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; map 229 net/netlabel/netlabel_mgmt.c map->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; map 230 net/netlabel/netlabel_mgmt.c map->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; map 231 net/netlabel/netlabel_mgmt.c map->list.mask = *mask; map 232 net/netlabel/netlabel_mgmt.c map->list.valid = 1; map 233 net/netlabel/netlabel_mgmt.c map->def.type = entry->def.type; map 235 net/netlabel/netlabel_mgmt.c map->def.calipso = calipso; map 237 net/netlabel/netlabel_mgmt.c ret_val = netlbl_af6list_add(&map->list, &addrmap->list6); map 239 net/netlabel/netlabel_mgmt.c kfree(map); map 109 net/rds/cong.c struct rds_cong_map *map; map 115 net/rds/cong.c map = rb_entry(parent, struct rds_cong_map, m_rb_node); map 117 net/rds/cong.c diff = rds_addr_cmp(addr, &map->m_addr); map 123 net/rds/cong.c return map; map 140 net/rds/cong.c struct rds_cong_map *map; map 146 net/rds/cong.c map = kzalloc(sizeof(struct rds_cong_map), GFP_KERNEL); map 147 net/rds/cong.c if (!map) map 150 net/rds/cong.c map->m_addr = *addr; map 151 net/rds/cong.c init_waitqueue_head(&map->m_waitq); map 152 net/rds/cong.c INIT_LIST_HEAD(&map->m_conn_list); map 158 net/rds/cong.c map->m_page_addrs[i] = zp; map 162 net/rds/cong.c ret = rds_cong_tree_walk(addr, map); map 166 net/rds/cong.c ret = map; map 167 
net/rds/cong.c map = NULL; map 171 net/rds/cong.c if (map) { map 172 net/rds/cong.c for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++) map 173 net/rds/cong.c free_page(map->m_page_addrs[i]); map 174 net/rds/cong.c kfree(map); map 217 net/rds/cong.c void rds_cong_queue_updates(struct rds_cong_map *map) map 224 net/rds/cong.c list_for_each_entry(conn, &map->m_conn_list, c_map_item) { map 253 net/rds/cong.c void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask) map 256 net/rds/cong.c map, &map->m_addr); map 259 net/rds/cong.c if (waitqueue_active(&map->m_waitq)) map 260 net/rds/cong.c wake_up(&map->m_waitq); map 299 net/rds/cong.c void rds_cong_set_bit(struct rds_cong_map *map, __be16 port) map 305 net/rds/cong.c &map->m_addr, ntohs(port), map); map 310 net/rds/cong.c set_bit_le(off, (void *)map->m_page_addrs[i]); map 313 net/rds/cong.c void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port) map 319 net/rds/cong.c &map->m_addr, ntohs(port), map); map 324 net/rds/cong.c clear_bit_le(off, (void *)map->m_page_addrs[i]); map 327 net/rds/cong.c static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port) map 335 net/rds/cong.c return test_bit_le(off, (void *)map->m_page_addrs[i]); map 351 net/rds/cong.c struct rds_cong_map *map; map 359 net/rds/cong.c map = rds_cong_tree_walk(&rs->rs_bound_addr, NULL); map 362 net/rds/cong.c if (map && rds_cong_test_bit(map, rs->rs_bound_port)) { map 363 net/rds/cong.c rds_cong_clear_bit(map, rs->rs_bound_port); map 364 net/rds/cong.c rds_cong_queue_updates(map); map 368 net/rds/cong.c int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, map 371 net/rds/cong.c if (!rds_cong_test_bit(map, port)) map 385 net/rds/cong.c if (!rds_cong_test_bit(map, port)) map 393 net/rds/cong.c rdsdebug("waiting on map %p for port %u\n", map, be16_to_cpu(port)); map 395 net/rds/cong.c return wait_event_interruptible(map->m_waitq, map 396 net/rds/cong.c !rds_cong_test_bit(map, port)); map 402 net/rds/cong.c struct rds_cong_map *map; map 406 net/rds/cong.c map = rb_entry(node, struct rds_cong_map, m_rb_node); map 407 net/rds/cong.c rdsdebug("freeing map %p\n", map); map 408 net/rds/cong.c rb_erase(&map->m_rb_node, &rds_cong_tree); map 409 net/rds/cong.c for (i = 0; i < RDS_CONG_MAP_PAGES && map->m_page_addrs[i]; i++) map 410 net/rds/cong.c free_page(map->m_page_addrs[i]); map 411 net/rds/cong.c kfree(map); map 420 net/rds/cong.c struct rds_cong_map *map = conn->c_lcong; map 423 net/rds/cong.c rm = rds_message_map_pages(map->m_page_addrs, RDS_CONG_MAP_BYTES); map 779 net/rds/ib_recv.c struct rds_cong_map *map; map 793 net/rds/ib_recv.c map = conn->c_fcong; map 812 net/rds/ib_recv.c dst = (void *)map->m_page_addrs[map_page] + map_off; map 838 net/rds/ib_recv.c rds_cong_map_updated(map, le64_to_cpu(uncongested)); map 754 net/rds/rds.h void rds_cong_set_bit(struct rds_cong_map *map, __be16 port); map 755 net/rds/rds.h void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port); map 756 net/rds/rds.h int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock, struct rds_sock *rs); map 757 net/rds/rds.h void rds_cong_queue_updates(struct rds_cong_map *map); map 758 net/rds/rds.h void rds_cong_map_updated(struct rds_cong_map *map, uint64_t); map 88 net/rds/recv.c struct rds_cong_map *map, map 117 net/rds/recv.c rds_cong_set_bit(map, port); map 118 net/rds/recv.c rds_cong_queue_updates(map); map 125 net/rds/recv.c rds_cong_clear_bit(map, port); map 126 net/rds/recv.c rds_cong_queue_updates(map); map 112 
net/rds/tcp_recv.c struct rds_cong_map *map; map 121 net/rds/tcp_recv.c map = conn->c_fcong; map 133 net/rds/tcp_recv.c (void *)map->m_page_addrs[map_page] + map_off, map 146 net/rds/tcp_recv.c rds_cong_map_updated(map, ~(u64) 0); map 756 net/sctp/sm_make_chunk.c struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; map 766 net/sctp/sm_make_chunk.c ctsn = sctp_tsnmap_get_ctsn(map); map 771 net/sctp/sm_make_chunk.c num_gabs = sctp_tsnmap_num_gabs(map, gabs); map 772 net/sctp/sm_make_chunk.c num_dup_tsns = sctp_tsnmap_num_dups(map); map 833 net/sctp/sm_make_chunk.c sctp_tsnmap_get_dups(map)); map 6329 net/sctp/sm_statefuns.c struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; map 6416 net/sctp/sm_statefuns.c if (sctp_tsnmap_has_gap(map) && map 6417 net/sctp/sm_statefuns.c (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { map 6436 net/sctp/sm_statefuns.c if (sctp_tsnmap_has_gap(map) && map 6437 net/sctp/sm_statefuns.c (sctp_tsnmap_get_ctsn(map) + 1) == tsn) { map 29 net/sctp/tsnmap.c static void sctp_tsnmap_update(struct sctp_tsnmap *map); map 30 net/sctp/tsnmap.c static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, map 32 net/sctp/tsnmap.c static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size); map 35 net/sctp/tsnmap.c struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, map 38 net/sctp/tsnmap.c if (!map->tsn_map) { map 39 net/sctp/tsnmap.c map->tsn_map = kzalloc(len>>3, gfp); map 40 net/sctp/tsnmap.c if (map->tsn_map == NULL) map 43 net/sctp/tsnmap.c map->len = len; map 45 net/sctp/tsnmap.c bitmap_zero(map->tsn_map, map->len); map 49 net/sctp/tsnmap.c map->base_tsn = initial_tsn; map 50 net/sctp/tsnmap.c map->cumulative_tsn_ack_point = initial_tsn - 1; map 51 net/sctp/tsnmap.c map->max_tsn_seen = map->cumulative_tsn_ack_point; map 52 net/sctp/tsnmap.c map->num_dup_tsns = 0; map 54 net/sctp/tsnmap.c return map; map 57 net/sctp/tsnmap.c void sctp_tsnmap_free(struct sctp_tsnmap *map) map 59 net/sctp/tsnmap.c map->len = 0; map 60 net/sctp/tsnmap.c kfree(map->tsn_map); map 69 net/sctp/tsnmap.c int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn) map 74 net/sctp/tsnmap.c if (TSN_lte(tsn, map->cumulative_tsn_ack_point)) map 80 net/sctp/tsnmap.c if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE)) map 84 net/sctp/tsnmap.c gap = tsn - map->base_tsn; map 87 net/sctp/tsnmap.c if (gap < map->len && test_bit(gap, map->tsn_map)) map 95 net/sctp/tsnmap.c int sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn, map 100 net/sctp/tsnmap.c if (TSN_lt(tsn, map->base_tsn)) map 103 net/sctp/tsnmap.c gap = tsn - map->base_tsn; map 105 net/sctp/tsnmap.c if (gap >= map->len && !sctp_tsnmap_grow(map, gap + 1)) map 108 net/sctp/tsnmap.c if (!sctp_tsnmap_has_gap(map) && gap == 0) { map 113 net/sctp/tsnmap.c map->max_tsn_seen++; map 114 net/sctp/tsnmap.c map->cumulative_tsn_ack_point++; map 118 net/sctp/tsnmap.c map->base_tsn++; map 125 net/sctp/tsnmap.c if (TSN_lt(map->max_tsn_seen, tsn)) map 126 net/sctp/tsnmap.c map->max_tsn_seen = tsn; map 129 net/sctp/tsnmap.c set_bit(gap, map->tsn_map); map 134 net/sctp/tsnmap.c sctp_tsnmap_update(map); map 142 net/sctp/tsnmap.c static void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map, map 146 net/sctp/tsnmap.c iter->start = map->cumulative_tsn_ack_point + 1; map 152 net/sctp/tsnmap.c static int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, map 160 net/sctp/tsnmap.c if (TSN_lte(map->max_tsn_seen, iter->start)) map 163 net/sctp/tsnmap.c offset = iter->start - map->base_tsn; map 164 
net/sctp/tsnmap.c sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, map 169 net/sctp/tsnmap.c end_ = map->len - 1; map 182 net/sctp/tsnmap.c iter->start = map->cumulative_tsn_ack_point + *end + 1; map 190 net/sctp/tsnmap.c void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn) map 194 net/sctp/tsnmap.c if (TSN_lt(tsn, map->base_tsn)) map 196 net/sctp/tsnmap.c if (!TSN_lt(tsn, map->base_tsn + SCTP_TSN_MAP_SIZE)) map 200 net/sctp/tsnmap.c if (TSN_lt(map->max_tsn_seen, tsn)) map 201 net/sctp/tsnmap.c map->max_tsn_seen = tsn; map 203 net/sctp/tsnmap.c gap = tsn - map->base_tsn + 1; map 205 net/sctp/tsnmap.c map->base_tsn += gap; map 206 net/sctp/tsnmap.c map->cumulative_tsn_ack_point += gap; map 207 net/sctp/tsnmap.c if (gap >= map->len) { map 211 net/sctp/tsnmap.c bitmap_zero(map->tsn_map, map->len); map 216 net/sctp/tsnmap.c bitmap_shift_right(map->tsn_map, map->tsn_map, gap, map->len); map 217 net/sctp/tsnmap.c sctp_tsnmap_update(map); map 228 net/sctp/tsnmap.c static void sctp_tsnmap_update(struct sctp_tsnmap *map) map 234 net/sctp/tsnmap.c len = map->max_tsn_seen - map->cumulative_tsn_ack_point; map 235 net/sctp/tsnmap.c zero_bit = find_first_zero_bit(map->tsn_map, len); map 239 net/sctp/tsnmap.c map->base_tsn += zero_bit; map 240 net/sctp/tsnmap.c map->cumulative_tsn_ack_point += zero_bit; map 242 net/sctp/tsnmap.c bitmap_shift_right(map->tsn_map, map->tsn_map, zero_bit, map->len); map 247 net/sctp/tsnmap.c __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map) map 249 net/sctp/tsnmap.c __u32 cum_tsn = map->cumulative_tsn_ack_point; map 250 net/sctp/tsnmap.c __u32 max_tsn = map->max_tsn_seen; map 251 net/sctp/tsnmap.c __u32 base_tsn = map->base_tsn; map 258 net/sctp/tsnmap.c if (gap == 0 || gap >= map->len) map 261 net/sctp/tsnmap.c pending_data -= bitmap_weight(map->tsn_map, gap + 1); map 272 net/sctp/tsnmap.c static void sctp_tsnmap_find_gap_ack(unsigned long *map, __u16 off, map 284 net/sctp/tsnmap.c i = find_next_bit(map, len, off); map 293 net/sctp/tsnmap.c i = find_next_zero_bit(map, len, i); map 300 net/sctp/tsnmap.c void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn) map 304 net/sctp/tsnmap.c if (TSN_lt(tsn, map->base_tsn)) map 307 net/sctp/tsnmap.c if (!TSN_lt(tsn, map->base_tsn + map->len)) map 310 net/sctp/tsnmap.c gap = tsn - map->base_tsn; map 313 net/sctp/tsnmap.c clear_bit(gap, map->tsn_map); map 317 net/sctp/tsnmap.c __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map, map 324 net/sctp/tsnmap.c if (sctp_tsnmap_has_gap(map)) { map 326 net/sctp/tsnmap.c sctp_tsnmap_iter_init(map, &iter); map 327 net/sctp/tsnmap.c while (sctp_tsnmap_next_gap_ack(map, &iter, map 341 net/sctp/tsnmap.c static int sctp_tsnmap_grow(struct sctp_tsnmap *map, u16 size) map 350 net/sctp/tsnmap.c inc = ALIGN((size - map->len), BITS_PER_LONG) + SCTP_TSN_MAP_INCREMENT; map 351 net/sctp/tsnmap.c len = min_t(u16, map->len + inc, SCTP_TSN_MAP_SIZE); map 357 net/sctp/tsnmap.c bitmap_copy(new, map->tsn_map, map 358 net/sctp/tsnmap.c map->max_tsn_seen - map->cumulative_tsn_ack_point); map 359 net/sctp/tsnmap.c kfree(map->tsn_map); map 360 net/sctp/tsnmap.c map->tsn_map = new; map 361 net/sctp/tsnmap.c map->len = len; map 157 net/sunrpc/rpcb_clnt.c struct rpcbind_args *map = data; map 159 net/sunrpc/rpcb_clnt.c rpcb_wake_rpcbind_waiters(map->r_xprt, map->r_status); map 160 net/sunrpc/rpcb_clnt.c xprt_put(map->r_xprt); map 161 net/sunrpc/rpcb_clnt.c kfree(map->r_addr); map 162 net/sunrpc/rpcb_clnt.c kfree(map); map 452 net/sunrpc/rpcb_clnt.c struct rpcbind_args map = { map 459 
net/sunrpc/rpcb_clnt.c .rpc_argp = &map, map 485 net/sunrpc/rpcb_clnt.c struct rpcbind_args *map = msg->rpc_argp; map 490 net/sunrpc/rpcb_clnt.c map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); map 494 net/sunrpc/rpcb_clnt.c map->r_prog, map->r_vers, map 495 net/sunrpc/rpcb_clnt.c map->r_addr, map->r_netid); map 504 net/sunrpc/rpcb_clnt.c kfree(map->r_addr); map 516 net/sunrpc/rpcb_clnt.c struct rpcbind_args *map = msg->rpc_argp; map 521 net/sunrpc/rpcb_clnt.c map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); map 525 net/sunrpc/rpcb_clnt.c map->r_prog, map->r_vers, map 526 net/sunrpc/rpcb_clnt.c map->r_addr, map->r_netid); map 535 net/sunrpc/rpcb_clnt.c kfree(map->r_addr); map 542 net/sunrpc/rpcb_clnt.c struct rpcbind_args *map = msg->rpc_argp; map 546 net/sunrpc/rpcb_clnt.c map->r_prog, map->r_vers, map->r_netid); map 548 net/sunrpc/rpcb_clnt.c map->r_addr = ""; map 601 net/sunrpc/rpcb_clnt.c struct rpcbind_args map = { map 608 net/sunrpc/rpcb_clnt.c .rpc_argp = &map, map 629 net/sunrpc/rpcb_clnt.c struct rpcbind_args *map, const struct rpc_procinfo *proc) map 633 net/sunrpc/rpcb_clnt.c .rpc_argp = map, map 634 net/sunrpc/rpcb_clnt.c .rpc_resp = map, map 640 net/sunrpc/rpcb_clnt.c .callback_data = map, map 684 net/sunrpc/rpcb_clnt.c struct rpcbind_args *map; map 762 net/sunrpc/rpcb_clnt.c map = kzalloc(sizeof(struct rpcbind_args), GFP_NOFS); map 763 net/sunrpc/rpcb_clnt.c if (!map) { map 769 net/sunrpc/rpcb_clnt.c map->r_prog = clnt->cl_prog; map 770 net/sunrpc/rpcb_clnt.c map->r_vers = clnt->cl_vers; map 771 net/sunrpc/rpcb_clnt.c map->r_prot = xprt->prot; map 772 net/sunrpc/rpcb_clnt.c map->r_port = 0; map 773 net/sunrpc/rpcb_clnt.c map->r_xprt = xprt; map 774 net/sunrpc/rpcb_clnt.c map->r_status = -EIO; map 779 net/sunrpc/rpcb_clnt.c map->r_netid = xprt->address_strings[RPC_DISPLAY_NETID]; map 780 net/sunrpc/rpcb_clnt.c map->r_addr = rpc_sockaddr2uaddr(sap, GFP_NOFS); map 781 net/sunrpc/rpcb_clnt.c if (!map->r_addr) { map 787 net/sunrpc/rpcb_clnt.c map->r_owner = ""; map 790 net/sunrpc/rpcb_clnt.c map->r_addr = NULL; map 796 net/sunrpc/rpcb_clnt.c child = rpcb_call_async(rpcb_clnt, map, proc); map 810 net/sunrpc/rpcb_clnt.c kfree(map); map 825 net/sunrpc/rpcb_clnt.c struct rpcbind_args *map = data; map 826 net/sunrpc/rpcb_clnt.c struct rpc_xprt *xprt = map->r_xprt; map 840 net/sunrpc/rpcb_clnt.c } else if (map->r_port == 0) { map 846 net/sunrpc/rpcb_clnt.c xprt->ops->set_port(xprt, map->r_port); map 852 net/sunrpc/rpcb_clnt.c child->tk_pid, status, map->r_port); map 854 net/sunrpc/rpcb_clnt.c map->r_status = status; map 2378 net/tipc/link.c struct nla_map map[] = { map 2420 net/tipc/link.c for (i = 0; i < ARRAY_SIZE(map); i++) map 2421 net/tipc/link.c if (nla_put_u32(skb, map[i].key, map[i].val)) map 2514 net/tipc/link.c struct nla_map map[] = { map 2541 net/tipc/link.c for (i = 0; i < ARRAY_SIZE(map); i++) map 2542 net/tipc/link.c if (nla_put_u32(skb, map[i].key, map[i].val)) map 2030 net/wireless/util.c u16 map = le16_to_cpu(cap->supp_mcs.rx_mcs_map); map 2036 net/wireless/util.c if (map == 0xffff) map 2050 net/wireless/util.c int supp = (map >> (2 * i)) & 3; map 496 net/xdp/xsk.c struct xsk_map *map = NULL; map 505 net/xdp/xsk.c WARN_ON(xsk_map_inc(node->map)); map 506 net/xdp/xsk.c map = node->map; map 510 net/xdp/xsk.c return map; map 531 net/xdp/xsk.c struct xsk_map *map; map 533 net/xdp/xsk.c while ((map = xsk_get_map_list_entry(xs, &map_entry))) { map 534 net/xdp/xsk.c xsk_map_try_sock_delete(map, xs, map_entry); map 535 net/xdp/xsk.c xsk_map_put(map); map 27 
samples/bpf/bpf_load.h typedef void (*fixup_map_cb)(struct bpf_map_data *map, int idx); map 130 samples/bpf/hbm.c struct bpf_map *map; map 141 samples/bpf/hbm.c map = bpf_object__find_map_by_name(obj, "queue_stats"); map 142 samples/bpf/hbm.c map_fd = bpf_map__fd(map); map 395 samples/bpf/map_perf_test_user.c static void fixup_map(struct bpf_map_data *map, int idx) map 399 samples/bpf/map_perf_test_user.c if (!strcmp("inner_lru_hash_map", map->name)) { map 401 samples/bpf/map_perf_test_user.c inner_lru_hash_size = map->def.max_entries; map 404 samples/bpf/map_perf_test_user.c if (!strcmp("array_of_lru_hashs", map->name)) { map 409 samples/bpf/map_perf_test_user.c map->def.inner_map_idx = inner_lru_hash_idx; map 413 samples/bpf/map_perf_test_user.c if (!strcmp("lru_hash_lookup_map", map->name)) map 423 samples/bpf/map_perf_test_user.c if (!strcmp(test_map_names[i], map->name) && map 425 samples/bpf/map_perf_test_user.c map->def.max_entries = num_map_entries; map 35 samples/bpf/syscall_tp_kern.c static __always_inline void count(void *map) map 40 samples/bpf/syscall_tp_kern.c value = bpf_map_lookup_elem(map, &key); map 44 samples/bpf/syscall_tp_kern.c bpf_map_update_elem(map, &key, &init_val, BPF_NOEXIST); map 91 samples/bpf/xdp1_user.c struct bpf_map *map; map 134 samples/bpf/xdp1_user.c map = bpf_map__next(NULL, obj); map 135 samples/bpf/xdp1_user.c if (!map) { map 139 samples/bpf/xdp1_user.c map_fd = bpf_map__fd(map); map 94 samples/bpf/xdp_adjust_tail_user.c struct bpf_map *map; map 153 samples/bpf/xdp_adjust_tail_user.c map = bpf_map__next(NULL, obj); map 154 samples/bpf/xdp_adjust_tail_user.c if (!map) { map 158 samples/bpf/xdp_adjust_tail_user.c map_fd = bpf_map__fd(map); map 470 samples/bpf/xdp_rxq_info_user.c struct bpf_map *map; map 492 samples/bpf/xdp_rxq_info_user.c map = bpf_object__find_map_by_name(obj, "config_map"); map 495 samples/bpf/xdp_rxq_info_user.c if (!map || !stats_global_map || !rx_queue_index_map) { map 499 samples/bpf/xdp_rxq_info_user.c map_fd = bpf_map__fd(map); map 121 samples/bpf/xdp_sample_pkts_user.c struct bpf_map *map; map 157 samples/bpf/xdp_sample_pkts_user.c map = bpf_map__next(NULL, obj); map 158 samples/bpf/xdp_sample_pkts_user.c if (!map) { map 162 samples/bpf/xdp_sample_pkts_user.c map_fd = bpf_map__fd(map); map 1548 samples/mic/mpssd/mpssd.c char *map, *temp, log_buf[17] = {'\0'}; map 1563 samples/mic/mpssd/mpssd.c map = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0); map 1564 samples/mic/mpssd/mpssd.c if (map == MAP_FAILED) { map 1570 samples/mic/mpssd/mpssd.c temp = strstr(map, "__log_buf"); map 1573 samples/mic/mpssd/mpssd.c munmap(map, len); map 1580 samples/mic/mpssd/mpssd.c temp = strstr(map, "log_buf_len"); map 1583 samples/mic/mpssd/mpssd.c munmap(map, len); map 1590 samples/mic/mpssd/mpssd.c munmap(map, len); map 445 samples/vfio-mdev/mbochs.c char *map; map 482 samples/vfio-mdev/mbochs.c map = kmap(pg); map 484 samples/vfio-mdev/mbochs.c memcpy(map + poff, buf, count); map 486 samples/vfio-mdev/mbochs.c memcpy(buf, map + poff, count); map 912 samples/vfio-mdev/mbochs.c .map = mbochs_kmap_dmabuf, map 204 scripts/insert-sys-cert.c void *map; map 218 scripts/insert-sys-cert.c map = mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); map 219 scripts/insert-sys-cert.c if (map == MAP_FAILED) { map 225 scripts/insert-sys-cert.c return map; map 422 scripts/mod/modpost.c void *map = MAP_FAILED; map 432 scripts/mod/modpost.c map = mmap(NULL, *size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); map 436 scripts/mod/modpost.c if (map == 
MAP_FAILED) map 438 scripts/mod/modpost.c return map; map 218 scripts/recordmcount.c static int (*make_nop)(void *map, size_t const offset); map 220 scripts/recordmcount.c static int make_nop_x86(void *map, size_t const offset) map 226 scripts/recordmcount.c ptr = map + offset; map 230 scripts/recordmcount.c op = map + offset - 1; map 262 scripts/recordmcount.c static int make_nop_arm(void *map, size_t const offset) map 269 scripts/recordmcount.c ptr = map + offset; map 298 scripts/recordmcount.c static int make_nop_arm64(void *map, size_t const offset) map 302 scripts/recordmcount.c ptr = map + offset; map 63 scripts/selinux/genheaders/genheaders.c struct security_class_mapping *map = &secclass_map[i]; map 64 scripts/selinux/genheaders/genheaders.c map->name = stoupperx(map->name); map 65 scripts/selinux/genheaders/genheaders.c for (j = 0; map->perms[j]; j++) map 66 scripts/selinux/genheaders/genheaders.c map->perms[j] = stoupperx(map->perms[j]); map 77 scripts/selinux/genheaders/genheaders.c struct security_class_mapping *map = &secclass_map[i]; map 78 scripts/selinux/genheaders/genheaders.c fprintf(fout, "#define SECCLASS_%-39s %2d\n", map->name, i+1); map 94 scripts/selinux/genheaders/genheaders.c struct security_class_mapping *map = &secclass_map[i]; map 95 scripts/selinux/genheaders/genheaders.c int len = strlen(map->name), l = sizeof(s) - 1; map 96 scripts/selinux/genheaders/genheaders.c if (len >= l && memcmp(map->name + len - l, s, l) == 0) map 97 scripts/selinux/genheaders/genheaders.c fprintf(fout, "\tcase SECCLASS_%s:\n", map->name); map 121 scripts/selinux/genheaders/genheaders.c struct security_class_mapping *map = &secclass_map[i]; map 122 scripts/selinux/genheaders/genheaders.c int len = strlen(map->name); map 123 scripts/selinux/genheaders/genheaders.c for (j = 0; map->perms[j]; j++) { map 126 scripts/selinux/genheaders/genheaders.c map->name, map->perms[j]); map 129 scripts/selinux/genheaders/genheaders.c fprintf(fout, "#define %s__%-*s 0x%08xU\n", map->name, map 130 scripts/selinux/genheaders/genheaders.c 39-len, map->perms[j], 1U<<j); map 76 scripts/selinux/mdp/mdp.c struct security_class_mapping *map = &secclass_map[i]; map 77 scripts/selinux/mdp/mdp.c fprintf(fout, "class %s\n", map->name); map 79 scripts/selinux/mdp/mdp.c for (j = 0; map->perms[j]; j++) map 80 scripts/selinux/mdp/mdp.c fprintf(fout, "\t%s\n", map->perms[j]); map 97 scripts/selinux/mdp/mdp.c struct security_class_mapping *map = &secclass_map[i]; map 99 scripts/selinux/mdp/mdp.c fprintf(fout, "mlsconstrain %s {\n", map->name); map 100 scripts/selinux/mdp/mdp.c for (j = 0; map->perms[j]; j++) map 101 scripts/selinux/mdp/mdp.c fprintf(fout, "\t%s\n", map->perms[j]); map 2376 security/security.c int security_bpf_map(struct bpf_map *map, fmode_t fmode) map 2378 security/security.c return call_int_hook(bpf_map, 0, map, fmode); map 2384 security/security.c int security_bpf_map_alloc(struct bpf_map *map) map 2386 security/security.c return call_int_hook(bpf_map_alloc_security, 0, map); map 2392 security/security.c void security_bpf_map_free(struct bpf_map *map) map 2394 security/security.c call_void_hook(bpf_map_free_security, map); map 6732 security/selinux/hooks.c struct bpf_map *map; map 6736 security/selinux/hooks.c map = file->private_data; map 6737 security/selinux/hooks.c bpfsec = map->security; map 6755 security/selinux/hooks.c static int selinux_bpf_map(struct bpf_map *map, fmode_t fmode) map 6760 security/selinux/hooks.c bpfsec = map->security; map 6777 security/selinux/hooks.c static int 
selinux_bpf_map_alloc(struct bpf_map *map) map 6786 security/selinux/hooks.c map->security = bpfsec; map 6791 security/selinux/hooks.c static void selinux_bpf_map_free(struct bpf_map *map) map 6793 security/selinux/hooks.c struct bpf_security_struct *bpfsec = map->security; map 6795 security/selinux/hooks.c map->security = NULL; map 351 security/selinux/ss/ebitmap.c u64 map; map 433 security/selinux/ss/ebitmap.c map = le64_to_cpu(mapbits); map 436 security/selinux/ss/ebitmap.c while (map) { map 437 security/selinux/ss/ebitmap.c n->maps[index++] = map & (-1UL); map 438 security/selinux/ss/ebitmap.c map = EBITMAP_SHIFT_UNIT_SIZE(map); map 457 security/selinux/ss/ebitmap.c u64 map; map 479 security/selinux/ss/ebitmap.c map = 0; map 486 security/selinux/ss/ebitmap.c if (!map) { map 488 security/selinux/ss/ebitmap.c map = (u64)1 << (bit - last_startbit); map 498 security/selinux/ss/ebitmap.c buf64[0] = cpu_to_le64(map); map 504 security/selinux/ss/ebitmap.c map = 0; map 507 security/selinux/ss/ebitmap.c map |= (u64)1 << (bit - last_startbit); map 510 security/selinux/ss/ebitmap.c if (map) { map 519 security/selinux/ss/ebitmap.c buf64[0] = cpu_to_le64(map); map 102 security/selinux/ss/services.c struct security_class_mapping *map, map 110 security/selinux/ss/services.c if (!map) map 113 security/selinux/ss/services.c while (map[i].name) map 123 security/selinux/ss/services.c while (map[j].name) { map 124 security/selinux/ss/services.c struct security_class_mapping *p_in = map + (j++); map 182 security/selinux/ss/services.c static u16 unmap_class(struct selinux_map *map, u16 tclass) map 184 security/selinux/ss/services.c if (tclass < map->size) map 185 security/selinux/ss/services.c return map->mapping[tclass].value; map 193 security/selinux/ss/services.c static u16 map_class(struct selinux_map *map, u16 pol_value) map 197 security/selinux/ss/services.c for (i = 1; i < map->size; i++) { map 198 security/selinux/ss/services.c if (map->mapping[i].value == pol_value) map 205 security/selinux/ss/services.c static void map_decision(struct selinux_map *map, map 209 security/selinux/ss/services.c if (tclass < map->size) { map 210 security/selinux/ss/services.c struct selinux_mapping *mapping = &map->mapping[tclass]; map 772 security/selinux/ss/services.c tclass = unmap_class(&state->ss->map, orig_tclass); map 1042 security/selinux/ss/services.c tclass = unmap_class(&state->ss->map, orig_tclass); map 1131 security/selinux/ss/services.c tclass = unmap_class(&state->ss->map, orig_tclass); map 1139 security/selinux/ss/services.c map_decision(&state->ss->map, orig_tclass, avd, map 1674 security/selinux/ss/services.c tclass = unmap_class(&state->ss->map, orig_tclass); map 1678 security/selinux/ss/services.c sock = security_is_socket_class(map_class(&state->ss->map, map 2121 security/selinux/ss/services.c &state->ss->map); map 2209 security/selinux/ss/services.c oldmapping = state->ss->map.mapping; map 2210 security/selinux/ss/services.c state->ss->map.mapping = newmap.mapping; map 2211 security/selinux/ss/services.c state->ss->map.size = newmap.size; map 2685 security/selinux/ss/services.c sclass = unmap_class(&state->ss->map, orig_sclass); map 31 security/selinux/ss/services.h struct selinux_map map; map 699 security/smack/smackfs.c doip->map.std = NULL; map 2258 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_MONO } }, map 2260 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, map 2262 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 2265 sound/core/pcm_lib.c .map = { 
SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 2269 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 2280 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_MONO } }, map 2282 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, map 2284 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 2287 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 2291 sound/core/pcm_lib.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 2328 sound/core/pcm_lib.c const struct snd_pcm_chmap_elem *map; map 2339 sound/core/pcm_lib.c for (map = info->chmap; map->channels; map++) { map 2341 sound/core/pcm_lib.c if (map->channels == substream->runtime->channels && map 2342 sound/core/pcm_lib.c valid_chmap_channels(info, map->channels)) { map 2343 sound/core/pcm_lib.c for (i = 0; i < map->channels; i++) map 2344 sound/core/pcm_lib.c ucontrol->value.integer.value[i] = map->map[i]; map 2358 sound/core/pcm_lib.c const struct snd_pcm_chmap_elem *map; map 2370 sound/core/pcm_lib.c for (map = info->chmap; map->channels; map++) { map 2371 sound/core/pcm_lib.c int chs_bytes = map->channels * 4; map 2372 sound/core/pcm_lib.c if (!valid_chmap_channels(info, map->channels)) map 2386 sound/core/pcm_lib.c for (c = 0; c < map->channels; c++) { map 2387 sound/core/pcm_lib.c if (put_user(map->map[c], dst)) map 264 sound/drivers/mts64.c static u8 map[] = { 0, 1, 4, 2, 3 }; map 266 sound/drivers/mts64.c return map[c]; map 181 sound/firewire/bebob/bebob_focusrite.c const signed char *map; map 189 sound/firewire/bebob/bebob_focusrite.c map = saffirepro_clk_maps[0]; map 191 sound/firewire/bebob/bebob_focusrite.c map = saffirepro_clk_maps[1]; map 195 sound/firewire/bebob/bebob_focusrite.c if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) { map 200 sound/firewire/bebob/bebob_focusrite.c *id = (unsigned int)map[value]; map 391 sound/hda/hdmi_chmap.c unsigned char map; /* ALSA API channel map position */ map 421 sound/hda/hdmi_chmap.c for (; t->map; t++) { map 422 sound/hda/hdmi_chmap.c if (t->map == c) map 456 sound/hda/hdmi_chmap.c for (; t->map; t++) { map 458 sound/hda/hdmi_chmap.c return t->map; map 481 sound/hda/hdmi_chmap.c static int hdmi_manual_channel_allocation(int chs, unsigned char *map) map 486 sound/hda/hdmi_chmap.c int mask = snd_hdac_chmap_to_spk_mask(map[i]); map 507 sound/hda/hdmi_chmap.c int chs, unsigned char *map, map 516 sound/hda/hdmi_chmap.c hdmi_slot = to_cea_slot(ordered_ca, map[alsa_pos]); map 536 sound/hda/hdmi_chmap.c static void hdmi_setup_fake_chmap(unsigned char *map, int ca) map 544 sound/hda/hdmi_chmap.c map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f); map 546 sound/hda/hdmi_chmap.c map[i] = 0; map 552 sound/hda/hdmi_chmap.c int channels, unsigned char *map, map 557 sound/hda/hdmi_chmap.c channels, map, ca); map 560 sound/hda/hdmi_chmap.c hdmi_setup_fake_chmap(map, ca); map 588 sound/hda/hdmi_chmap.c int channels, bool chmap_set, bool non_pcm, unsigned char *map) map 593 sound/hda/hdmi_chmap.c ca = hdmi_manual_channel_allocation(channels, map); map 694 sound/isa/sb/sb_mixer.c unsigned char map[][2], map 710 sound/isa/sb/sb_mixer.c snd_sbmixer_write(chip, map[idx][0], map[idx][1]); map 2528 sound/pci/ac97/ac97_patch.c struct snd_pcm_chmap *map = ac97->chmaps[SNDRV_PCM_STREAM_PLAYBACK]; map 2530 sound/pci/ac97/ac97_patch.c if (map) { map 2532 sound/pci/ac97/ac97_patch.c map->chmap = snd_pcm_std_chmaps; map 2534 sound/pci/ac97/ac97_patch.c map->chmap = snd_pcm_alt_chmaps; map 1333 sound/pci/ca0106/ca0106_main.c .map = { SNDRV_CHMAP_RL, 
SNDRV_CHMAP_RR } }, map 1339 sound/pci/ca0106/ca0106_main.c .map = { SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } }, map 1345 sound/pci/ca0106/ca0106_main.c .map = { SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, map 1353 sound/pci/ca0106/ca0106_main.c const struct snd_pcm_chmap_elem *map = NULL; map 1366 sound/pci/ca0106/ca0106_main.c map = snd_pcm_std_chmaps; map 1371 sound/pci/ca0106/ca0106_main.c map = surround_map; map 1376 sound/pci/ca0106/ca0106_main.c map = clfe_map; map 1381 sound/pci/ca0106/ca0106_main.c map = side_map; map 1404 sound/pci/ca0106/ca0106_main.c err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2, map 496 sound/pci/cs46xx/cs46xx_lib.c u32 map[BA1_DWORD_SIZE]; map 542 sound/pci/cs46xx/cs46xx_lib.c &ba1->map[offset], map 103 sound/pci/cs5530.c u16 map; map 139 sound/pci/cs5530.c map = readw(mem + 0x18); map 151 sound/pci/cs5530.c sb_base = 0x220 + 0x20 * (map & 3); map 153 sound/pci/cs5530.c if (map & (1<<2)) map 161 sound/pci/cs5530.c if (map & (1<<5)) map 163 sound/pci/cs5530.c else if (map & (1<<6)) map 144 sound/pci/ctxfi/ctatc.c apcm->vm_block = vm->map(vm, apcm->substream, runtime->dma_bytes); map 684 sound/pci/ctxfi/ctatc.c srcimp->ops->map(srcimp, src, out_ports[i%multi]); map 706 sound/pci/ctxfi/ctatc.c srcimp->ops->map(srcimp, apcm->src, map 713 sound/pci/ctxfi/ctatc.c srcimp->ops->map(srcimp, apcm->src, out_ports[i]); map 1462 sound/pci/ctxfi/ctatc.c srcimp->ops->map(srcimp, src, rscs[i]); map 400 sound/pci/ctxfi/ctpcm.c .map = { SNDRV_CHMAP_MONO } }, map 402 sound/pci/ctxfi/ctpcm.c .map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, map 408 sound/pci/ctxfi/ctpcm.c .map = { SNDRV_CHMAP_MONO } }, map 410 sound/pci/ctxfi/ctpcm.c .map = { SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } }, map 416 sound/pci/ctxfi/ctpcm.c .map = { SNDRV_CHMAP_MONO } }, map 418 sound/pci/ctxfi/ctpcm.c .map = { SNDRV_CHMAP_SL, SNDRV_CHMAP_SR } }, map 428 sound/pci/ctxfi/ctpcm.c const struct snd_pcm_chmap_elem *map; map 461 sound/pci/ctxfi/ctpcm.c map = snd_pcm_std_chmaps; map 464 sound/pci/ctxfi/ctpcm.c map = surround_map; map 467 sound/pci/ctxfi/ctpcm.c map = clfe_map; map 470 sound/pci/ctxfi/ctpcm.c map = side_map; map 473 sound/pci/ctxfi/ctpcm.c map = snd_pcm_std_chmaps; map 476 sound/pci/ctxfi/ctpcm.c err = snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, chs, map 662 sound/pci/ctxfi/ctsrc.c .map = srcimp_map, map 113 sound/pci/ctxfi/ctsrc.h int (*map)(struct srcimp *srcimp, struct src *user, struct rsc *input); map 197 sound/pci/ctxfi/ctvmem.c vm->map = ct_vm_map; map 50 sound/pci/ctxfi/ctvmem.h struct ct_vm_block *(*map)(struct ct_vm *, struct snd_pcm_substream *, map 826 sound/pci/emu10k1/emu10k1x.c .map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, map 832 sound/pci/emu10k1/emu10k1x.c .map = { SNDRV_CHMAP_FC, SNDRV_CHMAP_LFE } }, map 839 sound/pci/emu10k1/emu10k1x.c const struct snd_pcm_chmap_elem *map = NULL; map 866 sound/pci/emu10k1/emu10k1x.c map = snd_pcm_std_chmaps; map 870 sound/pci/emu10k1/emu10k1x.c map = surround_map; map 874 sound/pci/emu10k1/emu10k1x.c map = clfe_map; map 883 sound/pci/emu10k1/emu10k1x.c return snd_pcm_add_chmap_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK, map, 2, map 1250 sound/pci/ens1370.c .map = { SNDRV_CHMAP_MONO } }, map 1252 sound/pci/ens1370.c .map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, map 3046 sound/pci/hda/hda_codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, map 3048 sound/pci/hda/hda_codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 395 sound/pci/hda/hda_jack.c const struct hda_jack_keymap *map; map 409 sound/pci/hda/hda_jack.c for (map = keymap; 
map->type; map++) map 410 sound/pci/hda/hda_jack.c buttons |= map->type; map 424 sound/pci/hda/hda_jack.c for (map = keymap; map->type; map++) map 425 sound/pci/hda/hda_jack.c snd_jack_set_key(jack->jack, map->type, map->key); map 6261 sound/pci/hda/patch_ca0132.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, map 6263 sound/pci/hda/patch_ca0132.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 6266 sound/pci/hda/patch_ca0132.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 2874 sound/pci/hda/patch_hdmi.c static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb}; map 2876 sound/pci/hda/patch_hdmi.c return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map)); map 2885 sound/pci/hda/patch_hdmi.c static const int map[] = {0x4, 0x6, 0x8, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf}; map 2887 sound/pci/hda/patch_hdmi.c return intel_hsw_common_init(codec, 0x02, map, ARRAY_SIZE(map)); map 3472 sound/pci/hda/patch_hdmi.c int ca, int chs, unsigned char *map) map 3474 sound/pci/hda/patch_hdmi.c if (ca == 0x00 && (map[0] != SNDRV_CHMAP_FL || map[1] != SNDRV_CHMAP_FR)) map 3745 sound/pci/hda/patch_hdmi.c int ca, int chs, unsigned char *map) map 3754 sound/pci/hda/patch_hdmi.c int mask = snd_hdac_chmap_to_spk_mask(map[i]); map 3770 sound/pci/hda/patch_hdmi.c int comp_mask_req = snd_hdac_chmap_to_spk_mask(map[i+1]); map 8696 sound/pci/hda/patch_realtek.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, map 8698 sound/pci/hda/patch_realtek.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, map 203 sound/pci/oxygen/oxygen.h void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data); map 218 sound/pci/oxygen/oxygen_io.c void oxygen_write_i2c(struct oxygen *chip, u8 device, u8 map, u8 data) map 223 sound/pci/oxygen/oxygen_io.c oxygen_write8(chip, OXYGEN_2WIRE_MAP, map); map 1238 sound/pci/ymfpci/ymfpci_main.c .map = { SNDRV_CHMAP_MONO } }, map 1240 sound/pci/ymfpci/ymfpci_main.c .map = { SNDRV_CHMAP_RL, SNDRV_CHMAP_RR } }, map 108 sound/soc/codecs/alc5632.c static inline int alc5632_reset(struct regmap *map) map 110 sound/soc/codecs/alc5632.c return regmap_write(map, ALC5632_RESET, 0x59B4); map 1571 sound/soc/codecs/hdac_hdmi.c const int *map = hdmi->drv_data->port_map; map 1582 sound/soc/codecs/hdac_hdmi.c if (pin == map[i]) map 24 sound/soc/codecs/hdmi-codec.c unsigned char map; /* ALSA API channel map position */ map 65 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, map 72 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } }, map 74 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 77 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 80 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 83 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 86 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 89 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 92 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 95 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 98 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 101 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 104 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 107 
sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 111 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 115 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 119 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 123 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 127 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 131 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 135 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 139 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 143 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 147 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 151 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 155 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 159 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 163 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 167 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 171 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 175 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 179 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_NA, map 183 sound/soc/codecs/hdmi-codec.c .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR, SNDRV_CHMAP_LFE, map 370 sound/soc/codecs/hdmi-codec.c unsigned const char *map; map 375 sound/soc/codecs/hdmi-codec.c map = info->chmap[hcp->chmap_idx].map; map 381 sound/soc/codecs/hdmi-codec.c ucontrol->value.integer.value[i] = map[i]; map 196 sound/soc/codecs/jz4725b.c struct regmap *map = icdc->regmap; map 201 sound/soc/codecs/jz4725b.c return regmap_update_bits(map, JZ4725B_CODEC_REG_IFR, map 204 sound/soc/codecs/jz4725b.c return regmap_read_poll_timeout(map, JZ4725B_CODEC_REG_IFR, map 208 sound/soc/codecs/jz4725b.c return regmap_update_bits(map, JZ4725B_CODEC_REG_IFR, map 211 sound/soc/codecs/jz4725b.c return regmap_read_poll_timeout(map, JZ4725B_CODEC_REG_IFR, map 302 sound/soc/codecs/jz4725b.c struct regmap *map = icdc->regmap; map 306 sound/soc/codecs/jz4725b.c regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2, map 311 sound/soc/codecs/jz4725b.c regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2, map 316 sound/soc/codecs/jz4725b.c regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2, map 321 sound/soc/codecs/jz4725b.c regmap_update_bits(map, JZ4725B_CODEC_REG_PMR2, map 333 sound/soc/codecs/jz4725b.c struct regmap *map = icdc->regmap; map 340 sound/soc/codecs/jz4725b.c regmap_write(map, JZ4725B_CODEC_REG_AICR, map 342 sound/soc/codecs/jz4725b.c regmap_write(map, JZ4725B_CODEC_REG_CCR1, map 126 sound/soc/codecs/max98504.c struct regmap *map = max98504->regmap; map 133 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_SOFTWARE_RESET, 0x1); map 139 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PVDD_BROWNOUT_ENABLE, 0x1); map 141 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_1, map 145 
sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_2, map 148 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_3, map 151 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PVDD_BROWNOUT_CONFIG_4, map 204 sound/soc/codecs/max98504.c struct regmap *map = max98504->regmap; map 209 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PCM_TX_ENABLE, tx_mask); map 214 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PDM_TX_ENABLE, tx_mask); map 227 sound/soc/codecs/max98504.c struct regmap *map = max98504->regmap; map 236 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PCM_TX_CHANNEL_SOURCES, map 241 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_PDM_TX_CONTROL, sources); map 247 sound/soc/codecs/max98504.c regmap_write(map, MAX98504_MEASUREMENT_ENABLE, sources ? 0x3 : 0x01); map 538 sound/soc/codecs/nau8810.c struct regmap *map = nau8810->regmap; map 552 sound/soc/codecs/nau8810.c regmap_update_bits(map, NAU8810_REG_PLLN, map 556 sound/soc/codecs/nau8810.c regmap_write(map, NAU8810_REG_PLLK1, map 559 sound/soc/codecs/nau8810.c regmap_write(map, NAU8810_REG_PLLK2, map 562 sound/soc/codecs/nau8810.c regmap_write(map, NAU8810_REG_PLLK3, map 564 sound/soc/codecs/nau8810.c regmap_update_bits(map, NAU8810_REG_CLOCK, NAU8810_MCLKSEL_MASK, map 566 sound/soc/codecs/nau8810.c regmap_update_bits(map, NAU8810_REG_CLOCK, map 743 sound/soc/codecs/nau8810.c struct regmap *map = nau8810->regmap; map 748 sound/soc/codecs/nau8810.c regmap_update_bits(map, NAU8810_REG_POWER1, map 753 sound/soc/codecs/nau8810.c regmap_update_bits(map, NAU8810_REG_POWER1, map 758 sound/soc/codecs/nau8810.c regcache_sync(map); map 759 sound/soc/codecs/nau8810.c regmap_update_bits(map, NAU8810_REG_POWER1, map 763 sound/soc/codecs/nau8810.c regmap_update_bits(map, NAU8810_REG_POWER1, map 768 sound/soc/codecs/nau8810.c regmap_write(map, NAU8810_REG_POWER1, 0); map 769 sound/soc/codecs/nau8810.c regmap_write(map, NAU8810_REG_POWER2, 0); map 770 sound/soc/codecs/nau8810.c regmap_write(map, NAU8810_REG_POWER3, 0); map 26 sound/soc/codecs/rl6231.c int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft) map 30 sound/soc/codecs/rl6231.c regmap_read(map, reg, &val); map 30 sound/soc/codecs/rl6231.h int rl6231_get_pre_div(struct regmap *map, unsigned int reg, int sft); map 5194 sound/soc/codecs/rt5677.c .map = rt5677_irq_map, map 234 sound/soc/codecs/wm9081.c static int wm9081_reset(struct regmap *map) map 236 sound/soc/codecs/wm9081.c return regmap_write(map, WM9081_SOFTWARE_RESET, 0x9081); map 160 sound/soc/intel/atom/sst-atom-controls.c u8 *map = is_tx ? sst_ssp_rx_map : sst_ssp_tx_map; map 166 sound/soc/intel/atom/sst-atom-controls.c if (map[mux - 1] & val) map 174 sound/soc/intel/atom/sst-atom-controls.c e->texts[mux], mux ? map[mux - 1] : -1); map 220 sound/soc/intel/atom/sst-atom-controls.c u8 *map; map 222 sound/soc/intel/atom/sst-atom-controls.c map = is_tx ? 
sst_ssp_rx_map : sst_ssp_tx_map; map 232 sound/soc/intel/atom/sst-atom-controls.c map[i] &= ~val; map 244 sound/soc/intel/atom/sst-atom-controls.c map[slot_channel_no] |= val; map 248 sound/soc/intel/atom/sst-atom-controls.c e->texts[mux], map[slot_channel_no]); map 1041 sound/soc/intel/atom/sst-atom-controls.c cmd.map = 0; /* Algo sequence: Gain - DRP - FIR - IIR */ map 433 sound/soc/intel/atom/sst-atom-controls.h u16 map; map 157 sound/soc/intel/atom/sst-mfld-platform-pcm.c struct sst_dev_stream_map *map, int size) map 161 sound/soc/intel/atom/sst-mfld-platform-pcm.c if (map == NULL) map 167 sound/soc/intel/atom/sst-mfld-platform-pcm.c if ((map[i].dev_num == dev) && (map[i].direction == dir)) map 178 sound/soc/intel/atom/sst-mfld-platform-pcm.c struct sst_dev_stream_map *map; map 182 sound/soc/intel/atom/sst-mfld-platform-pcm.c map = ctx->pdata->pdev_strm_map; map 196 sound/soc/intel/atom/sst-mfld-platform-pcm.c map, map_size); map 201 sound/soc/intel/atom/sst-mfld-platform-pcm.c str_params->device_type = map[index].device_id; map 202 sound/soc/intel/atom/sst-mfld-platform-pcm.c str_params->task = map[index].task_id; map 210 sound/soc/intel/atom/sst-mfld-platform-pcm.c map, map_size); map 214 sound/soc/intel/atom/sst-mfld-platform-pcm.c str_params->device_type = map[index].device_id; map 215 sound/soc/intel/atom/sst-mfld-platform-pcm.c str_params->task = map[index].task_id; map 99 sound/soc/intel/boards/bytcr_rt5640.c int map; map 106 sound/soc/intel/boards/bytcr_rt5640.c map = BYT_RT5640_MAP(byt_rt5640_quirk); map 107 sound/soc/intel/boards/bytcr_rt5640.c switch (map) { map 121 sound/soc/intel/boards/bytcr_rt5640.c dev_err(dev, "quirk map 0x%x is not supported, microphone input will not work\n", map); map 1068 sound/soc/intel/haswell/sst-haswell-ipc.c struct sst_hsw_stream *stream, u32 map, map 1076 sound/soc/intel/haswell/sst-haswell-ipc.c stream->request.format.map = map; map 1148 sound/soc/intel/haswell/sst-haswell-ipc.c struct sst_hsw_module_map *map = &stream->request.map; map 1158 sound/soc/intel/haswell/sst-haswell-ipc.c map->module_entries_count = 1; map 1159 sound/soc/intel/haswell/sst-haswell-ipc.c map->module_entries[0].module_id = module->id; map 1160 sound/soc/intel/haswell/sst-haswell-ipc.c map->module_entries[0].entry_point = module->entry; map 1925 sound/soc/intel/haswell/sst-haswell-ipc.c config.map.module_entries_count = 1; map 1926 sound/soc/intel/haswell/sst-haswell-ipc.c config.map.module_entries[0].module_id = module->id; map 1927 sound/soc/intel/haswell/sst-haswell-ipc.c config.map.module_entries[0].entry_point = module->entry; map 1939 sound/soc/intel/haswell/sst-haswell-ipc.c config.map.module_entries[0].module_id, map 1943 sound/soc/intel/haswell/sst-haswell-ipc.c config.map.module_entries[0].entry_point); map 237 sound/soc/intel/haswell/sst-haswell-ipc.h struct sst_hsw_module_map map; map 335 sound/soc/intel/haswell/sst-haswell-ipc.h u32 map; map 351 sound/soc/intel/haswell/sst-haswell-ipc.h struct sst_hsw_module_map map; map 443 sound/soc/intel/haswell/sst-haswell-ipc.h struct sst_hsw_stream *stream, u32 map, map 475 sound/soc/intel/haswell/sst-haswell-pcm.c u32 rate, bits, map, pages, module_id; map 584 sound/soc/intel/haswell/sst-haswell-pcm.c map = create_channel_map(SST_HSW_CHANNEL_CONFIG_STEREO); map 586 sound/soc/intel/haswell/sst-haswell-pcm.c map, SST_HSW_CHANNEL_CONFIG_STEREO); map 716 sound/soc/intel/skylake/skl-topology.c struct skl_mod_inst_map *inst = kpb_params->u.map; map 823 sound/soc/intel/skylake/skl-topology.c size = struct_size(params, 
u.map, uuid_params->num_modules); map 839 sound/soc/intel/skylake/skl-topology.c params->u.map[i].mod_id = module_id; map 840 sound/soc/intel/skylake/skl-topology.c params->u.map[i].inst_id = map 219 sound/soc/intel/skylake/skl-topology.h struct skl_mod_inst_map map[0]; map 19 sound/soc/mediatek/common/mtk-afe-fe-dai.c static int mtk_regmap_update_bits(struct regmap *map, int reg, map 25 sound/soc/mediatek/common/mtk-afe-fe-dai.c return regmap_update_bits(map, reg, mask << shift, val << shift); map 28 sound/soc/mediatek/common/mtk-afe-fe-dai.c static int mtk_regmap_write(struct regmap *map, int reg, unsigned int val) map 32 sound/soc/mediatek/common/mtk-afe-fe-dai.c return regmap_write(map, reg, val); map 69 sound/soc/meson/axg-fifo.c regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN, map 101 sound/soc/meson/axg-fifo.c regmap_read(fifo->map, FIFO_STATUS2, &addr); map 123 sound/soc/meson/axg-fifo.c regmap_write(fifo->map, FIFO_START_ADDR, runtime->dma_addr); map 124 sound/soc/meson/axg-fifo.c regmap_write(fifo->map, FIFO_FINISH_ADDR, end_ptr); map 128 sound/soc/meson/axg-fifo.c regmap_write(fifo->map, FIFO_INT_ADDR, burst_num); map 147 sound/soc/meson/axg-fifo.c regmap_update_bits(fifo->map, FIFO_CTRL0, map 166 sound/soc/meson/axg-fifo.c regmap_write(fifo->map, FIFO_INIT_ADDR, runtime->dma_addr); map 176 sound/soc/meson/axg-fifo.c regmap_update_bits(fifo->map, FIFO_CTRL0, map 184 sound/soc/meson/axg-fifo.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 189 sound/soc/meson/axg-fifo.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 200 sound/soc/meson/axg-fifo.c regmap_read(fifo->map, FIFO_STATUS1, &status); map 250 sound/soc/meson/axg-fifo.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 258 sound/soc/meson/axg-fifo.c regmap_update_bits(fifo->map, FIFO_CTRL0, map 352 sound/soc/meson/axg-fifo.c fifo->map = devm_regmap_init_mmio(dev, regs, &axg_fifo_regmap_cfg); map 353 sound/soc/meson/axg-fifo.c if (IS_ERR(fifo->map)) { map 355 sound/soc/meson/axg-fifo.c PTR_ERR(fifo->map)); map 356 sound/soc/meson/axg-fifo.c return PTR_ERR(fifo->map); map 382 sound/soc/meson/axg-fifo.c devm_regmap_field_alloc(dev, fifo->map, data->field_threshold); map 67 sound/soc/meson/axg-fifo.h struct regmap *map; map 39 sound/soc/meson/axg-frddr.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 41 sound/soc/meson/axg-frddr.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 43 sound/soc/meson/axg-frddr.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 62 sound/soc/meson/axg-frddr.c regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_FRDDR_PP_MODE, 0); map 71 sound/soc/meson/axg-frddr.c regmap_update_bits(fifo->map, FIFO_CTRL1, CTRL1_FRDDR_DEPTH_MASK, map 93 sound/soc/meson/axg-pdm.c struct regmap *map; map 99 sound/soc/meson/axg-pdm.c static void axg_pdm_enable(struct regmap *map) map 102 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, PDM_CTRL_RST_FIFO); map 103 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, 0); map 106 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, PDM_CTRL_EN); map 109 sound/soc/meson/axg-pdm.c static void axg_pdm_disable(struct regmap *map) map 111 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, 0); map 114 sound/soc/meson/axg-pdm.c static void axg_pdm_filters_enable(struct regmap *map, bool enable) map 118 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_HCIC_CTRL1, PDM_FILTER_EN, val); map 119 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_F1_CTRL, PDM_FILTER_EN, val); map 120 
sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_F2_CTRL, PDM_FILTER_EN, val); map 121 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_F3_CTRL, PDM_FILTER_EN, val); map 122 sound/soc/meson/axg-pdm.c regmap_update_bits(map, PDM_HPF_CTRL, PDM_FILTER_EN, val); map 134 sound/soc/meson/axg-pdm.c axg_pdm_enable(priv->map); map 140 sound/soc/meson/axg-pdm.c axg_pdm_disable(priv->map); map 200 sound/soc/meson/axg-pdm.c regmap_write(priv->map, PDM_CHAN_CTRL, val); map 201 sound/soc/meson/axg-pdm.c regmap_write(priv->map, PDM_CHAN_CTRL1, val); map 212 sound/soc/meson/axg-pdm.c regmap_update_bits(priv->map, PDM_CTRL, map 216 sound/soc/meson/axg-pdm.c regmap_update_bits(priv->map, PDM_CTRL, map 245 sound/soc/meson/axg-pdm.c regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_OUT_MODE, val); map 283 sound/soc/meson/axg-pdm.c axg_pdm_filters_enable(priv->map, true); map 293 sound/soc/meson/axg-pdm.c axg_pdm_filters_enable(priv->map, false); map 314 sound/soc/meson/axg-pdm.c regmap_update_bits(priv->map, PDM_HCIC_CTRL1, map 325 sound/soc/meson/axg-pdm.c unsigned int offset = index * regmap_get_reg_stride(priv->map) map 333 sound/soc/meson/axg-pdm.c regmap_update_bits(priv->map, offset, map 348 sound/soc/meson/axg-pdm.c regmap_update_bits(priv->map, PDM_HPF_CTRL, map 368 sound/soc/meson/axg-pdm.c regmap_write(priv->map, PDM_COEFF_ADDR, 0); map 375 sound/soc/meson/axg-pdm.c regmap_write(priv->map, PDM_COEFF_DATA, lpf[i].tap[j]); map 409 sound/soc/meson/axg-pdm.c axg_pdm_disable(priv->map); map 412 sound/soc/meson/axg-pdm.c regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_BYPASS_MODE, 0); map 606 sound/soc/meson/axg-pdm.c priv->map = devm_regmap_init_mmio(dev, regs, &axg_pdm_regmap_cfg); map 607 sound/soc/meson/axg-pdm.c if (IS_ERR(priv->map)) { map 609 sound/soc/meson/axg-pdm.c PTR_ERR(priv->map)); map 610 sound/soc/meson/axg-pdm.c return PTR_ERR(priv->map); map 54 sound/soc/meson/axg-spdifin.c struct regmap *map; map 80 sound/soc/meson/axg-spdifin.c regmap_read(priv->map, SPDIFIN_STAT0, &stat); map 101 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL0, map 107 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL0, map 109 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL0, map 128 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, map 139 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL0, SPDIFIN_CTRL0_EN, 0); map 143 sound/soc/meson/axg-spdifin.c static void axg_spdifin_write_mode_param(struct regmap *map, int mode, map 154 sound/soc/meson/axg-spdifin.c reg = offset * regmap_get_reg_stride(map) + base_reg; map 157 sound/soc/meson/axg-spdifin.c regmap_update_bits(map, reg, GENMASK(width - 1, 0) << shift, map 161 sound/soc/meson/axg-spdifin.c static void axg_spdifin_write_timer(struct regmap *map, int mode, map 164 sound/soc/meson/axg-spdifin.c axg_spdifin_write_mode_param(map, mode, val, SPDIFIN_TIMER_PER_REG, map 168 sound/soc/meson/axg-spdifin.c static void axg_spdifin_write_threshold(struct regmap *map, int mode, map 171 sound/soc/meson/axg-spdifin.c axg_spdifin_write_mode_param(map, mode, val, SPDIFIN_THRES_PER_REG, map 206 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL1, map 211 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL0, map 216 sound/soc/meson/axg-spdifin.c axg_spdifin_write_timer(priv->map, i, t_next); map 227 sound/soc/meson/axg-spdifin.c axg_spdifin_write_timer(priv->map, i, t); map 230 
sound/soc/meson/axg-spdifin.c axg_spdifin_write_threshold(priv->map, i, t + t_next); map 305 sound/soc/meson/axg-spdifin.c regmap_update_bits(priv->map, SPDIFIN_CTRL0, map 309 sound/soc/meson/axg-spdifin.c regmap_read(priv->map, SPDIFIN_STAT1, &val); map 474 sound/soc/meson/axg-spdifin.c priv->map = devm_regmap_init_mmio(dev, regs, &axg_spdifin_regmap_cfg); map 475 sound/soc/meson/axg-spdifin.c if (IS_ERR(priv->map)) { map 477 sound/soc/meson/axg-spdifin.c PTR_ERR(priv->map)); map 478 sound/soc/meson/axg-spdifin.c return PTR_ERR(priv->map); map 60 sound/soc/meson/axg-spdifout.c struct regmap *map; map 65 sound/soc/meson/axg-spdifout.c static void axg_spdifout_enable(struct regmap *map) map 68 sound/soc/meson/axg-spdifout.c regmap_update_bits(map, SPDIFOUT_CTRL0, map 73 sound/soc/meson/axg-spdifout.c regmap_update_bits(map, SPDIFOUT_CTRL0, map 75 sound/soc/meson/axg-spdifout.c regmap_update_bits(map, SPDIFOUT_CTRL0, map 79 sound/soc/meson/axg-spdifout.c regmap_update_bits(map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_EN, map 83 sound/soc/meson/axg-spdifout.c static void axg_spdifout_disable(struct regmap *map) map 85 sound/soc/meson/axg-spdifout.c regmap_update_bits(map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_EN, 0); map 97 sound/soc/meson/axg-spdifout.c axg_spdifout_enable(priv->map); map 103 sound/soc/meson/axg-spdifout.c axg_spdifout_disable(priv->map); map 116 sound/soc/meson/axg-spdifout.c regmap_update_bits(priv->map, SPDIFOUT_CTRL0, SPDIFOUT_CTRL0_VSET, map 142 sound/soc/meson/axg-spdifout.c regmap_update_bits(priv->map, SPDIFOUT_CTRL0, map 168 sound/soc/meson/axg-spdifout.c regmap_update_bits(priv->map, SPDIFOUT_CTRL1, map 172 sound/soc/meson/axg-spdifout.c regmap_update_bits(priv->map, SPDIFOUT_CTRL0, map 197 sound/soc/meson/axg-spdifout.c regmap_write(priv->map, SPDIFOUT_CHSTS0, val); map 201 sound/soc/meson/axg-spdifout.c offset += regmap_get_reg_stride(priv->map)) map 202 sound/soc/meson/axg-spdifout.c regmap_write(priv->map, offset, 0); map 205 sound/soc/meson/axg-spdifout.c regmap_write(priv->map, SPDIFOUT_CHSTS6, val); map 209 sound/soc/meson/axg-spdifout.c offset += regmap_get_reg_stride(priv->map)) map 210 sound/soc/meson/axg-spdifout.c regmap_write(priv->map, offset, 0); map 259 sound/soc/meson/axg-spdifout.c axg_spdifout_disable(priv->map); map 262 sound/soc/meson/axg-spdifout.c regmap_update_bits(priv->map, SPDIFOUT_CTRL0, map 267 sound/soc/meson/axg-spdifout.c regmap_update_bits(priv->map, SPDIFOUT_CTRL0, map 273 sound/soc/meson/axg-spdifout.c regmap_write(priv->map, SPDIFOUT_SWAP, 0x10); map 416 sound/soc/meson/axg-spdifout.c priv->map = devm_regmap_init_mmio(dev, regs, &axg_spdifout_regmap_cfg); map 417 sound/soc/meson/axg-spdifout.c if (IS_ERR(priv->map)) { map 419 sound/soc/meson/axg-spdifout.c PTR_ERR(priv->map)); map 420 sound/soc/meson/axg-spdifout.c return PTR_ERR(priv->map); map 26 sound/soc/meson/axg-tdm-formatter.c struct regmap *map; map 29 sound/soc/meson/axg-tdm-formatter.c int axg_tdm_formatter_set_channel_masks(struct regmap *map, map 52 sound/soc/meson/axg-tdm-formatter.c regmap_write(map, offset, val); map 53 sound/soc/meson/axg-tdm-formatter.c offset += regmap_get_reg_stride(map); map 108 sound/soc/meson/axg-tdm-formatter.c ret = formatter->drv->ops->prepare(formatter->map, map 126 sound/soc/meson/axg-tdm-formatter.c formatter->drv->ops->enable(formatter->map); map 138 sound/soc/meson/axg-tdm-formatter.c formatter->drv->ops->disable(formatter->map); map 275 sound/soc/meson/axg-tdm-formatter.c formatter->map = devm_regmap_init_mmio(dev, regs, drv->regmap_cfg); map 276 
sound/soc/meson/axg-tdm-formatter.c if (IS_ERR(formatter->map)) { map 278 sound/soc/meson/axg-tdm-formatter.c PTR_ERR(formatter->map)); map 279 sound/soc/meson/axg-tdm-formatter.c return PTR_ERR(formatter->map); map 24 sound/soc/meson/axg-tdm-formatter.h void (*enable)(struct regmap *map); map 25 sound/soc/meson/axg-tdm-formatter.h void (*disable)(struct regmap *map); map 26 sound/soc/meson/axg-tdm-formatter.h int (*prepare)(struct regmap *map, map 38 sound/soc/meson/axg-tdm-formatter.h int axg_tdm_formatter_set_channel_masks(struct regmap *map, map 89 sound/soc/meson/axg-tdmin.c static void axg_tdmin_enable(struct regmap *map) map 92 sound/soc/meson/axg-tdmin.c regmap_update_bits(map, TDMIN_CTRL, map 96 sound/soc/meson/axg-tdmin.c regmap_update_bits(map, TDMIN_CTRL, map 98 sound/soc/meson/axg-tdmin.c regmap_update_bits(map, TDMIN_CTRL, map 102 sound/soc/meson/axg-tdmin.c regmap_update_bits(map, TDMIN_CTRL, map 106 sound/soc/meson/axg-tdmin.c static void axg_tdmin_disable(struct regmap *map) map 108 sound/soc/meson/axg-tdmin.c regmap_update_bits(map, TDMIN_CTRL, TDMIN_CTRL_ENABLE, 0); map 111 sound/soc/meson/axg-tdmin.c static int axg_tdmin_prepare(struct regmap *map, map 156 sound/soc/meson/axg-tdmin.c regmap_update_bits(map, TDMIN_CTRL, map 162 sound/soc/meson/axg-tdmin.c regmap_write(map, TDMIN_SWAP, 0x76543210); map 164 sound/soc/meson/axg-tdmin.c return axg_tdm_formatter_set_channel_masks(map, ts, TDMIN_MASK0); map 87 sound/soc/meson/axg-tdmout.c static void axg_tdmout_enable(struct regmap *map) map 90 sound/soc/meson/axg-tdmout.c regmap_update_bits(map, TDMOUT_CTRL0, map 94 sound/soc/meson/axg-tdmout.c regmap_update_bits(map, TDMOUT_CTRL0, map 96 sound/soc/meson/axg-tdmout.c regmap_update_bits(map, TDMOUT_CTRL0, map 100 sound/soc/meson/axg-tdmout.c regmap_update_bits(map, TDMOUT_CTRL0, map 104 sound/soc/meson/axg-tdmout.c static void axg_tdmout_disable(struct regmap *map) map 106 sound/soc/meson/axg-tdmout.c regmap_update_bits(map, TDMOUT_CTRL0, TDMOUT_CTRL0_ENABLE, 0); map 109 sound/soc/meson/axg-tdmout.c static int axg_tdmout_prepare(struct regmap *map, map 140 sound/soc/meson/axg-tdmout.c regmap_update_bits(map, TDMOUT_CTRL0, map 172 sound/soc/meson/axg-tdmout.c regmap_update_bits(map, TDMOUT_CTRL1, map 177 sound/soc/meson/axg-tdmout.c regmap_write(map, TDMOUT_SWAP, 0x76543210); map 179 sound/soc/meson/axg-tdmout.c return axg_tdm_formatter_set_channel_masks(map, ts, TDMOUT_MASK0); map 44 sound/soc/meson/axg-toddr.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 46 sound/soc/meson/axg-toddr.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 48 sound/soc/meson/axg-toddr.c regmap_update_bits(fifo->map, FIFO_CTRL1, map 77 sound/soc/meson/axg-toddr.c regmap_update_bits(fifo->map, FIFO_CTRL0, map 100 sound/soc/meson/axg-toddr.c regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_SEL_RESAMPLE, 0); map 103 sound/soc/meson/axg-toddr.c regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_EXT_SIGNED, map 107 sound/soc/meson/axg-toddr.c regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_TODDR_PP_MODE, 0); map 380 sound/soc/meson/g12a-tohdmitx.c struct regmap *map; map 386 sound/soc/meson/g12a-tohdmitx.c map = devm_regmap_init_mmio(dev, regs, &g12a_tohdmitx_regmap_cfg); map 387 sound/soc/meson/g12a-tohdmitx.c if (IS_ERR(map)) { map 389 sound/soc/meson/g12a-tohdmitx.c PTR_ERR(map)); map 390 sound/soc/meson/g12a-tohdmitx.c return PTR_ERR(map); map 958 sound/soc/soc-core.c struct snd_soc_codec_conf *map = &card->codec_conf[i]; map 961 sound/soc/soc-core.c if (map->of_node && of_node != map->of_node) 
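
The sound/soc/meson/axg-*.c entries above all route register access through a single struct regmap handle created with devm_regmap_init_mmio() and then poke individual fields with regmap_update_bits()/regmap_write(). A minimal sketch of that shared probe pattern follows; the device, register offset, and bit names are illustrative placeholders, not taken from these drivers:

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#define FOO_CTRL0	0x00		/* hypothetical register offset */
#define FOO_CTRL0_EN	BIT(31)		/* hypothetical enable bit */

static const struct regmap_config foo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0xfc,
};

static int foo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	void __iomem *regs;
	struct regmap *map;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* Same init and error handling as the axg-pdm/axg-spdif* probes above. */
	map = devm_regmap_init_mmio(dev, regs, &foo_regmap_cfg);
	if (IS_ERR(map)) {
		dev_err(dev, "failed to init regmap: %ld\n", PTR_ERR(map));
		return PTR_ERR(map);
	}

	/* Read-modify-write of one bitfield, as in the many
	 * regmap_update_bits() call sites listed above. */
	return regmap_update_bits(map, FOO_CTRL0, FOO_CTRL0_EN, FOO_CTRL0_EN);
}
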
map 963 sound/soc/soc-core.c if (map->dev_name && strcmp(component->name, map->dev_name)) map 965 sound/soc/soc-core.c component->name_prefix = map->name_prefix; map 193 sound/soc/soc-topology.c struct snd_soc_tplg_channel *chan, int map) map 198 sound/soc/soc-topology.c if (le32_to_cpu(chan[i].id) == map) map 206 sound/soc/soc-topology.c struct snd_soc_tplg_channel *chan, int map) map 211 sound/soc/soc-topology.c if (le32_to_cpu(chan[i].id) == map) map 417 sound/soc/sof/debug.c const struct snd_sof_debugfs_map *map; map 429 sound/soc/sof/debug.c map = &ops->debug_map[i]; map 431 sound/soc/sof/debug.c err = snd_sof_debugfs_io_item(sdev, sdev->bar[map->bar] + map 432 sound/soc/sof/debug.c map->offset, map->size, map 433 sound/soc/sof/debug.c map->name, map->access_type); map 206 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->rb.map); map 208 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->ch.map); map 216 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->iif.map); map 218 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->iport.map); map 221 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->oif.map); map 223 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->oport.map); map 228 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->oif.map); map 230 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->oport.map); map 232 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->och.map); map 234 sound/soc/uniphier/aio-core.c MAPCTR0_EN | sub->swm->iif.map); map 255 sound/soc/uniphier/aio-core.c regmap_write(r, AOUTRSTCTR0, BIT(sub->swm->oport.map)); map 256 sound/soc/uniphier/aio-core.c regmap_write(r, AOUTRSTCTR1, BIT(sub->swm->oport.map)); map 258 sound/soc/uniphier/aio-core.c regmap_update_bits(r, IPORTMXRSTCTR(sub->swm->iport.map), map 261 sound/soc/uniphier/aio-core.c regmap_update_bits(r, IPORTMXRSTCTR(sub->swm->iport.map), map 309 sound/soc/uniphier/aio-core.c regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i), map 311 sound/soc/uniphier/aio-core.c regmap_update_bits(r, OPORTMXTYSLOTCTR(sub->swm->oport.map, i), map 382 sound/soc/uniphier/aio-core.c regmap_update_bits(r, OPORTMXCTR1(sub->swm->oport.map), map 430 sound/soc/uniphier/aio-core.c regmap_update_bits(r, IPORTMXCTR1(sub->swm->iport.map), map 473 sound/soc/uniphier/aio-core.c regmap_update_bits(r, OPORTMXCTR1(sub->swm->oport.map), map 495 sound/soc/uniphier/aio-core.c regmap_update_bits(r, IPORTMXCTR1(sub->swm->iport.map), map 589 sound/soc/uniphier/aio-core.c regmap_write(r, OPORTMXCTR2(sub->swm->oport.map), v); map 595 sound/soc/uniphier/aio-core.c regmap_write(r, IPORTMXCTR2(sub->swm->iport.map), v); map 657 sound/soc/uniphier/aio-core.c regmap_write(r, OPORTMXCTR3(sub->swm->oport.map), v); map 659 sound/soc/uniphier/aio-core.c regmap_write(r, IPORTMXACLKSEL0EX(sub->swm->iport.map), map 661 sound/soc/uniphier/aio-core.c regmap_write(r, IPORTMXEXNOE(sub->swm->iport.map), map 680 sound/soc/uniphier/aio-core.c regmap_write(r, OPORTMXPATH(sub->swm->oport.map), map 681 sound/soc/uniphier/aio-core.c sub->swm->oif.map); map 683 sound/soc/uniphier/aio-core.c regmap_update_bits(r, OPORTMXMASK(sub->swm->oport.map), map 694 sound/soc/uniphier/aio-core.c regmap_write(r, AOUTENCTR0, BIT(sub->swm->oport.map)); map 696 sound/soc/uniphier/aio-core.c regmap_write(r, AOUTENCTR1, BIT(sub->swm->oport.map)); map 698 sound/soc/uniphier/aio-core.c regmap_update_bits(r, IPORTMXMASK(sub->swm->iport.map), map 706 sound/soc/uniphier/aio-core.c IPORTMXCTR2(sub->swm->iport.map), map 711 sound/soc/uniphier/aio-core.c 
IPORTMXCTR2(sub->swm->iport.map), map 728 sound/soc/uniphier/aio-core.c regmap_read(r, OPORTMXTYVOLGAINSTATUS(sub->swm->oport.map, 0), &v); map 745 sound/soc/uniphier/aio-core.c int oport_map = sub->swm->oport.map; map 812 sound/soc/uniphier/aio-core.c regmap_write(r, PBOUTMXCTR0(sub->swm->oif.map), v); map 813 sound/soc/uniphier/aio-core.c regmap_write(r, PBOUTMXCTR1(sub->swm->oif.map), 0); map 815 sound/soc/uniphier/aio-core.c regmap_write(r, PBINMXCTR(sub->swm->iif.map), map 818 sound/soc/uniphier/aio-core.c (sub->swm->iport.map << PBINMXCTR_PBINSEL_SHIFT) | map 883 sound/soc/uniphier/aio-core.c regmap_write(r, OPORTMXREPET(sub->swm->oport.map), repet); map 884 sound/soc/uniphier/aio-core.c regmap_write(r, OPORTMXPAUDAT(sub->swm->oport.map), pause); map 904 sound/soc/uniphier/aio-core.c regmap_write(r, AOUTSRCRSTCTR0, BIT(sub->swm->oport.map)); map 905 sound/soc/uniphier/aio-core.c regmap_write(r, AOUTSRCRSTCTR1, BIT(sub->swm->oport.map)); map 928 sound/soc/uniphier/aio-core.c regmap_write(r, OPORTMXSRC1CTR(sub->swm->oport.map), map 954 sound/soc/uniphier/aio-core.c regmap_write(r, OPORTMXRATE_I(sub->swm->oport.map), map 957 sound/soc/uniphier/aio-core.c regmap_update_bits(r, OPORTMXRATE_I(sub->swm->oport.map), map 968 sound/soc/uniphier/aio-core.c regmap_write(r, PBINMXCTR(sub->swm->iif.map), map 971 sound/soc/uniphier/aio-core.c (sub->swm->oport.map << PBINMXCTR_PBINSEL_SHIFT) | map 982 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_CHMXCTRL1(sub->swm->och.map), map 985 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_CHMXSRCAMODE(sub->swm->och.map), map 990 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_CHMXDSTAMODE(sub->swm->och.map), map 994 sound/soc/uniphier/aio-core.c (sub->swm->och.map << CDA2D_CHMXAMODE_RSSEL_SHIFT)); map 1010 sound/soc/uniphier/aio-core.c v | BIT(sub->swm->och.map)); map 1018 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_CHMXCTRL1(sub->swm->ch.map), map 1024 sound/soc/uniphier/aio-core.c (sub->swm->rb.map << CDA2D_CHMXAMODE_RSSEL_SHIFT); map 1026 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_CHMXSRCAMODE(sub->swm->ch.map), v); map 1028 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_CHMXDSTAMODE(sub->swm->ch.map), v); map 1039 sound/soc/uniphier/aio-core.c CDA2D_STRT0_STOP_START | BIT(sub->swm->ch.map)); map 1042 sound/soc/uniphier/aio-core.c BIT(sub->swm->rb.map), map 1043 sound/soc/uniphier/aio-core.c BIT(sub->swm->rb.map)); map 1046 sound/soc/uniphier/aio-core.c CDA2D_STRT0_STOP_STOP | BIT(sub->swm->ch.map)); map 1049 sound/soc/uniphier/aio-core.c BIT(sub->swm->rb.map), map 1061 sound/soc/uniphier/aio-core.c CDA2D_RDPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map)); map 1064 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l); map 1066 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &pos_l); map 1067 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), &pos_u); map 1079 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), (u32)pos); map 1080 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXRDPTRU(sub->swm->rb.map), (u32)(pos >> 32)); map 1081 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RDPTRLOAD, BIT(sub->swm->rb.map)); map 1084 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXRDPTR(sub->swm->rb.map), &tmp); map 1094 sound/soc/uniphier/aio-core.c CDA2D_WRPTRLOAD_LSFLAG_STORE | BIT(sub->swm->rb.map)); map 1097 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), 
&pos_l); map 1099 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &pos_l); map 1100 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map), &pos_u); map 1112 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), map 1114 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXWRPTRU(sub->swm->rb.map), map 1116 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_WRPTRLOAD, BIT(sub->swm->rb.map)); map 1119 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXWRPTR(sub->swm->rb.map), &tmp); map 1129 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXBTH(sub->swm->rb.map), th); map 1130 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXRTH(sub->swm->rb.map), th); map 1145 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXCNFG(sub->swm->rb.map), 0); map 1146 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXBGNADRS(sub->swm->rb.map), map 1148 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXBGNADRSU(sub->swm->rb.map), map 1150 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXENDADRS(sub->swm->rb.map), map 1152 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXENDADRSU(sub->swm->rb.map), map 1155 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBADRSLOAD, BIT(sub->swm->rb.map)); map 1165 sound/soc/uniphier/aio-core.c regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map), map 1172 sound/soc/uniphier/aio-core.c regmap_update_bits(r, CDA2D_RBMXIE(sub->swm->rb.map), map 1233 sound/soc/uniphier/aio-core.c regmap_read(r, CDA2D_RBMXIR(sub->swm->rb.map), &ir); map 1246 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map), map 1249 sound/soc/uniphier/aio-core.c regmap_write(r, CDA2D_RBMXIR(sub->swm->rb.map), map 155 sound/soc/uniphier/aio.h int map; map 109 sound/soc/uniphier/evea.c struct regmap *map = evea->regmap; map 111 sound/soc/uniphier/evea.c regmap_update_bits(map, AANAPOW, AANAPOW_A_POWD, map 114 sound/soc/uniphier/evea.c regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK, map 117 sound/soc/uniphier/evea.c regmap_update_bits(map, ADAC1ODC, ADAC1ODC_ADAC_RAMPCLT_MASK, map 120 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ2(0), ADACSEQ2_ADACIN_FIX, 0); map 121 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ2(1), ADACSEQ2_ADACIN_FIX, 0); map 122 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ2(2), ADACSEQ2_ADACIN_FIX, 0); map 127 sound/soc/uniphier/evea.c struct regmap *map = evea->regmap; map 129 sound/soc/uniphier/evea.c regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK, map 132 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, map 134 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, map 136 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, map 139 sound/soc/uniphier/evea.c regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0); map 140 sound/soc/uniphier/evea.c regmap_update_bits(map, ALO2OUTPOW, ALO2OUTPOW_LO2_ON, 0); map 141 sound/soc/uniphier/evea.c regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0); map 146 sound/soc/uniphier/evea.c struct regmap *map = evea->regmap; map 149 sound/soc/uniphier/evea.c regmap_update_bits(map, ALINEPOW, map 153 sound/soc/uniphier/evea.c regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, map 155 sound/soc/uniphier/evea.c regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, map 158 sound/soc/uniphier/evea.c regmap_update_bits(map, AADCPOW(0), AADCPOW_AADC_POWD, 
0); map 159 sound/soc/uniphier/evea.c regmap_update_bits(map, AADCPOW(1), AADCPOW_AADC_POWD, 0); map 161 sound/soc/uniphier/evea.c regmap_update_bits(map, ALINEPOW, map 170 sound/soc/uniphier/evea.c struct regmap *map = evea->regmap; map 173 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, 0); map 174 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, 0); map 176 sound/soc/uniphier/evea.c regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, map 178 sound/soc/uniphier/evea.c regmap_update_bits(map, ALO2OUTPOW, map 182 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(0), ADACSEQ1_MMUTE, map 184 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(2), ADACSEQ1_MMUTE, map 187 sound/soc/uniphier/evea.c regmap_update_bits(map, ALO1OUTPOW, ALO1OUTPOW_LO1_ON, 0); map 188 sound/soc/uniphier/evea.c regmap_update_bits(map, ALO2OUTPOW, map 198 sound/soc/uniphier/evea.c struct regmap *map = evea->regmap; map 201 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, 0); map 203 sound/soc/uniphier/evea.c regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, map 206 sound/soc/uniphier/evea.c regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK, map 209 sound/soc/uniphier/evea.c regmap_update_bits(map, ADAC1ODC, ADAC1ODC_HP_DIS_RES_MASK, map 212 sound/soc/uniphier/evea.c regmap_update_bits(map, ADACSEQ1(1), ADACSEQ1_MMUTE, map 215 sound/soc/uniphier/evea.c regmap_update_bits(map, AHPOUTPOW, AHPOUTPOW_HP_ON, 0); map 323 sound/sparc/amd7930.c struct amd7930_map map; map 385 sound/sparc/amd7930.c struct amd7930_map *map = &amd->map; map 388 sound/sparc/amd7930.c sbus_writeb(((map->gx >> 0) & 0xff), amd->regs + AMD7930_DR); map 389 sound/sparc/amd7930.c sbus_writeb(((map->gx >> 8) & 0xff), amd->regs + AMD7930_DR); map 392 sound/sparc/amd7930.c sbus_writeb(((map->gr >> 0) & 0xff), amd->regs + AMD7930_DR); map 393 sound/sparc/amd7930.c sbus_writeb(((map->gr >> 8) & 0xff), amd->regs + AMD7930_DR); map 396 sound/sparc/amd7930.c sbus_writeb(((map->stgr >> 0) & 0xff), amd->regs + AMD7930_DR); map 397 sound/sparc/amd7930.c sbus_writeb(((map->stgr >> 8) & 0xff), amd->regs + AMD7930_DR); map 400 sound/sparc/amd7930.c sbus_writeb(((map->ger >> 0) & 0xff), amd->regs + AMD7930_DR); map 401 sound/sparc/amd7930.c sbus_writeb(((map->ger >> 8) & 0xff), amd->regs + AMD7930_DR); map 404 sound/sparc/amd7930.c sbus_writeb(map->mmr1, amd->regs + AMD7930_DR); map 407 sound/sparc/amd7930.c sbus_writeb(map->mmr2, amd->regs + AMD7930_DR); map 479 sound/sparc/amd7930.c struct amd7930_map *map = &amd->map; map 482 sound/sparc/amd7930.c map->gx = gx_coeff[amd->rgain]; map 483 sound/sparc/amd7930.c map->stgr = gx_coeff[amd->mgain]; map 486 sound/sparc/amd7930.c map->ger = ger_coeff[level - 256]; map 487 sound/sparc/amd7930.c map->gr = gx_coeff[255]; map 489 sound/sparc/amd7930.c map->ger = ger_coeff[0]; map 490 sound/sparc/amd7930.c map->gr = gx_coeff[level]; map 599 sound/sparc/amd7930.c new_mmr1 = amd->map.mmr1; map 604 sound/sparc/amd7930.c if (new_mmr1 != amd->map.mmr1) { map 605 sound/sparc/amd7930.c amd->map.mmr1 = new_mmr1; map 631 sound/sparc/amd7930.c new_mmr1 = amd->map.mmr1; map 636 sound/sparc/amd7930.c if (new_mmr1 != amd->map.mmr1) { map 637 sound/sparc/amd7930.c amd->map.mmr1 = new_mmr1; map 983 sound/sparc/amd7930.c memset(&amd->map, 0, sizeof(amd->map)); map 984 sound/sparc/amd7930.c amd->map.mmr1 = (AM_MAP_MMR1_GX | AM_MAP_MMR1_GER | map 986 sound/sparc/amd7930.c amd->map.mmr2 = (AM_MAP_MMR2_LS | AM_MAP_MMR2_AINB); 
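
In the sound/soc/uniphier/aio-core.c entries above, by contrast, .map is not a regmap but a small integer index: per-port macros such as OPORTMXCTR1(sub->swm->oport.map) scale a register offset with it, while shared registers such as AOUTRSTCTR0 take BIT(sub->swm->oport.map) as a per-port bit. A hedged sketch of that indexing scheme, with made-up macro names standing in for the driver's real ones:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Hypothetical stand-ins for the driver's real per-port and shared registers. */
#define PORTMXCTR(n)	(0x1000 + 0x40 * (n))	/* one register per port */
#define PORTMX_RST	BIT(0)
#define AOUT_RST_SET	0x0010			/* shared register, one bit per port */

struct port_sel {
	int map;	/* selects which port instance to address */
};

static void port_reset(struct regmap *r, const struct port_sel *oport)
{
	/* Per-port registers: the index scales the register offset. */
	regmap_update_bits(r, PORTMXCTR(oport->map), PORTMX_RST, PORTMX_RST);

	/* Shared registers: the same index becomes a bit position, as in the
	 * regmap_write(r, AOUTRSTCTR0, BIT(sub->swm->oport.map)) entries. */
	regmap_write(r, AOUT_RST_SET, BIT(oport->map));
}
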
map 416 sound/synth/emux/soundfont.c struct soundfont_voice_map map; map 419 sound/synth/emux/soundfont.c if (count < (int)sizeof(map)) map 421 sound/synth/emux/soundfont.c if (copy_from_user(&map, data, sizeof(map))) map 424 sound/synth/emux/soundfont.c if (map.map_instr < 0 || map.map_instr >= SF_MAX_INSTRUMENTS) map 434 sound/synth/emux/soundfont.c zp->instr == map.map_instr && map 435 sound/synth/emux/soundfont.c zp->bank == map.map_bank && map 436 sound/synth/emux/soundfont.c zp->v.low == map.map_key && map 437 sound/synth/emux/soundfont.c zp->v.start == map.src_instr && map 438 sound/synth/emux/soundfont.c zp->v.end == map.src_bank && map 439 sound/synth/emux/soundfont.c zp->v.fixkey == map.src_key) { map 457 sound/synth/emux/soundfont.c zp->bank = map.map_bank; map 458 sound/synth/emux/soundfont.c zp->instr = map.map_instr; map 460 sound/synth/emux/soundfont.c if (map.map_key >= 0) { map 461 sound/synth/emux/soundfont.c zp->v.low = map.map_key; map 462 sound/synth/emux/soundfont.c zp->v.high = map.map_key; map 464 sound/synth/emux/soundfont.c zp->v.start = map.src_instr; map 465 sound/synth/emux/soundfont.c zp->v.end = map.src_bank; map 466 sound/synth/emux/soundfont.c zp->v.fixkey = map.src_key; map 73 sound/usb/mixer.c const struct usbmix_name_map *map; map 1568 sound/usb/mixer.c const struct usbmix_name_map *map; map 1576 sound/usb/mixer.c map = find_map(imap, unitid, control); map 1577 sound/usb/mixer.c if (check_ignored_ctl(map)) map 1627 sound/usb/mixer.c len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); map 1686 sound/usb/mixer.c check_mapped_dB(map, cval); map 1727 sound/usb/mixer.c __build_feature_ctl(state->mixer, state->map, ctl_mask, control, map 1766 sound/usb/mixer.c const struct usbmix_name_map *map; map 1768 sound/usb/mixer.c map = find_map(imap, term->id, 0); map 1769 sound/usb/mixer.c if (check_ignored_ctl(map)) map 1802 sound/usb/mixer.c if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) map 2040 sound/usb/mixer.c const struct usbmix_name_map *map; map 2042 sound/usb/mixer.c map = find_map(state->map, unitid, 0); map 2043 sound/usb/mixer.c if (check_ignored_ctl(map)) map 2073 sound/usb/mixer.c len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); map 2111 sound/usb/mixer.c build_connector_control(state->mixer, state->map, &iterm, true); map 2350 sound/usb/mixer.c const struct usbmix_name_map *map; map 2388 sound/usb/mixer.c map = find_map(state->map, unitid, valinfo->control); map 2389 sound/usb/mixer.c if (check_ignored_ctl(map)) map 2457 sound/usb/mixer.c if (check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name))) { map 2613 sound/usb/mixer.c const struct usbmix_name_map *map; map 2625 sound/usb/mixer.c map = find_map(state->map, unitid, 0); map 2626 sound/usb/mixer.c if (check_ignored_ctl(map)) map 2688 sound/usb/mixer.c len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name)); map 2939 sound/usb/mixer.c const struct usbmix_ctl_map *map; map 3016 sound/usb/mixer.c for (map = uac3_badd_usbmix_ctl_maps; map->id; map++) { map 3017 sound/usb/mixer.c if (map->id == badd_profile) map 3021 sound/usb/mixer.c if (!map->id) map 3038 sound/usb/mixer.c UAC3_BADD_FU_ID2, map->map); map 3041 sound/usb/mixer.c UAC3_BADD_FU_ID2, map->map); map 3048 sound/usb/mixer.c UAC3_BADD_FU_ID5, map->map); map 3051 sound/usb/mixer.c UAC3_BADD_FU_ID5, map->map); map 3058 sound/usb/mixer.c UAC3_BADD_FU_ID7, map->map); map 3061 sound/usb/mixer.c UAC3_BADD_FU_ID7, map->map); map 3072 sound/usb/mixer.c 
build_connector_control(mixer, map->map, &iterm, true); map 3078 sound/usb/mixer.c build_connector_control(mixer, map->map, &oterm, false); map 3093 sound/usb/mixer.c const struct usbmix_ctl_map *map; map 3103 sound/usb/mixer.c for (map = usbmix_ctl_maps; map->id; map++) { map 3104 sound/usb/mixer.c if (map->id == state.chip->usb_id) { map 3105 sound/usb/mixer.c state.map = map->map; map 3106 sound/usb/mixer.c state.selector_map = map->selector_map; map 3107 sound/usb/mixer.c mixer->connector_map = map->connector_map; map 3108 sound/usb/mixer.c mixer->ignore_ctl_error |= map->ignore_ctl_error; map 3154 sound/usb/mixer.c build_connector_control(state.mixer, state.map, map 3180 sound/usb/mixer.c build_connector_control(state.mixer, state.map, map 3192 sound/usb/mixer.c const struct usbmix_connector_map *map = mixer->connector_map; map 3194 sound/usb/mixer.c if (!map) map 3197 sound/usb/mixer.c for (; map->id; map++) { map 3198 sound/usb/mixer.c if (map->id == unitid) { map 3199 sound/usb/mixer.c if (control && map->control) map 3200 sound/usb/mixer.c *control = map->control; map 3201 sound/usb/mixer.c if (channel && map->channel) map 3202 sound/usb/mixer.c *channel = map->channel; map 3203 sound/usb/mixer.c return map->delegated_id; map 28 sound/usb/mixer_maps.c const struct usbmix_name_map *map; map 412 sound/usb/mixer_maps.c .map = extigy_map, map 417 sound/usb/mixer_maps.c .map = mp3plus_map, map 421 sound/usb/mixer_maps.c .map = audigy2nx_map, map 426 sound/usb/mixer_maps.c .map = live24ext_map, map 430 sound/usb/mixer_maps.c .map = audigy2nx_map, map 443 sound/usb/mixer_maps.c .map = gamecom780_map, map 460 sound/usb/mixer_maps.c .map = hercules_usb51_map, map 472 sound/usb/mixer_maps.c .map = linex_map, map 477 sound/usb/mixer_maps.c .map = maya44_map, map 481 sound/usb/mixer_maps.c .map = justlink_map, map 485 sound/usb/mixer_maps.c .map = aureon_51_2_map, map 489 sound/usb/mixer_maps.c .map = dell_alc4020_map, map 493 sound/usb/mixer_maps.c .map = mbox1_map, map 497 sound/usb/mixer_maps.c .map = scratch_live_map, map 502 sound/usb/mixer_maps.c .map = ebox44_map, map 507 sound/usb/mixer_maps.c .map = maya44_map, map 512 sound/usb/mixer_maps.c .map = scms_usb3318_map, map 517 sound/usb/mixer_maps.c .map = scms_usb3318_map, map 522 sound/usb/mixer_maps.c .map = bose_companion5_map, map 526 sound/usb/mixer_maps.c .map = aorus_master_alc1220vb_map, map 530 sound/usb/mixer_maps.c .map = trx40_mobo_map, map 535 sound/usb/mixer_maps.c .map = asus_rog_map, map 539 sound/usb/mixer_maps.c .map = asus_rog_map, map 543 sound/usb/mixer_maps.c .map = trx40_mobo_map, map 548 sound/usb/mixer_maps.c .map = trx40_mobo_map, map 553 sound/usb/mixer_maps.c .map = trx40_mobo_map, map 596 sound/usb/mixer_maps.c .map = uac3_badd_generic_io_map, map 600 sound/usb/mixer_maps.c .map = uac3_badd_headphone_map, map 604 sound/usb/mixer_maps.c .map = uac3_badd_speaker_map, map 608 sound/usb/mixer_maps.c .map = uac3_badd_microphone_map, map 612 sound/usb/mixer_maps.c .map = uac3_badd_headset_map, map 616 sound/usb/mixer_maps.c .map = uac3_badd_headset_map, map 620 sound/usb/mixer_maps.c .map = uac3_badd_speakerphone_map, map 177 sound/usb/stream.c if (put_user(fp->chmap->map[i], dst)) map 203 sound/usb/stream.c ucontrol->value.integer.value[i] = chmap->map[i]; map 291 sound/usb/stream.c if (channels > ARRAY_SIZE(chmap->map)) map 305 sound/usb/stream.c chmap->map[c++] = *maps; map 310 sound/usb/stream.c chmap->map[c++] = SNDRV_CHMAP_MONO; map 313 sound/usb/stream.c chmap->map[c++] = *maps; map 317 sound/usb/stream.c 
chmap->map[c] = SNDRV_CHMAP_UNKNOWN; map 332 sound/usb/stream.c if (channels > ARRAY_SIZE(chmap->map)) map 353 sound/usb/stream.c unsigned char map; map 361 sound/usb/stream.c map = SNDRV_CHMAP_MONO; map 366 sound/usb/stream.c map = SNDRV_CHMAP_FL; map 371 sound/usb/stream.c map = SNDRV_CHMAP_FR; map 374 sound/usb/stream.c map = SNDRV_CHMAP_FC; map 377 sound/usb/stream.c map = SNDRV_CHMAP_FLC; map 380 sound/usb/stream.c map = SNDRV_CHMAP_FRC; map 383 sound/usb/stream.c map = SNDRV_CHMAP_SL; map 386 sound/usb/stream.c map = SNDRV_CHMAP_SR; map 389 sound/usb/stream.c map = SNDRV_CHMAP_RL; map 392 sound/usb/stream.c map = SNDRV_CHMAP_RR; map 395 sound/usb/stream.c map = SNDRV_CHMAP_RC; map 398 sound/usb/stream.c map = SNDRV_CHMAP_RLC; map 401 sound/usb/stream.c map = SNDRV_CHMAP_RRC; map 404 sound/usb/stream.c map = SNDRV_CHMAP_TC; map 407 sound/usb/stream.c map = SNDRV_CHMAP_TFL; map 410 sound/usb/stream.c map = SNDRV_CHMAP_TFR; map 413 sound/usb/stream.c map = SNDRV_CHMAP_TFC; map 416 sound/usb/stream.c map = SNDRV_CHMAP_TFLC; map 419 sound/usb/stream.c map = SNDRV_CHMAP_TFRC; map 422 sound/usb/stream.c map = SNDRV_CHMAP_TSL; map 425 sound/usb/stream.c map = SNDRV_CHMAP_TSR; map 428 sound/usb/stream.c map = SNDRV_CHMAP_TRL; map 431 sound/usb/stream.c map = SNDRV_CHMAP_TRR; map 434 sound/usb/stream.c map = SNDRV_CHMAP_TRC; map 437 sound/usb/stream.c map = SNDRV_CHMAP_BC; map 440 sound/usb/stream.c map = SNDRV_CHMAP_LFE; map 443 sound/usb/stream.c map = SNDRV_CHMAP_LLFE; map 446 sound/usb/stream.c map = SNDRV_CHMAP_RLFE; map 450 sound/usb/stream.c map = SNDRV_CHMAP_UNKNOWN; map 453 sound/usb/stream.c chmap->map[c++] = map; map 464 sound/usb/stream.c chmap->map[c] = SNDRV_CHMAP_UNKNOWN; map 915 sound/usb/stream.c chmap->map[0] = SNDRV_CHMAP_MONO; map 917 sound/usb/stream.c chmap->map[0] = SNDRV_CHMAP_FL; map 918 sound/usb/stream.c chmap->map[1] = SNDRV_CHMAP_FR; map 461 sound/x86/intel_hdmi_audio.c for (; t->map; t++) { map 463 sound/x86/intel_hdmi_audio.c return t->map; map 517 sound/x86/intel_hdmi_audio.c chmap->map[c] = spk_to_chmap( map 561 sound/x86/intel_hdmi_audio.c ucontrol->value.integer.value[i] = chmap->map[i]; map 77 sound/x86/intel_hdmi_audio.h unsigned char map; /* ALSA API channel map position */ map 1104 tools/bpf/bpftool/prog.c struct bpf_map *map; map 1259 tools/bpf/bpftool/prog.c bpf_object__for_each_map(map, obj) { map 1260 tools/bpf/bpftool/prog.c if (!strcmp(bpf_map__name(map), map_replace[j].name)) { map 1280 tools/bpf/bpftool/prog.c bpf_object__for_each_map(map, obj) { map 1281 tools/bpf/bpftool/prog.c if (!bpf_map__is_offload_neutral(map)) map 1282 tools/bpf/bpftool/prog.c bpf_map__set_ifindex(map, ifindex); map 1285 tools/bpf/bpftool/prog.c err = bpf_map__reuse_fd(map, map_replace[j++].fd); map 47 tools/build/fixdep.c static void parse_dep_file(void *map, size_t len) map 49 tools/build/fixdep.c char *m = map; map 125 tools/build/fixdep.c void *map; map 143 tools/build/fixdep.c map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); map 144 tools/build/fixdep.c if ((long) map == -1) { map 150 tools/build/fixdep.c parse_dep_file(map, st.st_size); map 152 tools/build/fixdep.c munmap(map, st.st_size); map 18 tools/include/linux/bitmap.h void bitmap_clear(unsigned long *map, unsigned int start, int len); map 1290 tools/lib/bpf/btf.c __u32 *map; map 1358 tools/lib/bpf/btf.c free(d->map); map 1359 tools/lib/bpf/btf.c d->map = NULL; map 1410 tools/lib/bpf/btf.c d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); map 1411 tools/lib/bpf/btf.c if (!d->map) { map 1416 
tools/lib/bpf/btf.c d->map[0] = 0; map 1422 tools/lib/bpf/btf.c d->map[i] = i; map 1424 tools/lib/bpf/btf.c d->map[i] = BTF_UNPROCESSED_ID; map 2046 tools/lib/bpf/btf.c d->map[cand_id] = type_id; map 2067 tools/lib/bpf/btf.c d->map[type_id] = new_id; map 2091 tools/lib/bpf/btf.c return d->map[type_id] <= BTF_MAX_NR_TYPES; map 2101 tools/lib/bpf/btf.c while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) map 2102 tools/lib/bpf/btf.c type_id = d->map[type_id]; map 2117 tools/lib/bpf/btf.c while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) map 2118 tools/lib/bpf/btf.c type_id = d->map[type_id]; map 2415 tools/lib/bpf/btf.c d->map[c_id] = t_id; map 2417 tools/lib/bpf/btf.c d->map[t_id] = c_id; map 2429 tools/lib/bpf/btf.c d->map[t_id] = c_id; map 2466 tools/lib/bpf/btf.c if (d->map[type_id] <= BTF_MAX_NR_TYPES) map 2505 tools/lib/bpf/btf.c d->map[type_id] = new_id; map 2557 tools/lib/bpf/btf.c if (d->map[type_id] == BTF_IN_PROGRESS_ID) map 2559 tools/lib/bpf/btf.c if (d->map[type_id] <= BTF_MAX_NR_TYPES) map 2563 tools/lib/bpf/btf.c d->map[type_id] = BTF_IN_PROGRESS_ID; map 2649 tools/lib/bpf/btf.c d->map[type_id] = new_id; map 2698 tools/lib/bpf/btf.c if (d->map[i] != i) map 32 tools/lib/bpf/hashmap.c void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, map 35 tools/lib/bpf/hashmap.c map->hash_fn = hash_fn; map 36 tools/lib/bpf/hashmap.c map->equal_fn = equal_fn; map 37 tools/lib/bpf/hashmap.c map->ctx = ctx; map 39 tools/lib/bpf/hashmap.c map->buckets = NULL; map 40 tools/lib/bpf/hashmap.c map->cap = 0; map 41 tools/lib/bpf/hashmap.c map->cap_bits = 0; map 42 tools/lib/bpf/hashmap.c map->sz = 0; map 49 tools/lib/bpf/hashmap.c struct hashmap *map = malloc(sizeof(struct hashmap)); map 51 tools/lib/bpf/hashmap.c if (!map) map 53 tools/lib/bpf/hashmap.c hashmap__init(map, hash_fn, equal_fn, ctx); map 54 tools/lib/bpf/hashmap.c return map; map 57 tools/lib/bpf/hashmap.c void hashmap__clear(struct hashmap *map) map 59 tools/lib/bpf/hashmap.c free(map->buckets); map 60 tools/lib/bpf/hashmap.c map->cap = map->cap_bits = map->sz = 0; map 63 tools/lib/bpf/hashmap.c void hashmap__free(struct hashmap *map) map 65 tools/lib/bpf/hashmap.c if (!map) map 68 tools/lib/bpf/hashmap.c hashmap__clear(map); map 69 tools/lib/bpf/hashmap.c free(map); map 72 tools/lib/bpf/hashmap.c size_t hashmap__size(const struct hashmap *map) map 74 tools/lib/bpf/hashmap.c return map->sz; map 77 tools/lib/bpf/hashmap.c size_t hashmap__capacity(const struct hashmap *map) map 79 tools/lib/bpf/hashmap.c return map->cap; map 82 tools/lib/bpf/hashmap.c static bool hashmap_needs_to_grow(struct hashmap *map) map 85 tools/lib/bpf/hashmap.c return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap); map 88 tools/lib/bpf/hashmap.c static int hashmap_grow(struct hashmap *map) map 96 tools/lib/bpf/hashmap.c new_cap_bits = map->cap_bits + 1; map 105 tools/lib/bpf/hashmap.c hashmap__for_each_entry_safe(map, cur, tmp, bkt) { map 106 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits); map 110 tools/lib/bpf/hashmap.c map->cap = new_cap; map 111 tools/lib/bpf/hashmap.c map->cap_bits = new_cap_bits; map 112 tools/lib/bpf/hashmap.c free(map->buckets); map 113 tools/lib/bpf/hashmap.c map->buckets = new_buckets; map 118 tools/lib/bpf/hashmap.c static bool hashmap_find_entry(const struct hashmap *map, map 125 tools/lib/bpf/hashmap.c if (!map->buckets) map 128 tools/lib/bpf/hashmap.c for (prev_ptr = &map->buckets[hash], cur = *prev_ptr; map 131 tools/lib/bpf/hashmap.c if 
(map->equal_fn(cur->key, key, map->ctx)) { map 142 tools/lib/bpf/hashmap.c int hashmap__insert(struct hashmap *map, const void *key, void *value, map 155 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); map 157 tools/lib/bpf/hashmap.c hashmap_find_entry(map, key, h, NULL, &entry)) { map 175 tools/lib/bpf/hashmap.c if (hashmap_needs_to_grow(map)) { map 176 tools/lib/bpf/hashmap.c err = hashmap_grow(map); map 179 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); map 188 tools/lib/bpf/hashmap.c hashmap_add_entry(&map->buckets[h], entry); map 189 tools/lib/bpf/hashmap.c map->sz++; map 194 tools/lib/bpf/hashmap.c bool hashmap__find(const struct hashmap *map, const void *key, void **value) map 199 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); map 200 tools/lib/bpf/hashmap.c if (!hashmap_find_entry(map, key, h, NULL, &entry)) map 208 tools/lib/bpf/hashmap.c bool hashmap__delete(struct hashmap *map, const void *key, map 214 tools/lib/bpf/hashmap.c h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); map 215 tools/lib/bpf/hashmap.c if (!hashmap_find_entry(map, key, h, &pprev, &entry)) map 225 tools/lib/bpf/hashmap.c map->sz--; map 56 tools/lib/bpf/hashmap.h void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn, map 61 tools/lib/bpf/hashmap.h void hashmap__clear(struct hashmap *map); map 62 tools/lib/bpf/hashmap.h void hashmap__free(struct hashmap *map); map 64 tools/lib/bpf/hashmap.h size_t hashmap__size(const struct hashmap *map); map 65 tools/lib/bpf/hashmap.h size_t hashmap__capacity(const struct hashmap *map); map 95 tools/lib/bpf/hashmap.h int hashmap__insert(struct hashmap *map, const void *key, void *value, map 99 tools/lib/bpf/hashmap.h static inline int hashmap__add(struct hashmap *map, map 102 tools/lib/bpf/hashmap.h return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL); map 105 tools/lib/bpf/hashmap.h static inline int hashmap__set(struct hashmap *map, map 109 tools/lib/bpf/hashmap.h return hashmap__insert(map, key, value, HASHMAP_SET, map 113 tools/lib/bpf/hashmap.h static inline int hashmap__update(struct hashmap *map, map 117 tools/lib/bpf/hashmap.h return hashmap__insert(map, key, value, HASHMAP_UPDATE, map 121 tools/lib/bpf/hashmap.h static inline int hashmap__append(struct hashmap *map, map 124 tools/lib/bpf/hashmap.h return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL); map 127 tools/lib/bpf/hashmap.h bool hashmap__delete(struct hashmap *map, const void *key, map 130 tools/lib/bpf/hashmap.h bool hashmap__find(const struct hashmap *map, const void *key, void **value); map 138 tools/lib/bpf/hashmap.h #define hashmap__for_each_entry(map, cur, bkt) \ map 139 tools/lib/bpf/hashmap.h for (bkt = 0; bkt < map->cap; bkt++) \ map 140 tools/lib/bpf/hashmap.h for (cur = map->buckets[bkt]; cur; cur = cur->next) map 150 tools/lib/bpf/hashmap.h #define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \ map 151 tools/lib/bpf/hashmap.h for (bkt = 0; bkt < map->cap; bkt++) \ map 152 tools/lib/bpf/hashmap.h for (cur = map->buckets[bkt]; \ map 162 tools/lib/bpf/hashmap.h #define hashmap__for_each_key_entry(map, cur, _key) \ map 163 tools/lib/bpf/hashmap.h for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\ map 164 tools/lib/bpf/hashmap.h map->cap_bits); \ map 165 tools/lib/bpf/hashmap.h map->buckets ? 
map->buckets[bkt] : NULL; }); \ map 168 tools/lib/bpf/hashmap.h if (map->equal_fn(cur->key, (_key), map->ctx)) map 170 tools/lib/bpf/hashmap.h #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \ map 171 tools/lib/bpf/hashmap.h for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\ map 172 tools/lib/bpf/hashmap.h map->cap_bits); \ map 173 tools/lib/bpf/hashmap.h cur = map->buckets ? map->buckets[bkt] : NULL; }); \ map 176 tools/lib/bpf/hashmap.h if (map->equal_fn(cur->key, (_key), map->ctx)) map 818 tools/lib/bpf/libbpf.c struct bpf_map *map; map 820 tools/lib/bpf/libbpf.c map = bpf_object__add_map(obj); map 821 tools/lib/bpf/libbpf.c if (IS_ERR(map)) map 822 tools/lib/bpf/libbpf.c return PTR_ERR(map); map 824 tools/lib/bpf/libbpf.c map->libbpf_type = type; map 825 tools/lib/bpf/libbpf.c map->sec_idx = sec_idx; map 826 tools/lib/bpf/libbpf.c map->sec_offset = 0; map 829 tools/lib/bpf/libbpf.c map->name = strdup(map_name); map 830 tools/lib/bpf/libbpf.c if (!map->name) { map 835 tools/lib/bpf/libbpf.c map_name, map->sec_idx, map->sec_offset); map 837 tools/lib/bpf/libbpf.c def = &map->def; map 846 tools/lib/bpf/libbpf.c zfree(&map->name); map 853 tools/lib/bpf/libbpf.c pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name); map 948 tools/lib/bpf/libbpf.c struct bpf_map *map; map 955 tools/lib/bpf/libbpf.c map = bpf_object__add_map(obj); map 956 tools/lib/bpf/libbpf.c if (IS_ERR(map)) map 957 tools/lib/bpf/libbpf.c return PTR_ERR(map); map 967 tools/lib/bpf/libbpf.c map->libbpf_type = LIBBPF_MAP_UNSPEC; map 968 tools/lib/bpf/libbpf.c map->sec_idx = sym.st_shndx; map 969 tools/lib/bpf/libbpf.c map->sec_offset = sym.st_value; map 971 tools/lib/bpf/libbpf.c map_name, map->sec_idx, map->sec_offset); map 978 tools/lib/bpf/libbpf.c map->name = strdup(map_name); map 979 tools/lib/bpf/libbpf.c if (!map->name) { map 983 tools/lib/bpf/libbpf.c pr_debug("map %d is \"%s\"\n", i, map->name); map 992 tools/lib/bpf/libbpf.c memcpy(&map->def, def, map_def_sz); map 1012 tools/lib/bpf/libbpf.c memcpy(&map->def, def, sizeof(struct bpf_map_def)); map 1082 tools/lib/bpf/libbpf.c struct bpf_map *map; map 1122 tools/lib/bpf/libbpf.c map = bpf_object__add_map(obj); map 1123 tools/lib/bpf/libbpf.c if (IS_ERR(map)) map 1124 tools/lib/bpf/libbpf.c return PTR_ERR(map); map 1125 tools/lib/bpf/libbpf.c map->name = strdup(map_name); map 1126 tools/lib/bpf/libbpf.c if (!map->name) { map 1130 tools/lib/bpf/libbpf.c map->libbpf_type = LIBBPF_MAP_UNSPEC; map 1131 tools/lib/bpf/libbpf.c map->def.type = BPF_MAP_TYPE_UNSPEC; map 1132 tools/lib/bpf/libbpf.c map->sec_idx = sec_idx; map 1133 tools/lib/bpf/libbpf.c map->sec_offset = vi->offset; map 1135 tools/lib/bpf/libbpf.c map_name, map->sec_idx, map->sec_offset); map 1149 tools/lib/bpf/libbpf.c &map->def.type)) map 1152 tools/lib/bpf/libbpf.c map_name, map->def.type); map 1155 tools/lib/bpf/libbpf.c &map->def.max_entries)) map 1158 tools/lib/bpf/libbpf.c map_name, map->def.max_entries); map 1161 tools/lib/bpf/libbpf.c &map->def.map_flags)) map 1164 tools/lib/bpf/libbpf.c map_name, map->def.map_flags); map 1173 tools/lib/bpf/libbpf.c if (map->def.key_size && map->def.key_size != sz) { map 1175 tools/lib/bpf/libbpf.c map_name, map->def.key_size, sz); map 1178 tools/lib/bpf/libbpf.c map->def.key_size = sz; map 1201 tools/lib/bpf/libbpf.c if (map->def.key_size && map->def.key_size != sz) { map 1203 tools/lib/bpf/libbpf.c map_name, map->def.key_size, sz); map 1206 tools/lib/bpf/libbpf.c map->def.key_size = sz; map 1207 tools/lib/bpf/libbpf.c 
map->btf_key_type_id = t->type; map 1216 tools/lib/bpf/libbpf.c if (map->def.value_size && map->def.value_size != sz) { map 1218 tools/lib/bpf/libbpf.c map_name, map->def.value_size, sz); map 1221 tools/lib/bpf/libbpf.c map->def.value_size = sz; map 1244 tools/lib/bpf/libbpf.c if (map->def.value_size && map->def.value_size != sz) { map 1246 tools/lib/bpf/libbpf.c map_name, map->def.value_size, sz); map 1249 tools/lib/bpf/libbpf.c map->def.value_size = sz; map 1250 tools/lib/bpf/libbpf.c map->btf_value_type_id = t->type; map 1262 tools/lib/bpf/libbpf.c if (map->def.type == BPF_MAP_TYPE_UNSPEC) { map 1853 tools/lib/bpf/libbpf.c static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) map 1855 tools/lib/bpf/libbpf.c struct bpf_map_def *def = &map->def; map 1860 tools/lib/bpf/libbpf.c if (map->sec_idx == obj->efile.btf_maps_shndx) map 1863 tools/lib/bpf/libbpf.c if (!bpf_map__is_internal(map)) { map 1864 tools/lib/bpf/libbpf.c ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, map 1873 tools/lib/bpf/libbpf.c libbpf_type_to_btf_name[map->libbpf_type]); map 1878 tools/lib/bpf/libbpf.c map->btf_key_type_id = key_type_id; map 1879 tools/lib/bpf/libbpf.c map->btf_value_type_id = bpf_map__is_internal(map) ? map 1884 tools/lib/bpf/libbpf.c int bpf_map__reuse_fd(struct bpf_map *map, int fd) map 1911 tools/lib/bpf/libbpf.c err = zclose(map->fd); map 1916 tools/lib/bpf/libbpf.c free(map->name); map 1918 tools/lib/bpf/libbpf.c map->fd = new_fd; map 1919 tools/lib/bpf/libbpf.c map->name = new_name; map 1920 tools/lib/bpf/libbpf.c map->def.type = info.type; map 1921 tools/lib/bpf/libbpf.c map->def.key_size = info.key_size; map 1922 tools/lib/bpf/libbpf.c map->def.value_size = info.value_size; map 1923 tools/lib/bpf/libbpf.c map->def.max_entries = info.max_entries; map 1924 tools/lib/bpf/libbpf.c map->def.map_flags = info.map_flags; map 1925 tools/lib/bpf/libbpf.c map->btf_key_type_id = info.btf_key_type_id; map 1926 tools/lib/bpf/libbpf.c map->btf_value_type_id = info.btf_value_type_id; map 1937 tools/lib/bpf/libbpf.c int bpf_map__resize(struct bpf_map *map, __u32 max_entries) map 1939 tools/lib/bpf/libbpf.c if (!map || !max_entries) map 1943 tools/lib/bpf/libbpf.c if (map->fd >= 0) map 1946 tools/lib/bpf/libbpf.c map->def.max_entries = max_entries; map 2003 tools/lib/bpf/libbpf.c int ret, map; map 2011 tools/lib/bpf/libbpf.c map = bpf_create_map_xattr(&map_attr); map 2012 tools/lib/bpf/libbpf.c if (map < 0) { map 2019 tools/lib/bpf/libbpf.c insns[0].imm = map; map 2033 tools/lib/bpf/libbpf.c close(map); map 2111 tools/lib/bpf/libbpf.c bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) map 2118 tools/lib/bpf/libbpf.c if (map->libbpf_type == LIBBPF_MAP_BSS) map 2121 tools/lib/bpf/libbpf.c data = map->libbpf_type == LIBBPF_MAP_DATA ? 
map 2124 tools/lib/bpf/libbpf.c err = bpf_map_update_elem(map->fd, &zero, data, 0); map 2126 tools/lib/bpf/libbpf.c if (!err && map->libbpf_type == LIBBPF_MAP_RODATA) { map 2127 tools/lib/bpf/libbpf.c err = bpf_map_freeze(map->fd); map 2131 tools/lib/bpf/libbpf.c map->name, cp); map 2147 tools/lib/bpf/libbpf.c struct bpf_map *map = &obj->maps[i]; map 2148 tools/lib/bpf/libbpf.c struct bpf_map_def *def = &map->def; map 2150 tools/lib/bpf/libbpf.c int *pfd = &map->fd; map 2152 tools/lib/bpf/libbpf.c if (map->fd >= 0) { map 2154 tools/lib/bpf/libbpf.c map->name, map->fd); map 2159 tools/lib/bpf/libbpf.c create_attr.name = map->name; map 2160 tools/lib/bpf/libbpf.c create_attr.map_ifindex = map->map_ifindex; map 2176 tools/lib/bpf/libbpf.c map->name, nr_cpus); map 2185 tools/lib/bpf/libbpf.c map->inner_map_fd >= 0) map 2186 tools/lib/bpf/libbpf.c create_attr.inner_map_fd = map->inner_map_fd; map 2188 tools/lib/bpf/libbpf.c if (obj->btf && !bpf_map_find_btf_info(obj, map)) { map 2190 tools/lib/bpf/libbpf.c create_attr.btf_key_type_id = map->btf_key_type_id; map 2191 tools/lib/bpf/libbpf.c create_attr.btf_value_type_id = map->btf_value_type_id; map 2200 tools/lib/bpf/libbpf.c map->name, cp, err); map 2204 tools/lib/bpf/libbpf.c map->btf_key_type_id = 0; map 2205 tools/lib/bpf/libbpf.c map->btf_value_type_id = 0; map 2216 tools/lib/bpf/libbpf.c map->name, cp, err); map 2222 tools/lib/bpf/libbpf.c if (bpf_map__is_internal(map)) { map 2223 tools/lib/bpf/libbpf.c err = bpf_object__populate_internal_map(obj, map); map 2230 tools/lib/bpf/libbpf.c pr_debug("created map %s: fd=%d\n", map->name, *pfd); map 3958 tools/lib/bpf/libbpf.c int bpf_map__pin(struct bpf_map *map, const char *path) map 3967 tools/lib/bpf/libbpf.c if (map == NULL) { map 3972 tools/lib/bpf/libbpf.c if (bpf_obj_pin(map->fd, path)) { map 3983 tools/lib/bpf/libbpf.c int bpf_map__unpin(struct bpf_map *map, const char *path) map 3991 tools/lib/bpf/libbpf.c if (map == NULL) { map 4006 tools/lib/bpf/libbpf.c struct bpf_map *map; map 4021 tools/lib/bpf/libbpf.c bpf_object__for_each_map(map, obj) { map 4026 tools/lib/bpf/libbpf.c bpf_map__name(map)); map 4035 tools/lib/bpf/libbpf.c err = bpf_map__pin(map, buf); map 4043 tools/lib/bpf/libbpf.c while ((map = bpf_map__prev(map, obj))) { map 4048 tools/lib/bpf/libbpf.c bpf_map__name(map)); map 4054 tools/lib/bpf/libbpf.c bpf_map__unpin(map, buf); map 4062 tools/lib/bpf/libbpf.c struct bpf_map *map; map 4068 tools/lib/bpf/libbpf.c bpf_object__for_each_map(map, obj) { map 4073 tools/lib/bpf/libbpf.c bpf_map__name(map)); map 4079 tools/lib/bpf/libbpf.c err = bpf_map__unpin(map, buf); map 4650 tools/lib/bpf/libbpf.c int bpf_map__fd(const struct bpf_map *map) map 4652 tools/lib/bpf/libbpf.c return map ? map->fd : -EINVAL; map 4655 tools/lib/bpf/libbpf.c const struct bpf_map_def *bpf_map__def(const struct bpf_map *map) map 4657 tools/lib/bpf/libbpf.c return map ? &map->def : ERR_PTR(-EINVAL); map 4660 tools/lib/bpf/libbpf.c const char *bpf_map__name(const struct bpf_map *map) map 4662 tools/lib/bpf/libbpf.c return map ? map->name : NULL; map 4665 tools/lib/bpf/libbpf.c __u32 bpf_map__btf_key_type_id(const struct bpf_map *map) map 4667 tools/lib/bpf/libbpf.c return map ? map->btf_key_type_id : 0; map 4670 tools/lib/bpf/libbpf.c __u32 bpf_map__btf_value_type_id(const struct bpf_map *map) map 4672 tools/lib/bpf/libbpf.c return map ? 
map->btf_value_type_id : 0; map 4675 tools/lib/bpf/libbpf.c int bpf_map__set_priv(struct bpf_map *map, void *priv, map 4678 tools/lib/bpf/libbpf.c if (!map) map 4681 tools/lib/bpf/libbpf.c if (map->priv) { map 4682 tools/lib/bpf/libbpf.c if (map->clear_priv) map 4683 tools/lib/bpf/libbpf.c map->clear_priv(map, map->priv); map 4686 tools/lib/bpf/libbpf.c map->priv = priv; map 4687 tools/lib/bpf/libbpf.c map->clear_priv = clear_priv; map 4691 tools/lib/bpf/libbpf.c void *bpf_map__priv(const struct bpf_map *map) map 4693 tools/lib/bpf/libbpf.c return map ? map->priv : ERR_PTR(-EINVAL); map 4696 tools/lib/bpf/libbpf.c bool bpf_map__is_offload_neutral(const struct bpf_map *map) map 4698 tools/lib/bpf/libbpf.c return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; map 4701 tools/lib/bpf/libbpf.c bool bpf_map__is_internal(const struct bpf_map *map) map 4703 tools/lib/bpf/libbpf.c return map->libbpf_type != LIBBPF_MAP_UNSPEC; map 4706 tools/lib/bpf/libbpf.c void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex) map 4708 tools/lib/bpf/libbpf.c map->map_ifindex = ifindex; map 4711 tools/lib/bpf/libbpf.c int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd) map 4713 tools/lib/bpf/libbpf.c if (!bpf_map_type__is_map_in_map(map->def.type)) { map 4717 tools/lib/bpf/libbpf.c if (map->inner_map_fd != -1) { map 4721 tools/lib/bpf/libbpf.c map->inner_map_fd = fd; map 4820 tools/lib/bpf/libbpf.c struct bpf_map *map; map 4862 tools/lib/bpf/libbpf.c bpf_object__for_each_map(map, obj) { map 4863 tools/lib/bpf/libbpf.c if (!bpf_map__is_offload_neutral(map)) map 4864 tools/lib/bpf/libbpf.c map->map_ifindex = attr->ifindex; map 5467 tools/lib/bpf/libbpf.c struct bpf_map_info map = {}; map 5479 tools/lib/bpf/libbpf.c map_info_len = sizeof(map); map 5480 tools/lib/bpf/libbpf.c err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len); map 5488 tools/lib/bpf/libbpf.c if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) { map 5490 tools/lib/bpf/libbpf.c map.name); map 5523 tools/lib/bpf/libbpf.c if (map.max_entries < pb->cpu_cnt) map 5524 tools/lib/bpf/libbpf.c pb->cpu_cnt = map.max_entries; map 313 tools/lib/bpf/libbpf.h bpf_map__next(const struct bpf_map *map, const struct bpf_object *obj); map 321 tools/lib/bpf/libbpf.h bpf_map__prev(const struct bpf_map *map, const struct bpf_object *obj); map 323 tools/lib/bpf/libbpf.h LIBBPF_API int bpf_map__fd(const struct bpf_map *map); map 324 tools/lib/bpf/libbpf.h LIBBPF_API const struct bpf_map_def *bpf_map__def(const struct bpf_map *map); map 325 tools/lib/bpf/libbpf.h LIBBPF_API const char *bpf_map__name(const struct bpf_map *map); map 326 tools/lib/bpf/libbpf.h LIBBPF_API __u32 bpf_map__btf_key_type_id(const struct bpf_map *map); map 327 tools/lib/bpf/libbpf.h LIBBPF_API __u32 bpf_map__btf_value_type_id(const struct bpf_map *map); map 330 tools/lib/bpf/libbpf.h LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv, map 332 tools/lib/bpf/libbpf.h LIBBPF_API void *bpf_map__priv(const struct bpf_map *map); map 333 tools/lib/bpf/libbpf.h LIBBPF_API int bpf_map__reuse_fd(struct bpf_map *map, int fd); map 334 tools/lib/bpf/libbpf.h LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); map 335 tools/lib/bpf/libbpf.h LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); map 336 tools/lib/bpf/libbpf.h LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); map 337 tools/lib/bpf/libbpf.h LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); map 338 tools/lib/bpf/libbpf.h LIBBPF_API int 
bpf_map__pin(struct bpf_map *map, const char *path); map 339 tools/lib/bpf/libbpf.h LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); map 341 tools/lib/bpf/libbpf.h LIBBPF_API int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd); map 211 tools/lib/bpf/xsk.c void *map; map 265 tools/lib/bpf/xsk.c map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), map 268 tools/lib/bpf/xsk.c if (map == MAP_FAILED) { map 276 tools/lib/bpf/xsk.c fill->producer = map + off.fr.producer; map 277 tools/lib/bpf/xsk.c fill->consumer = map + off.fr.consumer; map 278 tools/lib/bpf/xsk.c fill->flags = map + off.fr.flags; map 279 tools/lib/bpf/xsk.c fill->ring = map + off.fr.desc; map 282 tools/lib/bpf/xsk.c map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), map 285 tools/lib/bpf/xsk.c if (map == MAP_FAILED) { map 293 tools/lib/bpf/xsk.c comp->producer = map + off.cr.producer; map 294 tools/lib/bpf/xsk.c comp->consumer = map + off.cr.consumer; map 295 tools/lib/bpf/xsk.c comp->flags = map + off.cr.flags; map 296 tools/lib/bpf/xsk.c comp->ring = map + off.cr.desc; map 302 tools/lib/bpf/xsk.c munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64)); map 502 tools/lib/traceevent/event-parse.c struct func_map map; map 547 tools/lib/traceevent/event-parse.c struct func_map *map; map 552 tools/lib/traceevent/event-parse.c map = &tep->func_resolver->map; map 553 tools/lib/traceevent/event-parse.c map->mod = NULL; map 554 tools/lib/traceevent/event-parse.c map->addr = addr; map 555 tools/lib/traceevent/event-parse.c map->func = tep->func_resolver->func(tep->func_resolver->priv, map 556 tools/lib/traceevent/event-parse.c &map->addr, &map->mod); map 557 tools/lib/traceevent/event-parse.c if (map->func == NULL) map 560 tools/lib/traceevent/event-parse.c return map; map 574 tools/lib/traceevent/event-parse.c struct func_map *map; map 576 tools/lib/traceevent/event-parse.c map = find_func(tep, addr); map 577 tools/lib/traceevent/event-parse.c if (!map) map 580 tools/lib/traceevent/event-parse.c return map->func; map 595 tools/lib/traceevent/event-parse.c struct func_map *map; map 597 tools/lib/traceevent/event-parse.c map = find_func(tep, addr); map 598 tools/lib/traceevent/event-parse.c if (!map) map 601 tools/lib/traceevent/event-parse.c return map->addr; map 17 tools/perf/arch/arm/tests/dwarf-unwind.c struct map *map; map 29 tools/perf/arch/arm/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp); map 30 tools/perf/arch/arm/tests/dwarf-unwind.c if (!map) { map 36 tools/perf/arch/arm/tests/dwarf-unwind.c stack_size = map->end - sp; map 17 tools/perf/arch/arm64/tests/dwarf-unwind.c struct map *map; map 29 tools/perf/arch/arm64/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp); map 30 tools/perf/arch/arm64/tests/dwarf-unwind.c if (!map) { map 36 tools/perf/arch/arm64/tests/dwarf-unwind.c stack_size = map->end - sp; map 36 tools/perf/arch/arm64/util/header.c sysfs, cpus->map[cpu]); map 18 tools/perf/arch/powerpc/tests/dwarf-unwind.c struct map *map; map 30 tools/perf/arch/powerpc/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp); map 31 tools/perf/arch/powerpc/tests/dwarf-unwind.c if (!map) { map 37 tools/perf/arch/powerpc/tests/dwarf-unwind.c stack_size = map->end - sp; map 257 tools/perf/arch/powerpc/util/skip-callchain-idx.c if (al.map) map 258 tools/perf/arch/powerpc/util/skip-callchain-idx.c dso = al.map->dso; map 265 tools/perf/arch/powerpc/util/skip-callchain-idx.c rc = check_return_addr(dso, al.map->start, ip); 
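
The tools/lib/bpf/libbpf.{c,h} entries above list the public bpf_map accessors (bpf_map__fd, bpf_map__name, bpf_map__def, bpf_map__pin, ...) and the bpf_object__for_each_map iterator also used by the bpftool prog.c entries further up. A user-space sketch built only from those signatures; the object path argument and the /sys/fs/bpf pin prefix are illustrative assumptions:

/* Open an object, load it, then print and pin every map it contains. */
#include <limits.h>
#include <stdio.h>
#include <bpf/libbpf.h>

int dump_and_pin_maps(const char *obj_path)
{
	char pin_path[PATH_MAX];
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	obj = bpf_object__open(obj_path);
	if (libbpf_get_error(obj))
		return -1;

	err = bpf_object__load(obj);
	if (err)
		goto out;

	/* Same iteration the bpftool prog.c and bpf_object__pin_maps()
	 * entries above rely on. */
	bpf_object__for_each_map(map, obj) {
		const struct bpf_map_def *def = bpf_map__def(map);

		printf("map %s: fd=%d key=%u value=%u max_entries=%u\n",
		       bpf_map__name(map), bpf_map__fd(map),
		       def->key_size, def->value_size, def->max_entries);

		snprintf(pin_path, sizeof(pin_path), "/sys/fs/bpf/%s",
			 bpf_map__name(map));
		err = bpf_map__pin(map, pin_path);
		if (err)
			break;
	}
out:
	bpf_object__close(obj);
	return err;
}
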
map 89 tools/perf/arch/powerpc/util/sym-handling.c struct probe_trace_event *tev, struct map *map, map 104 tools/perf/arch/powerpc/util/sym-handling.c if (pev->point.offset || !map || !sym) map 117 tools/perf/arch/powerpc/util/sym-handling.c if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) map 132 tools/perf/arch/powerpc/util/sym-handling.c struct map *map; map 137 tools/perf/arch/powerpc/util/sym-handling.c map = get_target_map(pev->target, pev->nsi, pev->uprobes); map 138 tools/perf/arch/powerpc/util/sym-handling.c if (!map || map__load(map) < 0) map 143 tools/perf/arch/powerpc/util/sym-handling.c map__for_each_symbol(map, sym, tmp) { map 144 tools/perf/arch/powerpc/util/sym-handling.c if (map->unmap_ip(map, sym->start) == tev->point.address) { map 145 tools/perf/arch/powerpc/util/sym-handling.c arch__fix_tev_from_maps(pev, tev, map, sym); map 8 tools/perf/arch/s390/annotate/instructions.c struct map *map = ms->map; map 10 tools/perf/arch/s390/annotate/instructions.c .map = map, map 39 tools/perf/arch/s390/annotate/instructions.c target.addr = map__objdump_2mem(map, ops->target.addr); map 42 tools/perf/arch/s390/annotate/instructions.c map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr) map 18 tools/perf/arch/x86/tests/dwarf-unwind.c struct map *map; map 30 tools/perf/arch/x86/tests/dwarf-unwind.c map = map_groups__find(thread->mg, (u64)sp); map 31 tools/perf/arch/x86/tests/dwarf-unwind.c if (!map) { map 37 tools/perf/arch/x86/tests/dwarf-unwind.c stack_size = map->end - sp; map 20 tools/perf/arch/x86/util/event.c struct map *pos; map 258 tools/perf/bench/epoll-ctl.c CPU_SET(cpu->map[i % cpu->nr], &cpuset); map 347 tools/perf/bench/epoll-wait.c CPU_SET(cpu->map[i % cpu->nr], &cpuset); map 172 tools/perf/bench/futex-hash.c CPU_SET(cpu->map[i % cpu->nr], &cpuset); map 138 tools/perf/bench/futex-lock-pi.c CPU_SET(cpu->map[i % cpu->nr], &cpuset); map 98 tools/perf/bench/futex-requeue.c CPU_SET(cpu->map[i % cpu->nr], &cpuset); map 152 tools/perf/bench/futex-wake-parallel.c CPU_SET(cpu->map[i % cpu->nr], &cpuset); map 104 tools/perf/bench/futex-wake.c CPU_SET(cpu->map[i % cpu->nr], &cpuset); map 201 tools/perf/builtin-annotate.c if (a.map != NULL) map 202 tools/perf/builtin-annotate.c a.map->dso->hit = 1; map 236 tools/perf/builtin-annotate.c &al->map->dso->symbols); map 238 tools/perf/builtin-annotate.c dso__reset_find_symbol_cache(al->map->dso); map 304 tools/perf/builtin-annotate.c return symbol__tty_annotate(he->ms.sym, he->ms.map, evsel, &ann->opts); map 306 tools/perf/builtin-annotate.c return symbol__tty_annotate2(he->ms.sym, he->ms.map, evsel, &ann->opts); map 320 tools/perf/builtin-annotate.c if (he->ms.sym == NULL || he->ms.map->dso->annotate_warned) map 2062 tools/perf/builtin-c2c.c struct perf_cpu_map *map = n[node].map; map 2072 tools/perf/builtin-c2c.c if (perf_cpu_map__empty(map)) map 2075 tools/perf/builtin-c2c.c for (cpu = 0; cpu < map->nr; cpu++) { map 2076 tools/perf/builtin-c2c.c set_bit(map->map[cpu], set); map 2078 tools/perf/builtin-c2c.c if (WARN_ONCE(cpu2node[map->map[cpu]] != -1, "node/cpu topology bug")) map 2081 tools/perf/builtin-c2c.c cpu2node[map->map[cpu]] = node; map 621 tools/perf/builtin-diff.c if (!he->ms.map || !he->ms.sym) map 1352 tools/perf/builtin-diff.c start_line = map__srcline(he->ms.map, bi->sym->start + bi->start, map 1355 tools/perf/builtin-diff.c end_line = map__srcline(he->ms.map, bi->sym->start + bi->end, map 164 tools/perf/builtin-ftrace.c ftrace->evlist->core.threads->map[i]); map 443 
tools/perf/builtin-inject.c if (!al.map->dso->hit) { map 444 tools/perf/builtin-inject.c al.map->dso->hit = 1; map 445 tools/perf/builtin-inject.c if (map__load(al.map) >= 0) { map 446 tools/perf/builtin-inject.c dso__inject_build_id(al.map->dso, tool, machine); map 455 tools/perf/builtin-inject.c al.map->dso->long_name); map 30 tools/perf/builtin-kallsyms.c struct map *map; map 31 tools/perf/builtin-kallsyms.c struct symbol *symbol = machine__find_kernel_symbol_by_name(machine, argv[i], &map); map 39 tools/perf/builtin-kallsyms.c symbol->name, map->dso->short_name, map->dso->long_name, map 40 tools/perf/builtin-kallsyms.c map->unmap_ip(map, symbol->start), map->unmap_ip(map, symbol->end), map 336 tools/perf/builtin-kmem.c struct map *kernel_map; map 415 tools/perf/builtin-kmem.c if (node->map) map 416 tools/perf/builtin-kmem.c addr = map__unmap_ip(node->map, node->ip); map 1007 tools/perf/builtin-kmem.c struct map *map; map 1014 tools/perf/builtin-kmem.c sym = machine__find_kernel_symbol(machine, addr, &map); map 1020 tools/perf/builtin-kmem.c addr - map->unmap_ip(map, sym->start)); map 1073 tools/perf/builtin-kmem.c struct map *map; map 1078 tools/perf/builtin-kmem.c sym = machine__find_kernel_symbol(machine, data->callsite, &map); map 1115 tools/perf/builtin-kmem.c struct map *map; map 1120 tools/perf/builtin-kmem.c sym = machine__find_kernel_symbol(machine, data->callsite, &map); map 171 tools/perf/builtin-mem.c if (al.map != NULL) map 172 tools/perf/builtin-mem.c al.map->dso->hit = 1; map 200 tools/perf/builtin-mem.c al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???", map 225 tools/perf/builtin-mem.c al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???", map 123 tools/perf/builtin-record.c static int record__write(struct record *rec, struct mmap *map __maybe_unused, map 259 tools/perf/builtin-record.c static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size) map 279 tools/perf/builtin-record.c perf_mmap__mmap_len(map) - aio->size, map 296 tools/perf/builtin-record.c perf_mmap__get(map); map 304 tools/perf/builtin-record.c static int record__aio_push(struct record *rec, struct mmap *map, off_t *off) map 315 tools/perf/builtin-record.c idx = record__aio_sync(map, false); map 316 tools/perf/builtin-record.c aio.data = map->aio.data[idx]; map 317 tools/perf/builtin-record.c ret = perf_mmap__push(map, &aio, record__aio_pushfn); map 322 tools/perf/builtin-record.c ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off); map 335 tools/perf/builtin-record.c perf_mmap__put(map); map 361 tools/perf/builtin-record.c struct mmap *map = &maps[i]; map 363 tools/perf/builtin-record.c if (map->core.base) map 364 tools/perf/builtin-record.c record__aio_sync(map, true); map 391 tools/perf/builtin-record.c static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused, map 486 tools/perf/builtin-record.c static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size) map 491 tools/perf/builtin-record.c size = zstd_compress(rec->session, map->data, perf_mmap__mmap_len(map), bf, size); map 492 tools/perf/builtin-record.c bf = map->data; map 496 tools/perf/builtin-record.c return record__write(rec, map, bf, size); map 531 tools/perf/builtin-record.c struct mmap *map, map 559 tools/perf/builtin-record.c record__write(rec, map, event, event->header.size); map 560 tools/perf/builtin-record.c record__write(rec, map, data1, len1); map 562 tools/perf/builtin-record.c record__write(rec, map, 
data2, len2); map 563 tools/perf/builtin-record.c record__write(rec, map, &pad, padding); map 569 tools/perf/builtin-record.c struct mmap *map) map 573 tools/perf/builtin-record.c ret = auxtrace_mmap__read(map, rec->itr, &rec->tool, map 585 tools/perf/builtin-record.c struct mmap *map) map 589 tools/perf/builtin-record.c ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool, map 607 tools/perf/builtin-record.c struct mmap *map = &rec->evlist->mmap[i]; map 609 tools/perf/builtin-record.c if (!map->auxtrace_mmap.base) map 612 tools/perf/builtin-record.c if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) { map 672 tools/perf/builtin-record.c struct mmap *map __maybe_unused) map 905 tools/perf/builtin-record.c static void record__adjust_affinity(struct record *rec, struct mmap *map) map 908 tools/perf/builtin-record.c !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) { map 910 tools/perf/builtin-record.c CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask); map 971 tools/perf/builtin-record.c struct mmap *map = &maps[i]; map 973 tools/perf/builtin-record.c if (map->core.base) { map 974 tools/perf/builtin-record.c record__adjust_affinity(rec, map); map 976 tools/perf/builtin-record.c flush = map->core.flush; map 977 tools/perf/builtin-record.c map->core.flush = 1; map 980 tools/perf/builtin-record.c if (perf_mmap__push(map, rec, record__pushfn) < 0) { map 982 tools/perf/builtin-record.c map->core.flush = flush; map 987 tools/perf/builtin-record.c if (record__aio_push(rec, map, &off) < 0) { map 990 tools/perf/builtin-record.c map->core.flush = flush; map 996 tools/perf/builtin-record.c map->core.flush = flush; map 999 tools/perf/builtin-record.c if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode && map 1000 tools/perf/builtin-record.c record__auxtrace_mmap_read(rec, map) != 0) { map 289 tools/perf/builtin-report.c if (al.map != NULL) map 290 tools/perf/builtin-report.c al.map->dso->hit = 1; map 526 tools/perf/builtin-report.c struct map *kernel_map = machine__kernel_map(&rep->session->machines.host); map 648 tools/perf/builtin-report.c symbol__annotate2(sym, he->ms.map, evsel, map 732 tools/perf/builtin-report.c struct map *map = rb_entry(nd, struct map, rb_node); map 735 tools/perf/builtin-report.c indent, "", map->start, map->end, map 736 tools/perf/builtin-report.c map->prot & PROT_READ ? 'r' : '-', map 737 tools/perf/builtin-report.c map->prot & PROT_WRITE ? 'w' : '-', map 738 tools/perf/builtin-report.c map->prot & PROT_EXEC ? 'x' : '-', map 739 tools/perf/builtin-report.c map->flags & MAP_SHARED ? 's' : 'p', map 740 tools/perf/builtin-report.c map->pgoff, map 741 tools/perf/builtin-report.c map->ino, map->dso->name); map 227 tools/perf/builtin-sched.c struct perf_sched_map map; map 1513 tools/perf/builtin-sched.c if (!sched->map.color_pids || !thread || thread__priv(thread)) map 1516 tools/perf/builtin-sched.c if (thread_map__has(sched->map.color_pids, tid)) map 1543 tools/perf/builtin-sched.c if (sched->map.comp) { map 1544 tools/perf/builtin-sched.c cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS); map 1545 tools/perf/builtin-sched.c if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) { map 1546 tools/perf/builtin-sched.c sched->map.comp_cpus[cpus_nr++] = this_cpu; map 1605 tools/perf/builtin-sched.c int cpu = sched->map.comp ? 
sched->map.comp_cpus[i] : i; map 1614 tools/perf/builtin-sched.c if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu)) map 1617 tools/perf/builtin-sched.c if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu)) map 1636 tools/perf/builtin-sched.c if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu)) map 1652 tools/perf/builtin-sched.c if (sched->map.comp && new_cpu) map 3179 tools/perf/builtin-sched.c struct perf_cpu_map *map; map 3183 tools/perf/builtin-sched.c if (sched->map.comp) { map 3184 tools/perf/builtin-sched.c sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int)); map 3185 tools/perf/builtin-sched.c if (!sched->map.comp_cpus) map 3189 tools/perf/builtin-sched.c if (!sched->map.cpus_str) map 3192 tools/perf/builtin-sched.c map = perf_cpu_map__new(sched->map.cpus_str); map 3193 tools/perf/builtin-sched.c if (!map) { map 3194 tools/perf/builtin-sched.c pr_err("failed to get cpus map from %s\n", sched->map.cpus_str); map 3198 tools/perf/builtin-sched.c sched->map.cpus = map; map 3204 tools/perf/builtin-sched.c struct perf_thread_map *map; map 3206 tools/perf/builtin-sched.c if (!sched->map.color_pids_str) map 3209 tools/perf/builtin-sched.c map = thread_map__new_by_tid_str(sched->map.color_pids_str); map 3210 tools/perf/builtin-sched.c if (!map) { map 3211 tools/perf/builtin-sched.c pr_err("failed to get thread map from %s\n", sched->map.color_pids_str); map 3215 tools/perf/builtin-sched.c sched->map.color_pids = map; map 3221 tools/perf/builtin-sched.c struct perf_cpu_map *map; map 3223 tools/perf/builtin-sched.c if (!sched->map.color_cpus_str) map 3226 tools/perf/builtin-sched.c map = perf_cpu_map__new(sched->map.color_cpus_str); map 3227 tools/perf/builtin-sched.c if (!map) { map 3228 tools/perf/builtin-sched.c pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str); map 3232 tools/perf/builtin-sched.c sched->map.color_cpus = map; map 3395 tools/perf/builtin-sched.c OPT_BOOLEAN(0, "compact", &sched.map.comp, map 3397 tools/perf/builtin-sched.c OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids", map 3399 tools/perf/builtin-sched.c OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus", map 3401 tools/perf/builtin-sched.c OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus", map 759 tools/perf/builtin-script.c printed += map__fprintf_dsoname(alf.map, fp); map 766 tools/perf/builtin-script.c printed += map__fprintf_dsoname(alt.map, fp); map 805 tools/perf/builtin-script.c printed += map__fprintf_dsoname(alf.map, fp); map 812 tools/perf/builtin-script.c printed += map__fprintf_dsoname(alt.map, fp); map 845 tools/perf/builtin-script.c !alf.map->dso->adjust_symbols) map 846 tools/perf/builtin-script.c from = map__map_ip(alf.map, from); map 849 tools/perf/builtin-script.c !alt.map->dso->adjust_symbols) map 850 tools/perf/builtin-script.c to = map__map_ip(alt.map, to); map 855 tools/perf/builtin-script.c printed += map__fprintf_dsoname(alf.map, fp); map 861 tools/perf/builtin-script.c printed += map__fprintf_dsoname(alt.map, fp); map 912 tools/perf/builtin-script.c if (!thread__find_map(thread, *cpumode, start, &al) || !al.map->dso) { map 916 tools/perf/builtin-script.c if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR) { map 922 tools/perf/builtin-script.c map__load(al.map); map 924 tools/perf/builtin-script.c offset = al.map->map_ip(al.map, start); map 925 tools/perf/builtin-script.c len = dso__data_read_offset(al.map->dso, machine, offset, (u8 *)buffer, map 928 tools/perf/builtin-script.c *is64bit = 
al.map->dso->is_64_bit; map 942 tools/perf/builtin-script.c if (!al.map) map 944 tools/perf/builtin-script.c ret = map__fprintf_srccode(al.map, al.addr, stdout, map 986 tools/perf/builtin-script.c if (al.map) map 987 tools/perf/builtin-script.c al.sym = map__find_symbol(al.map, al.addr); map 995 tools/perf/builtin-script.c off = al.addr - al.map->start - al.sym->start; map 1001 tools/perf/builtin-script.c printed += map__fprintf_srcline(al.map, al.addr, "\t", fp); map 1170 tools/perf/builtin-script.c printed += map__fprintf_dsoname(al.map, fp); map 1230 tools/perf/builtin-script.c dlen += map__fprintf_dsoname(al->map, fp); map 1348 tools/perf/builtin-script.c printed += map__fprintf_srcline(al->map, al->addr, "\n ", fp); map 1353 tools/perf/builtin-script.c int ret = map__fprintf_srccode(al->map, al->addr, stdout, map 1905 tools/perf/builtin-script.c if (map__fprintf_srccode(al->map, al->addr, stdout, map 1942 tools/perf/builtin-script.c counter->core.cpus->map[cpu], map 414 tools/perf/builtin-stat.c threads->map[i].pid); map 810 tools/perf/builtin-stat.c struct perf_cpu_map *map, int cpu) map 812 tools/perf/builtin-stat.c return cpu_map__get_socket(map, cpu, NULL); map 816 tools/perf/builtin-stat.c struct perf_cpu_map *map, int cpu) map 818 tools/perf/builtin-stat.c return cpu_map__get_die(map, cpu, NULL); map 822 tools/perf/builtin-stat.c struct perf_cpu_map *map, int cpu) map 824 tools/perf/builtin-stat.c return cpu_map__get_core(map, cpu, NULL); map 828 tools/perf/builtin-stat.c aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) map 832 tools/perf/builtin-stat.c if (idx >= map->nr) map 835 tools/perf/builtin-stat.c cpu = map->map[idx]; map 837 tools/perf/builtin-stat.c if (config->cpus_aggr_map->map[cpu] == -1) map 838 tools/perf/builtin-stat.c config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); map 840 tools/perf/builtin-stat.c return config->cpus_aggr_map->map[cpu]; map 844 tools/perf/builtin-stat.c struct perf_cpu_map *map, int idx) map 846 tools/perf/builtin-stat.c return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); map 850 tools/perf/builtin-stat.c struct perf_cpu_map *map, int idx) map 852 tools/perf/builtin-stat.c return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); map 856 tools/perf/builtin-stat.c struct perf_cpu_map *map, int idx) map 858 tools/perf/builtin-stat.c return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); map 934 tools/perf/builtin-stat.c static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) map 938 tools/perf/builtin-stat.c if (idx > map->nr) map 941 tools/perf/builtin-stat.c cpu = map->map[idx]; map 949 tools/perf/builtin-stat.c static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) map 952 tools/perf/builtin-stat.c int cpu = perf_env__get_cpu(env, map, idx); map 957 tools/perf/builtin-stat.c static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) map 960 tools/perf/builtin-stat.c int die_id = -1, cpu = perf_env__get_cpu(env, map, idx); map 981 tools/perf/builtin-stat.c static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) map 984 tools/perf/builtin-stat.c int core = -1, cpu = perf_env__get_cpu(env, map, idx); map 1030 tools/perf/builtin-stat.c struct perf_cpu_map *map, int idx) map 1032 tools/perf/builtin-stat.c return perf_env__get_socket(map, idx, &perf_stat.session->header.env); map 1035 tools/perf/builtin-stat.c struct perf_cpu_map *map, int idx) map 1037 tools/perf/builtin-stat.c return 
perf_env__get_die(map, idx, &perf_stat.session->header.env); map 1041 tools/perf/builtin-stat.c struct perf_cpu_map *map, int idx) map 1043 tools/perf/builtin-stat.c return perf_env__get_core(map, idx, &perf_stat.session->header.env); map 112 tools/perf/builtin-top.c struct map *map; map 121 tools/perf/builtin-top.c map = he->ms.map; map 126 tools/perf/builtin-top.c if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && map 127 tools/perf/builtin-top.c !dso__is_kcore(map->dso)) { map 145 tools/perf/builtin-top.c err = symbol__annotate(sym, map, evsel, 0, &top->annotation_opts, NULL); map 150 tools/perf/builtin-top.c symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg)); map 164 tools/perf/builtin-top.c static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip) map 179 tools/perf/builtin-top.c ip, map->dso->long_name, dso__symtab_origin(map->dso), map 180 tools/perf/builtin-top.c map->start, map->end, sym->start, sym->end, map 188 tools/perf/builtin-top.c map->erange_warned = true; map 221 tools/perf/builtin-top.c if (err == -ERANGE && !he->ms.map->erange_warned) map 222 tools/perf/builtin-top.c ui__warn_map_erange(he->ms.map, sym, ip); map 259 tools/perf/builtin-top.c more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel, &top->annotation_opts); map 775 tools/perf/builtin-top.c al.map && map__has_symbols(al.map) ? map 783 tools/perf/builtin-top.c if (al.sym == NULL && al.map != NULL) { map 797 tools/perf/builtin-top.c __map__is_kernel(al.map) && map__has_symbols(al.map)) { map 800 tools/perf/builtin-top.c dso__strerror_load(al.map->dso, serr, sizeof(serr)); map 94 tools/perf/builtin-trace.c struct bpf_map *map; map 107 tools/perf/builtin-trace.c struct bpf_map *map; map 129 tools/perf/builtin-trace.c struct bpf_map *map; map 2438 tools/perf/builtin-trace.c if ((verbose > 0 || print_dso) && al->map) map 2439 tools/perf/builtin-trace.c fprintf(f, "%s@", al->map->dso->long_name); map 2444 tools/perf/builtin-trace.c else if (al->map) map 2499 tools/perf/builtin-trace.c if (!al.map) { map 2502 tools/perf/builtin-trace.c if (al.map) map 2874 tools/perf/builtin-trace.c int fd = bpf_map__fd(trace->syscalls.map); map 2899 tools/perf/builtin-trace.c int fd = bpf_map__fd(trace->syscalls.map); map 3131 tools/perf/builtin-trace.c if (trace->syscalls.map) map 3138 tools/perf/builtin-trace.c static int bpf_map__set_filter_pids(struct bpf_map *map __maybe_unused, map 3144 tools/perf/builtin-trace.c int map_fd = bpf_map__fd(map); map 3179 tools/perf/builtin-trace.c if (!err && trace->filter_pids.map) map 3180 tools/perf/builtin-trace.c err = bpf_map__set_filter_pids(trace->filter_pids.map, nr, pids); map 3197 tools/perf/builtin-trace.c if (!err && trace->filter_pids.map) { map 3198 tools/perf/builtin-trace.c err = bpf_map__set_filter_pids(trace->filter_pids.map, trace->filter_pids.nr, map 3378 tools/perf/builtin-trace.c if (trace->syscalls.map) map 3412 tools/perf/builtin-trace.c if (trace->dump.map) map 3413 tools/perf/builtin-trace.c bpf_map__fprintf(trace->dump.map, trace->output); map 4016 tools/perf/builtin-trace.c trace->filter_pids.map = trace__find_bpf_map_by_name(trace, "pids_filtered"); map 4021 tools/perf/builtin-trace.c trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls"); map 4256 tools/perf/builtin-trace.c trace.dump.map = trace__find_bpf_map_by_name(&trace, map_dump_str); map 4257 tools/perf/builtin-trace.c if (trace.dump.map == NULL) { map 45 tools/perf/include/bpf/bpf.h static int (*bpf_map_update_elem)(struct bpf_map *map, void 
*key, void *value, u64 flags) = (void *)BPF_FUNC_map_update_elem; map 46 tools/perf/include/bpf/bpf.h static void *(*bpf_map_lookup_elem)(struct bpf_map *map, void *key) = (void *)BPF_FUNC_map_lookup_elem; map 48 tools/perf/include/bpf/bpf.h static void (*bpf_tail_call)(void *ctx, void *map, int index) = (void *)BPF_FUNC_tail_call; map 240 tools/perf/jvmti/libjvmti.c jvmtiAddrLocationMap const *map, map 264 tools/perf/jvmti/libjvmti.c if (has_line_numbers && map && map_length) { map 19 tools/perf/lib/cpumap.c cpus->map[0] = -1; map 26 tools/perf/lib/cpumap.c static void cpu_map__delete(struct perf_cpu_map *map) map 28 tools/perf/lib/cpumap.c if (map) { map 29 tools/perf/lib/cpumap.c WARN_ONCE(refcount_read(&map->refcnt) != 0, map 31 tools/perf/lib/cpumap.c free(map); map 35 tools/perf/lib/cpumap.c struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map) map 37 tools/perf/lib/cpumap.c if (map) map 38 tools/perf/lib/cpumap.c refcount_inc(&map->refcnt); map 39 tools/perf/lib/cpumap.c return map; map 42 tools/perf/lib/cpumap.c void perf_cpu_map__put(struct perf_cpu_map *map) map 44 tools/perf/lib/cpumap.c if (map && refcount_dec_and_test(&map->refcnt)) map 45 tools/perf/lib/cpumap.c cpu_map__delete(map); map 62 tools/perf/lib/cpumap.c cpus->map[i] = i; map 78 tools/perf/lib/cpumap.c memcpy(cpus->map, tmp_cpus, payload_size); map 237 tools/perf/lib/cpumap.c return cpus->map[idx]; map 247 tools/perf/lib/cpumap.c bool perf_cpu_map__empty(const struct perf_cpu_map *map) map 249 tools/perf/lib/cpumap.c return map ? map->map[0] == -1 : true; map 257 tools/perf/lib/cpumap.c if (cpus->map[i] == cpu) map 264 tools/perf/lib/cpumap.c int perf_cpu_map__max(struct perf_cpu_map *map) map 268 tools/perf/lib/cpumap.c for (i = 0; i < map->nr; i++) { map 269 tools/perf/lib/cpumap.c if (map->map[i] > max) map 270 tools/perf/lib/cpumap.c max = map->map[i]; map 104 tools/perf/lib/evsel.c threads->map[thread].pid, map 105 tools/perf/lib/evsel.c cpus->map[cpu], -1, 0); map 10 tools/perf/lib/include/internal/cpumap.h int map[]; map 18 tools/perf/lib/include/internal/threadmap.h struct thread_map_data map[]; map 21 tools/perf/lib/include/internal/threadmap.h struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr); map 14 tools/perf/lib/include/perf/cpumap.h LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); map 15 tools/perf/lib/include/perf/cpumap.h LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); map 18 tools/perf/lib/include/perf/cpumap.h LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map); map 19 tools/perf/lib/include/perf/cpumap.h LIBPERF_API int perf_cpu_map__max(struct perf_cpu_map *map); map 12 tools/perf/lib/include/perf/threadmap.h LIBPERF_API void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid); map 13 tools/perf/lib/include/perf/threadmap.h LIBPERF_API char *perf_thread_map__comm(struct perf_thread_map *map, int thread); map 15 tools/perf/lib/include/perf/threadmap.h LIBPERF_API pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread); map 17 tools/perf/lib/include/perf/threadmap.h LIBPERF_API struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map); map 18 tools/perf/lib/include/perf/threadmap.h LIBPERF_API void perf_thread_map__put(struct perf_thread_map *map); map 10 tools/perf/lib/threadmap.c static void perf_thread_map__reset(struct perf_thread_map *map, int start, int nr) map 12 tools/perf/lib/threadmap.c size_t size = (nr - start) * 
sizeof(map->map[0]); map 14 tools/perf/lib/threadmap.c memset(&map->map[start], 0, size); map 15 tools/perf/lib/threadmap.c map->err_thread = -1; map 18 tools/perf/lib/threadmap.c struct perf_thread_map *perf_thread_map__realloc(struct perf_thread_map *map, int nr) map 20 tools/perf/lib/threadmap.c size_t size = sizeof(*map) + sizeof(map->map[0]) * nr; map 21 tools/perf/lib/threadmap.c int start = map ? map->nr : 0; map 23 tools/perf/lib/threadmap.c map = realloc(map, size); map 27 tools/perf/lib/threadmap.c if (map) map 28 tools/perf/lib/threadmap.c perf_thread_map__reset(map, start, nr); map 30 tools/perf/lib/threadmap.c return map; map 35 tools/perf/lib/threadmap.c void perf_thread_map__set_pid(struct perf_thread_map *map, int thread, pid_t pid) map 37 tools/perf/lib/threadmap.c map->map[thread].pid = pid; map 40 tools/perf/lib/threadmap.c char *perf_thread_map__comm(struct perf_thread_map *map, int thread) map 42 tools/perf/lib/threadmap.c return map->map[thread].comm; map 70 tools/perf/lib/threadmap.c struct perf_thread_map *perf_thread_map__get(struct perf_thread_map *map) map 72 tools/perf/lib/threadmap.c if (map) map 73 tools/perf/lib/threadmap.c refcount_inc(&map->refcnt); map 74 tools/perf/lib/threadmap.c return map; map 77 tools/perf/lib/threadmap.c void perf_thread_map__put(struct perf_thread_map *map) map 79 tools/perf/lib/threadmap.c if (map && refcount_dec_and_test(&map->refcnt)) map 80 tools/perf/lib/threadmap.c perf_thread_map__delete(map); map 88 tools/perf/lib/threadmap.c pid_t perf_thread_map__pid(struct perf_thread_map *map, int thread) map 90 tools/perf/lib/threadmap.c return map->map[thread].pid; map 79 tools/perf/pmu-events/jevents.c static void addfield(char *map, char **dst, const char *sep, map 100 tools/perf/pmu-events/jevents.c strncat(*dst, map + bt->start, blen); map 184 tools/perf/pmu-events/jevents.c static void cut_comma(char *map, jsmntok_t *newval) map 190 tools/perf/pmu-events/jevents.c if (map[i] == ',') map 195 tools/perf/pmu-events/jevents.c static int match_field(char *map, jsmntok_t *field, int nz, map 202 tools/perf/pmu-events/jevents.c if (json_streq(map, field, f->field) && nz) { map 203 tools/perf/pmu-events/jevents.c cut_comma(map, &newval); map 204 tools/perf/pmu-events/jevents.c addfield(map, event, ",", f->kernel, &newval); map 210 tools/perf/pmu-events/jevents.c static struct msrmap *lookup_msr(char *map, jsmntok_t *val) map 216 tools/perf/pmu-events/jevents.c cut_comma(map, &newval); map 218 tools/perf/pmu-events/jevents.c if (json_streq(map, &newval, msrmap[i].num)) map 223 tools/perf/pmu-events/jevents.c json_len(val), map + val->start); map 246 tools/perf/pmu-events/jevents.c static const char *field_to_perf(struct map *table, char *map, jsmntok_t *val) map 251 tools/perf/pmu-events/jevents.c if (json_streq(map, val, table[i].json)) map 262 tools/perf/pmu-events/jevents.c json_line(map, loc), \ map 517 tools/perf/pmu-events/jevents.c char *map; map 523 tools/perf/pmu-events/jevents.c tokens = parse_json(fn, &map, &size, &len); map 559 tools/perf/pmu-events/jevents.c nz = !json_streq(map, val, "0"); map 560 tools/perf/pmu-events/jevents.c if (match_field(map, field, nz, &event, val)) { map 562 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "EventCode")) { map 564 tools/perf/pmu-events/jevents.c addfield(map, &code, "", "", val); map 567 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "ExtSel")) { map 569 tools/perf/pmu-events/jevents.c addfield(map, &code, "", "", val); map 572 
tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "EventName")) { map 573 tools/perf/pmu-events/jevents.c addfield(map, &name, "", "", val); map 574 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "BriefDescription")) { map 575 tools/perf/pmu-events/jevents.c addfield(map, &desc, "", "", val); map 577 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, map 579 tools/perf/pmu-events/jevents.c addfield(map, &long_desc, "", "", val); map 581 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "PEBS") && nz) { map 583 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "MSRIndex") && nz) { map 584 tools/perf/pmu-events/jevents.c msr = lookup_msr(map, val); map 585 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "MSRValue")) { map 587 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "Errata") && map 588 tools/perf/pmu-events/jevents.c !json_streq(map, val, "null")) { map 589 tools/perf/pmu-events/jevents.c addfield(map, &extra_desc, ". ", map 591 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "Data_LA") && nz) { map 592 tools/perf/pmu-events/jevents.c addfield(map, &extra_desc, ". ", map 595 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "Unit")) { map 598 tools/perf/pmu-events/jevents.c ppmu = field_to_perf(unit_to_pmu, map, val); map 604 tools/perf/pmu-events/jevents.c addfield(map, &pmu, "", "", val); map 608 tools/perf/pmu-events/jevents.c addfield(map, &desc, ". ", "Unit: ", NULL); map 609 tools/perf/pmu-events/jevents.c addfield(map, &desc, "", pmu, NULL); map 610 tools/perf/pmu-events/jevents.c addfield(map, &desc, "", " ", NULL); map 611 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "Filter")) { map 612 tools/perf/pmu-events/jevents.c addfield(map, &filter, "", "", val); map 613 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "ScaleUnit")) { map 614 tools/perf/pmu-events/jevents.c addfield(map, &unit, "", "", val); map 615 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "PerPkg")) { map 616 tools/perf/pmu-events/jevents.c addfield(map, &perpkg, "", "", val); map 617 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "MetricName")) { map 618 tools/perf/pmu-events/jevents.c addfield(map, &metric_name, "", "", val); map 619 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "MetricGroup")) { map 620 tools/perf/pmu-events/jevents.c addfield(map, &metric_group, "", "", val); map 621 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "MetricExpr")) { map 622 tools/perf/pmu-events/jevents.c addfield(map, &metric_expr, "", "", val); map 625 tools/perf/pmu-events/jevents.c } else if (json_streq(map, field, "ArchStdEvent")) { map 626 tools/perf/pmu-events/jevents.c addfield(map, &arch_std, "", "", val); map 633 tools/perf/pmu-events/jevents.c if (json_streq(map, precise, "2")) map 634 tools/perf/pmu-events/jevents.c addfield(map, &extra_desc, " ", map 637 tools/perf/pmu-events/jevents.c addfield(map, &extra_desc, " ", map 641 tools/perf/pmu-events/jevents.c addfield(map, &event, ",", buf, NULL); map 643 tools/perf/pmu-events/jevents.c addfield(map, &desc, " ", extra_desc, NULL); map 645 tools/perf/pmu-events/jevents.c addfield(map, &long_desc, " ", extra_desc, NULL); map 647 tools/perf/pmu-events/jevents.c addfield(map, &event, ",", filter, NULL); map 649 tools/perf/pmu-events/jevents.c addfield(map, &event, ",", msr->pname, msrval); map 689 
tools/perf/pmu-events/jevents.c free_json(map, size, tokens); map 48 tools/perf/pmu-events/json.c char *map = NULL; map 63 tools/perf/pmu-events/json.c map = mmap(NULL, map 66 tools/perf/pmu-events/json.c if (map == MAP_FAILED) map 67 tools/perf/pmu-events/json.c map = NULL; map 70 tools/perf/pmu-events/json.c return map; map 73 tools/perf/pmu-events/json.c static void unmapfile(char *map, size_t size) map 76 tools/perf/pmu-events/json.c munmap(map, roundup(size, ps)); map 83 tools/perf/pmu-events/json.c jsmntok_t *parse_json(const char *fn, char **map, size_t *size, int *len) map 90 tools/perf/pmu-events/json.c *map = mapfile(fn, size); map 91 tools/perf/pmu-events/json.c if (!*map) map 99 tools/perf/pmu-events/json.c res = jsmn_parse(&parser, *map, *size, tokens, map 111 tools/perf/pmu-events/json.c unmapfile(*map, *size); map 115 tools/perf/pmu-events/json.c void free_json(char *map, size_t size, jsmntok_t *tokens) map 118 tools/perf/pmu-events/json.c unmapfile(map, size); map 121 tools/perf/pmu-events/json.c static int countchar(char *map, char c, int end) map 126 tools/perf/pmu-events/json.c if (map[i] == c) map 132 tools/perf/pmu-events/json.c int json_line(char *map, jsmntok_t *t) map 134 tools/perf/pmu-events/json.c return countchar(map, '\n', t->start) + 1; map 158 tools/perf/pmu-events/json.c int json_streq(char *map, jsmntok_t *t, const char *s) map 161 tools/perf/pmu-events/json.c return len == strlen(s) && !strncasecmp(map + t->start, s, len); map 7 tools/perf/pmu-events/json.h jsmntok_t *parse_json(const char *fn, char **map, size_t *size, int *len); map 8 tools/perf/pmu-events/json.h void free_json(char *map, size_t size, jsmntok_t *tokens); map 9 tools/perf/pmu-events/json.h int json_line(char *map, jsmntok_t *t); map 11 tools/perf/pmu-events/json.h int json_streq(char *map, jsmntok_t *t, const char *s); map 37 tools/perf/tests/backward-ring-buffer.c struct mmap *map = &evlist->overwrite_mmap[i]; map 40 tools/perf/tests/backward-ring-buffer.c perf_mmap__read_init(map); map 41 tools/perf/tests/backward-ring-buffer.c while ((event = perf_mmap__read_event(map)) != NULL) { map 56 tools/perf/tests/backward-ring-buffer.c perf_mmap__read_done(map); map 13 tools/perf/tests/bitmap.c struct perf_cpu_map *map = perf_cpu_map__new(str); map 19 tools/perf/tests/bitmap.c if (map && bm) { map 20 tools/perf/tests/bitmap.c for (i = 0; i < map->nr; i++) map 21 tools/perf/tests/bitmap.c set_bit(map->map[i], bm); map 24 tools/perf/tests/bitmap.c if (map) map 25 tools/perf/tests/bitmap.c perf_cpu_map__put(map); map 15 tools/perf/tests/bpf-script-example.c static void *(*bpf_map_lookup_elem)(void *map, void *key) = map 17 tools/perf/tests/bpf-script-example.c static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) = map 15 tools/perf/tests/bpf-script-test-relocation.c static void *(*bpf_map_lookup_elem)(void *map, void *key) = map 17 tools/perf/tests/bpf-script-test-relocation.c static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) = map 250 tools/perf/tests/code-reading.c if (!thread__find_map(thread, cpumode, addr, &al) || !al.map->dso) { map 260 tools/perf/tests/code-reading.c pr_debug("File is: %s\n", al.map->dso->long_name); map 262 tools/perf/tests/code-reading.c if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && map 263 tools/perf/tests/code-reading.c !dso__is_kcore(al.map->dso)) { map 274 tools/perf/tests/code-reading.c if (addr + len > al.map->end) map 275 tools/perf/tests/code-reading.c len = al.map->end - addr; map 278 
tools/perf/tests/code-reading.c ret_len = dso__data_read_offset(al.map->dso, thread->mg->machine, map 289 tools/perf/tests/code-reading.c if (map__load(al.map)) map 293 tools/perf/tests/code-reading.c if (dso__is_kcore(al.map->dso)) { map 297 tools/perf/tests/code-reading.c if (state->done[d] == al.map->start) { map 307 tools/perf/tests/code-reading.c state->done[state->done_cnt++] = al.map->start; map 310 tools/perf/tests/code-reading.c objdump_name = al.map->dso->long_name; map 311 tools/perf/tests/code-reading.c if (dso__needs_decompress(al.map->dso)) { map 312 tools/perf/tests/code-reading.c if (dso__decompress_kmodule_path(al.map->dso, objdump_name, map 324 tools/perf/tests/code-reading.c objdump_addr = map__rip_2objdump(al.map, al.addr); map 340 tools/perf/tests/code-reading.c } else if (dso__is_kcore(al.map->dso)) { map 573 tools/perf/tests/code-reading.c struct map *map; map 592 tools/perf/tests/code-reading.c map = machine__kernel_map(machine); map 593 tools/perf/tests/code-reading.c ret = map__load(map); map 598 tools/perf/tests/code-reading.c have_vmlinux = dso__is_vmlinux(map->dso); map 599 tools/perf/tests/code-reading.c have_kcore = dso__is_kcore(map->dso); map 22 tools/perf/tests/cpumap.c struct perf_cpu_map *map; map 37 tools/perf/tests/cpumap.c map = cpu_map__new_data(data); map 38 tools/perf/tests/cpumap.c TEST_ASSERT_VAL("wrong nr", map->nr == 20); map 41 tools/perf/tests/cpumap.c TEST_ASSERT_VAL("wrong cpu", map->map[i] == i); map 44 tools/perf/tests/cpumap.c perf_cpu_map__put(map); map 56 tools/perf/tests/cpumap.c struct perf_cpu_map *map; map 68 tools/perf/tests/cpumap.c map = cpu_map__new_data(data); map 69 tools/perf/tests/cpumap.c TEST_ASSERT_VAL("wrong nr", map->nr == 2); map 70 tools/perf/tests/cpumap.c TEST_ASSERT_VAL("wrong cpu", map->map[0] == 1); map 71 tools/perf/tests/cpumap.c TEST_ASSERT_VAL("wrong cpu", map->map[1] == 256); map 72 tools/perf/tests/cpumap.c TEST_ASSERT_VAL("wrong refcnt", refcount_read(&map->refcnt) == 1); map 73 tools/perf/tests/cpumap.c perf_cpu_map__put(map); map 102 tools/perf/tests/cpumap.c struct perf_cpu_map *map = perf_cpu_map__new(str); map 105 tools/perf/tests/cpumap.c if (!map) map 108 tools/perf/tests/cpumap.c cpu_map__snprint(map, buf, sizeof(buf)); map 70 tools/perf/tests/event_update.c struct perf_cpu_map *map; map 74 tools/perf/tests/event_update.c map = cpu_map__new_data(&ev_data->cpus); map 78 tools/perf/tests/event_update.c TEST_ASSERT_VAL("wrong cpus", map->nr == 3); map 79 tools/perf/tests/event_update.c TEST_ASSERT_VAL("wrong cpus", map->map[0] == 1); map 80 tools/perf/tests/event_update.c TEST_ASSERT_VAL("wrong cpus", map->map[1] == 2); map 81 tools/perf/tests/event_update.c TEST_ASSERT_VAL("wrong cpus", map->map[2] == 3); map 82 tools/perf/tests/event_update.c perf_cpu_map__put(map); map 184 tools/perf/tests/hists_common.c he->ms.map->dso->short_name, map 211 tools/perf/tests/hists_common.c he->ms.map->dso->short_name, map 21 tools/perf/tests/hists_cumulate.c struct map *map; map 115 tools/perf/tests/hists_cumulate.c fake_samples[i].map = al.map; map 153 tools/perf/tests/hists_cumulate.c #define DSO(he) (he->ms.map->dso->short_name) map 158 tools/perf/tests/hists_cumulate.c #define CDSO(cl) (cl->ms.map->dso->short_name) map 19 tools/perf/tests/hists_filter.c struct map *map; map 92 tools/perf/tests/hists_filter.c fake_samples[i].map = al.map; map 197 tools/perf/tests/hists_filter.c hists->dso_filter = fake_samples[0].map->dso; map 291 tools/perf/tests/hists_filter.c hists->dso_filter = fake_samples[1].map->dso; map 
19 tools/perf/tests/hists_link.c struct map *map; map 97 tools/perf/tests/hists_link.c fake_common_samples[k].map = al.map; map 116 tools/perf/tests/hists_link.c fake_samples[i][k].map = al.map; map 130 tools/perf/tests/hists_link.c struct thread *t, struct map *m, struct symbol *s) map 133 tools/perf/tests/hists_link.c if (samples->thread == t && samples->map == m && map 164 tools/perf/tests/hists_link.c he->thread, he->ms.map, he->ms.sym)) { map 216 tools/perf/tests/hists_link.c he->thread, he->ms.map, he->ms.sym) && map 219 tools/perf/tests/hists_link.c he->thread, he->ms.map, he->ms.sym)) { map 22 tools/perf/tests/hists_output.c struct map *map; map 81 tools/perf/tests/hists_output.c fake_samples[i].map = al.map; map 119 tools/perf/tests/hists_output.c #define DSO(he) (he->ms.map->dso->short_name) map 18 tools/perf/tests/map_groups.c struct map *map; map 21 tools/perf/tests/map_groups.c map = map_groups__first(mg); map 22 tools/perf/tests/map_groups.c while (map) { map 23 tools/perf/tests/map_groups.c TEST_ASSERT_VAL("wrong map start", map->start == merged[i].start); map 24 tools/perf/tests/map_groups.c TEST_ASSERT_VAL("wrong map end", map->end == merged[i].end); map 25 tools/perf/tests/map_groups.c TEST_ASSERT_VAL("wrong map name", !strcmp(map->dso->name, merged[i].name)); map 26 tools/perf/tests/map_groups.c TEST_ASSERT_VAL("wrong map refcnt", refcount_read(&map->refcnt) == 2); map 29 tools/perf/tests/map_groups.c map = map_groups__next(map); map 31 tools/perf/tests/map_groups.c TEST_ASSERT_VAL("less maps expected", (map && i < size) || (!map && i == size)); map 65 tools/perf/tests/map_groups.c struct map *map_kcore1, *map_kcore2, *map_kcore3; map 71 tools/perf/tests/map_groups.c struct map *map; map 73 tools/perf/tests/map_groups.c map = dso__new_map(bpf_progs[i].name); map 74 tools/perf/tests/map_groups.c TEST_ASSERT_VAL("failed to create map", map); map 76 tools/perf/tests/map_groups.c map->start = bpf_progs[i].start; map 77 tools/perf/tests/map_groups.c map->end = bpf_progs[i].end; map 78 tools/perf/tests/map_groups.c map_groups__insert(&mg, map); map 79 tools/perf/tests/map_groups.c map__put(map); map 15 tools/perf/tests/mem2node.c const char *map; map 17 tools/perf/tests/mem2node.c { .node = 0, .map = "0" }, map 18 tools/perf/tests/mem2node.c { .node = 1, .map = "1-2" }, map 19 tools/perf/tests/mem2node.c { .node = 3, .map = "5-7,9" }, map 26 tools/perf/tests/mem2node.c struct perf_cpu_map *map = perf_cpu_map__new(str); map 32 tools/perf/tests/mem2node.c if (map && bm) { map 33 tools/perf/tests/mem2node.c for (i = 0; i < map->nr; i++) { map 34 tools/perf/tests/mem2node.c set_bit(map->map[i], bm); map 38 tools/perf/tests/mem2node.c if (map) map 39 tools/perf/tests/mem2node.c perf_cpu_map__put(map); map 43 tools/perf/tests/mem2node.c return bm && map ? 
bm : NULL; map 48 tools/perf/tests/mem2node.c struct mem2node map; map 62 tools/perf/tests/mem2node.c (nodes[i].set = get_bitmap(test_nodes[i].map, 10))); map 65 tools/perf/tests/mem2node.c T("failed: mem2node__init", !mem2node__init(&map, &env)); map 66 tools/perf/tests/mem2node.c T("failed: mem2node__node", 0 == mem2node__node(&map, 0x50)); map 67 tools/perf/tests/mem2node.c T("failed: mem2node__node", 1 == mem2node__node(&map, 0x100)); map 68 tools/perf/tests/mem2node.c T("failed: mem2node__node", 1 == mem2node__node(&map, 0x250)); map 69 tools/perf/tests/mem2node.c T("failed: mem2node__node", 3 == mem2node__node(&map, 0x500)); map 70 tools/perf/tests/mem2node.c T("failed: mem2node__node", 3 == mem2node__node(&map, 0x650)); map 71 tools/perf/tests/mem2node.c T("failed: mem2node__node", -1 == mem2node__node(&map, 0x450)); map 72 tools/perf/tests/mem2node.c T("failed: mem2node__node", -1 == mem2node__node(&map, 0x1050)); map 77 tools/perf/tests/mem2node.c mem2node__exit(&map); map 61 tools/perf/tests/mmap-basic.c CPU_SET(cpus->map[0], &cpu_set); map 65 tools/perf/tests/mmap-basic.c cpus->map[0], str_error_r(errno, sbuf, sizeof(sbuf))); map 28 tools/perf/tests/mmap-thread-lookup.c void *map; map 36 tools/perf/tests/mmap-thread-lookup.c void *map; map 38 tools/perf/tests/mmap-thread-lookup.c map = mmap(NULL, page_size, map 42 tools/perf/tests/mmap-thread-lookup.c if (map == MAP_FAILED) { map 47 tools/perf/tests/mmap-thread-lookup.c td->map = map; map 50 tools/perf/tests/mmap-thread-lookup.c pr_debug("tid = %d, map = %p\n", td->tid, map); map 75 tools/perf/tests/mmap-thread-lookup.c munmap(td->map, page_size); map 122 tools/perf/tests/mmap-thread-lookup.c munmap(td0->map, page_size); map 143 tools/perf/tests/mmap-thread-lookup.c struct perf_thread_map *map; map 146 tools/perf/tests/mmap-thread-lookup.c map = thread_map__new_by_pid(getpid()); map 148 tools/perf/tests/mmap-thread-lookup.c err = perf_event__synthesize_thread_map(NULL, map, map 152 tools/perf/tests/mmap-thread-lookup.c perf_thread_map__put(map); map 192 tools/perf/tests/mmap-thread-lookup.c pr_debug("looking for map %p\n", td->map); map 195 tools/perf/tests/mmap-thread-lookup.c (unsigned long) (td->map + 1), &al); map 199 tools/perf/tests/mmap-thread-lookup.c if (!al.map) { map 205 tools/perf/tests/mmap-thread-lookup.c pr_debug("map %p, addr %" PRIx64 "\n", al.map, al.map->start); map 69 tools/perf/tests/openat-syscall-all-cpus.c if (cpus->map[cpu] >= CPU_SETSIZE) { map 70 tools/perf/tests/openat-syscall-all-cpus.c pr_debug("Ignoring CPU %d\n", cpus->map[cpu]); map 74 tools/perf/tests/openat-syscall-all-cpus.c CPU_SET(cpus->map[cpu], &cpu_set); map 77 tools/perf/tests/openat-syscall-all-cpus.c cpus->map[cpu], map 85 tools/perf/tests/openat-syscall-all-cpus.c CPU_CLR(cpus->map[cpu], &cpu_set); map 103 tools/perf/tests/openat-syscall-all-cpus.c if (cpus->map[cpu] >= CPU_SETSIZE) map 115 tools/perf/tests/openat-syscall-all-cpus.c expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val); map 24 tools/perf/tests/thread-map.c struct perf_thread_map *map; map 30 tools/perf/tests/thread-map.c map = thread_map__new_by_pid(getpid()); map 31 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("failed to alloc map", map); map 33 tools/perf/tests/thread-map.c thread_map__read_comms(map); map 35 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong nr", map->nr == 1); map 37 tools/perf/tests/thread-map.c perf_thread_map__pid(map, 0) == getpid()); map 39 tools/perf/tests/thread-map.c perf_thread_map__comm(map, 0) && map 40 
tools/perf/tests/thread-map.c !strcmp(perf_thread_map__comm(map, 0), NAME)); map 42 tools/perf/tests/thread-map.c refcount_read(&map->refcnt) == 1); map 43 tools/perf/tests/thread-map.c perf_thread_map__put(map); map 46 tools/perf/tests/thread-map.c map = perf_thread_map__new_dummy(); map 47 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("failed to alloc map", map); map 49 tools/perf/tests/thread-map.c thread_map__read_comms(map); map 51 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong nr", map->nr == 1); map 52 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong pid", perf_thread_map__pid(map, 0) == -1); map 54 tools/perf/tests/thread-map.c perf_thread_map__comm(map, 0) && map 55 tools/perf/tests/thread-map.c !strcmp(perf_thread_map__comm(map, 0), "dummy")); map 57 tools/perf/tests/thread-map.c refcount_read(&map->refcnt) == 1); map 58 tools/perf/tests/thread-map.c perf_thread_map__put(map); map 67 tools/perf/tests/thread-map.c struct perf_record_thread_map *map = &event->thread_map; map 70 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong nr", map->nr == 1); map 71 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong pid", map->entries[0].pid == (u64) getpid()); map 72 tools/perf/tests/thread-map.c TEST_ASSERT_VAL("wrong comm", !strcmp(map->entries[0].comm, NAME)); map 145 tools/perf/tests/thread-map.c zfree(&threads->map[i].comm); map 62 tools/perf/tests/topology.c static int check_cpu_topology(char *path, struct perf_cpu_map *map) map 98 tools/perf/tests/topology.c if (!cpu_map__has(map, i)) map 105 tools/perf/tests/topology.c for (i = 0; i < map->nr; i++) { map 107 tools/perf/tests/topology.c (session->header.env.cpu[map->map[i]].core_id == (cpu_map__get_core(map, i, NULL) & 0xffff))); map 110 tools/perf/tests/topology.c (session->header.env.cpu[map->map[i]].socket_id == cpu_map__get_socket(map, i, NULL))); map 121 tools/perf/tests/topology.c struct perf_cpu_map *map; map 131 tools/perf/tests/topology.c map = perf_cpu_map__new(NULL); map 132 tools/perf/tests/topology.c if (map == NULL) { map 137 tools/perf/tests/topology.c ret = check_cpu_topology(path, map); map 138 tools/perf/tests/topology.c perf_cpu_map__put(map); map 22 tools/perf/tests/vmlinux-kallsyms.c struct map *kallsyms_map, *vmlinux_map, *map; map 185 tools/perf/tests/vmlinux-kallsyms.c for (map = maps__first(maps); map; map = map__next(map)) { map 186 tools/perf/tests/vmlinux-kallsyms.c struct map * map 194 tools/perf/tests/vmlinux-kallsyms.c (map->dso->kernel ? 
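
Aside: the libperf declarations indexed above (perf_cpu_map__get/__put/__empty/__max in tools/perf/lib/include/perf/cpumap.h and the perf_thread_map accessors in tools/perf/lib/include/perf/threadmap.h) are easiest to read next to a small usage example. The sketch below mirrors what the tools/perf/tests/thread-map.c and tools/perf/tests/topology.c entries exercise; it is written only from the declarations listed in this index, so the include paths, the link line and perf_cpu_map__nr() (which this index does not show) are assumptions rather than code taken from the perf tree.

/*
 * Minimal sketch of the libperf map APIs indexed above.
 * Assumption: built against an installed libperf, e.g.  cc map-sketch.c -lperf
 */
#include <stdio.h>
#include <unistd.h>
#include <perf/cpumap.h>
#include <perf/threadmap.h>

int main(void)
{
	/* NULL asks libperf for all online CPUs, as tests/topology.c does. */
	struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
	/* A dummy thread map holds one entry; point it at this process. */
	struct perf_thread_map *threads = perf_thread_map__new_dummy();
	int idx;

	if (!cpus || !threads)
		return 1;

	perf_thread_map__set_pid(threads, 0, getpid());

	/* perf_cpu_map__nr()/__cpu() are assumed from the libperf API. */
	for (idx = 0; idx < perf_cpu_map__nr(cpus); idx++)
		printf("cpu[%d] = %d\n", idx, perf_cpu_map__cpu(cpus, idx));

	printf("max cpu: %d, empty: %d, pid[0]: %d\n",
	       perf_cpu_map__max(cpus), perf_cpu_map__empty(cpus),
	       (int)perf_thread_map__pid(threads, 0));

	/* Both map types are reference counted; __put() drops our reference. */
	perf_cpu_map__put(cpus);
	perf_thread_map__put(threads);
	return 0;
}

Both map types are reference counted, which is why every constructor or __get() in the listing is paired with a __put() rather than a free().
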
map 195 tools/perf/tests/vmlinux-kallsyms.c map->dso->short_name : map 196 tools/perf/tests/vmlinux-kallsyms.c map->dso->name)); map 204 tools/perf/tests/vmlinux-kallsyms.c map__fprintf(map, stderr); map 210 tools/perf/tests/vmlinux-kallsyms.c for (map = maps__first(maps); map; map = map__next(map)) { map 211 tools/perf/tests/vmlinux-kallsyms.c struct map *pair; map 213 tools/perf/tests/vmlinux-kallsyms.c mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start); map 214 tools/perf/tests/vmlinux-kallsyms.c mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end); map 227 tools/perf/tests/vmlinux-kallsyms.c map->start, map->end, map->pgoff, map->dso->name); map 240 tools/perf/tests/vmlinux-kallsyms.c for (map = maps__first(maps); map; map = map__next(map)) { map 241 tools/perf/tests/vmlinux-kallsyms.c if (!map->priv) { map 246 tools/perf/tests/vmlinux-kallsyms.c map__fprintf(map, stderr); map 393 tools/perf/ui/browsers/annotate.c static int sym_title(struct symbol *sym, struct map *map, char *title, map 396 tools/perf/ui/browsers/annotate.c return snprintf(title, sz, "%s %s [Percent: %s]", sym->name, map->dso->long_name, map 434 tools/perf/ui/browsers/annotate.c symbol__tui_annotate(dl->ops.target.sym, ms->map, evsel, hbt, browser->opts); map 435 tools/perf/ui/browsers/annotate.c sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type); map 618 tools/perf/ui/browsers/annotate.c sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type); map 877 tools/perf/ui/browsers/annotate.c return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt, opts); map 891 tools/perf/ui/browsers/annotate.c int symbol__tui_annotate(struct symbol *sym, struct map *map, map 898 tools/perf/ui/browsers/annotate.c .map = map, map 918 tools/perf/ui/browsers/annotate.c if (map->dso->annotate_warned) map 921 tools/perf/ui/browsers/annotate.c err = symbol__annotate2(sym, map, evsel, opts, &browser.arch); map 924 tools/perf/ui/browsers/annotate.c symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg)); map 2403 tools/perf/ui/browsers/hists.c struct map *map, struct symbol *sym) map 2405 tools/perf/ui/browsers/hists.c if (sym == NULL || map->dso->annotate_warned) map 2411 tools/perf/ui/browsers/hists.c act->ms.map = map; map 2482 tools/perf/ui/browsers/hists.c struct map *map = act->ms.map; map 2484 tools/perf/ui/browsers/hists.c if (!hists__has(browser->hists, dso) || map == NULL) map 2494 tools/perf/ui/browsers/hists.c __map__is_kernel(map) ? "the Kernel" : map->dso->short_name); map 2495 tools/perf/ui/browsers/hists.c browser->hists->dso_filter = map->dso; map 2507 tools/perf/ui/browsers/hists.c char **optstr, struct map *map) map 2509 tools/perf/ui/browsers/hists.c if (!hists__has(browser->hists, dso) || map == NULL) map 2514 tools/perf/ui/browsers/hists.c __map__is_kernel(map) ? 
"the Kernel" : map->dso->short_name) < 0) map 2517 tools/perf/ui/browsers/hists.c act->ms.map = map; map 2526 tools/perf/ui/browsers/hists.c map__browse(act->ms.map); map 2532 tools/perf/ui/browsers/hists.c struct popup_action *act, char **optstr, struct map *map) map 2534 tools/perf/ui/browsers/hists.c if (!hists__has(browser->hists, dso) || map == NULL) map 2540 tools/perf/ui/browsers/hists.c act->ms.map = map; map 2908 tools/perf/ui/browsers/hists.c struct map *map = NULL; map 2919 tools/perf/ui/browsers/hists.c map = browser->selection->map; map 2942 tools/perf/ui/browsers/hists.c browser->selection->map->dso->annotate_warned) map 2945 tools/perf/ui/browsers/hists.c actions->ms.map = browser->selection->map; map 2953 tools/perf/ui/browsers/hists.c actions->ms.map = map; map 3057 tools/perf/ui/browsers/hists.c actions->ms.map = map; map 3114 tools/perf/ui/browsers/hists.c bi->from.map, map 3120 tools/perf/ui/browsers/hists.c bi->to.map, map 3126 tools/perf/ui/browsers/hists.c browser->selection->map, map 3133 tools/perf/ui/browsers/hists.c &options[nr_options], map); map 3137 tools/perf/ui/browsers/hists.c browser->selection->map : NULL); map 21 tools/perf/ui/browsers/map.c struct map *map; map 60 tools/perf/ui/browsers/map.c sym = map__find_symbol(browser->map, addr); map 62 tools/perf/ui/browsers/map.c sym = map__find_symbol_by_name(browser->map, target); map 79 tools/perf/ui/browsers/map.c if (ui_browser__show(&browser->b, browser->map->dso->long_name, map 105 tools/perf/ui/browsers/map.c int map__browse(struct map *map) map 109 tools/perf/ui/browsers/map.c .entries = &map->dso->symbols, map 114 tools/perf/ui/browsers/map.c .map = map, map 4 tools/perf/ui/browsers/map.h struct map; map 6 tools/perf/ui/browsers/map.h int map__browse(struct map *map); map 58 tools/perf/ui/gtk/annotate.c struct map *map, struct disasm_line *dl) map 60 tools/perf/ui/gtk/annotate.c u64 start = map__rip_2objdump(map, sym->start); map 95 tools/perf/ui/gtk/annotate.c struct map *map, struct evsel *evsel, map 147 tools/perf/ui/gtk/annotate.c if (perf_gtk__get_offset(s, sizeof(s), sym, map, pos)) map 163 tools/perf/ui/gtk/annotate.c static int symbol__gtk_annotate(struct symbol *sym, struct map *map, map 173 tools/perf/ui/gtk/annotate.c if (map->dso->annotate_warned) map 176 tools/perf/ui/gtk/annotate.c err = symbol__annotate(sym, map, evsel, 0, &annotation__default_options, NULL); map 179 tools/perf/ui/gtk/annotate.c symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg)); map 237 tools/perf/ui/gtk/annotate.c perf_gtk__annotate_symbol(scrolled_window, sym, map, evsel, hbt); map 245 tools/perf/ui/gtk/annotate.c return symbol__gtk_annotate(he->ms.sym, he->ms.map, evsel, hbt); map 860 tools/perf/ui/stdio/hist.c if (h->ms.map == NULL && verbose > 1) { map 243 tools/perf/util/annotate.c struct map *map = ms->map; map 245 tools/perf/util/annotate.c .map = map, map 271 tools/perf/util/annotate.c target.addr = map__objdump_2mem(map, ops->target.addr); map 274 tools/perf/util/annotate.c map__rip_2objdump(target.map, map->map_ip(target.map, target.addr)) == ops->target.addr) map 331 tools/perf/util/annotate.c struct map *map = ms->map; map 334 tools/perf/util/annotate.c .map = map, map 369 tools/perf/util/annotate.c target.addr = map__objdump_2mem(map, ops->target.addr); map 370 tools/perf/util/annotate.c start = map->unmap_ip(map, sym->start), map 371 tools/perf/util/annotate.c end = map->unmap_ip(map, sym->end); map 394 tools/perf/util/annotate.c map__rip_2objdump(target.map, map->map_ip(target.map, 
target.addr)) == ops->target.addr) map 863 tools/perf/util/annotate.c static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map, map 870 tools/perf/util/annotate.c pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr)); map 937 tools/perf/util/annotate.c static int symbol__inc_addr_samples(struct symbol *sym, struct map *map, map 946 tools/perf/util/annotate.c return (src) ? __symbol__inc_addr_samples(sym, map, src, evsel->idx, map 997 tools/perf/util/annotate.c start->addr == ams->sym->start + ams->map->start))) map 1003 tools/perf/util/annotate.c ams->sym ? ams->sym->start + ams->map->start : 0, map 1091 tools/perf/util/annotate.c return symbol__inc_addr_samples(ams->sym, ams->map, evsel, ams->al_addr, sample); map 1097 tools/perf/util/annotate.c return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evsel, ip, sample); map 1492 tools/perf/util/annotate.c struct map *map = args->ms.map; map 1526 tools/perf/util/annotate.c u64 start = map__rip_2objdump(map, sym->start), map 1527 tools/perf/util/annotate.c end = map__rip_2objdump(map, sym->end); map 1550 tools/perf/util/annotate.c map__rip_2objdump(map, sym->start); map 1557 tools/perf/util/annotate.c .map = map, map 1600 tools/perf/util/annotate.c int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *map, map 1603 tools/perf/util/annotate.c struct dso *dso = map->dso; map 1721 tools/perf/util/annotate.c struct map *map = args->ms.map; map 1723 tools/perf/util/annotate.c struct dso *dso = map->dso; map 1863 tools/perf/util/annotate.c struct map *map = args->ms.map; map 1864 tools/perf/util/annotate.c struct dso *dso = map->dso; map 1881 tools/perf/util/annotate.c symfs_filename, sym->name, map->unmap_ip(map, sym->start), map 1882 tools/perf/util/annotate.c map->unmap_ip(map, sym->end)); map 1891 tools/perf/util/annotate.c kce.addr = map__rip_2objdump(map, sym->start); map 1917 tools/perf/util/annotate.c map__rip_2objdump(map, sym->start), map 1918 tools/perf/util/annotate.c map__rip_2objdump(map, sym->end), map 2074 tools/perf/util/annotate.c int symbol__annotate(struct symbol *sym, struct map *map, map 2108 tools/perf/util/annotate.c args.ms.map = map; map 2110 tools/perf/util/annotate.c notes->start = map__rip_2objdump(map, sym->start); map 2266 tools/perf/util/annotate.c int symbol__annotate_printf(struct symbol *sym, struct map *map, map 2270 tools/perf/util/annotate.c struct dso *dso = map->dso; map 2277 tools/perf/util/annotate.c u64 start = map__rip_2objdump(map, sym->start); map 2453 tools/perf/util/annotate.c ms->sym->name, ms->map->dso->long_name, ev_name); map 2641 tools/perf/util/annotate.c static void annotation__calc_lines(struct annotation *notes, struct map *map, map 2665 tools/perf/util/annotate.c al->path = get_srcline(map->dso, notes->start + al->offset, NULL, map 2673 tools/perf/util/annotate.c static void symbol__calc_lines(struct symbol *sym, struct map *map, map 2679 tools/perf/util/annotate.c annotation__calc_lines(notes, map, root, opts); map 2682 tools/perf/util/annotate.c int symbol__tty_annotate2(struct symbol *sym, struct map *map, map 2686 tools/perf/util/annotate.c struct dso *dso = map->dso; map 2691 tools/perf/util/annotate.c if (symbol__annotate2(sym, map, evsel, opts, NULL) < 0) map 2696 tools/perf/util/annotate.c symbol__calc_lines(sym, map, &source_line, opts); map 2710 tools/perf/util/annotate.c int symbol__tty_annotate(struct symbol *sym, struct map *map, map 2714 tools/perf/util/annotate.c struct dso *dso = map->dso; map 2717 
tools/perf/util/annotate.c if (symbol__annotate(sym, map, evsel, 0, opts, NULL) < 0) map 2724 tools/perf/util/annotate.c symbol__calc_lines(sym, map, &source_line, opts); map 2728 tools/perf/util/annotate.c symbol__annotate_printf(sym, map, evsel, opts); map 2982 tools/perf/util/annotate.c int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel, map 2996 tools/perf/util/annotate.c err = symbol__annotate(sym, map, evsel, 0, options, parch); map 18 tools/perf/util/annotate.h struct map; map 350 tools/perf/util/annotate.h int symbol__annotate(struct symbol *sym, struct map *map, map 354 tools/perf/util/annotate.h int symbol__annotate2(struct symbol *sym, struct map *map, map 381 tools/perf/util/annotate.h int symbol__strerror_disassemble(struct symbol *sym, struct map *map, map 384 tools/perf/util/annotate.h int symbol__annotate_printf(struct symbol *sym, struct map *map, map 396 tools/perf/util/annotate.h int symbol__tty_annotate(struct symbol *sym, struct map *map, map 399 tools/perf/util/annotate.h int symbol__tty_annotate2(struct symbol *sym, struct map *map, map 403 tools/perf/util/annotate.h int symbol__tui_annotate(struct symbol *sym, struct map *map, map 409 tools/perf/util/annotate.h struct map *map __maybe_unused, map 136 tools/perf/util/auxtrace.c mp->cpu = evlist->core.cpus->map[idx]; map 1231 tools/perf/util/auxtrace.c static int __auxtrace_mmap__read(struct mmap *map, map 1236 tools/perf/util/auxtrace.c struct auxtrace_mmap *mm = &map->auxtrace_mmap; map 1323 tools/perf/util/auxtrace.c if (fn(tool, map, &ev, data1, len1, data2, len2)) map 1342 tools/perf/util/auxtrace.c int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr, map 1345 tools/perf/util/auxtrace.c return __auxtrace_mmap__read(map, itr, tool, fn, false, 0); map 1348 tools/perf/util/auxtrace.c int auxtrace_mmap__read_snapshot(struct mmap *map, map 1353 tools/perf/util/auxtrace.c return __auxtrace_mmap__read(map, itr, tool, fn, true, snapshot_size); map 1922 tools/perf/util/auxtrace.c struct map *map; map 1925 tools/perf/util/auxtrace.c map = dso__new_map(name); map 1926 tools/perf/util/auxtrace.c if (!map) map 1929 tools/perf/util/auxtrace.c if (map__load(map) < 0) map 1932 tools/perf/util/auxtrace.c dso = dso__get(map->dso); map 1934 tools/perf/util/auxtrace.c map__put(map); map 448 tools/perf/util/auxtrace.h struct mmap *map, map 452 tools/perf/util/auxtrace.h int auxtrace_mmap__read(struct mmap *map, struct auxtrace_record *itr, map 455 tools/perf/util/auxtrace.h int auxtrace_mmap__read_snapshot(struct mmap *map, map 55 tools/perf/util/bpf-event.c struct map *map; map 57 tools/perf/util/bpf-event.c map = map_groups__find(&machine->kmaps, addr); map 59 tools/perf/util/bpf-event.c if (map) { map 60 tools/perf/util/bpf-event.c map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO; map 61 tools/perf/util/bpf-event.c map->dso->bpf_prog.id = id; map 62 tools/perf/util/bpf-event.c map->dso->bpf_prog.sub_id = i; map 63 tools/perf/util/bpf-event.c map->dso->bpf_prog.env = env; map 853 tools/perf/util/bpf-loader.c bpf_map_priv__clear(struct bpf_map *map __maybe_unused, map 958 tools/perf/util/bpf-loader.c bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op) map 960 tools/perf/util/bpf-loader.c const char *map_name = bpf_map__name(map); map 961 tools/perf/util/bpf-loader.c struct bpf_map_priv *priv = bpf_map__priv(map); map 976 tools/perf/util/bpf-loader.c if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) { map 987 tools/perf/util/bpf-loader.c bpf_map__add_newop(struct 
bpf_map *map, struct parse_events_term *term) map 996 tools/perf/util/bpf-loader.c err = bpf_map__add_op(map, op); map 1005 tools/perf/util/bpf-loader.c __bpf_map__config_value(struct bpf_map *map, map 1009 tools/perf/util/bpf-loader.c const char *map_name = bpf_map__name(map); map 1010 tools/perf/util/bpf-loader.c const struct bpf_map_def *def = bpf_map__def(map); map 1038 tools/perf/util/bpf-loader.c op = bpf_map__add_newop(map, term); map 1047 tools/perf/util/bpf-loader.c bpf_map__config_value(struct bpf_map *map, map 1061 tools/perf/util/bpf-loader.c return __bpf_map__config_value(map, term); map 1065 tools/perf/util/bpf-loader.c __bpf_map__config_event(struct bpf_map *map, map 1072 tools/perf/util/bpf-loader.c const char *map_name = bpf_map__name(map); map 1081 tools/perf/util/bpf-loader.c def = bpf_map__def(map); map 1098 tools/perf/util/bpf-loader.c op = bpf_map__add_newop(map, term); map 1107 tools/perf/util/bpf-loader.c bpf_map__config_event(struct bpf_map *map, map 1121 tools/perf/util/bpf-loader.c return __bpf_map__config_event(map, term, evlist); map 1137 tools/perf/util/bpf-loader.c struct bpf_map *map, map 1152 tools/perf/util/bpf-loader.c def = bpf_map__def(map); map 1180 tools/perf/util/bpf-loader.c struct bpf_map *map; map 1200 tools/perf/util/bpf-loader.c map = bpf_object__find_map_by_name(obj, map_name); map 1201 tools/perf/util/bpf-loader.c if (!map) { map 1208 tools/perf/util/bpf-loader.c err = config_map_indices_range_check(term, map, map_name); map 1218 tools/perf/util/bpf-loader.c err = func->config_func(map, term, evlist); map 1309 tools/perf/util/bpf-loader.c bpf_map_config_foreach_key(struct bpf_map *map, map 1316 tools/perf/util/bpf-loader.c const char *name = bpf_map__name(map); map 1317 tools/perf/util/bpf-loader.c struct bpf_map_priv *priv = bpf_map__priv(map); map 1328 tools/perf/util/bpf-loader.c def = bpf_map__def(map); map 1333 tools/perf/util/bpf-loader.c map_fd = bpf_map__fd(map); map 1484 tools/perf/util/bpf-loader.c apply_obj_config_map(struct bpf_map *map) map 1486 tools/perf/util/bpf-loader.c return bpf_map_config_foreach_key(map, map 1494 tools/perf/util/bpf-loader.c struct bpf_map *map; map 1497 tools/perf/util/bpf-loader.c bpf_object__for_each_map(map, obj) { map 1498 tools/perf/util/bpf-loader.c err = apply_obj_config_map(map); map 1534 tools/perf/util/bpf-loader.c struct bpf_map *map; map 1538 tools/perf/util/bpf-loader.c bpf__for_each_map_named(map, obj, tmp, name) { map 1539 tools/perf/util/bpf-loader.c struct bpf_map_priv *priv = bpf_map__priv(map); map 1574 tools/perf/util/bpf-loader.c bpf__for_each_map_named(map, obj, tmp, name) { map 1575 tools/perf/util/bpf-loader.c struct bpf_map_priv *priv = bpf_map__priv(map); map 1587 tools/perf/util/bpf-loader.c err = bpf_map__set_priv(map, priv, bpf_map_priv__clear); map 1589 tools/perf/util/bpf-loader.c bpf_map_priv__clear(map, priv); map 1595 tools/perf/util/bpf-loader.c op = bpf_map__add_newop(map, NULL); map 28 tools/perf/util/bpf_map.c int bpf_map__fprintf(struct bpf_map *map, FILE *fp) map 30 tools/perf/util/bpf_map.c const struct bpf_map_def *def = bpf_map__def(map); map 32 tools/perf/util/bpf_map.c int fd = bpf_map__fd(map), err; map 11 tools/perf/util/bpf_map.h int bpf_map__fprintf(struct bpf_map *map, FILE *fp); map 15 tools/perf/util/bpf_map.h static inline int bpf_map__fprintf(struct bpf_map *map __maybe_unused, FILE *fp __maybe_unused) map 56 tools/perf/util/build-id.c al.map->dso->hit = 1; map 586 tools/perf/util/callchain.c call->ms.map = map__get(cursor_node->map); map 645 
tools/perf/util/callchain.c map__zput(call->ms.map); map 695 tools/perf/util/callchain.c static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip, map 696 tools/perf/util/callchain.c struct map *right_map, u64 right_ip) map 736 tools/perf/util/callchain.c match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start, map 737 tools/perf/util/callchain.c node->map, node->sym->start); map 745 tools/perf/util/callchain.c match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip); map 1008 tools/perf/util/callchain.c list->ms.map, list->ms.sym, map 1011 tools/perf/util/callchain.c map__zput(list->ms.map); map 1047 tools/perf/util/callchain.c u64 ip, struct map *map, struct symbol *sym, map 1063 tools/perf/util/callchain.c map__zput(node->map); map 1064 tools/perf/util/callchain.c node->map = map__get(map); map 1110 tools/perf/util/callchain.c al->map = node->map; map 1118 tools/perf/util/callchain.c if (al->map == NULL) map 1122 tools/perf/util/callchain.c if (al->map->groups == &al->machine->kmaps) { map 1169 tools/perf/util/callchain.c cl->ms.map ? map 1170 tools/perf/util/callchain.c cl->ms.map->dso->short_name : map 1462 tools/perf/util/callchain.c map__zput(list->ms.map); map 1468 tools/perf/util/callchain.c map__zput(list->ms.map); map 1532 tools/perf/util/callchain.c map__get(new->ms.map); map 1553 tools/perf/util/callchain.c map__zput(chain->ms.map); map 1574 tools/perf/util/callchain.c rc = callchain_cursor_append(dst, node->ip, node->map, node->sym, map 1600 tools/perf/util/callchain.c map__zput(node->map); map 13 tools/perf/util/callchain.h struct map; map 144 tools/perf/util/callchain.h struct map *map; map 198 tools/perf/util/callchain.h struct map *map, struct symbol *sym, map 23 tools/perf/util/cpumap.c struct perf_cpu_map *map; map 25 tools/perf/util/cpumap.c map = perf_cpu_map__empty_new(cpus->nr); map 26 tools/perf/util/cpumap.c if (map) { map 36 tools/perf/util/cpumap.c map->map[i] = -1; map 38 tools/perf/util/cpumap.c map->map[i] = (int) cpus->cpu[i]; map 42 tools/perf/util/cpumap.c return map; map 47 tools/perf/util/cpumap.c struct perf_cpu_map *map; map 52 tools/perf/util/cpumap.c map = perf_cpu_map__empty_new(nr); map 53 tools/perf/util/cpumap.c if (map) { map 57 tools/perf/util/cpumap.c map->map[i++] = cpu; map 59 tools/perf/util/cpumap.c return map; map 71 tools/perf/util/cpumap.c size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp) map 76 tools/perf/util/cpumap.c cpu_map__snprint(map, buf, sizeof(buf)); map 90 tools/perf/util/cpumap.c cpus->map[i] = -1; map 114 tools/perf/util/cpumap.c int cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data __maybe_unused) map 118 tools/perf/util/cpumap.c if (idx > map->nr) map 121 tools/perf/util/cpumap.c cpu = map->map[idx]; map 132 tools/perf/util/cpumap.c int (*f)(struct perf_cpu_map *map, int cpu, void *data), map 147 tools/perf/util/cpumap.c if (s1 == c->map[s2]) map 151 tools/perf/util/cpumap.c c->map[c->nr] = s1; map 156 tools/perf/util/cpumap.c qsort(c->map, c->nr, sizeof(int), cmp_ids); map 170 tools/perf/util/cpumap.c int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data) map 174 tools/perf/util/cpumap.c if (idx > map->nr) map 177 tools/perf/util/cpumap.c cpu = map->map[idx]; map 184 tools/perf/util/cpumap.c s = cpu_map__get_socket(map, idx, data); map 209 tools/perf/util/cpumap.c int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data) map 213 tools/perf/util/cpumap.c if (idx > map->nr) map 216 tools/perf/util/cpumap.c 
cpu = map->map[idx]; map 221 tools/perf/util/cpumap.c s_die = cpu_map__get_die(map, idx, data); map 467 tools/perf/util/cpumap.c return cpus->map[idx]; map 470 tools/perf/util/cpumap.c size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size) map 478 tools/perf/util/cpumap.c for (i = 0; i < map->nr + 1; i++) { map 479 tools/perf/util/cpumap.c bool last = i == map->nr; map 481 tools/perf/util/cpumap.c cpu = last ? INT_MAX : map->map[i]; map 488 tools/perf/util/cpumap.c map->map[i]); map 490 tools/perf/util/cpumap.c } else if (((i - start) != (cpu - map->map[start])) || last) { map 496 tools/perf/util/cpumap.c map->map[start]); map 500 tools/perf/util/cpumap.c map->map[start], map->map[end]); map 522 tools/perf/util/cpumap.c size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size) map 527 tools/perf/util/cpumap.c int last_cpu = cpu_map__cpu(map, map->nr - 1); map 538 tools/perf/util/cpumap.c for (i = 0; i < map->nr; i++) { map 539 tools/perf/util/cpumap.c cpu = cpu_map__cpu(map, i); map 14 tools/perf/util/cpumap.h size_t cpu_map__snprint(struct perf_cpu_map *map, char *buf, size_t size); map 15 tools/perf/util/cpumap.h size_t cpu_map__snprint_mask(struct perf_cpu_map *map, char *buf, size_t size); map 16 tools/perf/util/cpumap.h size_t cpu_map__fprintf(struct perf_cpu_map *map, FILE *fp); map 18 tools/perf/util/cpumap.h int cpu_map__get_socket(struct perf_cpu_map *map, int idx, void *data); map 20 tools/perf/util/cpumap.h int cpu_map__get_die(struct perf_cpu_map *map, int idx, void *data); map 22 tools/perf/util/cpumap.h int cpu_map__get_core(struct perf_cpu_map *map, int idx, void *data); map 32 tools/perf/util/cpumap.h return sock->map[s]; map 58 tools/perf/util/cpumap.h int (*f)(struct perf_cpu_map *map, int cpu, void *data), map 182 tools/perf/util/cputopo.c struct perf_cpu_map *map; map 188 tools/perf/util/cputopo.c map = perf_cpu_map__new(NULL); map 189 tools/perf/util/cputopo.c if (map == NULL) { map 216 tools/perf/util/cputopo.c if (!cpu_map__has(map, i)) map 225 tools/perf/util/cputopo.c perf_cpu_map__put(map); map 331 tools/perf/util/cputopo.c if (load_numa_node(&tp->nodes[i], node_map->map[i])) { map 665 tools/perf/util/cs-etm.c if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso) map 668 tools/perf/util/cs-etm.c if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && map 669 tools/perf/util/cs-etm.c dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE)) map 672 tools/perf/util/cs-etm.c offset = al.map->map_ip(al.map, address); map 674 tools/perf/util/cs-etm.c map__load(al.map); map 676 tools/perf/util/cs-etm.c len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size); map 181 tools/perf/util/db-export.c if (al->map) { map 182 tools/perf/util/db-export.c struct dso *dso = al->map->dso; map 253 tools/perf/util/db-export.c al.map = node->map; map 257 tools/perf/util/db-export.c if (al.map && !al.sym) map 258 tools/perf/util/db-export.c al.sym = dso__find_symbol(al.map->dso, al.addr); map 1060 tools/perf/util/dso.c ssize_t dso__data_read_addr(struct dso *dso, struct map *map, map 1064 tools/perf/util/dso.c u64 offset = map->map_ip(map, addr); map 1068 tools/perf/util/dso.c struct map *dso__new_map(const char *name) map 1070 tools/perf/util/dso.c struct map *map = NULL; map 1074 tools/perf/util/dso.c map = map__new2(0, dso); map 1076 tools/perf/util/dso.c return map; map 16 tools/perf/util/dso.h struct map; map 331 tools/perf/util/dso.h ssize_t dso__data_read_addr(struct dso *dso, struct map *map, map 336 
tools/perf/util/dso.h struct map *dso__new_map(const char *name); map 184 tools/perf/util/env.c perf_cpu_map__put(env->numa_nodes[i].map); map 293 tools/perf/util/env.c zfree(&cache->map); map 24 tools/perf/util/env.h char *map; map 31 tools/perf/util/env.h struct perf_cpu_map *map; map 457 tools/perf/util/event.c struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, map 471 tools/perf/util/event.c al->map = NULL; map 489 tools/perf/util/event.c al->map = NULL; map 503 tools/perf/util/event.c al->map = map_groups__find(mg, al->addr); map 504 tools/perf/util/event.c if (al->map != NULL) { map 510 tools/perf/util/event.c map__load(al->map); map 511 tools/perf/util/event.c al->addr = al->map->map_ip(al->map, al->addr); map 514 tools/perf/util/event.c return al->map; map 522 tools/perf/util/event.c struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr, map 525 tools/perf/util/event.c struct map *map = thread__find_map(thread, cpumode, addr, al); map 529 tools/perf/util/event.c if (map || addr_cpumode == cpumode) map 530 tools/perf/util/event.c return map; map 540 tools/perf/util/event.c al->sym = map__find_symbol(al->map, al->addr); map 549 tools/perf/util/event.c al->sym = map__find_symbol(al->map, al->addr); map 569 tools/perf/util/event.c al->map ? al->map->dso->long_name : map 587 tools/perf/util/event.c if (al->map) { map 588 tools/perf/util/event.c struct dso *dso = al->map->dso; map 599 tools/perf/util/event.c al->sym = map__find_symbol(al->map, al->addr); map 651 tools/perf/util/event.c if (al->map) map 652 tools/perf/util/event.c al->sym = map__find_symbol(al->map, al->addr); map 380 tools/perf/util/event.h void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max); map 381 tools/perf/util/event.h void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map, map 409 tools/perf/util/evlist.c struct mmap *map = fda->priv[fd].ptr; map 411 tools/perf/util/evlist.c if (map) map 412 tools/perf/util/evlist.c perf_mmap__put(map); map 433 tools/perf/util/evlist.c sid->cpu = evlist->core.cpus->map[cpu]; map 598 tools/perf/util/evlist.c struct mmap *map; map 603 tools/perf/util/evlist.c map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap)); map 604 tools/perf/util/evlist.c if (!map) map 608 tools/perf/util/evlist.c map[i].core.fd = -1; map 609 tools/perf/util/evlist.c map[i].core.overwrite = overwrite; map 619 tools/perf/util/evlist.c refcount_set(&map[i].core.refcnt, 0); map 621 tools/perf/util/evlist.c return map; map 1729 tools/perf/util/evlist.c struct mmap *map = &evlist->mmap[i]; map 1732 tools/perf/util/evlist.c if (perf_mmap__read_init(map)) map 1734 tools/perf/util/evlist.c while ((event = perf_mmap__read_event(map)) != NULL) { map 1742 tools/perf/util/evlist.c perf_mmap__consume(map); map 1745 tools/perf/util/evlist.c perf_mmap__read_done(map); map 1662 tools/perf/util/evsel.c fd = perf_event_open(evsel, pid, cpus->map[cpu], map 145 tools/perf/util/evsel_fprintf.c if (node->map) map 146 tools/perf/util/evsel_fprintf.c addr = node->map->map_ip(node->map, node->ip); map 151 tools/perf/util/evsel_fprintf.c node_al.map = node->map; map 165 tools/perf/util/evsel_fprintf.c printed += map__fprintf_dsoname(node->map, fp); map 170 tools/perf/util/evsel_fprintf.c printed += map__fprintf_srcline(node->map, addr, "\n ", fp); map 228 tools/perf/util/evsel_fprintf.c printed += map__fprintf_dsoname(al->map, fp); map 233 tools/perf/util/evsel_fprintf.c printed += map__fprintf_srcline(al->map, 
al->addr, "\n ", fp); map 1025 tools/perf/util/header.c if (strcmp(a->map, b->map)) map 1076 tools/perf/util/header.c if (sysfs__read_str(file, &cache->map, &len)) { map 1082 tools/perf/util/header.c cache->map[len] = 0; map 1083 tools/perf/util/header.c cache->map = strim(cache->map); map 1089 tools/perf/util/header.c fprintf(out, "L%d %-15s %8s [%s]\n", c->level, c->type, c->size, c->map); map 1172 tools/perf/util/header.c _W(map) map 1733 tools/perf/util/header.c cpu_map__fprintf(n->map, fp); map 2360 tools/perf/util/header.c n->map = perf_cpu_map__new(str); map 2361 tools/perf/util/header.c if (!n->map) map 2558 tools/perf/util/header.c _R(map) map 3685 tools/perf/util/header.c struct perf_cpu_map *map; map 3705 tools/perf/util/header.c map = cpu_map__new_data(&ev_cpus->cpus); map 3706 tools/perf/util/header.c if (map) map 3707 tools/perf/util/header.c ret += cpu_map__fprintf(map, fp); map 3766 tools/perf/util/header.c struct perf_cpu_map *map; map 3791 tools/perf/util/header.c map = cpu_map__new_data(&ev_cpus->cpus); map 3792 tools/perf/util/header.c if (map) map 3793 tools/perf/util/header.c evsel->core.own_cpus = map; map 103 tools/perf/util/hist.c if (h->ms.map) { map 104 tools/perf/util/hist.c len = dso__name_len(h->ms.map->dso); map 118 tools/perf/util/hist.c symlen = dso__name_len(h->branch_info->from.map->dso); map 132 tools/perf/util/hist.c symlen = dso__name_len(h->branch_info->to.map->dso); map 175 tools/perf/util/hist.c if (h->mem_info->daddr.map) { map 176 tools/perf/util/hist.c symlen = dso__name_len(h->mem_info->daddr.map->dso); map 428 tools/perf/util/hist.c map__get(he->ms.map); map 443 tools/perf/util/hist.c map__get(he->branch_info->from.map); map 444 tools/perf/util/hist.c map__get(he->branch_info->to.map); map 448 tools/perf/util/hist.c map__get(he->mem_info->iaddr.map); map 449 tools/perf/util/hist.c map__get(he->mem_info->daddr.map); map 492 tools/perf/util/hist.c map__put(he->branch_info->from.map); map 493 tools/perf/util/hist.c map__put(he->branch_info->to.map); map 497 tools/perf/util/hist.c map__put(he->mem_info->iaddr.map); map 498 tools/perf/util/hist.c map__put(he->mem_info->daddr.map); map 501 tools/perf/util/hist.c map__zput(he->ms.map); map 613 tools/perf/util/hist.c if (he->ms.map != entry->ms.map) { map 614 tools/perf/util/hist.c map__put(he->ms.map); map 615 tools/perf/util/hist.c he->ms.map = map__get(entry->ms.map); map 692 tools/perf/util/hist.c .map = al->map, map 889 tools/perf/util/hist.c al->map = bi[i].to.map; map 1065 tools/perf/util/hist.c .map = al->map, map 1151 tools/perf/util/hist.c struct map *alm = NULL; map 1154 tools/perf/util/hist.c alm = map__get(al->map); map 1244 tools/perf/util/hist.c map__zput(he->ms.map); map 1247 tools/perf/util/hist.c map__zput(he->branch_info->from.map); map 1248 tools/perf/util/hist.c map__zput(he->branch_info->to.map); map 1255 tools/perf/util/hist.c map__zput(he->mem_info->iaddr.map); map 1256 tools/perf/util/hist.c map__zput(he->mem_info->daddr.map); map 2044 tools/perf/util/hist.c (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { map 95 tools/perf/util/intel-pt-decoder/intel-pt-decoder.h const int map[INTEL_PT_BLK_TYPE_MAX] = { map 106 tools/perf/util/intel-pt-decoder/intel-pt-decoder.h return blk_type < INTEL_PT_BLK_TYPE_MAX ? 
map[blk_type] - 1 : -1; map 546 tools/perf/util/intel-pt.c if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso) map 549 tools/perf/util/intel-pt.c if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR && map 550 tools/perf/util/intel-pt.c dso__data_status_seen(al.map->dso, map 554 tools/perf/util/intel-pt.c offset = al.map->map_ip(al.map, *ip); map 559 tools/perf/util/intel-pt.c e = intel_pt_cache_lookup(al.map->dso, machine, offset); map 579 tools/perf/util/intel-pt.c map__load(al.map); map 581 tools/perf/util/intel-pt.c x86_64 = al.map->dso->is_64_bit; map 584 tools/perf/util/intel-pt.c len = dso__data_read_offset(al.map->dso, machine, map 608 tools/perf/util/intel-pt.c if (*ip >= al.map->end) map 628 tools/perf/util/intel-pt.c e = intel_pt_cache_lookup(al.map->dso, machine, start_offset); map 634 tools/perf/util/intel-pt.c intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt, map 699 tools/perf/util/intel-pt.c if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso) map 702 tools/perf/util/intel-pt.c offset = al.map->map_ip(al.map, ip); map 705 tools/perf/util/intel-pt.c al.map->dso->long_name); map 2020 tools/perf/util/intel-pt.c struct map *map; map 2028 tools/perf/util/intel-pt.c map = machine__kernel_map(machine); map 2029 tools/perf/util/intel-pt.c if (!map) map 2032 tools/perf/util/intel-pt.c if (map__load(map)) map 2035 tools/perf/util/intel-pt.c start = dso__first_symbol(map->dso); map 2040 tools/perf/util/intel-pt.c ip = map->unmap_ip(map, sym->start); map 2041 tools/perf/util/intel-pt.c if (ip >= map->start && ip < map->end) { map 2058 tools/perf/util/intel-pt.c ip = map->unmap_ip(map, sym->start); map 2059 tools/perf/util/intel-pt.c if (ip >= map->start && ip < map->end) { map 722 tools/perf/util/machine.c struct map *map; map 724 tools/perf/util/machine.c map = map_groups__find(&machine->kmaps, event->ksymbol.addr); map 725 tools/perf/util/machine.c if (!map) { map 726 tools/perf/util/machine.c map = dso__new_map(event->ksymbol.name); map 727 tools/perf/util/machine.c if (!map) map 730 tools/perf/util/machine.c map->start = event->ksymbol.addr; map 731 tools/perf/util/machine.c map->end = map->start + event->ksymbol.len; map 732 tools/perf/util/machine.c map_groups__insert(&machine->kmaps, map); map 735 tools/perf/util/machine.c sym = symbol__new(map->map_ip(map, map->start), map 740 tools/perf/util/machine.c dso__insert_symbol(map->dso, sym); map 748 tools/perf/util/machine.c struct map *map; map 750 tools/perf/util/machine.c map = map_groups__find(&machine->kmaps, event->ksymbol.addr); map 751 tools/perf/util/machine.c if (map) map 752 tools/perf/util/machine.c map_groups__remove(&machine->kmaps, map); map 770 tools/perf/util/machine.c struct map *machine__findnew_module_map(struct machine *machine, u64 start, map 773 tools/perf/util/machine.c struct map *map = NULL; map 780 tools/perf/util/machine.c map = map_groups__find_by_name(&machine->kmaps, m.name); map 781 tools/perf/util/machine.c if (map) map 788 tools/perf/util/machine.c map = map__new2(start, dso); map 789 tools/perf/util/machine.c if (map == NULL) map 792 tools/perf/util/machine.c map_groups__insert(&machine->kmaps, map); map 795 tools/perf/util/machine.c map__put(map); map 800 tools/perf/util/machine.c return map; map 965 tools/perf/util/machine.c struct map *map; map 967 tools/perf/util/machine.c map = map__new2(xm->start, kernel); map 968 tools/perf/util/machine.c if (!map) map 971 tools/perf/util/machine.c map->end = xm->end; map 972 tools/perf/util/machine.c map->pgoff = 
xm->pgoff; map 974 tools/perf/util/machine.c kmap = map__kmap(map); map 979 tools/perf/util/machine.c map_groups__insert(&machine->kmaps, map); map 982 tools/perf/util/machine.c kmap->name, map->start, map->end); map 984 tools/perf/util/machine.c map__put(map); map 1028 tools/perf/util/machine.c struct map *map; map 1035 tools/perf/util/machine.c for (map = maps__first(maps); map; map = map__next(map)) { map 1036 tools/perf/util/machine.c struct kmap *kmap = __map__kmap(map); map 1037 tools/perf/util/machine.c struct map *dest_map; map 1042 tools/perf/util/machine.c dest_map = map_groups__find(kmaps, map->pgoff); map 1043 tools/perf/util/machine.c if (dest_map != map) map 1044 tools/perf/util/machine.c map->pgoff = dest_map->map_ip(dest_map, map->pgoff); map 1088 tools/perf/util/machine.c struct map *map; map 1098 tools/perf/util/machine.c map = machine__kernel_map(machine); map 1099 tools/perf/util/machine.c kmap = map__kmap(map); map 1104 tools/perf/util/machine.c map_groups__insert(&machine->kmaps, map); map 1112 tools/perf/util/machine.c struct map *map = machine__kernel_map(machine); map 1114 tools/perf/util/machine.c if (map == NULL) map 1117 tools/perf/util/machine.c kmap = map__kmap(map); map 1118 tools/perf/util/machine.c map_groups__remove(&machine->kmaps, map); map 1203 tools/perf/util/machine.c struct map *map = machine__kernel_map(machine); map 1204 tools/perf/util/machine.c int ret = __dso__load_kallsyms(map->dso, filename, map, true); map 1207 tools/perf/util/machine.c dso__set_loaded(map->dso); map 1221 tools/perf/util/machine.c struct map *map = machine__kernel_map(machine); map 1222 tools/perf/util/machine.c int ret = dso__load_vmlinux_path(map->dso, map); map 1225 tools/perf/util/machine.c dso__set_loaded(map->dso); map 1268 tools/perf/util/machine.c struct map *map = map_groups__find_by_name(mg, m->name); map 1270 tools/perf/util/machine.c if (map == NULL) map 1277 tools/perf/util/machine.c dso__set_long_name(map->dso, long_name, true); map 1278 tools/perf/util/machine.c dso__kernel_module_get_build_id(map->dso, ""); map 1284 tools/perf/util/machine.c if (m->comp && is_kmod_dso(map->dso)) { map 1285 tools/perf/util/machine.c map->dso->symtab_type++; map 1286 tools/perf/util/machine.c map->dso->comp = m->comp; map 1377 tools/perf/util/machine.c struct map *map; map 1382 tools/perf/util/machine.c map = machine__findnew_module_map(machine, start, name); map 1383 tools/perf/util/machine.c if (map == NULL) map 1385 tools/perf/util/machine.c map->end = start + size; map 1387 tools/perf/util/machine.c dso__kernel_module_get_build_id(map->dso, machine->root_dir); map 1434 tools/perf/util/machine.c struct map *map = machine__kernel_map(machine); map 1436 tools/perf/util/machine.c map__get(map); map 1437 tools/perf/util/machine.c map_groups__remove(&machine->kmaps, map); map 1441 tools/perf/util/machine.c map_groups__insert(&machine->kmaps, map); map 1442 tools/perf/util/machine.c map__put(map); map 1449 tools/perf/util/machine.c struct map *map; map 1489 tools/perf/util/machine.c map = map__next(machine__kernel_map(machine)); map 1490 tools/perf/util/machine.c if (map) map 1491 tools/perf/util/machine.c machine__set_kernel_mmap(machine, start, map->start); map 1521 tools/perf/util/machine.c struct map *kernel_map = machine__kernel_map(machine); map 1540 tools/perf/util/machine.c struct map *map; map 1558 tools/perf/util/machine.c map = machine__findnew_module_map(machine, event->mmap.start, map 1560 tools/perf/util/machine.c if (map == NULL) map 1563 tools/perf/util/machine.c 
map->end = map->start + event->mmap.len; map 1653 tools/perf/util/machine.c struct map *map; map 1672 tools/perf/util/machine.c map = map__new(machine, event->mmap2.start, map 1681 tools/perf/util/machine.c if (map == NULL) map 1684 tools/perf/util/machine.c ret = thread__insert_map(thread, map); map 1689 tools/perf/util/machine.c map__put(map); map 1693 tools/perf/util/machine.c map__put(map); map 1705 tools/perf/util/machine.c struct map *map; map 1728 tools/perf/util/machine.c map = map__new(machine, event->mmap.start, map 1734 tools/perf/util/machine.c if (map == NULL) map 1737 tools/perf/util/machine.c ret = thread__insert_map(thread, map); map 1742 tools/perf/util/machine.c map__put(map); map 1746 tools/perf/util/machine.c map__put(map); map 1943 tools/perf/util/machine.c ams->map = al.map; map 1960 tools/perf/util/machine.c ams->map = al.map; map 1980 tools/perf/util/machine.c static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip) map 1984 tools/perf/util/machine.c if (!map || callchain_param.key == CCKEY_FUNCTION) map 1987 tools/perf/util/machine.c srcline = srcline__tree_find(&map->dso->srclines, ip); map 1992 tools/perf/util/machine.c srcline = get_srcline(map->dso, map__rip_2objdump(map, ip), map 1994 tools/perf/util/machine.c srcline__tree_insert(&map->dso->srclines, ip, srcline); map 2073 tools/perf/util/machine.c srcline = callchain_srcline(al.map, al.sym, al.addr); map 2074 tools/perf/util/machine.c return callchain_cursor_append(cursor, ip, al.map, al.sym, map 2423 tools/perf/util/machine.c struct map *map, struct symbol *sym, u64 ip) map 2430 tools/perf/util/machine.c if (!symbol_conf.inline_name || !map || !sym) map 2433 tools/perf/util/machine.c addr = map__map_ip(map, ip); map 2434 tools/perf/util/machine.c addr = map__rip_2objdump(map, addr); map 2436 tools/perf/util/machine.c inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr); map 2438 tools/perf/util/machine.c inline_node = dso__parse_addr_inlines(map->dso, addr, sym); map 2441 tools/perf/util/machine.c inlines__tree_insert(&map->dso->inlined_nodes, inline_node); map 2445 tools/perf/util/machine.c ret = callchain_cursor_append(cursor, ip, map, map 2465 tools/perf/util/machine.c if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0) map 2472 tools/perf/util/machine.c if (entry->map) map 2473 tools/perf/util/machine.c addr = map__map_ip(entry->map, entry->ip); map 2475 tools/perf/util/machine.c srcline = callchain_srcline(entry->map, entry->sym, addr); map 2477 tools/perf/util/machine.c entry->map, entry->sym, map 2651 tools/perf/util/machine.c struct map *map = machine__kernel_map(machine); map 2663 tools/perf/util/machine.c if (map) { map 2664 tools/perf/util/machine.c err = map__load(map); map 2671 tools/perf/util/machine.c machine->kernel_start = map->start; map 2711 tools/perf/util/machine.c struct map *map; map 2712 tools/perf/util/machine.c struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map); map 2717 tools/perf/util/machine.c *modp = __map__is_kmodule(map) ? 
(char *)map->dso->short_name : NULL; map 2718 tools/perf/util/machine.c *addrp = map->unmap_ip(map, sym->start); map 54 tools/perf/util/machine.h struct map *vmlinux_map; map 74 tools/perf/util/machine.h struct map *machine__kernel_map(struct machine *machine) map 211 tools/perf/util/machine.h struct map **mapp) map 219 tools/perf/util/machine.h struct map **mapp) map 224 tools/perf/util/machine.h struct map *machine__findnew_module_map(struct machine *machine, u64 start, map 28 tools/perf/util/map.c static void __maps__insert(struct maps *maps, struct map *map); map 29 tools/perf/util/map.c static void __maps__insert_name(struct maps *maps, struct map *map); map 133 tools/perf/util/map.c void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) map 135 tools/perf/util/map.c map->start = start; map 136 tools/perf/util/map.c map->end = end; map 137 tools/perf/util/map.c map->pgoff = pgoff; map 138 tools/perf/util/map.c map->reloc = 0; map 139 tools/perf/util/map.c map->dso = dso__get(dso); map 140 tools/perf/util/map.c map->map_ip = map__map_ip; map 141 tools/perf/util/map.c map->unmap_ip = map__unmap_ip; map 142 tools/perf/util/map.c RB_CLEAR_NODE(&map->rb_node); map 143 tools/perf/util/map.c map->groups = NULL; map 144 tools/perf/util/map.c map->erange_warned = false; map 145 tools/perf/util/map.c refcount_set(&map->refcnt, 1); map 148 tools/perf/util/map.c struct map *map__new(struct machine *machine, u64 start, u64 len, map 153 tools/perf/util/map.c struct map *map = malloc(sizeof(*map)); map 157 tools/perf/util/map.c if (map != NULL) { map 167 tools/perf/util/map.c map->maj = d_maj; map 168 tools/perf/util/map.c map->min = d_min; map 169 tools/perf/util/map.c map->ino = ino; map 170 tools/perf/util/map.c map->ino_generation = ino_gen; map 171 tools/perf/util/map.c map->prot = prot; map 172 tools/perf/util/map.c map->flags = flags; map 205 tools/perf/util/map.c map__init(map, start, start + len, pgoff, dso); map 208 tools/perf/util/map.c map->map_ip = map->unmap_ip = identity__map_ip; map 221 tools/perf/util/map.c return map; map 224 tools/perf/util/map.c free(map); map 233 tools/perf/util/map.c struct map *map__new2(u64 start, struct dso *dso) map 235 tools/perf/util/map.c struct map *map = calloc(1, (sizeof(*map) + map 237 tools/perf/util/map.c if (map != NULL) { map 241 tools/perf/util/map.c map__init(map, start, 0, 0, dso); map 244 tools/perf/util/map.c return map; map 256 tools/perf/util/map.c bool __map__is_kernel(const struct map *map) map 258 tools/perf/util/map.c return machine__kernel_map(map->groups->machine) == map; map 261 tools/perf/util/map.c bool __map__is_extra_kernel_map(const struct map *map) map 263 tools/perf/util/map.c struct kmap *kmap = __map__kmap((struct map *)map); map 268 tools/perf/util/map.c bool __map__is_bpf_prog(const struct map *map) map 272 tools/perf/util/map.c if (map->dso->binary_type == DSO_BINARY_TYPE__BPF_PROG_INFO) map 280 tools/perf/util/map.c name = map->dso->short_name; map 284 tools/perf/util/map.c bool map__has_symbols(const struct map *map) map 286 tools/perf/util/map.c return dso__has_symbols(map->dso); map 289 tools/perf/util/map.c static void map__exit(struct map *map) map 291 tools/perf/util/map.c BUG_ON(!RB_EMPTY_NODE(&map->rb_node)); map 292 tools/perf/util/map.c dso__zput(map->dso); map 295 tools/perf/util/map.c void map__delete(struct map *map) map 297 tools/perf/util/map.c map__exit(map); map 298 tools/perf/util/map.c free(map); map 301 tools/perf/util/map.c void map__put(struct map *map) map 303 
tools/perf/util/map.c if (map && refcount_dec_and_test(&map->refcnt)) map 304 tools/perf/util/map.c map__delete(map); map 307 tools/perf/util/map.c void map__fixup_start(struct map *map) map 309 tools/perf/util/map.c struct rb_root_cached *symbols = &map->dso->symbols; map 313 tools/perf/util/map.c map->start = sym->start; map 317 tools/perf/util/map.c void map__fixup_end(struct map *map) map 319 tools/perf/util/map.c struct rb_root_cached *symbols = &map->dso->symbols; map 323 tools/perf/util/map.c map->end = sym->end; map 329 tools/perf/util/map.c int map__load(struct map *map) map 331 tools/perf/util/map.c const char *name = map->dso->long_name; map 334 tools/perf/util/map.c if (dso__loaded(map->dso)) map 337 tools/perf/util/map.c nr = dso__load(map->dso, map); map 339 tools/perf/util/map.c if (map->dso->has_build_id) { map 342 tools/perf/util/map.c build_id__sprintf(map->dso->build_id, map 343 tools/perf/util/map.c sizeof(map->dso->build_id), map 371 tools/perf/util/map.c struct symbol *map__find_symbol(struct map *map, u64 addr) map 373 tools/perf/util/map.c if (map__load(map) < 0) map 376 tools/perf/util/map.c return dso__find_symbol(map->dso, addr); map 379 tools/perf/util/map.c struct symbol *map__find_symbol_by_name(struct map *map, const char *name) map 381 tools/perf/util/map.c if (map__load(map) < 0) map 384 tools/perf/util/map.c if (!dso__sorted_by_name(map->dso)) map 385 tools/perf/util/map.c dso__sort_by_name(map->dso); map 387 tools/perf/util/map.c return dso__find_symbol_by_name(map->dso, name); map 390 tools/perf/util/map.c struct map *map__clone(struct map *from) map 392 tools/perf/util/map.c struct map *map = memdup(from, sizeof(*map)); map 394 tools/perf/util/map.c if (map != NULL) { map 395 tools/perf/util/map.c refcount_set(&map->refcnt, 1); map 396 tools/perf/util/map.c RB_CLEAR_NODE(&map->rb_node); map 397 tools/perf/util/map.c dso__get(map->dso); map 398 tools/perf/util/map.c map->groups = NULL; map 401 tools/perf/util/map.c return map; map 404 tools/perf/util/map.c size_t map__fprintf(struct map *map, FILE *fp) map 407 tools/perf/util/map.c map->start, map->end, map->pgoff, map->dso->name); map 410 tools/perf/util/map.c size_t map__fprintf_dsoname(struct map *map, FILE *fp) map 415 tools/perf/util/map.c if (map && map->dso) { map 416 tools/perf/util/map.c if (symbol_conf.show_kernel_path && map->dso->long_name) map 417 tools/perf/util/map.c dsoname = map->dso->long_name; map 419 tools/perf/util/map.c dsoname = map->dso->name; map 430 tools/perf/util/map.c char *map__srcline(struct map *map, u64 addr, struct symbol *sym) map 432 tools/perf/util/map.c if (map == NULL) map 434 tools/perf/util/map.c return get_srcline(map->dso, map__rip_2objdump(map, addr), sym, true, true, addr); map 437 tools/perf/util/map.c int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, map 442 tools/perf/util/map.c if (map && map->dso) { map 443 tools/perf/util/map.c char *srcline = map__srcline(map, addr, NULL); map 451 tools/perf/util/map.c int map__fprintf_srccode(struct map *map, u64 addr, map 461 tools/perf/util/map.c if (!map || !map->dso) map 463 tools/perf/util/map.c srcfile = get_srcline_split(map->dso, map 464 tools/perf/util/map.c map__rip_2objdump(map, addr), map 513 tools/perf/util/map.c u64 map__rip_2objdump(struct map *map, u64 rip) map 515 tools/perf/util/map.c struct kmap *kmap = __map__kmap(map); map 523 tools/perf/util/map.c struct map *kernel_map = machine__kernel_map(kmap->kmaps->machine); map 526 tools/perf/util/map.c map = kernel_map; map 529 
tools/perf/util/map.c if (!map->dso->adjust_symbols) map 532 tools/perf/util/map.c if (map->dso->rel) map 533 tools/perf/util/map.c return rip - map->pgoff; map 539 tools/perf/util/map.c if (map->dso->kernel == DSO_TYPE_USER) map 540 tools/perf/util/map.c return rip + map->dso->text_offset; map 542 tools/perf/util/map.c return map->unmap_ip(map, rip) - map->reloc; map 557 tools/perf/util/map.c u64 map__objdump_2mem(struct map *map, u64 ip) map 559 tools/perf/util/map.c if (!map->dso->adjust_symbols) map 560 tools/perf/util/map.c return map->unmap_ip(map, ip); map 562 tools/perf/util/map.c if (map->dso->rel) map 563 tools/perf/util/map.c return map->unmap_ip(map, ip + map->pgoff); map 569 tools/perf/util/map.c if (map->dso->kernel == DSO_TYPE_USER) map 570 tools/perf/util/map.c return map->unmap_ip(map, ip - map->dso->text_offset); map 572 tools/perf/util/map.c return ip + map->reloc; map 589 tools/perf/util/map.c void map_groups__insert(struct map_groups *mg, struct map *map) map 591 tools/perf/util/map.c maps__insert(&mg->maps, map); map 592 tools/perf/util/map.c map->groups = mg; map 601 tools/perf/util/map.c struct map *pos = rb_entry(next, struct map, rb_node); map 615 tools/perf/util/map.c struct map *pos = rb_entry(next, struct map, rb_node_name); map 665 tools/perf/util/map.c u64 addr, struct map **mapp) map 667 tools/perf/util/map.c struct map *map = map_groups__find(mg, addr); map 670 tools/perf/util/map.c if (map != NULL && map__load(map) >= 0) { map 672 tools/perf/util/map.c *mapp = map; map 673 tools/perf/util/map.c return map__find_symbol(map, map->map_ip(map, addr)); map 679 tools/perf/util/map.c static bool map__contains_symbol(struct map *map, struct symbol *sym) map 681 tools/perf/util/map.c u64 ip = map->unmap_ip(map, sym->start); map 683 tools/perf/util/map.c return ip >= map->start && ip < map->end; map 687 tools/perf/util/map.c struct map **mapp) map 695 tools/perf/util/map.c struct map *pos = rb_entry(nd, struct map, rb_node); map 718 tools/perf/util/map.c struct map **mapp) map 725 tools/perf/util/map.c if (ams->addr < ams->map->start || ams->addr >= ams->map->end) { map 726 tools/perf/util/map.c if (ams->map->groups == NULL) map 728 tools/perf/util/map.c ams->map = map_groups__find(ams->map->groups, ams->addr); map 729 tools/perf/util/map.c if (ams->map == NULL) map 733 tools/perf/util/map.c ams->al_addr = ams->map->map_ip(ams->map, ams->addr); map 734 tools/perf/util/map.c ams->sym = map__find_symbol(ams->map, ams->al_addr); map 747 tools/perf/util/map.c struct map *pos = rb_entry(nd, struct map, rb_node); map 766 tools/perf/util/map.c static void __map_groups__insert(struct map_groups *mg, struct map *map) map 768 tools/perf/util/map.c __maps__insert(&mg->maps, map); map 769 tools/perf/util/map.c __maps__insert_name(&mg->maps, map); map 770 tools/perf/util/map.c map->groups = mg; map 773 tools/perf/util/map.c static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) map 790 tools/perf/util/map.c struct map *pos = rb_entry(next, struct map, rb_node); map 792 tools/perf/util/map.c if (pos->end > map->start) { map 794 tools/perf/util/map.c if (pos->start <= map->start) map 803 tools/perf/util/map.c struct map *pos = rb_entry(next, struct map, rb_node); map 810 tools/perf/util/map.c if (pos->start >= map->end) map 817 tools/perf/util/map.c map->dso->name); map 820 tools/perf/util/map.c map__fprintf(map, fp); map 830 tools/perf/util/map.c if (map->start > pos->start) { map 831 tools/perf/util/map.c struct map *before = map__clone(pos); map 838 
tools/perf/util/map.c before->end = map->start; map 845 tools/perf/util/map.c if (map->end < pos->end) { map 846 tools/perf/util/map.c struct map *after = map__clone(pos); map 853 tools/perf/util/map.c after->start = map->end; map 854 tools/perf/util/map.c after->pgoff += map->end - pos->start; map 855 tools/perf/util/map.c assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end)); map 874 tools/perf/util/map.c int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, map 877 tools/perf/util/map.c return maps__fixup_overlappings(&mg->maps, map, fp); map 887 tools/perf/util/map.c struct map *map; map 892 tools/perf/util/map.c for (map = maps__first(maps); map; map = map__next(map)) { map 893 tools/perf/util/map.c struct map *new = map__clone(map); map 911 tools/perf/util/map.c static void __maps__insert(struct maps *maps, struct map *map) map 915 tools/perf/util/map.c const u64 ip = map->start; map 916 tools/perf/util/map.c struct map *m; map 920 tools/perf/util/map.c m = rb_entry(parent, struct map, rb_node); map 927 tools/perf/util/map.c rb_link_node(&map->rb_node, parent, p); map 928 tools/perf/util/map.c rb_insert_color(&map->rb_node, &maps->entries); map 929 tools/perf/util/map.c map__get(map); map 932 tools/perf/util/map.c static void __maps__insert_name(struct maps *maps, struct map *map) map 936 tools/perf/util/map.c struct map *m; map 941 tools/perf/util/map.c m = rb_entry(parent, struct map, rb_node_name); map 942 tools/perf/util/map.c rc = strcmp(m->dso->short_name, map->dso->short_name); map 948 tools/perf/util/map.c rb_link_node(&map->rb_node_name, parent, p); map 949 tools/perf/util/map.c rb_insert_color(&map->rb_node_name, &maps->names); map 950 tools/perf/util/map.c map__get(map); map 953 tools/perf/util/map.c void maps__insert(struct maps *maps, struct map *map) map 956 tools/perf/util/map.c __maps__insert(maps, map); map 957 tools/perf/util/map.c __maps__insert_name(maps, map); map 961 tools/perf/util/map.c static void __maps__remove(struct maps *maps, struct map *map) map 963 tools/perf/util/map.c rb_erase_init(&map->rb_node, &maps->entries); map 964 tools/perf/util/map.c map__put(map); map 966 tools/perf/util/map.c rb_erase_init(&map->rb_node_name, &maps->names); map 967 tools/perf/util/map.c map__put(map); map 970 tools/perf/util/map.c void maps__remove(struct maps *maps, struct map *map) map 973 tools/perf/util/map.c __maps__remove(maps, map); map 977 tools/perf/util/map.c struct map *maps__find(struct maps *maps, u64 ip) map 980 tools/perf/util/map.c struct map *m; map 986 tools/perf/util/map.c m = rb_entry(p, struct map, rb_node); map 1001 tools/perf/util/map.c struct map *maps__first(struct maps *maps) map 1006 tools/perf/util/map.c return rb_entry(first, struct map, rb_node); map 1010 tools/perf/util/map.c struct map *map__next(struct map *map) map 1012 tools/perf/util/map.c struct rb_node *next = rb_next(&map->rb_node); map 1015 tools/perf/util/map.c return rb_entry(next, struct map, rb_node); map 1019 tools/perf/util/map.c struct kmap *__map__kmap(struct map *map) map 1021 tools/perf/util/map.c if (!map->dso || !map->dso->kernel) map 1023 tools/perf/util/map.c return (struct kmap *)(map + 1); map 1026 tools/perf/util/map.c struct kmap *map__kmap(struct map *map) map 1028 tools/perf/util/map.c struct kmap *kmap = __map__kmap(map); map 1035 tools/perf/util/map.c struct map_groups *map__kmaps(struct map *map) map 1037 tools/perf/util/map.c struct kmap *kmap = map__kmap(map); map 40 tools/perf/util/map.h u64 (*map_ip)(struct map *, u64); 
map 42 tools/perf/util/map.h u64 (*unmap_ip)(struct map *, u64); map 51 tools/perf/util/map.h struct kmap *__map__kmap(struct map *map); map 52 tools/perf/util/map.h struct kmap *map__kmap(struct map *map); map 53 tools/perf/util/map.h struct map_groups *map__kmaps(struct map *map); map 55 tools/perf/util/map.h static inline u64 map__map_ip(struct map *map, u64 ip) map 57 tools/perf/util/map.h return ip - map->start + map->pgoff; map 60 tools/perf/util/map.h static inline u64 map__unmap_ip(struct map *map, u64 ip) map 62 tools/perf/util/map.h return ip + map->start - map->pgoff; map 65 tools/perf/util/map.h static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip) map 70 tools/perf/util/map.h static inline size_t map__size(const struct map *map) map 72 tools/perf/util/map.h return map->end - map->start; map 76 tools/perf/util/map.h u64 map__rip_2objdump(struct map *map, u64 rip); map 79 tools/perf/util/map.h u64 map__objdump_2mem(struct map *map, u64 ip); map 91 tools/perf/util/map.h #define map__for_each_symbol(map, pos, n) \ map 92 tools/perf/util/map.h dso__for_each_symbol(map->dso, pos, n) map 101 tools/perf/util/map.h #define __map__for_each_symbol_by_name(map, sym_name, pos) \ map 102 tools/perf/util/map.h for (pos = map__find_symbol_by_name(map, sym_name); \ map 108 tools/perf/util/map.h #define map__for_each_symbol_by_name(map, sym_name, pos) \ map 109 tools/perf/util/map.h __map__for_each_symbol_by_name(map, sym_name, (pos)) map 111 tools/perf/util/map.h void map__init(struct map *map, map 113 tools/perf/util/map.h struct map *map__new(struct machine *machine, u64 start, u64 len, map 117 tools/perf/util/map.h struct map *map__new2(u64 start, struct dso *dso); map 118 tools/perf/util/map.h void map__delete(struct map *map); map 119 tools/perf/util/map.h struct map *map__clone(struct map *map); map 121 tools/perf/util/map.h static inline struct map *map__get(struct map *map) map 123 tools/perf/util/map.h if (map) map 124 tools/perf/util/map.h refcount_inc(&map->refcnt); map 125 tools/perf/util/map.h return map; map 128 tools/perf/util/map.h void map__put(struct map *map); map 130 tools/perf/util/map.h static inline void __map__zput(struct map **map) map 132 tools/perf/util/map.h map__put(*map); map 133 tools/perf/util/map.h *map = NULL; map 136 tools/perf/util/map.h #define map__zput(map) __map__zput(&map) map 138 tools/perf/util/map.h size_t map__fprintf(struct map *map, FILE *fp); map 139 tools/perf/util/map.h size_t map__fprintf_dsoname(struct map *map, FILE *fp); map 140 tools/perf/util/map.h char *map__srcline(struct map *map, u64 addr, struct symbol *sym); map 141 tools/perf/util/map.h int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, map 146 tools/perf/util/map.h int map__fprintf_srccode(struct map *map, u64 addr, map 149 tools/perf/util/map.h int map__load(struct map *map); map 150 tools/perf/util/map.h struct symbol *map__find_symbol(struct map *map, u64 addr); map 151 tools/perf/util/map.h struct symbol *map__find_symbol_by_name(struct map *map, const char *name); map 152 tools/perf/util/map.h void map__fixup_start(struct map *map); map 153 tools/perf/util/map.h void map__fixup_end(struct map *map); map 155 tools/perf/util/map.h void map__reloc_vmlinux(struct map *map); map 157 tools/perf/util/map.h int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, map 160 tools/perf/util/map.h bool __map__is_kernel(const struct map *map); map 161 tools/perf/util/map.h bool __map__is_extra_kernel_map(const struct map *map); 
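A minimal, hedged sketch of the address translation quoted in the tools/perf/util/map.h entries above (map__map_ip returning ip - map->start + map->pgoff, and map__unmap_ip returning ip + map->start - map->pgoff). The stripped-down struct addr_map, the addr_map__* helper names, and the main() driver are invented for illustration and are not perf APIs; only the start/pgoff arithmetic comes from the quoted lines.

#include <assert.h>
#include <stdint.h>

/* Simplified stand-in for perf's struct map: only the two fields the
 * translation arithmetic needs (mapping start and file page offset). */
struct addr_map {
	uint64_t start;
	uint64_t pgoff;
};

/* Mirrors the quoted map__map_ip(): runtime address -> dso-relative offset. */
static uint64_t addr_map__map_ip(const struct addr_map *m, uint64_t ip)
{
	return ip - m->start + m->pgoff;
}

/* Mirrors the quoted map__unmap_ip(): dso-relative offset -> runtime address. */
static uint64_t addr_map__unmap_ip(const struct addr_map *m, uint64_t ip)
{
	return ip + m->start - m->pgoff;
}

int main(void)
{
	struct addr_map m = { .start = 0x400000, .pgoff = 0x1000 };
	uint64_t ip = 0x401234;

	/* For fixed start/pgoff the two helpers are exact inverses. */
	assert(addr_map__unmap_ip(&m, addr_map__map_ip(&m, ip)) == ip);
	return 0;
}

As long as start and pgoff are held fixed, the two conversions invert each other, which is the property the surrounding entries rely on when they translate sample addresses with map_ip and recover runtime addresses with unmap_ip.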
map 162 tools/perf/util/map.h bool __map__is_bpf_prog(const struct map *map); map 164 tools/perf/util/map.h static inline bool __map__is_kmodule(const struct map *map) map 166 tools/perf/util/map.h return !__map__is_kernel(map) && !__map__is_extra_kernel_map(map) && map 167 tools/perf/util/map.h !__map__is_bpf_prog(map); map 170 tools/perf/util/map.h bool map__has_symbols(const struct map *map); map 14 tools/perf/util/map_groups.h struct map; map 23 tools/perf/util/map_groups.h void maps__insert(struct maps *maps, struct map *map); map 24 tools/perf/util/map_groups.h void maps__remove(struct maps *maps, struct map *map); map 25 tools/perf/util/map_groups.h struct map *maps__find(struct maps *maps, u64 addr); map 26 tools/perf/util/map_groups.h struct map *maps__first(struct maps *maps); map 27 tools/perf/util/map_groups.h struct map *map__next(struct map *map); map 28 tools/perf/util/map_groups.h struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp); map 65 tools/perf/util/map_groups.h void map_groups__insert(struct map_groups *mg, struct map *map); map 67 tools/perf/util/map_groups.h static inline void map_groups__remove(struct map_groups *mg, struct map *map) map 69 tools/perf/util/map_groups.h maps__remove(&mg->maps, map); map 72 tools/perf/util/map_groups.h static inline struct map *map_groups__find(struct map_groups *mg, u64 addr) map 77 tools/perf/util/map_groups.h struct map *map_groups__first(struct map_groups *mg); map 79 tools/perf/util/map_groups.h static inline struct map *map_groups__next(struct map *map) map 81 tools/perf/util/map_groups.h return map__next(map); map 84 tools/perf/util/map_groups.h struct symbol *map_groups__find_symbol(struct map_groups *mg, u64 addr, struct map **mapp); map 85 tools/perf/util/map_groups.h struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, const char *name, struct map **mapp); map 91 tools/perf/util/map_groups.h int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, FILE *fp); map 93 tools/perf/util/map_groups.h struct map *map_groups__find_by_name(struct map_groups *mg, const char *name); map 95 tools/perf/util/map_groups.h int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map); map 7 tools/perf/util/map_symbol.h struct map; map 11 tools/perf/util/map_symbol.h struct map *map; map 16 tools/perf/util/map_symbol.h struct map *map; map 413 tools/perf/util/mem-events.c if (!mi->daddr.map || !mi->iaddr.map) { map 46 tools/perf/util/mem2node.c int mem2node__init(struct mem2node *map, struct perf_env *env) map 53 tools/perf/util/mem2node.c memset(map, 0x0, sizeof(*map)); map 54 tools/perf/util/mem2node.c map->root = RB_ROOT; map 105 tools/perf/util/mem2node.c phys_entry__insert(&entries[i], &map->root); map 108 tools/perf/util/mem2node.c map->entries = entries; map 112 tools/perf/util/mem2node.c void mem2node__exit(struct mem2node *map) map 114 tools/perf/util/mem2node.c zfree(&map->entries); map 117 tools/perf/util/mem2node.c int mem2node__node(struct mem2node *map, u64 addr) map 122 tools/perf/util/mem2node.c p = &map->root.rb_node; map 16 tools/perf/util/mem2node.h int mem2node__init(struct mem2node *map, struct perf_env *env); map 17 tools/perf/util/mem2node.h void mem2node__exit(struct mem2node *map); map 18 tools/perf/util/mem2node.h int mem2node__node(struct mem2node *map, u64 addr); map 302 tools/perf/util/metricgroup.c struct pmu_events_map *map = perf_pmu__find_map(NULL); map 309 tools/perf/util/metricgroup.c if (!map) map 324 
tools/perf/util/metricgroup.c pe = &map->table[i]; map 405 tools/perf/util/metricgroup.c struct pmu_events_map *map = perf_pmu__find_map(NULL); map 410 tools/perf/util/metricgroup.c if (!map) map 414 tools/perf/util/metricgroup.c pe = &map->table[i]; map 545 tools/perf/util/metricgroup.c struct pmu_events_map *map = perf_pmu__find_map(NULL); map 549 tools/perf/util/metricgroup.c if (!map) map 553 tools/perf/util/metricgroup.c pe = &map->table[i]; map 26 tools/perf/util/mmap.c size_t perf_mmap__mmap_len(struct mmap *map) map 28 tools/perf/util/mmap.c return map->core.mask + 1 + page_size; map 32 tools/perf/util/mmap.c static union perf_event *perf_mmap__read(struct mmap *map, map 35 tools/perf/util/mmap.c unsigned char *data = map->core.base + page_size; map 42 tools/perf/util/mmap.c event = (union perf_event *)&data[*startp & map->core.mask]; map 52 tools/perf/util/mmap.c if ((*startp & map->core.mask) + size != ((*startp + size) & map->core.mask)) { map 55 tools/perf/util/mmap.c void *dst = map->core.event_copy; map 58 tools/perf/util/mmap.c cpy = min(map->core.mask + 1 - (offset & map->core.mask), len); map 59 tools/perf/util/mmap.c memcpy(dst, &data[offset & map->core.mask], cpy); map 65 tools/perf/util/mmap.c event = (union perf_event *)map->core.event_copy; map 86 tools/perf/util/mmap.c union perf_event *perf_mmap__read_event(struct mmap *map) map 93 tools/perf/util/mmap.c if (!refcount_read(&map->core.refcnt)) map 97 tools/perf/util/mmap.c if (!map->core.overwrite) map 98 tools/perf/util/mmap.c map->core.end = perf_mmap__read_head(map); map 100 tools/perf/util/mmap.c event = perf_mmap__read(map, &map->core.start, map->core.end); map 102 tools/perf/util/mmap.c if (!map->core.overwrite) map 103 tools/perf/util/mmap.c map->core.prev = map->core.start; map 108 tools/perf/util/mmap.c static bool perf_mmap__empty(struct mmap *map) map 110 tools/perf/util/mmap.c return perf_mmap__read_head(map) == map->core.prev && !map->auxtrace_mmap.base; map 113 tools/perf/util/mmap.c void perf_mmap__get(struct mmap *map) map 115 tools/perf/util/mmap.c refcount_inc(&map->core.refcnt); map 118 tools/perf/util/mmap.c void perf_mmap__put(struct mmap *map) map 120 tools/perf/util/mmap.c BUG_ON(map->core.base && refcount_read(&map->core.refcnt) == 0); map 122 tools/perf/util/mmap.c if (refcount_dec_and_test(&map->core.refcnt)) map 123 tools/perf/util/mmap.c perf_mmap__munmap(map); map 126 tools/perf/util/mmap.c void perf_mmap__consume(struct mmap *map) map 128 tools/perf/util/mmap.c if (!map->core.overwrite) { map 129 tools/perf/util/mmap.c u64 old = map->core.prev; map 131 tools/perf/util/mmap.c perf_mmap__write_tail(map, old); map 134 tools/perf/util/mmap.c if (refcount_read(&map->core.refcnt) == 1 && perf_mmap__empty(map)) map 135 tools/perf/util/mmap.c perf_mmap__put(map); map 165 tools/perf/util/mmap.c static int perf_mmap__aio_enabled(struct mmap *map) map 167 tools/perf/util/mmap.c return map->aio.nr_cblocks > 0; map 171 tools/perf/util/mmap.c static int perf_mmap__aio_alloc(struct mmap *map, int idx) map 173 tools/perf/util/mmap.c map->aio.data[idx] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE, map 175 tools/perf/util/mmap.c if (map->aio.data[idx] == MAP_FAILED) { map 176 tools/perf/util/mmap.c map->aio.data[idx] = NULL; map 183 tools/perf/util/mmap.c static void perf_mmap__aio_free(struct mmap *map, int idx) map 185 tools/perf/util/mmap.c if (map->aio.data[idx]) { map 186 tools/perf/util/mmap.c munmap(map->aio.data[idx], perf_mmap__mmap_len(map)); map 187 tools/perf/util/mmap.c 
map->aio.data[idx] = NULL; map 191 tools/perf/util/mmap.c static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity) map 198 tools/perf/util/mmap.c data = map->aio.data[idx]; map 199 tools/perf/util/mmap.c mmap_len = perf_mmap__mmap_len(map); map 211 tools/perf/util/mmap.c static int perf_mmap__aio_alloc(struct mmap *map, int idx) map 213 tools/perf/util/mmap.c map->aio.data[idx] = malloc(perf_mmap__mmap_len(map)); map 214 tools/perf/util/mmap.c if (map->aio.data[idx] == NULL) map 220 tools/perf/util/mmap.c static void perf_mmap__aio_free(struct mmap *map, int idx) map 222 tools/perf/util/mmap.c zfree(&(map->aio.data[idx])); map 225 tools/perf/util/mmap.c static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused, map 232 tools/perf/util/mmap.c static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp) map 236 tools/perf/util/mmap.c map->aio.nr_cblocks = mp->nr_cblocks; map 237 tools/perf/util/mmap.c if (map->aio.nr_cblocks) { map 238 tools/perf/util/mmap.c map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *)); map 239 tools/perf/util/mmap.c if (!map->aio.aiocb) { map 243 tools/perf/util/mmap.c map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb)); map 244 tools/perf/util/mmap.c if (!map->aio.cblocks) { map 248 tools/perf/util/mmap.c map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *)); map 249 tools/perf/util/mmap.c if (!map->aio.data) { map 254 tools/perf/util/mmap.c for (i = 0; i < map->aio.nr_cblocks; ++i) { map 255 tools/perf/util/mmap.c ret = perf_mmap__aio_alloc(map, i); map 260 tools/perf/util/mmap.c ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity); map 269 tools/perf/util/mmap.c map->aio.cblocks[i].aio_fildes = -1; map 279 tools/perf/util/mmap.c map->aio.cblocks[i].aio_reqprio = prio >= 0 ? 
prio : 0; map 286 tools/perf/util/mmap.c static void perf_mmap__aio_munmap(struct mmap *map) map 290 tools/perf/util/mmap.c for (i = 0; i < map->aio.nr_cblocks; ++i) map 291 tools/perf/util/mmap.c perf_mmap__aio_free(map, i); map 292 tools/perf/util/mmap.c if (map->aio.data) map 293 tools/perf/util/mmap.c zfree(&map->aio.data); map 294 tools/perf/util/mmap.c zfree(&map->aio.cblocks); map 295 tools/perf/util/mmap.c zfree(&map->aio.aiocb); map 298 tools/perf/util/mmap.c static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused) map 303 tools/perf/util/mmap.c static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused, map 309 tools/perf/util/mmap.c static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused) map 314 tools/perf/util/mmap.c void perf_mmap__munmap(struct mmap *map) map 316 tools/perf/util/mmap.c perf_mmap__aio_munmap(map); map 317 tools/perf/util/mmap.c if (map->data != NULL) { map 318 tools/perf/util/mmap.c munmap(map->data, perf_mmap__mmap_len(map)); map 319 tools/perf/util/mmap.c map->data = NULL; map 321 tools/perf/util/mmap.c if (map->core.base != NULL) { map 322 tools/perf/util/mmap.c munmap(map->core.base, perf_mmap__mmap_len(map)); map 323 tools/perf/util/mmap.c map->core.base = NULL; map 324 tools/perf/util/mmap.c map->core.fd = -1; map 325 tools/perf/util/mmap.c refcount_set(&map->core.refcnt, 0); map 327 tools/perf/util/mmap.c auxtrace_mmap__munmap(&map->auxtrace_mmap); map 341 tools/perf/util/mmap.c cpu = cpu_map->map[c]; /* map c index to online cpu index */ map 347 tools/perf/util/mmap.c static void perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp) map 349 tools/perf/util/mmap.c CPU_ZERO(&map->affinity_mask); map 351 tools/perf/util/mmap.c build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask); map 353 tools/perf/util/mmap.c CPU_SET(map->core.cpu, &map->affinity_mask); map 356 tools/perf/util/mmap.c int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu) map 371 tools/perf/util/mmap.c refcount_set(&map->core.refcnt, 2); map 372 tools/perf/util/mmap.c map->core.prev = 0; map 373 tools/perf/util/mmap.c map->core.mask = mp->mask; map 374 tools/perf/util/mmap.c map->core.base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot, map 376 tools/perf/util/mmap.c if (map->core.base == MAP_FAILED) { map 379 tools/perf/util/mmap.c map->core.base = NULL; map 382 tools/perf/util/mmap.c map->core.fd = fd; map 383 tools/perf/util/mmap.c map->core.cpu = cpu; map 385 tools/perf/util/mmap.c perf_mmap__setup_affinity_mask(map, mp); map 387 tools/perf/util/mmap.c map->core.flush = mp->flush; map 389 tools/perf/util/mmap.c map->comp_level = mp->comp_level; map 391 tools/perf/util/mmap.c if (map->comp_level && !perf_mmap__aio_enabled(map)) { map 392 tools/perf/util/mmap.c map->data = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE, map 394 tools/perf/util/mmap.c if (map->data == MAP_FAILED) { map 397 tools/perf/util/mmap.c map->data = NULL; map 402 tools/perf/util/mmap.c if (auxtrace_mmap__mmap(&map->auxtrace_mmap, map 403 tools/perf/util/mmap.c &mp->auxtrace_mp, map->core.base, fd)) map 406 tools/perf/util/mmap.c return perf_mmap__aio_mmap(map, mp); map 478 tools/perf/util/mmap.c int perf_mmap__read_init(struct mmap *map) map 483 tools/perf/util/mmap.c if (!refcount_read(&map->core.refcnt)) map 486 tools/perf/util/mmap.c return __perf_mmap__read_init(map); map 490 tools/perf/util/mmap.c int push(struct mmap *map, void *to, void *buf, size_t size)) map 536 tools/perf/util/mmap.c void 
perf_mmap__read_done(struct mmap *map) map 541 tools/perf/util/mmap.c if (!refcount_read(&map->core.refcnt)) map 544 tools/perf/util/mmap.c map->core.prev = perf_mmap__read_head(map); map 44 tools/perf/util/mmap.h int perf_mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu); map 45 tools/perf/util/mmap.h void perf_mmap__munmap(struct mmap *map); map 47 tools/perf/util/mmap.h void perf_mmap__get(struct mmap *map); map 48 tools/perf/util/mmap.h void perf_mmap__put(struct mmap *map); map 50 tools/perf/util/mmap.h void perf_mmap__consume(struct mmap *map); map 62 tools/perf/util/mmap.h union perf_event *perf_mmap__read_forward(struct mmap *map); map 64 tools/perf/util/mmap.h union perf_event *perf_mmap__read_event(struct mmap *map); map 67 tools/perf/util/mmap.h int push(struct mmap *map, void *to, void *buf, size_t size)); map 69 tools/perf/util/mmap.h size_t perf_mmap__mmap_len(struct mmap *map); map 72 tools/perf/util/mmap.h void perf_mmap__read_done(struct mmap *map); map 681 tools/perf/util/pmu.c struct pmu_events_map *map; map 693 tools/perf/util/pmu.c map = &pmu_events_map[i++]; map 694 tools/perf/util/pmu.c if (!map->table) { map 695 tools/perf/util/pmu.c map = NULL; map 699 tools/perf/util/pmu.c if (!strcmp_cpuid_str(map->cpuid, cpuid)) map 703 tools/perf/util/pmu.c return map; map 754 tools/perf/util/pmu.c struct pmu_events_map *map; map 757 tools/perf/util/pmu.c map = perf_pmu__find_map(pmu); map 758 tools/perf/util/pmu.c if (!map) map 767 tools/perf/util/pmu.c struct pmu_event *pe = &map->table[i++]; map 109 tools/perf/util/probe-event.c struct map *map = machine__kernel_map(host_machine); map 111 tools/perf/util/probe-event.c if (map__load(map) < 0) map 114 tools/perf/util/probe-event.c kmap = map__kmap(map); map 125 tools/perf/util/probe-event.c struct map *map; map 132 tools/perf/util/probe-event.c sym = machine__find_kernel_symbol_by_name(host_machine, name, &map); map 135 tools/perf/util/probe-event.c *addr = map->unmap_ip(map, sym->start) - map 136 tools/perf/util/probe-event.c ((reloc) ? 0 : map->reloc) - map 137 tools/perf/util/probe-event.c ((reladdr) ? 
map->start : 0); map 142 tools/perf/util/probe-event.c static struct map *kernel_get_module_map(const char *module) map 145 tools/perf/util/probe-event.c struct map *pos; map 167 tools/perf/util/probe-event.c struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user) map 171 tools/perf/util/probe-event.c struct map *map; map 173 tools/perf/util/probe-event.c map = dso__new_map(target); map 174 tools/perf/util/probe-event.c if (map && map->dso) map 175 tools/perf/util/probe-event.c map->dso->nsinfo = nsinfo__get(nsi); map 176 tools/perf/util/probe-event.c return map; map 316 tools/perf/util/probe-event.c struct map *map; map 324 tools/perf/util/probe-event.c map = map_groups__find_by_name(&host_machine->kmaps, module_name); map 325 tools/perf/util/probe-event.c if (map) { map 326 tools/perf/util/probe-event.c dso = map->dso; map 333 tools/perf/util/probe-event.c map = machine__kernel_map(host_machine); map 334 tools/perf/util/probe-event.c dso = map->dso; map 339 tools/perf/util/probe-event.c ret = dso__load_vmlinux(dso, map, vmlinux_name, false); map 341 tools/perf/util/probe-event.c ret = dso__load_vmlinux_path(dso, map); map 358 tools/perf/util/probe-event.c struct map *map = NULL; map 367 tools/perf/util/probe-event.c map = get_target_map(target, nsi, uprobes); map 368 tools/perf/util/probe-event.c if (!map) map 372 tools/perf/util/probe-event.c map__for_each_symbol_by_name(map, pp->function, sym) { map 376 tools/perf/util/probe-event.c address = map->unmap_ip(map, sym->start) - map->reloc; map 398 tools/perf/util/probe-event.c map__put(map); map 617 tools/perf/util/probe-event.c struct map *map, unsigned long offs) map 622 tools/perf/util/probe-event.c sym = map__find_symbol(map, addr); map 654 tools/perf/util/probe-event.c struct map *map; map 659 tools/perf/util/probe-event.c map = dso__new_map(pathname); map 660 tools/perf/util/probe-event.c if (!map || get_text_start_address(pathname, &stext, NULL) < 0) { map 667 tools/perf/util/probe-event.c map, stext); map 671 tools/perf/util/probe-event.c map__put(map); map 712 tools/perf/util/probe-event.c struct map *map; map 717 tools/perf/util/probe-event.c map = get_target_map(module, NULL, false); map 718 tools/perf/util/probe-event.c if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) { map 726 tools/perf/util/probe-event.c map, (unsigned long)text_offs); map 738 tools/perf/util/probe-event.c map__put(map); map 2100 tools/perf/util/probe-event.c struct map *map = NULL; map 2105 tools/perf/util/probe-event.c map = dso__new_map(tp->module); map 2106 tools/perf/util/probe-event.c if (!map) map 2108 tools/perf/util/probe-event.c sym = map__find_symbol(map, addr); map 2117 tools/perf/util/probe-event.c sym = machine__find_kernel_symbol(host_machine, addr, &map); map 2125 tools/perf/util/probe-event.c pp->offset = addr - map->unmap_ip(map, sym->start); map 2130 tools/perf/util/probe-event.c if (map && !is_kprobe) { map 2131 tools/perf/util/probe-event.c map__put(map); map 2825 tools/perf/util/probe-event.c static int find_probe_functions(struct map *map, char *name, map 2835 tools/perf/util/probe-event.c if (map__load(map) < 0) map 2842 tools/perf/util/probe-event.c map__for_each_symbol(map, sym, tmp) { map 2872 tools/perf/util/probe-event.c struct map *map __maybe_unused, map 2882 tools/perf/util/probe-event.c struct map *map = NULL; map 2893 tools/perf/util/probe-event.c map = get_target_map(pev->target, pev->nsi, pev->uprobes); map 2894 tools/perf/util/probe-event.c if (!map) { map 2909 
tools/perf/util/probe-event.c num_matched_functions = find_probe_functions(map, pp->function, syms); map 2960 tools/perf/util/probe-event.c tp->address = map->unmap_ip(map, sym->start) + pp->offset; map 3011 tools/perf/util/probe-event.c arch__fix_tev_from_maps(pev, tev, map, sym); map 3019 tools/perf/util/probe-event.c map__put(map); map 3504 tools/perf/util/probe-event.c struct map *map; map 3512 tools/perf/util/probe-event.c map = get_target_map(target, nsi, user); map 3513 tools/perf/util/probe-event.c if (!map) { map 3518 tools/perf/util/probe-event.c ret = map__load(map); map 3530 tools/perf/util/probe-event.c if (!dso__sorted_by_name(map->dso)) map 3531 tools/perf/util/probe-event.c dso__sort_by_name(map->dso); map 3536 tools/perf/util/probe-event.c for (nd = rb_first_cached(&map->dso->symbol_names); nd; map 3544 tools/perf/util/probe-event.c map__put(map); map 124 tools/perf/util/probe-event.h struct map; map 180 tools/perf/util/probe-event.h struct probe_trace_event *tev, struct map *map, map 192 tools/perf/util/probe-event.h struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user); map 589 tools/perf/util/python.c return Py_BuildValue("i", pcpus->cpus->map[i]); map 658 tools/perf/util/python.c return Py_BuildValue("i", pthreads->threads->map[i]); map 73 tools/perf/util/record.c cpu = cpus->map[0]; map 128 tools/perf/util/record.c cpu = cpus->map[0]; map 154 tools/perf/util/record.c if (evlist->core.cpus->map[0] < 0) map 284 tools/perf/util/record.c cpu = cpus ? cpus->map[0] : 0; map 287 tools/perf/util/record.c cpu = evlist->core.cpus->map[0]; map 138 tools/perf/util/s390-sample-raw.c static const char *get_counter_name(int set, int nr, struct pmu_events_map *map) map 142 tools/perf/util/s390-sample-raw.c if (map) { map 143 tools/perf/util/s390-sample-raw.c struct pmu_event *evp = map->table; map 162 tools/perf/util/s390-sample-raw.c struct pmu_events_map *map; map 167 tools/perf/util/s390-sample-raw.c map = perf_pmu__find_map(&pmu); map 185 tools/perf/util/s390-sample-raw.c const char *ev_name = get_counter_name(ce.set, i, map); map 315 tools/perf/util/scripting-engines/trace-event-perl.c if (node->map) { map 316 tools/perf/util/scripting-engines/trace-event-perl.c struct map *map = node->map; map 318 tools/perf/util/scripting-engines/trace-event-perl.c if (map && map->dso) { map 319 tools/perf/util/scripting-engines/trace-event-perl.c if (symbol_conf.show_kernel_path && map->dso->long_name) map 320 tools/perf/util/scripting-engines/trace-event-perl.c dsoname = map->dso->long_name; map 322 tools/perf/util/scripting-engines/trace-event-perl.c dsoname = map->dso->name; map 380 tools/perf/util/scripting-engines/trace-event-python.c static const char *get_dsoname(struct map *map) map 384 tools/perf/util/scripting-engines/trace-event-python.c if (map && map->dso) { map 385 tools/perf/util/scripting-engines/trace-event-python.c if (symbol_conf.show_kernel_path && map->dso->long_name) map 386 tools/perf/util/scripting-engines/trace-event-python.c dsoname = map->dso->long_name; map 388 tools/perf/util/scripting-engines/trace-event-python.c dsoname = map->dso->name; map 447 tools/perf/util/scripting-engines/trace-event-python.c if (node->map) { map 448 tools/perf/util/scripting-engines/trace-event-python.c const char *dsoname = get_dsoname(node->map); map 503 tools/perf/util/scripting-engines/trace-event-python.c dsoname = get_dsoname(al.map); map 509 tools/perf/util/scripting-engines/trace-event-python.c dsoname = get_dsoname(al.map); map 528 
tools/perf/util/scripting-engines/trace-event-python.c offset = al->addr - al->map->start - sym->start; map 770 tools/perf/util/scripting-engines/trace-event-python.c if (al->map) { map 772 tools/perf/util/scripting-engines/trace-event-python.c _PyUnicode_FromString(al->map->dso->name)); map 1407 tools/perf/util/scripting-engines/trace-event-python.c process_stat(counter, cpus->map[cpu], map 2236 tools/perf/util/session.c int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr) map 2258 tools/perf/util/session.c kmap = map__kmap(map); map 2315 tools/perf/util/session.c struct perf_cpu_map *map; map 2332 tools/perf/util/session.c map = perf_cpu_map__new(cpu_list); map 2333 tools/perf/util/session.c if (map == NULL) { map 2338 tools/perf/util/session.c for (i = 0; i < map->nr; i++) { map 2339 tools/perf/util/session.c int cpu = map->map[i]; map 2353 tools/perf/util/session.c perf_cpu_map__put(map); map 168 tools/perf/util/sort.c static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r) map 191 tools/perf/util/sort.c return _sort__dso_cmp(right->ms.map, left->ms.map); map 194 tools/perf/util/sort.c static int _hist_entry__dso_snprintf(struct map *map, char *bf, map 197 tools/perf/util/sort.c if (map && map->dso) { map 198 tools/perf/util/sort.c const char *dso_name = verbose > 0 ? map->dso->long_name : map 199 tools/perf/util/sort.c map->dso->short_name; map 209 tools/perf/util/sort.c return _hist_entry__dso_snprintf(he->ms.map, bf, size, width); map 219 tools/perf/util/sort.c return dso && (!he->ms.map || he->ms.map->dso != dso); map 290 tools/perf/util/sort.c static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, map 297 tools/perf/util/sort.c char o = map ? dso__symtab_origin(map->dso) : '!'; map 303 tools/perf/util/sort.c if (sym && map) { map 307 tools/perf/util/sort.c ip - map->unmap_ip(map, sym->start)); map 328 tools/perf/util/sort.c return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip, map 355 tools/perf/util/sort.c return map__srcline(he->ms.map, he->ip, he->ms.sym); map 389 tools/perf/util/sort.c return map__srcline(ams->map, ams->al_addr, ams->sym); map 502 tools/perf/util/sort.c struct map *map = e->ms.map; map 504 tools/perf/util/sort.c if (!map) map 507 tools/perf/util/sort.c sf = __get_srcline(map->dso, map__rip_2objdump(map, e->ip), map 772 tools/perf/util/sort.c return _sort__dso_cmp(left->branch_info->from.map, map 773 tools/perf/util/sort.c right->branch_info->from.map); map 780 tools/perf/util/sort.c return _hist_entry__dso_snprintf(he->branch_info->from.map, map 794 tools/perf/util/sort.c return dso && (!he->branch_info || !he->branch_info->from.map || map 795 tools/perf/util/sort.c he->branch_info->from.map->dso != dso); map 804 tools/perf/util/sort.c return _sort__dso_cmp(left->branch_info->to.map, map 805 tools/perf/util/sort.c right->branch_info->to.map); map 812 tools/perf/util/sort.c return _hist_entry__dso_snprintf(he->branch_info->to.map, map 826 tools/perf/util/sort.c return dso && (!he->branch_info || !he->branch_info->to.map || map 827 tools/perf/util/sort.c he->branch_info->to.map->dso != dso); map 871 tools/perf/util/sort.c return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, map 884 tools/perf/util/sort.c return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, map 1020 tools/perf/util/sort.c struct map *map = NULL; map 1025 tools/perf/util/sort.c map = he->mem_info->daddr.map; map 1028 tools/perf/util/sort.c return _hist_entry__sym_snprintf(map, sym, addr, he->level, 
bf, size, map 1049 tools/perf/util/sort.c struct map *map = NULL; map 1054 tools/perf/util/sort.c map = he->mem_info->iaddr.map; map 1057 tools/perf/util/sort.c return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size, map 1064 tools/perf/util/sort.c struct map *map_l = NULL; map 1065 tools/perf/util/sort.c struct map *map_r = NULL; map 1068 tools/perf/util/sort.c map_l = left->mem_info->daddr.map; map 1070 tools/perf/util/sort.c map_r = right->mem_info->daddr.map; map 1078 tools/perf/util/sort.c struct map *map = NULL; map 1081 tools/perf/util/sort.c map = he->mem_info->daddr.map; map 1083 tools/perf/util/sort.c return _hist_entry__dso_snprintf(map, bf, size, width); map 1202 tools/perf/util/sort.c struct map *l_map, *r_map; map 1211 tools/perf/util/sort.c l_map = left->mem_info->daddr.map; map 1212 tools/perf/util/sort.c r_map = right->mem_info->daddr.map; map 1267 tools/perf/util/sort.c struct map *map = NULL; map 1273 tools/perf/util/sort.c map = he->mem_info->daddr.map; map 1278 tools/perf/util/sort.c map && !(map->prot & PROT_EXEC) && map 1279 tools/perf/util/sort.c (map->flags & MAP_SHARED) && map 1280 tools/perf/util/sort.c (map->maj || map->min || map->ino || map 1281 tools/perf/util/sort.c map->ino_generation)) map 1283 tools/perf/util/sort.c else if (!map) map 1286 tools/perf/util/sort.c return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size, map 1614 tools/perf/util/sort.c static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r) map 1626 tools/perf/util/sort.c return _sort__dso_size_cmp(right->ms.map, left->ms.map); map 1629 tools/perf/util/sort.c static int _hist_entry__dso_size_snprintf(struct map *map, char *bf, map 1632 tools/perf/util/sort.c if (map && map->dso) map 1634 tools/perf/util/sort.c map__size(map)); map 1642 tools/perf/util/sort.c return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width); map 29 tools/perf/util/srccode.c char *map; map 47 tools/perf/util/srccode.c static int countlines(char *map, int maplen) map 50 tools/perf/util/srccode.c char *end = map + maplen; map 51 tools/perf/util/srccode.c char *p = map; map 65 tools/perf/util/srccode.c static void fill_lines(char **lines, int maxline, char *map, int maplen) map 68 tools/perf/util/srccode.c char *end = map + maplen; map 69 tools/perf/util/srccode.c char *p = map; map 74 tools/perf/util/srccode.c lines[l++] = map; map 89 tools/perf/util/srccode.c munmap(sf->map, sf->maplen); map 137 tools/perf/util/srccode.c h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0); map 139 tools/perf/util/srccode.c if (h->map == (char *)-1) { map 143 tools/perf/util/srccode.c h->numlines = countlines(h->map, h->maplen); map 147 tools/perf/util/srccode.c fill_lines(h->lines, h->numlines, h->map, h->maplen); map 155 tools/perf/util/srccode.c munmap(h->map, sz); map 176 tools/perf/util/srccode.c p = memchr(l, '\n', sf->map + sf->maplen - l); map 113 tools/perf/util/stat-display.c evsel__cpus(evsel)->map[id], map 329 tools/perf/util/stat-display.c int cpu2 = evsel__cpus(evsel)->map[i]; map 500 tools/perf/util/stat-display.c id = config->aggr_map->map[s]; map 630 tools/perf/util/stat-display.c ad.id = id = config->aggr_map->map[s]; map 699 tools/perf/util/svghelper.c static void scan_thread_topology(int *map, struct topology *t, int cpu, map 710 tools/perf/util/svghelper.c if (map[thr] == -1) map 711 tools/perf/util/svghelper.c map[thr] = (*pos)++; map 715 tools/perf/util/svghelper.c static void scan_core_topology(int *map, struct topology *t, int nr_cpus) map 723 
tools/perf/util/svghelper.c scan_thread_topology(map, t, cpu, &pos, nr_cpus); map 738 tools/perf/util/svghelper.c c = m->map[i]; map 845 tools/perf/util/symbol-elf.c static int dso__process_kernel_symbol(struct dso *dso, struct map *map, map 848 tools/perf/util/symbol-elf.c struct dso **curr_dsop, struct map **curr_mapp, map 853 tools/perf/util/symbol-elf.c struct map *curr_map; map 871 tools/perf/util/symbol-elf.c map->start = shdr->sh_addr + ref_reloc(kmap); map 872 tools/perf/util/symbol-elf.c map->end = map->start + shdr->sh_size; map 873 tools/perf/util/symbol-elf.c map->pgoff = shdr->sh_offset; map 874 tools/perf/util/symbol-elf.c map->map_ip = map__map_ip; map 875 tools/perf/util/symbol-elf.c map->unmap_ip = map__unmap_ip; map 878 tools/perf/util/symbol-elf.c map__get(map); map 879 tools/perf/util/symbol-elf.c map_groups__remove(kmaps, map); map 880 tools/perf/util/symbol-elf.c map_groups__insert(kmaps, map); map 881 tools/perf/util/symbol-elf.c map__put(map); map 892 tools/perf/util/symbol-elf.c map->pgoff = shdr->sh_offset; map 895 tools/perf/util/symbol-elf.c *curr_mapp = map; map 910 tools/perf/util/symbol-elf.c start += map->start + shdr->sh_offset; map 937 tools/perf/util/symbol-elf.c dsos__add(&map->groups->machine->dsos, curr_dso); map 949 tools/perf/util/symbol-elf.c int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, map 952 tools/perf/util/symbol-elf.c struct kmap *kmap = dso->kernel ? map__kmap(map) : NULL; map 953 tools/perf/util/symbol-elf.c struct map_groups *kmaps = kmap ? map__kmaps(map) : NULL; map 954 tools/perf/util/symbol-elf.c struct map *curr_map = map; map 1044 tools/perf/util/symbol-elf.c map->reloc = kmap->ref_reloc_sym->addr - map 1055 tools/perf/util/symbol-elf.c map->reloc = map->start - dso->text_offset; map 1125 tools/perf/util/symbol-elf.c if (dso__process_kernel_symbol(dso, map, &sym, &shdr, kmaps, kmap, &curr_dso, &curr_map, map 337 tools/perf/util/symbol-minimal.c int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, map 44 tools/perf/util/symbol.c static int dso__load_kernel_sym(struct dso *dso, struct map *map); map 45 tools/perf/util/symbol.c static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map); map 245 tools/perf/util/symbol.c struct map *next, *curr; map 707 tools/perf/util/symbol.c struct map *curr_map; map 759 tools/perf/util/symbol.c struct map *initial_map) map 762 tools/perf/util/symbol.c struct map *curr_map = initial_map; map 1056 tools/perf/util/symbol.c struct map *map_groups__first(struct map_groups *mg) map 1065 tools/perf/util/symbol.c struct map *old_map; map 1074 tools/perf/util/symbol.c struct map *next = map_groups__next(old_map); map 1122 tools/perf/util/symbol.c struct map *map) map 1124 tools/perf/util/symbol.c struct map_groups *kmaps = map__kmaps(map); map 1141 tools/perf/util/symbol.c struct map *map) map 1143 tools/perf/util/symbol.c struct kmap *kmap = map__kmap(map); map 1158 tools/perf/util/symbol.c return validate_kcore_modules(kallsyms_filename, map); map 1169 tools/perf/util/symbol.c struct map *map; map 1171 tools/perf/util/symbol.c map = map__new2(start, md->dso); map 1172 tools/perf/util/symbol.c if (map == NULL) map 1175 tools/perf/util/symbol.c map->end = map->start + len; map 1176 tools/perf/util/symbol.c map->pgoff = pgoff; map 1178 tools/perf/util/symbol.c list_add(&map->node, &md->maps); map 1187 tools/perf/util/symbol.c int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map) map 1189 tools/perf/util/symbol.c struct map *old_map; map 
1216 tools/perf/util/symbol.c struct map *m = map__clone(new_map); map 1249 tools/perf/util/symbol.c old_map = list_entry(merged.next, struct map, node); map 1262 tools/perf/util/symbol.c static int dso__load_kcore(struct dso *dso, struct map *map, map 1265 tools/perf/util/symbol.c struct map_groups *kmaps = map__kmaps(map); map 1267 tools/perf/util/symbol.c struct map *old_map, *new_map, *replacement_map = NULL; map 1280 tools/perf/util/symbol.c if (!__map__is_kernel(map)) map 1288 tools/perf/util/symbol.c if (validate_kcore_addresses(kallsyms_filename, map)) map 1302 tools/perf/util/symbol.c err = file__read_maps(fd, map->prot & PROT_EXEC, kcore_mapfn, &md, map 1316 tools/perf/util/symbol.c struct map *next = map_groups__next(old_map); map 1323 tools/perf/util/symbol.c if (old_map != map && !__map__is_bpf_prog(old_map)) map 1340 tools/perf/util/symbol.c replacement_map = list_entry(md.maps.next, struct map, node); map 1344 tools/perf/util/symbol.c new_map = list_entry(md.maps.next, struct map, node); map 1347 tools/perf/util/symbol.c map->start = new_map->start; map 1348 tools/perf/util/symbol.c map->end = new_map->end; map 1349 tools/perf/util/symbol.c map->pgoff = new_map->pgoff; map 1350 tools/perf/util/symbol.c map->map_ip = new_map->map_ip; map 1351 tools/perf/util/symbol.c map->unmap_ip = new_map->unmap_ip; map 1353 tools/perf/util/symbol.c map__get(map); map 1354 tools/perf/util/symbol.c map_groups__remove(kmaps, map); map 1355 tools/perf/util/symbol.c map_groups__insert(kmaps, map); map 1356 tools/perf/util/symbol.c map__put(map); map 1394 tools/perf/util/symbol.c if (map->prot & PROT_EXEC) map 1403 tools/perf/util/symbol.c map = list_entry(md.maps.next, struct map, node); map 1404 tools/perf/util/symbol.c list_del_init(&map->node); map 1405 tools/perf/util/symbol.c map__put(map); map 1430 tools/perf/util/symbol.c struct map *map, bool no_kcore) map 1432 tools/perf/util/symbol.c struct kmap *kmap = map__kmap(map); map 1455 tools/perf/util/symbol.c if (!no_kcore && !dso__load_kcore(dso, map, filename)) map 1458 tools/perf/util/symbol.c return map_groups__split_kallsyms(kmap->kmaps, dso, delta, map); map 1462 tools/perf/util/symbol.c struct map *map) map 1464 tools/perf/util/symbol.c return __dso__load_kallsyms(dso, filename, map, false); map 1606 tools/perf/util/symbol.c int dso__load(struct dso *dso, struct map *map) map 1640 tools/perf/util/symbol.c if (map->groups && map->groups->machine) map 1641 tools/perf/util/symbol.c machine = map->groups->machine; map 1647 tools/perf/util/symbol.c ret = dso__load_kernel_sym(dso, map); map 1649 tools/perf/util/symbol.c ret = dso__load_guest_kernel_sym(dso, map); map 1761 tools/perf/util/symbol.c ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod); map 1787 tools/perf/util/symbol.c struct map *map_groups__find_by_name(struct map_groups *mg, const char *name) map 1790 tools/perf/util/symbol.c struct map *map; map 1798 tools/perf/util/symbol.c map = rb_entry(node, struct map, rb_node_name); map 1800 tools/perf/util/symbol.c rc = strcmp(map->dso->short_name, name); map 1810 tools/perf/util/symbol.c map = NULL; map 1814 tools/perf/util/symbol.c return map; map 1817 tools/perf/util/symbol.c int dso__load_vmlinux(struct dso *dso, struct map *map, map 1838 tools/perf/util/symbol.c err = dso__load_sym(dso, map, &ss, &ss, 0); map 1854 tools/perf/util/symbol.c int dso__load_vmlinux_path(struct dso *dso, struct map *map) map 1863 tools/perf/util/symbol.c err = dso__load_vmlinux(dso, map, vmlinux_path[i], false); map 1871 tools/perf/util/symbol.c 
err = dso__load_vmlinux(dso, map, filename, true); map 1887 tools/perf/util/symbol.c static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz) map 1901 tools/perf/util/symbol.c if (!validate_kcore_addresses(kallsyms_filename, map)) { map 1927 tools/perf/util/symbol.c static char *dso__find_kallsyms(struct dso *dso, struct map *map) map 1956 tools/perf/util/symbol.c !validate_kcore_addresses("/proc/kallsyms", map)) map 1966 tools/perf/util/symbol.c if (!find_matching_kcore(map, path, sizeof(path))) map 1985 tools/perf/util/symbol.c static int dso__load_kernel_sym(struct dso *dso, struct map *map) map 2011 tools/perf/util/symbol.c return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false); map 2015 tools/perf/util/symbol.c err = dso__load_vmlinux_path(dso, map); map 2024 tools/perf/util/symbol.c kallsyms_allocated_filename = dso__find_kallsyms(dso, map); map 2031 tools/perf/util/symbol.c err = dso__load_kallsyms(dso, kallsyms_filename, map); map 2039 tools/perf/util/symbol.c map__fixup_start(map); map 2040 tools/perf/util/symbol.c map__fixup_end(map); map 2046 tools/perf/util/symbol.c static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map) map 2053 tools/perf/util/symbol.c if (!map->groups) { map 2057 tools/perf/util/symbol.c machine = map->groups->machine; map 2066 tools/perf/util/symbol.c err = dso__load_vmlinux(dso, map, map 2080 tools/perf/util/symbol.c err = dso__load_kallsyms(dso, kallsyms_filename, map); map 2086 tools/perf/util/symbol.c map__fixup_start(map); map 2087 tools/perf/util/symbol.c map__fixup_end(map); map 22 tools/perf/util/symbol.h struct map; map 122 tools/perf/util/symbol.h struct map *map; map 133 tools/perf/util/symbol.h int dso__load(struct dso *dso, struct map *map); map 134 tools/perf/util/symbol.h int dso__load_vmlinux(struct dso *dso, struct map *map, map 136 tools/perf/util/symbol.h int dso__load_vmlinux_path(struct dso *dso, struct map *map); map 137 tools/perf/util/symbol.h int __dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map, map 139 tools/perf/util/symbol.h int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map); map 188 tools/perf/util/symbol.h int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, map 33 tools/perf/util/symbol_fprintf.c offset = al->addr - al->map->start - sym->start; map 420 tools/perf/util/synthetic-events.c struct map *pos; map 818 tools/perf/util/synthetic-events.c struct map *map = machine__kernel_map(machine); map 823 tools/perf/util/synthetic-events.c if (map == NULL) map 826 tools/perf/util/synthetic-events.c kmap = map__kmap(map); map 859 tools/perf/util/synthetic-events.c event->mmap.start = map->start; map 860 tools/perf/util/synthetic-events.c event->mmap.len = map->end - event->mmap.start; map 919 tools/perf/util/synthetic-events.c struct perf_cpu_map *map) map 923 tools/perf/util/synthetic-events.c cpus->nr = map->nr; map 925 tools/perf/util/synthetic-events.c for (i = 0; i < map->nr; i++) map 926 tools/perf/util/synthetic-events.c cpus->cpu[i] = map->map[i]; map 930 tools/perf/util/synthetic-events.c struct perf_cpu_map *map, int max) map 937 tools/perf/util/synthetic-events.c for (i = 0; i < map->nr; i++) map 938 tools/perf/util/synthetic-events.c set_bit(map->map[i], mask->mask); map 941 tools/perf/util/synthetic-events.c static size_t cpus_size(struct perf_cpu_map *map) map 943 tools/perf/util/synthetic-events.c return sizeof(struct cpu_map_entries) + map->nr * sizeof(u16); map 946 
tools/perf/util/synthetic-events.c static size_t mask_size(struct perf_cpu_map *map, int *max) map 952 tools/perf/util/synthetic-events.c for (i = 0; i < map->nr; i++) { map 954 tools/perf/util/synthetic-events.c int bit = map->map[i] + 1; map 963 tools/perf/util/synthetic-events.c void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int *max) map 966 tools/perf/util/synthetic-events.c bool is_dummy = perf_cpu_map__empty(map); map 981 tools/perf/util/synthetic-events.c size_cpus = cpus_size(map); map 982 tools/perf/util/synthetic-events.c size_mask = mask_size(map, max); map 997 tools/perf/util/synthetic-events.c void cpu_map_data__synthesize(struct perf_record_cpu_map_data *data, struct perf_cpu_map *map, map 1004 tools/perf/util/synthetic-events.c synthesize_cpus((struct cpu_map_entries *) data->data, map); map 1007 tools/perf/util/synthetic-events.c synthesize_mask((struct perf_record_record_cpu_map *)data->data, map, max); map 1013 tools/perf/util/synthetic-events.c static struct perf_record_cpu_map *cpu_map_event__new(struct perf_cpu_map *map) map 1020 tools/perf/util/synthetic-events.c event = cpu_map_data__alloc(map, &size, &type, &max); map 1028 tools/perf/util/synthetic-events.c cpu_map_data__synthesize(&event->data, map, type, max); map 1033 tools/perf/util/synthetic-events.c struct perf_cpu_map *map, map 1040 tools/perf/util/synthetic-events.c event = cpu_map_event__new(map); map 330 tools/perf/util/thread.c int thread__insert_map(struct thread *thread, struct map *map) map 334 tools/perf/util/thread.c ret = unwind__prepare_access(thread->mg, map, NULL); map 338 tools/perf/util/thread.c map_groups__fixup_overlappings(thread->mg, map, stderr); map 339 tools/perf/util/thread.c map_groups__insert(thread->mg, map); map 349 tools/perf/util/thread.c struct map *map; map 353 tools/perf/util/thread.c for (map = maps__first(maps); map; map = map__next(map)) { map 354 tools/perf/util/thread.c err = unwind__prepare_access(thread->mg, map, &initialized); map 420 tools/perf/util/thread.c if (al->map) map 446 tools/perf/util/thread.c if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso || map 447 tools/perf/util/thread.c al.map->dso->data.status == DSO_DATA_STATUS_ERROR || map 448 tools/perf/util/thread.c map__load(al.map) < 0) map 451 tools/perf/util/thread.c offset = al.map->map_ip(al.map, ip); map 453 tools/perf/util/thread.c *is64bit = al.map->dso->is_64_bit; map 455 tools/perf/util/thread.c return dso__data_read_offset(al.map->dso, machine, offset, buf, len); map 18 tools/perf/util/thread.h struct map; map 93 tools/perf/util/thread.h int thread__insert_map(struct thread *thread, struct map *map); map 99 tools/perf/util/thread.h struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, map 101 tools/perf/util/thread.h struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr, map 342 tools/perf/util/thread_map.c static void comm_init(struct perf_thread_map *map, int i) map 344 tools/perf/util/thread_map.c pid_t pid = perf_thread_map__pid(map, i); map 349 tools/perf/util/thread_map.c map->map[i].comm = strdup("dummy"); map 360 tools/perf/util/thread_map.c map->map[i].comm = comm; map 380 tools/perf/util/thread_map.c threads->map[i].comm = strndup(event->entries[i].comm, 16); map 402 tools/perf/util/thread_map.c if (threads->map[i].pid == pid) map 422 tools/perf/util/thread_map.c zfree(&threads->map[idx].comm); map 425 tools/perf/util/thread_map.c threads->map[i] = threads->map[i + 1]; map 40 tools/perf/util/unwind-libdw.c 
if (al->map) map 41 tools/perf/util/unwind-libdw.c dso = al->map->dso; map 51 tools/perf/util/unwind-libdw.c if (s != al->map->start - al->map->pgoff) map 57 tools/perf/util/unwind-libdw.c (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff, map 84 tools/perf/util/unwind-libdw.c e->map = al.map; map 90 tools/perf/util/unwind-libdw.c al.map ? al.map->map_ip(al.map, ip) : (u64) 0); map 115 tools/perf/util/unwind-libdw.c if (!al.map->dso) map 118 tools/perf/util/unwind-libdw.c size = dso__data_read_addr(al.map->dso, al.map, ui->machine, map 367 tools/perf/util/unwind-libunwind-local.c static struct map *find_map(unw_word_t ip, struct unwind_info *ui) map 378 tools/perf/util/unwind-libunwind-local.c struct map *map; map 383 tools/perf/util/unwind-libunwind-local.c map = find_map(ip, ui); map 384 tools/perf/util/unwind-libunwind-local.c if (!map || !map->dso) map 387 tools/perf/util/unwind-libunwind-local.c pr_debug("unwind: find_proc_info dso %s\n", map->dso->name); map 390 tools/perf/util/unwind-libunwind-local.c if (!read_unwind_spec_eh_frame(map->dso, ui->machine, map 394 tools/perf/util/unwind-libunwind-local.c di.start_ip = map->start; map 395 tools/perf/util/unwind-libunwind-local.c di.end_ip = map->end; map 396 tools/perf/util/unwind-libunwind-local.c di.u.rti.segbase = map->start + segbase - map->pgoff; map 397 tools/perf/util/unwind-libunwind-local.c di.u.rti.table_data = map->start + table_data - map->pgoff; map 407 tools/perf/util/unwind-libunwind-local.c !read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { map 408 tools/perf/util/unwind-libunwind-local.c int fd = dso__data_get_fd(map->dso, ui->machine); map 409 tools/perf/util/unwind-libunwind-local.c int is_exec = elf_is_exec(fd, map->dso->name); map 410 tools/perf/util/unwind-libunwind-local.c unw_word_t base = is_exec ? 0 : map->start; map 414 tools/perf/util/unwind-libunwind-local.c dso__data_put_fd(map->dso); map 416 tools/perf/util/unwind-libunwind-local.c symfile = map->dso->symsrc_filename ?: map->dso->name; map 420 tools/perf/util/unwind-libunwind-local.c map->start, map->end)) map 467 tools/perf/util/unwind-libunwind-local.c struct map *map; map 470 tools/perf/util/unwind-libunwind-local.c map = find_map(addr, ui); map 471 tools/perf/util/unwind-libunwind-local.c if (!map) { map 476 tools/perf/util/unwind-libunwind-local.c if (!map->dso) map 479 tools/perf/util/unwind-libunwind-local.c size = dso__data_read_addr(map->dso, map, ui->machine, map 580 tools/perf/util/unwind-libunwind-local.c e.map = al.map; map 585 tools/perf/util/unwind-libunwind-local.c al.map ? 
al.map->map_ip(al.map, ip) : (u64) 0); map 21 tools/perf/util/unwind-libunwind.c int unwind__prepare_access(struct map_groups *mg, struct map *map, map 34 tools/perf/util/unwind-libunwind.c map->dso->name); map 44 tools/perf/util/unwind-libunwind.c dso_type = dso__type(map->dso, mg->machine); map 8 tools/perf/util/unwind.h struct map; map 15 tools/perf/util/unwind.h struct map *map; map 50 tools/perf/util/unwind.h int unwind__prepare_access(struct map_groups *mg, struct map *map, map 56 tools/perf/util/unwind.h struct map *map __maybe_unused, map 77 tools/perf/util/unwind.h struct map *map __maybe_unused, map 145 tools/perf/util/vdso.c struct map *map = map_groups__first(thread->mg); map 147 tools/perf/util/vdso.c for (; map ; map = map_groups__next(map)) { map 148 tools/perf/util/vdso.c struct dso *dso = map->dso; map 356 tools/power/x86/intel-speed-select/isst-config.c struct isst_if_cpu_maps map; map 370 tools/power/x86/intel-speed-select/isst-config.c map.cmd_count = 1; map 371 tools/power/x86/intel-speed-select/isst-config.c map.cpu_map[0].logical_cpu = i; map 374 tools/power/x86/intel-speed-select/isst-config.c map.cpu_map[0].logical_cpu); map 375 tools/power/x86/intel-speed-select/isst-config.c if (ioctl(fd, ISST_IF_GET_PHY_ID, &map) == -1) { map 378 tools/power/x86/intel-speed-select/isst-config.c map.cpu_map[0].logical_cpu); map 384 tools/power/x86/intel-speed-select/isst-config.c cpu_map[i].punit_cpu = map.cpu_map[0].physical_cpu; map 385 tools/power/x86/intel-speed-select/isst-config.c cpu_map[i].punit_cpu_core = (map.cpu_map[0].physical_cpu >> map 2635 tools/power/x86/turbostat/turbostat.c unsigned long map; map 2656 tools/power/x86/turbostat/turbostat.c if (fscanf(filep, "%lx%c", &map, &character) != 2) map 2659 tools/power/x86/turbostat/turbostat.c if ((map >> shift) & 0x1) { map 5 tools/testing/radix-tree/bitmap.c void bitmap_clear(unsigned long *map, unsigned int start, int len) map 7 tools/testing/radix-tree/bitmap.c unsigned long *p = map + BIT_WORD(start); map 25 tools/testing/selftests/bpf/bpf_helpers.h static void *(*bpf_map_lookup_elem)(void *map, const void *key) = map 27 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_map_update_elem)(void *map, const void *key, const void *value, map 30 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_map_delete_elem)(void *map, const void *key) = map 32 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_map_push_elem)(void *map, const void *value, map 35 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_map_pop_elem)(void *map, void *value) = map 37 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_map_peek_elem)(void *map, void *value) = map 45 tools/testing/selftests/bpf/bpf_helpers.h static void (*bpf_tail_call)(void *ctx, void *map, int index) = map 55 tools/testing/selftests/bpf/bpf_helpers.h static unsigned long long (*bpf_perf_event_read)(void *map, map 62 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_redirect_map)(void *map, int key, int flags) = map 64 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_perf_event_output)(void *ctx, void *map, map 68 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_get_stackid)(void *ctx, void *map, int flags) = map 72 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_current_task_under_cgroup)(void *map, int index) = map 98 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) = map 100 tools/testing/selftests/bpf/bpf_helpers.h static int 
(*bpf_sk_redirect_hash)(void *ctx, void *map, void *key, int flags) = map 102 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_sock_map_update)(void *map, void *key, void *value, map 105 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_sock_hash_update)(void *map, void *key, void *value, map 108 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_perf_event_read_value)(void *map, unsigned long long flags, map 116 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_msg_redirect_map)(void *ctx, void *map, int key, int flags) = map 119 tools/testing/selftests/bpf/bpf_helpers.h void *map, void *key, int flags) = map 138 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_sk_select_reuseport)(void *ctx, void *map, void *key, __u32 flags) = map 164 tools/testing/selftests/bpf/bpf_helpers.h static void *(*bpf_get_local_storage)(void *map, unsigned long long flags) = map 227 tools/testing/selftests/bpf/bpf_helpers.h static void *(*bpf_sk_storage_get)(void *map, struct bpf_sock *sk, map 230 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_sk_storage_delete)(void *map, struct bpf_sock *sk) = map 288 tools/testing/selftests/bpf/bpf_helpers.h static int (*bpf_skb_under_cgroup)(void *ctx, void *map, int index) = map 38 tools/testing/selftests/bpf/get_cgroup_id_user.c struct bpf_map *map; map 40 tools/testing/selftests/bpf/get_cgroup_id_user.c map = bpf_object__find_map_by_name(obj, name); map 41 tools/testing/selftests/bpf/get_cgroup_id_user.c if (!map) map 43 tools/testing/selftests/bpf/get_cgroup_id_user.c return bpf_map__fd(map); map 94 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c struct bpf_map *map; map 109 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c map = bpf_object__find_map_by_name(obj, "perfmap"); map 110 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c if (CHECK(!map, "bpf_find_map", "not found\n")) map 128 tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts); map 105 tools/testing/selftests/bpf/prog_tests/global_data.c struct bpf_map *map; map 108 tools/testing/selftests/bpf/prog_tests/global_data.c map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); map 109 tools/testing/selftests/bpf/prog_tests/global_data.c if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) map 112 tools/testing/selftests/bpf/prog_tests/global_data.c map_fd = bpf_map__fd(map); map 116 tools/testing/selftests/bpf/prog_tests/global_data.c buff = malloc(bpf_map__def(map)->value_size); map 50 tools/testing/selftests/bpf/prog_tests/sockmap_basic.c int s, map, err; map 56 tools/testing/selftests/bpf/prog_tests/sockmap_basic.c map = bpf_create_map(map_type, sizeof(int), sizeof(int), 1, 0); map 57 tools/testing/selftests/bpf/prog_tests/sockmap_basic.c if (CHECK_FAIL(map == -1)) { map 62 tools/testing/selftests/bpf/prog_tests/sockmap_basic.c err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST); map 69 tools/testing/selftests/bpf/prog_tests/sockmap_basic.c close(map); map 127 tools/testing/selftests/bpf/prog_tests/tcp_rtt.c struct bpf_map *map; map 139 tools/testing/selftests/bpf/prog_tests/tcp_rtt.c map = bpf_map__next(NULL, obj); map 140 tools/testing/selftests/bpf/prog_tests/tcp_rtt.c map_fd = bpf_map__fd(map); map 43 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c __u8 sk, map; map 48 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c map = d[0]; map 60 tools/testing/selftests/bpf/progs/sockmap_verdict_prog.c if (!map) map 382 
tools/testing/selftests/bpf/progs/strobemeta.h struct strobe_map_raw map; map 395 tools/testing/selftests/bpf/progs/strobemeta.h if (bpf_probe_read(&map, sizeof(struct strobe_map_raw), value->ptr)) map 398 tools/testing/selftests/bpf/progs/strobemeta.h descr->id = map.id; map 399 tools/testing/selftests/bpf/progs/strobemeta.h descr->cnt = map.cnt; map 401 tools/testing/selftests/bpf/progs/strobemeta.h data->req_id = map.id; map 405 tools/testing/selftests/bpf/progs/strobemeta.h len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, map.tag); map 417 tools/testing/selftests/bpf/progs/strobemeta.h if (i >= map.cnt) map 422 tools/testing/selftests/bpf/progs/strobemeta.h map.entries[i].key); map 429 tools/testing/selftests/bpf/progs/strobemeta.h map.entries[i].val); map 65 tools/testing/selftests/bpf/progs/test_global_data.c #define test_reloc(map, num, var) \ map 68 tools/testing/selftests/bpf/progs/test_global_data.c bpf_map_update_elem(&result_##map, &key, var, 0); \ map 32 tools/testing/selftests/bpf/progs/test_map_in_map.c void *map; map 34 tools/testing/selftests/bpf/progs/test_map_in_map.c map = bpf_map_lookup_elem(&mim_array, &key); map 35 tools/testing/selftests/bpf/progs/test_map_in_map.c if (!map) map 38 tools/testing/selftests/bpf/progs/test_map_in_map.c bpf_map_update_elem(map, &key, &value, 0); map 39 tools/testing/selftests/bpf/progs/test_map_in_map.c value_p = bpf_map_lookup_elem(map, &key); map 43 tools/testing/selftests/bpf/progs/test_map_in_map.c map = bpf_map_lookup_elem(&mim_hash, &key); map 44 tools/testing/selftests/bpf/progs/test_map_in_map.c if (!map) map 47 tools/testing/selftests/bpf/progs/test_map_in_map.c bpf_map_update_elem(map, &key, &value, 0); map 4127 tools/testing/selftests/bpf/test_btf.c struct bpf_map *map; map 4167 tools/testing/selftests/bpf/test_btf.c map = bpf_object__find_map_by_name(obj, "btf_map"); map 4168 tools/testing/selftests/bpf/test_btf.c if (CHECK(!map, "btf_map not found")) { map 4173 tools/testing/selftests/bpf/test_btf.c err = (bpf_map__btf_key_type_id(map) == 0 || bpf_map__btf_value_type_id(map) == 0) map 4176 tools/testing/selftests/bpf/test_btf.c bpf_map__btf_key_type_id(map), bpf_map__btf_value_type_id(map), map 57 tools/testing/selftests/bpf/test_hashmap.c struct hashmap *map; map 61 tools/testing/selftests/bpf/test_hashmap.c map = hashmap__new(hash_fn, equal_fn, NULL); map 62 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) map 69 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__update(map, k, v, &oldk, &oldv); map 74 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__add(map, k, v); map 76 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__set(map, k, v, &oldk, &oldv); map 86 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(!hashmap__find(map, k, &oldv), map 93 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__size(map) != ELEM_CNT, map 94 tools/testing/selftests/bpf/test_hashmap.c "invalid map size: %zu\n", hashmap__size(map))) map 96 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), map 97 tools/testing/selftests/bpf/test_hashmap.c "unexpected map capacity: %zu\n", hashmap__capacity(map))) map 101 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_entry(map, entry, bkt) { map 117 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__add(map, k, v); map 122 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__update(map, k, v, &oldk, &oldv); map 124 
tools/testing/selftests/bpf/test_hashmap.c err = hashmap__set(map, k, v, &oldk, &oldv); map 129 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(!hashmap__find(map, k, &oldv), map 136 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__size(map) != ELEM_CNT, map 137 tools/testing/selftests/bpf/test_hashmap.c "invalid updated map size: %zu\n", hashmap__size(map))) map 139 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), map 140 tools/testing/selftests/bpf/test_hashmap.c "unexpected map capacity: %zu\n", hashmap__capacity(map))) map 144 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_entry_safe(map, entry, tmp, bkt) { map 158 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_key_entry(map, entry, (void *)0) { map 166 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) { map 176 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), map 184 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__delete(map, k, &oldk, &oldv), map 193 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__size(map) != ELEM_CNT - found_cnt, map 195 tools/testing/selftests/bpf/test_hashmap.c found_cnt, hashmap__size(map))) map 197 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__capacity(map) != exp_cap(hashmap__size(map)), map 198 tools/testing/selftests/bpf/test_hashmap.c "unexpected map capacity: %zu\n", hashmap__capacity(map))) map 201 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_entry_safe(map, entry, tmp, bkt) { map 211 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), map 219 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__delete(map, k, &oldk, &oldv), map 229 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__size(map) != 0, map 231 tools/testing/selftests/bpf/test_hashmap.c found_cnt, hashmap__size(map))) map 235 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_entry(map, entry, bkt) { map 241 tools/testing/selftests/bpf/test_hashmap.c hashmap__free(map); map 242 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_entry(map, entry, bkt) { map 261 tools/testing/selftests/bpf/test_hashmap.c struct hashmap *map; map 268 tools/testing/selftests/bpf/test_hashmap.c map = hashmap__new(collision_hash_fn, equal_fn, NULL); map 269 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) map 277 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__append(map, k1, (void *)1); map 280 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__append(map, k1, (void *)2); map 283 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__append(map, k1, (void *)4); map 287 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__append(map, k2, (void *)8); map 290 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__append(map, k2, (void *)16); map 293 tools/testing/selftests/bpf/test_hashmap.c err = hashmap__append(map, k2, (void *)32); map 297 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__size(map) != 6, map 298 tools/testing/selftests/bpf/test_hashmap.c "invalid map size: %zu\n", hashmap__size(map))) map 303 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_entry(map, entry, bkt) { map 312 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_key_entry(map, entry, k1) { map 321 
tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_key_entry(map, entry, k2) { map 336 tools/testing/selftests/bpf/test_hashmap.c struct hashmap *map; map 342 tools/testing/selftests/bpf/test_hashmap.c map = hashmap__new(hash_fn, equal_fn, NULL); map 343 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(IS_ERR(map), "failed to create map: %ld\n", PTR_ERR(map))) map 346 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__size(map) != 0, map 347 tools/testing/selftests/bpf/test_hashmap.c "invalid map size: %zu\n", hashmap__size(map))) map 349 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__capacity(map) != 0, map 350 tools/testing/selftests/bpf/test_hashmap.c "invalid map capacity: %zu\n", hashmap__capacity(map))) map 352 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__find(map, k, NULL), "unexpected find\n")) map 354 tools/testing/selftests/bpf/test_hashmap.c if (CHECK(hashmap__delete(map, k, NULL, NULL), "unexpected delete\n")) map 357 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_entry(map, entry, bkt) { map 361 tools/testing/selftests/bpf/test_hashmap.c hashmap__for_each_key_entry(map, entry, k) { map 69 tools/testing/selftests/bpf/test_libbpf_open.c struct bpf_map *map; map 72 tools/testing/selftests/bpf/test_libbpf_open.c bpf_object__for_each_map(map, obj) { map 76 tools/testing/selftests/bpf/test_libbpf_open.c bpf_map__name(map)); map 215 tools/testing/selftests/bpf/test_lpm_map.c int r, map; map 236 tools/testing/selftests/bpf/test_lpm_map.c map = bpf_create_map(BPF_MAP_TYPE_LPM_TRIE, map 241 tools/testing/selftests/bpf/test_lpm_map.c assert(map >= 0); map 252 tools/testing/selftests/bpf/test_lpm_map.c r = bpf_map_update_elem(map, key, value, 0); map 264 tools/testing/selftests/bpf/test_lpm_map.c r = bpf_map_lookup_elem(map, key, value); map 288 tools/testing/selftests/bpf/test_lpm_map.c r = bpf_map_delete_elem(map, key); map 301 tools/testing/selftests/bpf/test_lpm_map.c r = bpf_map_lookup_elem(map, key, value); map 314 tools/testing/selftests/bpf/test_lpm_map.c close(map); map 46 tools/testing/selftests/bpf/test_lru_map.c struct bpf_create_map_attr map; map 67 tools/testing/selftests/bpf/test_lru_map.c memset(&map, 0, sizeof(map)); map 68 tools/testing/selftests/bpf/test_lru_map.c map.map_type = BPF_MAP_TYPE_ARRAY; map 69 tools/testing/selftests/bpf/test_lru_map.c map.key_size = sizeof(int); map 70 tools/testing/selftests/bpf/test_lru_map.c map.value_size = sizeof(unsigned long long); map 71 tools/testing/selftests/bpf/test_lru_map.c map.max_entries = 1; map 73 tools/testing/selftests/bpf/test_lru_map.c mfd = bpf_create_map_xattr(&map); map 1147 tools/testing/selftests/bpf/test_maps.c struct bpf_map *map; map 1160 tools/testing/selftests/bpf/test_maps.c map = bpf_object__find_map_by_name(obj, "mim_array"); map 1161 tools/testing/selftests/bpf/test_maps.c if (IS_ERR(map)) { map 1165 tools/testing/selftests/bpf/test_maps.c err = bpf_map__set_inner_map_fd(map, fd); map 1171 tools/testing/selftests/bpf/test_maps.c map = bpf_object__find_map_by_name(obj, "mim_hash"); map 1172 tools/testing/selftests/bpf/test_maps.c if (IS_ERR(map)) { map 1176 tools/testing/selftests/bpf/test_maps.c err = bpf_map__set_inner_map_fd(map, fd); map 1187 tools/testing/selftests/bpf/test_maps.c map = bpf_object__find_map_by_name(obj, "mim_array"); map 1188 tools/testing/selftests/bpf/test_maps.c if (IS_ERR(map)) { map 1192 tools/testing/selftests/bpf/test_maps.c mim_fd = bpf_map__fd(map); map 1204 tools/testing/selftests/bpf/test_maps.c map = 
bpf_object__find_map_by_name(obj, "mim_hash"); map 1205 tools/testing/selftests/bpf/test_maps.c if (IS_ERR(map)) { map 1209 tools/testing/selftests/bpf/test_maps.c mim_fd = bpf_map__fd(map); map 24 tools/testing/selftests/bpf/test_netcnt.c struct bpf_map *map; map 26 tools/testing/selftests/bpf/test_netcnt.c map = bpf_object__find_map_by_name(obj, name); map 27 tools/testing/selftests/bpf/test_netcnt.c if (!map) { map 31 tools/testing/selftests/bpf/test_netcnt.c return bpf_map__fd(map); map 186 tools/testing/selftests/bpf/test_progs.c struct bpf_map *map; map 188 tools/testing/selftests/bpf/test_progs.c map = bpf_object__find_map_by_name(obj, name); map 189 tools/testing/selftests/bpf/test_progs.c if (!map) { map 194 tools/testing/selftests/bpf/test_progs.c return bpf_map__fd(map); map 88 tools/testing/selftests/bpf/test_select_reuseport.c struct bpf_map *map; map 103 tools/testing/selftests/bpf/test_select_reuseport.c map = bpf_object__find_map_by_name(obj, "outer_map"); map 104 tools/testing/selftests/bpf/test_select_reuseport.c CHECK(!map, "find outer_map", "!map\n"); map 105 tools/testing/selftests/bpf/test_select_reuseport.c err = bpf_map__reuse_fd(map, outer_map); map 115 tools/testing/selftests/bpf/test_select_reuseport.c map = bpf_object__find_map_by_name(obj, "result_map"); map 116 tools/testing/selftests/bpf/test_select_reuseport.c CHECK(!map, "find result_map", "!map\n"); map 117 tools/testing/selftests/bpf/test_select_reuseport.c result_map = bpf_map__fd(map); map 121 tools/testing/selftests/bpf/test_select_reuseport.c map = bpf_object__find_map_by_name(obj, "tmp_index_ovr_map"); map 122 tools/testing/selftests/bpf/test_select_reuseport.c CHECK(!map, "find tmp_index_ovr_map", "!map\n"); map 123 tools/testing/selftests/bpf/test_select_reuseport.c tmp_index_ovr_map = bpf_map__fd(map); map 127 tools/testing/selftests/bpf/test_select_reuseport.c map = bpf_object__find_map_by_name(obj, "linum_map"); map 128 tools/testing/selftests/bpf/test_select_reuseport.c CHECK(!map, "find linum_map", "!map\n"); map 129 tools/testing/selftests/bpf/test_select_reuseport.c linum_map = bpf_map__fd(map); map 133 tools/testing/selftests/bpf/test_select_reuseport.c map = bpf_object__find_map_by_name(obj, "data_check_map"); map 134 tools/testing/selftests/bpf/test_select_reuseport.c CHECK(!map, "find data_check_map", "!map\n"); map 135 tools/testing/selftests/bpf/test_select_reuseport.c data_check_map = bpf_map__fd(map); map 422 tools/testing/selftests/bpf/test_sock_fields.c struct bpf_map *map; map 458 tools/testing/selftests/bpf/test_sock_fields.c map = bpf_object__find_map_by_name(obj, "addr_map"); map 459 tools/testing/selftests/bpf/test_sock_fields.c CHECK(!map, "cannot find addr_map", "(null)"); map 460 tools/testing/selftests/bpf/test_sock_fields.c addr_map_fd = bpf_map__fd(map); map 462 tools/testing/selftests/bpf/test_sock_fields.c map = bpf_object__find_map_by_name(obj, "sock_result_map"); map 463 tools/testing/selftests/bpf/test_sock_fields.c CHECK(!map, "cannot find sock_result_map", "(null)"); map 464 tools/testing/selftests/bpf/test_sock_fields.c sk_map_fd = bpf_map__fd(map); map 466 tools/testing/selftests/bpf/test_sock_fields.c map = bpf_object__find_map_by_name(obj, "tcp_sock_result_map"); map 467 tools/testing/selftests/bpf/test_sock_fields.c CHECK(!map, "cannot find tcp_sock_result_map", "(null)"); map 468 tools/testing/selftests/bpf/test_sock_fields.c tp_map_fd = bpf_map__fd(map); map 470 tools/testing/selftests/bpf/test_sock_fields.c map = bpf_object__find_map_by_name(obj, 
"linum_map"); map 471 tools/testing/selftests/bpf/test_sock_fields.c CHECK(!map, "cannot find linum_map", "(null)"); map 472 tools/testing/selftests/bpf/test_sock_fields.c linum_map_fd = bpf_map__fd(map); map 474 tools/testing/selftests/bpf/test_sock_fields.c map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt"); map 475 tools/testing/selftests/bpf/test_sock_fields.c CHECK(!map, "cannot find sk_pkt_out_cnt", "(null)"); map 476 tools/testing/selftests/bpf/test_sock_fields.c sk_pkt_out_cnt_fd = bpf_map__fd(map); map 478 tools/testing/selftests/bpf/test_sock_fields.c map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt10"); map 479 tools/testing/selftests/bpf/test_sock_fields.c CHECK(!map, "cannot find sk_pkt_out_cnt10", "(null)"); map 480 tools/testing/selftests/bpf/test_sock_fields.c sk_pkt_out_cnt10_fd = bpf_map__fd(map); map 92 tools/testing/selftests/bpf/test_socket_cookie.c static int validate_map(struct bpf_map *map, int client_fd) map 101 tools/testing/selftests/bpf/test_socket_cookie.c if (!map) { map 106 tools/testing/selftests/bpf/test_socket_cookie.c map_fd = bpf_map__fd(map); map 80 tools/testing/selftests/bpf/test_tcpbpf_user.c struct bpf_map *map; map 82 tools/testing/selftests/bpf/test_tcpbpf_user.c map = bpf_object__find_map_by_name(obj, name); map 83 tools/testing/selftests/bpf/test_tcpbpf_user.c if (!map) { map 87 tools/testing/selftests/bpf/test_tcpbpf_user.c return bpf_map__fd(map); map 99 tools/testing/selftests/bpf/xdping.c struct bpf_map *map; map 191 tools/testing/selftests/bpf/xdping.c map = bpf_map__next(NULL, obj); map 192 tools/testing/selftests/bpf/xdping.c if (map) map 193 tools/testing/selftests/bpf/xdping.c map_fd = bpf_map__fd(map); map 194 tools/testing/selftests/bpf/xdping.c if (!map || map_fd < 0) { map 95 tools/testing/selftests/powerpc/mm/subpage_prot.c unsigned int *map; map 99 tools/testing/selftests/powerpc/mm/subpage_prot.c map = malloc(pages * 4); map 100 tools/testing/selftests/powerpc/mm/subpage_prot.c assert(map); map 107 tools/testing/selftests/powerpc/mm/subpage_prot.c map[i] = (0x40000000 >> (((i + 1) * 2) % 32)) | map 111 tools/testing/selftests/powerpc/mm/subpage_prot.c err = syscall(__NR_subpage_prot, addr, size, map); map 116 tools/testing/selftests/powerpc/mm/subpage_prot.c free(map); map 85 tools/testing/selftests/powerpc/tm/tm-vmxcopy.c [map]"r"(a) map 24 tools/testing/selftests/vm/compaction_test.c void *map; map 165 tools/testing/selftests/vm/compaction_test.c void *map = NULL; map 196 tools/testing/selftests/vm/compaction_test.c map = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE, map 198 tools/testing/selftests/vm/compaction_test.c if (map == MAP_FAILED) map 203 tools/testing/selftests/vm/compaction_test.c munmap(map, MAP_SIZE); map 206 tools/testing/selftests/vm/compaction_test.c entry->map = map; map 214 tools/testing/selftests/vm/compaction_test.c *(unsigned long *)(map + i) = (unsigned long)map + i; map 220 tools/testing/selftests/vm/compaction_test.c munmap(entry->map, MAP_SIZE); map 181 tools/testing/selftests/vm/mlock2-tests.c static int unlock_lock_check(char *map) map 183 tools/testing/selftests/vm/mlock2-tests.c if (is_vmflag_set((unsigned long)map, LOCKED)) { map 193 tools/testing/selftests/vm/mlock2-tests.c char *map; map 197 tools/testing/selftests/vm/mlock2-tests.c map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, map 199 tools/testing/selftests/vm/mlock2-tests.c if (map == MAP_FAILED) { map 204 tools/testing/selftests/vm/mlock2-tests.c if (mlock2_(map, 2 * page_size, 0)) { map 213 
tools/testing/selftests/vm/mlock2-tests.c if (!lock_check((unsigned long)map)) map 217 tools/testing/selftests/vm/mlock2-tests.c if (munlock(map, 2 * page_size)) { map 222 tools/testing/selftests/vm/mlock2-tests.c ret = unlock_lock_check(map); map 225 tools/testing/selftests/vm/mlock2-tests.c munmap(map, 2 * page_size); map 230 tools/testing/selftests/vm/mlock2-tests.c static int onfault_check(char *map) map 232 tools/testing/selftests/vm/mlock2-tests.c *map = 'a'; map 233 tools/testing/selftests/vm/mlock2-tests.c if (!is_vma_lock_on_fault((unsigned long)map)) { map 241 tools/testing/selftests/vm/mlock2-tests.c static int unlock_onfault_check(char *map) map 245 tools/testing/selftests/vm/mlock2-tests.c if (is_vma_lock_on_fault((unsigned long)map) || map 246 tools/testing/selftests/vm/mlock2-tests.c is_vma_lock_on_fault((unsigned long)map + page_size)) { map 256 tools/testing/selftests/vm/mlock2-tests.c char *map; map 260 tools/testing/selftests/vm/mlock2-tests.c map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, map 262 tools/testing/selftests/vm/mlock2-tests.c if (map == MAP_FAILED) { map 267 tools/testing/selftests/vm/mlock2-tests.c if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { map 276 tools/testing/selftests/vm/mlock2-tests.c if (onfault_check(map)) map 280 tools/testing/selftests/vm/mlock2-tests.c if (munlock(map, 2 * page_size)) { map 289 tools/testing/selftests/vm/mlock2-tests.c ret = unlock_onfault_check(map); map 291 tools/testing/selftests/vm/mlock2-tests.c munmap(map, 2 * page_size); map 298 tools/testing/selftests/vm/mlock2-tests.c char *map; map 302 tools/testing/selftests/vm/mlock2-tests.c map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, map 304 tools/testing/selftests/vm/mlock2-tests.c if (map == MAP_FAILED) { map 309 tools/testing/selftests/vm/mlock2-tests.c *map = 'a'; map 311 tools/testing/selftests/vm/mlock2-tests.c if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { map 320 tools/testing/selftests/vm/mlock2-tests.c if (!is_vma_lock_on_fault((unsigned long)map) || map 321 tools/testing/selftests/vm/mlock2-tests.c !is_vma_lock_on_fault((unsigned long)map + page_size)) { map 327 tools/testing/selftests/vm/mlock2-tests.c munmap(map, 2 * page_size); map 334 tools/testing/selftests/vm/mlock2-tests.c char *map; map 338 tools/testing/selftests/vm/mlock2-tests.c map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, map 341 tools/testing/selftests/vm/mlock2-tests.c if (map == MAP_FAILED) { map 351 tools/testing/selftests/vm/mlock2-tests.c if (!lock_check((unsigned long)map)) map 359 tools/testing/selftests/vm/mlock2-tests.c if (unlock_lock_check(map)) map 362 tools/testing/selftests/vm/mlock2-tests.c munmap(map, 2 * page_size); map 364 tools/testing/selftests/vm/mlock2-tests.c map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, map 367 tools/testing/selftests/vm/mlock2-tests.c if (map == MAP_FAILED) { map 377 tools/testing/selftests/vm/mlock2-tests.c if (onfault_check(map)) map 385 tools/testing/selftests/vm/mlock2-tests.c if (unlock_onfault_check(map)) map 393 tools/testing/selftests/vm/mlock2-tests.c if (!lock_check((unsigned long)map)) map 401 tools/testing/selftests/vm/mlock2-tests.c ret = unlock_lock_check(map); map 404 tools/testing/selftests/vm/mlock2-tests.c munmap(map, 2 * page_size); map 413 tools/testing/selftests/vm/mlock2-tests.c void *map; map 419 tools/testing/selftests/vm/mlock2-tests.c map = mmap(NULL, 3 * page_size, PROT_READ | PROT_WRITE, map 421 tools/testing/selftests/vm/mlock2-tests.c if (map == MAP_FAILED) { map 426 
tools/testing/selftests/vm/mlock2-tests.c if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) { map 435 tools/testing/selftests/vm/mlock2-tests.c if (get_vm_area((unsigned long)map, &page1) || map 436 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size, &page2) || map 437 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size * 2, &page3)) { map 453 tools/testing/selftests/vm/mlock2-tests.c if (munlock(map + page_size, page_size)) { map 458 tools/testing/selftests/vm/mlock2-tests.c if (get_vm_area((unsigned long)map, &page1) || map 459 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size, &page2) || map 460 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size * 2, &page3)) { map 472 tools/testing/selftests/vm/mlock2-tests.c if (munlock(map, page_size * 3)) { map 477 tools/testing/selftests/vm/mlock2-tests.c if (get_vm_area((unsigned long)map, &page1) || map 478 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size, &page2) || map 479 tools/testing/selftests/vm/mlock2-tests.c get_vm_area((unsigned long)map + page_size * 2, &page3)) { map 492 tools/testing/selftests/vm/mlock2-tests.c munmap(map, 3 * page_size); map 17 tools/testing/selftests/vm/on-fault-limit.c void *map; map 29 tools/testing/selftests/vm/on-fault-limit.c map = mmap(NULL, 2 * lims.rlim_max, PROT_READ | PROT_WRITE, map 31 tools/testing/selftests/vm/on-fault-limit.c if (map != MAP_FAILED) map 35 tools/testing/selftests/vm/on-fault-limit.c munmap(map, 2 * lims.rlim_max); map 144 tools/testing/selftests/vm/thuge-gen.c char *map; map 149 tools/testing/selftests/vm/thuge-gen.c map = mmap(NULL, size*NUM_PAGES, PROT_READ|PROT_WRITE, map 152 tools/testing/selftests/vm/thuge-gen.c if (map == (char *)-1) err("mmap"); map 153 tools/testing/selftests/vm/thuge-gen.c memset(map, 0xff, size*NUM_PAGES); map 159 tools/testing/selftests/vm/thuge-gen.c err = munmap(map, size); map 179 tools/testing/selftests/vm/thuge-gen.c char *map = shmat(id, NULL, 0600); map 180 tools/testing/selftests/vm/thuge-gen.c if (map == (char*)-1) err("shmat"); map 184 tools/testing/selftests/vm/thuge-gen.c memset(map, 0xff, size*NUM_PAGES); map 191 tools/testing/selftests/vm/thuge-gen.c err = shmdt(map); map 64 tools/testing/selftests/vm/transhuge-stress.c uint8_t *map; map 99 tools/testing/selftests/vm/transhuge-stress.c map = malloc(map_len); map 100 tools/testing/selftests/vm/transhuge-stress.c if (!map) map 106 tools/testing/selftests/vm/transhuge-stress.c memset(map, 0, map_len); map 121 tools/testing/selftests/vm/transhuge-stress.c map = realloc(map, idx + 1); map 122 tools/testing/selftests/vm/transhuge-stress.c if (!map) map 124 tools/testing/selftests/vm/transhuge-stress.c memset(map + map_len, 0, idx + 1 - map_len); map 127 tools/testing/selftests/vm/transhuge-stress.c if (!map[idx]) map 129 tools/testing/selftests/vm/transhuge-stress.c map[idx] = 1; map 59 virt/kvm/arm/arch_timer.c static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map) map 62 virt/kvm/arm/arch_timer.c map->direct_vtimer = vcpu_vtimer(vcpu); map 63 virt/kvm/arm/arch_timer.c map->direct_ptimer = vcpu_ptimer(vcpu); map 64 virt/kvm/arm/arch_timer.c map->emul_ptimer = NULL; map 66 virt/kvm/arm/arch_timer.c map->direct_vtimer = vcpu_vtimer(vcpu); map 67 virt/kvm/arm/arch_timer.c map->direct_ptimer = NULL; map 68 virt/kvm/arm/arch_timer.c map->emul_ptimer = vcpu_ptimer(vcpu); map 71 virt/kvm/arm/arch_timer.c 
map 59 virt/kvm/arm/arch_timer.c static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
map 62 virt/kvm/arm/arch_timer.c map->direct_vtimer = vcpu_vtimer(vcpu);
map 63 virt/kvm/arm/arch_timer.c map->direct_ptimer = vcpu_ptimer(vcpu);
map 64 virt/kvm/arm/arch_timer.c map->emul_ptimer = NULL;
map 66 virt/kvm/arm/arch_timer.c map->direct_vtimer = vcpu_vtimer(vcpu);
map 67 virt/kvm/arm/arch_timer.c map->direct_ptimer = NULL;
map 68 virt/kvm/arm/arch_timer.c map->emul_ptimer = vcpu_ptimer(vcpu);
map 71 virt/kvm/arm/arch_timer.c trace_kvm_get_timer_map(vcpu->vcpu_id, map);
map 95 virt/kvm/arm/arch_timer.c struct timer_map map;
map 106 virt/kvm/arm/arch_timer.c get_timer_map(vcpu, &map);
map 109 virt/kvm/arm/arch_timer.c ctx = map.direct_vtimer;
map 111 virt/kvm/arm/arch_timer.c ctx = map.direct_ptimer;
map 267 virt/kvm/arm/arch_timer.c struct timer_map map;
map 269 virt/kvm/arm/arch_timer.c get_timer_map(vcpu, &map);
map 271 virt/kvm/arm/arch_timer.c return kvm_timer_should_fire(map.direct_vtimer) ||
map 272 virt/kvm/arm/arch_timer.c kvm_timer_should_fire(map.direct_ptimer) ||
map 273 virt/kvm/arm/arch_timer.c kvm_timer_should_fire(map.emul_ptimer);
map 389 virt/kvm/arm/arch_timer.c struct timer_map map;
map 391 virt/kvm/arm/arch_timer.c get_timer_map(vcpu, &map);
map 397 virt/kvm/arm/arch_timer.c if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
map 398 virt/kvm/arm/arch_timer.c !kvm_timer_irq_can_fire(map.direct_ptimer) &&
map 399 virt/kvm/arm/arch_timer.c !kvm_timer_irq_can_fire(map.emul_ptimer))
map 526 virt/kvm/arm/arch_timer.c struct timer_map map;
map 531 virt/kvm/arm/arch_timer.c get_timer_map(vcpu, &map);
map 534 virt/kvm/arm/arch_timer.c kvm_timer_vcpu_load_gic(map.direct_vtimer);
map 535 virt/kvm/arm/arch_timer.c if (map.direct_ptimer)
map 536 virt/kvm/arm/arch_timer.c kvm_timer_vcpu_load_gic(map.direct_ptimer);
map 541 virt/kvm/arm/arch_timer.c set_cntvoff(map.direct_vtimer->cntvoff);
map 545 virt/kvm/arm/arch_timer.c timer_restore_state(map.direct_vtimer);
map 546 virt/kvm/arm/arch_timer.c if (map.direct_ptimer)
map 547 virt/kvm/arm/arch_timer.c timer_restore_state(map.direct_ptimer);
map 549 virt/kvm/arm/arch_timer.c if (map.emul_ptimer)
map 550 virt/kvm/arm/arch_timer.c timer_emulate(map.emul_ptimer);
map 573 virt/kvm/arm/arch_timer.c struct timer_map map;
map 578 virt/kvm/arm/arch_timer.c get_timer_map(vcpu, &map);
map 580 virt/kvm/arm/arch_timer.c timer_save_state(map.direct_vtimer);
map 581 virt/kvm/arm/arch_timer.c if (map.direct_ptimer)
map 582 virt/kvm/arm/arch_timer.c timer_save_state(map.direct_ptimer);
map 593 virt/kvm/arm/arch_timer.c if (map.emul_ptimer)
map 594 virt/kvm/arm/arch_timer.c soft_timer_cancel(&map.emul_ptimer->hrtimer);
map 641 virt/kvm/arm/arch_timer.c struct timer_map map;
map 643 virt/kvm/arm/arch_timer.c get_timer_map(vcpu, &map);
map 659 virt/kvm/arm/arch_timer.c kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq);
map 660 virt/kvm/arm/arch_timer.c if (map.direct_ptimer)
map 661 virt/kvm/arm/arch_timer.c kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq);
map 665 virt/kvm/arm/arch_timer.c if (map.emul_ptimer)
map 666 virt/kvm/arm/arch_timer.c soft_timer_cancel(&map.emul_ptimer->hrtimer);
map 1042 virt/kvm/arm/arch_timer.c struct timer_map map;
map 1060 virt/kvm/arm/arch_timer.c get_timer_map(vcpu, &map);
map 1063 virt/kvm/arm/arch_timer.c map.direct_vtimer->host_timer_irq,
map 1064 virt/kvm/arm/arch_timer.c map.direct_vtimer->irq.irq,
map 1069 virt/kvm/arm/arch_timer.c if (map.direct_ptimer) {
map 1071 virt/kvm/arm/arch_timer.c map.direct_ptimer->host_timer_irq,
map 1072 virt/kvm/arm/arch_timer.c map.direct_ptimer->irq.irq,
map 267 virt/kvm/arm/trace.h TP_PROTO(unsigned long vcpu_id, struct timer_map *map),
map 268 virt/kvm/arm/trace.h TP_ARGS(vcpu_id, map),
map 279 virt/kvm/arm/trace.h __entry->direct_vtimer = arch_timer_ctx_index(map->direct_vtimer);
map 281 virt/kvm/arm/trace.h (map->direct_ptimer) ? arch_timer_ctx_index(map->direct_ptimer) : -1;
map 283 virt/kvm/arm/trace.h (map->emul_ptimer) ? arch_timer_ctx_index(map->emul_ptimer) : -1;
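The arch_timer.c and trace.h hits above all revolve around struct timer_map: get_timer_map() fills in which timers a vCPU handles directly and which physical timer has to be emulated, and every caller NULL-checks the optional slots before touching them. The standalone sketch below mirrors only that selection-and-guard pattern; the context type, the capability flag, and main() are illustrative stand-ins, not the kernel's definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct timer_ctx { const char *name; };	/* stand-in for the real timer context */

struct timer_map {
	struct timer_ctx *direct_vtimer;
	struct timer_ctx *direct_ptimer;
	struct timer_ctx *emul_ptimer;
};

static struct timer_ctx vtimer = { "vtimer" };
static struct timer_ctx ptimer = { "ptimer" };

/* 'direct_ptimer_possible' stands in for whatever capability check the
 * real get_timer_map() uses to pick between the two layouts. */
static void get_timer_map(struct timer_map *map, bool direct_ptimer_possible)
{
	if (direct_ptimer_possible) {
		map->direct_vtimer = &vtimer;
		map->direct_ptimer = &ptimer;
		map->emul_ptimer = NULL;
	} else {
		map->direct_vtimer = &vtimer;
		map->direct_ptimer = NULL;
		map->emul_ptimer = &ptimer;
	}
}

int main(void)
{
	struct timer_map map;

	get_timer_map(&map, false);

	/* Same guard style as the listed callers: optional slots may be NULL. */
	printf("direct vtimer: %s\n", map.direct_vtimer->name);
	if (map.direct_ptimer)
		printf("direct ptimer: %s\n", map.direct_ptimer->name);
	if (map.emul_ptimer)
		printf("emulated ptimer: %s\n", map.emul_ptimer->name);
	return 0;
}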
map 357 virt/kvm/arm/vgic/vgic-its.c struct its_vlpi_map map;
map 359 virt/kvm/arm/vgic/vgic-its.c ret = its_get_vlpi(irq->host_irq, &map);
map 363 virt/kvm/arm/vgic/vgic-its.c map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
map 365 virt/kvm/arm/vgic/vgic-its.c ret = its_map_vlpi(irq->host_irq, &map);
map 253 virt/kvm/arm/vgic/vgic-v4.c struct its_vlpi_map map;
map 281 virt/kvm/arm/vgic/vgic-v4.c map = (struct its_vlpi_map) {
map 291 virt/kvm/arm/vgic/vgic-v4.c ret = its_map_vlpi(virq, &map);
map 32 virt/kvm/irqchip.c hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
map 111 virt/kvm/irqchip.c hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
map 141 virt/kvm/irqchip.c hlist_for_each_entry(ei, &rt->map[gsi], link)
map 155 virt/kvm/irqchip.c hlist_add_head(&e->link, &rt->map[e->gsi]);
map 187 virt/kvm/irqchip.c new = kzalloc(struct_size(new, map, nr_rt_entries), GFP_KERNEL_ACCOUNT);
map 1844 virt/kvm/kvm_main.c struct kvm_host_map *map,
map 1854 virt/kvm/kvm_main.c if (!map)
map 1890 virt/kvm/kvm_main.c map->page = page;
map 1891 virt/kvm/kvm_main.c map->hva = hva;
map 1892 virt/kvm/kvm_main.c map->pfn = pfn;
map 1893 virt/kvm/kvm_main.c map->gfn = gfn;
map 1898 virt/kvm/kvm_main.c int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
map 1901 virt/kvm/kvm_main.c return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
map 1906 virt/kvm/kvm_main.c int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
map 1908 virt/kvm/kvm_main.c return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
map 1914 virt/kvm/kvm_main.c struct kvm_host_map *map,
map 1918 virt/kvm/kvm_main.c if (!map)
map 1921 virt/kvm/kvm_main.c if (!map->hva)
map 1924 virt/kvm/kvm_main.c if (map->page != KVM_UNMAPPED_PAGE) {
map 1926 virt/kvm/kvm_main.c kunmap_atomic(map->hva);
map 1928 virt/kvm/kvm_main.c kunmap(map->page);
map 1932 virt/kvm/kvm_main.c memunmap(map->hva);
map 1938 virt/kvm/kvm_main.c mark_page_dirty_in_slot(memslot, map->gfn);
map 1943 virt/kvm/kvm_main.c kvm_release_pfn(map->pfn, dirty, NULL);
map 1945 virt/kvm/kvm_main.c map->hva = NULL;
map 1946 virt/kvm/kvm_main.c map->page = NULL;
map 1949 virt/kvm/kvm_main.c int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
map 1952 virt/kvm/kvm_main.c __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
map 1958 virt/kvm/kvm_main.c void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
map 1960 virt/kvm/kvm_main.c __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,