mapping 15 arch/alpha/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 16 arch/alpha/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 44 arch/arc/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 45 arch/arc/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 839 arch/arc/mm/cache.c struct address_space *mapping;
mapping 847 arch/arc/mm/cache.c mapping = page_mapping_file(page);
mapping 848 arch/arc/mm/cache.c if (!mapping)
mapping 855 arch/arc/mm/cache.c if (!mapping_mapped(mapping)) {
mapping 318 arch/arm/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
mapping 319 arch/arm/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
mapping 16 arch/arm/include/asm/device.h struct dma_iommu_mapping *mapping;
mapping 31 arch/arm/include/asm/device.h #define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
mapping 30 arch/arm/include/asm/dma-iommu.h void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
mapping 33 arch/arm/include/asm/dma-iommu.h struct dma_iommu_mapping *mapping);
mapping 1138 arch/arm/mm/dma-mapping.c static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
mapping 1140 arch/arm/mm/dma-mapping.c static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
mapping 1146 arch/arm/mm/dma-mapping.c size_t mapping_size = mapping->bits << PAGE_SHIFT;
mapping 1157 arch/arm/mm/dma-mapping.c spin_lock_irqsave(&mapping->lock, flags);
mapping 1158 arch/arm/mm/dma-mapping.c for (i = 0; i < mapping->nr_bitmaps; i++) {
mapping 1159 arch/arm/mm/dma-mapping.c start = bitmap_find_next_zero_area(mapping->bitmaps[i],
mapping 1160 arch/arm/mm/dma-mapping.c mapping->bits, 0, count, align);
mapping 1162 arch/arm/mm/dma-mapping.c if (start > mapping->bits)
mapping 1165 arch/arm/mm/dma-mapping.c bitmap_set(mapping->bitmaps[i], start, count);
mapping 1174 arch/arm/mm/dma-mapping.c if (i == mapping->nr_bitmaps) {
mapping 1175 arch/arm/mm/dma-mapping.c if (extend_iommu_mapping(mapping)) {
mapping 1176 arch/arm/mm/dma-mapping.c spin_unlock_irqrestore(&mapping->lock, flags);
mapping 1180 arch/arm/mm/dma-mapping.c start = bitmap_find_next_zero_area(mapping->bitmaps[i],
mapping 1181 arch/arm/mm/dma-mapping.c mapping->bits, 0, count, align);
mapping 1183 arch/arm/mm/dma-mapping.c if (start > mapping->bits) {
mapping 1184 arch/arm/mm/dma-mapping.c spin_unlock_irqrestore(&mapping->lock, flags);
mapping 1188 arch/arm/mm/dma-mapping.c bitmap_set(mapping->bitmaps[i], start, count);
mapping 1190 arch/arm/mm/dma-mapping.c spin_unlock_irqrestore(&mapping->lock, flags);
mapping 1192 arch/arm/mm/dma-mapping.c iova = mapping->base + (mapping_size * i);
mapping 1198 arch/arm/mm/dma-mapping.c static inline void __free_iova(struct dma_iommu_mapping *mapping,
mapping 1202 arch/arm/mm/dma-mapping.c size_t mapping_size = mapping->bits << PAGE_SHIFT;
mapping 1210 arch/arm/mm/dma-mapping.c bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
mapping 1211 arch/arm/mm/dma-mapping.c BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
mapping 1213 arch/arm/mm/dma-mapping.c bitmap_base = mapping->base + mapping_size * bitmap_index;
mapping 1228 arch/arm/mm/dma-mapping.c spin_lock_irqsave(&mapping->lock, flags);
mapping 1229 arch/arm/mm/dma-mapping.c bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
mapping 1230 arch/arm/mm/dma-mapping.c spin_unlock_irqrestore(&mapping->lock, flags);
mapping 1352 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1357 arch/arm/mm/dma-mapping.c dma_addr = __alloc_iova(mapping, size);
mapping 1374 arch/arm/mm/dma-mapping.c ret = iommu_map(mapping->domain, iova, phys, len,
mapping 1383 arch/arm/mm/dma-mapping.c iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
mapping 1384 arch/arm/mm/dma-mapping.c __free_iova(mapping, dma_addr, size);
mapping 1390 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1399 arch/arm/mm/dma-mapping.c iommu_unmap(mapping->domain, iova, size);
mapping 1400 arch/arm/mm/dma-mapping.c __free_iova(mapping, iova, size);
mapping 1620 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1630 arch/arm/mm/dma-mapping.c iova_base = iova = __alloc_iova(mapping, size);
mapping 1643 arch/arm/mm/dma-mapping.c ret = iommu_map(mapping->domain, iova, phys, len, prot);
mapping 1653 arch/arm/mm/dma-mapping.c iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
mapping 1654 arch/arm/mm/dma-mapping.c __free_iova(mapping, iova_base, size);
mapping 1841 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1845 arch/arm/mm/dma-mapping.c dma_addr = __alloc_iova(mapping, len);
mapping 1851 arch/arm/mm/dma-mapping.c ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
mapping 1857 arch/arm/mm/dma-mapping.c __free_iova(mapping, dma_addr, len);
mapping 1893 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1901 arch/arm/mm/dma-mapping.c iommu_unmap(mapping->domain, iova, len);
mapping 1902 arch/arm/mm/dma-mapping.c __free_iova(mapping, iova, len);
mapping 1917 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1919 arch/arm/mm/dma-mapping.c struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
mapping 1929 arch/arm/mm/dma-mapping.c iommu_unmap(mapping->domain, iova, len);
mapping 1930 arch/arm/mm/dma-mapping.c __free_iova(mapping, iova, len);
mapping 1944 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1951 arch/arm/mm/dma-mapping.c dma_addr = __alloc_iova(mapping, len);
mapping 1957 arch/arm/mm/dma-mapping.c ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
mapping 1963 arch/arm/mm/dma-mapping.c __free_iova(mapping, dma_addr, len);
mapping 1978 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1986 arch/arm/mm/dma-mapping.c iommu_unmap(mapping->domain, iova, len);
mapping 1987 arch/arm/mm/dma-mapping.c __free_iova(mapping, iova, len);
mapping 1993 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 1995 arch/arm/mm/dma-mapping.c struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
mapping 2007 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 2009 arch/arm/mm/dma-mapping.c struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
mapping 2076 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping;
mapping 2092 arch/arm/mm/dma-mapping.c mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
mapping 2093 arch/arm/mm/dma-mapping.c if (!mapping)
mapping 2096 arch/arm/mm/dma-mapping.c mapping->bitmap_size = bitmap_size;
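The arch/arm/mm/dma-mapping.c entries above trace an IOVA allocator that carves the DMA window into fixed-size per-bitmap chunks and, when every bitmap is full, grows by allocating one more bitmap (extend_iommu_mapping). Below is a minimal userspace model of that scheme, simplified to single-page allocations with no locking or alignment handling; all names and sizes are illustrative, not the kernel's.

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT   12
#define BITS_PER_MAP 1024          /* pages covered by one bitmap (mapping->bits) */
#define MAX_EXT      4             /* maximum bitmaps (mapping->extensions) */

struct iova_mapping {
	uint64_t base;             /* first IOVA of the window (mapping->base) */
	unsigned int nr_bitmaps;   /* bitmaps allocated so far */
	uint8_t *bitmaps[MAX_EXT]; /* one bit per IOVA page */
};

/* grow by one zeroed bitmap, as extend_iommu_mapping() does */
static int extend(struct iova_mapping *m)
{
	if (m->nr_bitmaps >= MAX_EXT)
		return -1;
	m->bitmaps[m->nr_bitmaps] = calloc(BITS_PER_MAP / 8, 1);
	if (!m->bitmaps[m->nr_bitmaps])
		return -1;
	m->nr_bitmaps++;
	return 0;
}

/* find a clear bit, set it, return its index; -1 if the bitmap is full */
static int find_and_set(uint8_t *bm)
{
	for (unsigned int bit = 0; bit < BITS_PER_MAP; bit++) {
		if (!(bm[bit >> 3] & (1u << (bit & 7)))) {
			bm[bit >> 3] |= 1u << (bit & 7);
			return (int)bit;
		}
	}
	return -1;
}

/* returns 0 on exhaustion; a real allocator would return a sentinel */
static uint64_t alloc_iova(struct iova_mapping *m)
{
	uint64_t chunk = (uint64_t)BITS_PER_MAP << PAGE_SHIFT;
	int bit;

	for (unsigned int i = 0; i < m->nr_bitmaps; i++) {
		bit = find_and_set(m->bitmaps[i]);
		if (bit >= 0) /* bitmap i covers its own chunk of the window */
			return m->base + chunk * i + ((uint64_t)bit << PAGE_SHIFT);
	}
	/* every bitmap is full: grow the window and retry, like __alloc_iova() */
	if (extend(m))
		return 0;
	bit = find_and_set(m->bitmaps[m->nr_bitmaps - 1]);
	return m->base + chunk * (m->nr_bitmaps - 1) + ((uint64_t)bit << PAGE_SHIFT);
}

/* mirror of __free_iova(): recover the bitmap index from the address */
static void free_iova(struct iova_mapping *m, uint64_t addr)
{
	uint64_t chunk = (uint64_t)BITS_PER_MAP << PAGE_SHIFT;
	unsigned int idx = (addr - m->base) / chunk;
	unsigned int bit = ((addr - m->base) % chunk) >> PAGE_SHIFT;

	m->bitmaps[idx][bit >> 3] &= ~(1u << (bit & 7));
}

Making each bitmap responsible for a fixed, contiguous chunk of the window is what lets __free_iova() recover bitmap_index from the address with a single division, as seen in the entry for line 1210 above.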
mapping 2097 arch/arm/mm/dma-mapping.c mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
mapping 2099 arch/arm/mm/dma-mapping.c if (!mapping->bitmaps)
mapping 2102 arch/arm/mm/dma-mapping.c mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
mapping 2103 arch/arm/mm/dma-mapping.c if (!mapping->bitmaps[0])
mapping 2106 arch/arm/mm/dma-mapping.c mapping->nr_bitmaps = 1;
mapping 2107 arch/arm/mm/dma-mapping.c mapping->extensions = extensions;
mapping 2108 arch/arm/mm/dma-mapping.c mapping->base = base;
mapping 2109 arch/arm/mm/dma-mapping.c mapping->bits = BITS_PER_BYTE * bitmap_size;
mapping 2111 arch/arm/mm/dma-mapping.c spin_lock_init(&mapping->lock);
mapping 2113 arch/arm/mm/dma-mapping.c mapping->domain = iommu_domain_alloc(bus);
mapping 2114 arch/arm/mm/dma-mapping.c if (!mapping->domain)
mapping 2117 arch/arm/mm/dma-mapping.c kref_init(&mapping->kref);
mapping 2118 arch/arm/mm/dma-mapping.c return mapping;
mapping 2120 arch/arm/mm/dma-mapping.c kfree(mapping->bitmaps[0]);
mapping 2122 arch/arm/mm/dma-mapping.c kfree(mapping->bitmaps);
mapping 2124 arch/arm/mm/dma-mapping.c kfree(mapping);
mapping 2133 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping =
mapping 2136 arch/arm/mm/dma-mapping.c iommu_domain_free(mapping->domain);
mapping 2137 arch/arm/mm/dma-mapping.c for (i = 0; i < mapping->nr_bitmaps; i++)
mapping 2138 arch/arm/mm/dma-mapping.c kfree(mapping->bitmaps[i]);
mapping 2139 arch/arm/mm/dma-mapping.c kfree(mapping->bitmaps);
mapping 2140 arch/arm/mm/dma-mapping.c kfree(mapping);
mapping 2143 arch/arm/mm/dma-mapping.c static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
mapping 2147 arch/arm/mm/dma-mapping.c if (mapping->nr_bitmaps >= mapping->extensions)
mapping 2150 arch/arm/mm/dma-mapping.c next_bitmap = mapping->nr_bitmaps;
mapping 2151 arch/arm/mm/dma-mapping.c mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
mapping 2153 arch/arm/mm/dma-mapping.c if (!mapping->bitmaps[next_bitmap])
mapping 2156 arch/arm/mm/dma-mapping.c mapping->nr_bitmaps++;
mapping 2161 arch/arm/mm/dma-mapping.c void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
mapping 2163 arch/arm/mm/dma-mapping.c if (mapping)
mapping 2164 arch/arm/mm/dma-mapping.c kref_put(&mapping->kref, release_iommu_mapping);
mapping 2169 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping)
mapping 2173 arch/arm/mm/dma-mapping.c err = iommu_attach_device(mapping->domain, dev);
mapping 2177 arch/arm/mm/dma-mapping.c kref_get(&mapping->kref);
mapping 2178 arch/arm/mm/dma-mapping.c to_dma_iommu_mapping(dev) = mapping;
mapping 2198 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping)
mapping 2202 arch/arm/mm/dma-mapping.c err = __arm_iommu_attach_device(dev, mapping);
mapping 2220 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping;
mapping 2222 arch/arm/mm/dma-mapping.c mapping = to_dma_iommu_mapping(dev);
mapping 2223 arch/arm/mm/dma-mapping.c if (!mapping) {
mapping 2228 arch/arm/mm/dma-mapping.c iommu_detach_device(mapping->domain, dev);
mapping 2229 arch/arm/mm/dma-mapping.c kref_put(&mapping->kref, release_iommu_mapping);
mapping 2245 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping;
mapping 2250 arch/arm/mm/dma-mapping.c mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
mapping 2251 arch/arm/mm/dma-mapping.c if (IS_ERR(mapping)) {
mapping 2257 arch/arm/mm/dma-mapping.c if (__arm_iommu_attach_device(dev, mapping)) {
mapping 2260 arch/arm/mm/dma-mapping.c arm_iommu_release_mapping(mapping);
mapping 2269 arch/arm/mm/dma-mapping.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
mapping 2271 arch/arm/mm/dma-mapping.c if (!mapping)
mapping 2275 arch/arm/mm/dma-mapping.c arm_iommu_release_mapping(mapping);
mapping 129 arch/arm/mm/fault-armv.c make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
mapping 145 arch/arm/mm/fault-armv.c flush_dcache_mmap_lock(mapping);
mapping 146 arch/arm/mm/fault-armv.c vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mapping 159 arch/arm/mm/fault-armv.c flush_dcache_mmap_unlock(mapping);
mapping 181 arch/arm/mm/fault-armv.c struct address_space *mapping;
mapping 195 arch/arm/mm/fault-armv.c mapping = page_mapping_file(page);
mapping 197 arch/arm/mm/fault-armv.c __flush_dcache_page(mapping, page);
mapping 198 arch/arm/mm/fault-armv.c if (mapping) {
mapping 200 arch/arm/mm/fault-armv.c make_coherent(mapping, vma, addr, ptep, pfn);
mapping 199 arch/arm/mm/flush.c void __flush_dcache_page(struct address_space *mapping, struct page *page)
mapping 232 arch/arm/mm/flush.c if (mapping && cache_is_vipt_aliasing())
mapping 237 arch/arm/mm/flush.c static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
mapping 251 arch/arm/mm/flush.c flush_dcache_mmap_lock(mapping);
mapping 252 arch/arm/mm/flush.c vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mapping 265 arch/arm/mm/flush.c flush_dcache_mmap_unlock(mapping);
mapping 273 arch/arm/mm/flush.c struct address_space *mapping;
mapping 284 arch/arm/mm/flush.c mapping = page_mapping_file(page);
mapping 286 arch/arm/mm/flush.c mapping = NULL;
mapping 289 arch/arm/mm/flush.c __flush_dcache_page(mapping, page);
mapping 317 arch/arm/mm/flush.c struct address_space *mapping;
mapping 332 arch/arm/mm/flush.c mapping = page_mapping_file(page);
mapping 335 arch/arm/mm/flush.c mapping && !page_mapcount(page))
mapping 338 arch/arm/mm/flush.c __flush_dcache_page(mapping, page);
mapping 339 arch/arm/mm/flush.c if (mapping && cache_is_vivt())
mapping 340 arch/arm/mm/flush.c __flush_dcache_aliases(mapping, page);
mapping 341 arch/arm/mm/flush.c else if (mapping)
mapping 360 arch/arm/mm/flush.c struct address_space *mapping;
mapping 362 arch/arm/mm/flush.c mapping = page_mapping_file(page);
mapping 364 arch/arm/mm/flush.c if (!mapping || mapping_mapped(mapping)) {
mapping 54 arch/arm/mm/mm.h extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
mapping 157 arch/arm64/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 158 arch/arm64/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 31 arch/c6x/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do {} while (0)
mapping 32 arch/c6x/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do {} while (0)
mapping 176 arch/c6x/platforms/megamod-pic.c int *mapping, int size)
mapping 192 arch/c6x/platforms/megamod-pic.c mapping[i] = val;
mapping 202 arch/c6x/platforms/megamod-pic.c int mapping[NR_MUX_OUTPUTS];
mapping 230 arch/c6x/platforms/megamod-pic.c for (i = 0; i < ARRAY_SIZE(mapping); i++)
mapping 231 arch/c6x/platforms/megamod-pic.c mapping[i] = IRQ_UNMAPPED;
mapping 233 arch/c6x/platforms/megamod-pic.c parse_priority_map(pic, mapping, ARRAY_SIZE(mapping));
mapping 271 arch/c6x/platforms/megamod-pic.c mapping[hwirq - 4] = i;
mapping 289 arch/c6x/platforms/megamod-pic.c if (mapping[i] != IRQ_UNMAPPED) {
mapping 291 arch/c6x/platforms/megamod-pic.c np, mapping[i], i + 4);
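A recurring shape across the flush_dcache_page() implementations indexed here (arc, arm, mips, nios2, parisc, sh, sparc, xtensa): look up the page's file mapping, and if no process currently maps it, only mark the page dirty and defer the cache flush; the alias walk over user mappings is needed only when the page is actually visible to userspace. A toy model of that decision follows, with hypothetical stand-in types for struct page and struct address_space; the interval-tree walk is reduced to a stub.

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for struct address_space / struct page */
struct address_space { bool mapped; /* models mapping_mapped(): any vmas? */ };
struct page {
	struct address_space *mapping; /* page-cache mapping, if any */
	bool dcache_dirty;             /* models the PG_dcache_dirty bit */
};

static void flush_aliases(struct page *page)
{
	/* stands in for the vma_interval_tree_foreach() walk performed
	 * under flush_dcache_mmap_lock() in the entries above */
	printf("flushing user aliases\n");
	page->dcache_dirty = false;
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping && !mapping->mapped) {
		/* no user mappings yet: defer until the page is mapped */
		page->dcache_dirty = true;
		return;
	}
	flush_aliases(page);
}

On architectures without aliasing caches, flush_dcache_mmap_lock() compiles to the empty do { } while (0) seen in most of the header entries; the aliasing ports (arm, csky abiv1, nds32, nios2, parisc) take xa_lock_irq() on the mapping's i_pages instead.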
mapping 292 arch/c6x/platforms/megamod-pic.c set_megamod_mux(pic, mapping[i], i);
mapping 18 arch/csky/abiv1/cacheflush.c struct address_space *mapping;
mapping 23 arch/csky/abiv1/cacheflush.c mapping = page_mapping_file(page);
mapping 25 arch/csky/abiv1/cacheflush.c if (mapping && !page_mapcount(page))
mapping 29 arch/csky/abiv1/cacheflush.c if (mapping)
mapping 60 arch/csky/abiv1/cacheflush.c struct address_space *mapping;
mapping 62 arch/csky/abiv1/cacheflush.c mapping = page_mapping_file(page);
mapping 64 arch/csky/abiv1/cacheflush.c if (!mapping || mapping_mapped(mapping))
mapping 21 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
mapping 22 arch/csky/abiv1/inc/abi/cacheflush.h #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
mapping 26 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 27 arch/csky/abiv2/inc/abi/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 35 arch/hexagon/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 36 arch/hexagon/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 35 arch/ia64/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 36 arch/ia64/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 1782 arch/ia64/include/asm/pal.h ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
mapping 1790 arch/ia64/include/asm/pal.h mapping->overview.overview_data = iprv.v0;
mapping 1791 arch/ia64/include/asm/pal.h mapping->ppli1.ppli1_data = iprv.v1;
mapping 1792 arch/ia64/include/asm/pal.h mapping->ppli2.ppli2_data = iprv.v2;
mapping 253 arch/m68k/include/asm/cacheflush_mm.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 254 arch/m68k/include/asm/cacheflush_mm.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 19 arch/m68k/include/asm/cacheflush_no.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 20 arch/m68k/include/asm/cacheflush_no.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 84 arch/microblaze/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 85 arch/microblaze/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 64 arch/mips/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 65 arch/mips/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 38 arch/mips/include/asm/mach-rc32434/pci.h u32 mapping; /* mapping. */
mapping 35 arch/mips/include/asm/vdso.h struct vm_special_mapping mapping;
mapping 53 arch/mips/kernel/vdso.c image->mapping.pages[i] = pfn_to_page(data_pfn + i);
mapping 176 arch/mips/kernel/vdso.c &image->mapping);
mapping 86 arch/mips/mm/cache.c struct address_space *mapping = page_mapping_file(page);
mapping 89 arch/mips/mm/cache.c if (mapping && !mapping_mapped(mapping)) {
mapping 155 arch/mips/pci/pci-rc32434.c rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START);
mapping 163 arch/mips/pci/pci-rc32434.c rc32434_pci->pcilba[1].mapping = 0x60000000;
mapping 170 arch/mips/pci/pci-rc32434.c rc32434_pci->pcilba[2].mapping = 0x18FFFFFF;
mapping 179 arch/mips/pci/pci-rc32434.c rc32434_pci->pcilba[3].mapping = 0x18800000;
mapping 43 arch/nds32/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
mapping 44 arch/nds32/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
mapping 72 arch/nds32/kernel/perf_event_cpu.c int mapping;
mapping 77 arch/nds32/kernel/perf_event_cpu.c mapping = (*event_map)[config];
mapping 78 arch/nds32/kernel/perf_event_cpu.c return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
mapping 800 arch/nds32/kernel/perf_event_cpu.c int mapping;
mapping 802 arch/nds32/kernel/perf_event_cpu.c mapping = nds32_pmu->map_event(event);
mapping 804 arch/nds32/kernel/perf_event_cpu.c if (mapping < 0) {
mapping 807 arch/nds32/kernel/perf_event_cpu.c return mapping;
mapping 835 arch/nds32/kernel/perf_event_cpu.c hwc->config_base |= (unsigned long)mapping;
mapping 240 arch/nds32/mm/cacheflush.c struct address_space *mapping;
mapping 242 arch/nds32/mm/cacheflush.c mapping = page_mapping(page);
mapping 243 arch/nds32/mm/cacheflush.c if (mapping && !mapping_mapped(mapping))
mapping 251 arch/nds32/mm/cacheflush.c if (mapping) {
mapping 49 arch/nios2/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
mapping 50 arch/nios2/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
mapping 73 arch/nios2/mm/cacheflush.c static void flush_aliases(struct address_space *mapping, struct page *page)
mapping 81 arch/nios2/mm/cacheflush.c flush_dcache_mmap_lock(mapping);
mapping 82 arch/nios2/mm/cacheflush.c vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mapping 94 arch/nios2/mm/cacheflush.c flush_dcache_mmap_unlock(mapping);
mapping 160 arch/nios2/mm/cacheflush.c void __flush_dcache_page(struct address_space *mapping, struct page *page)
mapping 174 arch/nios2/mm/cacheflush.c struct address_space *mapping;
mapping 183 arch/nios2/mm/cacheflush.c mapping = page_mapping_file(page);
mapping 186 arch/nios2/mm/cacheflush.c if (mapping && !mapping_mapped(mapping)) {
mapping 189 arch/nios2/mm/cacheflush.c __flush_dcache_page(mapping, page);
mapping 190 arch/nios2/mm/cacheflush.c if (mapping) {
mapping 192 arch/nios2/mm/cacheflush.c flush_aliases(mapping, page);
mapping 206 arch/nios2/mm/cacheflush.c struct address_space *mapping;
mapping 221 arch/nios2/mm/cacheflush.c mapping = page_mapping_file(page);
mapping 223 arch/nios2/mm/cacheflush.c __flush_dcache_page(mapping, page);
mapping 225 arch/nios2/mm/cacheflush.c if(mapping)
mapping 227 arch/nios2/mm/cacheflush.c flush_aliases(mapping, page);
mapping 74 arch/openrisc/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 75 arch/openrisc/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
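The arch/nds32/kernel/perf_event_cpu.c entries above show a common PMU event-translation idiom: a generic config index selects an entry from a table whose unsupported slots hold a sentinel value, and the caller only needs the "negative means unsupported" convention before ORing the result into the hardware config. A self-contained sketch, with table contents invented for illustration:

#include <errno.h>
#include <stdio.h>

#define HW_OP_UNSUPPORTED 0x0 /* sentinel; value chosen for this sketch */

/* hypothetical hardware event codes, indexed by generic event id */
static const int event_map[] = {
	[0] = 0x11, /* cycles */
	[1] = 0x12, /* instructions */
	[2] = HW_OP_UNSUPPORTED,
};

static int map_event(unsigned int config)
{
	if (config >= sizeof(event_map) / sizeof(event_map[0]))
		return -ENOENT;
	int mapping = event_map[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

int main(void)
{
	int mapping = map_event(2);
	if (mapping < 0) { /* same shape as the nds32 caller at line 804 */
		fprintf(stderr, "event not supported\n");
		return 1;
	}
	printf("config_base |= %#x\n", mapping);
	return 0;
}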
mapping 58 arch/parisc/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
mapping 59 arch/parisc/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
mapping 327 arch/parisc/kernel/cache.c struct address_space *mapping = page_mapping_file(page);
mapping 333 arch/parisc/kernel/cache.c if (mapping && !mapping_mapped(mapping)) {
mapping 340 arch/parisc/kernel/cache.c if (!mapping)
mapping 350 arch/parisc/kernel/cache.c flush_dcache_mmap_lock(mapping);
mapping 351 arch/parisc/kernel/cache.c vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
mapping 373 arch/parisc/kernel/cache.c flush_dcache_mmap_unlock(mapping);
mapping 42 arch/powerpc/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 43 arch/powerpc/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 857 arch/powerpc/kernel/iommu.c dma_addr_t mapping;
mapping 889 arch/powerpc/kernel/iommu.c mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
mapping 891 arch/powerpc/kernel/iommu.c if (mapping == DMA_MAPPING_ERROR) {
mapping 895 arch/powerpc/kernel/iommu.c *dma_handle = mapping;
mapping 139 arch/powerpc/kvm/book3s_xive.h struct address_space *mapping;
mapping 215 arch/powerpc/kvm/book3s_xive_native.c if (xive->mapping)
mapping 216 arch/powerpc/kvm/book3s_xive_native.c unmap_mapping_range(xive->mapping,
mapping 327 arch/powerpc/kvm/book3s_xive_native.c xive->mapping = vma->vm_file->f_mapping;
mapping 1017 arch/powerpc/kvm/book3s_xive_native.c xive->mapping = NULL;
mapping 328 arch/powerpc/platforms/pseries/papr_scm.c struct nd_mapping_desc mapping;
mapping 364 arch/powerpc/platforms/pseries/papr_scm.c memset(&mapping, 0, sizeof(mapping));
mapping 365 arch/powerpc/platforms/pseries/papr_scm.c mapping.nvdimm = p->nvdimm;
mapping 366 arch/powerpc/platforms/pseries/papr_scm.c mapping.start = 0;
mapping 367 arch/powerpc/platforms/pseries/papr_scm.c mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
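The papr_scm.c entry for line 367 carries the source's own worry ("XXX: potential overflow?") about mapping.size = p->blocks * p->block_size. One way to make that concern concrete is to check the multiplication before trusting the product, for example with GCC/Clang's __builtin_mul_overflow (the kernel wraps the same idea as check_mul_overflow() in include/linux/overflow.h). A hedged sketch with invented operand values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t blocks = UINT64_C(1) << 40;     /* invented p->blocks */
	uint64_t block_size = UINT64_C(1) << 30; /* invented p->block_size */
	uint64_t size;

	/* __builtin_mul_overflow() returns nonzero if the product wrapped */
	if (__builtin_mul_overflow(blocks, block_size, &size)) {
		fprintf(stderr, "region size overflows u64\n");
		return 1;
	}
	printf("mapping.size = %llu\n", (unsigned long long)size);
	return 0;
}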
mapping 378 arch/powerpc/platforms/pseries/papr_scm.c ndr_desc.mapping = &mapping;
mapping 41 arch/riscv/include/asm/cacheflush.h static inline void flush_dcache_mmap_lock(struct address_space *mapping)
mapping 45 arch/riscv/include/asm/cacheflush.h static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
mapping 93 arch/sh/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 94 arch/sh/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 320 arch/sh/kernel/cpu/sh4/sq.c __ATTR(mapping, 0644, mapping_show, mapping_store);
mapping 115 arch/sh/mm/cache-sh4.c struct address_space *mapping = page_mapping_file(page);
mapping 117 arch/sh/mm/cache-sh4.c if (mapping && !mapping_mapped(mapping))
mapping 139 arch/sh/mm/cache-sh7705.c struct address_space *mapping = page_mapping_file(page);
mapping 141 arch/sh/mm/cache-sh7705.c if (mapping && !mapping_mapped(mapping))
mapping 44 arch/sparc/include/asm/cacheflush_32.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 45 arch/sparc/include/asm/cacheflush_32.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 72 arch/sparc/include/asm/cacheflush_64.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 73 arch/sparc/include/asm/cacheflush_64.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 477 arch/sparc/mm/init_64.c struct address_space *mapping;
mapping 492 arch/sparc/mm/init_64.c mapping = page_mapping_file(page);
mapping 493 arch/sparc/mm/init_64.c if (mapping && !mapping_mapped(mapping)) {
mapping 120 arch/sparc/mm/tlb.c struct address_space *mapping;
mapping 131 arch/sparc/mm/tlb.c mapping = page_mapping_file(page);
mapping 132 arch/sparc/mm/tlb.c if (!mapping)
mapping 170 arch/unicore32/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 171 arch/unicore32/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 58 arch/unicore32/mm/flush.c void __flush_dcache_page(struct address_space *mapping, struct page *page)
mapping 74 arch/unicore32/mm/flush.c struct address_space *mapping;
mapping 83 arch/unicore32/mm/flush.c mapping = page_mapping_file(page);
mapping 85 arch/unicore32/mm/flush.c if (mapping && !mapping_mapped(mapping))
mapping 88 arch/unicore32/mm/flush.c __flush_dcache_page(mapping, page);
mapping 89 arch/unicore32/mm/flush.c if (mapping)
mapping 493 arch/unicore32/mm/mmu.c struct address_space *mapping;
mapping 507 arch/unicore32/mm/mmu.c mapping = page_mapping_file(page);
mapping 509 arch/unicore32/mm/mmu.c __flush_dcache_page(mapping, page);
mapping 510 arch/unicore32/mm/mmu.c if (mapping)
mapping 143 arch/x86/kernel/jailhouse.c void *mapping;
mapping 162 arch/x86/kernel/jailhouse.c mapping = early_memremap(pa_data, sizeof(header));
mapping 163 arch/x86/kernel/jailhouse.c memcpy(&header, mapping, sizeof(header));
mapping 164 arch/x86/kernel/jailhouse.c early_memunmap(mapping, sizeof(header));
mapping 170 arch/x86/kernel/jailhouse.c mapping = early_memremap(pa_data, sizeof(setup_data));
mapping 171 arch/x86/kernel/jailhouse.c memcpy(&setup_data, mapping, sizeof(setup_data));
mapping 172 arch/x86/kernel/jailhouse.c early_memunmap(mapping, sizeof(setup_data));
mapping 422 arch/x86/kernel/pci-calgary_64.c dma_addr_t mapping;
mapping 437 arch/x86/kernel/pci-calgary_64.c mapping = iommu_alloc(dev, tbl, ret, npages, DMA_BIDIRECTIONAL);
mapping 438 arch/x86/kernel/pci-calgary_64.c if (mapping == DMA_MAPPING_ERROR)
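The powerpc iommu.c entries earlier and the pci-calgary_64.c entries just above end the same way: the allocator hands back a dma_addr_t in-band, so failure has to be signalled by a reserved sentinel value (DMA_MAPPING_ERROR, all-ones in the kernel) rather than a NULL pointer. A minimal model of the convention; the allocator internals here are invented:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;
#define DMA_MAPPING_ERROR (~(dma_addr_t)0) /* same all-ones sentinel */

/* toy allocator: hands out bus addresses, sentinel on bad requests */
static dma_addr_t iommu_alloc_model(size_t npages)
{
	static uint64_t next = 0x80000000;

	if (npages == 0 || npages > 1024)
		return DMA_MAPPING_ERROR;
	dma_addr_t ret = next;
	next += (uint64_t)npages << 12;
	return ret;
}

int main(void)
{
	dma_addr_t mapping = iommu_alloc_model(4);
	if (mapping == DMA_MAPPING_ERROR) /* the check at calgary line 438 */
		return 1;
	printf("*dma_handle = %#llx\n", (unsigned long long)mapping);
	return 0;
}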
mapping 440 arch/x86/kernel/pci-calgary_64.c *dma_handle = mapping;
mapping 1826 arch/x86/xen/mmu_pv.c struct xen_machphys_mapping mapping;
mapping 1828 arch/x86/xen/mmu_pv.c if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
mapping 1829 arch/x86/xen/mmu_pv.c machine_to_phys_mapping = (unsigned long *)mapping.v_start;
mapping 1830 arch/x86/xen/mmu_pv.c machine_to_phys_nr = mapping.max_mfn + 1;
mapping 158 arch/xtensa/include/asm/cacheflush.h #define flush_dcache_mmap_lock(mapping) do { } while (0)
mapping 159 arch/xtensa/include/asm/cacheflush.h #define flush_dcache_mmap_unlock(mapping) do { } while (0)
mapping 130 arch/xtensa/mm/cache.c struct address_space *mapping = page_mapping_file(page);
mapping 138 arch/xtensa/mm/cache.c if (mapping && !mapping_mapped(mapping)) {
mapping 157 arch/xtensa/mm/cache.c if (!alias && !mapping)
mapping 168 arch/xtensa/mm/cache.c if (mapping)
mapping 207 block/ioctl.c struct address_space *mapping = bdev->bd_inode->i_mapping;
mapping 229 block/ioctl.c truncate_inode_pages_range(mapping, start, start + len - 1);
mapping 238 block/ioctl.c struct address_space *mapping;
mapping 261 block/ioctl.c mapping = bdev->bd_inode->i_mapping;
mapping 262 block/ioctl.c truncate_inode_pages_range(mapping, start, end);
mapping 664 block/partition-generic.c struct address_space *mapping = bdev->bd_inode->i_mapping;
mapping 667 block/partition-generic.c page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), NULL);
mapping 2215 drivers/acpi/nfit/core.c } mapping[0];
mapping 2226 drivers/acpi/nfit/core.c } mapping[0];
mapping 2312 drivers/acpi/nfit/core.c struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
mapping 2313 drivers/acpi/nfit/core.c struct nfit_set_info_map *map = &info->mapping[i];
mapping 2314 drivers/acpi/nfit/core.c struct nfit_set_info_map2 *map2 = &info2->mapping[i];
mapping 2315 drivers/acpi/nfit/core.c struct nvdimm *nvdimm = mapping->nvdimm;
mapping 2337 drivers/acpi/nfit/core.c sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
mapping 2342 drivers/acpi/nfit/core.c sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
mapping 2347 drivers/acpi/nfit/core.c sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
mapping 2353 drivers/acpi/nfit/core.c struct nfit_set_info_map2 *map2 = &info2->mapping[i];
mapping 2357 drivers/acpi/nfit/core.c struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
mapping 2358 drivers/acpi/nfit/core.c struct nvdimm *nvdimm = mapping->nvdimm;
mapping 2367 drivers/acpi/nfit/core.c mapping->position = i;
mapping 2855 drivers/acpi/nfit/core.c struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
mapping 2872 drivers/acpi/nfit/core.c mapping->nvdimm = nvdimm;
mapping 2876 drivers/acpi/nfit/core.c mapping->start = memdev->address;
mapping 2877 drivers/acpi/nfit/core.c mapping->size = memdev->region_size;
mapping 2887 drivers/acpi/nfit/core.c mapping->size = nfit_mem->bdw->capacity;
mapping 2888 drivers/acpi/nfit/core.c mapping->start = nfit_mem->bdw->start_address;
mapping 2890 drivers/acpi/nfit/core.c ndr_desc->mapping = mapping;
mapping 2974 drivers/acpi/nfit/core.c struct nd_mapping_desc *mapping;
mapping 2983 drivers/acpi/nfit/core.c mapping = &mappings[count++];
mapping 2984 drivers/acpi/nfit/core.c rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
mapping 2990 drivers/acpi/nfit/core.c ndr_desc->mapping = mappings;
mapping 773 drivers/atm/he.c dma_addr_t mapping;
mapping 821 drivers/atm/he.c heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_KERNEL, &mapping);
mapping 824 drivers/atm/he.c heb->mapping = mapping;
mapping 831 drivers/atm/he.c he_dev->rbpl_base[i].phys = mapping + offsetof(struct he_buff, data);
mapping 895 drivers/atm/he.c dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
mapping 1574 drivers/atm/he.c dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
mapping 1612 drivers/atm/he.c dma_addr_t mapping;
mapping 1614 drivers/atm/he.c tpd = dma_pool_alloc(he_dev->tpd_pool, GFP_ATOMIC, &mapping);
mapping 1618 drivers/atm/he.c tpd->status = TPD_ADDR(mapping);
mapping 1685 drivers/atm/he.c dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
mapping 1778 drivers/atm/he.c dma_pool_free(he_dev->rbpl_pool, heb, heb->mapping);
mapping 1886 drivers/atm/he.c dma_addr_t mapping;
mapping 1909 drivers/atm/he.c heb = dma_pool_alloc(he_dev->rbpl_pool, GFP_ATOMIC, &mapping);
mapping 1912 drivers/atm/he.c heb->mapping = mapping;
mapping 1917 drivers/atm/he.c new_tail->phys = mapping + offsetof(struct he_buff, data);
mapping 225 drivers/atm/he.h dma_addr_t mapping;
mapping 173 drivers/block/loop.c struct address_space *mapping = file->f_mapping;
mapping 174 drivers/block/loop.c struct inode *inode = mapping->host;
mapping 197 drivers/block/loop.c mapping->a_ops->direct_IO &&
mapping 956 drivers/block/loop.c struct address_space *mapping;
mapping 995 drivers/block/loop.c mapping = file->f_mapping;
mapping 996 drivers/block/loop.c inode = mapping->host;
mapping 1021 drivers/block/loop.c lo->old_gfp_mask = mapping_gfp_mask(mapping);
mapping 1022 drivers/block/loop.c mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
mapping 450 drivers/block/rbd.c struct rbd_mapping mapping;
mapping 1340 drivers/block/rbd.c rbd_dev->mapping.size = size;
mapping 1341 drivers/block/rbd.c rbd_dev->mapping.features = features;
mapping 1348 drivers/block/rbd.c rbd_dev->mapping.size = 0;
mapping 1349 drivers/block/rbd.c rbd_dev->mapping.features = 0;
mapping 1975 drivers/block/rbd.c rbd_dev->mapping.size);
mapping 4863 drivers/block/rbd.c mapping_size = rbd_dev->mapping.size;
mapping 5062 drivers/block/rbd.c size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
mapping 5075 drivers/block/rbd.c mapping_size = rbd_dev->mapping.size;
mapping 5092 drivers/block/rbd.c rbd_dev->mapping.size = rbd_dev->header.image_size;
mapping 5100 drivers/block/rbd.c if (!ret && mapping_size != rbd_dev->mapping.size)
mapping 5215 drivers/block/rbd.c (unsigned long long)rbd_dev->mapping.size);
mapping 5228 drivers/block/rbd.c (unsigned long long)rbd_dev->mapping.features);
mapping 6895 drivers/block/rbd.c set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
mapping 450 drivers/block/zram/zram_drv.c struct address_space *mapping;
mapping 481 drivers/block/zram/zram_drv.c mapping = backing_dev->f_mapping;
mapping 482 drivers/block/zram/zram_drv.c inode = mapping->host;
mapping 263 drivers/dax/device.c if (page->mapping)
mapping 265 drivers/dax/device.c page->mapping = filp->f_mapping;
mapping 1256 drivers/gpu/drm/amd/amdgpu/amdgpu.h struct amdgpu_bo_va_mapping **mapping);
mapping 1718 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c struct amdgpu_bo_va_mapping *mapping;
mapping 1723 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
mapping 1724 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
mapping 1727 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c *bo = mapping->bo_va->base.bo;
mapping 1728 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c *map = mapping;
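In the drivers/atm/he.c entries above, every buffer popped from a DMA pool immediately stores its own bus address (heb->mapping = mapping), so the eventual dma_pool_free() can be given both the CPU pointer and the DMA handle. A userspace model of that idiom, with malloc standing in for dma_pool_alloc and a fabricated bus address:

#include <stdint.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;

struct he_buf {
	dma_addr_t mapping; /* bus address, kept inside the buffer itself */
	char data[2048];
};

/* models dma_pool_alloc(pool, flags, &mapping) */
static struct he_buf *pool_alloc(dma_addr_t *mapping)
{
	struct he_buf *b = malloc(sizeof(*b));
	if (b)
		*mapping = (dma_addr_t)(uintptr_t)b; /* fake bus address */
	return b;
}

/* models dma_pool_free(pool, cpu_addr, dma_handle) */
static void pool_free(struct he_buf *b, dma_addr_t mapping)
{
	(void)mapping; /* a real pool needs the handle to locate the block */
	free(b);
}

int main(void)
{
	dma_addr_t mapping;
	struct he_buf *heb = pool_alloc(&mapping);
	if (!heb)
		return 1;
	heb->mapping = mapping;       /* the he.c pattern at lines 824/1912 */
	pool_free(heb, heb->mapping); /* the handle travels with the buffer */
	return 0;
}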
mapping 245 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h struct amdgpu_bo_va_mapping *mapping),
mapping 246 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_ARGS(bo_va, mapping),
mapping 257 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->start = mapping->start;
mapping 258 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->last = mapping->last;
mapping 259 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->offset = mapping->offset;
mapping 260 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->flags = mapping->flags;
mapping 269 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h struct amdgpu_bo_va_mapping *mapping),
mapping 270 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_ARGS(bo_va, mapping),
mapping 281 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->start = mapping->start;
mapping 282 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->last = mapping->last;
mapping 283 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->offset = mapping->offset;
mapping 284 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->flags = mapping->flags;
mapping 292 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
mapping 293 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_ARGS(mapping),
mapping 301 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->soffset = mapping->start;
mapping 302 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->eoffset = mapping->last + 1;
mapping 303 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h __entry->flags = mapping->flags;
mapping 310 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
mapping 311 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_ARGS(mapping)
mapping 315 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
mapping 316 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_ARGS(mapping)
mapping 320 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
mapping 321 drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h TP_ARGS(mapping)
mapping 2342 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (p->mapping != adev->mman.bdev.dev_mapping)
mapping 2393 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c if (p->mapping != adev->mman.bdev.dev_mapping)
mapping 478 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c struct amdgpu_bo_va_mapping *mapping;
mapping 484 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
mapping 807 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c struct amdgpu_bo_va_mapping *mapping;
mapping 814 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
mapping 822 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c end = (mapping->last + 1 - mapping->start);
mapping 825 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
mapping 581 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c struct amdgpu_bo_va_mapping *mapping;
mapping 598 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
mapping 627 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c struct amdgpu_bo_va_mapping *mapping;
mapping 639 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
mapping 647 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
mapping 653 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
mapping 1560 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping,
mapping 1567 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c uint64_t pfn, start = mapping->start;
mapping 1573 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (!(mapping->flags & AMDGPU_PTE_READABLE))
mapping 1575 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
mapping 1579 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;
mapping 1583 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);
mapping 1586 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK);
mapping 1589 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if ((mapping->flags & AMDGPU_PTE_PRT) &&
mapping 1600 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c trace_amdgpu_vm_bo_update(mapping);
mapping 1602 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c pfn = mapping->offset >> PAGE_SHIFT;
mapping 1650 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c last = min((uint64_t)mapping->last, start + max_entries - 1);
mapping 1664 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c } while (unlikely(start != mapping->last + 1));
mapping 1687 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping;
mapping 1732 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_for_each_entry(mapping, &bo_va->invalids, list) {
mapping 1734 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping, flags, bo_adev, nodes,
mapping 1765 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_for_each_entry(mapping, &bo_va->valids, list)
mapping 1766 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c trace_amdgpu_vm_bo_mapping(mapping);
mapping 1868 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping,
mapping 1871 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->flags & AMDGPU_PTE_PRT)
mapping 1873 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c kfree(mapping);
mapping 1933 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping;
mapping 1939 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping = list_first_entry(&vm->freed,
mapping 1941 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_del(&mapping->list);
mapping 1944 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start < AMDGPU_GMC_HOLE_START)
mapping 1948 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start, mapping->last,
mapping 1950 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_free_mapping(adev, vm, mapping, f);
mapping 2079 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping)
mapping 2084 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->bo_va = bo_va;
mapping 2085 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_add(&mapping->list, &bo_va->invalids);
mapping 2086 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_it_insert(mapping, &vm->va);
mapping 2088 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->flags & AMDGPU_PTE_PRT)
mapping 2095 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c trace_amdgpu_vm_bo_map(bo_va, mapping);
mapping 2120 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping, *tmp;
mapping 2148 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
mapping 2149 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (!mapping)
mapping 2152 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start = saddr;
mapping 2153 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->last = eaddr;
mapping 2154 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->offset = offset;
mapping 2155 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->flags = flags;
mapping 2157 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
mapping 2185 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping;
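The amdgpu_vm.c entries above sketch the lifecycle of a GPU VA range: a heap-allocated mapping carries an inclusive [start, last] page interval plus an offset into the backing BO and PTE flags, lives in an interval tree keyed on that range (amdgpu_vm_it_insert), and on unmap is moved to the vm->freed list so the page tables can be cleared later. A much-reduced model follows, with a linear linked list standing in for the kernel's interval tree:

#include <stdint.h>
#include <stdlib.h>

struct va_mapping {
	uint64_t start, last;   /* inclusive page interval */
	uint64_t offset;        /* offset into the backing BO */
	uint64_t flags;
	struct va_mapping *next;
};

static struct va_mapping *va_tree; /* stands in for vm->va */
static struct va_mapping *freed;   /* stands in for vm->freed */

static int bo_map(uint64_t saddr, uint64_t eaddr, uint64_t offset,
		  uint64_t flags)
{
	struct va_mapping *m = malloc(sizeof(*m));
	if (!m)
		return -1;
	m->start = saddr;
	m->last = eaddr;    /* inclusive, hence "last" rather than "end" */
	m->offset = offset;
	m->flags = flags;
	m->next = va_tree;  /* kernel: amdgpu_vm_it_insert(m, &vm->va) */
	va_tree = m;
	return 0;
}

/* find the mapping covering addr; the kernel walks the interval tree */
static struct va_mapping *lookup(uint64_t addr)
{
	for (struct va_mapping *m = va_tree; m; m = m->next)
		if (addr >= m->start && addr <= m->last)
			return m;
	return NULL;
}

/* unmap: unlink and park on the freed list, as bo_unmap does */
static int bo_unmap(uint64_t saddr)
{
	for (struct va_mapping **p = &va_tree; *p; p = &(*p)->next) {
		if ((*p)->start == saddr) {
			struct va_mapping *m = *p;
			*p = m->next;
			m->next = freed;
			freed = m;
			return 0;
		}
	}
	return -1; /* no mapping starts exactly at saddr */
}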
mapping 2202 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
mapping 2203 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (!mapping)
mapping 2208 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c kfree(mapping);
mapping 2215 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->start = saddr;
mapping 2216 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->last = eaddr;
mapping 2217 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->offset = offset;
mapping 2218 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->flags = flags;
mapping 2220 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_bo_insert_map(adev, bo_va, mapping);
mapping 2243 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping;
mapping 2249 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_for_each_entry(mapping, &bo_va->valids, list) {
mapping 2250 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->start == saddr)
mapping 2254 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (&mapping->list == &bo_va->valids) {
mapping 2257 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_for_each_entry(mapping, &bo_va->invalids, list) {
mapping 2258 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->start == saddr)
mapping 2262 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (&mapping->list == &bo_va->invalids)
mapping 2266 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_del(&mapping->list);
mapping 2267 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_it_remove(mapping, &vm->va);
mapping 2268 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->bo_va = NULL;
mapping 2269 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c trace_amdgpu_vm_bo_unmap(bo_va, mapping);
mapping 2272 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_add(&mapping->list, &vm->freed);
mapping 2274 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_free_mapping(adev, vm, mapping,
mapping 2412 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping;
mapping 2417 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
mapping 2418 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
mapping 2419 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->bo_va && mapping->bo_va->base.bo) {
mapping 2422 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c bo = mapping->bo_va->base.bo;
mapping 2428 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c trace_amdgpu_vm_bo_cs(mapping);
mapping 2445 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping, *next;
mapping 2468 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
mapping 2469 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_del(&mapping->list);
mapping 2470 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_it_remove(mapping, &vm->va);
mapping 2471 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c mapping->bo_va = NULL;
mapping 2472 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c trace_amdgpu_vm_bo_unmap(bo_va, mapping);
mapping 2473 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_add(&mapping->list, &vm->freed);
mapping 2475 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
mapping 2476 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_del(&mapping->list);
mapping 2477 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_it_remove(mapping, &vm->va);
mapping 2478 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_free_mapping(adev, vm, mapping,
mapping 2943 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c struct amdgpu_bo_va_mapping *mapping, *tmp;
mapping 2963 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c rbtree_postorder_for_each_entry_safe(mapping, tmp,
mapping 2968 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_del(&mapping->list);
mapping 2969 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c kfree(mapping);
mapping 2971 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
mapping 2972 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
mapping 2977 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c list_del(&mapping->list);
mapping 2978 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
mapping 240 drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h } mapping;
mapping 192 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 197 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c mapping = arcturus_message_map[index];
mapping 198 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (!(mapping.valid_mapping))
mapping 201 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c return mapping.map_to;
mapping 206 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 211 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c mapping = arcturus_clk_map[index];
mapping 212 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (!(mapping.valid_mapping)) {
mapping 217 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c return mapping.map_to;
mapping 222 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 227 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c mapping = arcturus_feature_mask_map[index];
mapping 228 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (!(mapping.valid_mapping)) {
mapping 232 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c return mapping.map_to;
mapping 237 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 242 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c mapping = arcturus_table_map[index];
mapping 243 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (!(mapping.valid_mapping)) {
mapping 248 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c return mapping.map_to;
mapping 253 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 258 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c mapping = arcturus_pwr_src_map[index];
mapping 259 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (!(mapping.valid_mapping)) {
mapping 264 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c return mapping.map_to;
mapping 270 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 275 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c mapping = arcturus_workload_map[profile];
mapping 276 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c if (!(mapping.valid_mapping)) {
mapping 281 drivers/gpu/drm/amd/powerplay/arcturus_ppt.c return mapping.map_to;
mapping 214 drivers/gpu/drm/amd/powerplay/navi10_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 219 drivers/gpu/drm/amd/powerplay/navi10_ppt.c mapping = navi10_message_map[index];
mapping 220 drivers/gpu/drm/amd/powerplay/navi10_ppt.c if (!(mapping.valid_mapping)) {
mapping 224 drivers/gpu/drm/amd/powerplay/navi10_ppt.c return mapping.map_to;
mapping 229 drivers/gpu/drm/amd/powerplay/navi10_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 234 drivers/gpu/drm/amd/powerplay/navi10_ppt.c mapping = navi10_clk_map[index];
mapping 235 drivers/gpu/drm/amd/powerplay/navi10_ppt.c if (!(mapping.valid_mapping)) {
mapping 239 drivers/gpu/drm/amd/powerplay/navi10_ppt.c return mapping.map_to;
mapping 244 drivers/gpu/drm/amd/powerplay/navi10_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 249 drivers/gpu/drm/amd/powerplay/navi10_ppt.c mapping = navi10_feature_mask_map[index];
mapping 250 drivers/gpu/drm/amd/powerplay/navi10_ppt.c if (!(mapping.valid_mapping)) {
mapping 254 drivers/gpu/drm/amd/powerplay/navi10_ppt.c return mapping.map_to;
mapping 259 drivers/gpu/drm/amd/powerplay/navi10_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 264 drivers/gpu/drm/amd/powerplay/navi10_ppt.c mapping = navi10_table_map[index];
mapping 265 drivers/gpu/drm/amd/powerplay/navi10_ppt.c if (!(mapping.valid_mapping)) {
mapping 269 drivers/gpu/drm/amd/powerplay/navi10_ppt.c return mapping.map_to;
mapping 274 drivers/gpu/drm/amd/powerplay/navi10_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 279 drivers/gpu/drm/amd/powerplay/navi10_ppt.c mapping = navi10_pwr_src_map[index];
mapping 280 drivers/gpu/drm/amd/powerplay/navi10_ppt.c if (!(mapping.valid_mapping)) {
mapping 284 drivers/gpu/drm/amd/powerplay/navi10_ppt.c return mapping.map_to;
mapping 290 drivers/gpu/drm/amd/powerplay/navi10_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 295 drivers/gpu/drm/amd/powerplay/navi10_ppt.c mapping = navi10_workload_map[profile];
mapping 296 drivers/gpu/drm/amd/powerplay/navi10_ppt.c if (!(mapping.valid_mapping)) {
mapping 300 drivers/gpu/drm/amd/powerplay/navi10_ppt.c return mapping.map_to;
mapping 115 drivers/gpu/drm/amd/powerplay/renoir_ppt.c struct smu_12_0_cmn2aisc_mapping mapping;
mapping 120 drivers/gpu/drm/amd/powerplay/renoir_ppt.c mapping = renoir_message_map[index];
mapping 121 drivers/gpu/drm/amd/powerplay/renoir_ppt.c if (!(mapping.valid_mapping))
mapping 124 drivers/gpu/drm/amd/powerplay/renoir_ppt.c return mapping.map_to;
mapping 129 drivers/gpu/drm/amd/powerplay/renoir_ppt.c struct smu_12_0_cmn2aisc_mapping mapping;
mapping 134 drivers/gpu/drm/amd/powerplay/renoir_ppt.c mapping = renoir_table_map[index];
mapping 135 drivers/gpu/drm/amd/powerplay/renoir_ppt.c if (!(mapping.valid_mapping))
mapping 138 drivers/gpu/drm/amd/powerplay/renoir_ppt.c return mapping.map_to;
mapping 228 drivers/gpu/drm/amd/powerplay/vega20_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 233 drivers/gpu/drm/amd/powerplay/vega20_ppt.c mapping = vega20_table_map[index];
mapping 234 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!(mapping.valid_mapping)) {
mapping 238 drivers/gpu/drm/amd/powerplay/vega20_ppt.c return mapping.map_to;
mapping 243 drivers/gpu/drm/amd/powerplay/vega20_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 248 drivers/gpu/drm/amd/powerplay/vega20_ppt.c mapping = vega20_pwr_src_map[index];
mapping 249 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!(mapping.valid_mapping)) {
mapping 253 drivers/gpu/drm/amd/powerplay/vega20_ppt.c return mapping.map_to;
mapping 258 drivers/gpu/drm/amd/powerplay/vega20_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 263 drivers/gpu/drm/amd/powerplay/vega20_ppt.c mapping = vega20_feature_mask_map[index];
mapping 264 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!(mapping.valid_mapping)) {
mapping 268 drivers/gpu/drm/amd/powerplay/vega20_ppt.c return mapping.map_to;
mapping 273 drivers/gpu/drm/amd/powerplay/vega20_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 278 drivers/gpu/drm/amd/powerplay/vega20_ppt.c mapping = vega20_clk_map[index];
mapping 279 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!(mapping.valid_mapping)) {
mapping 283 drivers/gpu/drm/amd/powerplay/vega20_ppt.c return mapping.map_to;
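The arcturus/navi10/renoir/vega20 _ppt.c entries in this stretch all funnel through one table shape: a smu_*_cmn2aisc_mapping entry pairs a validity flag with the ASIC-specific index, and every getter bails out when valid_mapping is clear. A compact model of that lookup, with invented table contents:

#include <errno.h>
#include <stdbool.h>

struct cmn2aisc_mapping {
	bool valid_mapping; /* is this common index wired up on this ASIC? */
	int map_to;         /* ASIC-specific message/clock/table index */
};

/* hypothetical per-ASIC message table, indexed by common message id */
static const struct cmn2aisc_mapping message_map[] = {
	{ true,  0x01 },
	{ false, 0    }, /* message not implemented on this ASIC */
	{ true,  0x17 },
};

static int get_smu_msg_index(unsigned int index)
{
	if (index >= sizeof(message_map) / sizeof(message_map[0]))
		return -EINVAL;

	struct cmn2aisc_mapping mapping = message_map[index];
	if (!mapping.valid_mapping) /* the guard every getter above repeats */
		return -EINVAL;

	return mapping.map_to;
}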
mapping 288 drivers/gpu/drm/amd/powerplay/vega20_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 293 drivers/gpu/drm/amd/powerplay/vega20_ppt.c mapping = vega20_message_map[index];
mapping 294 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!(mapping.valid_mapping)) {
mapping 298 drivers/gpu/drm/amd/powerplay/vega20_ppt.c return mapping.map_to;
mapping 303 drivers/gpu/drm/amd/powerplay/vega20_ppt.c struct smu_11_0_cmn2aisc_mapping mapping;
mapping 308 drivers/gpu/drm/amd/powerplay/vega20_ppt.c mapping = vega20_workload_map[profile];
mapping 309 drivers/gpu/drm/amd/powerplay/vega20_ppt.c if (!(mapping.valid_mapping)) {
mapping 313 drivers/gpu/drm/amd/powerplay/vega20_ppt.c return mapping.map_to;
mapping 209 drivers/gpu/drm/armada/armada_gem.c struct address_space *mapping;
mapping 222 drivers/gpu/drm/armada/armada_gem.c mapping = obj->obj.filp->f_mapping;
mapping 223 drivers/gpu/drm/armada/armada_gem.c mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);
mapping 389 drivers/gpu/drm/armada/armada_gem.c struct address_space *mapping;
mapping 396 drivers/gpu/drm/armada/armada_gem.c mapping = dobj->obj.filp->f_mapping;
mapping 401 drivers/gpu/drm/armada/armada_gem.c page = shmem_read_mapping_page(mapping, i);
mapping 102 drivers/gpu/drm/ati_pcigart.c struct drm_local_map *map = &gart_info->mapping;
mapping 555 drivers/gpu/drm/drm_gem.c struct address_space *mapping;
mapping 561 drivers/gpu/drm/drm_gem.c mapping = obj->filp->f_mapping;
mapping 575 drivers/gpu/drm/drm_gem.c mapping_set_unevictable(mapping);
mapping 578 drivers/gpu/drm/drm_gem.c p = shmem_read_mapping_page(mapping, i);
mapping 588 drivers/gpu/drm/drm_gem.c BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
mapping 595 drivers/gpu/drm/drm_gem.c mapping_clear_unevictable(mapping);
mapping 620 drivers/gpu/drm/drm_gem.c struct address_space *mapping;
mapping 623 drivers/gpu/drm/drm_gem.c mapping = file_inode(obj->filp)->i_mapping;
mapping 624 drivers/gpu/drm/drm_gem.c mapping_clear_unevictable(mapping);
mapping 65 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c struct etnaviv_vram_mapping *mapping,
mapping 68 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
mapping 73 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c struct etnaviv_vram_mapping *mapping)
mapping 75 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c etnaviv_iommu_put_suballoc_va(context, mapping);
mapping 134 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c struct etnaviv_vram_mapping *mapping)
mapping 136 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c return mapping->iova + buf->suballoc_offset;
mapping 32 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h struct etnaviv_vram_mapping *mapping,
mapping 35 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h struct etnaviv_vram_mapping *mapping);
mapping 43 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h struct etnaviv_vram_mapping *mapping);
mapping 202 drivers/gpu/drm/etnaviv/etnaviv_dump.c vram = submit->bos[i].mapping;
mapping 228 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct etnaviv_vram_mapping *mapping;
mapping 230 drivers/gpu/drm/etnaviv/etnaviv_gem.c list_for_each_entry(mapping, &obj->vram_list, obj_node) {
mapping 231 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (mapping->context == context)
mapping 232 drivers/gpu/drm/etnaviv/etnaviv_gem.c return mapping;
mapping 238 drivers/gpu/drm/etnaviv/etnaviv_gem.c void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
mapping 240 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct etnaviv_gem_object *etnaviv_obj = mapping->object;
mapping 243 drivers/gpu/drm/etnaviv/etnaviv_gem.c WARN_ON(mapping->use == 0);
mapping 244 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping->use -= 1;
mapping 255 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct etnaviv_vram_mapping *mapping;
mapping 260 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, mmu_context);
mapping 261 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (mapping) {
mapping 268 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (mapping->use == 0) {
mapping 270 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (mapping->context == mmu_context)
mapping 271 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping->use += 1;
mapping 273 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping = NULL;
mapping 275 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (mapping)
mapping 278 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping->use += 1;
mapping 293 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, NULL);
mapping 294 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (!mapping) {
mapping 295 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
mapping 296 drivers/gpu/drm/etnaviv/etnaviv_gem.c if (!mapping) {
mapping 301 drivers/gpu/drm/etnaviv/etnaviv_gem.c INIT_LIST_HEAD(&mapping->scan_node);
mapping 302 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping->object = etnaviv_obj;
mapping 304 drivers/gpu/drm/etnaviv/etnaviv_gem.c list_del(&mapping->obj_node);
mapping 308 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping->context = mmu_context;
mapping 309 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping->use = 1;
mapping 313 drivers/gpu/drm/etnaviv/etnaviv_gem.c mapping, va);
mapping 316 drivers/gpu/drm/etnaviv/etnaviv_gem.c kfree(mapping);
mapping 318 drivers/gpu/drm/etnaviv/etnaviv_gem.c list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
mapping 329 drivers/gpu/drm/etnaviv/etnaviv_gem.c return mapping;
mapping 522 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct etnaviv_vram_mapping *mapping, *tmp;
mapping 531 drivers/gpu/drm/etnaviv/etnaviv_gem.c list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
mapping 533 drivers/gpu/drm/etnaviv/etnaviv_gem.c struct etnaviv_iommu_context *context = mapping->context;
mapping 535 drivers/gpu/drm/etnaviv/etnaviv_gem.c WARN_ON(mapping->use);
mapping 538 drivers/gpu/drm/etnaviv/etnaviv_gem.c etnaviv_iommu_unmap_gem(context, mapping);
mapping 542 drivers/gpu/drm/etnaviv/etnaviv_gem.c list_del(&mapping->obj_node);
mapping 543 drivers/gpu/drm/etnaviv/etnaviv_gem.c kfree(mapping);
mapping 82 drivers/gpu/drm/etnaviv/etnaviv_gem.h struct etnaviv_vram_mapping *mapping;
mapping 125 drivers/gpu/drm/etnaviv/etnaviv_gem.h void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping);
mapping 232 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c struct etnaviv_vram_mapping *mapping;
mapping 234 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
mapping 237 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c if (IS_ERR(mapping)) {
mapping 238 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c ret = PTR_ERR(mapping);
mapping 243 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c submit->bos[i].va != mapping->iova) {
mapping 244 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c etnaviv_gem_mapping_unreference(mapping);
mapping 251 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c submit->bos[i].mapping = mapping;
mapping 318 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c ptr[off] = bo->mapping->iova + r->reloc_offset;
mapping 395 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c etnaviv_gem_mapping_unreference(submit->bos[i].mapping);
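The etnaviv_gem.c entries above implement a small per-object cache: each GEM object keeps a list of per-MMU-context VRAM mappings, a lookup bumps mapping->use when it finds a live entry for the right context, and unreference only drops the count; actual teardown is deferred until the object is freed and use has reached zero. A simplified model of that get/put discipline (names are stand-ins, locking omitted):

#include <stdlib.h>

struct mmu_context; /* opaque */

struct vram_mapping {
	struct mmu_context *context;
	unsigned int use;          /* models mapping->use */
	struct vram_mapping *next; /* models obj->vram_list */
};

struct gem_object { struct vram_mapping *vram_list; };

static struct vram_mapping *find(struct gem_object *obj,
				 struct mmu_context *ctx)
{
	for (struct vram_mapping *m = obj->vram_list; m; m = m->next)
		if (m->context == ctx)
			return m;
	return NULL;
}

/* models etnaviv_gem_mapping_get(): reuse and ref, or create fresh */
static struct vram_mapping *mapping_get(struct gem_object *obj,
					struct mmu_context *ctx)
{
	struct vram_mapping *m = find(obj, ctx);
	if (m) {
		m->use += 1;
		return m;
	}
	m = calloc(1, sizeof(*m));
	if (!m)
		return NULL;
	m->context = ctx;
	m->use = 1;                /* born referenced */
	m->next = obj->vram_list;
	obj->vram_list = m;
	return m;
}

/* models etnaviv_gem_mapping_unreference(): drop the count only */
static void mapping_put(struct vram_mapping *m)
{
	if (m && m->use)
		m->use -= 1;       /* freeing is deferred until teardown */
}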
drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c etnaviv_gem_mapping_unreference(submit->bos[i].mapping); mapping 397 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c submit->bos[i].mapping = NULL; mapping 130 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct etnaviv_vram_mapping *mapping) mapping 132 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct etnaviv_gem_object *etnaviv_obj = mapping->object; mapping 134 drivers/gpu/drm/etnaviv/etnaviv_mmu.c etnaviv_iommu_unmap(context, mapping->vram_node.start, mapping 136 drivers/gpu/drm/etnaviv/etnaviv_mmu.c drm_mm_remove_node(&mapping->vram_node); mapping 232 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct etnaviv_vram_mapping *mapping, u64 va) mapping 249 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->iova = iova; mapping 250 drivers/gpu/drm/etnaviv/etnaviv_mmu.c list_add_tail(&mapping->mmu_node, &context->mappings); mapping 256 drivers/gpu/drm/etnaviv/etnaviv_mmu.c node = &mapping->vram_node; mapping 267 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->iova = node->start; mapping 276 drivers/gpu/drm/etnaviv/etnaviv_mmu.c list_add_tail(&mapping->mmu_node, &context->mappings); mapping 285 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct etnaviv_vram_mapping *mapping) mapping 287 drivers/gpu/drm/etnaviv/etnaviv_mmu.c WARN_ON(mapping->use); mapping 292 drivers/gpu/drm/etnaviv/etnaviv_mmu.c if (mapping->vram_node.mm == &context->mm) mapping 293 drivers/gpu/drm/etnaviv/etnaviv_mmu.c etnaviv_iommu_remove_mapping(context, mapping); mapping 295 drivers/gpu/drm/etnaviv/etnaviv_mmu.c list_del(&mapping->mmu_node); mapping 357 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct etnaviv_vram_mapping *mapping, mapping 363 drivers/gpu/drm/etnaviv/etnaviv_mmu.c if (mapping->use > 0) { mapping 364 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->use++; mapping 376 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->iova = paddr - memory_base; mapping 378 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct drm_mm_node *node = &mapping->vram_node; mapping 387 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->iova = node->start; mapping 399 drivers/gpu/drm/etnaviv/etnaviv_mmu.c list_add_tail(&mapping->mmu_node, &context->mappings); mapping 400 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->use = 1; mapping 408 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct etnaviv_vram_mapping *mapping) mapping 410 drivers/gpu/drm/etnaviv/etnaviv_mmu.c struct drm_mm_node *node = &mapping->vram_node; mapping 413 drivers/gpu/drm/etnaviv/etnaviv_mmu.c mapping->use--; mapping 415 drivers/gpu/drm/etnaviv/etnaviv_mmu.c if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) { mapping 91 drivers/gpu/drm/etnaviv/etnaviv_mmu.h struct etnaviv_vram_mapping *mapping, u64 va); mapping 93 drivers/gpu/drm/etnaviv/etnaviv_mmu.h struct etnaviv_vram_mapping *mapping); mapping 96 drivers/gpu/drm/etnaviv/etnaviv_mmu.h struct etnaviv_vram_mapping *mapping, mapping 100 drivers/gpu/drm/etnaviv/etnaviv_mmu.h struct etnaviv_vram_mapping *mapping); mapping 87 drivers/gpu/drm/exynos/exynos_drm_dma.c ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); mapping 89 drivers/gpu/drm/exynos/exynos_drm_dma.c ret = iommu_attach_device(priv->mapping, subdrv_dev); mapping 116 drivers/gpu/drm/exynos/exynos_drm_dma.c iommu_detach_device(priv->mapping, subdrv_dev); mapping 135 drivers/gpu/drm/exynos/exynos_drm_dma.c if (!priv->mapping) { mapping 136 drivers/gpu/drm/exynos/exynos_drm_dma.c void *mapping; mapping 139 drivers/gpu/drm/exynos/exynos_drm_dma.c mapping = arm_iommu_create_mapping(&platform_bus_type, mapping 142 
drivers/gpu/drm/exynos/exynos_drm_dma.c mapping = iommu_get_domain_for_dev(priv->dma_dev); mapping 144 drivers/gpu/drm/exynos/exynos_drm_dma.c if (IS_ERR(mapping)) mapping 145 drivers/gpu/drm/exynos/exynos_drm_dma.c return PTR_ERR(mapping); mapping 146 drivers/gpu/drm/exynos/exynos_drm_dma.c priv->mapping = mapping; mapping 166 drivers/gpu/drm/exynos/exynos_drm_dma.c arm_iommu_release_mapping(priv->mapping); mapping 167 drivers/gpu/drm/exynos/exynos_drm_dma.c priv->mapping = NULL; mapping 204 drivers/gpu/drm/exynos/exynos_drm_drv.h void *mapping; mapping 223 drivers/gpu/drm/exynos/exynos_drm_drv.h return priv->mapping ? true : false; mapping 1930 drivers/gpu/drm/gma500/psb_intel_sdvo.c struct sdvo_device_mapping *mapping; mapping 1933 drivers/gpu/drm/gma500/psb_intel_sdvo.c mapping = &(dev_priv->sdvo_mappings[0]); mapping 1935 drivers/gpu/drm/gma500/psb_intel_sdvo.c mapping = &(dev_priv->sdvo_mappings[1]); mapping 1937 drivers/gpu/drm/gma500/psb_intel_sdvo.c if (mapping->initialized) mapping 1938 drivers/gpu/drm/gma500/psb_intel_sdvo.c sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); mapping 1947 drivers/gpu/drm/gma500/psb_intel_sdvo.c struct sdvo_device_mapping *mapping; mapping 1951 drivers/gpu/drm/gma500/psb_intel_sdvo.c mapping = &dev_priv->sdvo_mappings[0]; mapping 1953 drivers/gpu/drm/gma500/psb_intel_sdvo.c mapping = &dev_priv->sdvo_mappings[1]; mapping 1957 drivers/gpu/drm/gma500/psb_intel_sdvo.c if (mapping->initialized) { mapping 1958 drivers/gpu/drm/gma500/psb_intel_sdvo.c pin = mapping->i2c_pin; mapping 1959 drivers/gpu/drm/gma500/psb_intel_sdvo.c speed = mapping->i2c_speed; mapping 451 drivers/gpu/drm/i915/display/intel_bios.c struct sdvo_device_mapping *mapping; mapping 486 drivers/gpu/drm/i915/display/intel_bios.c mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1]; mapping 487 drivers/gpu/drm/i915/display/intel_bios.c if (!mapping->initialized) { mapping 488 drivers/gpu/drm/i915/display/intel_bios.c mapping->dvo_port = child->dvo_port; mapping 489 drivers/gpu/drm/i915/display/intel_bios.c mapping->slave_addr = child->slave_addr; mapping 490 drivers/gpu/drm/i915/display/intel_bios.c mapping->dvo_wiring = child->dvo_wiring; mapping 491 drivers/gpu/drm/i915/display/intel_bios.c mapping->ddc_pin = child->ddc_pin; mapping 492 drivers/gpu/drm/i915/display/intel_bios.c mapping->i2c_pin = child->i2c_pin; mapping 493 drivers/gpu/drm/i915/display/intel_bios.c mapping->initialized = 1; mapping 495 drivers/gpu/drm/i915/display/intel_bios.c mapping->dvo_port, mapping 496 drivers/gpu/drm/i915/display/intel_bios.c mapping->slave_addr, mapping 497 drivers/gpu/drm/i915/display/intel_bios.c mapping->dvo_wiring, mapping 498 drivers/gpu/drm/i915/display/intel_bios.c mapping->ddc_pin, mapping 499 drivers/gpu/drm/i915/display/intel_bios.c mapping->i2c_pin); mapping 1235 drivers/gpu/drm/i915/display/intel_bios.c static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */ mapping 1237 drivers/gpu/drm/i915/display/intel_bios.c if (val >= ARRAY_SIZE(mapping)) { mapping 1241 drivers/gpu/drm/i915/display/intel_bios.c return mapping[val]; mapping 2525 drivers/gpu/drm/i915/display/intel_sdvo.c struct sdvo_device_mapping *mapping; mapping 2528 drivers/gpu/drm/i915/display/intel_sdvo.c mapping = &dev_priv->vbt.sdvo_mappings[0]; mapping 2530 drivers/gpu/drm/i915/display/intel_sdvo.c mapping = &dev_priv->vbt.sdvo_mappings[1]; mapping 2532 drivers/gpu/drm/i915/display/intel_sdvo.c if (mapping->initialized) mapping 2533 drivers/gpu/drm/i915/display/intel_sdvo.c sdvo->ddc_bus = 1 << 
((mapping->ddc_pin & 0xf0) >> 4); mapping 2542 drivers/gpu/drm/i915/display/intel_sdvo.c struct sdvo_device_mapping *mapping; mapping 2546 drivers/gpu/drm/i915/display/intel_sdvo.c mapping = &dev_priv->vbt.sdvo_mappings[0]; mapping 2548 drivers/gpu/drm/i915/display/intel_sdvo.c mapping = &dev_priv->vbt.sdvo_mappings[1]; mapping 2550 drivers/gpu/drm/i915/display/intel_sdvo.c if (mapping->initialized && mapping 2551 drivers/gpu/drm/i915/display/intel_sdvo.c intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin)) mapping 2552 drivers/gpu/drm/i915/display/intel_sdvo.c pin = mapping->i2c_pin; mapping 164 drivers/gpu/drm/i915/gem/i915_gem_object_types.h void *mapping; mapping 164 drivers/gpu/drm/i915/gem/i915_gem_pages.c if (obj->mm.mapping) { mapping 167 drivers/gpu/drm/i915/gem/i915_gem_pages.c ptr = page_mask_bits(obj->mm.mapping); mapping 173 drivers/gpu/drm/i915/gem/i915_gem_pages.c obj->mm.mapping = NULL; mapping 310 drivers/gpu/drm/i915/gem/i915_gem_pages.c ptr = page_unpack_bits(obj->mm.mapping, &has_type); mapping 322 drivers/gpu/drm/i915/gem/i915_gem_pages.c ptr = obj->mm.mapping = NULL; mapping 332 drivers/gpu/drm/i915/gem/i915_gem_pages.c obj->mm.mapping = page_pack_bits(ptr, type); mapping 362 drivers/gpu/drm/i915/gem/i915_gem_pages.c ptr = page_unpack_bits(obj->mm.mapping, &has_type); mapping 23 drivers/gpu/drm/i915/gem/i915_gem_phys.c struct address_space *mapping = obj->base.filp->f_mapping; mapping 65 drivers/gpu/drm/i915/gem/i915_gem_phys.c page = shmem_read_mapping_page(mapping, i); mapping 103 drivers/gpu/drm/i915/gem/i915_gem_phys.c struct address_space *mapping = obj->base.filp->f_mapping; mapping 111 drivers/gpu/drm/i915/gem/i915_gem_phys.c page = shmem_read_mapping_page(mapping, i); mapping 180 drivers/gpu/drm/i915/gem/i915_gem_phys.c if (obj->mm.mapping) { mapping 31 drivers/gpu/drm/i915/gem/i915_gem_shmem.c struct address_space *mapping; mapping 74 drivers/gpu/drm/i915/gem/i915_gem_shmem.c mapping = obj->base.filp->f_mapping; mapping 75 drivers/gpu/drm/i915/gem/i915_gem_shmem.c mapping_set_unevictable(mapping); mapping 76 drivers/gpu/drm/i915/gem/i915_gem_shmem.c noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); mapping 91 drivers/gpu/drm/i915/gem/i915_gem_shmem.c page = shmem_read_mapping_page_gfp(mapping, i, gfp); mapping 113 drivers/gpu/drm/i915/gem/i915_gem_shmem.c gfp = mapping_gfp_mask(mapping); mapping 190 drivers/gpu/drm/i915/gem/i915_gem_shmem.c mapping_clear_unevictable(mapping); mapping 233 drivers/gpu/drm/i915/gem/i915_gem_shmem.c struct address_space *mapping; mapping 249 drivers/gpu/drm/i915/gem/i915_gem_shmem.c mapping = obj->base.filp->f_mapping; mapping 255 drivers/gpu/drm/i915/gem/i915_gem_shmem.c page = find_lock_entry(mapping, i); mapping 263 drivers/gpu/drm/i915/gem/i915_gem_shmem.c ret = mapping->a_ops->writepage(page, &wbc); mapping 332 drivers/gpu/drm/i915/gem/i915_gem_shmem.c struct address_space *mapping = obj->base.filp->f_mapping; mapping 387 drivers/gpu/drm/i915/gem/i915_gem_shmem.c err = pagecache_write_begin(obj->base.filp, mapping, mapping 399 drivers/gpu/drm/i915/gem/i915_gem_shmem.c err = pagecache_write_end(obj->base.filp, mapping, mapping 462 drivers/gpu/drm/i915/gem/i915_gem_shmem.c struct address_space *mapping; mapping 493 drivers/gpu/drm/i915/gem/i915_gem_shmem.c mapping = obj->base.filp->f_mapping; mapping 494 drivers/gpu/drm/i915/gem/i915_gem_shmem.c mapping_set_gfp_mask(mapping, mask); mapping 495 drivers/gpu/drm/i915/gem/i915_gem_shmem.c GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); mapping 236 
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c !is_vmalloc_addr(obj->mm.mapping)) mapping 1510 drivers/gpu/drm/i915/i915_cmd_parser.c void *ptr = page_mask_bits(shadow_batch_obj->mm.mapping); mapping 102 drivers/gpu/drm/i915/i915_debugfs.c return obj->mm.mapping ? 'M' : ' '; mapping 304 drivers/gpu/drm/i915/i915_gem.c gtt_user_read(struct io_mapping *mapping, mapping 312 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_atomic_wc(mapping, base); mapping 318 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE); mapping 491 drivers/gpu/drm/i915/i915_gem.c ggtt_write(struct io_mapping *mapping, mapping 499 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_atomic_wc(mapping, base); mapping 504 drivers/gpu/drm/i915/i915_gem.c vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE); mapping 487 drivers/gpu/drm/imx/imx-ldb.c const char * const mapping; mapping 509 drivers/gpu/drm/imx/imx-ldb.c if (!strcasecmp(bm, imx_ldb_bit_mappings[i].mapping) && mapping 77 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c unsigned int mapping; mapping 80 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c mapping = irq_find_mapping(dpu_mdss->irq_controller.domain, mapping 82 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c if (mapping == 0) { mapping 87 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c rc = generic_handle_irq(mapping); mapping 90 drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c hwirq, mapping, rc); mapping 113 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c if (dev->archdata.mapping) { mapping 114 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); mapping 117 drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c arm_iommu_release_mapping(mapping); mapping 1130 drivers/gpu/drm/omapdrm/omap_gem.c struct address_space *mapping; mapping 1202 drivers/gpu/drm/omapdrm/omap_gem.c mapping = obj->filp->f_mapping; mapping 1203 drivers/gpu/drm/omapdrm/omap_gem.c mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); mapping 146 drivers/gpu/drm/panel/panel-lvds.c const char *mapping; mapping 173 drivers/gpu/drm/panel/panel-lvds.c ret = of_property_read_string(np, "data-mapping", &mapping); mapping 180 drivers/gpu/drm/panel/panel-lvds.c if (!strcmp(mapping, "jeida-18")) { mapping 182 drivers/gpu/drm/panel/panel-lvds.c } else if (!strcmp(mapping, "jeida-24")) { mapping 184 drivers/gpu/drm/panel/panel-lvds.c } else if (!strcmp(mapping, "vesa-24")) { mapping 84 drivers/gpu/drm/panfrost/panfrost_drv.c struct panfrost_gem_mapping *mapping; mapping 100 drivers/gpu/drm/panfrost/panfrost_drv.c mapping = panfrost_gem_mapping_get(bo, priv); mapping 101 drivers/gpu/drm/panfrost/panfrost_drv.c if (!mapping) { mapping 106 drivers/gpu/drm/panfrost/panfrost_drv.c args->offset = mapping->mmnode.start << PAGE_SHIFT; mapping 107 drivers/gpu/drm/panfrost/panfrost_drv.c panfrost_gem_mapping_put(mapping); mapping 160 drivers/gpu/drm/panfrost/panfrost_drv.c struct panfrost_gem_mapping *mapping; mapping 163 drivers/gpu/drm/panfrost/panfrost_drv.c mapping = panfrost_gem_mapping_get(bo, priv); mapping 164 drivers/gpu/drm/panfrost/panfrost_drv.c if (!mapping) { mapping 170 drivers/gpu/drm/panfrost/panfrost_drv.c job->mappings[i] = mapping; mapping 363 drivers/gpu/drm/panfrost/panfrost_drv.c struct panfrost_gem_mapping *mapping; mapping 374 drivers/gpu/drm/panfrost/panfrost_drv.c mapping = panfrost_gem_mapping_get(bo, priv); mapping 377 drivers/gpu/drm/panfrost/panfrost_drv.c if (!mapping) mapping 380 drivers/gpu/drm/panfrost/panfrost_drv.c args->offset = mapping->mmnode.start << 
PAGE_SHIFT; mapping 381 drivers/gpu/drm/panfrost/panfrost_drv.c panfrost_gem_mapping_put(mapping); mapping 59 drivers/gpu/drm/panfrost/panfrost_gem.c struct panfrost_gem_mapping *iter, *mapping = NULL; mapping 65 drivers/gpu/drm/panfrost/panfrost_gem.c mapping = iter; mapping 71 drivers/gpu/drm/panfrost/panfrost_gem.c return mapping; mapping 75 drivers/gpu/drm/panfrost/panfrost_gem.c panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) mapping 79 drivers/gpu/drm/panfrost/panfrost_gem.c if (mapping->active) mapping 80 drivers/gpu/drm/panfrost/panfrost_gem.c panfrost_mmu_unmap(mapping); mapping 82 drivers/gpu/drm/panfrost/panfrost_gem.c priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu); mapping 84 drivers/gpu/drm/panfrost/panfrost_gem.c if (drm_mm_node_allocated(&mapping->mmnode)) mapping 85 drivers/gpu/drm/panfrost/panfrost_gem.c drm_mm_remove_node(&mapping->mmnode); mapping 91 drivers/gpu/drm/panfrost/panfrost_gem.c struct panfrost_gem_mapping *mapping; mapping 93 drivers/gpu/drm/panfrost/panfrost_gem.c mapping = container_of(kref, struct panfrost_gem_mapping, refcount); mapping 95 drivers/gpu/drm/panfrost/panfrost_gem.c panfrost_gem_teardown_mapping(mapping); mapping 96 drivers/gpu/drm/panfrost/panfrost_gem.c drm_gem_object_put_unlocked(&mapping->obj->base.base); mapping 97 drivers/gpu/drm/panfrost/panfrost_gem.c kfree(mapping); mapping 100 drivers/gpu/drm/panfrost/panfrost_gem.c void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) mapping 102 drivers/gpu/drm/panfrost/panfrost_gem.c if (!mapping) mapping 105 drivers/gpu/drm/panfrost/panfrost_gem.c kref_put(&mapping->refcount, panfrost_gem_mapping_release); mapping 110 drivers/gpu/drm/panfrost/panfrost_gem.c struct panfrost_gem_mapping *mapping; mapping 113 drivers/gpu/drm/panfrost/panfrost_gem.c list_for_each_entry(mapping, &bo->mappings.list, node) mapping 114 drivers/gpu/drm/panfrost/panfrost_gem.c panfrost_gem_teardown_mapping(mapping); mapping 126 drivers/gpu/drm/panfrost/panfrost_gem.c struct panfrost_gem_mapping *mapping; mapping 128 drivers/gpu/drm/panfrost/panfrost_gem.c mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); mapping 129 drivers/gpu/drm/panfrost/panfrost_gem.c if (!mapping) mapping 132 drivers/gpu/drm/panfrost/panfrost_gem.c INIT_LIST_HEAD(&mapping->node); mapping 133 drivers/gpu/drm/panfrost/panfrost_gem.c kref_init(&mapping->refcount); mapping 135 drivers/gpu/drm/panfrost/panfrost_gem.c mapping->obj = bo; mapping 148 drivers/gpu/drm/panfrost/panfrost_gem.c mapping->mmu = &priv->mmu; mapping 150 drivers/gpu/drm/panfrost/panfrost_gem.c ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode, mapping 157 drivers/gpu/drm/panfrost/panfrost_gem.c ret = panfrost_mmu_map(mapping); mapping 164 drivers/gpu/drm/panfrost/panfrost_gem.c list_add_tail(&mapping->node, &bo->mappings.list); mapping 169 drivers/gpu/drm/panfrost/panfrost_gem.c panfrost_gem_mapping_put(mapping); mapping 177 drivers/gpu/drm/panfrost/panfrost_gem.c struct panfrost_gem_mapping *mapping = NULL, *iter; mapping 182 drivers/gpu/drm/panfrost/panfrost_gem.c mapping = iter; mapping 189 drivers/gpu/drm/panfrost/panfrost_gem.c panfrost_gem_mapping_put(mapping); mapping 84 drivers/gpu/drm/panfrost/panfrost_gem.h void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); mapping 277 drivers/gpu/drm/panfrost/panfrost_mmu.c int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) mapping 279 drivers/gpu/drm/panfrost/panfrost_mmu.c struct panfrost_gem_object *bo = mapping->obj; mapping 285 
drivers/gpu/drm/panfrost/panfrost_mmu.c if (WARN_ON(mapping->active)) mapping 295 drivers/gpu/drm/panfrost/panfrost_mmu.c mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, mapping 297 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping->active = true; mapping 302 drivers/gpu/drm/panfrost/panfrost_mmu.c void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) mapping 304 drivers/gpu/drm/panfrost/panfrost_mmu.c struct panfrost_gem_object *bo = mapping->obj; mapping 307 drivers/gpu/drm/panfrost/panfrost_mmu.c struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops; mapping 308 drivers/gpu/drm/panfrost/panfrost_mmu.c u64 iova = mapping->mmnode.start << PAGE_SHIFT; mapping 309 drivers/gpu/drm/panfrost/panfrost_mmu.c size_t len = mapping->mmnode.size << PAGE_SHIFT; mapping 312 drivers/gpu/drm/panfrost/panfrost_mmu.c if (WARN_ON(!mapping->active)) mapping 316 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping->mmu->as, iova, len); mapping 330 drivers/gpu/drm/panfrost/panfrost_mmu.c panfrost_mmu_flush_range(pfdev, mapping->mmu, mapping 331 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping->mmnode.start << PAGE_SHIFT, len); mapping 332 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping->active = false; mapping 410 drivers/gpu/drm/panfrost/panfrost_mmu.c struct panfrost_gem_mapping *mapping = NULL; mapping 431 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping = drm_mm_node_to_panfrost_mapping(node); mapping 433 drivers/gpu/drm/panfrost/panfrost_mmu.c kref_get(&mapping->refcount); mapping 441 drivers/gpu/drm/panfrost/panfrost_mmu.c return mapping; mapping 452 drivers/gpu/drm/panfrost/panfrost_mmu.c struct address_space *mapping; mapping 500 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping = bo->base.base.filp->f_mapping; mapping 501 drivers/gpu/drm/panfrost/panfrost_mmu.c mapping_set_unevictable(mapping); mapping 504 drivers/gpu/drm/panfrost/panfrost_mmu.c pages[i] = shmem_read_mapping_page(mapping, i); mapping 11 drivers/gpu/drm/panfrost/panfrost_mmu.h int panfrost_mmu_map(struct panfrost_gem_mapping *mapping); mapping 12 drivers/gpu/drm/panfrost/panfrost_mmu.h void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping); mapping 28 drivers/gpu/drm/panfrost/panfrost_perfcnt.c struct panfrost_gem_mapping *mapping; mapping 52 drivers/gpu/drm/panfrost/panfrost_perfcnt.c gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; mapping 97 drivers/gpu/drm/panfrost/panfrost_perfcnt.c perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base), mapping 99 drivers/gpu/drm/panfrost/panfrost_perfcnt.c if (!perfcnt->mapping) { mapping 129 drivers/gpu/drm/panfrost/panfrost_perfcnt.c as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu); mapping 166 drivers/gpu/drm/panfrost/panfrost_perfcnt.c panfrost_gem_mapping_put(perfcnt->mapping); mapping 191 drivers/gpu/drm/panfrost/panfrost_perfcnt.c drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); mapping 193 drivers/gpu/drm/panfrost/panfrost_perfcnt.c panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); mapping 194 drivers/gpu/drm/panfrost/panfrost_perfcnt.c panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); mapping 195 drivers/gpu/drm/panfrost/panfrost_perfcnt.c panfrost_gem_mapping_put(perfcnt->mapping); mapping 196 drivers/gpu/drm/panfrost/panfrost_perfcnt.c perfcnt->mapping = NULL; mapping 1088 drivers/gpu/drm/tegra/drm.c if (client->dev->archdata.mapping) { mapping 1089 drivers/gpu/drm/tegra/drm.c struct dma_iommu_mapping *mapping = mapping 1092 drivers/gpu/drm/tegra/drm.c arm_iommu_release_mapping(mapping); 
mapping 1732 drivers/gpu/drm/ttm/ttm_bo.c struct address_space *mapping, mapping 1759 drivers/gpu/drm/ttm/ttm_bo.c bdev->dev_mapping = mapping; mapping 457 drivers/gpu/drm/ttm/ttm_tt.c ttm->pages[i]->mapping = ttm->bdev->dev_mapping; mapping 485 drivers/gpu/drm/ttm/ttm_tt.c (*page)->mapping = NULL; mapping 67 drivers/gpu/drm/vkms/vkms_gem.c struct address_space *mapping; mapping 69 drivers/gpu/drm/vkms/vkms_gem.c mapping = file_inode(obj->gem.filp)->i_mapping; mapping 70 drivers/gpu/drm/vkms/vkms_gem.c page = shmem_read_mapping_page(mapping, page_offset); mapping 265 drivers/gpu/host1x/dev.c if (host->dev->archdata.mapping) { mapping 266 drivers/gpu/host1x/dev.c struct dma_iommu_mapping *mapping = mapping 269 drivers/gpu/host1x/dev.c arm_iommu_release_mapping(mapping); mapping 1679 drivers/hwmon/lm93.c int mapping; mapping 1682 drivers/hwmon/lm93.c mapping = (data->sf_tach_to_pwm >> (nr * 2)) & 0x03; mapping 1685 drivers/hwmon/lm93.c if (mapping && ((data->sfc2 >> nr) & 0x01)) mapping 1686 drivers/hwmon/lm93.c rc = mapping; mapping 1872 drivers/hwmon/lm93.c int mapping = lm93_read_byte(client, LM93_REG_SF_TACH_TO_PWM); mapping 1876 drivers/hwmon/lm93.c mapping = (mapping >> pwm) & 0x55; mapping 1877 drivers/hwmon/lm93.c mask = mapping & 0x01; mapping 1878 drivers/hwmon/lm93.c mask |= (mapping & 0x04) >> 1; mapping 1879 drivers/hwmon/lm93.c mask |= (mapping & 0x10) >> 2; mapping 1880 drivers/hwmon/lm93.c mask |= (mapping & 0x40) >> 3; mapping 966 drivers/hwtracing/intel_th/msu.c page->mapping = NULL; mapping 1126 drivers/hwtracing/intel_th/msu.c page->mapping = NULL; mapping 1571 drivers/hwtracing/intel_th/msu.c if (page->mapping) mapping 1572 drivers/hwtracing/intel_th/msu.c page->mapping = NULL; mapping 1590 drivers/hwtracing/intel_th/msu.c vmf->page->mapping = vmf->vma->vm_file->f_mapping; mapping 2292 drivers/infiniband/core/mad.c recv->header.mapping, mapping 2966 drivers/infiniband/core/mad.c mad_priv->header.mapping = sg_list.addr; mapping 2983 drivers/infiniband/core/mad.c mad_priv->header.mapping, mapping 3022 drivers/infiniband/core/mad.c recv->header.mapping, mapping 75 drivers/infiniband/core/mad_priv.h u64 mapping; mapping 176 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr_set(cq, mapping, cq->dma_addr); mapping 285 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr_set(wq, mapping, wq->dma_addr); mapping 312 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr(cq, mapping)); mapping 322 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr(wq, mapping)); mapping 521 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping, mapping 564 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr(&rdev_p->ctrl_qp, mapping)); mapping 74 drivers/infiniband/hw/cxgb3/cxio_hal.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 694 drivers/infiniband/hw/cxgb3/cxio_wr.h DEFINE_DMA_UNMAP_ADDR(mapping); /* unmap kruft */ mapping 721 drivers/infiniband/hw/cxgb3/cxio_wr.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 66 drivers/infiniband/hw/cxgb4/cq.c dma_unmap_addr(cq, mapping)); mapping 104 drivers/infiniband/hw/cxgb4/cq.c dma_unmap_addr_set(cq, mapping, cq->dma_addr); mapping 175 drivers/infiniband/hw/cxgb4/cq.c dma_unmap_addr(cq, mapping)); mapping 103 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(sq, mapping)); mapping 136 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(sq, mapping, sq->dma_addr); mapping 164 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(&wq->rq, mapping)); mapping 261 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(&wq->sq, mapping, 
wq->sq.dma_addr); mapping 277 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); mapping 394 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(&wq->rq, mapping)); mapping 2511 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(wq, mapping)); mapping 2558 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(wq, mapping, wq->dma_addr); mapping 2637 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(wq, mapping)); mapping 335 drivers/infiniband/hw/cxgb4/t4.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 364 drivers/infiniband/hw/cxgb4/t4.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 401 drivers/infiniband/hw/cxgb4/t4.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 694 drivers/infiniband/hw/cxgb4/t4.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 316 drivers/infiniband/hw/hfi1/user_sdma.c static u8 mapping[256]; mapping 322 drivers/infiniband/hw/hfi1/user_sdma.c memset(mapping, 0xFF, 256); mapping 327 drivers/infiniband/hw/hfi1/user_sdma.c if (mapping[hash] == 0xFF) { mapping 328 drivers/infiniband/hw/hfi1/user_sdma.c mapping[hash] = next; mapping 332 drivers/infiniband/hw/hfi1/user_sdma.c return mapping[hash]; mapping 215 drivers/infiniband/hw/mthca/mthca_allocator.c dma_unmap_addr_set(&buf->direct, mapping, t); mapping 256 drivers/infiniband/hw/mthca/mthca_allocator.c dma_unmap_addr_set(&buf->page_list[i], mapping, t); mapping 294 drivers/infiniband/hw/mthca/mthca_allocator.c dma_unmap_addr(&buf->direct, mapping)); mapping 300 drivers/infiniband/hw/mthca/mthca_allocator.c mapping)); mapping 506 drivers/infiniband/hw/mthca/mthca_eq.c dma_unmap_addr_set(&eq->page_list[i], mapping, t); mapping 576 drivers/infiniband/hw/mthca/mthca_eq.c mapping)); mapping 622 drivers/infiniband/hw/mthca/mthca_eq.c dma_unmap_addr(&eq->page_list[i], mapping)); mapping 628 drivers/infiniband/hw/mthca/mthca_memfree.c MTHCA_ICM_PAGE_SIZE, &page->mapping, mapping 635 drivers/infiniband/hw/mthca/mthca_memfree.c ret = mthca_MAP_ICM_page(dev, page->mapping, mapping 639 drivers/infiniband/hw/mthca/mthca_memfree.c page->db_rec, page->mapping); mapping 686 drivers/infiniband/hw/mthca/mthca_memfree.c page->db_rec, page->mapping); mapping 755 drivers/infiniband/hw/mthca/mthca_memfree.c dev->db_tab->page[i].mapping); mapping 141 drivers/infiniband/hw/mthca/mthca_memfree.h dma_addr_t mapping; mapping 49 drivers/infiniband/hw/mthca/mthca_provider.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 186 drivers/infiniband/ulp/ipoib/ipoib.h u64 mapping[IPOIB_UD_RX_SG]; mapping 191 drivers/infiniband/ulp/ipoib/ipoib.h u64 mapping[MAX_SKB_FRAGS + 1]; mapping 261 drivers/infiniband/ulp/ipoib/ipoib.h u64 mapping[IPOIB_CM_RX_SG]; mapping 548 drivers/infiniband/ulp/ipoib/ipoib.h u64 *mapping = tx_req->mapping; mapping 551 drivers/infiniband/ulp/ipoib/ipoib.h priv->tx_sge[0].addr = mapping[0]; mapping 558 drivers/infiniband/ulp/ipoib/ipoib.h priv->tx_sge[i + off].addr = mapping[i + off]; mapping 84 drivers/infiniband/ulp/ipoib/ipoib_cm.c u64 mapping[IPOIB_CM_RX_SG]) mapping 88 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); mapping 91 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); mapping 102 drivers/infiniband/ulp/ipoib/ipoib_cm.c priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; mapping 108 drivers/infiniband/ulp/ipoib/ipoib_cm.c priv->cm.srq_ring[id].mapping); mapping 127 drivers/infiniband/ulp/ipoib/ipoib_cm.c sge[i].addr = rx->rx_ring[id].mapping[i]; mapping 133 
drivers/infiniband/ulp/ipoib/ipoib_cm.c rx->rx_ring[id].mapping); mapping 144 drivers/infiniband/ulp/ipoib/ipoib_cm.c u64 mapping[IPOIB_CM_RX_SG], mapping 161 drivers/infiniband/ulp/ipoib/ipoib_cm.c mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, mapping 163 drivers/infiniband/ulp/ipoib/ipoib_cm.c if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { mapping 175 drivers/infiniband/ulp/ipoib/ipoib_cm.c mapping[i + 1] = ib_dma_map_page(priv->ca, page, mapping 177 drivers/infiniband/ulp/ipoib/ipoib_cm.c if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1]))) mapping 186 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); mapping 189 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE); mapping 204 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx_ring[i].mapping); mapping 385 drivers/infiniband/ulp/ipoib/ipoib_cm.c rx->rx_ring[i].mapping, mapping 568 drivers/infiniband/ulp/ipoib/ipoib_cm.c u64 mapping[IPOIB_CM_RX_SG]; mapping 632 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0], mapping 635 drivers/infiniband/ulp/ipoib/ipoib_cm.c ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0], mapping 648 drivers/infiniband/ulp/ipoib/ipoib_cm.c mapping, GFP_ATOMIC); mapping 659 drivers/infiniband/ulp/ipoib/ipoib_cm.c ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping); mapping 660 drivers/infiniband/ulp/ipoib/ipoib_cm.c memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping)); mapping 1626 drivers/infiniband/ulp/ipoib/ipoib_cm.c priv->cm.srq_ring[i].mapping, mapping 94 drivers/infiniband/ulp/ipoib/ipoib_ib.c u64 mapping[IPOIB_UD_RX_SG]) mapping 96 drivers/infiniband/ulp/ipoib/ipoib_ib.c ib_dma_unmap_single(priv->ca, mapping[0], mapping 107 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; mapping 108 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; mapping 114 drivers/infiniband/ulp/ipoib/ipoib_ib.c ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); mapping 127 drivers/infiniband/ulp/ipoib/ipoib_ib.c u64 *mapping; mapping 141 drivers/infiniband/ulp/ipoib/ipoib_ib.c mapping = priv->rx_ring[id].mapping; mapping 142 drivers/infiniband/ulp/ipoib/ipoib_ib.c mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, mapping 144 drivers/infiniband/ulp/ipoib/ipoib_ib.c if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) mapping 178 drivers/infiniband/ulp/ipoib/ipoib_ib.c u64 mapping[IPOIB_UD_RX_SG]; mapping 198 drivers/infiniband/ulp/ipoib/ipoib_ib.c ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); mapping 204 drivers/infiniband/ulp/ipoib/ipoib_ib.c memcpy(mapping, priv->rx_ring[wr_id].mapping, mapping 205 drivers/infiniband/ulp/ipoib/ipoib_ib.c IPOIB_UD_RX_SG * sizeof(*mapping)); mapping 219 drivers/infiniband/ulp/ipoib/ipoib_ib.c ipoib_ud_dma_unmap_rx(priv, mapping); mapping 278 drivers/infiniband/ulp/ipoib/ipoib_ib.c u64 *mapping = tx_req->mapping; mapping 283 drivers/infiniband/ulp/ipoib/ipoib_ib.c mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb), mapping 285 drivers/infiniband/ulp/ipoib/ipoib_ib.c if (unlikely(ib_dma_mapping_error(ca, mapping[0]))) mapping 294 drivers/infiniband/ulp/ipoib/ipoib_ib.c mapping[i + off] = ib_dma_map_page(ca, mapping 299 drivers/infiniband/ulp/ipoib/ipoib_ib.c if (unlikely(ib_dma_mapping_error(ca, mapping[i + off]))) 
mapping 308 drivers/infiniband/ulp/ipoib/ipoib_ib.c ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE); mapping 312 drivers/infiniband/ulp/ipoib/ipoib_ib.c ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); mapping 321 drivers/infiniband/ulp/ipoib/ipoib_ib.c u64 *mapping = tx_req->mapping; mapping 326 drivers/infiniband/ulp/ipoib/ipoib_ib.c ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb), mapping 335 drivers/infiniband/ulp/ipoib/ipoib_ib.c ib_dma_unmap_page(priv->ca, mapping[i + off], mapping 824 drivers/infiniband/ulp/ipoib/ipoib_ib.c priv->rx_ring[i].mapping); mapping 111 drivers/input/joystick/xpad.c u8 mapping; mapping 585 drivers/input/joystick/xpad.c int mapping; /* map d-pad to buttons or to axes */ mapping 609 drivers/input/joystick/xpad.c if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { mapping 624 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { mapping 633 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { mapping 683 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { mapping 697 drivers/input/joystick/xpad.c if (!(xpad->mapping & MAP_DPAD_TO_BUTTONS) || mapping 722 drivers/input/joystick/xpad.c if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { mapping 737 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { mapping 855 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { mapping 876 drivers/input/joystick/xpad.c if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { mapping 891 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { mapping 1642 drivers/input/joystick/xpad.c if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { mapping 1662 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { mapping 1674 drivers/input/joystick/xpad.c if (!(xpad->mapping & MAP_DPAD_TO_BUTTONS) || mapping 1680 drivers/input/joystick/xpad.c if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { mapping 1751 drivers/input/joystick/xpad.c xpad->mapping = xpad_device[i].mapping; mapping 1769 drivers/input/joystick/xpad.c xpad->mapping |= MAP_DPAD_TO_BUTTONS; mapping 1771 drivers/input/joystick/xpad.c xpad->mapping |= MAP_TRIGGERS_TO_BUTTONS; mapping 1773 drivers/input/joystick/xpad.c xpad->mapping |= MAP_STICKS_TO_NULL; mapping 68 drivers/iommu/ipmmu-vmsa.c struct dma_iommu_mapping *mapping; mapping 878 drivers/iommu/ipmmu-vmsa.c if (!mmu->mapping) { mapping 879 drivers/iommu/ipmmu-vmsa.c struct dma_iommu_mapping *mapping; mapping 881 drivers/iommu/ipmmu-vmsa.c mapping = arm_iommu_create_mapping(&platform_bus_type, mapping 883 drivers/iommu/ipmmu-vmsa.c if (IS_ERR(mapping)) { mapping 885 drivers/iommu/ipmmu-vmsa.c ret = PTR_ERR(mapping); mapping 889 drivers/iommu/ipmmu-vmsa.c mmu->mapping = mapping; mapping 893 drivers/iommu/ipmmu-vmsa.c ret = arm_iommu_attach_device(dev, mmu->mapping); mapping 903 drivers/iommu/ipmmu-vmsa.c if (mmu->mapping) mapping 904 drivers/iommu/ipmmu-vmsa.c arm_iommu_release_mapping(mmu->mapping); mapping 1169 drivers/iommu/ipmmu-vmsa.c arm_iommu_release_mapping(mmu->mapping); mapping 318 drivers/iommu/virtio-iommu.c struct viommu_mapping *mapping; mapping 320 drivers/iommu/virtio-iommu.c mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC); mapping 321 drivers/iommu/virtio-iommu.c if (!mapping) mapping 324 drivers/iommu/virtio-iommu.c mapping->paddr = paddr; mapping 325 drivers/iommu/virtio-iommu.c mapping->iova.start = iova; mapping 326 drivers/iommu/virtio-iommu.c mapping->iova.last = iova + size - 1; 
mapping 327 drivers/iommu/virtio-iommu.c mapping->flags = flags; mapping 330 drivers/iommu/virtio-iommu.c interval_tree_insert(&mapping->iova, &vdomain->mappings); mapping 352 drivers/iommu/virtio-iommu.c struct viommu_mapping *mapping = NULL; mapping 359 drivers/iommu/virtio-iommu.c mapping = container_of(node, struct viommu_mapping, iova); mapping 363 drivers/iommu/virtio-iommu.c if (mapping->iova.start < iova) mapping 370 drivers/iommu/virtio-iommu.c unmapped += mapping->iova.last - mapping->iova.start + 1; mapping 373 drivers/iommu/virtio-iommu.c kfree(mapping); mapping 391 drivers/iommu/virtio-iommu.c struct viommu_mapping *mapping; mapping 398 drivers/iommu/virtio-iommu.c mapping = container_of(node, struct viommu_mapping, iova); mapping 402 drivers/iommu/virtio-iommu.c .virt_start = cpu_to_le64(mapping->iova.start), mapping 403 drivers/iommu/virtio-iommu.c .virt_end = cpu_to_le64(mapping->iova.last), mapping 404 drivers/iommu/virtio-iommu.c .phys_start = cpu_to_le64(mapping->paddr), mapping 405 drivers/iommu/virtio-iommu.c .flags = cpu_to_le32(mapping->flags), mapping 787 drivers/iommu/virtio-iommu.c struct viommu_mapping *mapping; mapping 794 drivers/iommu/virtio-iommu.c mapping = container_of(node, struct viommu_mapping, iova); mapping 795 drivers/iommu/virtio-iommu.c paddr = mapping->paddr + (iova - mapping->iova.start); mapping 1329 drivers/md/dm-cache-metadata.c __le64 mapping; mapping 1340 drivers/md/dm-cache-metadata.c memcpy(&mapping, mapping_value_le, sizeof(mapping)); mapping 1341 drivers/md/dm-cache-metadata.c unpack_value(mapping, &oblock, &flags); mapping 1371 drivers/md/dm-cache-metadata.c __le64 mapping; mapping 1382 drivers/md/dm-cache-metadata.c memcpy(&mapping, mapping_value_le, sizeof(mapping)); mapping 1383 drivers/md/dm-cache-metadata.c unpack_value(mapping, &oblock, &flags); mapping 667 drivers/media/pci/cx18/cx18-ioctl.c const int mapping[8] = { mapping 693 drivers/media/pci/cx18/cx18-ioctl.c e_idx->flags = mapping[le32_to_cpu(e_buf->flags) & 0x7]; mapping 176 drivers/media/pci/ivtv/ivtv-fileops.c const int mapping[8] = { -1, V4L2_ENC_IDX_FRAME_I, V4L2_ENC_IDX_FRAME_P, -1, mapping 187 drivers/media/pci/ivtv/ivtv-fileops.c e->flags = mapping[read_enc(addr + 12) & 7]; mapping 1951 drivers/media/platform/omap3isp/isp.c arm_iommu_release_mapping(isp->mapping); mapping 1952 drivers/media/platform/omap3isp/isp.c isp->mapping = NULL; mapping 1959 drivers/media/platform/omap3isp/isp.c struct dma_iommu_mapping *mapping; mapping 1966 drivers/media/platform/omap3isp/isp.c mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); mapping 1967 drivers/media/platform/omap3isp/isp.c if (IS_ERR(mapping)) { mapping 1969 drivers/media/platform/omap3isp/isp.c return PTR_ERR(mapping); mapping 1972 drivers/media/platform/omap3isp/isp.c isp->mapping = mapping; mapping 1975 drivers/media/platform/omap3isp/isp.c ret = arm_iommu_attach_device(isp->dev, mapping); mapping 1984 drivers/media/platform/omap3isp/isp.c arm_iommu_release_mapping(isp->mapping); mapping 1985 drivers/media/platform/omap3isp/isp.c isp->mapping = NULL; mapping 188 drivers/media/platform/omap3isp/isp.h struct dma_iommu_mapping *mapping; mapping 48 drivers/media/platform/sti/c8sectpfe/c8sectpfe-common.h int mapping; mapping 85 drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.h int mapping[C8SECTPFEI_MAXCHANNEL]; mapping 365 drivers/media/usb/uvc/uvc_ctrl.c static s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, mapping 383 drivers/media/usb/uvc/uvc_ctrl.c static void uvc_ctrl_set_zoom(struct 
uvc_control_mapping *mapping, mapping 390 drivers/media/usb/uvc/uvc_ctrl.c static s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, mapping 393 drivers/media/usb/uvc/uvc_ctrl.c unsigned int first = mapping->offset / 8; mapping 410 drivers/media/usb/uvc/uvc_ctrl.c static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, mapping 413 drivers/media/usb/uvc/uvc_ctrl.c unsigned int first = mapping->offset / 8; mapping 764 drivers/media/usb/uvc/uvc_ctrl.c static s32 uvc_get_le_value(struct uvc_control_mapping *mapping, mapping 767 drivers/media/usb/uvc/uvc_ctrl.c int bits = mapping->size; mapping 768 drivers/media/usb/uvc/uvc_ctrl.c int offset = mapping->offset; mapping 785 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) mapping 786 drivers/media/usb/uvc/uvc_ctrl.c value |= -(value & (1 << (mapping->size - 1))); mapping 794 drivers/media/usb/uvc/uvc_ctrl.c static void uvc_set_le_value(struct uvc_control_mapping *mapping, mapping 797 drivers/media/usb/uvc/uvc_ctrl.c int bits = mapping->size; mapping 798 drivers/media/usb/uvc/uvc_ctrl.c int offset = mapping->offset; mapping 806 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON) mapping 857 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping **mapping, struct uvc_control **control, mapping 875 drivers/media/usb/uvc/uvc_ctrl.c *mapping = map; mapping 879 drivers/media/usb/uvc/uvc_ctrl.c if ((*mapping == NULL || (*mapping)->id > map->id) && mapping 882 drivers/media/usb/uvc/uvc_ctrl.c *mapping = map; mapping 889 drivers/media/usb/uvc/uvc_ctrl.c u32 v4l2_id, struct uvc_control_mapping **mapping) mapping 895 drivers/media/usb/uvc/uvc_ctrl.c *mapping = NULL; mapping 902 drivers/media/usb/uvc/uvc_ctrl.c __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next); mapping 970 drivers/media/usb/uvc/uvc_ctrl.c static s32 __uvc_ctrl_get_value(struct uvc_control_mapping *mapping, mapping 973 drivers/media/usb/uvc/uvc_ctrl.c s32 value = mapping->get(mapping, UVC_GET_CUR, data); mapping 975 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) { mapping 976 drivers/media/usb/uvc/uvc_ctrl.c const struct uvc_menu_info *menu = mapping->menu_info; mapping 979 drivers/media/usb/uvc/uvc_ctrl.c for (i = 0; i < mapping->menu_count; ++i, ++menu) { mapping 991 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control *ctrl, struct uvc_control_mapping *mapping, mapping 1010 drivers/media/usb/uvc/uvc_ctrl.c *value = __uvc_ctrl_get_value(mapping, mapping 1018 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping, mapping 1027 drivers/media/usb/uvc/uvc_ctrl.c v4l2_ctrl->id = mapping->id; mapping 1028 drivers/media/usb/uvc/uvc_ctrl.c v4l2_ctrl->type = mapping->v4l2_type; mapping 1029 drivers/media/usb/uvc/uvc_ctrl.c strscpy(v4l2_ctrl->name, mapping->name, sizeof(v4l2_ctrl->name)); mapping 1037 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->master_id) mapping 1038 drivers/media/usb/uvc/uvc_ctrl.c __uvc_find_control(ctrl->entity, mapping->master_id, mapping 1046 drivers/media/usb/uvc/uvc_ctrl.c if (val != mapping->master_manual) mapping 1057 drivers/media/usb/uvc/uvc_ctrl.c v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF, mapping 1061 drivers/media/usb/uvc/uvc_ctrl.c switch (mapping->v4l2_type) { mapping 1064 drivers/media/usb/uvc/uvc_ctrl.c v4l2_ctrl->maximum = mapping->menu_count - 1; mapping 1067 drivers/media/usb/uvc/uvc_ctrl.c menu = mapping->menu_info; mapping 1068 drivers/media/usb/uvc/uvc_ctrl.c for (i = 0; i < 
mapping->menu_count; ++i, ++menu) { mapping 1094 drivers/media/usb/uvc/uvc_ctrl.c v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN, mapping 1098 drivers/media/usb/uvc/uvc_ctrl.c v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX, mapping 1102 drivers/media/usb/uvc/uvc_ctrl.c v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES, mapping 1112 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping; mapping 1119 drivers/media/usb/uvc/uvc_ctrl.c ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping); mapping 1125 drivers/media/usb/uvc/uvc_ctrl.c ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl); mapping 1144 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping; mapping 1158 drivers/media/usb/uvc/uvc_ctrl.c ctrl = uvc_find_control(chain, query_menu->id, &mapping); mapping 1159 drivers/media/usb/uvc/uvc_ctrl.c if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) { mapping 1164 drivers/media/usb/uvc/uvc_ctrl.c if (query_menu->index >= mapping->menu_count) { mapping 1169 drivers/media/usb/uvc/uvc_ctrl.c menu_info = &mapping->menu_info[query_menu->index]; mapping 1171 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && mapping 1181 drivers/media/usb/uvc/uvc_ctrl.c bitmap = mapping->get(mapping, UVC_GET_RES, mapping 1203 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping, mapping 1208 drivers/media/usb/uvc/uvc_ctrl.c __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl); mapping 1232 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping, s32 value, u32 changes) mapping 1238 drivers/media/usb/uvc/uvc_ctrl.c if (list_empty(&mapping->ev_subs)) mapping 1241 drivers/media/usb/uvc/uvc_ctrl.c uvc_ctrl_fill_event(chain, &ev, ctrl, mapping, value, changes); mapping 1243 drivers/media/usb/uvc/uvc_ctrl.c list_for_each_entry(sev, &mapping->ev_subs, node) { mapping 1259 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping = NULL; mapping 1264 drivers/media/usb/uvc/uvc_ctrl.c __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0); mapping 1268 drivers/media/usb/uvc/uvc_ctrl.c if (__uvc_ctrl_get(chain, ctrl, mapping, &val) == 0) mapping 1271 drivers/media/usb/uvc/uvc_ctrl.c uvc_ctrl_send_event(chain, handle, ctrl, mapping, val, changes); mapping 1280 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping; mapping 1291 drivers/media/usb/uvc/uvc_ctrl.c list_for_each_entry(mapping, &ctrl->info.mappings, list) { mapping 1292 drivers/media/usb/uvc/uvc_ctrl.c s32 value = __uvc_ctrl_get_value(mapping, w->data); mapping 1298 drivers/media/usb/uvc/uvc_ctrl.c for (i = 0; i < ARRAY_SIZE(mapping->slave_ids); ++i) { mapping 1299 drivers/media/usb/uvc/uvc_ctrl.c if (!mapping->slave_ids[i]) mapping 1303 drivers/media/usb/uvc/uvc_ctrl.c mapping->slave_ids[i]); mapping 1306 drivers/media/usb/uvc/uvc_ctrl.c uvc_ctrl_send_event(chain, handle, ctrl, mapping, value, mapping 1357 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping; mapping 1364 drivers/media/usb/uvc/uvc_ctrl.c ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping); mapping 1370 drivers/media/usb/uvc/uvc_ctrl.c for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) { mapping 1371 drivers/media/usb/uvc/uvc_ctrl.c u32 slave_id = mapping->slave_ids[j]; mapping 1392 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->master_id && mapping 1394 drivers/media/usb/uvc/uvc_ctrl.c mapping->master_id)) mapping 1397 drivers/media/usb/uvc/uvc_ctrl.c uvc_ctrl_send_event(handle->chain, handle, ctrl, 
mapping, mapping 1405 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping; mapping 1413 drivers/media/usb/uvc/uvc_ctrl.c ctrl = uvc_find_control(handle->chain, sev->id, &mapping); mapping 1419 drivers/media/usb/uvc/uvc_ctrl.c list_add_tail(&sev->node, &mapping->ev_subs); mapping 1425 drivers/media/usb/uvc/uvc_ctrl.c if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0) mapping 1428 drivers/media/usb/uvc/uvc_ctrl.c uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val, mapping 1562 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping; mapping 1564 drivers/media/usb/uvc/uvc_ctrl.c ctrl = uvc_find_control(chain, xctrl->id, &mapping); mapping 1568 drivers/media/usb/uvc/uvc_ctrl.c return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value); mapping 1576 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping; mapping 1583 drivers/media/usb/uvc/uvc_ctrl.c ctrl = uvc_find_control(chain, xctrl->id, &mapping); mapping 1590 drivers/media/usb/uvc/uvc_ctrl.c switch (mapping->v4l2_type) { mapping 1598 drivers/media/usb/uvc/uvc_ctrl.c min = mapping->get(mapping, UVC_GET_MIN, mapping 1600 drivers/media/usb/uvc/uvc_ctrl.c max = mapping->get(mapping, UVC_GET_MAX, mapping 1602 drivers/media/usb/uvc/uvc_ctrl.c step = mapping->get(mapping, UVC_GET_RES, mapping 1609 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) mapping 1622 drivers/media/usb/uvc/uvc_ctrl.c if (xctrl->value < 0 || xctrl->value >= mapping->menu_count) mapping 1624 drivers/media/usb/uvc/uvc_ctrl.c value = mapping->menu_info[xctrl->value].value; mapping 1629 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && mapping 1637 drivers/media/usb/uvc/uvc_ctrl.c step = mapping->get(mapping, UVC_GET_RES, mapping 1654 drivers/media/usb/uvc/uvc_ctrl.c if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) { mapping 1678 drivers/media/usb/uvc/uvc_ctrl.c mapping->set(mapping, value, mapping 2050 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control *ctrl, const struct uvc_control_mapping *mapping) mapping 2059 drivers/media/usb/uvc/uvc_ctrl.c map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL); mapping 2065 drivers/media/usb/uvc/uvc_ctrl.c size = sizeof(*mapping->menu_info) * mapping->menu_count; mapping 2066 drivers/media/usb/uvc/uvc_ctrl.c map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL); mapping 2086 drivers/media/usb/uvc/uvc_ctrl.c const struct uvc_control_mapping *mapping) mapping 2095 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->id & ~V4L2_CTRL_ID_MASK) { mapping 2097 drivers/media/usb/uvc/uvc_ctrl.c "id 0x%08x is invalid.\n", mapping->name, mapping 2098 drivers/media/usb/uvc/uvc_ctrl.c mapping->id); mapping 2107 drivers/media/usb/uvc/uvc_ctrl.c !uvc_entity_match_guid(entity, mapping->entity)) mapping 2112 drivers/media/usb/uvc/uvc_ctrl.c if (ctrl->index == mapping->selector - 1) { mapping 2135 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->size > 32 || mapping 2136 drivers/media/usb/uvc/uvc_ctrl.c mapping->offset + mapping->size > ctrl->info.size * 8) { mapping 2142 drivers/media/usb/uvc/uvc_ctrl.c if (mapping->id == map->id) { mapping 2145 drivers/media/usb/uvc/uvc_ctrl.c mapping->name, mapping->id); mapping 2155 drivers/media/usb/uvc/uvc_ctrl.c "mappings count (%u) exceeded.\n", mapping->name, mapping 2161 drivers/media/usb/uvc/uvc_ctrl.c ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping); mapping 2240 drivers/media/usb/uvc/uvc_ctrl.c const struct uvc_control_mapping *mapping = uvc_ctrl_mappings; 
mapping 2242 drivers/media/usb/uvc/uvc_ctrl.c mapping + ARRAY_SIZE(uvc_ctrl_mappings); mapping 2263 drivers/media/usb/uvc/uvc_ctrl.c for (; mapping < mend; ++mapping) { mapping 2264 drivers/media/usb/uvc/uvc_ctrl.c if (uvc_entity_match_guid(ctrl->entity, mapping->entity) && mapping 2265 drivers/media/usb/uvc/uvc_ctrl.c ctrl->info.selector == mapping->selector) mapping 2266 drivers/media/usb/uvc/uvc_ctrl.c __uvc_ctrl_add_mapping(dev, ctrl, mapping); mapping 2334 drivers/media/usb/uvc/uvc_ctrl.c struct uvc_control_mapping *mapping, *nm; mapping 2336 drivers/media/usb/uvc/uvc_ctrl.c list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) { mapping 2337 drivers/media/usb/uvc/uvc_ctrl.c list_del(&mapping->list); mapping 2338 drivers/media/usb/uvc/uvc_ctrl.c kfree(mapping->menu_info); mapping 2339 drivers/media/usb/uvc/uvc_ctrl.c kfree(mapping); mapping 248 drivers/media/usb/uvc/uvcvideo.h s32 (*get)(struct uvc_control_mapping *mapping, u8 query, mapping 250 drivers/media/usb/uvc/uvcvideo.h void (*set)(struct uvc_control_mapping *mapping, s32 value, mapping 831 drivers/media/usb/uvc/uvcvideo.h const struct uvc_control_mapping *mapping); mapping 75 drivers/mfd/asic3.c void __iomem *mapping; mapping 92 drivers/mfd/asic3.c iowrite16(value, asic->mapping + mapping 99 drivers/mfd/asic3.c return ioread16(asic->mapping + mapping 975 drivers/mfd/asic3.c asic->mapping = ioremap(mem->start, resource_size(mem)); mapping 976 drivers/mfd/asic3.c if (!asic->mapping) { mapping 1030 drivers/mfd/asic3.c iounmap(asic->mapping); mapping 1052 drivers/mfd/asic3.c iounmap(asic->mapping); mapping 22 drivers/mfd/htc-pasic3.c void __iomem *mapping; mapping 38 drivers/mfd/htc-pasic3.c void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); mapping 39 drivers/mfd/htc-pasic3.c void __iomem *data = asic->mapping + (REG_DATA << bus_shift); mapping 53 drivers/mfd/htc-pasic3.c void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); mapping 54 drivers/mfd/htc-pasic3.c void __iomem *data = asic->mapping + (REG_DATA << bus_shift); mapping 153 drivers/mfd/htc-pasic3.c asic->mapping = ioremap(r->start, resource_size(r)); mapping 154 drivers/mfd/htc-pasic3.c if (!asic->mapping) { mapping 191 drivers/mfd/htc-pasic3.c iounmap(asic->mapping); mapping 52 drivers/misc/cxl/api.c if (ctx->kernelapi && ctx->mapping) mapping 395 drivers/misc/cxl/api.c if (ctx->mapping) mapping 43 drivers/misc/cxl/context.c ctx->mapping = NULL; mapping 119 drivers/misc/cxl/context.c struct address_space *mapping) mapping 122 drivers/misc/cxl/context.c ctx->mapping = mapping; mapping 317 drivers/misc/cxl/context.c if (ctx->mapping) mapping 318 drivers/misc/cxl/context.c unmap_mapping_range(ctx->mapping, 0, 0, 1); mapping 344 drivers/misc/cxl/context.c if (ctx->kernelapi && ctx->mapping) mapping 546 drivers/misc/cxl/cxl.h struct address_space *mapping; mapping 981 drivers/misc/cxl/cxl.h struct address_space *mapping); mapping 132 drivers/misc/cxl/file.c ctx->mapping = NULL; mapping 157 drivers/misc/ocxl/afu_irq.c if (ctx->mapping) mapping 158 drivers/misc/ocxl/afu_irq.c unmap_mapping_range(ctx->mapping, mapping 8 drivers/misc/ocxl/context.c struct address_space *mapping) mapping 32 drivers/misc/ocxl/context.c ctx->mapping = mapping; mapping 274 drivers/misc/ocxl/context.c if (ctx->mapping) mapping 275 drivers/misc/ocxl/context.c unmap_mapping_range(ctx->mapping, 0, 0, 1); mapping 465 drivers/misc/ocxl/file.c ctx->mapping = NULL; mapping 75 drivers/misc/ocxl/ocxl_internal.h struct address_space *mapping; mapping 47 
drivers/mtd/devices/block2mtd.c static struct page *page_read(struct address_space *mapping, int index) mapping 49 drivers/mtd/devices/block2mtd.c return read_mapping_page(mapping, index, NULL); mapping 55 drivers/mtd/devices/block2mtd.c struct address_space *mapping = dev->blkdev->bd_inode->i_mapping; mapping 63 drivers/mtd/devices/block2mtd.c page = page_read(mapping, index); mapping 74 drivers/mtd/devices/block2mtd.c balance_dirty_pages_ratelimited(mapping); mapping 139 drivers/mtd/devices/block2mtd.c struct address_space *mapping = dev->blkdev->bd_inode->i_mapping; mapping 151 drivers/mtd/devices/block2mtd.c page = page_read(mapping, index); mapping 160 drivers/mtd/devices/block2mtd.c balance_dirty_pages_ratelimited(mapping); mapping 1303 drivers/mtd/nand/raw/nandsim.c struct address_space *mapping = file->f_mapping; mapping 1311 drivers/mtd/nand/raw/nandsim.c page = find_get_page(mapping, index); mapping 1313 drivers/mtd/nand/raw/nandsim.c page = find_or_create_page(mapping, index, GFP_NOFS); mapping 1315 drivers/mtd/nand/raw/nandsim.c write_inode_now(mapping->host, 1); mapping 1316 drivers/mtd/nand/raw/nandsim.c page = find_or_create_page(mapping, index, GFP_NOFS); mapping 1530 drivers/net/dsa/mv88e6xxx/chip.c enum mv88e6xxx_policy_mapping mapping = policy->mapping; mapping 1541 drivers/net/dsa/mv88e6xxx/chip.c switch (mapping) { mapping 1568 drivers/net/dsa/mv88e6xxx/chip.c policy->mapping == mapping && mapping 1572 drivers/net/dsa/mv88e6xxx/chip.c return chip->info->ops->port_set_policy(chip, port, mapping, action); mapping 1580 drivers/net/dsa/mv88e6xxx/chip.c enum mv88e6xxx_policy_mapping mapping; mapping 1600 drivers/net/dsa/mv88e6xxx/chip.c mapping = MV88E6XXX_POLICY_MAPPING_DA; mapping 1604 drivers/net/dsa/mv88e6xxx/chip.c mapping = MV88E6XXX_POLICY_MAPPING_SA; mapping 1622 drivers/net/dsa/mv88e6xxx/chip.c if (policy->port == port && policy->mapping == mapping && mapping 1642 drivers/net/dsa/mv88e6xxx/chip.c policy->mapping = mapping; mapping 212 drivers/net/dsa/mv88e6xxx/chip.h enum mv88e6xxx_policy_mapping mapping; mapping 416 drivers/net/dsa/mv88e6xxx/chip.h enum mv88e6xxx_policy_mapping mapping, mapping 1348 drivers/net/dsa/mv88e6xxx/port.c enum mv88e6xxx_policy_mapping mapping, mapping 1355 drivers/net/dsa/mv88e6xxx/port.c switch (mapping) { mapping 340 drivers/net/dsa/mv88e6xxx/port.h enum mv88e6xxx_policy_mapping mapping, mapping 522 drivers/net/ethernet/adaptec/starfire.c dma_addr_t mapping; mapping 526 drivers/net/ethernet/adaptec/starfire.c dma_addr_t mapping; mapping 1154 drivers/net/ethernet/adaptec/starfire.c np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); mapping 1156 drivers/net/ethernet/adaptec/starfire.c np->rx_info[i].mapping)) { mapping 1162 drivers/net/ethernet/adaptec/starfire.c np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); mapping 1171 drivers/net/ethernet/adaptec/starfire.c np->rx_info[i].mapping = 0; mapping 1234 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping = mapping 1239 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping = mapping 1246 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping)) { mapping 1251 drivers/net/ethernet/adaptec/starfire.c np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); mapping 1290 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping, mapping 1293 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping = 0; mapping 1297 drivers/net/ethernet/adaptec/starfire.c 
np->tx_info[entry].mapping, mapping 1375 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping, mapping 1378 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping = 0; mapping 1385 drivers/net/ethernet/adaptec/starfire.c np->tx_info[entry].mapping, mapping 1480 drivers/net/ethernet/adaptec/starfire.c np->rx_info[entry].mapping, mapping 1484 drivers/net/ethernet/adaptec/starfire.c np->rx_info[entry].mapping, mapping 1488 drivers/net/ethernet/adaptec/starfire.c pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); mapping 1492 drivers/net/ethernet/adaptec/starfire.c np->rx_info[entry].mapping = 0; mapping 1605 drivers/net/ethernet/adaptec/starfire.c np->rx_info[entry].mapping = mapping 1608 drivers/net/ethernet/adaptec/starfire.c np->rx_info[entry].mapping)) { mapping 1614 drivers/net/ethernet/adaptec/starfire.c cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); mapping 1982 drivers/net/ethernet/adaptec/starfire.c pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE); mapping 1986 drivers/net/ethernet/adaptec/starfire.c np->rx_info[i].mapping = 0; mapping 1993 drivers/net/ethernet/adaptec/starfire.c np->tx_info[i].mapping, mapping 1995 drivers/net/ethernet/adaptec/starfire.c np->tx_info[i].mapping = 0; mapping 641 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 644 drivers/net/ethernet/alteon/acenic.c mapping = dma_unmap_addr(ringp, mapping); mapping 645 drivers/net/ethernet/alteon/acenic.c pci_unmap_page(ap->pdev, mapping, mapping 661 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 664 drivers/net/ethernet/alteon/acenic.c mapping = dma_unmap_addr(ringp,mapping); mapping 665 drivers/net/ethernet/alteon/acenic.c pci_unmap_page(ap->pdev, mapping, mapping 680 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 683 drivers/net/ethernet/alteon/acenic.c mapping = dma_unmap_addr(ringp, mapping); mapping 684 drivers/net/ethernet/alteon/acenic.c pci_unmap_page(ap->pdev, mapping, mapping 1643 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 1649 drivers/net/ethernet/alteon/acenic.c mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), mapping 1655 drivers/net/ethernet/alteon/acenic.c mapping, mapping); mapping 1658 drivers/net/ethernet/alteon/acenic.c set_aceaddr(&rd->addr, mapping); mapping 1704 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 1710 drivers/net/ethernet/alteon/acenic.c mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), mapping 1716 drivers/net/ethernet/alteon/acenic.c mapping, mapping); mapping 1719 drivers/net/ethernet/alteon/acenic.c set_aceaddr(&rd->addr, mapping); mapping 1760 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 1766 drivers/net/ethernet/alteon/acenic.c mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), mapping 1772 drivers/net/ethernet/alteon/acenic.c mapping, mapping); mapping 1775 drivers/net/ethernet/alteon/acenic.c set_aceaddr(&rd->addr, mapping); mapping 1981 drivers/net/ethernet/alteon/acenic.c dma_unmap_addr(rip, mapping), mapping 2049 drivers/net/ethernet/alteon/acenic.c pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), mapping 2340 drivers/net/ethernet/alteon/acenic.c pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping), mapping 2369 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 2372 drivers/net/ethernet/alteon/acenic.c mapping = pci_map_page(ap->pdev, virt_to_page(skb->data), mapping 2378 
drivers/net/ethernet/alteon/acenic.c dma_unmap_addr_set(info, mapping, mapping); mapping 2380 drivers/net/ethernet/alteon/acenic.c return mapping; mapping 2423 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 2426 drivers/net/ethernet/alteon/acenic.c mapping = ace_map_tx_skb(ap, skb, skb, idx); mapping 2441 drivers/net/ethernet/alteon/acenic.c ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); mapping 2443 drivers/net/ethernet/alteon/acenic.c dma_addr_t mapping; mapping 2447 drivers/net/ethernet/alteon/acenic.c mapping = ace_map_tx_skb(ap, skb, NULL, idx); mapping 2456 drivers/net/ethernet/alteon/acenic.c ace_load_tx_bd(ap, ap->tx_ring + idx, mapping, flagsize, vlan_tag); mapping 2468 drivers/net/ethernet/alteon/acenic.c mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0, mapping 2490 drivers/net/ethernet/alteon/acenic.c dma_unmap_addr_set(info, mapping, mapping); mapping 2492 drivers/net/ethernet/alteon/acenic.c ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); mapping 594 drivers/net/ethernet/alteon/acenic.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 605 drivers/net/ethernet/alteon/acenic.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 1696 drivers/net/ethernet/atheros/atl1c/atl1c_main.c dma_addr_t mapping; mapping 1723 drivers/net/ethernet/atheros/atl1c/atl1c_main.c mapping = pci_map_single(pdev, vir_addr, mapping 1726 drivers/net/ethernet/atheros/atl1c/atl1c_main.c if (unlikely(pci_dma_mapping_error(pdev, mapping))) { mapping 1734 drivers/net/ethernet/atheros/atl1c/atl1c_main.c buffer_info->dma = mapping; mapping 633 drivers/net/ethernet/broadcom/b44.c rp->mapping, mapping 664 drivers/net/ethernet/broadcom/b44.c dma_addr_t mapping; mapping 677 drivers/net/ethernet/broadcom/b44.c mapping = dma_map_single(bp->sdev->dma_dev, skb->data, mapping 683 drivers/net/ethernet/broadcom/b44.c if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping 684 drivers/net/ethernet/broadcom/b44.c mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { mapping 686 drivers/net/ethernet/broadcom/b44.c if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) mapping 687 drivers/net/ethernet/broadcom/b44.c dma_unmap_single(bp->sdev->dma_dev, mapping, mapping 693 drivers/net/ethernet/broadcom/b44.c mapping = dma_map_single(bp->sdev->dma_dev, skb->data, mapping 696 drivers/net/ethernet/broadcom/b44.c if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping 697 drivers/net/ethernet/broadcom/b44.c mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { mapping 698 drivers/net/ethernet/broadcom/b44.c if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) mapping 699 drivers/net/ethernet/broadcom/b44.c dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE); mapping 712 drivers/net/ethernet/broadcom/b44.c map->mapping = mapping; mapping 723 drivers/net/ethernet/broadcom/b44.c dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset); mapping 751 drivers/net/ethernet/broadcom/b44.c dest_map->mapping = src_map->mapping; mapping 774 drivers/net/ethernet/broadcom/b44.c dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping, mapping 792 drivers/net/ethernet/broadcom/b44.c dma_addr_t map = rp->mapping; mapping 977 drivers/net/ethernet/broadcom/b44.c dma_addr_t mapping; mapping 991 drivers/net/ethernet/broadcom/b44.c mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE); mapping 992 drivers/net/ethernet/broadcom/b44.c if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { mapping 996 drivers/net/ethernet/broadcom/b44.c if 
(!dma_mapping_error(bp->sdev->dma_dev, mapping)) mapping 997 drivers/net/ethernet/broadcom/b44.c dma_unmap_single(bp->sdev->dma_dev, mapping, len, mapping 1004 drivers/net/ethernet/broadcom/b44.c mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data, mapping 1006 drivers/net/ethernet/broadcom/b44.c if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) { mapping 1007 drivers/net/ethernet/broadcom/b44.c if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) mapping 1008 drivers/net/ethernet/broadcom/b44.c dma_unmap_single(bp->sdev->dma_dev, mapping, mapping 1021 drivers/net/ethernet/broadcom/b44.c bp->tx_buffers[entry].mapping = mapping; mapping 1029 drivers/net/ethernet/broadcom/b44.c bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset); mapping 1104 drivers/net/ethernet/broadcom/b44.c dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ, mapping 1116 drivers/net/ethernet/broadcom/b44.c dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len, mapping 283 drivers/net/ethernet/broadcom/b44.h dma_addr_t mapping; mapping 666 drivers/net/ethernet/broadcom/bcmsysport.c dma_addr_t mapping; mapping 677 drivers/net/ethernet/broadcom/bcmsysport.c mapping = dma_map_single(kdev, skb->data, mapping 679 drivers/net/ethernet/broadcom/bcmsysport.c if (dma_mapping_error(kdev, mapping)) { mapping 694 drivers/net/ethernet/broadcom/bcmsysport.c dma_unmap_addr_set(cb, dma_addr, mapping); mapping 695 drivers/net/ethernet/broadcom/bcmsysport.c dma_desc_set_addr(priv, cb->bd_addr, mapping); mapping 1285 drivers/net/ethernet/broadcom/bcmsysport.c dma_addr_t mapping; mapping 1313 drivers/net/ethernet/broadcom/bcmsysport.c mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); mapping 1314 drivers/net/ethernet/broadcom/bcmsysport.c if (dma_mapping_error(kdev, mapping)) { mapping 1325 drivers/net/ethernet/broadcom/bcmsysport.c dma_unmap_addr_set(cb, dma_addr, mapping); mapping 1328 drivers/net/ethernet/broadcom/bcmsysport.c addr_lo = lower_32_bits(mapping); mapping 1329 drivers/net/ethernet/broadcom/bcmsysport.c len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK; mapping 2730 drivers/net/ethernet/broadcom/bnx2.c dma_addr_t mapping; mapping 2738 drivers/net/ethernet/broadcom/bnx2.c mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, mapping 2740 drivers/net/ethernet/broadcom/bnx2.c if (dma_mapping_error(&bp->pdev->dev, mapping)) { mapping 2746 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr_set(rx_pg, mapping, mapping); mapping 2747 drivers/net/ethernet/broadcom/bnx2.c rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; mapping 2748 drivers/net/ethernet/broadcom/bnx2.c rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; mapping 2761 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), mapping 2773 drivers/net/ethernet/broadcom/bnx2.c dma_addr_t mapping; mapping 2781 drivers/net/ethernet/broadcom/bnx2.c mapping = dma_map_single(&bp->pdev->dev, mapping 2785 drivers/net/ethernet/broadcom/bnx2.c if (dma_mapping_error(&bp->pdev->dev, mapping)) { mapping 2791 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr_set(rx_buf, mapping, mapping); mapping 2793 drivers/net/ethernet/broadcom/bnx2.c rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; mapping 2794 drivers/net/ethernet/broadcom/bnx2.c rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; mapping 2889 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), mapping 2902 
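The b44 entries above illustrate a device with a 30-bit DMA reach: a mapping is accepted only if it both succeeds and lies below DMA_BIT_MASK(30); otherwise it is unmapped and the data is bounced through a fresh GFP_DMA allocation. A sketch of that retry logic under a hypothetical demo_map_low() helper; the real driver's allocation and error paths differ in detail:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static dma_addr_t demo_map_low(struct device *dev, struct sk_buff **pskb,
			       unsigned int len)
{
	struct sk_buff *skb = *pskb, *bounce;
	dma_addr_t mapping;

	mapping = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (!dma_mapping_error(dev, mapping) &&
	    mapping + len <= DMA_BIT_MASK(30))
		return mapping;

	/* Mapping failed or landed too high: release and bounce. */
	if (!dma_mapping_error(dev, mapping))
		dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);

	bounce = alloc_skb(len, GFP_ATOMIC | GFP_DMA);
	if (!bounce)
		return DMA_MAPPING_ERROR;
	skb_put_data(bounce, skb->data, len);

	mapping = dma_map_single(dev, bounce->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mapping) ||
	    mapping + len > DMA_BIT_MASK(30)) {
		if (!dma_mapping_error(dev, mapping))
			dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
		kfree_skb(bounce);
		return DMA_MAPPING_ERROR;
	}

	dev_kfree_skb_any(skb);
	*pskb = bounce;
	return mapping;
}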
drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(tx_buf, mapping), mapping 2986 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr_set(prod_rx_pg, mapping, mapping 2987 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(cons_rx_pg, mapping)); mapping 3011 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(cons_rx_buf, mapping), mapping 3021 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr_set(prod_rx_buf, mapping, mapping 3022 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(cons_rx_buf, mapping)); mapping 3100 drivers/net/ethernet/broadcom/bnx2.c mapping_old = dma_unmap_addr(rx_pg, mapping); mapping 3185 drivers/net/ethernet/broadcom/bnx2.c dma_addr = dma_unmap_addr(rx_buf, mapping); mapping 5456 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(tx_buf, mapping), mapping 5467 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(tx_buf, mapping), mapping 5498 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(rx_buf, mapping), mapping 5909 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr(rx_buf, mapping), mapping 6598 drivers/net/ethernet/broadcom/bnx2.c dma_addr_t mapping; mapping 6669 drivers/net/ethernet/broadcom/bnx2.c mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); mapping 6670 drivers/net/ethernet/broadcom/bnx2.c if (dma_mapping_error(&bp->pdev->dev, mapping)) { mapping 6677 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr_set(tx_buf, mapping, mapping); mapping 6681 drivers/net/ethernet/broadcom/bnx2.c txbd->tx_bd_haddr_hi = (u64) mapping >> 32; mapping 6682 drivers/net/ethernet/broadcom/bnx2.c txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; mapping 6698 drivers/net/ethernet/broadcom/bnx2.c mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len, mapping 6700 drivers/net/ethernet/broadcom/bnx2.c if (dma_mapping_error(&bp->pdev->dev, mapping)) mapping 6702 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, mapping 6703 drivers/net/ethernet/broadcom/bnx2.c mapping); mapping 6705 drivers/net/ethernet/broadcom/bnx2.c txbd->tx_bd_haddr_hi = (u64) mapping >> 32; mapping 6706 drivers/net/ethernet/broadcom/bnx2.c txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff; mapping 6749 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), mapping 6757 drivers/net/ethernet/broadcom/bnx2.c dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), mapping 6625 drivers/net/ethernet/broadcom/bnx2.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 6639 drivers/net/ethernet/broadcom/bnx2.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 6644 drivers/net/ethernet/broadcom/bnx2.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 353 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 367 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 2022 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, mapping 438 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_addr_t mapping; mapping 447 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c mapping = dma_map_single(&bp->pdev->dev, mapping 456 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { mapping 465 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr_set(prod_rx_buf, mapping, mapping); mapping 467 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); mapping 468 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 
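Many of the bnx2 descriptors above carry the mapped address as two 32-bit halves (tx_bd_haddr_hi/lo, rx_bd_haddr_hi/lo). The split is just upper_32_bits()/lower_32_bits(); a sketch against a hypothetical demo_bd that mirrors the idea, not bnx2's exact layout or endianness handling:

#include <linux/dma-mapping.h>
#include <linux/kernel.h>

struct demo_bd {
	__le32 haddr_hi;
	__le32 haddr_lo;
	__le32 len;
};

static void demo_fill_bd(struct demo_bd *bd, dma_addr_t mapping, u32 len)
{
	/* Hardware sees the 64-bit bus address as two 32-bit words. */
	bd->haddr_hi = cpu_to_le32(upper_32_bits(mapping));
	bd->haddr_lo = cpu_to_le32(lower_32_bits(mapping));
	bd->len = cpu_to_le32(len);
}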
prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); mapping 552 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_addr_t mapping; mapping 562 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c mapping = dma_map_page(&bp->pdev->dev, pool->page, mapping 564 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { mapping 572 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr_set(sw_buf, mapping, mapping); mapping 574 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c sge->addr_hi = cpu_to_le32(U64_HI(mapping)); mapping 575 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c sge->addr_lo = cpu_to_le32(U64_LO(mapping)); mapping 644 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr(&old_rx_pg, mapping), mapping 779 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), mapping 832 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_addr_t mapping; mapping 838 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD, mapping 841 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { mapping 848 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr_set(rx_buf, mapping, mapping); mapping 850 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); mapping 851 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); mapping 1013 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr(rx_buf, mapping), mapping 1045 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr(rx_buf, mapping), mapping 1371 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr(first_buf, mapping), mapping 1427 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr_set(first_buf, mapping, 0); mapping 1548 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_unmap_addr(rx_buf, mapping), mapping 3286 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_addr_t mapping; mapping 3300 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi), mapping 3303 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); mapping 3304 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); mapping 3748 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c dma_addr_t mapping; mapping 3828 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c mapping = dma_map_single(&bp->pdev->dev, skb->data, mapping 3830 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { mapping 4026 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); mapping 4027 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); mapping 4074 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, mapping 4076 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { mapping 4099 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); mapping 4100 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); mapping 815 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, 
mapping), mapping 897 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h dma_unmap_addr_set(prod_rx_buf, mapping, mapping 898 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h dma_unmap_addr(cons_rx_buf, mapping)); mapping 2505 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c dma_addr_t mapping; mapping 2565 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c mapping = dma_map_single(&bp->pdev->dev, skb->data, mapping 2567 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { mapping 2589 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); mapping 2590 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); mapping 2670 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c dma_unmap_addr(rx_buf, mapping), mapping 301 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c u32 addr, dma_addr_t mapping) mapping 303 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c REG_WR(bp, addr, U64_LO(mapping)); mapping 304 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c REG_WR(bp, addr + 4, U64_HI(mapping)); mapping 308 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c dma_addr_t mapping, u16 abs_fid) mapping 313 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c __storm_memset_dma_mapping(bp, addr, mapping); mapping 5939 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid, mapping 5965 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping); mapping 5966 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping); mapping 5980 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping); mapping 5981 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping); mapping 6018 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c dma_addr_t mapping = bp->def_status_blk_mapping; mapping 6038 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c section = ((u64)mapping) + offsetof(struct host_sp_status_block, mapping 6078 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c section = ((u64)mapping) + offsetof(struct host_sp_status_block, mapping 1346 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BNX2X_PCI_FREE(cxt->addr, cxt->mapping, cxt->size); mapping 1350 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BP_VFDB(bp)->sp_dma.mapping, mapping 1354 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BP_VF_MBX_DMA(bp)->mapping, mapping 1358 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BP_VF_BULLETIN_DMA(bp)->mapping, mapping 1379 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size); mapping 1384 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c cxt->mapping = 0; mapping 1391 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping, mapping 1399 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping, mapping 1408 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping, mapping 1608 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c BP_VF_MBX(bp, vfid)->msg_mapping = BP_VF_MBX_DMA(bp)->mapping + mapping 1665 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c ilt->lines[line+i].page_mapping = 
hw_cxt->mapping; mapping 313 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h dma_addr_t mapping; mapping 349 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h #define bnx2x_vf_sp_map(bp, vf, field) ((bp)->vfdb->sp_dma.mapping + \ mapping 2291 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c dma_addr_t pf_addr = BP_VF_BULLETIN_DMA(bp)->mapping + mapping 352 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 483 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); mapping 485 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { mapping 491 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_addr_set(tx_buf, mapping, mapping); mapping 495 drivers/net/ethernet/broadcom/bnxt/bnxt.c txbd->tx_bd_haddr = cpu_to_le64(mapping); mapping 545 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, mapping 548 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (unlikely(dma_mapping_error(&pdev->dev, mapping))) mapping 552 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_addr_set(tx_buf, mapping, mapping); mapping 554 drivers/net/ethernet/broadcom/bnxt/bnxt.c txbd->tx_bd_haddr = cpu_to_le64(mapping); mapping 602 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), mapping 610 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), mapping 643 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), mapping 652 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_addr(tx_buf, mapping), mapping 685 drivers/net/ethernet/broadcom/bnxt/bnxt.c static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, mapping 696 drivers/net/ethernet/broadcom/bnxt/bnxt.c *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir, mapping 698 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (dma_mapping_error(dev, *mapping)) { mapping 702 drivers/net/ethernet/broadcom/bnxt/bnxt.c *mapping += bp->rx_dma_offset; mapping 706 drivers/net/ethernet/broadcom/bnxt/bnxt.c static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping, mapping 716 drivers/net/ethernet/broadcom/bnxt/bnxt.c *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset, mapping 720 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (dma_mapping_error(&pdev->dev, *mapping)) { mapping 732 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 736 drivers/net/ethernet/broadcom/bnxt/bnxt.c __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp); mapping 744 drivers/net/ethernet/broadcom/bnxt/bnxt.c u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp); mapping 752 drivers/net/ethernet/broadcom/bnxt/bnxt.c rx_buf->mapping = mapping; mapping 754 drivers/net/ethernet/broadcom/bnxt/bnxt.c rxbd->rx_bd_haddr = cpu_to_le64(mapping); mapping 770 drivers/net/ethernet/broadcom/bnxt/bnxt.c prod_rx_buf->mapping = cons_rx_buf->mapping; mapping 797 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 822 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping = dma_map_page_attrs(&pdev->dev, page, offset, mapping 825 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (dma_mapping_error(&pdev->dev, mapping)) { mapping 839 drivers/net/ethernet/broadcom/bnxt/bnxt.c rx_agg_buf->mapping = mapping; mapping 840 drivers/net/ethernet/broadcom/bnxt/bnxt.c rxbd->rx_bd_haddr = cpu_to_le64(mapping); mapping 909 
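__bnxt_alloc_rx_page() above maps a whole page with dma_map_page_attrs() and then adds bp->rx_dma_offset so the recorded address points at the packet data rather than the page start. A sketch of that shape; demo_alloc_rx_page() and its offset parameter are illustrative, and DMA_ATTR_WEAK_ORDERING stands in for whatever attributes the driver actually passes on a given chip:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *demo_alloc_rx_page(struct device *dev,
					dma_addr_t *mapping,
					unsigned int offset, gfp_t gfp)
{
	struct page *page = alloc_page(gfp);

	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				      DMA_FROM_DEVICE,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	/* Bias the stored address by the rx headroom offset. */
	*mapping += offset;
	return page;
}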
drivers/net/ethernet/broadcom/bnxt/bnxt.c prod_rx_buf->mapping = cons_rx_buf->mapping; mapping 913 drivers/net/ethernet/broadcom/bnxt/bnxt.c prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping); mapping 1019 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 1038 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping = cons_rx_buf->mapping; mapping 1062 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, mapping 1091 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping) mapping 1101 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh, mapping 1107 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh, mapping 1213 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 1242 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping = tpa_info->mapping; mapping 1243 drivers/net/ethernet/broadcom/bnxt/bnxt.c prod_rx_buf->mapping = mapping; mapping 1247 drivers/net/ethernet/broadcom/bnxt/bnxt.c prod_bd->rx_bd_haddr = cpu_to_le64(mapping); mapping 1252 drivers/net/ethernet/broadcom/bnxt/bnxt.c tpa_info->mapping = cons_rx_buf->mapping; mapping 1513 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 1560 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping = tpa_info->mapping; mapping 1571 drivers/net/ethernet/broadcom/bnxt/bnxt.c skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); mapping 1588 drivers/net/ethernet/broadcom/bnxt/bnxt.c tpa_info->mapping = new_mapping; mapping 1591 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_single_attrs(&bp->pdev->dev, mapping, mapping 1782 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr = rx_buf->mapping; mapping 2494 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_addr(tx_buf, mapping), mapping 2519 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_addr(tx_buf, mapping), mapping 2532 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_addr(tx_buf, mapping), mapping 2566 drivers/net/ethernet/broadcom/bnxt/bnxt.c tpa_info->mapping, mapping 2579 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping = rx_buf->mapping; mapping 2588 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping -= bp->rx_dma_offset; mapping 2589 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_page_attrs(&pdev->dev, mapping, mapping 2594 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_single_attrs(&pdev->dev, mapping, mapping 2610 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, mapping 2937 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 2950 drivers/net/ethernet/broadcom/bnxt/bnxt.c mapping = txr->tx_push_mapping + mapping 2952 drivers/net/ethernet/broadcom/bnxt/bnxt.c txr->data_mapping = cpu_to_le64(mapping); mapping 3227 drivers/net/ethernet/broadcom/bnxt/bnxt.c dma_addr_t mapping; mapping 3230 drivers/net/ethernet/broadcom/bnxt/bnxt.c data = __bnxt_alloc_rx_data(bp, &mapping, mapping 3237 drivers/net/ethernet/broadcom/bnxt/bnxt.c rxr->rx_tpa[i].mapping = mapping; mapping 687 drivers/net/ethernet/broadcom/bnxt/bnxt.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 701 drivers/net/ethernet/broadcom/bnxt/bnxt.h dma_addr_t mapping; mapping 707 drivers/net/ethernet/broadcom/bnxt/bnxt.h dma_addr_t mapping; mapping 834 drivers/net/ethernet/broadcom/bnxt/bnxt.h dma_addr_t mapping; mapping 310 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c dma_addr_t mapping; mapping 319 
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, mapping 325 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c get.dest_data_addr = cpu_to_le64(mapping); mapping 376 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c set.src_data_addr = cpu_to_le64(mapping); mapping 382 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c dma_free_coherent(&bp->pdev->dev, data_len, data, mapping); mapping 414 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c dma_addr_t mapping; mapping 422 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c &mapping, GFP_KERNEL); mapping 426 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c req.src_data_addr = cpu_to_le64(mapping); mapping 436 drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c mapping); mapping 25 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_addr_t mapping, u32 len) mapping 40 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c txbd->tx_bd_haddr = cpu_to_le64(mapping); mapping 48 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_addr_t mapping, u32 len, u16 rx_prod) mapping 52 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c tx_buf = bnxt_xmit_bd(bp, txr, mapping, len); mapping 59 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_addr_t mapping, u32 len, mapping 64 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c tx_buf = bnxt_xmit_bd(bp, txr, mapping, len); mapping 67 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_unmap_addr_set(tx_buf, mapping, mapping); mapping 88 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_unmap_addr(tx_buf, mapping), mapping 119 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_addr_t mapping; mapping 132 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c mapping = rx_buf->mapping - bp->rx_dma_offset; mapping 133 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir); mapping 171 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, mapping 173 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, mapping 182 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_unmap_page_attrs(&pdev->dev, mapping, mapping 221 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c dma_addr_t mapping; mapping 244 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, mapping 247 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c if (dma_mapping_error(&pdev->dev, mapping)) { mapping 252 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp); mapping 15 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h dma_addr_t mapping, u32 len); mapping 834 drivers/net/ethernet/broadcom/cnic.c cp->ctx_arr[i].mapping); mapping 949 drivers/net/ethernet/broadcom/cnic.c &cp->ctx_arr[i].mapping, mapping 1221 drivers/net/ethernet/broadcom/cnic.c &cp->ctx_arr[i].mapping, mapping 1227 drivers/net/ethernet/broadcom/cnic.c if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) { mapping 1669 drivers/net/ethernet/broadcom/cnic.c if (cp->ctx_arr[blk].mapping & mask) mapping 1671 drivers/net/ethernet/broadcom/cnic.c (cp->ctx_arr[blk].mapping & mask); mapping 1673 drivers/net/ethernet/broadcom/cnic.c ctx_map = cp->ctx_arr[blk].mapping + align_off + mapping 4399 drivers/net/ethernet/broadcom/cnic.c (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); mapping 4401 drivers/net/ethernet/broadcom/cnic.c (u64) cp->ctx_arr[i].mapping >> 32); mapping 4854 drivers/net/ethernet/broadcom/cnic.c dma_addr_t map = ctx->mapping; mapping 126 
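The bnxt_dcb entries just above use the coherent API rather than streaming mappings: dma_alloc_coherent() hands back both a CPU pointer and a bus address, the bus address goes to firmware as a little-endian field (src_data_addr above), and the pair is released with dma_free_coherent(). A sketch with a hypothetical demo_req in place of the real firmware request structure:

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_req {
	__le64 src_data_addr;
};

static int demo_fw_xfer(struct device *dev, const void *src, size_t len)
{
	struct demo_req req = {};
	dma_addr_t mapping;
	void *data;

	data = dma_alloc_coherent(dev, len, &mapping, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	memcpy(data, src, len);
	req.src_data_addr = cpu_to_le64(mapping);
	/* ... send req to the device and wait for completion ... */

	dma_free_coherent(dev, len, data, mapping);
	return 0;
}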
drivers/net/ethernet/broadcom/cnic.h dma_addr_t mapping; mapping 1554 drivers/net/ethernet/broadcom/genet/bcmgenet.c dma_addr_t mapping; mapping 1619 drivers/net/ethernet/broadcom/genet/bcmgenet.c mapping = dma_map_single(kdev, skb->data, size, mapping 1625 drivers/net/ethernet/broadcom/genet/bcmgenet.c mapping = skb_frag_dma_map(kdev, frag, 0, size, mapping 1629 drivers/net/ethernet/broadcom/genet/bcmgenet.c ret = dma_mapping_error(kdev, mapping); mapping 1636 drivers/net/ethernet/broadcom/genet/bcmgenet.c dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); mapping 1652 drivers/net/ethernet/broadcom/genet/bcmgenet.c dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); mapping 1697 drivers/net/ethernet/broadcom/genet/bcmgenet.c dma_addr_t mapping; mapping 1710 drivers/net/ethernet/broadcom/genet/bcmgenet.c mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len, mapping 1712 drivers/net/ethernet/broadcom/genet/bcmgenet.c if (dma_mapping_error(kdev, mapping)) { mapping 1725 drivers/net/ethernet/broadcom/genet/bcmgenet.c dma_unmap_addr_set(cb, dma_addr, mapping); mapping 1727 drivers/net/ethernet/broadcom/genet/bcmgenet.c dmadesc_set_addr(priv, cb->bd_addr, mapping); mapping 6580 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr(ri, mapping), mapping 6600 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr(ri, mapping), mapping 6661 drivers/net/ethernet/broadcom/tg3.c pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), mapping 6686 drivers/net/ethernet/broadcom/tg3.c dma_addr_t mapping; mapping 6726 drivers/net/ethernet/broadcom/tg3.c mapping = pci_map_single(tp->pdev, mapping 6730 drivers/net/ethernet/broadcom/tg3.c if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) { mapping 6736 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr_set(map, mapping, mapping); mapping 6738 drivers/net/ethernet/broadcom/tg3.c desc->addr_hi = ((u64)mapping >> 32); mapping 6739 drivers/net/ethernet/broadcom/tg3.c desc->addr_lo = ((u64)mapping & 0xffffffff); mapping 6781 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr_set(dest_map, mapping, mapping 6782 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr(src_map, mapping)); mapping 6852 drivers/net/ethernet/broadcom/tg3.c dma_addr = dma_unmap_addr(ri, mapping); mapping 6858 drivers/net/ethernet/broadcom/tg3.c dma_addr = dma_unmap_addr(ri, mapping); mapping 7661 drivers/net/ethernet/broadcom/tg3.c static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) mapping 7663 drivers/net/ethernet/broadcom/tg3.c u32 base = (u32) mapping & 0xffffffff; mapping 7671 drivers/net/ethernet/broadcom/tg3.c static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, mapping 7675 drivers/net/ethernet/broadcom/tg3.c u32 base = (u32) mapping & 0xffffffff; mapping 7683 drivers/net/ethernet/broadcom/tg3.c static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, mapping 7688 drivers/net/ethernet/broadcom/tg3.c return ((u64) mapping + len) > DMA_BIT_MASK(40); mapping 7696 drivers/net/ethernet/broadcom/tg3.c dma_addr_t mapping, u32 len, u32 flags, mapping 7699 drivers/net/ethernet/broadcom/tg3.c txbd->addr_hi = ((u64) mapping >> 32); mapping 7700 drivers/net/ethernet/broadcom/tg3.c txbd->addr_lo = ((u64) mapping & 0xffffffff); mapping 7778 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr(txb, mapping), mapping 7795 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr(txb, mapping), mapping 7844 drivers/net/ethernet/broadcom/tg3.c mapping, new_addr); mapping 7921 drivers/net/ethernet/broadcom/tg3.c dma_addr_t mapping; mapping 
8060 drivers/net/ethernet/broadcom/tg3.c mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); mapping 8061 drivers/net/ethernet/broadcom/tg3.c if (pci_dma_mapping_error(tp->pdev, mapping)) mapping 8066 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); mapping 8073 drivers/net/ethernet/broadcom/tg3.c if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags | mapping 8093 drivers/net/ethernet/broadcom/tg3.c mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, mapping 8097 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping 8098 drivers/net/ethernet/broadcom/tg3.c mapping); mapping 8099 drivers/net/ethernet/broadcom/tg3.c if (dma_mapping_error(&tp->pdev->dev, mapping)) mapping 8103 drivers/net/ethernet/broadcom/tg3.c tg3_tx_frag_set(tnapi, &entry, &budget, mapping, mapping 9414 drivers/net/ethernet/broadcom/tg3.c dma_addr_t mapping, u32 maxlen_flags, mapping 9419 drivers/net/ethernet/broadcom/tg3.c ((u64) mapping >> 32)); mapping 9422 drivers/net/ethernet/broadcom/tg3.c ((u64) mapping & 0xffffffff)); mapping 9663 drivers/net/ethernet/broadcom/tg3.c u64 mapping = (u64)tnapi->status_mapping; mapping 9664 drivers/net/ethernet/broadcom/tg3.c tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32); mapping 9665 drivers/net/ethernet/broadcom/tg3.c tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff); mapping 13526 drivers/net/ethernet/broadcom/tg3.c dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map); mapping 13609 drivers/net/ethernet/broadcom/tg3.c mapping); mapping 13613 drivers/net/ethernet/broadcom/tg3.c mapping); mapping 2864 drivers/net/ethernet/broadcom/tg3.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 2869 drivers/net/ethernet/broadcom/tg3.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 854 drivers/net/ethernet/cadence/macb.h dma_addr_t mapping; mapping 676 drivers/net/ethernet/cadence/macb_main.c if (tx_skb->mapping) { mapping 678 drivers/net/ethernet/cadence/macb_main.c dma_unmap_page(&bp->pdev->dev, tx_skb->mapping, mapping 681 drivers/net/ethernet/cadence/macb_main.c dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, mapping 683 drivers/net/ethernet/cadence/macb_main.c tx_skb->mapping = 0; mapping 1498 drivers/net/ethernet/cadence/macb_main.c dma_addr_t mapping; mapping 1528 drivers/net/ethernet/cadence/macb_main.c mapping = dma_map_single(&bp->pdev->dev, mapping 1531 drivers/net/ethernet/cadence/macb_main.c if (dma_mapping_error(&bp->pdev->dev, mapping)) mapping 1536 drivers/net/ethernet/cadence/macb_main.c tx_skb->mapping = mapping; mapping 1559 drivers/net/ethernet/cadence/macb_main.c mapping = skb_frag_dma_map(&bp->pdev->dev, frag, mapping 1561 drivers/net/ethernet/cadence/macb_main.c if (dma_mapping_error(&bp->pdev->dev, mapping)) mapping 1566 drivers/net/ethernet/cadence/macb_main.c tx_skb->mapping = mapping; mapping 1642 drivers/net/ethernet/cadence/macb_main.c macb_set_addr(bp, desc, tx_skb->mapping); mapping 833 drivers/net/ethernet/chelsio/cxgb/sge.c dma_addr_t mapping; mapping 840 drivers/net/ethernet/chelsio/cxgb/sge.c mapping = pci_map_single(pdev, skb->data, dma_len, mapping 845 drivers/net/ethernet/chelsio/cxgb/sge.c dma_unmap_addr_set(ce, dma_addr, mapping); mapping 847 drivers/net/ethernet/chelsio/cxgb/sge.c e->addr_lo = (u32)mapping; mapping 848 drivers/net/ethernet/chelsio/cxgb/sge.c e->addr_hi = (u64)mapping >> 32; mapping 1140 drivers/net/ethernet/chelsio/cxgb/sge.c static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping, mapping 1146 
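tg3_4g_overflow_test() and tg3_40bit_overflow_test() above exist because some tg3 DMA engines mishandle a buffer whose addresses cross a 4 GiB boundary (or exceed 40 bits), so the driver re-maps such buffers through a bounce buffer. The crossing condition can be stated generically: the first and last byte of the buffer differ in their upper 32 address bits. A sketch that restates the idea, not tg3's exact arithmetic:

#include <linux/kernel.h>
#include <linux/types.h>

static inline bool demo_crosses_4g(dma_addr_t mapping, unsigned int len)
{
	/* Crossing occurs iff start and last byte differ in bits 32 and up. */
	return len &&
	       upper_32_bits(mapping) != upper_32_bits(mapping + len - 1);
}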
drivers/net/ethernet/chelsio/cxgb/sge.c e->addr_lo = (u32)mapping; mapping 1147 drivers/net/ethernet/chelsio/cxgb/sge.c e->addr_hi = (u64)mapping >> 32; mapping 1203 drivers/net/ethernet/chelsio/cxgb/sge.c dma_addr_t mapping, desc_mapping; mapping 1212 drivers/net/ethernet/chelsio/cxgb/sge.c mapping = pci_map_single(adapter->pdev, skb->data, mapping 1215 drivers/net/ethernet/chelsio/cxgb/sge.c desc_mapping = mapping; mapping 1251 drivers/net/ethernet/chelsio/cxgb/sge.c dma_unmap_addr_set(ce, dma_addr, mapping); mapping 1265 drivers/net/ethernet/chelsio/cxgb/sge.c mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0, mapping 1267 drivers/net/ethernet/chelsio/cxgb/sge.c desc_mapping = mapping; mapping 1277 drivers/net/ethernet/chelsio/cxgb/sge.c dma_unmap_addr_set(ce, dma_addr, mapping); mapping 96 drivers/net/ethernet/chelsio/cxgb3/adapter.h dma_addr_t mapping; mapping 359 drivers/net/ethernet/chelsio/cxgb3/sge.c d->pg_chunk.mapping, mapping 415 drivers/net/ethernet/chelsio/cxgb3/sge.c dma_addr_t mapping; mapping 417 drivers/net/ethernet/chelsio/cxgb3/sge.c mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); mapping 418 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(pci_dma_mapping_error(pdev, mapping))) mapping 421 drivers/net/ethernet/chelsio/cxgb3/sge.c dma_unmap_addr_set(sd, dma_addr, mapping); mapping 423 drivers/net/ethernet/chelsio/cxgb3/sge.c d->addr_lo = cpu_to_be32(mapping); mapping 424 drivers/net/ethernet/chelsio/cxgb3/sge.c d->addr_hi = cpu_to_be32((u64) mapping >> 32); mapping 431 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, mapping 434 drivers/net/ethernet/chelsio/cxgb3/sge.c d->addr_lo = cpu_to_be32(mapping); mapping 435 drivers/net/ethernet/chelsio/cxgb3/sge.c d->addr_hi = cpu_to_be32((u64) mapping >> 32); mapping 447 drivers/net/ethernet/chelsio/cxgb3/sge.c dma_addr_t mapping; mapping 456 drivers/net/ethernet/chelsio/cxgb3/sge.c mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, mapping 458 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { mapping 463 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.mapping = mapping; mapping 512 drivers/net/ethernet/chelsio/cxgb3/sge.c dma_addr_t mapping; mapping 521 drivers/net/ethernet/chelsio/cxgb3/sge.c mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; mapping 522 drivers/net/ethernet/chelsio/cxgb3/sge.c dma_unmap_addr_set(sd, dma_addr, mapping); mapping 524 drivers/net/ethernet/chelsio/cxgb3/sge.c add_one_rx_chunk(mapping, d, q->gen); mapping 525 drivers/net/ethernet/chelsio/cxgb3/sge.c pci_dma_sync_single_for_device(adap->pdev, mapping, mapping 890 drivers/net/ethernet/chelsio/cxgb3/sge.c sd->pg_chunk.mapping, mapping 2154 drivers/net/ethernet/chelsio/cxgb3/sge.c sd->pg_chunk.mapping, mapping 571 drivers/net/ethernet/chelsio/cxgb4/sge.c dma_addr_t mapping) mapping 574 drivers/net/ethernet/chelsio/cxgb4/sge.c sd->dma_addr = mapping; /* includes size low bits */ mapping 596 drivers/net/ethernet/chelsio/cxgb4/sge.c dma_addr_t mapping; mapping 623 drivers/net/ethernet/chelsio/cxgb4/sge.c mapping = dma_map_page(adap->pdev_dev, pg, 0, mapping 626 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { mapping 631 drivers/net/ethernet/chelsio/cxgb4/sge.c mapping |= RX_LARGE_PG_BUF; mapping 632 drivers/net/ethernet/chelsio/cxgb4/sge.c *d++ = cpu_to_be64(mapping); mapping 634 drivers/net/ethernet/chelsio/cxgb4/sge.c set_rx_sw_desc(sd, pg, 
mapping); mapping 654 drivers/net/ethernet/chelsio/cxgb4/sge.c mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, mapping 656 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { mapping 661 drivers/net/ethernet/chelsio/cxgb4/sge.c *d++ = cpu_to_be64(mapping); mapping 663 drivers/net/ethernet/chelsio/cxgb4/sge.c set_rx_sw_desc(sd, pg, mapping); mapping 92 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 623 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 632 drivers/net/ethernet/cortina/gemini.c mapping = txd->word2.buf_adr; mapping 635 drivers/net/ethernet/cortina/gemini.c dma_unmap_single(geth->dev, mapping, mapping 736 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 740 drivers/net/ethernet/cortina/gemini.c mapping = addr & PAGE_MASK; mapping 750 drivers/net/ethernet/cortina/gemini.c if (gpage->mapping == mapping) mapping 766 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 786 drivers/net/ethernet/cortina/gemini.c mapping = rxd[r].word2.buf_adr; mapping 790 drivers/net/ethernet/cortina/gemini.c if (!mapping) mapping 794 drivers/net/ethernet/cortina/gemini.c gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE); mapping 814 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 823 drivers/net/ethernet/cortina/gemini.c mapping = dma_map_single(geth->dev, page_address(page), mapping 825 drivers/net/ethernet/cortina/gemini.c if (dma_mapping_error(geth->dev, mapping)) { mapping 842 drivers/net/ethernet/cortina/gemini.c freeq_entry->word2.buf_adr = mapping; mapping 844 drivers/net/ethernet/cortina/gemini.c mapping += frag_len; mapping 850 drivers/net/ethernet/cortina/gemini.c mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; mapping 851 drivers/net/ethernet/cortina/gemini.c dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); mapping 860 drivers/net/ethernet/cortina/gemini.c pn, (unsigned int)mapping, page); mapping 861 drivers/net/ethernet/cortina/gemini.c gpage->mapping = mapping; mapping 980 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 983 drivers/net/ethernet/cortina/gemini.c mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; mapping 984 drivers/net/ethernet/cortina/gemini.c dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); mapping 1016 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 1018 drivers/net/ethernet/cortina/gemini.c mapping = geth->freeq_ring[pn << fpp_order].word2.buf_adr; mapping 1019 drivers/net/ethernet/cortina/gemini.c dma_unmap_single(geth->dev, mapping, frag_len, DMA_FROM_DEVICE); mapping 1149 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 1196 drivers/net/ethernet/cortina/gemini.c mapping = dma_map_single(geth->dev, buffer, buflen, mapping 1198 drivers/net/ethernet/cortina/gemini.c if (dma_mapping_error(geth->dev, mapping)) mapping 1204 drivers/net/ethernet/cortina/gemini.c txd->word2.buf_adr = mapping; mapping 1412 drivers/net/ethernet/cortina/gemini.c dma_addr_t mapping; mapping 1426 drivers/net/ethernet/cortina/gemini.c mapping = rx->word2.buf_adr; mapping 1434 drivers/net/ethernet/cortina/gemini.c page_offs = mapping & ~PAGE_MASK; mapping 1436 drivers/net/ethernet/cortina/gemini.c if (!mapping) { mapping 1443 drivers/net/ethernet/cortina/gemini.c gpage = gmac_get_queue_page(geth, port, mapping + PAGE_SIZE); mapping 1498 drivers/net/ethernet/cortina/gemini.c if (mapping) mapping 294 drivers/net/ethernet/dec/tulip/de2104x.c 
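The cxgb4 lines above ("mapping |= RX_LARGE_PG_BUF", and set_rx_sw_desc()'s "includes size low bits" comment) exploit the fact that a page-aligned DMA address has zero low bits, so a buffer-size flag can travel in bit 0 of the stored address and be masked off before unmapping. A generic sketch; DEMO_LARGE_BUF is a hypothetical stand-in for the driver's flag:

#include <linux/dma-mapping.h>

#define DEMO_LARGE_BUF	0x1UL	/* low bit of a page-aligned address */

static inline u64 demo_encode(dma_addr_t mapping, bool large)
{
	return (u64)mapping | (large ? DEMO_LARGE_BUF : 0);
}

static inline dma_addr_t demo_addr(u64 encoded)
{
	return (dma_addr_t)(encoded & ~DEMO_LARGE_BUF);
}

static inline bool demo_is_large(u64 encoded)
{
	return encoded & DEMO_LARGE_BUF;
}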
dma_addr_t mapping; mapping 409 drivers/net/ethernet/dec/tulip/de2104x.c dma_addr_t mapping; mapping 421 drivers/net/ethernet/dec/tulip/de2104x.c mapping = de->rx_skb[rx_tail].mapping; mapping 449 drivers/net/ethernet/dec/tulip/de2104x.c pci_unmap_single(de->pdev, mapping, mapping 453 drivers/net/ethernet/dec/tulip/de2104x.c mapping = mapping 454 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_skb[rx_tail].mapping = mapping 459 drivers/net/ethernet/dec/tulip/de2104x.c pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); mapping 463 drivers/net/ethernet/dec/tulip/de2104x.c pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); mapping 483 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping); mapping 560 drivers/net/ethernet/dec/tulip/de2104x.c pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, mapping 565 drivers/net/ethernet/dec/tulip/de2104x.c pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, mapping 608 drivers/net/ethernet/dec/tulip/de2104x.c u32 mapping, len, flags = FirstFrag | LastFrag; mapping 626 drivers/net/ethernet/dec/tulip/de2104x.c mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE); mapping 633 drivers/net/ethernet/dec/tulip/de2104x.c txd->addr1 = cpu_to_le32(mapping); mapping 636 drivers/net/ethernet/dec/tulip/de2104x.c de->tx_skb[entry].mapping = mapping; mapping 723 drivers/net/ethernet/dec/tulip/de2104x.c u32 mapping; mapping 768 drivers/net/ethernet/dec/tulip/de2104x.c de->tx_skb[entry].mapping = mapping = mapping 778 drivers/net/ethernet/dec/tulip/de2104x.c txd->addr1 = cpu_to_le32(mapping); mapping 1285 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_skb[i].mapping = pci_map_single(de->pdev, mapping 1295 drivers/net/ethernet/dec/tulip/de2104x.c de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping); mapping 1339 drivers/net/ethernet/dec/tulip/de2104x.c pci_unmap_single(de->pdev, de->rx_skb[i].mapping, mapping 1351 drivers/net/ethernet/dec/tulip/de2104x.c de->tx_skb[i].mapping, mapping 1356 drivers/net/ethernet/dec/tulip/de2104x.c de->tx_skb[i].mapping, mapping 70 drivers/net/ethernet/dec/tulip/interrupt.c dma_addr_t mapping; mapping 77 drivers/net/ethernet/dec/tulip/interrupt.c mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, mapping 79 drivers/net/ethernet/dec/tulip/interrupt.c if (dma_mapping_error(&tp->pdev->dev, mapping)) { mapping 85 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_buffers[entry].mapping = mapping; mapping 87 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); mapping 214 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_buffers[entry].mapping, mapping 226 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_buffers[entry].mapping, mapping 233 drivers/net/ethernet/dec/tulip/interrupt.c if (tp->rx_buffers[entry].mapping != mapping 238 drivers/net/ethernet/dec/tulip/interrupt.c (unsigned long long)tp->rx_buffers[entry].mapping, mapping 243 drivers/net/ethernet/dec/tulip/interrupt.c pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, mapping 247 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_buffers[entry].mapping = 0; mapping 440 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_buffers[entry].mapping, mapping 452 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_buffers[entry].mapping, mapping 459 drivers/net/ethernet/dec/tulip/interrupt.c if (tp->rx_buffers[entry].mapping != mapping 464 drivers/net/ethernet/dec/tulip/interrupt.c (long long)tp->rx_buffers[entry].mapping, mapping 469 
drivers/net/ethernet/dec/tulip/interrupt.c pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, mapping 473 drivers/net/ethernet/dec/tulip/interrupt.c tp->rx_buffers[entry].mapping = 0; mapping 599 drivers/net/ethernet/dec/tulip/interrupt.c if (tp->tx_buffers[entry].mapping) mapping 601 drivers/net/ethernet/dec/tulip/interrupt.c tp->tx_buffers[entry].mapping, mapping 632 drivers/net/ethernet/dec/tulip/interrupt.c pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, mapping 639 drivers/net/ethernet/dec/tulip/interrupt.c tp->tx_buffers[entry].mapping = 0; mapping 397 drivers/net/ethernet/dec/tulip/tulip.h dma_addr_t mapping; mapping 355 drivers/net/ethernet/dec/tulip/tulip_core.c dma_addr_t mapping; mapping 364 drivers/net/ethernet/dec/tulip/tulip_core.c mapping = pci_map_single(tp->pdev, tp->setup_frame, mapping 368 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[tp->cur_tx].mapping = mapping; mapping 372 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping); mapping 628 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_buffers[i].mapping = 0; mapping 635 drivers/net/ethernet/dec/tulip/tulip_core.c dma_addr_t mapping; mapping 644 drivers/net/ethernet/dec/tulip/tulip_core.c mapping = pci_map_single(tp->pdev, skb->data, mapping 646 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_buffers[i].mapping = mapping; mapping 648 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_ring[i].buffer1 = cpu_to_le32(mapping); mapping 656 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[i].mapping = 0; mapping 669 drivers/net/ethernet/dec/tulip/tulip_core.c dma_addr_t mapping; mapping 678 drivers/net/ethernet/dec/tulip/tulip_core.c mapping = pci_map_single(tp->pdev, skb->data, mapping 680 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[entry].mapping = mapping; mapping 681 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping); mapping 729 drivers/net/ethernet/dec/tulip/tulip_core.c if (tp->tx_buffers[entry].mapping) mapping 731 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[entry].mapping, mapping 737 drivers/net/ethernet/dec/tulip/tulip_core.c pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, mapping 744 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[entry].mapping = 0; mapping 799 drivers/net/ethernet/dec/tulip/tulip_core.c dma_addr_t mapping = tp->rx_buffers[i].mapping; mapping 802 drivers/net/ethernet/dec/tulip/tulip_core.c tp->rx_buffers[i].mapping = 0; mapping 809 drivers/net/ethernet/dec/tulip/tulip_core.c pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ, mapping 819 drivers/net/ethernet/dec/tulip/tulip_core.c pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping, mapping 824 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[i].mapping = 0; mapping 1152 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[entry].mapping = 0; mapping 1163 drivers/net/ethernet/dec/tulip/tulip_core.c tp->tx_buffers[entry].mapping = mapping 1172 drivers/net/ethernet/dec/tulip/tulip_core.c cpu_to_le32(tp->tx_buffers[entry].mapping); mapping 616 drivers/net/ethernet/jme.c txbi->mapping = 0; mapping 714 drivers/net/ethernet/jme.c rxdesc->desc1.bufaddrh = cpu_to_le32((__u64)rxbi->mapping >> 32); mapping 716 drivers/net/ethernet/jme.c (__u64)rxbi->mapping & 0xFFFFFFFFUL); mapping 730 drivers/net/ethernet/jme.c dma_addr_t mapping; mapping 737 drivers/net/ethernet/jme.c mapping = pci_map_page(jme->pdev, virt_to_page(skb->data), mapping 740 
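The tulip and de2104x entries above still use the legacy PCI DMA wrappers: pci_map_single()/pci_unmap_single() with the PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE direction constants are older spellings of dma_map_single() and friends, and these drivers store a mapping of 0 to mean "nothing to unmap". A minimal sketch with hypothetical demo_* helpers:

#include <linux/pci.h>

static dma_addr_t demo_map_rx(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, mapping))
		return 0;	/* the callers above store 0 for "unmapped" */
	return mapping;
}

static void demo_unmap_rx(struct pci_dev *pdev, dma_addr_t mapping,
			  size_t len)
{
	pci_unmap_single(pdev, mapping, len, PCI_DMA_FROMDEVICE);
}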
drivers/net/ethernet/jme.c if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) { mapping 745 drivers/net/ethernet/jme.c if (likely(rxbi->mapping)) mapping 746 drivers/net/ethernet/jme.c pci_unmap_page(jme->pdev, rxbi->mapping, mapping 751 drivers/net/ethernet/jme.c rxbi->mapping = mapping; mapping 764 drivers/net/ethernet/jme.c rxbi->mapping, mapping 769 drivers/net/ethernet/jme.c rxbi->mapping = 0; mapping 1009 drivers/net/ethernet/jme.c rxbi->mapping, mapping 1015 drivers/net/ethernet/jme.c rxbi->mapping, mapping 1459 drivers/net/ethernet/jme.c ttxbi->mapping, mapping 1463 drivers/net/ethernet/jme.c ttxbi->mapping = 0; mapping 1999 drivers/net/ethernet/jme.c txbi->mapping = dmaaddr; mapping 2014 drivers/net/ethernet/jme.c ctxbi->mapping, mapping 2018 drivers/net/ethernet/jme.c ctxbi->mapping = 0; mapping 354 drivers/net/ethernet/jme.h dma_addr_t mapping; mapping 285 drivers/net/ethernet/lantiq_xrx200.c dma_addr_t mapping; mapping 304 drivers/net/ethernet/lantiq_xrx200.c mapping = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE); mapping 305 drivers/net/ethernet/lantiq_xrx200.c if (unlikely(dma_mapping_error(priv->dev, mapping))) mapping 309 drivers/net/ethernet/lantiq_xrx200.c byte_offset = mapping % 16; mapping 311 drivers/net/ethernet/lantiq_xrx200.c desc->addr = mapping - byte_offset; mapping 1833 drivers/net/ethernet/marvell/sky2.c dma_addr_t mapping; mapping 1843 drivers/net/ethernet/marvell/sky2.c mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); mapping 1845 drivers/net/ethernet/marvell/sky2.c if (pci_dma_mapping_error(hw->pdev, mapping)) mapping 1853 drivers/net/ethernet/marvell/sky2.c upper = upper_32_bits(mapping); mapping 1924 drivers/net/ethernet/marvell/sky2.c dma_unmap_addr_set(re, mapaddr, mapping); mapping 1928 drivers/net/ethernet/marvell/sky2.c le->addr = cpu_to_le32(lower_32_bits(mapping)); mapping 1937 drivers/net/ethernet/marvell/sky2.c mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, mapping 1940 drivers/net/ethernet/marvell/sky2.c if (dma_mapping_error(&hw->pdev->dev, mapping)) mapping 1943 drivers/net/ethernet/marvell/sky2.c upper = upper_32_bits(mapping); mapping 1953 drivers/net/ethernet/marvell/sky2.c dma_unmap_addr_set(re, mapaddr, mapping); mapping 1957 drivers/net/ethernet/marvell/sky2.c le->addr = cpu_to_le32(lower_32_bits(mapping)); mapping 76 drivers/net/ethernet/mellanox/mlxsw/core.c u8 *mapping; /* lag_id+port_index to local_port mapping */ mapping 1151 drivers/net/ethernet/mellanox/mlxsw/core.c mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL); mapping 1152 drivers/net/ethernet/mellanox/mlxsw/core.c if (!mlxsw_core->lag.mapping) { mapping 1211 drivers/net/ethernet/mellanox/mlxsw/core.c kfree(mlxsw_core->lag.mapping); mapping 1278 drivers/net/ethernet/mellanox/mlxsw/core.c kfree(mlxsw_core->lag.mapping); mapping 1829 drivers/net/ethernet/mellanox/mlxsw/core.c mlxsw_core->lag.mapping[index] = local_port; mapping 1839 drivers/net/ethernet/mellanox/mlxsw/core.c return mlxsw_core->lag.mapping[index]; mapping 1852 drivers/net/ethernet/mellanox/mlxsw/core.c if (mlxsw_core->lag.mapping[index] == local_port) mapping 1853 drivers/net/ethernet/mellanox/mlxsw/core.c mlxsw_core->lag.mapping[index] = 0; mapping 3315 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port->mapping.width, cmd); mapping 3318 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port->mapping.width, cmd); mapping 3352 drivers/net/ethernet/mellanox/mlxsw/spectrum.c ops->to_ptys_advert_link(mlxsw_sp, mlxsw_sp_port->mapping.width, mapping 
3354 drivers/net/ethernet/mellanox/mlxsw/spectrum.c ops->to_ptys_speed(mlxsw_sp, mlxsw_sp_port->mapping.width, mapping 3388 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port->mapping.module, mapping 3403 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port->mapping.module, ee, mapping 3641 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port->mapping.module = module; mapping 3642 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port->mapping.width = width; mapping 3643 drivers/net/ethernet/mellanox/mlxsw/spectrum.c mlxsw_sp_port->mapping.lane = lane; mapping 4079 drivers/net/ethernet/mellanox/mlxsw/spectrum.c module = mlxsw_sp_port->mapping.module; mapping 4080 drivers/net/ethernet/mellanox/mlxsw/spectrum.c cur_width = mlxsw_sp_port->mapping.width; mapping 4163 drivers/net/ethernet/mellanox/mlxsw/spectrum.c cur_width = mlxsw_sp_port->mapping.width; mapping 262 drivers/net/ethernet/mellanox/mlxsw/spectrum.h } mapping; mapping 300 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width, mapping 41 drivers/net/ethernet/mellanox/mlxsw/switchib.c } mapping; mapping 217 drivers/net/ethernet/mellanox/mlxsw/switchib.c mlxsw_sib_port->mapping.module = module; mapping 369 drivers/net/ethernet/mellanox/mlxsw/switchib.c mlxsw_sib_port->mapping.module + 1); mapping 372 drivers/net/ethernet/mellanox/mlxsw/switchib.c mlxsw_sib_port->mapping.module + 1); mapping 51 drivers/net/ethernet/mellanox/mlxsw/switchx2.c } mapping; mapping 994 drivers/net/ethernet/mellanox/mlxsw/switchx2.c mlxsw_sx_port->mapping.module = module; mapping 1155 drivers/net/ethernet/mellanox/mlxsw/switchx2.c mlxsw_sx_port->mapping.module = module; mapping 1317 drivers/net/ethernet/mellanox/mlxsw/switchx2.c mlxsw_sx_port->mapping.module + 1); mapping 1320 drivers/net/ethernet/mellanox/mlxsw/switchx2.c mlxsw_sx_port->mapping.module + 1); mapping 163 drivers/net/ethernet/ni/nixge.c dma_addr_t mapping; mapping 442 drivers/net/ethernet/ni/nixge.c if (tx_skb->mapping) { mapping 444 drivers/net/ethernet/ni/nixge.c dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping, mapping 448 drivers/net/ethernet/ni/nixge.c tx_skb->mapping, mapping 450 drivers/net/ethernet/ni/nixge.c tx_skb->mapping = 0; mapping 534 drivers/net/ethernet/ni/nixge.c tx_skb->mapping = cur_phys; mapping 555 drivers/net/ethernet/ni/nixge.c tx_skb->mapping = cur_phys; mapping 586 drivers/net/ethernet/ni/nixge.c tx_skb->mapping, mapping 2519 drivers/net/ethernet/qlogic/qed/qed_ll2.c dma_addr_t mapping; mapping 2538 drivers/net/ethernet/qlogic/qed/qed_ll2.c mapping = dma_map_single(&cdev->pdev->dev, skb->data, mapping 2540 drivers/net/ethernet/qlogic/qed/qed_ll2.c if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { mapping 2560 drivers/net/ethernet/qlogic/qed/qed_ll2.c pkt.first_frag = mapping; mapping 2580 drivers/net/ethernet/qlogic/qed/qed_ll2.c mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, mapping 2583 drivers/net/ethernet/qlogic/qed/qed_ll2.c if (unlikely(dma_mapping_error(&cdev->pdev->dev, mapping))) { mapping 2592 drivers/net/ethernet/qlogic/qed/qed_ll2.c mapping, mapping 2605 drivers/net/ethernet/qlogic/qed/qed_ll2.c dma_unmap_single(&cdev->pdev->dev, mapping, skb->len, DMA_TO_DEVICE); mapping 301 drivers/net/ethernet/qlogic/qede/qede.h dma_addr_t mapping; mapping 393 drivers/net/ethernet/qlogic/qede/qede.h dma_addr_t mapping; mapping 1495 drivers/net/ethernet/qlogic/qede/qede_ethtool.c dma_addr_t mapping; mapping 1525 
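qed_ll2 above maps a transmit skb piecewise: the linear head through dma_map_single(), each page fragment through skb_frag_dma_map(), with every earlier mapping unwound if a later one fails. A sketch of that loop; demo_map_skb() and its caller-supplied address array are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

static int demo_map_skb(struct device *dev, struct sk_buff *skb,
			dma_addr_t *addrs)	/* nr_frags + 1 slots */
{
	int i;

	addrs[0] = dma_map_single(dev, skb->data, skb_headlen(skb),
				  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, addrs[0])))
		return -ENOMEM;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		addrs[i + 1] = skb_frag_dma_map(dev, frag, 0,
						skb_frag_size(frag),
						DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, addrs[i + 1])))
			goto unwind;
	}
	return 0;

unwind:
	/* Unmap frags i-1 .. 0, then the linear head. */
	while (--i >= 0)
		dma_unmap_page(dev, addrs[i + 1],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);
	dma_unmap_single(dev, addrs[0], skb_headlen(skb), DMA_TO_DEVICE);
	return -ENOMEM;
}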
drivers/net/ethernet/qlogic/qede/qede_ethtool.c mapping = dma_map_single(&edev->pdev->dev, skb->data, mapping 1527 drivers/net/ethernet/qlogic/qede/qede_ethtool.c if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { mapping 1531 drivers/net/ethernet/qlogic/qede/qede_ethtool.c BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); mapping 77 drivers/net/ethernet/qlogic/qede/qede_filter.c dma_addr_t mapping; mapping 125 drivers/net/ethernet/qlogic/qede/qede_filter.c params.addr = n->mapping; mapping 167 drivers/net/ethernet/qlogic/qede/qede_filter.c fltr->mapping = dma_map_single(&edev->pdev->dev, fltr->data, mapping 169 drivers/net/ethernet/qlogic/qede/qede_filter.c if (dma_mapping_error(&edev->pdev->dev, fltr->mapping)) { mapping 195 drivers/net/ethernet/qlogic/qede/qede_filter.c dma_unmap_single(&edev->pdev->dev, fltr->mapping, mapping 55 drivers/net/ethernet/qlogic/qede/qede_fp.c dma_addr_t mapping; mapping 74 drivers/net/ethernet/qlogic/qede/qede_fp.c mapping = dma_map_page(rxq->dev, data, 0, mapping 76 drivers/net/ethernet/qlogic/qede/qede_fp.c if (unlikely(dma_mapping_error(rxq->dev, mapping))) { mapping 84 drivers/net/ethernet/qlogic/qede/qede_fp.c sw_rx_data->mapping = mapping; mapping 89 drivers/net/ethernet/qlogic/qede/qede_fp.c rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); mapping 90 drivers/net/ethernet/qlogic/qede/qede_fp.c rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + mapping 270 drivers/net/ethernet/qlogic/qede/qede_fp.c dma_addr_t mapping; mapping 273 drivers/net/ethernet/qlogic/qede/qede_fp.c mapping = skb_frag_dma_map(txq->dev, frag, 0, mapping 275 drivers/net/ethernet/qlogic/qede/qede_fp.c if (unlikely(dma_mapping_error(txq->dev, mapping))) mapping 279 drivers/net/ethernet/qlogic/qede/qede_fp.c BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); mapping 356 drivers/net/ethernet/qlogic/qede/qede_fp.c BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); mapping 362 drivers/net/ethernet/qlogic/qede/qede_fp.c metadata->mapping + padding, mapping 366 drivers/net/ethernet/qlogic/qede/qede_fp.c txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping; mapping 400 drivers/net/ethernet/qlogic/qede/qede_fp.c txq->sw_tx_ring.xdp[idx].mapping, mapping 511 drivers/net/ethernet/qlogic/qede/qede_fp.c new_mapping = curr_prod->mapping + curr_prod->page_offset; mapping 551 drivers/net/ethernet/qlogic/qede/qede_fp.c dma_unmap_page(rxq->dev, curr_cons->mapping, mapping 820 drivers/net/ethernet/qlogic/qede/qede_fp.c tpa_info->buffer.mapping = sw_rx_data_cons->mapping; mapping 959 drivers/net/ethernet/qlogic/qede/qede_fp.c dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, mapping 1101 drivers/net/ethernet/qlogic/qede/qede_fp.c dma_unmap_page(rxq->dev, bd->mapping, mapping 1160 drivers/net/ethernet/qlogic/qede/qede_fp.c dma_unmap_page(rxq->dev, bd->mapping, mapping 1440 drivers/net/ethernet/qlogic/qede/qede_fp.c dma_addr_t mapping; mapping 1481 drivers/net/ethernet/qlogic/qede/qede_fp.c mapping = dma_map_single(txq->dev, skb->data, mapping 1483 drivers/net/ethernet/qlogic/qede/qede_fp.c if (unlikely(dma_mapping_error(txq->dev, mapping))) { mapping 1490 drivers/net/ethernet/qlogic/qede/qede_fp.c BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); mapping 1581 drivers/net/ethernet/qlogic/qede/qede_fp.c mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), mapping 1585 drivers/net/ethernet/qlogic/qede/qede_fp.c BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, mapping 1353 drivers/net/ethernet/qlogic/qede/qede_main.c rx_buf->mapping, PAGE_SIZE, 
rxq->data_direction); mapping 296 drivers/net/ethernet/rdc/r6040.c dma_addr_t mapping = desc_dma; mapping 299 drivers/net/ethernet/rdc/r6040.c mapping += sizeof(*desc); mapping 300 drivers/net/ethernet/rdc/r6040.c desc->ndesc = cpu_to_le32(mapping); mapping 474 drivers/net/ethernet/realtek/8139cp.c dma_addr_t mapping, new_mapping; mapping 488 drivers/net/ethernet/realtek/8139cp.c mapping = le64_to_cpu(desc->addr); mapping 524 drivers/net/ethernet/realtek/8139cp.c dma_unmap_single(&cp->pdev->dev, mapping, mapping 539 drivers/net/ethernet/realtek/8139cp.c mapping = new_mapping; mapping 543 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping); mapping 781 drivers/net/ethernet/realtek/8139cp.c dma_addr_t mapping; mapping 784 drivers/net/ethernet/realtek/8139cp.c mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); mapping 785 drivers/net/ethernet/realtek/8139cp.c if (dma_mapping_error(&cp->pdev->dev, mapping)) mapping 789 drivers/net/ethernet/realtek/8139cp.c txd->addr = cpu_to_le64(mapping); mapping 822 drivers/net/ethernet/realtek/8139cp.c dma_addr_t mapping; mapping 827 drivers/net/ethernet/realtek/8139cp.c mapping = dma_map_single(&cp->pdev->dev, mapping 830 drivers/net/ethernet/realtek/8139cp.c if (dma_mapping_error(&cp->pdev->dev, mapping)) { mapping 844 drivers/net/ethernet/realtek/8139cp.c txd->addr = cpu_to_le64(mapping); mapping 1065 drivers/net/ethernet/realtek/8139cp.c dma_addr_t mapping; mapping 1071 drivers/net/ethernet/realtek/8139cp.c mapping = dma_map_single(&cp->pdev->dev, skb->data, mapping 1073 drivers/net/ethernet/realtek/8139cp.c if (dma_mapping_error(&cp->pdev->dev, mapping)) { mapping 1080 drivers/net/ethernet/realtek/8139cp.c cp->rx_ring[i].addr = cpu_to_le64(mapping); mapping 5541 drivers/net/ethernet/realtek/r8169_main.c dma_addr_t mapping; mapping 5548 drivers/net/ethernet/realtek/r8169_main.c mapping = dma_map_page(d, data, 0, R8169_RX_BUF_SIZE, DMA_FROM_DEVICE); mapping 5549 drivers/net/ethernet/realtek/r8169_main.c if (unlikely(dma_mapping_error(d, mapping))) { mapping 5556 drivers/net/ethernet/realtek/r8169_main.c desc->addr = cpu_to_le64(mapping); mapping 5707 drivers/net/ethernet/realtek/r8169_main.c dma_addr_t mapping; mapping 5716 drivers/net/ethernet/realtek/r8169_main.c mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); mapping 5717 drivers/net/ethernet/realtek/r8169_main.c if (unlikely(dma_mapping_error(d, mapping))) { mapping 5726 drivers/net/ethernet/realtek/r8169_main.c txd->addr = cpu_to_le64(mapping); mapping 5887 drivers/net/ethernet/realtek/r8169_main.c dma_addr_t mapping; mapping 5912 drivers/net/ethernet/realtek/r8169_main.c mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); mapping 5913 drivers/net/ethernet/realtek/r8169_main.c if (unlikely(dma_mapping_error(d, mapping))) { mapping 5920 drivers/net/ethernet/realtek/r8169_main.c txd->addr = cpu_to_le64(mapping); mapping 471 drivers/net/ethernet/sis/sis190.c static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, mapping 474 drivers/net/ethernet/sis/sis190.c desc->addr = cpu_to_le32(mapping); mapping 492 drivers/net/ethernet/sis/sis190.c dma_addr_t mapping; mapping 497 drivers/net/ethernet/sis/sis190.c mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz, mapping 499 drivers/net/ethernet/sis/sis190.c if (pci_dma_mapping_error(tp->pci_dev, mapping)) mapping 501 drivers/net/ethernet/sis/sis190.c sis190_map_to_asic(desc, mapping, rx_buf_sz); mapping 1178 drivers/net/ethernet/sis/sis190.c dma_addr_t 
mapping; mapping 1200 drivers/net/ethernet/sis/sis190.c mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); mapping 1201 drivers/net/ethernet/sis/sis190.c if (pci_dma_mapping_error(tp->pci_dev, mapping)) { mapping 1210 drivers/net/ethernet/sis/sis190.c desc->addr = cpu_to_le32(mapping); mapping 41 drivers/net/ethernet/smsc/smsc9420.c dma_addr_t mapping; mapping 508 drivers/net/ethernet/smsc/smsc9420.c BUG_ON(!pd->tx_buffers[i].mapping); mapping 509 drivers/net/ethernet/smsc/smsc9420.c pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping, mapping 541 drivers/net/ethernet/smsc/smsc9420.c if (pd->rx_buffers[i].mapping) mapping 542 drivers/net/ethernet/smsc/smsc9420.c pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping, mapping 761 drivers/net/ethernet/smsc/smsc9420.c pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, mapping 763 drivers/net/ethernet/smsc/smsc9420.c pd->rx_buffers[index].mapping = 0; mapping 786 drivers/net/ethernet/smsc/smsc9420.c dma_addr_t mapping; mapping 789 drivers/net/ethernet/smsc/smsc9420.c BUG_ON(pd->rx_buffers[index].mapping); mapping 794 drivers/net/ethernet/smsc/smsc9420.c mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), mapping 796 drivers/net/ethernet/smsc/smsc9420.c if (pci_dma_mapping_error(pd->pdev, mapping)) { mapping 803 drivers/net/ethernet/smsc/smsc9420.c pd->rx_buffers[index].mapping = mapping; mapping 804 drivers/net/ethernet/smsc/smsc9420.c pd->rx_ring[index].buffer1 = mapping + NET_IP_ALIGN; mapping 911 drivers/net/ethernet/smsc/smsc9420.c BUG_ON(!pd->tx_buffers[index].mapping); mapping 913 drivers/net/ethernet/smsc/smsc9420.c pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping, mapping 915 drivers/net/ethernet/smsc/smsc9420.c pd->tx_buffers[index].mapping = 0; mapping 931 drivers/net/ethernet/smsc/smsc9420.c dma_addr_t mapping; mapping 942 drivers/net/ethernet/smsc/smsc9420.c BUG_ON(pd->tx_buffers[index].mapping); mapping 944 drivers/net/ethernet/smsc/smsc9420.c mapping = pci_map_single(pd->pdev, skb->data, mapping 946 drivers/net/ethernet/smsc/smsc9420.c if (pci_dma_mapping_error(pd->pdev, mapping)) { mapping 953 drivers/net/ethernet/smsc/smsc9420.c pd->tx_buffers[index].mapping = mapping; mapping 965 drivers/net/ethernet/smsc/smsc9420.c pd->tx_ring[index].buffer1 = mapping; mapping 1194 drivers/net/ethernet/smsc/smsc9420.c pd->tx_buffers[i].mapping = 0; mapping 1230 drivers/net/ethernet/smsc/smsc9420.c pd->rx_buffers[i].mapping = 0; mapping 2726 drivers/net/ethernet/sun/cassini.c dma_addr_t mapping, int len, u64 ctrl, int last) mapping 2736 drivers/net/ethernet/sun/cassini.c txd->buffer = cpu_to_le64(mapping); mapping 2758 drivers/net/ethernet/sun/cassini.c dma_addr_t mapping; mapping 2789 drivers/net/ethernet/sun/cassini.c mapping = pci_map_page(cp->pdev, virt_to_page(skb->data), mapping 2797 drivers/net/ethernet/sun/cassini.c cas_write_txd(cp, ring, entry, mapping, len - tabort, mapping 2803 drivers/net/ethernet/sun/cassini.c mapping = tx_tiny_map(cp, ring, entry, tentry); mapping 2804 drivers/net/ethernet/sun/cassini.c cas_write_txd(cp, ring, entry, mapping, tabort, ctrl, mapping 2807 drivers/net/ethernet/sun/cassini.c cas_write_txd(cp, ring, entry, mapping, len, ctrl | mapping 2816 drivers/net/ethernet/sun/cassini.c mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, mapping 2824 drivers/net/ethernet/sun/cassini.c cas_write_txd(cp, ring, entry, mapping, len - tabort, mapping 2833 drivers/net/ethernet/sun/cassini.c mapping = tx_tiny_map(cp, ring, entry, tentry); mapping 2837 
drivers/net/ethernet/sun/cassini.c cas_write_txd(cp, ring, entry, mapping, len, ctrl, mapping 3286 drivers/net/ethernet/sun/niu.c for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) { mapping 3303 drivers/net/ethernet/sun/niu.c page->mapping = (struct address_space *) rp->rxhash[h]; mapping 3385 drivers/net/ethernet/sun/niu.c *link = (struct page *) page->mapping; mapping 3389 drivers/net/ethernet/sun/niu.c page->mapping = NULL; mapping 3454 drivers/net/ethernet/sun/niu.c *link = (struct page *) page->mapping; mapping 3458 drivers/net/ethernet/sun/niu.c page->mapping = NULL; mapping 3521 drivers/net/ethernet/sun/niu.c struct page *next = (struct page *) page->mapping; mapping 3527 drivers/net/ethernet/sun/niu.c page->mapping = NULL; mapping 3556 drivers/net/ethernet/sun/niu.c np->ops->unmap_single(np->device, tb->mapping, mapping 3571 drivers/net/ethernet/sun/niu.c np->ops->unmap_page(np->device, tb->mapping, mapping 6446 drivers/net/ethernet/sun/niu.c (struct page *) page->mapping; mapping 6531 drivers/net/ethernet/sun/niu.c u64 mapping, u64 len, u64 mark, mapping 6539 drivers/net/ethernet/sun/niu.c (mapping & TX_DESC_SAD)); mapping 6618 drivers/net/ethernet/sun/niu.c u64 mapping, mrk; mapping 6657 drivers/net/ethernet/sun/niu.c mapping = np->ops->map_single(np->device, skb->data, mapping 6663 drivers/net/ethernet/sun/niu.c rp->tx_buffs[prod].mapping = mapping; mapping 6685 drivers/net/ethernet/sun/niu.c niu_set_txd(rp, prod, mapping, this_len, mrk, nfg); mapping 6689 drivers/net/ethernet/sun/niu.c mapping += this_len; mapping 6697 drivers/net/ethernet/sun/niu.c mapping = np->ops->map_page(np->device, skb_frag_page(frag), mapping 6702 drivers/net/ethernet/sun/niu.c rp->tx_buffs[prod].mapping = mapping; mapping 6704 drivers/net/ethernet/sun/niu.c niu_set_txd(rp, prod, mapping, len, 0, 0); mapping 2836 drivers/net/ethernet/sun/niu.h u64 mapping; mapping 958 drivers/net/ethernet/sun/sunbmac.c u32 mapping; mapping 961 drivers/net/ethernet/sun/sunbmac.c mapping = dma_map_single(&bp->bigmac_op->dev, skb->data, mapping 970 drivers/net/ethernet/sun/sunbmac.c bp->bmac_block->be_txd[entry].tx_addr = mapping; mapping 1031 drivers/net/ethernet/sun/sungem.c dma_addr_t mapping; mapping 1035 drivers/net/ethernet/sun/sungem.c mapping = pci_map_page(gp->pdev, mapping 1042 drivers/net/ethernet/sun/sungem.c txd->buffer = cpu_to_le64(mapping); mapping 1069 drivers/net/ethernet/sun/sungem.c dma_addr_t mapping; mapping 1073 drivers/net/ethernet/sun/sungem.c mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag, mapping 1080 drivers/net/ethernet/sun/sungem.c txd->buffer = cpu_to_le64(mapping); mapping 1266 drivers/net/ethernet/sun/sunhme.c u32 mapping; mapping 1277 drivers/net/ethernet/sun/sunhme.c mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, mapping 1279 drivers/net/ethernet/sun/sunhme.c if (dma_mapping_error(hp->dma_dev, mapping)) { mapping 1286 drivers/net/ethernet/sun/sunhme.c mapping); mapping 2030 drivers/net/ethernet/sun/sunhme.c u32 mapping; mapping 2039 drivers/net/ethernet/sun/sunhme.c mapping = dma_map_single(hp->dma_dev, new_skb->data, mapping 2042 drivers/net/ethernet/sun/sunhme.c if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { mapping 2052 drivers/net/ethernet/sun/sunhme.c mapping); mapping 2318 drivers/net/ethernet/sun/sunhme.c u32 mapping, len; mapping 2321 drivers/net/ethernet/sun/sunhme.c mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); mapping 2322 drivers/net/ethernet/sun/sunhme.c if (unlikely(dma_mapping_error(hp->dma_dev, 
mapping))) mapping 2327 drivers/net/ethernet/sun/sunhme.c mapping); mapping 2345 drivers/net/ethernet/sun/sunhme.c u32 len, mapping, this_txflags; mapping 2348 drivers/net/ethernet/sun/sunhme.c mapping = skb_frag_dma_map(hp->dma_dev, this_frag, mapping 2350 drivers/net/ethernet/sun/sunhme.c if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) { mapping 2360 drivers/net/ethernet/sun/sunhme.c mapping); mapping 183 drivers/net/ethernet/ti/cpmac.c dma_addr_t mapping; mapping 360 drivers/net/ethernet/ti/cpmac.c cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); mapping 453 drivers/net/ethernet/ti/cpmac.c priv->rx_head->prev->hw_next = priv->rx_head->mapping; mapping 489 drivers/net/ethernet/ti/cpmac.c cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); mapping 575 drivers/net/ethernet/ti/cpmac.c cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); mapping 586 drivers/net/ethernet/ti/cpmac.c cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); mapping 645 drivers/net/ethernet/ti/cpmac.c cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); mapping 693 drivers/net/ethernet/ti/cpmac.c desc->hw_next = desc->next->mapping; mapping 946 drivers/net/ethernet/ti/cpmac.c priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; mapping 964 drivers/net/ethernet/ti/cpmac.c desc->hw_next = (u32)desc->next->mapping; mapping 1224 drivers/net/wan/z85230.c void z8530_describe(struct z8530_dev *dev, char *mapping, unsigned long io) mapping 1229 drivers/net/wan/z85230.c mapping, mapping 400 drivers/net/wan/z85230.h void z8530_describe(struct z8530_dev *, char *mapping, unsigned long io); mapping 327 drivers/net/wireless/admtek/adm8211.c pci_unmap_single(priv->pdev, info->mapping, mapping 387 drivers/net/wireless/admtek/adm8211.c priv->rx_buffers[entry].mapping, mapping 394 drivers/net/wireless/admtek/adm8211.c priv->rx_buffers[entry].mapping, mapping 404 drivers/net/wireless/admtek/adm8211.c priv->rx_buffers[entry].mapping, mapping 407 drivers/net/wireless/admtek/adm8211.c priv->rx_buffers[entry].mapping = mapping 413 drivers/net/wireless/admtek/adm8211.c priv->rx_buffers[entry].mapping)) { mapping 425 drivers/net/wireless/admtek/adm8211.c cpu_to_le32(priv->rx_buffers[entry].mapping); mapping 1452 drivers/net/wireless/admtek/adm8211.c rx_info->mapping = pci_map_single(priv->pdev, mapping 1456 drivers/net/wireless/admtek/adm8211.c if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) { mapping 1462 drivers/net/wireless/admtek/adm8211.c desc->buffer1 = cpu_to_le32(rx_info->mapping); mapping 1472 drivers/net/wireless/admtek/adm8211.c tx_info->mapping = 0; mapping 1495 drivers/net/wireless/admtek/adm8211.c priv->rx_buffers[i].mapping, mapping 1506 drivers/net/wireless/admtek/adm8211.c priv->tx_buffers[i].mapping, mapping 1631 drivers/net/wireless/admtek/adm8211.c dma_addr_t mapping; mapping 1635 drivers/net/wireless/admtek/adm8211.c mapping = pci_map_single(priv->pdev, skb->data, skb->len, mapping 1637 drivers/net/wireless/admtek/adm8211.c if (pci_dma_mapping_error(priv->pdev, mapping)) mapping 1653 drivers/net/wireless/admtek/adm8211.c priv->tx_buffers[entry].mapping = mapping; mapping 1655 drivers/net/wireless/admtek/adm8211.c priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping); mapping 441 drivers/net/wireless/admtek/adm8211.h dma_addr_t mapping; mapping 446 drivers/net/wireless/admtek/adm8211.h dma_addr_t mapping; mapping 596 drivers/net/wireless/intel/iwlegacy/3945-mac.c dma_unmap_addr_set(out_meta, mapping, txcmd_phys); mapping 655 
drivers/net/wireless/intel/iwlegacy/3945.c pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), mapping 1837 drivers/net/wireless/intel/iwlegacy/4965-mac.c dma_unmap_addr_set(out_meta, mapping, txcmd_phys); mapping 3937 drivers/net/wireless/intel/iwlegacy/4965-mac.c pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping), mapping 2826 drivers/net/wireless/intel/iwlegacy/common.c dma_unmap_addr(&txq->meta[i], mapping), mapping 2838 drivers/net/wireless/intel/iwlegacy/common.c dma_unmap_addr(&txq->meta[i], mapping), mapping 3215 drivers/net/wireless/intel/iwlegacy/common.c dma_unmap_addr_set(out_meta, mapping, phys_addr); mapping 3309 drivers/net/wireless/intel/iwlegacy/common.c pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping), mapping 109 drivers/net/wireless/intel/iwlegacy/common.h DEFINE_DMA_UNMAP_ADDR(mapping); mapping 589 drivers/net/wireless/intersil/p54/fwio.c memset(edcf->mapping, 0, sizeof(edcf->mapping)); mapping 405 drivers/net/wireless/intersil/p54/lmac.h u8 mapping[4]; mapping 151 drivers/net/wireless/intersil/p54/p54pci.c dma_addr_t mapping; mapping 156 drivers/net/wireless/intersil/p54/p54pci.c mapping = pci_map_single(priv->pdev, mapping 161 drivers/net/wireless/intersil/p54/p54pci.c if (pci_dma_mapping_error(priv->pdev, mapping)) { mapping 168 drivers/net/wireless/intersil/p54/p54pci.c desc->host_addr = cpu_to_le32(mapping); mapping 330 drivers/net/wireless/intersil/p54/p54pci.c dma_addr_t mapping; mapping 337 drivers/net/wireless/intersil/p54/p54pci.c mapping = pci_map_single(priv->pdev, skb->data, skb->len, mapping 339 drivers/net/wireless/intersil/p54/p54pci.c if (pci_dma_mapping_error(priv->pdev, mapping)) { mapping 348 drivers/net/wireless/intersil/p54/p54pci.c desc->host_addr = cpu_to_le32(mapping); mapping 59 drivers/net/wireless/marvell/mwifiex/pcie.c struct mwifiex_dma_mapping mapping; mapping 61 drivers/net/wireless/marvell/mwifiex/pcie.c mapping.addr = pci_map_single(card->dev, skb->data, size, flags); mapping 62 drivers/net/wireless/marvell/mwifiex/pcie.c if (pci_dma_mapping_error(card->dev, mapping.addr)) { mapping 66 drivers/net/wireless/marvell/mwifiex/pcie.c mapping.len = size; mapping 67 drivers/net/wireless/marvell/mwifiex/pcie.c mwifiex_store_mapping(skb, &mapping); mapping 75 drivers/net/wireless/marvell/mwifiex/pcie.c struct mwifiex_dma_mapping mapping; mapping 77 drivers/net/wireless/marvell/mwifiex/pcie.c mwifiex_get_mapping(skb, &mapping); mapping 78 drivers/net/wireless/marvell/mwifiex/pcie.c pci_unmap_single(card->dev, mapping.addr, mapping.len, flags); mapping 69 drivers/net/wireless/marvell/mwifiex/util.h struct mwifiex_dma_mapping *mapping) mapping 73 drivers/net/wireless/marvell/mwifiex/util.h memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); mapping 77 drivers/net/wireless/marvell/mwifiex/util.h struct mwifiex_dma_mapping *mapping) mapping 81 drivers/net/wireless/marvell/mwifiex/util.h memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); mapping 86 drivers/net/wireless/marvell/mwifiex/util.h struct mwifiex_dma_mapping mapping; mapping 88 drivers/net/wireless/marvell/mwifiex/util.h mwifiex_get_mapping(skb, &mapping); mapping 90 drivers/net/wireless/marvell/mwifiex/util.h return mapping.addr; mapping 218 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c dma_addr_t mapping; mapping 263 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c mapping = pci_map_single(priv->pdev, mapping 267 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (pci_dma_mapping_error(priv->pdev, mapping)) { mapping 327 
drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c *((dma_addr_t *) skb->cb) = mapping; mapping 465 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c dma_addr_t mapping; mapping 476 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c mapping = pci_map_single(priv->pdev, skb->data, mapping 479 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (pci_dma_mapping_error(priv->pdev, mapping)) { mapping 558 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c entry->tx_buf = cpu_to_le32(mapping); mapping 1018 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c dma_addr_t *mapping; mapping 1027 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c mapping = (dma_addr_t *)skb->cb; mapping 1028 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb), mapping 1031 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c if (pci_dma_mapping_error(priv->pdev, *mapping)) { mapping 1039 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c entry->rx_buf = cpu_to_le32(*mapping); mapping 497 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c dma_addr_t mapping; mapping 518 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, mapping 520 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 645 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 677 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 684 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 707 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 404 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 410 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 534 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 571 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 578 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 601 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 527 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c dma_addr_t mapping; mapping 545 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, mapping 547 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 672 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); mapping 700 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 705 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 728 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); mapping 660 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c dma_addr_t mapping; mapping 681 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, mapping 683 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c if 
(pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 691 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c skb, mapping); mapping 821 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 845 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 851 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 874 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 331 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, mapping 335 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 488 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); mapping 501 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, mapping 504 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 524 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); mapping 543 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, mapping); mapping 365 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 370 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 493 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 532 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 539 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 562 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 425 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c dma_addr_t mapping; mapping 445 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, mapping 447 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 567 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 598 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 602 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 624 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 680 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c dma_addr_t mapping; mapping 693 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len, mapping 695 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c if (pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 805 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 837 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c dma_addr_t mapping = pci_map_single(rtlpci->pdev, mapping 841 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c if 
(pci_dma_mapping_error(rtlpci->pdev, mapping)) { mapping 867 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c set_tx_desc_tx_buffer_address(pdesc, mapping); mapping 1211 drivers/net/wireless/ti/wlcore/main.c int q, mapping; mapping 1221 drivers/net/wireless/ti/wlcore/main.c mapping = skb_get_queue_mapping(skb); mapping 1222 drivers/net/wireless/ti/wlcore/main.c q = wl1271_tx_get_queue(mapping); mapping 246 drivers/net/xen-netback/common.h u32 mapping[2][XEN_NETBK_MAX_HASH_MAPPING_SIZE]; mapping 327 drivers/net/xen-netback/hash.c memset(vif->hash.mapping[vif->hash.mapping_sel], 0, mapping 336 drivers/net/xen-netback/hash.c u32 *mapping = vif->hash.mapping[!vif->hash.mapping_sel]; mapping 342 drivers/net/xen-netback/hash.c .len = len * sizeof(*mapping), mapping 347 drivers/net/xen-netback/hash.c len > XEN_PAGE_SIZE / sizeof(*mapping)) mapping 350 drivers/net/xen-netback/hash.c copy_op[0].dest.u.gmfn = virt_to_gfn(mapping + off); mapping 351 drivers/net/xen-netback/hash.c copy_op[0].dest.offset = xen_offset_in_page(mapping + off); mapping 355 drivers/net/xen-netback/hash.c copy_op[1].dest.u.gmfn = virt_to_gfn(mapping + off + len); mapping 362 drivers/net/xen-netback/hash.c memcpy(mapping, vif->hash.mapping[vif->hash.mapping_sel], mapping 363 drivers/net/xen-netback/hash.c vif->hash.size * sizeof(*mapping)); mapping 374 drivers/net/xen-netback/hash.c if (mapping[off++] >= vif->num_queues) mapping 430 drivers/net/xen-netback/hash.c const u32 *mapping = vif->hash.mapping[vif->hash.mapping_sel]; mapping 444 drivers/net/xen-netback/hash.c seq_printf(m, "%4u ", mapping[i]); mapping 172 drivers/net/xen-netback/interface.c return vif->hash.mapping[vif->hash.mapping_sel] mapping 94 drivers/nvdimm/btt.c static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping, mapping 103 drivers/nvdimm/btt.c return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags); mapping 106 drivers/nvdimm/btt.c static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping, mapping 116 drivers/nvdimm/btt.c mapping = ent_lba(mapping); mapping 127 drivers/nvdimm/btt.c mapping |= MAP_ENT_NORMAL; mapping 130 drivers/nvdimm/btt.c mapping |= (1 << MAP_ERR_SHIFT); mapping 133 drivers/nvdimm/btt.c mapping |= (1 << MAP_TRIM_SHIFT); mapping 146 drivers/nvdimm/btt.c mapping_le = cpu_to_le32(mapping); mapping 150 drivers/nvdimm/btt.c static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping, mapping 181 drivers/nvdimm/btt.c *mapping = lba; mapping 184 drivers/nvdimm/btt.c *mapping = postmap; mapping 188 drivers/nvdimm/btt.c *mapping = postmap; mapping 192 drivers/nvdimm/btt.c *mapping = postmap; mapping 227 drivers/nvdimm/dimm_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 574 drivers/nvdimm/dimm_devs.c nd_mapping = &nd_region->mapping[i]; mapping 642 drivers/nvdimm/dimm_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 1250 drivers/nvdimm/label.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1282 drivers/nvdimm/label.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1295 drivers/nvdimm/label.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 297 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 315 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 510 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 578 drivers/nvdimm/namespace_devs.c struct nd_mapping 
*nd_mapping = &nd_region->mapping[0]; mapping 814 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 879 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 932 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 1018 drivers/nvdimm/namespace_devs.c nd_mapping = &nd_region->mapping[i]; mapping 1150 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1224 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1240 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1423 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1444 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1798 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1856 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1957 drivers/nvdimm/namespace_devs.c struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm; mapping 1987 drivers/nvdimm/namespace_devs.c nd_mapping = &nd_region->mapping[i]; mapping 2181 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 2219 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 2306 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 2405 drivers/nvdimm/namespace_devs.c nd_mapping = &nd_region->mapping[i]; mapping 2450 drivers/nvdimm/namespace_devs.c nd_mapping = &nd_region->mapping[i]; mapping 2459 drivers/nvdimm/namespace_devs.c nd_mapping = &nd_region->mapping[reverse]; mapping 2472 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 2492 drivers/nvdimm/namespace_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 159 drivers/nvdimm/nd.h struct nd_mapping mapping[0]; mapping 71 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 98 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 130 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 231 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 258 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 338 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[0]; mapping 370 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 401 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 759 drivers/nvdimm/region_devs.c nd_mapping = &nd_region->mapping[n]; mapping 768 drivers/nvdimm/region_devs.c static ssize_t mapping##idx##_show(struct device *dev, \ mapping 773 drivers/nvdimm/region_devs.c static DEVICE_ATTR_RO(mapping##idx) mapping 944 drivers/nvdimm/region_devs.c struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; mapping 945 drivers/nvdimm/region_devs.c struct nvdimm *nvdimm = mapping->nvdimm; mapping 947 drivers/nvdimm/region_devs.c if ((mapping->start | mapping->size) % PAGE_SIZE) { mapping 980 drivers/nvdimm/region_devs.c nd_region = kzalloc(struct_size(nd_region, mapping, mapping 1005 
drivers/nvdimm/region_devs.c struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; mapping 1006 drivers/nvdimm/region_devs.c struct nvdimm *nvdimm = mapping->nvdimm; mapping 1008 drivers/nvdimm/region_devs.c nd_region->mapping[i].nvdimm = nvdimm; mapping 1009 drivers/nvdimm/region_devs.c nd_region->mapping[i].start = mapping->start; mapping 1010 drivers/nvdimm/region_devs.c nd_region->mapping[i].size = mapping->size; mapping 1011 drivers/nvdimm/region_devs.c nd_region->mapping[i].position = mapping->position; mapping 1012 drivers/nvdimm/region_devs.c INIT_LIST_HEAD(&nd_region->mapping[i].labels); mapping 1013 drivers/nvdimm/region_devs.c mutex_init(&nd_region->mapping[i].lock); mapping 1146 drivers/nvdimm/region_devs.c struct nd_mapping *nd_mapping = &nd_region->mapping[i]; mapping 1284 drivers/perf/arm-cci.c int mapping; mapping 1286 drivers/perf/arm-cci.c mapping = pmu_map_event(event); mapping 1288 drivers/perf/arm-cci.c if (mapping < 0) { mapping 1291 drivers/perf/arm-cci.c return mapping; mapping 1307 drivers/perf/arm-cci.c hwc->config_base |= (unsigned long)mapping; mapping 75 drivers/perf/arm_pmu.c int mapping; mapping 83 drivers/perf/arm_pmu.c mapping = (*event_map)[config]; mapping 84 drivers/perf/arm_pmu.c return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; mapping 365 drivers/perf/arm_pmu.c int mapping; mapping 368 drivers/perf/arm_pmu.c mapping = armpmu->map_event(event); mapping 370 drivers/perf/arm_pmu.c if (mapping < 0) { mapping 373 drivers/perf/arm_pmu.c return mapping; mapping 400 drivers/perf/arm_pmu.c hwc->config_base |= (unsigned long)mapping; mapping 1088 drivers/rapidio/devices/rio_mport_cdev.c u64 size, struct rio_mport_mapping **mapping) mapping 1111 drivers/rapidio/devices/rio_mport_cdev.c *mapping = map; mapping 1121 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *mapping = NULL; mapping 1127 drivers/rapidio/devices/rio_mport_cdev.c ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); mapping 1131 drivers/rapidio/devices/rio_mport_cdev.c map.dma_handle = mapping->phys_addr; mapping 1135 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&mapping->ref, mport_release_mapping); mapping 1202 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping **mapping) mapping 1238 drivers/rapidio/devices/rio_mport_cdev.c *mapping = map; mapping 1252 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping **mapping) mapping 1266 drivers/rapidio/devices/rio_mport_cdev.c *mapping = map; mapping 1281 drivers/rapidio/devices/rio_mport_cdev.c return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping); mapping 1289 drivers/rapidio/devices/rio_mport_cdev.c struct rio_mport_mapping *mapping = NULL; mapping 1300 drivers/rapidio/devices/rio_mport_cdev.c map.length, &mapping); mapping 1304 drivers/rapidio/devices/rio_mport_cdev.c map.handle = mapping->phys_addr; mapping 1305 drivers/rapidio/devices/rio_mport_cdev.c map.rio_addr = mapping->rio_addr; mapping 1309 drivers/rapidio/devices/rio_mport_cdev.c if (ret == 0 && mapping->filp == filp) { mapping 1311 drivers/rapidio/devices/rio_mport_cdev.c kref_put(&mapping->ref, mport_release_mapping); mapping 1495 drivers/regulator/qcom_spmi-regulator.c const struct spmi_regulator_mapping *mapping; mapping 1521 drivers/regulator/qcom_spmi-regulator.c mapping = &supported_regulators[i]; mapping 1522 drivers/regulator/qcom_spmi-regulator.c if (mapping->type == type && mapping->subtype == subtype mapping 1523 drivers/regulator/qcom_spmi-regulator.c && mapping->revision_min <= 
dig_major_rev mapping 1524 drivers/regulator/qcom_spmi-regulator.c && mapping->revision_max >= dig_major_rev) mapping 1535 drivers/regulator/qcom_spmi-regulator.c vreg->logical_type = mapping->logical_type; mapping 1536 drivers/regulator/qcom_spmi-regulator.c vreg->set_points = mapping->set_points; mapping 1537 drivers/regulator/qcom_spmi-regulator.c vreg->hpm_min_load = mapping->hpm_min_load; mapping 1538 drivers/regulator/qcom_spmi-regulator.c vreg->desc.ops = mapping->ops; mapping 1540 drivers/regulator/qcom_spmi-regulator.c if (mapping->set_points) { mapping 1541 drivers/regulator/qcom_spmi-regulator.c if (!mapping->set_points->n_voltages) mapping 1542 drivers/regulator/qcom_spmi-regulator.c spmi_calculate_num_voltages(mapping->set_points); mapping 1543 drivers/regulator/qcom_spmi-regulator.c vreg->desc.n_voltages = mapping->set_points->n_voltages; mapping 680 drivers/remoteproc/remoteproc_core.c struct rproc_mem_entry *mapping; mapping 699 drivers/remoteproc/remoteproc_core.c mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); mapping 700 drivers/remoteproc/remoteproc_core.c if (!mapping) mapping 716 drivers/remoteproc/remoteproc_core.c mapping->da = rsc->da; mapping 717 drivers/remoteproc/remoteproc_core.c mapping->len = rsc->len; mapping 718 drivers/remoteproc/remoteproc_core.c list_add_tail(&mapping->node, &rproc->mappings); mapping 726 drivers/remoteproc/remoteproc_core.c kfree(mapping); mapping 741 drivers/remoteproc/remoteproc_core.c struct rproc_mem_entry *mapping = NULL; mapping 787 drivers/remoteproc/remoteproc_core.c mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); mapping 788 drivers/remoteproc/remoteproc_core.c if (!mapping) { mapping 807 drivers/remoteproc/remoteproc_core.c mapping->da = mem->da; mapping 808 drivers/remoteproc/remoteproc_core.c mapping->len = mem->len; mapping 809 drivers/remoteproc/remoteproc_core.c list_add_tail(&mapping->node, &rproc->mappings); mapping 829 drivers/remoteproc/remoteproc_core.c kfree(mapping); mapping 53 drivers/scsi/cxlflash/ocxl_hw.c if (ctx->mapping) mapping 55 drivers/scsi/cxlflash/ocxl_hw.c ctx->mapping = NULL; mapping 511 drivers/scsi/cxlflash/ocxl_hw.c ctx->mapping = NULL; mapping 1206 drivers/scsi/cxlflash/ocxl_hw.c if (ctx->mapping) { mapping 1243 drivers/scsi/cxlflash/ocxl_hw.c ctx->mapping = file->f_mapping; mapping 53 drivers/scsi/cxlflash/ocxl_hw.h struct address_space *mapping; /* Mapping for pseudo filesystem */ mapping 340 drivers/sh/clk/core.c struct clk_mapping *mapping = clk->mapping; mapping 345 drivers/sh/clk/core.c if (!mapping) { mapping 352 drivers/sh/clk/core.c clk->mapping = &dummy_mapping; mapping 361 drivers/sh/clk/core.c mapping = clkp->mapping; mapping 362 drivers/sh/clk/core.c BUG_ON(!mapping); mapping 368 drivers/sh/clk/core.c if (!mapping->base && mapping->phys) { mapping 369 drivers/sh/clk/core.c kref_init(&mapping->ref); mapping 371 drivers/sh/clk/core.c mapping->base = ioremap_nocache(mapping->phys, mapping->len); mapping 372 drivers/sh/clk/core.c if (unlikely(!mapping->base)) mapping 374 drivers/sh/clk/core.c } else if (mapping->base) { mapping 378 drivers/sh/clk/core.c kref_get(&mapping->ref); mapping 381 drivers/sh/clk/core.c clk->mapping = mapping; mapping 383 drivers/sh/clk/core.c clk->mapped_reg = clk->mapping->base; mapping 384 drivers/sh/clk/core.c clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys; mapping 390 drivers/sh/clk/core.c struct clk_mapping *mapping; mapping 392 drivers/sh/clk/core.c mapping = container_of(kref, struct clk_mapping, ref); mapping 394 
drivers/sh/clk/core.c iounmap(mapping->base); mapping 399 drivers/sh/clk/core.c struct clk_mapping *mapping = clk->mapping; mapping 402 drivers/sh/clk/core.c if (mapping == &dummy_mapping) mapping 405 drivers/sh/clk/core.c kref_put(&mapping->ref, clk_destroy_mapping); mapping 406 drivers/sh/clk/core.c clk->mapping = NULL; mapping 414 drivers/sh/clk/cpg.c value = __raw_readl(clk->mapping->base); mapping 430 drivers/sh/clk/cpg.c __raw_writel(0, clk->mapping->base); mapping 437 drivers/sh/clk/cpg.c value = __raw_readl(clk->mapping->base) >> 16; mapping 441 drivers/sh/clk/cpg.c __raw_writel((value << 16) | 0x3, clk->mapping->base); mapping 452 drivers/sh/clk/cpg.c __raw_writel(0, clk->mapping->base); mapping 454 drivers/sh/clk/cpg.c __raw_writel(idx << 16, clk->mapping->base); mapping 486 drivers/sh/clk/cpg.c clks[i].mapping = map; mapping 2815 drivers/staging/exfat/exfat_super.c struct address_space *mapping = inode->i_mapping; mapping 2827 drivers/staging/exfat/exfat_super.c err = filemap_fdatawrite_range(mapping, start, mapping 2829 drivers/staging/exfat/exfat_super.c err2 = sync_mapping_buffers(mapping); mapping 2834 drivers/staging/exfat/exfat_super.c err = filemap_fdatawait_range(mapping, start, mapping 3168 drivers/staging/exfat/exfat_super.c static int exfat_readpages(struct file *file, struct address_space *mapping, mapping 3171 drivers/staging/exfat/exfat_super.c return mpage_readpages(mapping, pages, nr_pages, exfat_get_block); mapping 3179 drivers/staging/exfat/exfat_super.c static int exfat_writepages(struct address_space *mapping, mapping 3182 drivers/staging/exfat/exfat_super.c return mpage_writepages(mapping, wbc, exfat_get_block); mapping 3185 drivers/staging/exfat/exfat_super.c static void exfat_write_failed(struct address_space *mapping, loff_t to) mapping 3187 drivers/staging/exfat/exfat_super.c struct inode *inode = mapping->host; mapping 3196 drivers/staging/exfat/exfat_super.c static int exfat_write_begin(struct file *file, struct address_space *mapping, mapping 3203 drivers/staging/exfat/exfat_super.c ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, mapping 3205 drivers/staging/exfat/exfat_super.c &EXFAT_I(mapping->host)->mmu_private); mapping 3208 drivers/staging/exfat/exfat_super.c exfat_write_failed(mapping, pos + len); mapping 3212 drivers/staging/exfat/exfat_super.c static int exfat_write_end(struct file *file, struct address_space *mapping, mapping 3216 drivers/staging/exfat/exfat_super.c struct inode *inode = mapping->host; mapping 3220 drivers/staging/exfat/exfat_super.c err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); mapping 3223 drivers/staging/exfat/exfat_super.c exfat_write_failed(mapping, pos + len); mapping 3236 drivers/staging/exfat/exfat_super.c struct address_space *mapping = iocb->ki_filp->f_mapping; mapping 3249 drivers/staging/exfat/exfat_super.c exfat_write_failed(mapping, iov_iter_count(iter)); mapping 3253 drivers/staging/exfat/exfat_super.c static sector_t _exfat_bmap(struct address_space *mapping, sector_t block) mapping 3258 drivers/staging/exfat/exfat_super.c down_read(&EXFAT_I(mapping->host)->truncate_lock); mapping 3259 drivers/staging/exfat/exfat_super.c blocknr = generic_block_bmap(mapping, block, exfat_get_block); mapping 3260 drivers/staging/exfat/exfat_super.c up_read(&EXFAT_I(mapping->host)->truncate_lock); mapping 77 drivers/staging/gasket/gasket_sysfs.c static void put_mapping(struct gasket_sysfs_mapping *mapping) mapping 84 drivers/staging/gasket/gasket_sysfs.c if (!mapping) { mapping 89 
drivers/staging/gasket/gasket_sysfs.c mutex_lock(&mapping->mutex); mapping 90 drivers/staging/gasket/gasket_sysfs.c if (kref_put(&mapping->refcount, release_entry)) { mapping 91 drivers/staging/gasket/gasket_sysfs.c dev_dbg(mapping->device, "Removing Gasket sysfs mapping\n"); mapping 99 drivers/staging/gasket/gasket_sysfs.c device = mapping->device; mapping 100 drivers/staging/gasket/gasket_sysfs.c num_files_to_remove = mapping->attribute_count; mapping 107 drivers/staging/gasket/gasket_sysfs.c mapping->attributes[i].attr; mapping 111 drivers/staging/gasket/gasket_sysfs.c kfree(mapping->attributes); mapping 112 drivers/staging/gasket/gasket_sysfs.c mapping->attributes = NULL; mapping 113 drivers/staging/gasket/gasket_sysfs.c mapping->attribute_count = 0; mapping 114 drivers/staging/gasket/gasket_sysfs.c put_device(mapping->device); mapping 115 drivers/staging/gasket/gasket_sysfs.c mapping->device = NULL; mapping 116 drivers/staging/gasket/gasket_sysfs.c mapping->gasket_dev = NULL; mapping 118 drivers/staging/gasket/gasket_sysfs.c mutex_unlock(&mapping->mutex); mapping 134 drivers/staging/gasket/gasket_sysfs.c static void put_mapping_n(struct gasket_sysfs_mapping *mapping, int times) mapping 139 drivers/staging/gasket/gasket_sysfs.c put_mapping(mapping); mapping 155 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping; mapping 168 drivers/staging/gasket/gasket_sysfs.c mapping = get_mapping(device); mapping 169 drivers/staging/gasket/gasket_sysfs.c if (mapping) { mapping 172 drivers/staging/gasket/gasket_sysfs.c put_mapping(mapping); mapping 195 drivers/staging/gasket/gasket_sysfs.c mapping = &dev_mappings[map_idx]; mapping 196 drivers/staging/gasket/gasket_sysfs.c mapping->attributes = kcalloc(GASKET_SYSFS_MAX_NODES, mapping 197 drivers/staging/gasket/gasket_sysfs.c sizeof(*mapping->attributes), mapping 199 drivers/staging/gasket/gasket_sysfs.c if (!mapping->attributes) { mapping 201 drivers/staging/gasket/gasket_sysfs.c mutex_unlock(&mapping->mutex); mapping 206 drivers/staging/gasket/gasket_sysfs.c kref_init(&mapping->refcount); mapping 207 drivers/staging/gasket/gasket_sysfs.c mapping->device = get_device(device); mapping 208 drivers/staging/gasket/gasket_sysfs.c mapping->gasket_dev = gasket_dev; mapping 209 drivers/staging/gasket/gasket_sysfs.c mapping->attribute_count = 0; mapping 210 drivers/staging/gasket/gasket_sysfs.c mutex_unlock(&mapping->mutex); mapping 222 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping = get_mapping(device); mapping 224 drivers/staging/gasket/gasket_sysfs.c if (!mapping) { mapping 230 drivers/staging/gasket/gasket_sysfs.c mutex_lock(&mapping->mutex); mapping 232 drivers/staging/gasket/gasket_sysfs.c if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) { mapping 235 drivers/staging/gasket/gasket_sysfs.c mutex_unlock(&mapping->mutex); mapping 236 drivers/staging/gasket/gasket_sysfs.c put_mapping(mapping); mapping 243 drivers/staging/gasket/gasket_sysfs.c mutex_unlock(&mapping->mutex); mapping 244 drivers/staging/gasket/gasket_sysfs.c put_mapping(mapping); mapping 248 drivers/staging/gasket/gasket_sysfs.c mapping->attributes[mapping->attribute_count] = attrs[i]; mapping 249 drivers/staging/gasket/gasket_sysfs.c ++mapping->attribute_count; mapping 252 drivers/staging/gasket/gasket_sysfs.c mutex_unlock(&mapping->mutex); mapping 253 drivers/staging/gasket/gasket_sysfs.c put_mapping(mapping); mapping 260 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping = get_mapping(device); mapping 262 
drivers/staging/gasket/gasket_sysfs.c if (!mapping) { mapping 268 drivers/staging/gasket/gasket_sysfs.c put_mapping_n(mapping, 2); mapping 273 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping = get_mapping(device); mapping 275 drivers/staging/gasket/gasket_sysfs.c if (!mapping) { mapping 280 drivers/staging/gasket/gasket_sysfs.c return mapping->gasket_dev; mapping 286 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping = get_mapping(device); mapping 288 drivers/staging/gasket/gasket_sysfs.c if (!mapping) mapping 292 drivers/staging/gasket/gasket_sysfs.c put_mapping_n(mapping, 2); mapping 301 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping = get_mapping(device); mapping 304 drivers/staging/gasket/gasket_sysfs.c if (!mapping) mapping 307 drivers/staging/gasket/gasket_sysfs.c attrs = mapping->attributes; mapping 308 drivers/staging/gasket/gasket_sysfs.c num_attrs = mapping->attribute_count; mapping 325 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping = get_mapping(device); mapping 328 drivers/staging/gasket/gasket_sysfs.c if (!mapping) mapping 331 drivers/staging/gasket/gasket_sysfs.c attrs = mapping->attributes; mapping 332 drivers/staging/gasket/gasket_sysfs.c num_attrs = mapping->attribute_count; mapping 335 drivers/staging/gasket/gasket_sysfs.c put_mapping_n(mapping, 2); mapping 350 drivers/staging/gasket/gasket_sysfs.c struct gasket_sysfs_mapping *mapping; mapping 366 drivers/staging/gasket/gasket_sysfs.c mapping = get_mapping(device); mapping 367 drivers/staging/gasket/gasket_sysfs.c if (!mapping) { mapping 372 drivers/staging/gasket/gasket_sysfs.c gasket_dev = mapping->gasket_dev; mapping 380 drivers/staging/gasket/gasket_sysfs.c put_mapping(mapping); mapping 393 drivers/staging/gasket/gasket_sysfs.c put_mapping(mapping); mapping 74 drivers/staging/media/tegra-vde/iommu.c if (dev->archdata.mapping) { mapping 75 drivers/staging/media/tegra-vde/iommu.c struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); mapping 78 drivers/staging/media/tegra-vde/iommu.c arm_iommu_release_mapping(mapping); mapping 1172 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c dma_addr_t mapping; mapping 1218 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c mapping = pci_map_single(priv->pdev, skb->data, skb->len, mapping 1220 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c if (pci_dma_mapping_error(priv->pdev, mapping)) { mapping 1278 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c pdesc->TxBuffAddr = mapping; mapping 1285 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len, mapping 1288 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c if (pci_dma_mapping_error(priv->pdev, mapping)) mapping 1308 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c entry->TxBuffAddr = mapping; mapping 1799 drivers/staging/rtl8192e/rtl8192e/rtl_core.c dma_addr_t *mapping; mapping 1806 drivers/staging/rtl8192e/rtl8192e/rtl_core.c mapping = (dma_addr_t *)skb->cb; mapping 1807 drivers/staging/rtl8192e/rtl8192e/rtl_core.c *mapping = pci_map_single(priv->pdev, mapping 1811 drivers/staging/rtl8192e/rtl8192e/rtl_core.c if (pci_dma_mapping_error(priv->pdev, *mapping)) { mapping 1815 drivers/staging/rtl8192e/rtl8192e/rtl_core.c entry->BufferAddress = *mapping; mapping 57 drivers/video/fbdev/core/fb_defio.c page->mapping = vmf->vma->vm_file->f_mapping; mapping 61 drivers/video/fbdev/core/fb_defio.c BUG_ON(!page->mapping); mapping 237 drivers/video/fbdev/core/fb_defio.c page->mapping 
= NULL; mapping 1317 drivers/visorbus/visorchipset.c void *mapping; mapping 1335 drivers/visorbus/visorchipset.c mapping = memremap(addr, bytes, MEMREMAP_WB); mapping 1336 drivers/visorbus/visorchipset.c if (!mapping) mapping 1338 drivers/visorbus/visorchipset.c memcpy(&ctx->data, mapping, bytes); mapping 1339 drivers/visorbus/visorchipset.c memunmap(mapping); mapping 586 drivers/vlynq/vlynq.c struct vlynq_mapping *mapping) mapping 595 drivers/vlynq/vlynq.c writel(mapping[i].offset, &dev->local->rx_mapping[i].offset); mapping 596 drivers/vlynq/vlynq.c writel(mapping[i].size, &dev->local->rx_mapping[i].size); mapping 603 drivers/vlynq/vlynq.c struct vlynq_mapping *mapping) mapping 612 drivers/vlynq/vlynq.c writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset); mapping 613 drivers/vlynq/vlynq.c writel(mapping[i].size, &dev->remote->rx_mapping[i].size); mapping 205 fs/9p/cache.c struct inode *inode = page->mapping->host; mapping 215 fs/9p/cache.c struct inode *inode = page->mapping->host; mapping 279 fs/9p/cache.c struct address_space *mapping, mapping 291 fs/9p/cache.c mapping, pages, nr_pages, mapping 294 fs/9p/cache.c mapping_gfp_mask(mapping)); mapping 35 fs/9p/cache.h struct address_space *mapping, mapping 60 fs/9p/cache.h struct address_space *mapping, mapping 64 fs/9p/cache.h return __v9fs_readpages_from_fscache(inode, mapping, pages, mapping 116 fs/9p/cache.h struct address_space *mapping, mapping 41 fs/9p/vfs_addr.c struct inode *inode = page->mapping->host; mapping 98 fs/9p/vfs_addr.c static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, mapping 104 fs/9p/vfs_addr.c inode = mapping->host; mapping 107 fs/9p/vfs_addr.c ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); mapping 111 fs/9p/vfs_addr.c ret = read_cache_pages(mapping, pages, v9fs_fid_readpage, mapping 150 fs/9p/vfs_addr.c struct inode *inode = page->mapping->host; mapping 191 fs/9p/vfs_addr.c mapping_set_error(page->mapping, retval); mapping 208 fs/9p/vfs_addr.c struct inode *inode = page->mapping->host; mapping 255 fs/9p/vfs_addr.c static int v9fs_write_begin(struct file *filp, struct address_space *mapping, mapping 263 fs/9p/vfs_addr.c struct inode *inode = mapping->host; mapping 266 fs/9p/vfs_addr.c p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); mapping 270 fs/9p/vfs_addr.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 291 fs/9p/vfs_addr.c static int v9fs_write_end(struct file *filp, struct address_space *mapping, mapping 296 fs/9p/vfs_addr.c struct inode *inode = page->mapping->host; mapping 298 fs/9p/vfs_addr.c p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); mapping 562 fs/9p/vfs_file.c if (page->mapping != inode->i_mapping) mapping 45 fs/adfs/inode.c static void adfs_write_failed(struct address_space *mapping, loff_t to) mapping 47 fs/adfs/inode.c struct inode *inode = mapping->host; mapping 53 fs/adfs/inode.c static int adfs_write_begin(struct file *file, struct address_space *mapping, mapping 60 fs/adfs/inode.c ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, mapping 62 fs/adfs/inode.c &ADFS_I(mapping->host)->mmu_private); mapping 64 fs/adfs/inode.c adfs_write_failed(mapping, pos + len); mapping 69 fs/adfs/inode.c static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) mapping 71 fs/adfs/inode.c return generic_block_bmap(mapping, block, adfs_get_block); mapping 382 fs/affs/file.c static void affs_write_failed(struct address_space *mapping, loff_t to) mapping 384 
fs/affs/file.c struct inode *inode = mapping->host; mapping 396 fs/affs/file.c struct address_space *mapping = file->f_mapping; mapping 397 fs/affs/file.c struct inode *inode = mapping->host; mapping 411 fs/affs/file.c affs_write_failed(mapping, offset + count); mapping 415 fs/affs/file.c static int affs_write_begin(struct file *file, struct address_space *mapping, mapping 422 fs/affs/file.c ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, mapping 424 fs/affs/file.c &AFFS_I(mapping->host)->mmu_private); mapping 426 fs/affs/file.c affs_write_failed(mapping, pos + len); mapping 431 fs/affs/file.c static sector_t _affs_bmap(struct address_space *mapping, sector_t block) mapping 433 fs/affs/file.c return generic_block_bmap(mapping,block,affs_get_block); mapping 505 fs/affs/file.c struct inode *inode = page->mapping->host; mapping 613 fs/affs/file.c struct inode *inode = page->mapping->host; mapping 631 fs/affs/file.c static int affs_write_begin_ofs(struct file *file, struct address_space *mapping, mapping 635 fs/affs/file.c struct inode *inode = mapping->host; mapping 652 fs/affs/file.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 669 fs/affs/file.c static int affs_write_end_ofs(struct file *file, struct address_space *mapping, mapping 673 fs/affs/file.c struct inode *inode = mapping->host; mapping 855 fs/affs/file.c struct address_space *mapping = inode->i_mapping; mapping 861 fs/affs/file.c res = mapping->a_ops->write_begin(NULL, mapping, isize, 0, 0, &page, &fsdata); mapping 863 fs/affs/file.c res = mapping->a_ops->write_end(NULL, mapping, isize, 0, 0, page, fsdata); mapping 17 fs/affs/symlink.c struct inode *inode = page->mapping->host; mapping 2019 fs/afs/dir.c struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host); mapping 2040 fs/afs/dir.c struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host); mapping 25 fs/afs/file.c static int afs_readpages(struct file *filp, struct address_space *mapping, mapping 274 fs/afs/file.c struct inode *inode = page->mapping->host; mapping 399 fs/afs/file.c struct inode *inode = page->mapping->host; mapping 440 fs/afs/file.c static int afs_readpages_one(struct file *file, struct address_space *mapping, mapping 443 fs/afs/file.c struct afs_vnode *vnode = AFS_FS_I(mapping->host); mapping 489 fs/afs/file.c if (add_to_page_cache_lru(page, mapping, index, mapping 490 fs/afs/file.c readahead_gfp_mask(mapping))) { mapping 541 fs/afs/file.c static int afs_readpages(struct file *file, struct address_space *mapping, mapping 549 fs/afs/file.c key_serial(key), mapping->host->i_ino, nr_pages); mapping 553 fs/afs/file.c vnode = AFS_FS_I(mapping->host); mapping 562 fs/afs/file.c mapping, mapping 567 fs/afs/file.c mapping_gfp_mask(mapping)); mapping 592 fs/afs/file.c ret = afs_readpages_one(file, mapping, pages); mapping 609 fs/afs/file.c struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); mapping 620 fs/afs/file.c struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); mapping 644 fs/afs/file.c struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); mapping 1138 fs/afs/fsclient.c struct address_space *mapping, mapping 1159 fs/afs/fsclient.c call->mapping = mapping; mapping 1197 fs/afs/fsclient.c int afs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, mapping 1209 fs/afs/fsclient.c return yfs_fs_store_data(fc, mapping, first, last, offset, to, scb); mapping 1229 fs/afs/fsclient.c return afs_fs_store_data64(fc, mapping, first, last, offset, to, mapping 1239 fs/afs/fsclient.c call->mapping = mapping; 
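
The filesystem entries in this stretch of the index (adfs, affs, bfs, and the afs entries that follow) all reach the page cache through the same pair of struct address_space hooks: write_begin prepares a locked page for the copy, write_end commits it, and a small write_failed helper truncates anything instantiated past i_size when the write aborts. Below is a minimal sketch of that idiom in the shape the bfs entries suggest; "foofs" and foofs_get_block() are hypothetical stand-ins, not code from any filesystem listed here.

/*
 * Hypothetical "foofs": the write_begin/write_failed idiom from the
 * bfs/adfs/affs index entries above. foofs_get_block() is assumed to
 * be the filesystem's get_block_t block allocator.
 */
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>

static int foofs_get_block(struct inode *inode, sector_t block,
			   struct buffer_head *bh_result, int create);

/* A short write may have allocated blocks past i_size; drop them. */
static void foofs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size)
		truncate_pagecache(inode, inode->i_size);
}

static int foofs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	int ret;

	*pagep = NULL;
	ret = block_write_begin(mapping, pos, len, flags, pagep,
				foofs_get_block);
	if (unlikely(ret))
		foofs_write_failed(mapping, pos + len);
	return ret;
}

The adfs and affs variants in the entries above differ only in calling cont_write_begin() instead, passing a per-inode mmu_private high-water mark so the gap between the old end of file and the write position is zero-filled.
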
mapping 117 fs/afs/internal.h struct address_space *mapping; /* Pages being written from */ mapping 1338 fs/afs/internal.h extern int afs_write_begin(struct file *file, struct address_space *mapping, mapping 1341 fs/afs/internal.h extern int afs_write_end(struct file *file, struct address_space *mapping, mapping 290 fs/afs/rxrpc.c n = find_get_pages_contig(call->mapping, first, nr, pages); mapping 77 fs/afs/write.c int afs_write_begin(struct file *file, struct address_space *mapping, mapping 98 fs/afs/write.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 181 fs/afs/write.c int afs_write_end(struct file *file, struct address_space *mapping, mapping 232 fs/afs/write.c static void afs_kill_pages(struct address_space *mapping, mapping 235 fs/afs/write.c struct afs_vnode *vnode = AFS_FS_I(mapping->host); mapping 250 fs/afs/write.c pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); mapping 261 fs/afs/write.c generic_error_remove_page(mapping, page); mapping 275 fs/afs/write.c struct address_space *mapping, mapping 278 fs/afs/write.c struct afs_vnode *vnode = AFS_FS_I(mapping->host); mapping 293 fs/afs/write.c pv.nr = find_get_pages_contig(mapping, first, count, pv.pages); mapping 354 fs/afs/write.c static int afs_store_data(struct address_space *mapping, mapping 358 fs/afs/write.c struct afs_vnode *vnode = AFS_FS_I(mapping->host); mapping 410 fs/afs/write.c afs_fs_store_data(&fc, mapping, first, last, offset, to, scb); mapping 451 fs/afs/write.c static int afs_write_back_from_locked_page(struct address_space *mapping, mapping 456 fs/afs/write.c struct afs_vnode *vnode = AFS_FS_I(mapping->host); mapping 496 fs/afs/write.c n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages); mapping 563 fs/afs/write.c ret = afs_store_data(mapping, first, last, offset, to); mapping 578 fs/afs/write.c afs_redirty_pages(wbc, mapping, first, last); mapping 579 fs/afs/write.c mapping_set_error(mapping, ret); mapping 584 fs/afs/write.c afs_redirty_pages(wbc, mapping, first, last); mapping 585 fs/afs/write.c mapping_set_error(mapping, -ENOSPC); mapping 596 fs/afs/write.c afs_kill_pages(mapping, first, last); mapping 597 fs/afs/write.c mapping_set_error(mapping, ret); mapping 615 fs/afs/write.c ret = afs_write_back_from_locked_page(page->mapping, wbc, page, mapping 631 fs/afs/write.c static int afs_writepages_region(struct address_space *mapping, mapping 641 fs/afs/write.c n = find_get_pages_range_tag(mapping, &index, end, mapping 661 fs/afs/write.c if (page->mapping != mapping || !PageDirty(page)) { mapping 677 fs/afs/write.c ret = afs_write_back_from_locked_page(mapping, wbc, page, end); mapping 697 fs/afs/write.c int afs_writepages(struct address_space *mapping, mapping 706 fs/afs/write.c start = mapping->writeback_index; mapping 708 fs/afs/write.c ret = afs_writepages_region(mapping, wbc, start, end, &next); mapping 710 fs/afs/write.c ret = afs_writepages_region(mapping, wbc, 0, start, mapping 712 fs/afs/write.c mapping->writeback_index = next; mapping 715 fs/afs/write.c ret = afs_writepages_region(mapping, wbc, 0, end, &next); mapping 717 fs/afs/write.c mapping->writeback_index = next; mapping 721 fs/afs/write.c ret = afs_writepages_region(mapping, wbc, start, end, &next); mapping 852 fs/afs/write.c struct address_space *mapping = page->mapping; mapping 853 fs/afs/write.c struct afs_vnode *vnode = AFS_FS_I(mapping->host); mapping 871 fs/afs/write.c ret = afs_store_data(mapping, page->index, page->index, t, f); mapping 1247 fs/afs/yfsclient.c int 
yfs_fs_store_data(struct afs_fs_cursor *fc, struct address_space *mapping, mapping 1287 fs/afs/yfsclient.c call->mapping = mapping; mapping 377 fs/aio.c static int aio_migratepage(struct address_space *mapping, struct page *new, mapping 396 fs/aio.c spin_lock(&mapping->private_lock); mapping 397 fs/aio.c ctx = mapping->private_data; mapping 427 fs/aio.c rc = migrate_page_move_mapping(mapping, new, old, 1); mapping 449 fs/aio.c spin_unlock(&mapping->private_lock); mapping 43 fs/befs/linuxvfs.c static sector_t befs_bmap(struct address_space *mapping, sector_t block); mapping 117 fs/befs/linuxvfs.c befs_bmap(struct address_space *mapping, sector_t block) mapping 119 fs/befs/linuxvfs.c return generic_block_bmap(mapping, block, befs_get_block); mapping 472 fs/befs/linuxvfs.c struct inode *inode = page->mapping->host; mapping 163 fs/bfs/file.c static void bfs_write_failed(struct address_space *mapping, loff_t to) mapping 165 fs/bfs/file.c struct inode *inode = mapping->host; mapping 171 fs/bfs/file.c static int bfs_write_begin(struct file *file, struct address_space *mapping, mapping 177 fs/bfs/file.c ret = block_write_begin(mapping, pos, len, flags, pagep, mapping 180 fs/bfs/file.c bfs_write_failed(mapping, pos + len); mapping 185 fs/bfs/file.c static sector_t bfs_bmap(struct address_space *mapping, sector_t block) mapping 187 fs/bfs/file.c return generic_block_bmap(mapping, block, bfs_get_block); mapping 81 fs/block_dev.c struct address_space *mapping = bdev->bd_inode->i_mapping; mapping 83 fs/block_dev.c if (mapping->nrpages == 0 && mapping->nrexceptional == 0) mapping 87 fs/block_dev.c truncate_inode_pages(mapping, 0); mapping 94 fs/block_dev.c struct address_space *mapping = bdev->bd_inode->i_mapping; mapping 96 fs/block_dev.c if (mapping->nrpages) { mapping 99 fs/block_dev.c invalidate_mapping_pages(mapping, 0, -1); mapping 104 fs/block_dev.c cleancache_invalidate_inode(mapping); mapping 618 fs/block_dev.c static int blkdev_readpages(struct file *file, struct address_space *mapping, mapping 621 fs/block_dev.c return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block); mapping 624 fs/block_dev.c static int blkdev_write_begin(struct file *file, struct address_space *mapping, mapping 628 fs/block_dev.c return block_write_begin(mapping, pos, len, flags, pagep, mapping 632 fs/block_dev.c static int blkdev_write_end(struct file *file, struct address_space *mapping, mapping 637 fs/block_dev.c ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); mapping 2025 fs/block_dev.c struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super; mapping 2033 fs/block_dev.c static int blkdev_writepages(struct address_space *mapping, mapping 2036 fs/block_dev.c return generic_writepages(mapping, wbc); mapping 2060 fs/block_dev.c struct address_space *mapping; mapping 2088 fs/block_dev.c mapping = bdev->bd_inode->i_mapping; mapping 2089 fs/block_dev.c truncate_inode_pages_range(mapping, start, end); mapping 2116 fs/block_dev.c return invalidate_inode_pages2_range(mapping, mapping 2220 fs/block_dev.c struct address_space *mapping = inode->i_mapping; mapping 2225 fs/block_dev.c mapping->nrpages == 0) { mapping 178 fs/btrfs/compression.c page->mapping = NULL; mapping 273 fs/btrfs/compression.c cb->compressed_pages[0]->mapping = cb->inode->i_mapping; mapping 277 fs/btrfs/compression.c cb->compressed_pages[0]->mapping = NULL; mapping 289 fs/btrfs/compression.c page->mapping = NULL; mapping 357 fs/btrfs/compression.c page->mapping = inode->i_mapping; mapping 362 
fs/btrfs/compression.c page->mapping = NULL; mapping 440 fs/btrfs/compression.c struct address_space *mapping = inode->i_mapping; mapping 461 fs/btrfs/compression.c page = xa_load(&mapping->i_pages, pg_index); mapping 469 fs/btrfs/compression.c page = __page_cache_alloc(mapping_gfp_constraint(mapping, mapping 474 fs/btrfs/compression.c if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { mapping 637 fs/btrfs/compression.c page->mapping = inode->i_mapping; mapping 644 fs/btrfs/compression.c page->mapping = NULL; mapping 1032 fs/btrfs/compression.c int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, mapping 1045 fs/btrfs/compression.c ret = btrfs_compress_op[type]->compress_pages(workspace, mapping, mapping 80 fs/btrfs/compression.h int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping, mapping 143 fs/btrfs/compression.h struct address_space *mapping, mapping 2355 fs/btrfs/ctree.h static inline gfp_t btrfs_alloc_write_mask(struct address_space *mapping) mapping 2357 fs/btrfs/ctree.h return mapping_gfp_constraint(mapping, ~__GFP_FS); mapping 596 fs/btrfs/disk-io.c struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; mapping 849 fs/btrfs/disk-io.c root = BTRFS_I(bvec->bv_page->mapping->host)->root; mapping 921 fs/btrfs/disk-io.c static int btree_migratepage(struct address_space *mapping, mapping 938 fs/btrfs/disk-io.c return migrate_page(mapping, newpage, page, mode); mapping 943 fs/btrfs/disk-io.c static int btree_writepages(struct address_space *mapping, mapping 954 fs/btrfs/disk-io.c fs_info = BTRFS_I(mapping->host)->root->fs_info; mapping 962 fs/btrfs/disk-io.c return btree_write_cache_pages(mapping, wbc); mapping 968 fs/btrfs/disk-io.c tree = &BTRFS_I(page->mapping->host)->io_tree; mapping 984 fs/btrfs/disk-io.c tree = &BTRFS_I(page->mapping->host)->io_tree; mapping 988 fs/btrfs/disk-io.c btrfs_warn(BTRFS_I(page->mapping->host)->root->fs_info, mapping 4111 fs/btrfs/disk-io.c struct inode *btree_inode = buf->pages[0]->mapping->host; mapping 4140 fs/btrfs/disk-io.c root = BTRFS_I(buf->pages[0]->mapping->host)->root; mapping 1736 fs/btrfs/extent_io.c static int __process_pages_contig(struct address_space *mapping, mapping 1867 fs/btrfs/extent_io.c static int __process_pages_contig(struct address_space *mapping, mapping 1886 fs/btrfs/extent_io.c mapping_set_error(mapping, -EIO); mapping 1889 fs/btrfs/extent_io.c ret = find_get_pages_contig(mapping, index, mapping 1924 fs/btrfs/extent_io.c pages[i]->mapping != mapping) { mapping 2572 fs/btrfs/extent_io.c struct inode *inode = page->mapping->host; mapping 2631 fs/btrfs/extent_io.c mapping_set_error(page->mapping, ret); mapping 2655 fs/btrfs/extent_io.c struct inode *inode = page->mapping->host; mapping 2726 fs/btrfs/extent_io.c struct inode *inode = page->mapping->host; mapping 2988 fs/btrfs/extent_io.c bio->bi_write_hint = page->mapping->host->i_write_hint; mapping 3063 fs/btrfs/extent_io.c struct inode *inode = page->mapping->host; mapping 3272 fs/btrfs/extent_io.c struct btrfs_inode *inode = BTRFS_I(pages[0]->mapping->host); mapping 3291 fs/btrfs/extent_io.c struct btrfs_inode *inode = BTRFS_I(page->mapping->host); mapping 3557 fs/btrfs/extent_io.c struct inode *inode = page->mapping->host; mapping 3579 fs/btrfs/extent_io.c page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); mapping 3920 fs/btrfs/extent_io.c int btree_write_cache_pages(struct address_space *mapping, mapping 3923 fs/btrfs/extent_io.c struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree; 
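The btree_write_cache_pages and extent_write_cache_pages entries that follow, like ceph_writepages_start, cifs_writepages and f2fs_write_cache_pages further down the index, all walk dirty pages with the same tag-then-lookup loop. Below is a deliberately simplified sketch of that shared skeleton, assuming a hypothetical myfs_writepage_locked() that writes out and unlocks one locked page; the real walkers layer range_cyclic wraparound, nr_to_write accounting and fs-specific clustering on top of this.

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/writeback.h>

    /* Hypothetical: write out one locked, dirty-cleared page, then unlock it. */
    static int myfs_writepage_locked(struct page *page,
                                     struct writeback_control *wbc);

    static int myfs_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
    {
            pgoff_t index = mapping->writeback_index; /* resume at prev offset */
            pgoff_t end = -1;                         /* simplified: whole file */
            xa_mark_t tag = wbc->sync_mode == WB_SYNC_ALL ?
                            PAGECACHE_TAG_TOWRITE : PAGECACHE_TAG_DIRTY;
            struct pagevec pvec;
            unsigned nr_pages;
            int ret = 0, i;

            pagevec_init(&pvec);
            if (wbc->sync_mode == WB_SYNC_ALL)
                    /* Freeze the working set up front so pages dirtied
                     * during the walk cannot make it run forever. */
                    tag_pages_for_writeback(mapping, index, end);

            while (!ret && (nr_pages = pagevec_lookup_range_tag(&pvec, mapping,
                                            &index, end, tag))) {
                    for (i = 0; i < nr_pages && !ret; i++) {
                            struct page *page = pvec.pages[i];

                            lock_page(page);
                            /* The page may have been truncated or written back
                             * by someone else between lookup and lock, hence
                             * the page->mapping recheck under the page lock. */
                            if (unlikely(page->mapping != mapping) ||
                                !PageDirty(page)) {
                                    unlock_page(page);
                                    continue;
                            }
                            if (!clear_page_dirty_for_io(page)) {
                                    unlock_page(page);
                                    continue;
                            }
                            ret = myfs_writepage_locked(page, wbc);
                            if (ret)
                                    mapping_set_error(mapping, ret);
                    }
                    pagevec_release(&pvec);
                    cond_resched();
            }
            mapping->writeback_index = index;
            return ret;
    }

Every "unlikely(page->mapping != mapping)" entry in this index (btrfs, ceph, cifs, ext4, f2fs) is an instance of the truncate-race recheck shown above.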
mapping 3931 fs/btrfs/extent_io.c struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; mapping 3944 fs/btrfs/extent_io.c index = mapping->writeback_index; /* Start from prev offset */ mapping 3962 fs/btrfs/extent_io.c tag_pages_for_writeback(mapping, index, end); mapping 3964 fs/btrfs/extent_io.c (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, mapping 3974 fs/btrfs/extent_io.c spin_lock(&mapping->private_lock); mapping 3976 fs/btrfs/extent_io.c spin_unlock(&mapping->private_lock); mapping 3988 fs/btrfs/extent_io.c spin_unlock(&mapping->private_lock); mapping 3993 fs/btrfs/extent_io.c spin_unlock(&mapping->private_lock); mapping 3998 fs/btrfs/extent_io.c spin_unlock(&mapping->private_lock); mapping 4095 fs/btrfs/extent_io.c static int extent_write_cache_pages(struct address_space *mapping, mapping 4099 fs/btrfs/extent_io.c struct inode *inode = mapping->host; mapping 4126 fs/btrfs/extent_io.c index = mapping->writeback_index; /* Start from prev offset */ mapping 4159 fs/btrfs/extent_io.c tag_pages_for_writeback(mapping, index, end); mapping 4162 fs/btrfs/extent_io.c (nr_pages = pagevec_lookup_range_tag(&pvec, mapping, mapping 4183 fs/btrfs/extent_io.c if (unlikely(page->mapping != mapping)) { mapping 4238 fs/btrfs/extent_io.c mapping->writeback_index = done_index; mapping 4249 fs/btrfs/extent_io.c .tree = &BTRFS_I(page->mapping->host)->io_tree, mapping 4270 fs/btrfs/extent_io.c struct address_space *mapping = inode->i_mapping; mapping 4290 fs/btrfs/extent_io.c page = find_get_page(mapping, start >> PAGE_SHIFT); mapping 4311 fs/btrfs/extent_io.c int extent_writepages(struct address_space *mapping, mapping 4317 fs/btrfs/extent_io.c .tree = &BTRFS_I(mapping->host)->io_tree, mapping 4322 fs/btrfs/extent_io.c ret = extent_write_cache_pages(mapping, wbc, &epd); mapping 4332 fs/btrfs/extent_io.c int extent_readpages(struct address_space *mapping, struct list_head *pages, mapping 4339 fs/btrfs/extent_io.c struct extent_io_tree *tree = &BTRFS_I(mapping->host)->io_tree; mapping 4351 fs/btrfs/extent_io.c if (add_to_page_cache_lru(page, mapping, page->index, mapping 4352 fs/btrfs/extent_io.c readahead_gfp_mask(mapping))) { mapping 4391 fs/btrfs/extent_io.c size_t blocksize = page->mapping->host->i_sb->s_blocksize; mapping 4448 fs/btrfs/extent_io.c struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host); mapping 4453 fs/btrfs/extent_io.c page->mapping->host->i_size > SZ_16M) { mapping 4879 fs/btrfs/extent_io.c spin_lock(&page->mapping->private_lock); mapping 4903 fs/btrfs/extent_io.c spin_unlock(&page->mapping->private_lock); mapping 5163 fs/btrfs/extent_io.c struct address_space *mapping = fs_info->btree_inode->i_mapping; mapping 5182 fs/btrfs/extent_io.c p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL); mapping 5188 fs/btrfs/extent_io.c spin_lock(&mapping->private_lock); mapping 5199 fs/btrfs/extent_io.c spin_unlock(&mapping->private_lock); mapping 5216 fs/btrfs/extent_io.c spin_unlock(&mapping->private_lock); mapping 5381 fs/btrfs/extent_io.c xa_lock_irq(&page->mapping->i_pages); mapping 5383 fs/btrfs/extent_io.c __xa_clear_mark(&page->mapping->i_pages, mapping 5385 fs/btrfs/extent_io.c xa_unlock_irq(&page->mapping->i_pages); mapping 6096 fs/btrfs/extent_io.c spin_lock(&page->mapping->private_lock); mapping 6098 fs/btrfs/extent_io.c spin_unlock(&page->mapping->private_lock); mapping 6113 fs/btrfs/extent_io.c spin_unlock(&page->mapping->private_lock); mapping 6116 fs/btrfs/extent_io.c spin_unlock(&page->mapping->private_lock); mapping 411 
fs/btrfs/extent_io.h int extent_writepages(struct address_space *mapping, mapping 413 fs/btrfs/extent_io.h int btree_write_cache_pages(struct address_space *mapping, mapping 415 fs/btrfs/extent_io.h int extent_readpages(struct address_space *mapping, struct list_head *pages, mapping 1410 fs/btrfs/file.c if (page->mapping != inode->i_mapping) { mapping 2249 fs/btrfs/file.c struct address_space *mapping = filp->f_mapping; mapping 2251 fs/btrfs/file.c if (!mapping->a_ops->readpage) mapping 388 fs/btrfs/free-space-cache.c if (page->mapping != inode->i_mapping) { mapping 644 fs/btrfs/inode.c WARN_ON(pages[i]->mapping); mapping 694 fs/btrfs/inode.c WARN_ON(pages[i]->mapping); mapping 739 fs/btrfs/inode.c WARN_ON(async_extent->pages[i]->mapping); mapping 888 fs/btrfs/inode.c p->mapping = inode->i_mapping; mapping 891 fs/btrfs/inode.c p->mapping = NULL; mapping 2020 fs/btrfs/inode.c struct inode *inode = page->mapping->host; mapping 2208 fs/btrfs/inode.c if (!page->mapping || !PageDirty(page) || !PageChecked(page)) { mapping 2289 fs/btrfs/inode.c mapping_set_error(page->mapping, ret); mapping 2320 fs/btrfs/inode.c struct inode *inode = page->mapping->host; mapping 3342 fs/btrfs/inode.c struct inode *inode = page->mapping->host; mapping 3408 fs/btrfs/inode.c struct inode *inode = page->mapping->host; mapping 5044 fs/btrfs/inode.c struct address_space *mapping = inode->i_mapping; mapping 5054 fs/btrfs/inode.c gfp_t mask = btrfs_alloc_write_mask(mapping); mapping 5072 fs/btrfs/inode.c page = find_or_create_page(mapping, index, mask); mapping 5084 fs/btrfs/inode.c if (page->mapping != mapping) { mapping 8858 fs/btrfs/inode.c tree = &BTRFS_I(page->mapping->host)->io_tree; mapping 8864 fs/btrfs/inode.c struct inode *inode = page->mapping->host; mapping 8887 fs/btrfs/inode.c static int btrfs_writepages(struct address_space *mapping, mapping 8890 fs/btrfs/inode.c return extent_writepages(mapping, wbc); mapping 8894 fs/btrfs/inode.c btrfs_readpages(struct file *file, struct address_space *mapping, mapping 8897 fs/btrfs/inode.c return extent_readpages(mapping, pages, nr_pages); mapping 8921 fs/btrfs/inode.c struct inode *inode = page->mapping->host; mapping 9099 fs/btrfs/inode.c if ((page->mapping != inode->i_mapping) || mapping 1296 fs/btrfs/ioctl.c if (page->mapping != inode->i_mapping) { mapping 1314 fs/btrfs/ioctl.c if (page->mapping != inode->i_mapping) { mapping 135 fs/btrfs/lzo.c struct address_space *mapping, mapping 166 fs/btrfs/lzo.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); mapping 274 fs/btrfs/lzo.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); mapping 917 fs/btrfs/transaction.c struct address_space *mapping = fs_info->btree_inode->i_mapping; mapping 948 fs/btrfs/transaction.c err = filemap_fdatawrite_range(mapping, start, end); mapping 952 fs/btrfs/transaction.c werr = filemap_fdatawait_range(mapping, start, end); mapping 973 fs/btrfs/transaction.c struct address_space *mapping = fs_info->btree_inode->i_mapping; mapping 993 fs/btrfs/transaction.c err = filemap_fdatawait_range(mapping, start, end); mapping 231 fs/btrfs/tree-log.c return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start, mapping 237 fs/btrfs/tree-log.c filemap_fdatawait_range(buf->pages[0]->mapping, mapping 92 fs/btrfs/zlib.c struct address_space *mapping, mapping 124 fs/btrfs/zlib.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); mapping 194 fs/btrfs/zlib.c in_page = find_get_page(mapping, mapping 371 fs/btrfs/zstd.c struct address_space *mapping, mapping 406 fs/btrfs/zstd.c in_page = 
find_get_page(mapping, start >> PAGE_SHIFT); mapping 486 fs/btrfs/zstd.c in_page = find_get_page(mapping, start >> PAGE_SHIFT); mapping 516 fs/buffer.c int sync_mapping_buffers(struct address_space *mapping) mapping 518 fs/buffer.c struct address_space *buffer_mapping = mapping->private_data; mapping 520 fs/buffer.c if (buffer_mapping == NULL || list_empty(&mapping->private_list)) mapping 524 fs/buffer.c &mapping->private_list); mapping 547 fs/buffer.c struct address_space *mapping = inode->i_mapping; mapping 548 fs/buffer.c struct address_space *buffer_mapping = bh->b_page->mapping; mapping 551 fs/buffer.c if (!mapping->private_data) { mapping 552 fs/buffer.c mapping->private_data = buffer_mapping; mapping 554 fs/buffer.c BUG_ON(mapping->private_data != buffer_mapping); mapping 559 fs/buffer.c &mapping->private_list); mapping 560 fs/buffer.c bh->b_assoc_map = mapping; mapping 575 fs/buffer.c void __set_page_dirty(struct page *page, struct address_space *mapping, mapping 580 fs/buffer.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 581 fs/buffer.c if (page->mapping) { /* Race with truncate? */ mapping 583 fs/buffer.c account_page_dirtied(page, mapping); mapping 584 fs/buffer.c __xa_set_mark(&mapping->i_pages, page_index(page), mapping 587 fs/buffer.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 619 fs/buffer.c struct address_space *mapping = page_mapping(page); mapping 621 fs/buffer.c if (unlikely(!mapping)) mapping 624 fs/buffer.c spin_lock(&mapping->private_lock); mapping 640 fs/buffer.c spin_unlock(&mapping->private_lock); mapping 643 fs/buffer.c __set_page_dirty(page, mapping, 1); mapping 648 fs/buffer.c __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); mapping 677 fs/buffer.c struct address_space *mapping; mapping 687 fs/buffer.c mapping = bh->b_assoc_map; mapping 694 fs/buffer.c bh->b_assoc_map = mapping; mapping 726 fs/buffer.c mapping = bh->b_assoc_map; mapping 733 fs/buffer.c &mapping->private_list); mapping 734 fs/buffer.c bh->b_assoc_map = mapping; mapping 764 fs/buffer.c struct address_space *mapping = &inode->i_data; mapping 765 fs/buffer.c struct list_head *list = &mapping->private_list; mapping 766 fs/buffer.c struct address_space *buffer_mapping = mapping->private_data; mapping 787 fs/buffer.c struct address_space *mapping = &inode->i_data; mapping 788 fs/buffer.c struct list_head *list = &mapping->private_list; mapping 789 fs/buffer.c struct address_space *buffer_mapping = mapping->private_data; mapping 1106 fs/buffer.c struct address_space *mapping = NULL; mapping 1110 fs/buffer.c mapping = page_mapping(page); mapping 1111 fs/buffer.c if (mapping) mapping 1112 fs/buffer.c __set_page_dirty(page, mapping, 0); mapping 1115 fs/buffer.c if (mapping) mapping 1116 fs/buffer.c __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); mapping 1125 fs/buffer.c if (bh->b_page && bh->b_page->mapping) mapping 1126 fs/buffer.c mapping_set_error(bh->b_page->mapping, -EIO); mapping 1157 fs/buffer.c struct address_space *buffer_mapping = bh->b_page->mapping; mapping 1538 fs/buffer.c spin_lock(&page->mapping->private_lock); mapping 1550 fs/buffer.c spin_unlock(&page->mapping->private_lock); mapping 1827 fs/buffer.c mapping_set_error(page->mapping, err); mapping 1946 fs/buffer.c struct inode *inode = page->mapping->host; mapping 2077 fs/buffer.c int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, mapping 2084 fs/buffer.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 2100 fs/buffer.c int block_write_end(struct file *file, struct 
address_space *mapping, mapping 2104 fs/buffer.c struct inode *inode = mapping->host; mapping 2136 fs/buffer.c int generic_write_end(struct file *file, struct address_space *mapping, mapping 2140 fs/buffer.c struct inode *inode = mapping->host; mapping 2144 fs/buffer.c copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); mapping 2229 fs/buffer.c struct inode *inode = page->mapping->host; mapping 2319 fs/buffer.c struct address_space *mapping = inode->i_mapping; mapping 2328 fs/buffer.c err = pagecache_write_begin(NULL, mapping, size, 0, mapping 2333 fs/buffer.c err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); mapping 2341 fs/buffer.c static int cont_expand_zero(struct file *file, struct address_space *mapping, mapping 2344 fs/buffer.c struct inode *inode = mapping->host; mapping 2364 fs/buffer.c err = pagecache_write_begin(file, mapping, curpos, len, 0, mapping 2369 fs/buffer.c err = pagecache_write_end(file, mapping, curpos, len, len, mapping 2376 fs/buffer.c balance_dirty_pages_ratelimited(mapping); mapping 2397 fs/buffer.c err = pagecache_write_begin(file, mapping, curpos, len, 0, mapping 2402 fs/buffer.c err = pagecache_write_end(file, mapping, curpos, len, len, mapping 2417 fs/buffer.c int cont_write_begin(struct file *file, struct address_space *mapping, mapping 2422 fs/buffer.c struct inode *inode = mapping->host; mapping 2427 fs/buffer.c err = cont_expand_zero(file, mapping, pos, bytes); mapping 2437 fs/buffer.c return block_write_begin(mapping, pos, len, flags, pagep, get_block); mapping 2443 fs/buffer.c struct inode *inode = page->mapping->host; mapping 2478 fs/buffer.c if ((page->mapping != inode->i_mapping) || mapping 2527 fs/buffer.c spin_lock(&page->mapping->private_lock); mapping 2537 fs/buffer.c spin_unlock(&page->mapping->private_lock); mapping 2545 fs/buffer.c int nobh_write_begin(struct address_space *mapping, mapping 2550 fs/buffer.c struct inode *inode = mapping->host; mapping 2568 fs/buffer.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 2686 fs/buffer.c int nobh_write_end(struct file *file, struct address_space *mapping, mapping 2690 fs/buffer.c struct inode *inode = page->mapping->host; mapping 2698 fs/buffer.c return generic_write_end(file, mapping, pos, len, mapping 2729 fs/buffer.c struct inode * const inode = page->mapping->host; mapping 2749 fs/buffer.c if (page->mapping->a_ops->invalidatepage) mapping 2750 fs/buffer.c page->mapping->a_ops->invalidatepage(page, offset); mapping 2773 fs/buffer.c int nobh_truncate_page(struct address_space *mapping, mapping 2781 fs/buffer.c struct inode *inode = mapping->host; mapping 2796 fs/buffer.c page = grab_cache_page(mapping, index); mapping 2805 fs/buffer.c return block_truncate_page(mapping, from, get_block); mapping 2826 fs/buffer.c err = mapping->a_ops->readpage(NULL, page); mapping 2851 fs/buffer.c int block_truncate_page(struct address_space *mapping, mapping 2859 fs/buffer.c struct inode *inode = mapping->host; mapping 2874 fs/buffer.c page = grab_cache_page(mapping, index); mapping 2933 fs/buffer.c struct inode * const inode = page->mapping->host; mapping 2969 fs/buffer.c sector_t generic_block_bmap(struct address_space *mapping, sector_t block, mapping 2972 fs/buffer.c struct inode *inode = mapping->host; mapping 3247 fs/buffer.c struct address_space * const mapping = page->mapping; mapping 3255 fs/buffer.c if (mapping == NULL) { /* can this still happen? 
*/ mapping 3260 fs/buffer.c spin_lock(&mapping->private_lock); mapping 3279 fs/buffer.c spin_unlock(&mapping->private_lock); mapping 89 fs/cachefiles/rdwr.c if (backpage->mapping != bmapping) { mapping 530 fs/cachefiles/rdwr.c ret = add_to_page_cache_lru(netpage, op->mapping, mapping 606 fs/cachefiles/rdwr.c ret = add_to_page_cache_lru(netpage, op->mapping, mapping 75 fs/ceph/addr.c struct address_space *mapping = page->mapping; mapping 81 fs/ceph/addr.c if (unlikely(!mapping)) mapping 86 fs/ceph/addr.c mapping->host, page, page->index); mapping 91 fs/ceph/addr.c inode = mapping->host; mapping 114 fs/ceph/addr.c mapping->host, page, page->index, mapping 130 fs/ceph/addr.c WARN_ON(!page->mapping); mapping 147 fs/ceph/addr.c inode = page->mapping->host; mapping 175 fs/ceph/addr.c dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host, mapping 441 fs/ceph/addr.c static int ceph_readpages(struct file *file, struct address_space *mapping, mapping 454 fs/ceph/addr.c rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, mapping 591 fs/ceph/addr.c inode = page->mapping->host; mapping 616 fs/ceph/addr.c page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); mapping 673 fs/ceph/addr.c struct inode *inode = page->mapping->host; mapping 703 fs/ceph/addr.c struct address_space *mapping = inode->i_mapping; mapping 709 fs/ceph/addr.c mapping_set_error(mapping, rc); mapping 779 fs/ceph/addr.c static int ceph_writepages_start(struct address_space *mapping, mapping 782 fs/ceph/addr.c struct inode *inode = mapping->host; mapping 806 fs/ceph/addr.c mapping_set_error(mapping, -EIO); mapping 814 fs/ceph/addr.c start_index = wbc->range_cyclic ? mapping->writeback_index : 0; mapping 870 fs/ceph/addr.c pvec_pages = pagevec_lookup_range_nr_tag(&pvec, mapping, &index, mapping 886 fs/ceph/addr.c unlikely(page->mapping != mapping)) { mapping 909 fs/ceph/addr.c mapping->a_ops->invalidatepage(page, mapping 1164 fs/ceph/addr.c (nr = pagevec_lookup_tag(&pvec, mapping, &index, mapping 1183 fs/ceph/addr.c mapping->writeback_index = index; mapping 1317 fs/ceph/addr.c static int ceph_write_begin(struct file *file, struct address_space *mapping, mapping 1328 fs/ceph/addr.c page = grab_cache_page_write_begin(mapping, index, 0); mapping 1349 fs/ceph/addr.c static int ceph_write_end(struct file *file, struct address_space *mapping, mapping 1477 fs/ceph/addr.c struct address_space *mapping = inode->i_mapping; mapping 1478 fs/ceph/addr.c struct page *page = find_or_create_page(mapping, 0, mapping 1479 fs/ceph/addr.c mapping_gfp_constraint(mapping, mapping 1578 fs/ceph/addr.c if ((off > size) || (page->mapping != inode->i_mapping)) { mapping 1619 fs/ceph/addr.c struct address_space *mapping = inode->i_mapping; mapping 1627 fs/ceph/addr.c page = find_or_create_page(mapping, 0, mapping 1628 fs/ceph/addr.c mapping_gfp_constraint(mapping, mapping 1800 fs/ceph/addr.c struct address_space *mapping = file->f_mapping; mapping 1802 fs/ceph/addr.c if (!mapping->a_ops->readpage) mapping 253 fs/ceph/cache.c struct address_space *mapping, mapping 263 fs/ceph/cache.c ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages, mapping 265 fs/ceph/cache.c NULL, mapping_gfp_mask(mapping)); mapping 29 fs/ceph/cache.h struct address_space *mapping, mapping 55 fs/ceph/cache.h struct inode* inode = page->mapping->host; mapping 133 fs/ceph/cache.h struct address_space *mapping, mapping 1312 fs/cifs/cifsglob.h struct address_space *mapping; mapping 2096 fs/cifs/file.c struct address_space *mapping = page->mapping; 
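The fs/buffer.c fragments a little above (lines 575-587, __set_page_dirty) show the locking idiom all of these filesystems ultimately rely on when a page goes dirty: the dirty accounting and the PAGECACHE_TAG_DIRTY xarray mark are updated together under xa_lock_irqsave on mapping->i_pages, so writeback always sees a consistent view. Reassembled here from the listed fragments; the local-variable and WARN_ON_ONCE lines are assumptions filled in from context, not lines present in this index.

    static void __set_page_dirty(struct page *page,
                                 struct address_space *mapping, int warn)
    {
            unsigned long flags;

            xa_lock_irqsave(&mapping->i_pages, flags);
            if (page->mapping) {    /* Race with truncate? */
                    WARN_ON_ONCE(warn && !PageUptodate(page)); /* assumed */
                    account_page_dirtied(page, mapping);
                    __xa_set_mark(&mapping->i_pages, page_index(page),
                                  PAGECACHE_TAG_DIRTY);
            }
            xa_unlock_irqrestore(&mapping->i_pages, flags);
    }

Note the division of labour visible in the surrounding entries: the caller resolves the mapping and sets the page's own dirty flag first, calls __set_page_dirty(page, mapping, ...) to tag the xarray (entries at fs/buffer.c:643 and 1112), and only afterwards marks the owning inode with __mark_inode_dirty(mapping->host, I_DIRTY_PAGES) (entries at fs/buffer.c:648 and 1116), so the xarray mark never lags the page flag.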
mapping 2104 fs/cifs/file.c if (!mapping || !mapping->host) mapping 2107 fs/cifs/file.c inode = page->mapping->host; mapping 2119 fs/cifs/file.c if (offset > mapping->host->i_size) { mapping 2125 fs/cifs/file.c if (mapping->host->i_size - offset < (loff_t)to) mapping 2126 fs/cifs/file.c to = (unsigned)(mapping->host->i_size - offset); mapping 2128 fs/cifs/file.c rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY, mapping 2153 fs/cifs/file.c wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping, mapping 2164 fs/cifs/file.c *found_pages = find_get_pages_range_tag(mapping, index, end, mapping 2171 fs/cifs/file.c struct address_space *mapping, mapping 2192 fs/cifs/file.c if (unlikely(page->mapping != mapping)) { mapping 2223 fs/cifs/file.c if (page_offset(page) >= i_size_read(mapping->host)) { mapping 2250 fs/cifs/file.c struct address_space *mapping, struct writeback_control *wbc) mapping 2260 fs/cifs/file.c wdata->tailsz = min(i_size_read(mapping->host) - mapping 2278 fs/cifs/file.c static int cifs_writepages(struct address_space *mapping, mapping 2281 fs/cifs/file.c struct inode *inode = mapping->host; mapping 2297 fs/cifs/file.c return generic_writepages(mapping, wbc); mapping 2301 fs/cifs/file.c index = mapping->writeback_index; /* Start from prev offset */ mapping 2337 fs/cifs/file.c wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index, mapping 2352 fs/cifs/file.c nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc, mapping 2374 fs/cifs/file.c rc = wdata_send_pages(wdata, nr_pages, mapping, wbc); mapping 2392 fs/cifs/file.c mapping_set_error(mapping, rc); mapping 2431 fs/cifs/file.c mapping->writeback_index = index; mapping 2470 fs/cifs/file.c mapping_set_error(page->mapping, rc); mapping 2487 fs/cifs/file.c static int cifs_write_end(struct file *file, struct address_space *mapping, mapping 2492 fs/cifs/file.c struct inode *inode = mapping->host; mapping 4114 fs/cifs/file.c cifs_readpage_to_fscache(rdata->mapping->host, page); mapping 4137 fs/cifs/file.c eof = CIFS_I(rdata->mapping->host)->server_eof; mapping 4226 fs/cifs/file.c readpages_get_pages(struct address_space *mapping, struct list_head *page_list, mapping 4233 fs/cifs/file.c gfp_t gfp = readahead_gfp_mask(mapping); mapping 4245 fs/cifs/file.c rc = add_to_page_cache_locked(page, mapping, mapping 4272 fs/cifs/file.c if (add_to_page_cache_locked(page, mapping, page->index, gfp)) { mapping 4284 fs/cifs/file.c static int cifs_readpages(struct file *file, struct address_space *mapping, mapping 4303 fs/cifs/file.c rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, mapping 4319 fs/cifs/file.c __func__, file, mapping, num_pages); mapping 4365 fs/cifs/file.c rc = readpages_get_pages(mapping, page_list, rsize, &tmplist, mapping 4387 fs/cifs/file.c rdata->mapping = mapping; mapping 4431 fs/cifs/file.c cifs_fscache_readpages_cancel(mapping->host, page_list); mapping 4554 fs/cifs/file.c static int cifs_write_begin(struct file *file, struct address_space *mapping, mapping 4569 fs/cifs/file.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 4592 fs/cifs/file.c if (CIFS_CACHE_READ(CIFS_I(mapping->host))) { mapping 4593 fs/cifs/file.c i_size = i_size_read(mapping->host); mapping 4642 fs/cifs/file.c struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); mapping 4665 fs/cifs/file.c cifs_fscache_invalidate_page(page, page->mapping->host); mapping 222 fs/cifs/fscache.c struct inode *inode = page->mapping->host; mapping 276 fs/cifs/fscache.c struct 
address_space *mapping, mapping 284 fs/cifs/fscache.c ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, mapping 288 fs/cifs/fscache.c mapping_gfp_mask(mapping)); mapping 93 fs/cifs/fscache.h struct address_space *mapping, mapping 98 fs/cifs/fscache.h return __cifs_readpages_from_fscache(inode, mapping, pages, mapping 147 fs/cifs/fscache.h struct address_space *mapping, mapping 2172 fs/cifs/inode.c static int cifs_truncate_page(struct address_space *mapping, loff_t from) mapping 2179 fs/cifs/inode.c page = grab_cache_page(mapping, index); mapping 25 fs/coda/symlink.c struct inode *inode = page->mapping->host; mapping 185 fs/cramfs/inode.c struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; mapping 212 fs/cramfs/inode.c devsize = mapping->host->i_size >> PAGE_SHIFT; mapping 219 fs/cramfs/inode.c page = read_mapping_page(mapping, blocknr + i, NULL); mapping 822 fs/cramfs/inode.c struct inode *inode = page->mapping->host; mapping 223 fs/crypto/crypto.c const struct inode *inode = page->mapping->host; mapping 302 fs/crypto/crypto.c const struct inode *inode = page->mapping->host; mapping 331 fs/dax.c static void dax_associate_entry(void *entry, struct address_space *mapping, mapping 344 fs/dax.c WARN_ON_ONCE(page->mapping); mapping 345 fs/dax.c page->mapping = mapping; mapping 350 fs/dax.c static void dax_disassociate_entry(void *entry, struct address_space *mapping, mapping 362 fs/dax.c WARN_ON_ONCE(page->mapping && page->mapping != mapping); mapping 363 fs/dax.c page->mapping = NULL; mapping 397 fs/dax.c struct address_space *mapping = READ_ONCE(page->mapping); mapping 400 fs/dax.c if (!mapping || !dax_mapping(mapping)) mapping 411 fs/dax.c if (S_ISCHR(mapping->host->i_mode)) mapping 414 fs/dax.c xas.xa = &mapping->i_pages; mapping 416 fs/dax.c if (mapping != page->mapping) { mapping 438 fs/dax.c struct address_space *mapping = page->mapping; mapping 439 fs/dax.c XA_STATE(xas, &mapping->i_pages, page->index); mapping 441 fs/dax.c if (S_ISCHR(mapping->host->i_mode)) mapping 477 fs/dax.c struct address_space *mapping, unsigned int order) mapping 518 fs/dax.c unmap_mapping_pages(mapping, mapping 525 fs/dax.c dax_disassociate_entry(entry, mapping, false); mapping 528 fs/dax.c mapping->nrexceptional--; mapping 544 fs/dax.c mapping->nrexceptional++; mapping 549 fs/dax.c if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM)) mapping 576 fs/dax.c struct page *dax_layout_busy_page(struct address_space *mapping) mapping 578 fs/dax.c XA_STATE(xas, &mapping->i_pages, 0); mapping 589 fs/dax.c if (!dax_mapping(mapping) || !mapping_mapped(mapping)) mapping 604 fs/dax.c unmap_mapping_range(mapping, 0, 0, 0); mapping 630 fs/dax.c static int __dax_invalidate_entry(struct address_space *mapping, mapping 633 fs/dax.c XA_STATE(xas, &mapping->i_pages, index); mapping 645 fs/dax.c dax_disassociate_entry(entry, mapping, trunc); mapping 647 fs/dax.c mapping->nrexceptional--; mapping 659 fs/dax.c int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) mapping 661 fs/dax.c int ret = __dax_invalidate_entry(mapping, index, true); mapping 677 fs/dax.c int dax_invalidate_mapping_entry_sync(struct address_space *mapping, mapping 680 fs/dax.c return __dax_invalidate_entry(mapping, index, false); mapping 717 fs/dax.c struct address_space *mapping, struct vm_fault *vmf, mapping 723 fs/dax.c __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); mapping 729 fs/dax.c unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR, mapping 732 fs/dax.c unmap_mapping_pages(mapping, index, 
1, false); mapping 740 fs/dax.c dax_disassociate_entry(entry, mapping, false); mapping 741 fs/dax.c dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address); mapping 776 fs/dax.c static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index, mapping 784 fs/dax.c i_mmap_lock_read(mapping); mapping 785 fs/dax.c vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) { mapping 846 fs/dax.c i_mmap_unlock_read(mapping); mapping 850 fs/dax.c struct address_space *mapping, void *entry) mapping 912 fs/dax.c dax_entry_mkclean(mapping, index, pfn); mapping 926 fs/dax.c trace_dax_writeback_one(mapping->host, index, count); mapping 939 fs/dax.c int dax_writeback_mapping_range(struct address_space *mapping, mapping 942 fs/dax.c XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT); mapping 943 fs/dax.c struct inode *inode = mapping->host; mapping 953 fs/dax.c if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) mapping 962 fs/dax.c tag_pages_for_writeback(mapping, xas.xa_index, end_index); mapping 966 fs/dax.c ret = dax_writeback_one(&xas, dax_dev, mapping, entry); mapping 968 fs/dax.c mapping_set_error(mapping, ret); mapping 1031 fs/dax.c struct address_space *mapping, void **entry, mapping 1034 fs/dax.c struct inode *inode = mapping->host; mapping 1039 fs/dax.c *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, mapping 1198 fs/dax.c struct address_space *mapping = iocb->ki_filp->f_mapping; mapping 1199 fs/dax.c struct inode *inode = mapping->host; mapping 1249 fs/dax.c struct address_space *mapping = vma->vm_file->f_mapping; mapping 1250 fs/dax.c XA_STATE(xas, &mapping->i_pages, vmf->pgoff); mapping 1251 fs/dax.c struct inode *inode = mapping->host; mapping 1277 fs/dax.c entry = grab_mapping_entry(&xas, mapping, 0); mapping 1352 fs/dax.c entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, mapping 1380 fs/dax.c ret = dax_load_hole(&xas, mapping, &entry, vmf); mapping 1417 fs/dax.c struct address_space *mapping = vmf->vma->vm_file->f_mapping; mapping 1420 fs/dax.c struct inode *inode = mapping->host; mapping 1433 fs/dax.c *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, mapping 1470 fs/dax.c struct address_space *mapping = vma->vm_file->f_mapping; mapping 1471 fs/dax.c XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER); mapping 1476 fs/dax.c struct inode *inode = mapping->host; mapping 1529 fs/dax.c entry = grab_mapping_entry(&xas, mapping, PMD_ORDER); mapping 1568 fs/dax.c entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn, mapping 1672 fs/dax.c struct address_space *mapping = vmf->vma->vm_file->f_mapping; mapping 1673 fs/dax.c XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order); mapping 1684 fs/dax.c trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf, mapping 1700 fs/dax.c trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret); mapping 1213 fs/direct-io.c struct address_space *mapping = mapping 1219 fs/direct-io.c retval = filemap_write_and_wait_range(mapping, offset, mapping 462 fs/ecryptfs/crypto.c ecryptfs_inode = page->mapping->host; mapping 530 fs/ecryptfs/crypto.c ecryptfs_inode = page->mapping->host; mapping 134 fs/ecryptfs/mmap.c page_virt, page->mapping->host); mapping 156 fs/ecryptfs/mmap.c crypt_stat->extent_size, page->mapping->host); mapping 183 fs/ecryptfs/mmap.c &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; mapping 189 fs/ecryptfs/mmap.c page->mapping->host); mapping 206 fs/ecryptfs/mmap.c page->mapping->host); mapping 237 fs/ecryptfs/mmap.c struct inode *inode = 
page->mapping->host; mapping 265 fs/ecryptfs/mmap.c struct address_space *mapping, mapping 274 fs/ecryptfs/mmap.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 282 fs/ecryptfs/mmap.c &ecryptfs_inode_to_private(mapping->host)->crypt_stat; mapping 286 fs/ecryptfs/mmap.c page, index, 0, PAGE_SIZE, mapping->host); mapping 313 fs/ecryptfs/mmap.c mapping->host); mapping 325 fs/ecryptfs/mmap.c >= i_size_read(page->mapping->host)) { mapping 345 fs/ecryptfs/mmap.c if (prev_page_end_size > i_size_read(page->mapping->host)) { mapping 359 fs/ecryptfs/mmap.c if ((i_size_read(mapping->host) == prev_page_end_size) mapping 463 fs/ecryptfs/mmap.c struct address_space *mapping, mapping 470 fs/ecryptfs/mmap.c struct inode *ecryptfs_inode = mapping->host; mapping 525 fs/ecryptfs/mmap.c static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block) mapping 531 fs/ecryptfs/mmap.c inode = (struct inode *)mapping->host; mapping 21 fs/efs/inode.c static sector_t _efs_bmap(struct address_space *mapping, sector_t block) mapping 23 fs/efs/inode.c return generic_block_bmap(mapping,block,efs_get_block); mapping 19 fs/efs/symlink.c struct inode * inode = page->mapping->host; mapping 39 fs/erofs/compress.h return page->mapping == Z_EROFS_MAPPING_STAGING; mapping 37 fs/erofs/data.c struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping; mapping 40 fs/erofs/data.c page = read_cache_page_gfp(mapping, blkaddr, mapping 41 fs/erofs/data.c mapping_gfp_constraint(mapping, ~__GFP_FS)); mapping 128 fs/erofs/data.c struct address_space *mapping, mapping 134 fs/erofs/data.c struct inode *const inode = mapping->host; mapping 273 fs/erofs/data.c bio = erofs_read_raw_page(NULL, page->mapping, mapping 284 fs/erofs/data.c struct address_space *mapping, mapping 290 fs/erofs/data.c gfp_t gfp = readahead_gfp_mask(mapping); mapping 293 fs/erofs/data.c trace_erofs_readpages(mapping->host, page, nr_pages, true); mapping 301 fs/erofs/data.c if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { mapping 302 fs/erofs/data.c bio = erofs_read_raw_page(bio, mapping, page, mapping 309 fs/erofs/data.c EROFS_I(mapping->host)->nid); mapping 344 fs/erofs/data.c static sector_t erofs_bmap(struct address_space *mapping, sector_t block) mapping 346 fs/erofs/data.c struct inode *inode = mapping->host; mapping 355 fs/erofs/data.c return generic_block_bmap(mapping, block, erofs_get_block); mapping 79 fs/erofs/decompressor.c victim->mapping = Z_EROFS_MAPPING_STAGING; mapping 71 fs/erofs/dir.c struct address_space *mapping = dir->i_mapping; mapping 83 fs/erofs/dir.c dentry_page = read_mapping_page(mapping, i, NULL); mapping 417 fs/erofs/internal.h int erofs_try_to_free_cached_page(struct address_space *mapping, mapping 96 fs/erofs/namei.c struct address_space *const mapping = dir->i_mapping; mapping 105 fs/erofs/namei.c struct page *page = read_mapping_page(mapping, mid, NULL); mapping 299 fs/erofs/super.c struct address_space *const mapping = page->mapping; mapping 302 fs/erofs/super.c DBG_BUGON(mapping->a_ops != &managed_cache_aops); mapping 305 fs/erofs/super.c ret = erofs_try_to_free_cached_page(mapping, page); mapping 216 fs/erofs/zdata.c struct address_space *const mapping = MNGD_MAPPING(sbi); mapping 234 fs/erofs/zdata.c if (page->mapping != mapping) mapping 248 fs/erofs/zdata.c int erofs_try_to_free_cached_page(struct address_space *mapping, mapping 551 fs/erofs/zdata.c page->mapping = Z_EROFS_MAPPING_STAGING; mapping 733 fs/erofs/zdata.c DBG_BUGON(!page->mapping); mapping 736 fs/erofs/zdata.c sbi 
= EROFS_SB(page->mapping->host->i_sb); mapping 813 fs/erofs/zdata.c DBG_BUGON(!page->mapping); mapping 849 fs/erofs/zdata.c DBG_BUGON(!page->mapping); mapping 926 fs/erofs/zdata.c DBG_BUGON(!page->mapping); mapping 1002 fs/erofs/zdata.c struct address_space *mapping; mapping 1029 fs/erofs/zdata.c mapping = READ_ONCE(page->mapping); mapping 1042 fs/erofs/zdata.c DBG_BUGON(!mapping); mapping 1050 fs/erofs/zdata.c if (mapping && mapping != mc) mapping 1060 fs/erofs/zdata.c if (page->mapping == mc) { mapping 1089 fs/erofs/zdata.c DBG_BUGON(page->mapping); mapping 1105 fs/erofs/zdata.c page->mapping = Z_EROFS_MAPPING_STAGING; mapping 1335 fs/erofs/zdata.c struct inode *const inode = page->mapping->host; mapping 1368 fs/erofs/zdata.c struct address_space *mapping, mapping 1372 fs/erofs/zdata.c struct inode *const inode = mapping->host; mapping 1377 fs/erofs/zdata.c gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); mapping 1381 fs/erofs/zdata.c trace_erofs_readpages(mapping->host, lru_to_page(pages), mapping 1399 fs/erofs/zdata.c if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { mapping 106 fs/erofs/zdata.h return page->mapping == MNGD_MAPPING(sbi); mapping 92 fs/ext2/dir.c struct address_space *mapping = page->mapping; mapping 93 fs/ext2/dir.c struct inode *dir = mapping->host; mapping 97 fs/ext2/dir.c block_write_end(NULL, mapping, pos, len, len, page, NULL); mapping 117 fs/ext2/dir.c struct inode *dir = page->mapping->host; mapping 202 fs/ext2/dir.c struct address_space *mapping = dir->i_mapping; mapping 203 fs/ext2/dir.c struct page *page = read_mapping_page(mapping, n, NULL); mapping 562 fs/ext2/dir.c struct inode *inode = page->mapping->host; mapping 59 fs/ext2/inode.c static void ext2_write_failed(struct address_space *mapping, loff_t to) mapping 61 fs/ext2/inode.c struct inode *inode = mapping->host; mapping 881 fs/ext2/inode.c ext2_readpages(struct file *file, struct address_space *mapping, mapping 884 fs/ext2/inode.c return mpage_readpages(mapping, pages, nr_pages, ext2_get_block); mapping 888 fs/ext2/inode.c ext2_write_begin(struct file *file, struct address_space *mapping, mapping 894 fs/ext2/inode.c ret = block_write_begin(mapping, pos, len, flags, pagep, mapping 897 fs/ext2/inode.c ext2_write_failed(mapping, pos + len); mapping 901 fs/ext2/inode.c static int ext2_write_end(struct file *file, struct address_space *mapping, mapping 907 fs/ext2/inode.c ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); mapping 909 fs/ext2/inode.c ext2_write_failed(mapping, pos + len); mapping 914 fs/ext2/inode.c ext2_nobh_write_begin(struct file *file, struct address_space *mapping, mapping 920 fs/ext2/inode.c ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, mapping 923 fs/ext2/inode.c ext2_write_failed(mapping, pos + len); mapping 933 fs/ext2/inode.c static sector_t ext2_bmap(struct address_space *mapping, sector_t block) mapping 935 fs/ext2/inode.c return generic_block_bmap(mapping,block,ext2_get_block); mapping 942 fs/ext2/inode.c struct address_space *mapping = file->f_mapping; mapping 943 fs/ext2/inode.c struct inode *inode = mapping->host; mapping 950 fs/ext2/inode.c ext2_write_failed(mapping, offset + count); mapping 955 fs/ext2/inode.c ext2_writepages(struct address_space *mapping, struct writeback_control *wbc) mapping 957 fs/ext2/inode.c return mpage_writepages(mapping, wbc, ext2_get_block); mapping 961 fs/ext2/inode.c ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc) mapping 963 fs/ext2/inode.c return 
dax_writeback_mapping_range(mapping, mapping 964 fs/ext2/inode.c mapping->host->i_sb->s_bdev, wbc); mapping 3138 fs/ext4/ext4.h extern int ext4_try_to_write_inline_data(struct address_space *mapping, mapping 3151 fs/ext4/ext4.h extern int ext4_da_write_inline_data_begin(struct address_space *mapping, mapping 3239 fs/ext4/ext4.h extern int ext4_mpage_readpages(struct address_space *mapping, mapping 525 fs/ext4/inline.c static int ext4_convert_inline_data_to_extent(struct address_space *mapping, mapping 563 fs/ext4/inline.c page = grab_cache_page_write_begin(mapping, 0, flags); mapping 645 fs/ext4/inline.c int ext4_try_to_write_inline_data(struct address_space *mapping, mapping 691 fs/ext4/inline.c page = grab_cache_page_write_begin(mapping, 0, flags); mapping 725 fs/ext4/inline.c return ext4_convert_inline_data_to_extent(mapping, mapping 800 fs/ext4/inline.c static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping, mapping 808 fs/ext4/inline.c page = grab_cache_page_write_begin(mapping, 0, flags); mapping 858 fs/ext4/inline.c int ext4_da_write_inline_data_begin(struct address_space *mapping, mapping 899 fs/ext4/inline.c ret = ext4_da_convert_inline_data_to_extent(mapping, mapping 909 fs/ext4/inline.c page = grab_cache_page_write_begin(mapping, 0, flags); mapping 1176 fs/ext4/inode.c struct inode *inode = page->mapping->host; mapping 1266 fs/ext4/inode.c static int ext4_write_begin(struct file *file, struct address_space *mapping, mapping 1270 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 1292 fs/ext4/inode.c ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, mapping 1308 fs/ext4/inode.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 1321 fs/ext4/inode.c if (page->mapping != mapping) { mapping 1411 fs/ext4/inode.c struct address_space *mapping, mapping 1416 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 1434 fs/ext4/inode.c copied = block_write_end(file, mapping, pos, mapping 1519 fs/ext4/inode.c struct address_space *mapping, mapping 1524 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 1696 fs/ext4/inode.c struct address_space *mapping = inode->i_mapping; mapping 1713 fs/ext4/inode.c nr_pages = pagevec_lookup_range(&pvec, mapping, &index, end); mapping 2003 fs/ext4/inode.c struct address_space *mapping = page->mapping; mapping 2004 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 2047 fs/ext4/inode.c if (page->mapping != mapping) { mapping 2129 fs/ext4/inode.c struct inode *inode = page->mapping->host; mapping 2633 fs/ext4/inode.c struct address_space *mapping = mpd->inode->i_mapping; mapping 2654 fs/ext4/inode.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, mapping 2688 fs/ext4/inode.c unlikely(page->mapping != mapping)) { mapping 2718 fs/ext4/inode.c static int ext4_writepages(struct address_space *mapping, mapping 2727 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 2729 fs/ext4/inode.c struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); mapping 2745 fs/ext4/inode.c if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) mapping 2749 fs/ext4/inode.c ret = generic_writepages(mapping, wbc); mapping 2763 fs/ext4/inode.c if (unlikely(ext4_forced_shutdown(EXT4_SB(mapping->host->i_sb)) || mapping 2800 fs/ext4/inode.c writeback_index = mapping->writeback_index; mapping 2815 fs/ext4/inode.c tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page); mapping 2953 fs/ext4/inode.c mapping->writeback_index = mpd.first_page; mapping 2962 
fs/ext4/inode.c static int ext4_dax_writepages(struct address_space *mapping, mapping 2967 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 2968 fs/ext4/inode.c struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb); mapping 2976 fs/ext4/inode.c ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc); mapping 3030 fs/ext4/inode.c static int ext4_da_write_begin(struct file *file, struct address_space *mapping, mapping 3037 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 3048 fs/ext4/inode.c return ext4_write_begin(file, mapping, pos, mapping 3055 fs/ext4/inode.c ret = ext4_da_write_inline_data_begin(mapping, inode, mapping 3072 fs/ext4/inode.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 3092 fs/ext4/inode.c if (page->mapping != mapping) { mapping 3139 fs/ext4/inode.c struct inode *inode = page->mapping->host; mapping 3155 fs/ext4/inode.c struct address_space *mapping, mapping 3159 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 3167 fs/ext4/inode.c return ext4_write_end(file, mapping, pos, mapping 3198 fs/ext4/inode.c ret2 = generic_write_end(file, mapping, pos, len, copied, mapping 3269 fs/ext4/inode.c static sector_t ext4_bmap(struct address_space *mapping, sector_t block) mapping 3271 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 3281 fs/ext4/inode.c if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) && mapping 3288 fs/ext4/inode.c filemap_write_and_wait(mapping); mapping 3321 fs/ext4/inode.c return generic_block_bmap(mapping, block, ext4_get_block); mapping 3327 fs/ext4/inode.c struct inode *inode = page->mapping->host; mapping 3335 fs/ext4/inode.c return ext4_mpage_readpages(page->mapping, NULL, page, 1, mapping 3342 fs/ext4/inode.c ext4_readpages(struct file *file, struct address_space *mapping, mapping 3345 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 3351 fs/ext4/inode.c return ext4_mpage_readpages(mapping, pages, NULL, nr_pages, true); mapping 3369 fs/ext4/inode.c journal_t *journal = EXT4_JOURNAL(page->mapping->host); mapping 3392 fs/ext4/inode.c journal_t *journal = EXT4_JOURNAL(page->mapping->host); mapping 3835 fs/ext4/inode.c struct address_space *mapping = iocb->ki_filp->f_mapping; mapping 3836 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 3852 fs/ext4/inode.c ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, mapping 4002 fs/ext4/inode.c struct address_space *mapping, loff_t from, loff_t length) mapping 4008 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 4013 fs/ext4/inode.c page = find_or_create_page(mapping, from >> PAGE_SHIFT, mapping 4014 fs/ext4/inode.c mapping_gfp_constraint(mapping, ~__GFP_FS)); mapping 4098 fs/ext4/inode.c struct address_space *mapping, loff_t from, loff_t length) mapping 4100 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 4116 fs/ext4/inode.c return __ext4_block_zero_page_range(handle, mapping, from, length); mapping 4126 fs/ext4/inode.c struct address_space *mapping, loff_t from) mapping 4131 fs/ext4/inode.c struct inode *inode = mapping->host; mapping 4140 fs/ext4/inode.c return ext4_block_zero_page_range(handle, mapping, from, length); mapping 4147 fs/ext4/inode.c struct address_space *mapping = inode->i_mapping; mapping 4162 fs/ext4/inode.c err = ext4_block_zero_page_range(handle, mapping, mapping 4168 fs/ext4/inode.c err = ext4_block_zero_page_range(handle, mapping, mapping 4175 fs/ext4/inode.c err = ext4_block_zero_page_range(handle, mapping, mapping 4266 fs/ext4/inode.c struct address_space 
*mapping = inode->i_mapping; mapping 4290 fs/ext4/inode.c if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { mapping 4291 fs/ext4/inode.c ret = filemap_write_and_wait_range(mapping, offset, mapping 4467 fs/ext4/inode.c struct address_space *mapping = inode->i_mapping; mapping 4512 fs/ext4/inode.c ext4_block_truncate_page(handle, mapping, inode->i_size); mapping 6247 fs/ext4/inode.c struct address_space *mapping = inode->i_mapping; mapping 6279 fs/ext4/inode.c if (page->mapping != mapping || page_offset(page) > size) { mapping 825 fs/ext4/mballoc.c inode = page->mapping->host; mapping 995 fs/ext4/mballoc.c BUG_ON(page->mapping != inode->i_mapping); mapping 1009 fs/ext4/mballoc.c BUG_ON(page->mapping != inode->i_mapping); mapping 1160 fs/ext4/mballoc.c BUG_ON(page->mapping != inode->i_mapping); mapping 1196 fs/ext4/mballoc.c BUG_ON(page->mapping != inode->i_mapping); mapping 129 fs/ext4/move_extent.c struct address_space *mapping[2]; mapping 134 fs/ext4/move_extent.c mapping[0] = inode1->i_mapping; mapping 135 fs/ext4/move_extent.c mapping[1] = inode2->i_mapping; mapping 138 fs/ext4/move_extent.c mapping[0] = inode2->i_mapping; mapping 139 fs/ext4/move_extent.c mapping[1] = inode1->i_mapping; mapping 142 fs/ext4/move_extent.c page[0] = grab_cache_page_write_begin(mapping[0], index1, fl); mapping 146 fs/ext4/move_extent.c page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); mapping 169 fs/ext4/move_extent.c struct inode *inode = page->mapping->host; mapping 86 fs/ext4/page-io.c mapping_set_error(page->mapping, -EIO); mapping 411 fs/ext4/page-io.c struct inode *inode = page->mapping->host; mapping 225 fs/ext4/readpage.c int ext4_mpage_readpages(struct address_space *mapping, mapping 232 fs/ext4/readpage.c struct inode *inode = mapping->host; mapping 260 fs/ext4/readpage.c if (add_to_page_cache_lru(page, mapping, page->index, mapping 261 fs/ext4/readpage.c readahead_gfp_mask(mapping))) mapping 39 fs/f2fs/checkpoint.c struct address_space *mapping = META_MAPPING(sbi); mapping 42 fs/f2fs/checkpoint.c page = f2fs_grab_cache_page(mapping, index, false); mapping 59 fs/f2fs/checkpoint.c struct address_space *mapping = META_MAPPING(sbi); mapping 76 fs/f2fs/checkpoint.c page = f2fs_grab_cache_page(mapping, index, false); mapping 93 fs/f2fs/checkpoint.c if (unlikely(page->mapping != mapping)) { mapping 330 fs/f2fs/checkpoint.c static int f2fs_write_meta_pages(struct address_space *mapping, mapping 333 fs/f2fs/checkpoint.c struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); mapping 349 fs/f2fs/checkpoint.c trace_f2fs_writepages(mapping->host, wbc, META); mapping 358 fs/f2fs/checkpoint.c trace_f2fs_writepages(mapping->host, wbc, META); mapping 365 fs/f2fs/checkpoint.c struct address_space *mapping = META_MAPPING(sbi); mapping 379 fs/f2fs/checkpoint.c while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, mapping 395 fs/f2fs/checkpoint.c if (unlikely(page->mapping != mapping)) { mapping 36 fs/f2fs/data.c struct address_space *mapping = page->mapping; mapping 40 fs/f2fs/data.c if (!mapping) mapping 43 fs/f2fs/data.c inode = mapping->host; mapping 58 fs/f2fs/data.c struct address_space *mapping = page_file_mapping(page); mapping 60 fs/f2fs/data.c if (mapping) { mapping 61 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 216 fs/f2fs/data.c mapping_set_error(page->mapping, -EIO); mapping 221 fs/f2fs/data.c f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) && mapping 396 fs/f2fs/data.c if (inode && inode == target->mapping->host) mapping 848 fs/f2fs/data.c struct address_space *mapping 
= inode->i_mapping; mapping 854 fs/f2fs/data.c page = f2fs_grab_cache_page(mapping, index, for_write); mapping 918 fs/f2fs/data.c struct address_space *mapping = inode->i_mapping; mapping 921 fs/f2fs/data.c page = find_get_page(mapping, index); mapping 949 fs/f2fs/data.c struct address_space *mapping = inode->i_mapping; mapping 958 fs/f2fs/data.c if (unlikely(page->mapping != mapping)) { mapping 981 fs/f2fs/data.c struct address_space *mapping = inode->i_mapping; mapping 986 fs/f2fs/data.c page = f2fs_grab_cache_page(mapping, index, true); mapping 1765 fs/f2fs/data.c static int f2fs_mpage_readpages(struct address_space *mapping, mapping 1771 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 1790 fs/f2fs/data.c if (add_to_page_cache_lru(page, mapping, mapping 1792 fs/f2fs/data.c readahead_gfp_mask(mapping))) mapping 1830 fs/f2fs/data.c struct address_space *mapping, mapping 1833 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 1842 fs/f2fs/data.c return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true); mapping 1847 fs/f2fs/data.c struct inode *inode = fio->page->mapping->host; mapping 1958 fs/f2fs/data.c struct inode *inode = fio->page->mapping->host; mapping 1969 fs/f2fs/data.c struct inode *inode = page->mapping->host; mapping 2083 fs/f2fs/data.c struct inode *inode = page->mapping->host; mapping 2113 fs/f2fs/data.c mapping_set_error(page->mapping, -EIO); mapping 2246 fs/f2fs/data.c static int f2fs_write_cache_pages(struct address_space *mapping, mapping 2253 fs/f2fs/data.c struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); mapping 2268 fs/f2fs/data.c if (get_dirty_pages(mapping->host) <= mapping 2269 fs/f2fs/data.c SM_I(F2FS_M_SB(mapping))->min_hot_blocks) mapping 2270 fs/f2fs/data.c set_inode_flag(mapping->host, FI_HOT_DATA); mapping 2272 fs/f2fs/data.c clear_inode_flag(mapping->host, FI_HOT_DATA); mapping 2275 fs/f2fs/data.c writeback_index = mapping->writeback_index; /* prev offset */ mapping 2295 fs/f2fs/data.c tag_pages_for_writeback(mapping, index, end); mapping 2300 fs/f2fs/data.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, mapping 2320 fs/f2fs/data.c if (unlikely(page->mapping != mapping)) { mapping 2389 fs/f2fs/data.c mapping->writeback_index = done_index; mapping 2392 fs/f2fs/data.c f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, mapping 2418 fs/f2fs/data.c static int __f2fs_write_data_pages(struct address_space *mapping, mapping 2422 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 2429 fs/f2fs/data.c if (!mapping->a_ops->writepage) mapping 2450 fs/f2fs/data.c trace_f2fs_writepages(mapping->host, wbc, DATA); mapping 2464 fs/f2fs/data.c ret = f2fs_write_cache_pages(mapping, wbc, io_type); mapping 2482 fs/f2fs/data.c trace_f2fs_writepages(mapping->host, wbc, DATA); mapping 2486 fs/f2fs/data.c static int f2fs_write_data_pages(struct address_space *mapping, mapping 2489 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 2491 fs/f2fs/data.c return __f2fs_write_data_pages(mapping, wbc, mapping 2496 fs/f2fs/data.c static void f2fs_write_failed(struct address_space *mapping, loff_t to) mapping 2498 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 2519 fs/f2fs/data.c struct inode *inode = page->mapping->host; mapping 2601 fs/f2fs/data.c static int f2fs_write_begin(struct file *file, struct address_space *mapping, mapping 2605 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 2643 fs/f2fs/data.c page = f2fs_pagecache_get_page(mapping, index, mapping 2662 fs/f2fs/data.c if 
(page->mapping != mapping) { mapping 2694 fs/f2fs/data.c if (unlikely(page->mapping != mapping)) { mapping 2707 fs/f2fs/data.c f2fs_write_failed(mapping, pos + len); mapping 2714 fs/f2fs/data.c struct address_space *mapping, mapping 2718 fs/f2fs/data.c struct inode *inode = page->mapping->host; mapping 2813 fs/f2fs/data.c struct address_space *mapping = iocb->ki_filp->f_mapping; mapping 2814 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 2876 fs/f2fs/data.c f2fs_write_failed(mapping, offset + count); mapping 2889 fs/f2fs/data.c struct inode *inode = page->mapping->host; mapping 2961 fs/f2fs/data.c static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) mapping 2963 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 2969 fs/f2fs/data.c if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) mapping 2970 fs/f2fs/data.c filemap_write_and_wait(mapping); mapping 2972 fs/f2fs/data.c return generic_block_bmap(mapping, block, get_data_block_bmap); mapping 2978 fs/f2fs/data.c int f2fs_migrate_page(struct address_space *mapping, mapping 2982 fs/f2fs/data.c struct f2fs_inode_info *fi = F2FS_I(mapping->host); mapping 2997 fs/f2fs/data.c rc = migrate_page_move_mapping(mapping, newpage, mapping 3036 fs/f2fs/data.c struct address_space *mapping = swap_file->f_mapping; mapping 3037 fs/f2fs/data.c struct inode *inode = mapping->host; mapping 3190 fs/f2fs/data.c struct address_space *mapping = page_mapping(page); mapping 3193 fs/f2fs/data.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 3194 fs/f2fs/data.c __xa_clear_mark(&mapping->i_pages, page_index(page), mapping 3196 fs/f2fs/data.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 1507 fs/f2fs/f2fs.h static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping) mapping 1509 fs/f2fs/f2fs.h return F2FS_I_SB(mapping->host); mapping 2129 fs/f2fs/f2fs.h static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, mapping 2136 fs/f2fs/f2fs.h page = find_get_page_flags(mapping, index, mapping 2139 fs/f2fs/f2fs.h page = find_lock_page(mapping, index); mapping 2143 fs/f2fs/f2fs.h if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) { mapping 2150 fs/f2fs/f2fs.h return grab_cache_page(mapping, index); mapping 2151 fs/f2fs/f2fs.h return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); mapping 2155 fs/f2fs/f2fs.h struct address_space *mapping, pgoff_t index, mapping 2158 fs/f2fs/f2fs.h if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) { mapping 2163 fs/f2fs/f2fs.h return pagecache_get_page(mapping, index, fgp_flags, gfp_mask); mapping 3237 fs/f2fs/f2fs.h int f2fs_migrate_page(struct address_space *mapping, struct page *newpage, mapping 76 fs/f2fs/file.c if (unlikely(page->mapping != inode->i_mapping || mapping 343 fs/f2fs/file.c static pgoff_t __get_first_dirty_index(struct address_space *mapping, mapping 353 fs/f2fs/file.c nr_pages = find_get_pages_tag(mapping, &pgofs, PAGECACHE_TAG_DIRTY, mapping 577 fs/f2fs/file.c struct address_space *mapping = inode->i_mapping; mapping 584 fs/f2fs/file.c page = find_lock_page(mapping, index); mapping 979 fs/f2fs/file.c struct address_space *mapping = inode->i_mapping; mapping 991 fs/f2fs/file.c truncate_inode_pages_range(mapping, blk_start, mapping 1336 fs/f2fs/file.c struct address_space *mapping = inode->i_mapping; mapping 1350 fs/f2fs/file.c ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1); mapping 661 fs/f2fs/gc.c struct address_space *mapping = inode->i_mapping; mapping 678 fs/f2fs/gc.c page = 
f2fs_grab_cache_page(mapping, index, true); mapping 837 fs/f2fs/gc.c if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) || mapping 45 fs/f2fs/inline.c struct inode *inode = page->mapping->host; mapping 288 fs/f2fs/node.c return NODE_MAPPING(sbi) == page->mapping && mapping 1115 fs/f2fs/node.c BUG_ON(page->mapping != NODE_MAPPING(sbi)); mapping 1364 fs/f2fs/node.c if (unlikely(page->mapping != NODE_MAPPING(sbi))) { mapping 1472 fs/f2fs/node.c if (unlikely(page->mapping != NODE_MAPPING(sbi))) { mapping 1693 fs/f2fs/node.c if (unlikely(page->mapping != NODE_MAPPING(sbi))) { mapping 1860 fs/f2fs/node.c if (unlikely(page->mapping != NODE_MAPPING(sbi))) { mapping 1972 fs/f2fs/node.c static int f2fs_write_node_pages(struct address_space *mapping, mapping 1975 fs/f2fs/node.c struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); mapping 1996 fs/f2fs/node.c trace_f2fs_writepages(mapping->host, wbc, NODE); mapping 2010 fs/f2fs/node.c trace_f2fs_writepages(mapping->host, wbc, NODE); mapping 401 fs/f2fs/segment.c if (page->mapping == inode->i_mapping) { mapping 3011 fs/f2fs/segment.c struct inode *inode = fio->page->mapping->host; mapping 3028 fs/f2fs/segment.c struct inode *inode = fio->page->mapping->host; mapping 1766 fs/f2fs/super.c struct address_space *mapping = inode->i_mapping; mapping 1784 fs/f2fs/super.c page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS); mapping 1796 fs/f2fs/super.c if (unlikely(page->mapping != mapping)) { mapping 1824 fs/f2fs/super.c struct address_space *mapping = inode->i_mapping; mapping 1825 fs/f2fs/super.c const struct address_space_operations *a_ops = mapping->a_ops; mapping 1838 fs/f2fs/super.c err = a_ops->write_begin(NULL, mapping, off, tocopy, 0, mapping 1854 fs/f2fs/super.c a_ops->write_end(NULL, mapping, off, tocopy, tocopy, mapping 2011 fs/f2fs/super.c struct address_space *mapping; mapping 2018 fs/f2fs/super.c mapping = dqopt->files[cnt]->i_mapping; mapping 2020 fs/f2fs/super.c ret = filemap_fdatawrite(mapping); mapping 2028 fs/f2fs/super.c ret = filemap_fdatawait(mapping); mapping 55 fs/f2fs/trace.c struct inode *inode = page->mapping->host; mapping 98 fs/f2fs/trace.c inode = fio->page->mapping->host; mapping 229 fs/fat/file.c struct address_space *mapping = inode->i_mapping; mapping 246 fs/fat/file.c err = filemap_fdatawrite_range(mapping, start, mapping 248 fs/fat/file.c err2 = sync_mapping_buffers(mapping); mapping 255 fs/fat/file.c err = filemap_fdatawait_range(mapping, start, mapping 201 fs/fat/inode.c static int fat_writepages(struct address_space *mapping, mapping 204 fs/fat/inode.c return mpage_writepages(mapping, wbc, fat_get_block); mapping 212 fs/fat/inode.c static int fat_readpages(struct file *file, struct address_space *mapping, mapping 215 fs/fat/inode.c return mpage_readpages(mapping, pages, nr_pages, fat_get_block); mapping 218 fs/fat/inode.c static void fat_write_failed(struct address_space *mapping, loff_t to) mapping 220 fs/fat/inode.c struct inode *inode = mapping->host; mapping 228 fs/fat/inode.c static int fat_write_begin(struct file *file, struct address_space *mapping, mapping 235 fs/fat/inode.c err = cont_write_begin(file, mapping, pos, len, flags, mapping 237 fs/fat/inode.c &MSDOS_I(mapping->host)->mmu_private); mapping 239 fs/fat/inode.c fat_write_failed(mapping, pos + len); mapping 243 fs/fat/inode.c static int fat_write_end(struct file *file, struct address_space *mapping, mapping 247 fs/fat/inode.c struct inode *inode = mapping->host; mapping 249 fs/fat/inode.c err = generic_write_end(file, mapping, pos, len, copied, pagep, 
fsdata); mapping 251 fs/fat/inode.c fat_write_failed(mapping, pos + len); mapping 263 fs/fat/inode.c struct address_space *mapping = file->f_mapping; mapping 264 fs/fat/inode.c struct inode *inode = mapping->host; mapping 290 fs/fat/inode.c fat_write_failed(mapping, offset + count); mapping 320 fs/fat/inode.c static sector_t _fat_bmap(struct address_space *mapping, sector_t block) mapping 325 fs/fat/inode.c down_read(&MSDOS_I(mapping->host)->truncate_lock); mapping 326 fs/fat/inode.c blocknr = generic_block_bmap(mapping, block, fat_get_block_bmap); mapping 327 fs/fat/inode.c up_read(&MSDOS_I(mapping->host)->truncate_lock); mapping 1947 fs/fat/inode.c struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; mapping 1948 fs/fat/inode.c ret = filemap_flush(mapping); mapping 68 fs/freevxfs/vxfs_immed.c struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host); mapping 68 fs/freevxfs/vxfs_subr.c vxfs_get_page(struct address_space *mapping, u_long n) mapping 72 fs/freevxfs/vxfs_subr.c pp = read_mapping_page(mapping, n, NULL); mapping 180 fs/freevxfs/vxfs_subr.c vxfs_bmap(struct address_space *mapping, sector_t block) mapping 182 fs/freevxfs/vxfs_subr.c return generic_block_bmap(mapping, block, vxfs_getblk); mapping 357 fs/fs-writeback.c struct address_space *mapping = inode->i_mapping; mapping 360 fs/fs-writeback.c XA_STATE(xas, &mapping->i_pages, 0); mapping 388 fs/fs-writeback.c xa_lock_irq(&mapping->i_pages); mapping 452 fs/fs-writeback.c xa_unlock_irq(&mapping->i_pages); mapping 1443 fs/fs-writeback.c struct address_space *mapping = inode->i_mapping; mapping 1452 fs/fs-writeback.c ret = do_writepages(mapping, wbc); mapping 1462 fs/fs-writeback.c int err = filemap_fdatawait(mapping); mapping 1502 fs/fs-writeback.c if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) mapping 2400 fs/fs-writeback.c struct address_space *mapping = inode->i_mapping; mapping 2415 fs/fs-writeback.c if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) mapping 2436 fs/fs-writeback.c filemap_fdatawait_keep_errors(mapping); mapping 304 fs/fscache/page.c struct address_space *mapping, mapping 324 fs/fscache/page.c op->mapping = mapping; mapping 458 fs/fscache/page.c op = fscache_alloc_retrieval(cookie, page->mapping, mapping 559 fs/fscache/page.c struct address_space *mapping, mapping 590 fs/fscache/page.c op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context); mapping 702 fs/fscache/page.c op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL); mapping 1187 fs/fscache/page.c op->mapping, page); mapping 1218 fs/fscache/page.c struct address_space *mapping = inode->i_mapping; mapping 1225 fs/fscache/page.c if (!mapping || mapping->nrpages == 0) { mapping 1233 fs/fscache/page.c if (!pagevec_lookup(&pvec, mapping, &next)) mapping 766 fs/fuse/dev.c page->mapping != NULL || mapping 776 fs/fuse/dev.c pr_warn(" page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping); mapping 1539 fs/fuse/dev.c struct address_space *mapping; mapping 1572 fs/fuse/dev.c mapping = inode->i_mapping; mapping 1588 fs/fuse/dev.c page = find_or_create_page(mapping, index, mapping 1589 fs/fuse/dev.c mapping_gfp_mask(mapping)); mapping 1639 fs/fuse/dev.c struct address_space *mapping = inode->i_mapping; mapping 1686 fs/fuse/dev.c page = find_get_page(mapping, index); mapping 1765 fs/fuse/dir.c int err = fuse_readlink_page(page->mapping->host, page); mapping 783 fs/fuse/file.c struct inode *inode = page->mapping->host; mapping 
827 fs/fuse/file.c struct inode *inode = page->mapping->host; mapping 849 fs/fuse/file.c struct address_space *mapping = NULL; mapping 851 fs/fuse/file.c for (i = 0; mapping == NULL && i < ap->num_pages; i++) mapping 852 fs/fuse/file.c mapping = ap->pages[i]->mapping; mapping 854 fs/fuse/file.c if (mapping) { mapping 855 fs/fuse/file.c struct inode *inode = mapping->host; mapping 965 fs/fuse/file.c static int fuse_readpages(struct file *file, struct address_space *mapping, mapping 968 fs/fuse/file.c struct inode *inode = mapping->host; mapping 987 fs/fuse/file.c err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); mapping 1140 fs/fuse/file.c struct address_space *mapping, mapping 1144 fs/fuse/file.c struct fuse_conn *fc = get_fuse_conn(mapping->host); mapping 1167 fs/fuse/file.c page = grab_cache_page_write_begin(mapping, index, 0); mapping 1171 fs/fuse/file.c if (mapping_writably_mapped(mapping)) mapping 1214 fs/fuse/file.c struct address_space *mapping, mapping 1217 fs/fuse/file.c struct inode *inode = mapping->host; mapping 1239 fs/fuse/file.c count = fuse_fill_write_pages(ap, mapping, ii, pos, nr_pages); mapping 1271 fs/fuse/file.c struct address_space *mapping = file->f_mapping; mapping 1274 fs/fuse/file.c struct inode *inode = mapping->host; mapping 1280 fs/fuse/file.c err = fuse_update_attributes(mapping->host, file); mapping 1312 fs/fuse/file.c written_buffered = fuse_perform_write(iocb, mapping, from, pos); mapping 1331 fs/fuse/file.c written = fuse_perform_write(iocb, mapping, from, iocb->ki_pos); mapping 1816 fs/fuse/file.c struct address_space *mapping = page->mapping; mapping 1817 fs/fuse/file.c struct inode *inode = mapping->host; mapping 1872 fs/fuse/file.c mapping_set_error(page->mapping, error); mapping 1881 fs/fuse/file.c if (fuse_page_is_writeback(page->mapping->host, page->index)) { mapping 2125 fs/fuse/file.c static int fuse_writepages(struct address_space *mapping, mapping 2128 fs/fuse/file.c struct inode *inode = mapping->host; mapping 2148 fs/fuse/file.c err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); mapping 2167 fs/fuse/file.c static int fuse_write_begin(struct file *file, struct address_space *mapping, mapping 2179 fs/fuse/file.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 2183 fs/fuse/file.c fuse_wait_on_page_writeback(mapping->host, page->index); mapping 2191 fs/fuse/file.c fsize = i_size_read(mapping->host); mapping 2212 fs/fuse/file.c static int fuse_write_end(struct file *file, struct address_space *mapping, mapping 2216 fs/fuse/file.c struct inode *inode = page->mapping->host; mapping 2244 fs/fuse/file.c struct inode *inode = page->mapping->host; mapping 2283 fs/fuse/file.c if (page->mapping != inode->i_mapping) { mapping 2470 fs/fuse/file.c static sector_t fuse_bmap(struct address_space *mapping, sector_t block) mapping 2472 fs/fuse/file.c struct inode *inode = mapping->host; mapping 91 fs/gfs2/aops.c struct inode *inode = page->mapping->host; mapping 105 fs/gfs2/aops.c page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); mapping 124 fs/gfs2/aops.c struct inode * const inode = page->mapping->host; mapping 157 fs/gfs2/aops.c struct inode *inode = page->mapping->host; mapping 183 fs/gfs2/aops.c struct inode *inode = page->mapping->host; mapping 209 fs/gfs2/aops.c static int gfs2_writepages(struct address_space *mapping, mapping 212 fs/gfs2/aops.c struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); mapping 213 fs/gfs2/aops.c int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); mapping 
238 fs/gfs2/aops.c static int gfs2_write_jdata_pagevec(struct address_space *mapping, mapping 244 fs/gfs2/aops.c struct inode *inode = mapping->host; mapping 261 fs/gfs2/aops.c if (unlikely(page->mapping != mapping)) { mapping 333 fs/gfs2/aops.c static int gfs2_write_cache_jdata(struct address_space *mapping, mapping 350 fs/gfs2/aops.c writeback_index = mapping->writeback_index; /* prev offset */ mapping 371 fs/gfs2/aops.c tag_pages_for_writeback(mapping, index, end); mapping 374 fs/gfs2/aops.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, mapping 379 fs/gfs2/aops.c ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index); mapping 401 fs/gfs2/aops.c mapping->writeback_index = done_index; mapping 414 fs/gfs2/aops.c static int gfs2_jdata_writepages(struct address_space *mapping, mapping 417 fs/gfs2/aops.c struct gfs2_inode *ip = GFS2_I(mapping->host); mapping 418 fs/gfs2/aops.c struct gfs2_sbd *sdp = GFS2_SB(mapping->host); mapping 421 fs/gfs2/aops.c ret = gfs2_write_cache_jdata(mapping, wbc); mapping 425 fs/gfs2/aops.c ret = gfs2_write_cache_jdata(mapping, wbc); mapping 485 fs/gfs2/aops.c struct gfs2_inode *ip = GFS2_I(page->mapping->host); mapping 486 fs/gfs2/aops.c struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); mapping 490 fs/gfs2/aops.c if (i_blocksize(page->mapping->host) == PAGE_SIZE && mapping 518 fs/gfs2/aops.c struct address_space *mapping = page->mapping; mapping 519 fs/gfs2/aops.c struct gfs2_inode *ip = GFS2_I(mapping->host); mapping 530 fs/gfs2/aops.c if (page->mapping == mapping && !PageUptodate(page)) mapping 554 fs/gfs2/aops.c struct address_space *mapping = ip->i_inode.i_mapping; mapping 566 fs/gfs2/aops.c page = read_cache_page(mapping, index, __gfs2_readpage, NULL); mapping 599 fs/gfs2/aops.c static int gfs2_readpages(struct file *file, struct address_space *mapping, mapping 602 fs/gfs2/aops.c struct inode *inode = mapping->host; mapping 613 fs/gfs2/aops.c ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); mapping 688 fs/gfs2/aops.c static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) mapping 690 fs/gfs2/aops.c struct gfs2_inode *ip = GFS2_I(mapping->host); mapping 700 fs/gfs2/aops.c dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops); mapping 732 fs/gfs2/aops.c struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); mapping 772 fs/gfs2/aops.c struct address_space *mapping = page->mapping; mapping 773 fs/gfs2/aops.c struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); mapping 424 fs/gfs2/file.c if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap)) mapping 480 fs/gfs2/file.c if (!PageUptodate(page) || page->mapping != inode->i_mapping) { mapping 523 fs/gfs2/file.c if (!PageUptodate(page) || page->mapping != inode->i_mapping) mapping 712 fs/gfs2/file.c struct address_space *mapping = file->f_mapping; mapping 713 fs/gfs2/file.c struct inode *inode = mapping->host; mapping 718 fs/gfs2/file.c if (mapping->nrpages) { mapping 719 fs/gfs2/file.c ret1 = filemap_fdatawrite_range(mapping, start, end); mapping 740 fs/gfs2/file.c if (mapping->nrpages) mapping 867 fs/gfs2/file.c struct address_space *mapping = file->f_mapping; mapping 890 fs/gfs2/file.c invalidate_mapping_pages(mapping, mapping 240 fs/gfs2/glock.c struct address_space *mapping = gfs2_glock2aspace(gl); mapping 247 fs/gfs2/glock.c GLOCK_BUG_ON(gl, mapping && mapping->nrpages); mapping 800 fs/gfs2/glock.c struct address_space *mapping; mapping 852 fs/gfs2/glock.c mapping = gfs2_glock2aspace(gl); mapping 853 fs/gfs2/glock.c if (mapping) 
{ mapping 854 fs/gfs2/glock.c mapping->a_ops = &gfs2_meta_aops; mapping 855 fs/gfs2/glock.c mapping->host = s->s_bdev->bd_inode; mapping 856 fs/gfs2/glock.c mapping->flags = 0; mapping 857 fs/gfs2/glock.c mapping_set_gfp_mask(mapping, GFP_NOFS); mapping 858 fs/gfs2/glock.c mapping->private_data = NULL; mapping 859 fs/gfs2/glock.c mapping->writeback_index = 0; mapping 38 fs/gfs2/glops.c bh->b_page->mapping, bh->b_page->flags); mapping 171 fs/gfs2/glops.c struct address_space *mapping = &sdp->sd_aspace; mapping 187 fs/gfs2/glops.c filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); mapping 188 fs/gfs2/glops.c error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); mapping 189 fs/gfs2/glops.c mapping_set_error(mapping, error); mapping 212 fs/gfs2/glops.c struct address_space *mapping = &sdp->sd_aspace; mapping 220 fs/gfs2/glops.c truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); mapping 285 fs/gfs2/glops.c struct address_space *mapping = ip->i_inode.i_mapping; mapping 286 fs/gfs2/glops.c filemap_fdatawrite(mapping); mapping 287 fs/gfs2/glops.c error = filemap_fdatawait(mapping); mapping 288 fs/gfs2/glops.c mapping_set_error(mapping, error); mapping 322 fs/gfs2/glops.c struct address_space *mapping = gfs2_glock2aspace(gl); mapping 323 fs/gfs2/glops.c truncate_inode_pages(mapping, 0); mapping 98 fs/gfs2/log.c struct address_space *mapping; mapping 124 fs/gfs2/log.c mapping = bh->b_page->mapping; mapping 125 fs/gfs2/log.c if (!mapping) mapping 128 fs/gfs2/log.c generic_writepages(mapping, wbc); mapping 396 fs/gfs2/lops.c mapping_set_error(page->mapping, err); mapping 502 fs/gfs2/lops.c struct address_space *mapping = jd->jd_inode->i_mapping; mapping 519 fs/gfs2/lops.c since = filemap_sample_wb_err(mapping); mapping 525 fs/gfs2/lops.c page = find_or_create_page(mapping, mapping 587 fs/gfs2/lops.c ret = filemap_check_wb_err(mapping, since); mapping 590 fs/gfs2/lops.c truncate_inode_pages(mapping, 0); mapping 823 fs/gfs2/lops.c struct address_space *mapping = gfs2_glock2aspace(gl); mapping 827 fs/gfs2/lops.c if (mapping == NULL) mapping 828 fs/gfs2/lops.c mapping = &sdp->sd_aspace; mapping 830 fs/gfs2/lops.c filemap_fdatawrite(mapping); mapping 831 fs/gfs2/lops.c error = filemap_fdatawait(mapping); mapping 65 fs/gfs2/main.c struct address_space *mapping = (struct address_space *)(gl + 1); mapping 68 fs/gfs2/main.c address_space_init_once(mapping); mapping 112 fs/gfs2/meta_io.c struct address_space *mapping = gfs2_glock2aspace(gl); mapping 120 fs/gfs2/meta_io.c if (mapping == NULL) mapping 121 fs/gfs2/meta_io.c mapping = &sdp->sd_aspace; mapping 129 fs/gfs2/meta_io.c page = grab_cache_page(mapping, index); mapping 135 fs/gfs2/meta_io.c page = find_get_page_flags(mapping, index, mapping 331 fs/gfs2/meta_io.c struct address_space *mapping = bh->b_page->mapping; mapping 332 fs/gfs2/meta_io.c struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); mapping 40 fs/gfs2/meta_io.h static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) mapping 42 fs/gfs2/meta_io.h struct inode *inode = mapping->host; mapping 43 fs/gfs2/meta_io.h if (mapping->a_ops == &gfs2_meta_aops) mapping 44 fs/gfs2/meta_io.h return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd; mapping 45 fs/gfs2/meta_io.h else if (mapping->a_ops == &gfs2_rgrp_aops) mapping 46 fs/gfs2/meta_io.h return container_of(mapping, struct gfs2_sbd, sd_aspace); mapping 75 fs/gfs2/ops_fstype.c struct address_space *mapping; mapping 113 fs/gfs2/ops_fstype.c mapping = &sdp->sd_aspace; mapping 
115 fs/gfs2/ops_fstype.c address_space_init_once(mapping); mapping 116 fs/gfs2/ops_fstype.c mapping->a_ops = &gfs2_rgrp_aops; mapping 117 fs/gfs2/ops_fstype.c mapping->host = sb->s_bdev->bd_inode; mapping 118 fs/gfs2/ops_fstype.c mapping->flags = 0; mapping 119 fs/gfs2/ops_fstype.c mapping_set_gfp_mask(mapping, GFP_NOFS); mapping 120 fs/gfs2/ops_fstype.c mapping->private_data = NULL; mapping 121 fs/gfs2/ops_fstype.c mapping->writeback_index = 0; mapping 692 fs/gfs2/quota.c struct address_space *mapping = inode->i_mapping; mapping 704 fs/gfs2/quota.c page = find_or_create_page(mapping, index, GFP_NOFS); mapping 239 fs/hfs/bnode.c struct address_space *mapping; mapping 276 fs/hfs/bnode.c mapping = tree->inode->i_mapping; mapping 281 fs/hfs/bnode.c page = read_mapping_page(mapping, block++, NULL); mapping 23 fs/hfs/btree.c struct address_space *mapping; mapping 77 fs/hfs/btree.c mapping = tree->inode->i_mapping; mapping 78 fs/hfs/btree.c page = read_mapping_page(mapping, 0, NULL); mapping 488 fs/hfs/extent.c struct address_space *mapping = inode->i_mapping; mapping 494 fs/hfs/extent.c res = pagecache_write_begin(NULL, mapping, size+1, 0, 0, mapping 497 fs/hfs/extent.c res = pagecache_write_end(NULL, mapping, size+1, 0, 0, mapping 41 fs/hfs/inode.c static void hfs_write_failed(struct address_space *mapping, loff_t to) mapping 43 fs/hfs/inode.c struct inode *inode = mapping->host; mapping 51 fs/hfs/inode.c static int hfs_write_begin(struct file *file, struct address_space *mapping, mapping 58 fs/hfs/inode.c ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, mapping 60 fs/hfs/inode.c &HFS_I(mapping->host)->phys_size); mapping 62 fs/hfs/inode.c hfs_write_failed(mapping, pos + len); mapping 67 fs/hfs/inode.c static sector_t hfs_bmap(struct address_space *mapping, sector_t block) mapping 69 fs/hfs/inode.c return generic_block_bmap(mapping, block, hfs_get_block); mapping 74 fs/hfs/inode.c struct inode *inode = page->mapping->host; mapping 132 fs/hfs/inode.c struct address_space *mapping = file->f_mapping; mapping 133 fs/hfs/inode.c struct inode *inode = mapping->host; mapping 148 fs/hfs/inode.c hfs_write_failed(mapping, end); mapping 154 fs/hfs/inode.c static int hfs_writepages(struct address_space *mapping, mapping 157 fs/hfs/inode.c return mpage_writepages(mapping, wbc, hfs_get_block); mapping 24 fs/hfsplus/bitmap.c struct address_space *mapping; mapping 36 fs/hfsplus/bitmap.c mapping = sbi->alloc_file->i_mapping; mapping 37 fs/hfsplus/bitmap.c page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); mapping 81 fs/hfsplus/bitmap.c page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, mapping 132 fs/hfsplus/bitmap.c page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, mapping 168 fs/hfsplus/bitmap.c struct address_space *mapping; mapping 183 fs/hfsplus/bitmap.c mapping = sbi->alloc_file->i_mapping; mapping 185 fs/hfsplus/bitmap.c page = read_mapping_page(mapping, pnr, NULL); mapping 219 fs/hfsplus/bitmap.c page = read_mapping_page(mapping, ++pnr, NULL); mapping 403 fs/hfsplus/bnode.c struct address_space *mapping; mapping 442 fs/hfsplus/bnode.c mapping = tree->inode->i_mapping; mapping 447 fs/hfsplus/bnode.c page = read_mapping_page(mapping, block, NULL); mapping 137 fs/hfsplus/btree.c struct address_space *mapping; mapping 160 fs/hfsplus/btree.c mapping = tree->inode->i_mapping; mapping 161 fs/hfsplus/btree.c page = read_mapping_page(mapping, 0, NULL); mapping 555 fs/hfsplus/extents.c struct address_space *mapping = inode->i_mapping; mapping 560 
fs/hfsplus/extents.c res = pagecache_write_begin(NULL, mapping, size, 0, 0, mapping 564 fs/hfsplus/extents.c res = pagecache_write_end(NULL, mapping, size, mapping 35 fs/hfsplus/inode.c static void hfsplus_write_failed(struct address_space *mapping, loff_t to) mapping 37 fs/hfsplus/inode.c struct inode *inode = mapping->host; mapping 45 fs/hfsplus/inode.c static int hfsplus_write_begin(struct file *file, struct address_space *mapping, mapping 52 fs/hfsplus/inode.c ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, mapping 54 fs/hfsplus/inode.c &HFSPLUS_I(mapping->host)->phys_size); mapping 56 fs/hfsplus/inode.c hfsplus_write_failed(mapping, pos + len); mapping 61 fs/hfsplus/inode.c static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block) mapping 63 fs/hfsplus/inode.c return generic_block_bmap(mapping, block, hfsplus_get_block); mapping 68 fs/hfsplus/inode.c struct inode *inode = page->mapping->host; mapping 129 fs/hfsplus/inode.c struct address_space *mapping = file->f_mapping; mapping 130 fs/hfsplus/inode.c struct inode *inode = mapping->host; mapping 145 fs/hfsplus/inode.c hfsplus_write_failed(mapping, end); mapping 151 fs/hfsplus/inode.c static int hfsplus_writepages(struct address_space *mapping, mapping 154 fs/hfsplus/inode.c return mpage_writepages(mapping, wbc, hfsplus_get_block); mapping 131 fs/hfsplus/xattr.c struct address_space *mapping; mapping 214 fs/hfsplus/xattr.c mapping = attr_file->i_mapping; mapping 221 fs/hfsplus/xattr.c page = read_mapping_page(mapping, index, NULL); mapping 403 fs/hostfs/hostfs_kern.c struct address_space *mapping = page->mapping; mapping 404 fs/hostfs/hostfs_kern.c struct inode *inode = mapping->host; mapping 464 fs/hostfs/hostfs_kern.c static int hostfs_write_begin(struct file *file, struct address_space *mapping, mapping 470 fs/hostfs/hostfs_kern.c *pagep = grab_cache_page_write_begin(mapping, index, flags); mapping 476 fs/hostfs/hostfs_kern.c static int hostfs_write_end(struct file *file, struct address_space *mapping, mapping 480 fs/hostfs/hostfs_kern.c struct inode *inode = mapping->host; mapping 128 fs/hpfs/file.c static int hpfs_readpages(struct file *file, struct address_space *mapping, mapping 131 fs/hpfs/file.c return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); mapping 134 fs/hpfs/file.c static int hpfs_writepages(struct address_space *mapping, mapping 137 fs/hpfs/file.c return mpage_writepages(mapping, wbc, hpfs_get_block); mapping 140 fs/hpfs/file.c static void hpfs_write_failed(struct address_space *mapping, loff_t to) mapping 142 fs/hpfs/file.c struct inode *inode = mapping->host; mapping 154 fs/hpfs/file.c static int hpfs_write_begin(struct file *file, struct address_space *mapping, mapping 161 fs/hpfs/file.c ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, mapping 163 fs/hpfs/file.c &hpfs_i(mapping->host)->mmu_private); mapping 165 fs/hpfs/file.c hpfs_write_failed(mapping, pos + len); mapping 170 fs/hpfs/file.c static int hpfs_write_end(struct file *file, struct address_space *mapping, mapping 174 fs/hpfs/file.c struct inode *inode = mapping->host; mapping 176 fs/hpfs/file.c err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); mapping 178 fs/hpfs/file.c hpfs_write_failed(mapping, pos + len); mapping 188 fs/hpfs/file.c static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block) mapping 190 fs/hpfs/file.c return generic_block_bmap(mapping, block, hpfs_get_block); mapping 481 fs/hpfs/namei.c struct inode *i = page->mapping->host; 
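The hfs, hfsplus, and hpfs entries above repeat the same address_space_operations trio seen earlier for fat, jfs, minix, omfs, and sysv: a *_write_begin that delegates to cont_write_begin/block_write_begin and a *_write_failed that trims the page cache when the write cannot complete. A minimal sketch of that pattern follows; "foofs" and foofs_get_block() are hypothetical names, while the helpers are the real VFS/buffer-layer APIs used in the entries.

/*
 * Sketch of the write_begin/write_failed pattern from the entries above
 * (hfs, hpfs, minix, omfs, sysv, ...).  "foofs" and foofs_get_block()
 * are hypothetical; block_write_begin() and truncate_pagecache() are
 * the real helpers.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

static int foofs_get_block(struct inode *inode, sector_t iblock,
			   struct buffer_head *bh_result, int create);

static void foofs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	/* Drop any pages instantiated past i_size by the failed write. */
	if (to > inode->i_size)
		truncate_pagecache(inode, inode->i_size);
}

static int foofs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				foofs_get_block);
	if (unlikely(ret))
		foofs_write_failed(mapping, pos + len);
	return ret;
}

The cleanup call matters because block_write_begin() may have allocated blocks and page cache beyond the old end of file before failing; truncating back to i_size keeps the mapping consistent with on-disk metadata.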
mapping 274 fs/hugetlbfs/inode.c struct address_space *mapping = file->f_mapping; mapping 275 fs/hugetlbfs/inode.c struct inode *inode = mapping->host; mapping 302 fs/hugetlbfs/inode.c page = find_lock_page(mapping, index); mapping 333 fs/hugetlbfs/inode.c struct address_space *mapping, mapping 340 fs/hugetlbfs/inode.c static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, mapping 418 fs/hugetlbfs/inode.c struct address_space *mapping = &inode->i_data; mapping 435 fs/hugetlbfs/inode.c if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1)) mapping 443 fs/hugetlbfs/inode.c hash = hugetlb_fault_mutex_hash(h, mapping, index, 0); mapping 458 fs/hugetlbfs/inode.c i_mmap_lock_write(mapping); mapping 459 fs/hugetlbfs/inode.c hugetlb_vmdelete_list(&mapping->i_mmap, mapping 462 fs/hugetlbfs/inode.c i_mmap_unlock_write(mapping); mapping 517 fs/hugetlbfs/inode.c struct address_space *mapping = inode->i_mapping; mapping 524 fs/hugetlbfs/inode.c i_mmap_lock_write(mapping); mapping 525 fs/hugetlbfs/inode.c if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) mapping 526 fs/hugetlbfs/inode.c hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0); mapping 527 fs/hugetlbfs/inode.c i_mmap_unlock_write(mapping); mapping 546 fs/hugetlbfs/inode.c struct address_space *mapping = inode->i_mapping; mapping 557 fs/hugetlbfs/inode.c i_mmap_lock_write(mapping); mapping 558 fs/hugetlbfs/inode.c if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)) mapping 559 fs/hugetlbfs/inode.c hugetlb_vmdelete_list(&mapping->i_mmap, mapping 562 fs/hugetlbfs/inode.c i_mmap_unlock_write(mapping); mapping 575 fs/hugetlbfs/inode.c struct address_space *mapping = inode->i_mapping; mapping 647 fs/hugetlbfs/inode.c hash = hugetlb_fault_mutex_hash(h, mapping, index, addr); mapping 651 fs/hugetlbfs/inode.c page = find_get_page(mapping, index); mapping 669 fs/hugetlbfs/inode.c error = huge_add_to_page_cache(page, mapping, index); mapping 879 fs/hugetlbfs/inode.c static int hugetlbfs_migrate_page(struct address_space *mapping, mapping 885 fs/hugetlbfs/inode.c rc = migrate_huge_page_move_mapping(mapping, newpage, page); mapping 908 fs/hugetlbfs/inode.c static int hugetlbfs_error_remove_page(struct address_space *mapping, mapping 911 fs/hugetlbfs/inode.c struct inode *inode = mapping->host; mapping 135 fs/inode.c struct address_space *const mapping = &inode->i_data; mapping 180 fs/inode.c mapping->a_ops = &empty_aops; mapping 181 fs/inode.c mapping->host = inode; mapping 182 fs/inode.c mapping->flags = 0; mapping 183 fs/inode.c mapping->wb_err = 0; mapping 184 fs/inode.c atomic_set(&mapping->i_mmap_writable, 0); mapping 186 fs/inode.c atomic_set(&mapping->nr_thps, 0); mapping 188 fs/inode.c mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); mapping 189 fs/inode.c mapping->private_data = NULL; mapping 190 fs/inode.c mapping->writeback_index = 0; mapping 192 fs/inode.c inode->i_mapping = mapping; mapping 367 fs/inode.c static void __address_space_init_once(struct address_space *mapping) mapping 369 fs/inode.c xa_init_flags(&mapping->i_pages, XA_FLAGS_LOCK_IRQ | XA_FLAGS_ACCOUNT); mapping 370 fs/inode.c init_rwsem(&mapping->i_mmap_rwsem); mapping 371 fs/inode.c INIT_LIST_HEAD(&mapping->private_list); mapping 372 fs/inode.c spin_lock_init(&mapping->private_lock); mapping 373 fs/inode.c mapping->i_mmap = RB_ROOT_CACHED; mapping 376 fs/inode.c void address_space_init_once(struct address_space *mapping) mapping 378 fs/inode.c memset(mapping, 0, sizeof(*mapping)); mapping 379 fs/inode.c __address_space_init_once(mapping); mapping 57 
fs/ioctl.c struct address_space *mapping = filp->f_mapping; mapping 61 fs/ioctl.c if (!mapping->a_ops->bmap) mapping 68 fs/ioctl.c res = mapping->a_ops->bmap(mapping, block); mapping 124 fs/iomap/buffered-io.c struct inode *inode = page->mapping->host; mapping 260 fs/iomap/buffered-io.c gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); mapping 292 fs/iomap/buffered-io.c struct inode *inode = page->mapping->host; mapping 381 fs/iomap/buffered-io.c iomap_readpages(struct address_space *mapping, struct list_head *pages, mapping 393 fs/iomap/buffered-io.c ret = iomap_apply(mapping->host, pos, length, 0, ops, mapping 433 fs/iomap/buffered-io.c struct inode *inode = page->mapping->host; mapping 487 fs/iomap/buffered-io.c iomap_migrate_page(struct address_space *mapping, struct page *newpage, mapping 492 fs/iomap/buffered-io.c ret = migrate_page_move_mapping(mapping, newpage, page, 0); mapping 635 fs/iomap/buffered-io.c struct address_space *mapping = page_mapping(page); mapping 638 fs/iomap/buffered-io.c if (unlikely(!mapping)) mapping 648 fs/iomap/buffered-io.c __set_page_dirty(page, mapping, 0); mapping 652 fs/iomap/buffered-io.c __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); mapping 841 fs/iomap/buffered-io.c struct address_space *mapping = inode->i_mapping; mapping 844 fs/iomap/buffered-io.c page = read_mapping_page(mapping, offset >> PAGE_SHIFT, NULL); mapping 1043 fs/iomap/buffered-io.c if ((page->mapping != inode->i_mapping) || mapping 399 fs/iomap/direct-io.c struct address_space *mapping = iocb->ki_filp->f_mapping; mapping 456 fs/iomap/direct-io.c if (filemap_range_has_page(mapping, start, end)) { mapping 463 fs/iomap/direct-io.c ret = filemap_write_and_wait_range(mapping, start, end); mapping 473 fs/iomap/direct-io.c ret = invalidate_inode_pages2_range(mapping, mapping 130 fs/iomap/fiemap.c iomap_bmap(struct address_space *mapping, sector_t bno, mapping 133 fs/iomap/fiemap.c struct inode *inode = mapping->host; mapping 137 fs/iomap/fiemap.c if (filemap_write_and_wait(mapping)) mapping 46 fs/iomap/seek.c if (unlikely(page->mapping != inode->i_mapping)) mapping 142 fs/iomap/swapfile.c struct address_space *mapping = swap_file->f_mapping; mapping 143 fs/iomap/swapfile.c struct inode *inode = mapping->host; mapping 302 fs/isofs/compress.c struct address_space *mapping = inode->i_mapping; mapping 344 fs/isofs/compress.c pages[i] = grab_cache_page_nowait(mapping, index); mapping 1188 fs/isofs/inode.c static int isofs_readpages(struct file *file, struct address_space *mapping, mapping 1191 fs/isofs/inode.c return mpage_readpages(mapping, pages, nr_pages, isofs_get_block); mapping 1194 fs/isofs/inode.c static sector_t _isofs_bmap(struct address_space *mapping, sector_t block) mapping 1196 fs/isofs/inode.c return generic_block_bmap(mapping,block,isofs_get_block); mapping 695 fs/isofs/rock.c struct inode *inode = page->mapping->host; mapping 74 fs/jbd2/commit.c if (page->mapping) mapping 190 fs/jbd2/commit.c static int journal_submit_inode_data_buffers(struct address_space *mapping, mapping 196 fs/jbd2/commit.c .nr_to_write = mapping->nrpages * 2, mapping 201 fs/jbd2/commit.c ret = generic_writepages(mapping, &wbc); mapping 218 fs/jbd2/commit.c struct address_space *mapping; mapping 227 fs/jbd2/commit.c mapping = jinode->i_vfs_inode->i_mapping; mapping 237 fs/jbd2/commit.c err = journal_submit_inode_data_buffers(mapping, dirty_start, mapping 986 fs/jbd2/commit.c struct address_space *mapping; mapping 1003 fs/jbd2/commit.c mapping = READ_ONCE(bh->b_page->mapping); mapping 1004 
fs/jbd2/commit.c if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) { mapping 2478 fs/jbd2/journal.c (bh->b_page && bh->b_page->mapping)); mapping 24 fs/jffs2/file.c static int jffs2_write_end(struct file *filp, struct address_space *mapping, mapping 27 fs/jffs2/file.c static int jffs2_write_begin(struct file *filp, struct address_space *mapping, mapping 122 fs/jffs2/file.c struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); mapping 126 fs/jffs2/file.c ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); mapping 131 fs/jffs2/file.c static int jffs2_write_begin(struct file *filp, struct address_space *mapping, mapping 136 fs/jffs2/file.c struct inode *inode = mapping->host; mapping 142 fs/jffs2/file.c pg = grab_cache_page_write_begin(mapping, index, flags); mapping 235 fs/jffs2/file.c static int jffs2_write_end(struct file *filp, struct address_space *mapping, mapping 242 fs/jffs2/file.c struct inode *inode = mapping->host; mapping 288 fs/jfs/inode.c static int jfs_writepages(struct address_space *mapping, mapping 291 fs/jfs/inode.c return mpage_writepages(mapping, wbc, jfs_get_block); mapping 299 fs/jfs/inode.c static int jfs_readpages(struct file *file, struct address_space *mapping, mapping 302 fs/jfs/inode.c return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); mapping 305 fs/jfs/inode.c static void jfs_write_failed(struct address_space *mapping, loff_t to) mapping 307 fs/jfs/inode.c struct inode *inode = mapping->host; mapping 315 fs/jfs/inode.c static int jfs_write_begin(struct file *file, struct address_space *mapping, mapping 321 fs/jfs/inode.c ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, mapping 324 fs/jfs/inode.c jfs_write_failed(mapping, pos + len); mapping 329 fs/jfs/inode.c static sector_t jfs_bmap(struct address_space *mapping, sector_t block) mapping 331 fs/jfs/inode.c return generic_block_bmap(mapping, block, jfs_get_block); mapping 337 fs/jfs/inode.c struct address_space *mapping = file->f_mapping; mapping 353 fs/jfs/inode.c jfs_write_failed(mapping, end); mapping 106 fs/jfs/jfs_metapage.c l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; mapping 118 fs/jfs/jfs_metapage.c int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; mapping 339 fs/jfs/jfs_metapage.c struct inode *inode = page->mapping->host; mapping 473 fs/jfs/jfs_metapage.c struct inode *inode = page->mapping->host; mapping 581 fs/jfs/jfs_metapage.c struct address_space *mapping; mapping 601 fs/jfs/jfs_metapage.c mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping; mapping 610 fs/jfs/jfs_metapage.c mapping = inode->i_mapping; mapping 614 fs/jfs/jfs_metapage.c page = grab_cache_page(mapping, page_index); mapping 621 fs/jfs/jfs_metapage.c page = read_mapping_page(mapping, page_index, NULL); mapping 770 fs/jfs/jfs_metapage.c struct address_space *mapping = mapping 782 fs/jfs/jfs_metapage.c page = find_lock_page(mapping, lblock >> l2BlocksPerPage); mapping 450 fs/libfs.c int simple_write_begin(struct file *file, struct address_space *mapping, mapping 459 fs/libfs.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 496 fs/libfs.c int simple_write_end(struct file *file, struct address_space *mapping, mapping 500 fs/libfs.c struct inode *inode = page->mapping->host; mapping 51 fs/minix/dir.c struct address_space *mapping = page->mapping; mapping 52 fs/minix/dir.c struct inode *dir = mapping->host; mapping 54 fs/minix/dir.c block_write_end(NULL, mapping, pos, len, len, page, NULL); mapping 69 fs/minix/dir.c struct address_space *mapping = 
dir->i_mapping; mapping 70 fs/minix/dir.c struct page *page = read_mapping_page(mapping, n, NULL); mapping 291 fs/minix/dir.c struct inode *inode = page->mapping->host; mapping 415 fs/minix/dir.c struct inode *dir = page->mapping->host; mapping 458 fs/minix/dir.c struct address_space *mapping = page->mapping; mapping 459 fs/minix/dir.c struct inode *inode = mapping->host; mapping 396 fs/minix/inode.c static void minix_write_failed(struct address_space *mapping, loff_t to) mapping 398 fs/minix/inode.c struct inode *inode = mapping->host; mapping 406 fs/minix/inode.c static int minix_write_begin(struct file *file, struct address_space *mapping, mapping 412 fs/minix/inode.c ret = block_write_begin(mapping, pos, len, flags, pagep, mapping 415 fs/minix/inode.c minix_write_failed(mapping, pos + len); mapping 420 fs/minix/inode.c static sector_t minix_bmap(struct address_space *mapping, sector_t block) mapping 422 fs/minix/inode.c return generic_block_bmap(mapping,block,minix_get_block); mapping 106 fs/mpage.c struct inode *inode = page->mapping->host; mapping 159 fs/mpage.c struct inode *inode = page->mapping->host; mapping 180 fs/mpage.c gfp = readahead_gfp_mask(page->mapping); mapping 183 fs/mpage.c gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); mapping 385 fs/mpage.c mpage_readpages(struct address_space *mapping, struct list_head *pages, mapping 399 fs/mpage.c if (!add_to_page_cache_lru(page, mapping, mapping 401 fs/mpage.c readahead_gfp_mask(mapping))) { mapping 501 fs/mpage.c struct address_space *mapping = page->mapping; mapping 502 fs/mpage.c struct inode *inode = page->mapping->host; mapping 678 fs/mpage.c ret = mapping->a_ops->writepage(page, wbc); mapping 686 fs/mpage.c mapping_set_error(mapping, ret); mapping 712 fs/mpage.c mpage_writepages(struct address_space *mapping, mapping 721 fs/mpage.c ret = generic_writepages(mapping, wbc); mapping 730 fs/mpage.c ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); mapping 4772 fs/namei.c struct address_space *mapping = inode->i_mapping; mapping 4775 fs/namei.c page = find_get_page(mapping, 0); mapping 4783 fs/namei.c page = read_mapping_page(mapping, 0, NULL); mapping 4788 fs/namei.c BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); mapping 4818 fs/namei.c struct address_space *mapping = inode->i_mapping; mapping 4827 fs/namei.c err = pagecache_write_begin(NULL, mapping, 0, len-1, mapping 4834 fs/namei.c err = pagecache_write_end(NULL, mapping, 0, len-1, len-1, mapping 892 fs/nfs/blocklayout/blocklayout.c struct address_space *mapping = inode->i_mapping; mapping 899 fs/nfs/blocklayout/blocklayout.c end = page_cache_next_miss(mapping, idx + 1, ULONG_MAX); mapping 732 fs/nfs/dir.c if (desc->page->mapping != NULL) { mapping 545 fs/nfs/direct.c struct address_space *mapping = file->f_mapping; mapping 546 fs/nfs/direct.c struct inode *inode = mapping->host; mapping 551 fs/nfs/direct.c nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count); mapping 961 fs/nfs/direct.c struct address_space *mapping = file->f_mapping; mapping 962 fs/nfs/direct.c struct inode *inode = mapping->host; mapping 974 fs/nfs/direct.c nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count); mapping 1004 fs/nfs/direct.c if (mapping->nrpages) { mapping 1005 fs/nfs/direct.c invalidate_inode_pages2_range(mapping, mapping 321 fs/nfs/file.c static int nfs_write_begin(struct file *file, struct address_space *mapping, mapping 331 fs/nfs/file.c file, mapping->host->i_ino, len, (long long) pos); mapping 334 fs/nfs/file.c page = 
grab_cache_page_write_begin(mapping, index, flags); mapping 354 fs/nfs/file.c static int nfs_write_end(struct file *file, struct address_space *mapping, mapping 363 fs/nfs/file.c file, mapping->host->i_ino, len, (long long) pos); mapping 392 fs/nfs/file.c NFS_I(mapping->host)->write_io += copied; mapping 394 fs/nfs/file.c if (nfs_ctx_key_to_expire(ctx, mapping->host)) { mapping 395 fs/nfs/file.c status = nfs_wb_all(mapping->host); mapping 421 fs/nfs/file.c nfs_fscache_invalidate_page(page, page->mapping->host); mapping 444 fs/nfs/file.c struct address_space *mapping = page_file_mapping(page); mapping 446 fs/nfs/file.c if (!mapping || PageSwapCache(page)) mapping 454 fs/nfs/file.c nfsi = NFS_I(mapping->host); mapping 539 fs/nfs/file.c struct address_space *mapping; mapping 554 fs/nfs/file.c mapping = page_file_mapping(page); mapping 555 fs/nfs/file.c if (mapping != inode->i_mapping) mapping 341 fs/nfs/fscache.c struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host); mapping 345 fs/nfs/fscache.c cookie, page, NFS_I(page->mapping->host)); mapping 350 fs/nfs/fscache.c nfs_inc_fscache_stats(page->mapping->host, mapping 374 fs/nfs/fscache.c nfs_inc_fscache_stats(page->mapping->host, mapping 396 fs/nfs/fscache.c error = nfs_readpage_async(context, page->mapping->host, page); mapping 446 fs/nfs/fscache.c struct address_space *mapping, mapping 457 fs/nfs/fscache.c mapping, pages, nr_pages, mapping 460 fs/nfs/fscache.c mapping_gfp_mask(mapping)); mapping 144 fs/nfs/fscache.h struct address_space *mapping, mapping 149 fs/nfs/fscache.h return __nfs_readpages_from_fscache(ctx, inode, mapping, pages, mapping 223 fs/nfs/fscache.h struct address_space *mapping, mapping 150 fs/nfs/inode.c int nfs_sync_mapping(struct address_space *mapping) mapping 154 fs/nfs/inode.c if (mapping->nrpages != 0) { mapping 155 fs/nfs/inode.c unmap_mapping_range(mapping, 0, 0, 0); mapping 156 fs/nfs/inode.c ret = nfs_wb_all(mapping->host); mapping 253 fs/nfs/inode.c void nfs_zap_mapping(struct inode *inode, struct address_space *mapping) mapping 255 fs/nfs/inode.c if (mapping->nrpages != 0) { mapping 1214 fs/nfs/inode.c static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) mapping 1219 fs/nfs/inode.c if (mapping->nrpages != 0) { mapping 1221 fs/nfs/inode.c ret = nfs_sync_mapping(mapping); mapping 1225 fs/nfs/inode.c ret = invalidate_inode_pages2(mapping); mapping 1276 fs/nfs/inode.c struct address_space *mapping) mapping 1326 fs/nfs/inode.c ret = nfs_invalidate_mapping(inode, mapping); mapping 514 fs/nfs/internal.h int nfs_filemap_write_and_wait_range(struct address_space *mapping, mapping 106 fs/nfs/read.c struct address_space *mapping = page_file_mapping(page); mapping 111 fs/nfs/read.c generic_error_remove_page(mapping, page); mapping 404 fs/nfs/read.c int nfs_readpages(struct file *filp, struct address_space *mapping, mapping 412 fs/nfs/read.c struct inode *inode = mapping->host; mapping 435 fs/nfs/read.c ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping, mapping 443 fs/nfs/read.c ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); mapping 170 fs/nfs/write.c struct address_space *mapping = page_file_mapping(page); mapping 175 fs/nfs/write.c spin_lock(&mapping->private_lock); mapping 181 fs/nfs/write.c spin_unlock(&mapping->private_lock); mapping 244 fs/nfs/write.c static void nfs_set_pageerror(struct address_space *mapping) mapping 246 fs/nfs/write.c struct inode *inode = mapping->host; mapping 248 fs/nfs/write.c nfs_zap_mapping(mapping->host, mapping); 
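The `page->mapping != mapping` comparisons that run through this listing (fs/f2fs/data.c 958, 2320, 2694; fs/gfs2/aops.c 261; fs/nfs/file.c 555; fs/nilfs2/file.c 58; fs/ocfs2/mmap.c 77) all guard the same race: once a page has been unlocked, truncation or migration may detach it from its address_space. A sketch of the recheck idiom, with hypothetical foofs naming; find_lock_page() and friends are the real page cache API:

/*
 * Sketch of the page-stability recheck repeated throughout the listing:
 * after (re)locking a page cache page, confirm it still belongs to the
 * expected mapping before trusting its contents.
 */
#include <linux/pagemap.h>

static struct page *foofs_find_stable_page(struct address_space *mapping,
					   pgoff_t index)
{
	struct page *page;

repeat:
	page = find_lock_page(mapping, index);
	if (!page)
		return NULL;

	/* Truncate or migration can detach the page while it was unlocked. */
	if (unlikely(page->mapping != mapping)) {
		unlock_page(page);
		put_page(page);
		goto repeat;
	}
	return page;
}

Filesystems in the listing either retry as above (f2fs_write_begin) or bail out and let the caller restart (gfs2, nfs); either way the comparison must happen under the page lock, which is what pins ->mapping.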
mapping 721 fs/nfs/write.c int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) mapping 723 fs/nfs/write.c struct inode *inode = mapping->host; mapping 737 fs/nfs/write.c err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); mapping 757 fs/nfs/write.c struct address_space *mapping = page_file_mapping(req->wb_page); mapping 769 fs/nfs/write.c spin_lock(&mapping->private_lock); mapping 778 fs/nfs/write.c spin_unlock(&mapping->private_lock); mapping 793 fs/nfs/write.c struct address_space *mapping = page_file_mapping(req->wb_page); mapping 794 fs/nfs/write.c struct inode *inode = mapping->host; mapping 801 fs/nfs/write.c spin_lock(&mapping->private_lock); mapping 807 fs/nfs/write.c spin_unlock(&mapping->private_lock); mapping 1367 fs/nfs/write.c struct address_space *mapping = page_file_mapping(page); mapping 1368 fs/nfs/write.c struct inode *inode = mapping->host; mapping 1386 fs/nfs/write.c nfs_set_pageerror(mapping); mapping 2007 fs/nfs/write.c int nfs_filemap_write_and_wait_range(struct address_space *mapping, mapping 2012 fs/nfs/write.c ret = filemap_write_and_wait_range(mapping, lstart, lend); mapping 2014 fs/nfs/write.c ret = pnfs_sync_inode(mapping->host, true); mapping 2106 fs/nfs/write.c int nfs_migrate_page(struct address_space *mapping, struct page *newpage, mapping 2123 fs/nfs/write.c return migrate_page(mapping, newpage, page, mode); mapping 222 fs/nfsd/filecache.c struct address_space *mapping; mapping 226 fs/nfsd/filecache.c mapping = file->f_mapping; mapping 227 fs/nfsd/filecache.c return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) || mapping 228 fs/nfsd/filecache.c mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK); mapping 130 fs/nilfs2/btnode.c struct address_space *mapping; mapping 141 fs/nilfs2/btnode.c mapping = page->mapping; mapping 145 fs/nilfs2/btnode.c if (!still_dirty && mapping) mapping 146 fs/nilfs2/btnode.c invalidate_inode_pages2_range(mapping, index, index); mapping 400 fs/nilfs2/btree.c inode = bh->b_page->mapping->host; mapping 96 fs/nilfs2/dir.c struct address_space *mapping, mapping 99 fs/nilfs2/dir.c struct inode *dir = mapping->host; mapping 106 fs/nilfs2/dir.c copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); mapping 118 fs/nilfs2/dir.c struct inode *dir = page->mapping->host; mapping 191 fs/nilfs2/dir.c struct address_space *mapping = dir->i_mapping; mapping 192 fs/nilfs2/dir.c struct page *page = read_mapping_page(mapping, n, NULL); mapping 422 fs/nilfs2/dir.c struct address_space *mapping = page->mapping; mapping 430 fs/nilfs2/dir.c nilfs_commit_chunk(page, mapping, from, to); mapping 521 fs/nilfs2/dir.c nilfs_commit_chunk(page, page->mapping, from, to); mapping 540 fs/nilfs2/dir.c struct address_space *mapping = page->mapping; mapping 541 fs/nilfs2/dir.c struct inode *inode = mapping->host; mapping 569 fs/nilfs2/dir.c nilfs_commit_chunk(page, mapping, from, to); mapping 581 fs/nilfs2/dir.c struct address_space *mapping = inode->i_mapping; mapping 582 fs/nilfs2/dir.c struct page *page = grab_cache_page(mapping, 0); mapping 612 fs/nilfs2/dir.c nilfs_commit_chunk(page, mapping, 0, chunk_size); mapping 58 fs/nilfs2/file.c if (page->mapping != inode->i_mapping || mapping 143 fs/nilfs2/gcinode.c struct inode *inode = bh->b_page->mapping->host; mapping 156 fs/nilfs2/inode.c static int nilfs_readpages(struct file *file, struct address_space *mapping, mapping 159 fs/nilfs2/inode.c return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); mapping 162 fs/nilfs2/inode.c static int 
nilfs_writepages(struct address_space *mapping, mapping 165 fs/nilfs2/inode.c struct inode *inode = mapping->host; mapping 169 fs/nilfs2/inode.c nilfs_clear_dirty_pages(mapping, false); mapping 182 fs/nilfs2/inode.c struct inode *inode = page->mapping->host; mapping 212 fs/nilfs2/inode.c struct inode *inode = page->mapping->host; mapping 246 fs/nilfs2/inode.c void nilfs_write_failed(struct address_space *mapping, loff_t to) mapping 248 fs/nilfs2/inode.c struct inode *inode = mapping->host; mapping 256 fs/nilfs2/inode.c static int nilfs_write_begin(struct file *file, struct address_space *mapping, mapping 261 fs/nilfs2/inode.c struct inode *inode = mapping->host; mapping 267 fs/nilfs2/inode.c err = block_write_begin(mapping, pos, len, flags, pagep, mapping 270 fs/nilfs2/inode.c nilfs_write_failed(mapping, pos + len); mapping 276 fs/nilfs2/inode.c static int nilfs_write_end(struct file *file, struct address_space *mapping, mapping 280 fs/nilfs2/inode.c struct inode *inode = mapping->host; mapping 287 fs/nilfs2/inode.c copied = generic_write_end(file, mapping, pos, len, copied, page, mapping 403 fs/nilfs2/mdt.c struct inode *inode = page->mapping->host; mapping 271 fs/nilfs2/nilfs.h extern void nilfs_write_failed(struct address_space *mapping, loff_t to); mapping 48 fs/nilfs2/page.c struct address_space *mapping, mapping 57 fs/nilfs2/page.c page = grab_cache_page(mapping, index); mapping 164 fs/nilfs2/page.c m = page->mapping; mapping 335 fs/nilfs2/page.c page->mapping = NULL; mapping 338 fs/nilfs2/page.c page->mapping = dmap; mapping 359 fs/nilfs2/page.c void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) mapping 367 fs/nilfs2/page.c while (pagevec_lookup_tag(&pvec, mapping, &index, mapping 388 fs/nilfs2/page.c struct inode *inode = page->mapping->host; mapping 441 fs/nilfs2/page.c void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) mapping 443 fs/nilfs2/page.c mapping->host = inode; mapping 444 fs/nilfs2/page.c mapping->flags = 0; mapping 445 fs/nilfs2/page.c mapping_set_gfp_mask(mapping, GFP_NOFS); mapping 446 fs/nilfs2/page.c mapping->private_data = NULL; mapping 447 fs/nilfs2/page.c mapping->a_ops = &empty_aops; mapping 463 fs/nilfs2/page.c struct address_space *mapping = page->mapping; mapping 465 fs/nilfs2/page.c if (mapping) { mapping 466 fs/nilfs2/page.c xa_lock_irq(&mapping->i_pages); mapping 468 fs/nilfs2/page.c __xa_clear_mark(&mapping->i_pages, page_index(page), mapping 470 fs/nilfs2/page.c xa_unlock_irq(&mapping->i_pages); mapping 473 fs/nilfs2/page.c xa_unlock_irq(&mapping->i_pages); mapping 46 fs/nilfs2/page.h void nilfs_mapping_init(struct address_space *mapping, struct inode *inode); mapping 687 fs/nilfs2/segment.c struct address_space *mapping = inode->i_mapping; mapping 705 fs/nilfs2/segment.c !pagevec_lookup_range_tag(&pvec, mapping, &index, last, mapping 741 fs/nilfs2/segment.c struct address_space *mapping = &ii->i_btnode_cache; mapping 749 fs/nilfs2/segment.c while (pagevec_lookup_tag(&pvec, mapping, &index, mapping 1575 fs/nilfs2/segment.c inode = bh->b_page->mapping->host; mapping 55 fs/ntfs/aops.c vi = page->mapping->host; mapping 184 fs/ntfs/aops.c vi = page->mapping->host; mapping 395 fs/ntfs/aops.c vi = page->mapping->host; mapping 551 fs/ntfs/aops.c vi = page->mapping->host; mapping 906 fs/ntfs/aops.c struct inode *vi = page->mapping->host; mapping 1342 fs/ntfs/aops.c struct inode *vi = page->mapping->host; mapping 1549 fs/ntfs/aops.c static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) mapping 
1555 fs/ntfs/aops.c ntfs_inode *ni = NTFS_I(mapping->host); mapping 1716 fs/ntfs/aops.c struct address_space *mapping = page->mapping; mapping 1717 fs/ntfs/aops.c ntfs_inode *ni = NTFS_I(mapping->host); mapping 1724 fs/ntfs/aops.c spin_lock(&mapping->private_lock); mapping 1726 fs/ntfs/aops.c spin_unlock(&mapping->private_lock); mapping 1728 fs/ntfs/aops.c spin_lock(&mapping->private_lock); mapping 1752 fs/ntfs/aops.c spin_unlock(&mapping->private_lock); mapping 72 fs/ntfs/aops.h static inline struct page *ntfs_map_page(struct address_space *mapping, mapping 75 fs/ntfs/aops.h struct page *page = read_mapping_page(mapping, index, NULL); mapping 2482 fs/ntfs/attrib.c struct address_space *mapping; mapping 2500 fs/ntfs/attrib.c mapping = VFS_I(ni)->i_mapping; mapping 2515 fs/ntfs/attrib.c page = read_mapping_page(mapping, idx, NULL); mapping 2534 fs/ntfs/attrib.c balance_dirty_pages_ratelimited(mapping); mapping 2543 fs/ntfs/attrib.c page = grab_cache_page(mapping, idx); mapping 2575 fs/ntfs/attrib.c balance_dirty_pages_ratelimited(mapping); mapping 2580 fs/ntfs/attrib.c page = read_mapping_page(mapping, idx, NULL); mapping 2592 fs/ntfs/attrib.c balance_dirty_pages_ratelimited(mapping); mapping 38 fs/ntfs/bitmap.c struct address_space *mapping; mapping 60 fs/ntfs/bitmap.c mapping = vi->i_mapping; mapping 61 fs/ntfs/bitmap.c page = ntfs_map_page(mapping, index); mapping 113 fs/ntfs/bitmap.c page = ntfs_map_page(mapping, ++index); mapping 466 fs/ntfs/compress.c struct address_space *mapping = page->mapping; mapping 467 fs/ntfs/compress.c ntfs_inode *ni = NTFS_I(mapping->host); mapping 563 fs/ntfs/compress.c pages[i] = grab_cache_page_nowait(mapping, offset); mapping 106 fs/ntfs/file.c struct address_space *mapping; mapping 208 fs/ntfs/file.c mapping = vi->i_mapping; mapping 216 fs/ntfs/file.c page = read_mapping_page(mapping, index, NULL); mapping 265 fs/ntfs/file.c balance_dirty_pages_ratelimited(mapping); mapping 494 fs/ntfs/file.c static inline int __ntfs_grab_cache_pages(struct address_space *mapping, mapping 503 fs/ntfs/file.c pages[nr] = find_get_page_flags(mapping, index, FGP_LOCK | mapping 507 fs/ntfs/file.c *cached_page = page_cache_alloc(mapping); mapping 513 fs/ntfs/file.c err = add_to_page_cache_lru(*cached_page, mapping, mapping 515 fs/ntfs/file.c mapping_gfp_constraint(mapping, GFP_KERNEL)); mapping 602 fs/ntfs/file.c vi = pages[0]->mapping->host; mapping 1399 fs/ntfs/file.c vi = pages[0]->mapping->host; mapping 1559 fs/ntfs/file.c vi = page->mapping->host; mapping 1733 fs/ntfs/file.c struct address_space *mapping = file->f_mapping; mapping 1734 fs/ntfs/file.c struct inode *vi = mapping->host; mapping 1843 fs/ntfs/file.c status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages, mapping 1902 fs/ntfs/file.c balance_dirty_pages_ratelimited(mapping); mapping 143 fs/ntfs/lcnalloc.c struct address_space *mapping; mapping 241 fs/ntfs/lcnalloc.c mapping = lcnbmp_vi->i_mapping; mapping 271 fs/ntfs/lcnalloc.c page = ntfs_map_page(mapping, last_read_pos >> mapping 476 fs/ntfs/logfile.c struct address_space *mapping = log_vi->i_mapping; mapping 533 fs/ntfs/logfile.c page = ntfs_map_page(mapping, idx); mapping 2448 fs/ntfs/super.c struct address_space *mapping = vol->lcnbmp_ino->i_mapping; mapping 2472 fs/ntfs/super.c page = read_mapping_page(mapping, index, NULL); mapping 2528 fs/ntfs/super.c struct address_space *mapping = vol->mftbmp_ino->i_mapping; mapping 2543 fs/ntfs/super.c page = read_mapping_page(mapping, index, NULL); mapping 6882 fs/ocfs2/alloc.c struct address_space *mapping = 
inode->i_mapping; mapping 6892 fs/ocfs2/alloc.c pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); mapping 283 fs/ocfs2/aops.c struct inode *inode = page->mapping->host; mapping 354 fs/ocfs2/aops.c static int ocfs2_readpages(struct file *filp, struct address_space *mapping, mapping 358 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 392 fs/ocfs2/aops.c err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block); mapping 415 fs/ocfs2/aops.c (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno, mapping 457 fs/ocfs2/aops.c static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) mapping 462 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 1037 fs/ocfs2/aops.c static int ocfs2_grab_pages_for_write(struct address_space *mapping, mapping 1045 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 1089 fs/ocfs2/aops.c if (mmap_page->mapping != mapping) { mapping 1090 fs/ocfs2/aops.c WARN_ON(mmap_page->mapping); mapping 1105 fs/ocfs2/aops.c wc->w_pages[i] = find_or_create_page(mapping, index, mapping 1127 fs/ocfs2/aops.c static int ocfs2_write_cluster(struct address_space *mapping, mapping 1138 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 1231 fs/ocfs2/aops.c static int ocfs2_write_cluster_by_desc(struct address_space *mapping, mapping 1241 fs/ocfs2/aops.c struct ocfs2_super *osb = OCFS2_SB(mapping->host->i_sb); mapping 1255 fs/ocfs2/aops.c ret = ocfs2_write_cluster(mapping, &desc->c_phys, mapping 1485 fs/ocfs2/aops.c static int ocfs2_write_begin_inline(struct address_space *mapping, mapping 1502 fs/ocfs2/aops.c page = find_or_create_page(mapping, 0, GFP_NOFS); mapping 1551 fs/ocfs2/aops.c static int ocfs2_try_to_write_inline_data(struct address_space *mapping, mapping 1598 fs/ocfs2/aops.c ret = ocfs2_write_begin_inline(mapping, inode, wc); mapping 1659 fs/ocfs2/aops.c int ocfs2_write_begin_nolock(struct address_space *mapping, mapping 1667 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 1684 fs/ocfs2/aops.c ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len, mapping 1812 fs/ocfs2/aops.c ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, mapping 1831 fs/ocfs2/aops.c ret = ocfs2_write_cluster_by_desc(mapping, data_ac, meta_ac, wc, pos, mapping 1895 fs/ocfs2/aops.c static int ocfs2_write_begin(struct file *file, struct address_space *mapping, mapping 1901 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 1918 fs/ocfs2/aops.c ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_BUFFER, mapping 1963 fs/ocfs2/aops.c int ocfs2_write_end_nolock(struct address_space *mapping, mapping 1968 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 2077 fs/ocfs2/aops.c static int ocfs2_write_end(struct file *file, struct address_space *mapping, mapping 2082 fs/ocfs2/aops.c struct inode *inode = mapping->host; mapping 2084 fs/ocfs2/aops.c ret = ocfs2_write_end_nolock(mapping, pos, len, copied, fsdata); mapping 32 fs/ocfs2/aops.h int ocfs2_write_end_nolock(struct address_space *mapping, mapping 41 fs/ocfs2/aops.h int ocfs2_write_begin_nolock(struct address_space *mapping, mapping 3905 fs/ocfs2/dlmglue.c struct address_space *mapping; mapping 3909 fs/ocfs2/dlmglue.c mapping = inode->i_mapping; mapping 3928 fs/ocfs2/dlmglue.c unmap_mapping_range(mapping, 0, 0, 0); mapping 3930 fs/ocfs2/dlmglue.c if (filemap_fdatawrite(mapping)) { mapping 3934 fs/ocfs2/dlmglue.c sync_mapping_buffers(mapping); mapping 3936 fs/ocfs2/dlmglue.c truncate_inode_pages(mapping, 0); 
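The ocfs2 dlmglue sequence just listed (unmap_mapping_range, filemap_fdatawrite, sync_mapping_buffers, truncate_inode_pages, filemap_fdatawait) is one instance of the flush-then-invalidate choreography that nfs_invalidate_mapping and the orangefs entries also follow. A condensed sketch under the assumption of a hypothetical foofs_drop_pagecache() wrapper; every helper called is a real mm/pagemap API from the entries:

/*
 * Sketch of the flush-and-invalidate sequence visible in the ocfs2,
 * nfs and orangefs entries: write dirty pages out, kill userspace
 * mappings, then evict the now-clean page cache.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int foofs_drop_pagecache(struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	int ret;

	if (mapping->nrpages == 0)
		return 0;

	/* Zap all mmaps first so no new dirtying races with writeback. */
	unmap_mapping_range(mapping, 0, 0, 0);

	/* Push dirty data to disk and wait for completion. */
	ret = filemap_write_and_wait(mapping);
	if (ret)
		return ret;

	/* Evict the clean pages; fails if a page is still pinned. */
	return invalidate_inode_pages2(mapping);
}

The ordering is the point of the pattern: unmapping before writeback ensures a racing mmap store cannot re-dirty a page between the flush and the eviction, which is why ocfs2 and nfs both call unmap_mapping_range() first.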
mapping 3943 fs/ocfs2/dlmglue.c filemap_fdatawait(mapping); mapping 754 fs/ocfs2/file.c struct address_space *mapping = inode->i_mapping; mapping 774 fs/ocfs2/file.c page = find_or_create_page(mapping, index, GFP_NOFS); mapping 1516 fs/ocfs2/file.c struct address_space *mapping = inode->i_mapping; mapping 1523 fs/ocfs2/file.c unmap_mapping_range(mapping, start, end - start, 0); mapping 1524 fs/ocfs2/file.c truncate_inode_pages_range(mapping, start, end - 1); mapping 1702 fs/ocfs2/file.c struct address_space *mapping = inode->i_mapping; mapping 1734 fs/ocfs2/file.c unmap_mapping_range(mapping, 0, 0, 0); mapping 1735 fs/ocfs2/file.c truncate_inode_pages(mapping, 0); mapping 54 fs/ocfs2/mmap.c struct address_space *mapping = inode->i_mapping; mapping 77 fs/ocfs2/mmap.c if ((page->mapping != inode->i_mapping) || mapping 95 fs/ocfs2/mmap.c err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, mapping 108 fs/ocfs2/mmap.c err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata); mapping 2915 fs/ocfs2/refcounttree.c struct address_space *mapping = inode->i_mapping; mapping 2942 fs/ocfs2/refcounttree.c page = find_or_create_page(mapping, page_index, GFP_NOFS); mapping 59 fs/ocfs2/symlink.c struct inode *inode = page->mapping->host; mapping 292 fs/omfs/file.c static int omfs_readpages(struct file *file, struct address_space *mapping, mapping 295 fs/omfs/file.c return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); mapping 304 fs/omfs/file.c omfs_writepages(struct address_space *mapping, struct writeback_control *wbc) mapping 306 fs/omfs/file.c return mpage_writepages(mapping, wbc, omfs_get_block); mapping 309 fs/omfs/file.c static void omfs_write_failed(struct address_space *mapping, loff_t to) mapping 311 fs/omfs/file.c struct inode *inode = mapping->host; mapping 319 fs/omfs/file.c static int omfs_write_begin(struct file *file, struct address_space *mapping, mapping 325 fs/omfs/file.c ret = block_write_begin(mapping, pos, len, flags, pagep, mapping 328 fs/omfs/file.c omfs_write_failed(mapping, pos + len); mapping 333 fs/omfs/file.c static sector_t omfs_bmap(struct address_space *mapping, sector_t block) mapping 335 fs/omfs/file.c return generic_block_bmap(mapping, block, omfs_get_block); mapping 272 fs/orangefs/file.c struct address_space *mapping = inode->i_mapping; mapping 295 fs/orangefs/file.c unmap_mapping_range(mapping, 0, 0, 0); mapping 296 fs/orangefs/file.c ret = filemap_write_and_wait(mapping); mapping 298 fs/orangefs/file.c ret = invalidate_inode_pages2(mapping); mapping 21 fs/orangefs/inode.c struct inode *inode = page->mapping->host; mapping 61 fs/orangefs/inode.c mapping_set_error(page->mapping, ret); mapping 97 fs/orangefs/inode.c struct inode *inode = ow->pages[0]->mapping->host; mapping 133 fs/orangefs/inode.c mapping_set_error(ow->pages[i]->mapping, ret); mapping 206 fs/orangefs/inode.c mapping_set_error(page->mapping, ret); mapping 218 fs/orangefs/inode.c static int orangefs_writepages(struct address_space *mapping, mapping 240 fs/orangefs/inode.c ret = write_cache_pages(mapping, wbc, orangefs_writepages_callback, ow); mapping 254 fs/orangefs/inode.c struct inode *inode = page->mapping->host; mapping 389 fs/orangefs/inode.c struct address_space *mapping, mapping 400 fs/orangefs/inode.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 446 fs/orangefs/inode.c static int orangefs_write_end(struct file *file, struct address_space *mapping, mapping 449 fs/orangefs/inode.c struct inode *inode = page->mapping->host; mapping 776 
fs/orangefs/inode.c if (page->mapping != inode->i_mapping) { mapping 415 fs/proc/vmcore.c struct address_space *mapping = vmf->vma->vm_file->f_mapping; mapping 422 fs/proc/vmcore.c page = find_or_create_page(mapping, index, GFP_KERNEL); mapping 254 fs/qnx4/inode.c static sector_t qnx4_bmap(struct address_space *mapping, sector_t block) mapping 256 fs/qnx4/inode.c return generic_block_bmap(mapping,block,qnx4_get_block); mapping 29 fs/qnx6/dir.c struct address_space *mapping = dir->i_mapping; mapping 30 fs/qnx6/dir.c struct page *page = read_mapping_page(mapping, n, NULL); mapping 54 fs/qnx6/dir.c struct address_space *mapping = sbi->longfile->i_mapping; mapping 55 fs/qnx6/dir.c struct page *page = read_mapping_page(mapping, n, NULL); mapping 102 fs/qnx6/inode.c static int qnx6_readpages(struct file *file, struct address_space *mapping, mapping 105 fs/qnx6/inode.c return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block); mapping 187 fs/qnx6/inode.c struct address_space *mapping = root->i_mapping; mapping 188 fs/qnx6/inode.c struct page *page = read_mapping_page(mapping, 0, NULL); mapping 496 fs/qnx6/inode.c static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) mapping 498 fs/qnx6/inode.c return generic_block_bmap(mapping, block, qnx6_get_block); mapping 528 fs/qnx6/inode.c struct address_space *mapping; mapping 550 fs/qnx6/inode.c mapping = sbi->inodes->i_mapping; mapping 551 fs/qnx6/inode.c page = read_mapping_page(mapping, n, NULL); mapping 1899 fs/read_write.c src_page->mapping != src->i_mapping || mapping 1900 fs/read_write.c dest_page->mapping != dest->i_mapping) { mapping 1164 fs/reiserfs/inode.c reiserfs_readpages(struct file *file, struct address_space *mapping, mapping 1167 fs/reiserfs/inode.c return mpage_readpages(mapping, pages, nr_pages, reiserfs_get_block); mapping 2530 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 2750 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 2762 fs/reiserfs/inode.c struct address_space *mapping, mapping 2772 fs/reiserfs/inode.c inode = mapping->host; mapping 2781 fs/reiserfs/inode.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 2837 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 2895 fs/reiserfs/inode.c static int reiserfs_write_end(struct file *file, struct address_space *mapping, mapping 2899 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 3001 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 3162 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 3213 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 3232 fs/reiserfs/inode.c struct inode *inode = page->mapping->host; mapping 173 fs/reiserfs/ioctl.c struct address_space *mapping; mapping 208 fs/reiserfs/ioctl.c mapping = inode->i_mapping; mapping 209 fs/reiserfs/ioctl.c page = grab_cache_page(mapping, index); mapping 609 fs/reiserfs/journal.c if (!page->mapping && trylock_page(page)) { mapping 612 fs/reiserfs/journal.c if (!page->mapping) mapping 873 fs/reiserfs/journal.c if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) { mapping 180 fs/reiserfs/tail_conversion.c struct inode *inode = bh->b_page->mapping->host; mapping 435 fs/reiserfs/xattr.c struct address_space *mapping = dir->i_mapping; mapping 441 fs/reiserfs/xattr.c mapping_set_gfp_mask(mapping, GFP_NOFS); mapping 442 fs/reiserfs/xattr.c page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL); mapping 104 fs/romfs/super.c 
struct inode *inode = page->mapping->host; mapping 51 fs/splice.c struct address_space *mapping; mapping 55 fs/splice.c mapping = page_mapping(page); mapping 56 fs/splice.c if (mapping) { mapping 77 fs/splice.c if (remove_mapping(mapping, page)) { mapping 116 fs/splice.c if (!page->mapping) { mapping 385 fs/squashfs/file.c struct inode *inode = page->mapping->host; mapping 404 fs/squashfs/file.c grab_cache_page_nowait(page->mapping, i); mapping 423 fs/squashfs/file.c struct inode *inode = page->mapping->host; mapping 449 fs/squashfs/file.c struct inode *inode = page->mapping->host; mapping 23 fs/squashfs/file_cache.c struct inode *i = page->mapping->host; mapping 29 fs/squashfs/file_direct.c struct inode *inode = target_page->mapping->host; mapping 61 fs/squashfs/file_direct.c grab_cache_page_nowait(target_page->mapping, n); mapping 147 fs/squashfs/file_direct.c struct inode *i = target_page->mapping->host; mapping 35 fs/squashfs/symlink.c struct inode *inode = page->mapping->host; mapping 241 fs/sync.c struct address_space *mapping; mapping 286 fs/sync.c mapping = file->f_mapping; mapping 301 fs/sync.c ret = __filemap_fdatawrite_range(mapping, offset, endbyte, mapping 39 fs/sysv/dir.c struct address_space *mapping = page->mapping; mapping 40 fs/sysv/dir.c struct inode *dir = mapping->host; mapping 43 fs/sysv/dir.c block_write_end(NULL, mapping, pos, len, len, page, NULL); mapping 57 fs/sysv/dir.c struct address_space *mapping = dir->i_mapping; mapping 58 fs/sysv/dir.c struct page *page = read_mapping_page(mapping, n, NULL); mapping 232 fs/sysv/dir.c struct inode *inode = page->mapping->host; mapping 330 fs/sysv/dir.c struct inode *dir = page->mapping->host; mapping 469 fs/sysv/itree.c static void sysv_write_failed(struct address_space *mapping, loff_t to) mapping 471 fs/sysv/itree.c struct inode *inode = mapping->host; mapping 479 fs/sysv/itree.c static int sysv_write_begin(struct file *file, struct address_space *mapping, mapping 485 fs/sysv/itree.c ret = block_write_begin(mapping, pos, len, flags, pagep, get_block); mapping 487 fs/sysv/itree.c sysv_write_failed(mapping, pos + len); mapping 492 fs/sysv/itree.c static sector_t sysv_bmap(struct address_space *mapping, sector_t block) mapping 494 fs/sysv/itree.c return generic_block_bmap(mapping,block,get_block); mapping 105 fs/ubifs/file.c struct inode *inode = page->mapping->host; mapping 217 fs/ubifs/file.c static int write_begin_slow(struct address_space *mapping, mapping 221 fs/ubifs/file.c struct inode *inode = mapping->host; mapping 247 fs/ubifs/file.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 421 fs/ubifs/file.c static int ubifs_write_begin(struct file *file, struct address_space *mapping, mapping 425 fs/ubifs/file.c struct inode *inode = mapping->host; mapping 440 fs/ubifs/file.c page = grab_cache_page_write_begin(mapping, index, flags); mapping 496 fs/ubifs/file.c return write_begin_slow(mapping, pos, len, pagep, flags); mapping 536 fs/ubifs/file.c static int ubifs_write_end(struct file *file, struct address_space *mapping, mapping 540 fs/ubifs/file.c struct inode *inode = mapping->host; mapping 610 fs/ubifs/file.c struct inode *inode = page->mapping->host; mapping 721 fs/ubifs/file.c struct address_space *mapping = page1->mapping; mapping 722 fs/ubifs/file.c struct inode *inode = mapping->host; mapping 727 fs/ubifs/file.c gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS; mapping 789 fs/ubifs/file.c page = pagecache_get_page(mapping, page_offset, mapping 829 fs/ubifs/file.c struct inode *inode = 
page->mapping->host; mapping 908 fs/ubifs/file.c struct inode *inode = page->mapping->host; mapping 1007 fs/ubifs/file.c struct inode *inode = page->mapping->host; mapping 1292 fs/ubifs/file.c struct inode *inode = page->mapping->host; mapping 1450 fs/ubifs/file.c struct inode *inode = page->mapping->host; mapping 1463 fs/ubifs/file.c static int ubifs_migrate_page(struct address_space *mapping, mapping 1468 fs/ubifs/file.c rc = migrate_page_move_mapping(mapping, newpage, page, 0); mapping 1487 fs/ubifs/file.c struct inode *inode = page->mapping->host; mapping 1558 fs/ubifs/file.c if (unlikely(page->mapping != inode->i_mapping || mapping 43 fs/udf/file.c struct inode *inode = page->mapping->host; mapping 72 fs/udf/file.c struct inode *inode = page->mapping->host; mapping 90 fs/udf/file.c struct address_space *mapping, loff_t pos, mapping 98 fs/udf/file.c page = grab_cache_page_write_begin(mapping, 0, flags); mapping 114 fs/udf/file.c static int udf_adinicb_write_end(struct file *file, struct address_space *mapping, mapping 118 fs/udf/file.c struct inode *inode = page->mapping->host; mapping 165 fs/udf/inode.c static void udf_write_failed(struct address_space *mapping, loff_t to) mapping 167 fs/udf/inode.c struct inode *inode = mapping->host; mapping 187 fs/udf/inode.c static int udf_writepages(struct address_space *mapping, mapping 190 fs/udf/inode.c return mpage_writepages(mapping, wbc, udf_get_block); mapping 198 fs/udf/inode.c static int udf_readpages(struct file *file, struct address_space *mapping, mapping 201 fs/udf/inode.c return mpage_readpages(mapping, pages, nr_pages, udf_get_block); mapping 204 fs/udf/inode.c static int udf_write_begin(struct file *file, struct address_space *mapping, mapping 210 fs/udf/inode.c ret = block_write_begin(mapping, pos, len, flags, pagep, udf_get_block); mapping 212 fs/udf/inode.c udf_write_failed(mapping, pos + len); mapping 219 fs/udf/inode.c struct address_space *mapping = file->f_mapping; mapping 220 fs/udf/inode.c struct inode *inode = mapping->host; mapping 226 fs/udf/inode.c udf_write_failed(mapping, iocb->ki_pos + count); mapping 230 fs/udf/inode.c static sector_t udf_bmap(struct address_space *mapping, sector_t block) mapping 232 fs/udf/inode.c return generic_block_bmap(mapping, block, udf_get_block); mapping 106 fs/udf/symlink.c struct inode *inode = page->mapping->host; mapping 246 fs/ufs/balloc.c struct address_space * const mapping = inode->i_mapping; mapping 267 fs/ufs/balloc.c page = ufs_get_locked_page(mapping, index); mapping 47 fs/ufs/dir.c struct address_space *mapping = page->mapping; mapping 48 fs/ufs/dir.c struct inode *dir = mapping->host; mapping 52 fs/ufs/dir.c block_write_end(NULL, mapping, pos, len, len, page, NULL); mapping 112 fs/ufs/dir.c struct inode *dir = page->mapping->host; mapping 191 fs/ufs/dir.c struct address_space *mapping = dir->i_mapping; mapping 192 fs/ufs/dir.c struct page *page = read_mapping_page(mapping, n, NULL); mapping 546 fs/ufs/dir.c struct address_space *mapping = inode->i_mapping; mapping 547 fs/ufs/dir.c struct page *page = grab_cache_page(mapping, 0); mapping 487 fs/ufs/inode.c static void ufs_write_failed(struct address_space *mapping, loff_t to) mapping 489 fs/ufs/inode.c struct inode *inode = mapping->host; mapping 497 fs/ufs/inode.c static int ufs_write_begin(struct file *file, struct address_space *mapping, mapping 503 fs/ufs/inode.c ret = block_write_begin(mapping, pos, len, flags, pagep, mapping 506 fs/ufs/inode.c ufs_write_failed(mapping, pos + len); mapping 511 fs/ufs/inode.c static int 
ufs_write_end(struct file *file, struct address_space *mapping, mapping 517 fs/ufs/inode.c ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); mapping 519 fs/ufs/inode.c ufs_write_failed(mapping, pos + len); mapping 523 fs/ufs/inode.c static sector_t ufs_bmap(struct address_space *mapping, sector_t block) mapping 525 fs/ufs/inode.c return generic_block_bmap(mapping,block,ufs_getfrag_block); mapping 1052 fs/ufs/inode.c struct address_space *mapping = inode->i_mapping; mapping 1067 fs/ufs/inode.c lastpage = ufs_get_locked_page(mapping, lastfrag >> mapping 244 fs/ufs/util.c struct page *ufs_get_locked_page(struct address_space *mapping, mapping 247 fs/ufs/util.c struct inode *inode = mapping->host; mapping 248 fs/ufs/util.c struct page *page = find_lock_page(mapping, index); mapping 250 fs/ufs/util.c page = read_mapping_page(mapping, index, NULL); mapping 255 fs/ufs/util.c mapping->host->i_ino, index); mapping 261 fs/ufs/util.c if (unlikely(page->mapping == NULL)) { mapping 282 fs/ufs/util.h extern struct page *ufs_get_locked_page(struct address_space *mapping, mapping 2120 fs/unicode/mkutf8data.c unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */ mapping 2155 fs/unicode/mkutf8data.c mapping[i] = strtoul(s, &s, 16); mapping 2156 fs/unicode/mkutf8data.c if (!utf32valid(mapping[i])) mapping 2160 fs/unicode/mkutf8data.c mapping[i++] = 0; mapping 2163 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 2181 fs/unicode/mkutf8data.c unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */ mapping 2211 fs/unicode/mkutf8data.c mapping[i] = strtoul(s, &s, 16); mapping 2212 fs/unicode/mkutf8data.c if (!utf32valid(mapping[i])) mapping 2216 fs/unicode/mkutf8data.c mapping[i++] = 0; mapping 2219 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 2311 fs/unicode/mkutf8data.c unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */ mapping 2355 fs/unicode/mkutf8data.c mapping[i] = strtoul(s, &s, 16); mapping 2356 fs/unicode/mkutf8data.c if (!utf32valid(mapping[i])) mapping 2360 fs/unicode/mkutf8data.c mapping[i++] = 0; mapping 2363 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 2439 fs/unicode/mkutf8data.c unsigned int mapping[4]; mapping 2455 fs/unicode/mkutf8data.c mapping[i++] = lb + li; mapping 2456 fs/unicode/mkutf8data.c mapping[i++] = vb + vi; mapping 2458 fs/unicode/mkutf8data.c mapping[i++] = tb + ti; mapping 2459 fs/unicode/mkutf8data.c mapping[i++] = 0; mapping 2463 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 2468 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 2492 fs/unicode/mkutf8data.c unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. */ mapping 2515 fs/unicode/mkutf8data.c mapping[i++] = dc[j]; mapping 2518 fs/unicode/mkutf8data.c mapping[i++] = *um; mapping 2522 fs/unicode/mkutf8data.c mapping[i++] = 0; mapping 2527 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 2533 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 2547 fs/unicode/mkutf8data.c unsigned int mapping[19]; /* Magic - guaranteed not to be exceeded. 
*/ mapping 2569 fs/unicode/mkutf8data.c mapping[i++] = dc[j]; mapping 2572 fs/unicode/mkutf8data.c mapping[i++] = *um; mapping 2576 fs/unicode/mkutf8data.c mapping[i++] = 0; mapping 2581 fs/unicode/mkutf8data.c memcpy(um, mapping, i * sizeof(unsigned int)); mapping 189 fs/verity/verify.c struct inode *inode = page->mapping->host; mapping 223 fs/verity/verify.c struct inode *inode = bio_first_page_all(bio)->mapping->host; mapping 808 fs/xfs/xfs_aops.c trace_xfs_invalidatepage(page->mapping->host, page, offset, length); mapping 827 fs/xfs/xfs_aops.c struct inode *inode = page->mapping->host; mapping 967 fs/xfs/xfs_aops.c mapping_set_error(page->mapping, error); mapping 985 fs/xfs/xfs_aops.c struct inode *inode = page->mapping->host; mapping 1103 fs/xfs/xfs_aops.c struct address_space *mapping, mapping 1109 fs/xfs/xfs_aops.c xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); mapping 1110 fs/xfs/xfs_aops.c ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc); mapping 1118 fs/xfs/xfs_aops.c struct address_space *mapping, mapping 1121 fs/xfs/xfs_aops.c xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); mapping 1122 fs/xfs/xfs_aops.c return dax_writeback_mapping_range(mapping, mapping 1123 fs/xfs/xfs_aops.c xfs_find_bdev_for_inode(mapping->host), wbc); mapping 1131 fs/xfs/xfs_aops.c trace_xfs_releasepage(page->mapping->host, page, 0, 0); mapping 1137 fs/xfs/xfs_aops.c struct address_space *mapping, mapping 1140 fs/xfs/xfs_aops.c struct xfs_inode *ip = XFS_I(mapping->host); mapping 1155 fs/xfs/xfs_aops.c return iomap_bmap(mapping, block, &xfs_iomap_ops); mapping 1163 fs/xfs/xfs_aops.c trace_xfs_vm_readpage(page->mapping->host, 1); mapping 1170 fs/xfs/xfs_aops.c struct address_space *mapping, mapping 1174 fs/xfs/xfs_aops.c trace_xfs_vm_readpages(mapping->host, nr_pages); mapping 1175 fs/xfs/xfs_aops.c return iomap_readpages(mapping, pages, nr_pages, &xfs_iomap_ops); mapping 481 fs/xfs/xfs_file.c struct address_space *mapping = file->f_mapping; mapping 482 fs/xfs/xfs_file.c struct inode *inode = mapping->host; mapping 622 fs/xfs/xfs_file.c struct address_space *mapping = file->f_mapping; mapping 623 fs/xfs/xfs_file.c struct inode *inode = mapping->host; mapping 698 fs/xfs/xfs_file.c struct address_space *mapping = file->f_mapping; mapping 699 fs/xfs/xfs_file.c struct inode *inode = mapping->host; mapping 55 include/asm-generic/cacheflush.h static inline void flush_dcache_mmap_lock(struct address_space *mapping) mapping 61 include/asm-generic/cacheflush.h static inline void flush_dcache_mmap_unlock(struct address_space *mapping) mapping 22 include/drm/ati_pcigart.h struct drm_local_map mapping; mapping 608 include/drm/ttm/ttm_bo_driver.h struct address_space *mapping, mapping 213 include/linux/backing-dev.h static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) mapping 215 include/linux/backing-dev.h return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host)); mapping 218 include/linux/backing-dev.h static inline bool mapping_cap_account_dirty(struct address_space *mapping) mapping 220 include/linux/backing-dev.h return bdi_cap_account_dirty(inode_to_bdi(mapping->host)); mapping 86 include/linux/balloon_compaction.h extern int balloon_page_migrate(struct address_space *mapping, mapping 174 include/linux/buffer_head.h int sync_mapping_buffers(struct address_space *mapping); mapping 227 include/linux/buffer_head.h int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, mapping 416 include/linux/buffer_head.h static inline int 
sync_mapping_buffers(struct address_space *mapping) { return 0; } mapping 52 include/linux/cleancache.h static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) mapping 54 include/linux/cleancache.h return mapping->host->i_sb->cleancache_poolid >= 0; mapping 58 include/linux/cleancache.h return cleancache_fs_enabled_mapping(page->mapping); mapping 104 include/linux/cleancache.h static inline void cleancache_invalidate_page(struct address_space *mapping, mapping 108 include/linux/cleancache.h if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) mapping 109 include/linux/cleancache.h __cleancache_invalidate_page(mapping, page); mapping 112 include/linux/cleancache.h static inline void cleancache_invalidate_inode(struct address_space *mapping) mapping 114 include/linux/cleancache.h if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) mapping 115 include/linux/cleancache.h __cleancache_invalidate_inode(mapping); mapping 143 include/linux/dax.h int dax_writeback_mapping_range(struct address_space *mapping, mapping 146 include/linux/dax.h struct page *dax_layout_busy_page(struct address_space *mapping); mapping 177 include/linux/dax.h static inline struct page *dax_layout_busy_page(struct address_space *mapping) mapping 182 include/linux/dax.h static inline int dax_writeback_mapping_range(struct address_space *mapping, mapping 190 include/linux/dax.h if (IS_DAX(page->mapping->host)) mapping 220 include/linux/dax.h int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); mapping 221 include/linux/dax.h int dax_invalidate_mapping_entry_sync(struct address_space *mapping, mapping 237 include/linux/dax.h static inline bool dax_mapping(struct address_space *mapping) mapping 239 include/linux/dax.h return mapping->host && IS_DAX(mapping->host); mapping 376 include/linux/fs.h int (*readpages)(struct file *filp, struct address_space *mapping, mapping 379 include/linux/fs.h int (*write_begin)(struct file *, struct address_space *mapping, mapping 382 include/linux/fs.h int (*write_end)(struct file *, struct address_space *mapping, mapping 418 include/linux/fs.h int pagecache_write_begin(struct file *, struct address_space *mapping, mapping 422 include/linux/fs.h int pagecache_write_end(struct file *, struct address_space *mapping, mapping 519 include/linux/fs.h static inline bool mapping_tagged(struct address_space *mapping, xa_mark_t tag) mapping 521 include/linux/fs.h return xa_marked(&mapping->i_pages, tag); mapping 524 include/linux/fs.h static inline void i_mmap_lock_write(struct address_space *mapping) mapping 526 include/linux/fs.h down_write(&mapping->i_mmap_rwsem); mapping 529 include/linux/fs.h static inline void i_mmap_unlock_write(struct address_space *mapping) mapping 531 include/linux/fs.h up_write(&mapping->i_mmap_rwsem); mapping 534 include/linux/fs.h static inline void i_mmap_lock_read(struct address_space *mapping) mapping 536 include/linux/fs.h down_read(&mapping->i_mmap_rwsem); mapping 539 include/linux/fs.h static inline void i_mmap_unlock_read(struct address_space *mapping) mapping 541 include/linux/fs.h up_read(&mapping->i_mmap_rwsem); mapping 547 include/linux/fs.h static inline int mapping_mapped(struct address_space *mapping) mapping 549 include/linux/fs.h return !RB_EMPTY_ROOT(&mapping->i_mmap.rb_root); mapping 561 include/linux/fs.h static inline int mapping_writably_mapped(struct address_space *mapping) mapping 563 include/linux/fs.h return atomic_read(&mapping->i_mmap_writable) > 0; mapping 566 
include/linux/fs.h static inline int mapping_map_writable(struct address_space *mapping) mapping 568 include/linux/fs.h return atomic_inc_unless_negative(&mapping->i_mmap_writable) ? mapping 572 include/linux/fs.h static inline void mapping_unmap_writable(struct address_space *mapping) mapping 574 include/linux/fs.h atomic_dec(&mapping->i_mmap_writable); mapping 577 include/linux/fs.h static inline int mapping_deny_writable(struct address_space *mapping) mapping 579 include/linux/fs.h return atomic_dec_unless_positive(&mapping->i_mmap_writable) ? mapping 583 include/linux/fs.h static inline void mapping_allow_writable(struct address_space *mapping) mapping 585 include/linux/fs.h atomic_inc(&mapping->i_mmap_writable); mapping 2721 include/linux/fs.h unsigned long invalidate_mapping_pages(struct address_space *mapping, mapping 2730 include/linux/fs.h extern int invalidate_inode_pages2(struct address_space *mapping); mapping 2731 include/linux/fs.h extern int invalidate_inode_pages2_range(struct address_space *mapping, mapping 2736 include/linux/fs.h extern int filemap_fdatawait_keep_errors(struct address_space *mapping); mapping 2739 include/linux/fs.h extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping, mapping 2742 include/linux/fs.h static inline int filemap_fdatawait(struct address_space *mapping) mapping 2744 include/linux/fs.h return filemap_fdatawait_range(mapping, 0, LLONG_MAX); mapping 2749 include/linux/fs.h extern int filemap_write_and_wait(struct address_space *mapping); mapping 2750 include/linux/fs.h extern int filemap_write_and_wait_range(struct address_space *mapping, mapping 2752 include/linux/fs.h extern int __filemap_fdatawrite_range(struct address_space *mapping, mapping 2754 include/linux/fs.h extern int filemap_fdatawrite_range(struct address_space *mapping, mapping 2756 include/linux/fs.h extern int filemap_check_errors(struct address_space *mapping); mapping 2757 include/linux/fs.h extern void __filemap_set_wb_err(struct address_space *mapping, int err); mapping 2784 include/linux/fs.h static inline void filemap_set_wb_err(struct address_space *mapping, int err) mapping 2788 include/linux/fs.h __filemap_set_wb_err(mapping, err); mapping 2801 include/linux/fs.h static inline int filemap_check_wb_err(struct address_space *mapping, mapping 2804 include/linux/fs.h return errseq_check(&mapping->wb_err, since); mapping 2814 include/linux/fs.h static inline errseq_t filemap_sample_wb_err(struct address_space *mapping) mapping 2816 include/linux/fs.h return errseq_sample(&mapping->wb_err); mapping 2819 include/linux/fs.h static inline int filemap_nr_thps(struct address_space *mapping) mapping 2822 include/linux/fs.h return atomic_read(&mapping->nr_thps); mapping 2828 include/linux/fs.h static inline void filemap_nr_thps_inc(struct address_space *mapping) mapping 2831 include/linux/fs.h atomic_inc(&mapping->nr_thps); mapping 2837 include/linux/fs.h static inline void filemap_nr_thps_dec(struct address_space *mapping) mapping 2840 include/linux/fs.h atomic_dec(&mapping->nr_thps); mapping 3024 include/linux/fs.h extern void address_space_init_once(struct address_space *mapping); mapping 3136 include/linux/fs.h file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); mapping 3314 include/linux/fs.h extern int simple_write_begin(struct file *file, struct address_space *mapping, mapping 3317 include/linux/fs.h extern int simple_write_end(struct file *file, struct address_space *mapping, mapping 146 include/linux/fscache-cache.h struct 
address_space *mapping; /* netfs pages */ mapping 111 include/linux/fscache.h struct address_space *mapping, mapping 590 include/linux/fscache.h struct address_space *mapping, mapping 598 include/linux/fscache.h return __fscache_read_or_alloc_pages(cookie, mapping, pages, mapping 125 include/linux/fscrypt.h return page->mapping == NULL; mapping 108 include/linux/hugetlb.h u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, mapping 369 include/linux/hugetlb.h int huge_add_to_page_cache(struct page *page, struct address_space *mapping, mapping 56 include/linux/io-mapping.h io_mapping_fini(struct io_mapping *mapping) mapping 58 include/linux/io-mapping.h iomap_free(mapping->base, mapping->size); mapping 63 include/linux/io-mapping.h io_mapping_map_atomic_wc(struct io_mapping *mapping, mapping 69 include/linux/io-mapping.h BUG_ON(offset >= mapping->size); mapping 70 include/linux/io-mapping.h phys_addr = mapping->base + offset; mapping 72 include/linux/io-mapping.h return iomap_atomic_prot_pfn(pfn, mapping->prot); mapping 82 include/linux/io-mapping.h io_mapping_map_wc(struct io_mapping *mapping, mapping 88 include/linux/io-mapping.h BUG_ON(offset >= mapping->size); mapping 89 include/linux/io-mapping.h phys_addr = mapping->base + offset; mapping 126 include/linux/io-mapping.h io_mapping_fini(struct io_mapping *mapping) mapping 128 include/linux/io-mapping.h iounmap(mapping->iomem); mapping 133 include/linux/io-mapping.h io_mapping_map_wc(struct io_mapping *mapping, mapping 137 include/linux/io-mapping.h return mapping->iomem + offset; mapping 147 include/linux/io-mapping.h io_mapping_map_atomic_wc(struct io_mapping *mapping, mapping 152 include/linux/io-mapping.h return io_mapping_map_wc(mapping, offset, PAGE_SIZE); mapping 155 include/linux/iomap.h int iomap_readpages(struct address_space *mapping, struct list_head *pages, mapping 164 include/linux/iomap.h int iomap_migrate_page(struct address_space *mapping, struct page *newpage, mapping 183 include/linux/iomap.h sector_t iomap_bmap(struct address_space *mapping, sector_t bno, mapping 123 include/linux/libnvdimm.h struct nd_mapping_desc *mapping; mapping 65 include/linux/migrate.h extern int migrate_page(struct address_space *mapping, mapping 77 include/linux/migrate.h extern int migrate_huge_page_move_mapping(struct address_space *mapping, mapping 79 include/linux/migrate.h extern int migrate_page_move_mapping(struct address_space *mapping, mapping 101 include/linux/migrate.h static inline int migrate_huge_page_move_mapping(struct address_space *mapping, mapping 111 include/linux/migrate.h extern void __SetPageMovable(struct page *page, struct address_space *mapping); mapping 116 include/linux/migrate.h struct address_space *mapping) mapping 123 include/linux/mlx5/cq.h } mapping; mapping 1371 include/linux/mm.h return page->mapping; mapping 1483 include/linux/mm.h int truncate_inode_page(struct address_space *mapping, struct page *page); mapping 1484 include/linux/mm.h int generic_error_remove_page(struct address_space *mapping, struct page *page); mapping 1493 include/linux/mm.h void unmap_mapping_pages(struct address_space *mapping, mapping 1495 include/linux/mm.h void unmap_mapping_range(struct address_space *mapping, mapping 1513 include/linux/mm.h static inline void unmap_mapping_pages(struct address_space *mapping, mapping 1515 include/linux/mm.h static inline void unmap_mapping_range(struct address_space *mapping, mapping 1519 include/linux/mm.h static inline void unmap_shared_mapping_range(struct 
address_space *mapping, mapping 1522 include/linux/mm.h unmap_mapping_range(mapping, holebegin, holelen, 0); mapping 1608 include/linux/mm.h void account_page_dirtied(struct page *page, struct address_space *mapping); mapping 1609 include/linux/mm.h void account_page_cleaned(struct page *page, struct address_space *mapping, mapping 2433 include/linux/mm.h int force_page_cache_readahead(struct address_space *mapping, struct file *filp, mapping 2436 include/linux/mm.h void page_cache_sync_readahead(struct address_space *mapping, mapping 2442 include/linux/mm.h void page_cache_async_readahead(struct address_space *mapping, mapping 86 include/linux/mm_types.h struct address_space *mapping; mapping 17 include/linux/mpage.h int mpage_readpages(struct address_space *mapping, struct list_head *pages, mapping 20 include/linux/mpage.h int mpage_writepages(struct address_space *mapping, mapping 367 include/linux/nfs_fs.h extern int nfs_sync_mapping(struct address_space *mapping); mapping 368 include/linux/nfs_fs.h extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping); mapping 387 include/linux/nfs_fs.h extern int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping); mapping 458 include/linux/page-flags.h return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; mapping 464 include/linux/page-flags.h return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0; mapping 469 include/linux/page-flags.h return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == mapping 483 include/linux/page-flags.h return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == mapping 48 include/linux/pagemap.h static inline void mapping_set_error(struct address_space *mapping, int error) mapping 54 include/linux/pagemap.h filemap_set_wb_err(mapping, error); mapping 58 include/linux/pagemap.h set_bit(AS_ENOSPC, &mapping->flags); mapping 60 include/linux/pagemap.h set_bit(AS_EIO, &mapping->flags); mapping 63 include/linux/pagemap.h static inline void mapping_set_unevictable(struct address_space *mapping) mapping 65 include/linux/pagemap.h set_bit(AS_UNEVICTABLE, &mapping->flags); mapping 68 include/linux/pagemap.h static inline void mapping_clear_unevictable(struct address_space *mapping) mapping 70 include/linux/pagemap.h clear_bit(AS_UNEVICTABLE, &mapping->flags); mapping 73 include/linux/pagemap.h static inline int mapping_unevictable(struct address_space *mapping) mapping 75 include/linux/pagemap.h if (mapping) mapping 76 include/linux/pagemap.h return test_bit(AS_UNEVICTABLE, &mapping->flags); mapping 77 include/linux/pagemap.h return !!mapping; mapping 80 include/linux/pagemap.h static inline void mapping_set_exiting(struct address_space *mapping) mapping 82 include/linux/pagemap.h set_bit(AS_EXITING, &mapping->flags); mapping 85 include/linux/pagemap.h static inline int mapping_exiting(struct address_space *mapping) mapping 87 include/linux/pagemap.h return test_bit(AS_EXITING, &mapping->flags); mapping 90 include/linux/pagemap.h static inline void mapping_set_no_writeback_tags(struct address_space *mapping) mapping 92 include/linux/pagemap.h set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); mapping 95 include/linux/pagemap.h static inline int mapping_use_writeback_tags(struct address_space *mapping) mapping 97 include/linux/pagemap.h return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); mapping 100 include/linux/pagemap.h static inline gfp_t mapping_gfp_mask(struct address_space * mapping) mapping 102 include/linux/pagemap.h return mapping->gfp_mask; 
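The include/linux/pagemap.h entries just above index the error-reporting half of the address_space API: a writeback path records a failure with mapping_set_error(), which sets the legacy AS_EIO/AS_ENOSPC bits and advances the mapping's errseq_t, and a later fsync-style caller observes it through the wb_err helpers indexed earlier from include/linux/fs.h. A minimal sketch of both halves, assuming kernel context; end_write_sketch and check_since_sketch are hypothetical names.

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Writeback completion side: record any I/O error on the mapping. */
static void end_write_sketch(struct page *page, int error)
{
        if (error)
                mapping_set_error(page->mapping, error);
        end_page_writeback(page);
}

/* fsync side: sample the error cursor once, then check whether any
 * error was recorded after that point. */
static int check_since_sketch(struct address_space *mapping)
{
        errseq_t since = filemap_sample_wb_err(mapping);

        /* ... writeback would happen here ... */

        return filemap_check_wb_err(mapping, since);
}
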
mapping 106 include/linux/pagemap.h static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, mapping 109 include/linux/pagemap.h return mapping_gfp_mask(mapping) & gfp_mask; mapping 231 include/linux/pagemap.h pgoff_t page_cache_next_miss(struct address_space *mapping, mapping 233 include/linux/pagemap.h pgoff_t page_cache_prev_miss(struct address_space *mapping, mapping 244 include/linux/pagemap.h struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, mapping 257 include/linux/pagemap.h static inline struct page *find_get_page(struct address_space *mapping, mapping 260 include/linux/pagemap.h return pagecache_get_page(mapping, offset, 0, 0); mapping 263 include/linux/pagemap.h static inline struct page *find_get_page_flags(struct address_space *mapping, mapping 266 include/linux/pagemap.h return pagecache_get_page(mapping, offset, fgp_flags, 0); mapping 282 include/linux/pagemap.h static inline struct page *find_lock_page(struct address_space *mapping, mapping 285 include/linux/pagemap.h return pagecache_get_page(mapping, offset, FGP_LOCK, 0); mapping 307 include/linux/pagemap.h static inline struct page *find_or_create_page(struct address_space *mapping, mapping 310 include/linux/pagemap.h return pagecache_get_page(mapping, offset, mapping 328 include/linux/pagemap.h static inline struct page *grab_cache_page_nowait(struct address_space *mapping, mapping 331 include/linux/pagemap.h return pagecache_get_page(mapping, index, mapping 333 include/linux/pagemap.h mapping_gfp_mask(mapping)); mapping 346 include/linux/pagemap.h struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); mapping 347 include/linux/pagemap.h struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); mapping 348 include/linux/pagemap.h unsigned find_get_entries(struct address_space *mapping, pgoff_t start, mapping 351 include/linux/pagemap.h unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, mapping 354 include/linux/pagemap.h static inline unsigned find_get_pages(struct address_space *mapping, mapping 358 include/linux/pagemap.h return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, mapping 361 include/linux/pagemap.h unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, mapping 363 include/linux/pagemap.h unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, mapping 366 include/linux/pagemap.h static inline unsigned find_get_pages_tag(struct address_space *mapping, mapping 370 include/linux/pagemap.h return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, mapping 374 include/linux/pagemap.h struct page *grab_cache_page_write_begin(struct address_space *mapping, mapping 380 include/linux/pagemap.h static inline struct page *grab_cache_page(struct address_space *mapping, mapping 383 include/linux/pagemap.h return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); mapping 386 include/linux/pagemap.h extern struct page * read_cache_page(struct address_space *mapping, mapping 388 include/linux/pagemap.h extern struct page * read_cache_page_gfp(struct address_space *mapping, mapping 390 include/linux/pagemap.h extern int read_cache_pages(struct address_space *mapping, mapping 393 include/linux/pagemap.h static inline struct page *read_mapping_page(struct address_space *mapping, mapping 396 include/linux/pagemap.h return read_cache_page(mapping, index, NULL, data); mapping 607 include/linux/pagemap.h int add_to_page_cache_locked(struct 
page *page, struct address_space *mapping, mapping 609 include/linux/pagemap.h int add_to_page_cache_lru(struct page *page, struct address_space *mapping, mapping 614 include/linux/pagemap.h void delete_from_page_cache_batch(struct address_space *mapping, mapping 622 include/linux/pagemap.h struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) mapping 627 include/linux/pagemap.h error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); mapping 29 include/linux/pagevec.h struct address_space *mapping, mapping 34 include/linux/pagevec.h struct address_space *mapping, mapping 37 include/linux/pagevec.h struct address_space *mapping, mapping 40 include/linux/pagevec.h return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1); mapping 44 include/linux/pagevec.h struct address_space *mapping, pgoff_t *index, pgoff_t end, mapping 47 include/linux/pagevec.h struct address_space *mapping, pgoff_t *index, pgoff_t end, mapping 50 include/linux/pagevec.h struct address_space *mapping, pgoff_t *index, xa_mark_t tag) mapping 52 include/linux/pagevec.h return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag); mapping 63 include/linux/sh_clk.h struct clk_mapping *mapping; mapping 66 include/linux/shmem_fs.h extern bool shmem_mapping(struct address_space *mapping); mapping 68 include/linux/shmem_fs.h static inline bool shmem_mapping(struct address_space *mapping) mapping 73 include/linux/shmem_fs.h extern void shmem_unlock_mapping(struct address_space *mapping); mapping 74 include/linux/shmem_fs.h extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, mapping 81 include/linux/shmem_fs.h extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, mapping 98 include/linux/shmem_fs.h struct address_space *mapping, pgoff_t index) mapping 100 include/linux/shmem_fs.h return shmem_read_mapping_page_gfp(mapping, index, mapping 101 include/linux/shmem_fs.h mapping_gfp_mask(mapping)); mapping 316 include/linux/swap.h #define mapping_set_update(xas, mapping) do { \ mapping 317 include/linux/swap.h if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \ mapping 365 include/linux/swap.h extern int remove_mapping(struct address_space *mapping, struct page *page); mapping 164 include/linux/tpm_eventlog.h void *mapping = NULL; mapping 183 include/linux/tpm_eventlog.h mapping = TPM_MEMREMAP((unsigned long)marker_start, mapping 185 include/linux/tpm_eventlog.h if (!mapping) { mapping 190 include/linux/tpm_eventlog.h mapping = marker_start; mapping 193 include/linux/tpm_eventlog.h event = (struct tcg_pcr_event2_head *)mapping; mapping 214 include/linux/tpm_eventlog.h TPM_MEMUNMAP(mapping, mapping_size); mapping 216 include/linux/tpm_eventlog.h mapping = TPM_MEMREMAP((unsigned long)marker, mapping 218 include/linux/tpm_eventlog.h if (!mapping) { mapping 223 include/linux/tpm_eventlog.h mapping = marker; mapping 226 include/linux/tpm_eventlog.h memcpy(&halg, mapping, halg_size); mapping 248 include/linux/tpm_eventlog.h TPM_MEMUNMAP(mapping, mapping_size); mapping 250 include/linux/tpm_eventlog.h mapping = TPM_MEMREMAP((unsigned long)marker, mapping 252 include/linux/tpm_eventlog.h if (!mapping) { mapping 257 include/linux/tpm_eventlog.h mapping = marker; mapping 260 include/linux/tpm_eventlog.h event_field = (struct tcg_event_field *)mapping; mapping 271 include/linux/tpm_eventlog.h TPM_MEMUNMAP(mapping, mapping_size); mapping 143 include/linux/vlynq.h struct vlynq_mapping *mapping); mapping 145 include/linux/vlynq.h struct vlynq_mapping *mapping); 
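The page-cache lookup entries indexed above from include/linux/pagemap.h (find_get_page, find_lock_page, find_or_create_page, read_mapping_page and friends) are mostly thin wrappers around pagecache_get_page() or read_cache_page() with different FGP_* flags and gfp masks. A minimal sketch of the common read path, assuming kernel context; read_index_sketch is a hypothetical caller.

#include <linux/err.h>
#include <linux/pagemap.h>

/* Fetch page 'index' of a mapping, populating it via
 * mapping->a_ops->readpage if it is not already cached.  The page
 * comes back uptodate and with an elevated refcount; the caller
 * must put_page() it when done. */
static struct page *read_index_sketch(struct address_space *mapping,
                                      pgoff_t index)
{
        struct page *page = read_mapping_page(mapping, index, NULL);

        if (IS_ERR(page))
                return page;

        /* Writers instead take a locked page, e.g.:
         *   page = find_or_create_page(mapping, index, GFP_NOFS);
         */
        return page;
}
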
mapping 388 include/linux/writeback.h void balance_dirty_pages_ratelimited(struct address_space *mapping); mapping 394 include/linux/writeback.h int generic_writepages(struct address_space *mapping, mapping 396 include/linux/writeback.h void tag_pages_for_writeback(struct address_space *mapping, mapping 398 include/linux/writeback.h int write_cache_pages(struct address_space *mapping, mapping 401 include/linux/writeback.h int do_writepages(struct address_space *mapping, struct writeback_control *wbc); mapping 403 include/linux/writeback.h void tag_pages_for_writeback(struct address_space *mapping, mapping 139 include/misc/ocxl.h struct address_space *mapping); mapping 632 include/trace/events/btrfs.h TP_fast_assign_btrfs(btrfs_sb(page->mapping->host->i_sb), mapping 633 include/trace/events/btrfs.h __entry->ino = btrfs_ino(BTRFS_I(page->mapping->host)); mapping 639 include/trace/events/btrfs.h BTRFS_I(page->mapping->host)->root->root_key.objectid; mapping 97 include/trace/events/erofs.h __entry->dev = page->mapping->host->i_sb->s_dev; mapping 98 include/trace/events/erofs.h __entry->nid = EROFS_I(page->mapping->host)->nid; mapping 99 include/trace/events/erofs.h __entry->dir = S_ISDIR(page->mapping->host->i_mode); mapping 537 include/trace/events/ext4.h __entry->dev = page->mapping->host->i_sb->s_dev; mapping 538 include/trace/events/ext4.h __entry->ino = page->mapping->host->i_ino; mapping 583 include/trace/events/ext4.h __entry->dev = page->mapping->host->i_sb->s_dev; mapping 584 include/trace/events/ext4.h __entry->ino = page->mapping->host->i_ino; mapping 1050 include/trace/events/f2fs.h TP_CONDITION(page->mapping) mapping 1059 include/trace/events/f2fs.h TP_CONDITION(page->mapping) mapping 31 include/trace/events/filemap.h __entry->i_ino = page->mapping->host->i_ino; mapping 33 include/trace/events/filemap.h if (page->mapping->host->i_sb) mapping 34 include/trace/events/filemap.h __entry->s_dev = page->mapping->host->i_sb->s_dev; mapping 36 include/trace/events/filemap.h __entry->s_dev = page->mapping->host->i_rdev; mapping 58 include/trace/events/filemap.h TP_PROTO(struct address_space *mapping, errseq_t eseq), mapping 60 include/trace/events/filemap.h TP_ARGS(mapping, eseq), mapping 69 include/trace/events/filemap.h __entry->i_ino = mapping->host->i_ino; mapping 71 include/trace/events/filemap.h if (mapping->host->i_sb) mapping 72 include/trace/events/filemap.h __entry->s_dev = mapping->host->i_sb->s_dev; mapping 74 include/trace/events/filemap.h __entry->s_dev = mapping->host->i_rdev; mapping 24 include/trace/events/page_ref.h __field(void *, mapping) mapping 34 include/trace/events/page_ref.h __entry->mapping = page->mapping; mapping 43 include/trace/events/page_ref.h __entry->mapcount, __entry->mapping, __entry->mt, mapping 72 include/trace/events/page_ref.h __field(void *, mapping) mapping 83 include/trace/events/page_ref.h __entry->mapping = page->mapping; mapping 93 include/trace/events/page_ref.h __entry->mapcount, __entry->mapping, __entry->mt, mapping 58 include/trace/events/writeback.h TP_PROTO(struct page *page, struct address_space *mapping), mapping 60 include/trace/events/writeback.h TP_ARGS(page, mapping), mapping 70 include/trace/events/writeback.h bdi_dev_name(mapping ? inode_to_bdi(mapping->host) : mapping 72 include/trace/events/writeback.h __entry->ino = mapping ? 
mapping->host->i_ino : 0; mapping 85 include/trace/events/writeback.h TP_PROTO(struct page *page, struct address_space *mapping), mapping 87 include/trace/events/writeback.h TP_ARGS(page, mapping) mapping 92 include/trace/events/writeback.h TP_PROTO(struct page *page, struct address_space *mapping), mapping 94 include/trace/events/writeback.h TP_ARGS(page, mapping) mapping 254 include/trace/events/writeback.h struct address_space *mapping = page_mapping(page); mapping 255 include/trace/events/writeback.h struct inode *inode = mapping ? mapping->host : NULL; mapping 840 ipc/shm.c struct address_space *mapping = inode->i_mapping; mapping 842 ipc/shm.c *rss_add += pages_per_huge_page(h) * mapping->nrpages; mapping 425 kernel/crash_core.c VMCOREINFO_OFFSET(page, mapping); mapping 5456 kernel/events/core.c vmf->page->mapping = vmf->vma->vm_file->f_mapping; mapping 598 kernel/events/ring_buffer.c page->mapping = NULL; mapping 806 kernel/events/ring_buffer.c page->mapping = NULL; mapping 840 kernel/events/ring_buffer.c page->mapping = NULL; mapping 797 kernel/events/uprobes.c static int __copy_insn(struct address_space *mapping, struct file *filp, mapping 806 kernel/events/uprobes.c if (mapping->a_ops->readpage) mapping 807 kernel/events/uprobes.c page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); mapping 809 kernel/events/uprobes.c page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); mapping 821 kernel/events/uprobes.c struct address_space *mapping = uprobe->inode->i_mapping; mapping 833 kernel/events/uprobes.c err = __copy_insn(mapping, filp, insn, len, offs); mapping 972 kernel/events/uprobes.c build_map_info(struct address_space *mapping, loff_t offset, bool is_register) mapping 982 kernel/events/uprobes.c i_mmap_lock_read(mapping); mapping 983 kernel/events/uprobes.c vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { mapping 1013 kernel/events/uprobes.c i_mmap_unlock_read(mapping); mapping 563 kernel/fork.c struct address_space *mapping = file->f_mapping; mapping 568 kernel/fork.c i_mmap_lock_write(mapping); mapping 570 kernel/fork.c atomic_inc(&mapping->i_mmap_writable); mapping 571 kernel/fork.c flush_dcache_mmap_lock(mapping); mapping 574 kernel/fork.c &mapping->i_mmap); mapping 575 kernel/fork.c flush_dcache_mmap_unlock(mapping); mapping 576 kernel/fork.c i_mmap_unlock_write(mapping); mapping 577 kernel/futex.c struct address_space *mapping; mapping 647 kernel/futex.c mapping = READ_ONCE(page->mapping); mapping 664 kernel/futex.c if (unlikely(!mapping)) { mapping 673 kernel/futex.c shmem_swizzled = PageSwapCache(page) || page->mapping; mapping 723 kernel/futex.c if (READ_ONCE(page->mapping) != mapping) { mapping 730 kernel/futex.c inode = READ_ONCE(mapping->host); mapping 309 kernel/kexec_core.c pages->mapping = NULL; mapping 233 mm/balloon_compaction.c int balloon_page_migrate(struct address_space *mapping, mapping 188 mm/cleancache.c pool_id = page->mapping->host->i_sb->cleancache_poolid; mapping 192 mm/cleancache.c if (cleancache_get_key(page->mapping->host, &key) < 0) mapping 226 mm/cleancache.c pool_id = page->mapping->host->i_sb->cleancache_poolid; mapping 228 mm/cleancache.c cleancache_get_key(page->mapping->host, &key) >= 0) { mapping 243 mm/cleancache.c void __cleancache_invalidate_page(struct address_space *mapping, mapping 247 mm/cleancache.c int pool_id = mapping->host->i_sb->cleancache_poolid; mapping 255 mm/cleancache.c if (cleancache_get_key(mapping->host, &key) >= 0) { mapping 273 mm/cleancache.c void 
__cleancache_invalidate_inode(struct address_space *mapping) mapping 275 mm/cleancache.c int pool_id = mapping->host->i_sb->cleancache_poolid; mapping 281 mm/cleancache.c if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) mapping 98 mm/compaction.c struct address_space *mapping; mapping 104 mm/compaction.c mapping = page_mapping(page); mapping 105 mm/compaction.c if (mapping && mapping->a_ops && mapping->a_ops->isolate_page) mapping 112 mm/compaction.c void __SetPageMovable(struct page *page, struct address_space *mapping) mapping 115 mm/compaction.c VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page); mapping 116 mm/compaction.c page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE); mapping 129 mm/compaction.c page->mapping = (void *)((unsigned long)page->mapping & mapping 47 mm/debug.c struct address_space *mapping; mapping 62 mm/debug.c mapping = page_mapping(page); mapping 75 mm/debug.c page->mapping, page_to_pgoff(page), mapping 80 mm/debug.c page->mapping, page_to_pgoff(page)); mapping 85 mm/debug.c else if (mapping) { mapping 86 mm/debug.c if (mapping->host && mapping->host->i_dentry.first) { mapping 88 mm/debug.c dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias); mapping 89 mm/debug.c pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry); mapping 91 mm/debug.c pr_warn("%ps\n", mapping->a_ops); mapping 33 mm/fadvise.c struct address_space *mapping; mapping 44 mm/fadvise.c mapping = file->f_mapping; mapping 45 mm/fadvise.c if (!mapping || len < 0) mapping 48 mm/fadvise.c bdi = inode_to_bdi(mapping->host); mapping 109 mm/fadvise.c force_page_cache_readahead(mapping, file, start_index, nrpages); mapping 114 mm/fadvise.c if (!inode_write_congested(mapping->host)) mapping 115 mm/fadvise.c __filemap_fdatawrite_range(mapping, offset, endbyte, mapping 159 mm/fadvise.c count = invalidate_mapping_pages(mapping, mapping 170 mm/fadvise.c invalidate_mapping_pages(mapping, start_index, mapping 119 mm/filemap.c static void page_cache_delete(struct address_space *mapping, mapping 122 mm/filemap.c XA_STATE(xas, &mapping->i_pages, page->index); mapping 125 mm/filemap.c mapping_set_update(&xas, mapping); mapping 140 mm/filemap.c page->mapping = NULL; mapping 144 mm/filemap.c mapping->nrexceptional += nr; mapping 153 mm/filemap.c mapping->nrpages -= nr; mapping 156 mm/filemap.c static void unaccount_page_cache_page(struct address_space *mapping, mapping 169 mm/filemap.c cleancache_invalidate_page(mapping, page); mapping 183 mm/filemap.c if (mapping_exiting(mapping) && mapping 209 mm/filemap.c filemap_nr_thps_dec(mapping); mapping 223 mm/filemap.c account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); mapping 233 mm/filemap.c struct address_space *mapping = page->mapping; mapping 237 mm/filemap.c unaccount_page_cache_page(mapping, page); mapping 238 mm/filemap.c page_cache_delete(mapping, page, shadow); mapping 241 mm/filemap.c static void page_cache_free_page(struct address_space *mapping, mapping 246 mm/filemap.c freepage = mapping->a_ops->freepage; mapping 268 mm/filemap.c struct address_space *mapping = page_mapping(page); mapping 272 mm/filemap.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 274 mm/filemap.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 276 mm/filemap.c page_cache_free_page(mapping, page); mapping 294 mm/filemap.c static void page_cache_delete_batch(struct address_space *mapping, mapping 297 mm/filemap.c XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); mapping 302 
mm/filemap.c mapping_set_update(&xas, mapping); mapping 326 mm/filemap.c page->mapping = NULL; mapping 339 mm/filemap.c mapping->nrpages -= total_pages; mapping 342 mm/filemap.c void delete_from_page_cache_batch(struct address_space *mapping, mapping 351 mm/filemap.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 355 mm/filemap.c unaccount_page_cache_page(mapping, pvec->pages[i]); mapping 357 mm/filemap.c page_cache_delete_batch(mapping, pvec); mapping 358 mm/filemap.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 361 mm/filemap.c page_cache_free_page(mapping, pvec->pages[i]); mapping 364 mm/filemap.c int filemap_check_errors(struct address_space *mapping) mapping 368 mm/filemap.c if (test_bit(AS_ENOSPC, &mapping->flags) && mapping 369 mm/filemap.c test_and_clear_bit(AS_ENOSPC, &mapping->flags)) mapping 371 mm/filemap.c if (test_bit(AS_EIO, &mapping->flags) && mapping 372 mm/filemap.c test_and_clear_bit(AS_EIO, &mapping->flags)) mapping 378 mm/filemap.c static int filemap_check_and_keep_errors(struct address_space *mapping) mapping 381 mm/filemap.c if (test_bit(AS_EIO, &mapping->flags)) mapping 383 mm/filemap.c if (test_bit(AS_ENOSPC, &mapping->flags)) mapping 405 mm/filemap.c int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, mapping 416 mm/filemap.c if (!mapping_cap_writeback_dirty(mapping) || mapping 417 mm/filemap.c !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) mapping 420 mm/filemap.c wbc_attach_fdatawrite_inode(&wbc, mapping->host); mapping 421 mm/filemap.c ret = do_writepages(mapping, &wbc); mapping 426 mm/filemap.c static inline int __filemap_fdatawrite(struct address_space *mapping, mapping 429 mm/filemap.c return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); mapping 432 mm/filemap.c int filemap_fdatawrite(struct address_space *mapping) mapping 434 mm/filemap.c return __filemap_fdatawrite(mapping, WB_SYNC_ALL); mapping 438 mm/filemap.c int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, mapping 441 mm/filemap.c return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); mapping 454 mm/filemap.c int filemap_flush(struct address_space *mapping) mapping 456 mm/filemap.c return __filemap_fdatawrite(mapping, WB_SYNC_NONE); mapping 472 mm/filemap.c bool filemap_range_has_page(struct address_space *mapping, mapping 476 mm/filemap.c XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); mapping 503 mm/filemap.c static void __filemap_fdatawait_range(struct address_space *mapping, mapping 518 mm/filemap.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, mapping 550 mm/filemap.c int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, mapping 553 mm/filemap.c __filemap_fdatawait_range(mapping, start_byte, end_byte); mapping 554 mm/filemap.c return filemap_check_errors(mapping); mapping 572 mm/filemap.c int filemap_fdatawait_range_keep_errors(struct address_space *mapping, mapping 575 mm/filemap.c __filemap_fdatawait_range(mapping, start_byte, end_byte); mapping 576 mm/filemap.c return filemap_check_and_keep_errors(mapping); mapping 598 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 600 mm/filemap.c __filemap_fdatawait_range(mapping, start_byte, end_byte); mapping 619 mm/filemap.c int filemap_fdatawait_keep_errors(struct address_space *mapping) mapping 621 mm/filemap.c __filemap_fdatawait_range(mapping, 0, LLONG_MAX); mapping 622 mm/filemap.c return filemap_check_and_keep_errors(mapping); mapping 627 mm/filemap.c static bool 
mapping_needs_writeback(struct address_space *mapping) mapping 629 mm/filemap.c if (dax_mapping(mapping)) mapping 630 mm/filemap.c return mapping->nrexceptional; mapping 632 mm/filemap.c return mapping->nrpages; mapping 635 mm/filemap.c int filemap_write_and_wait(struct address_space *mapping) mapping 639 mm/filemap.c if (mapping_needs_writeback(mapping)) { mapping 640 mm/filemap.c err = filemap_fdatawrite(mapping); mapping 648 mm/filemap.c int err2 = filemap_fdatawait(mapping); mapping 653 mm/filemap.c filemap_check_errors(mapping); mapping 656 mm/filemap.c err = filemap_check_errors(mapping); mapping 675 mm/filemap.c int filemap_write_and_wait_range(struct address_space *mapping, mapping 680 mm/filemap.c if (mapping_needs_writeback(mapping)) { mapping 681 mm/filemap.c err = __filemap_fdatawrite_range(mapping, lstart, lend, mapping 685 mm/filemap.c int err2 = filemap_fdatawait_range(mapping, mapping 691 mm/filemap.c filemap_check_errors(mapping); mapping 694 mm/filemap.c err = filemap_check_errors(mapping); mapping 700 mm/filemap.c void __filemap_set_wb_err(struct address_space *mapping, int err) mapping 702 mm/filemap.c errseq_t eseq = errseq_set(&mapping->wb_err, err); mapping 704 mm/filemap.c trace_filemap_set_wb_err(mapping, eseq); mapping 736 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 739 mm/filemap.c if (errseq_check(&mapping->wb_err, old)) { mapping 743 mm/filemap.c err = errseq_check_and_advance(&mapping->wb_err, mapping 754 mm/filemap.c clear_bit(AS_EIO, &mapping->flags); mapping 755 mm/filemap.c clear_bit(AS_ENOSPC, &mapping->flags); mapping 779 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 781 mm/filemap.c if (mapping_needs_writeback(mapping)) { mapping 782 mm/filemap.c err = __filemap_fdatawrite_range(mapping, lstart, lend, mapping 786 mm/filemap.c __filemap_fdatawait_range(mapping, lstart, lend); mapping 813 mm/filemap.c struct address_space *mapping = old->mapping; mapping 814 mm/filemap.c void (*freepage)(struct page *) = mapping->a_ops->freepage; mapping 816 mm/filemap.c XA_STATE(xas, &mapping->i_pages, offset); mapping 821 mm/filemap.c VM_BUG_ON_PAGE(new->mapping, new); mapping 824 mm/filemap.c new->mapping = mapping; mapping 830 mm/filemap.c old->mapping = NULL; mapping 851 mm/filemap.c struct address_space *mapping, mapping 855 mm/filemap.c XA_STATE(xas, &mapping->i_pages, offset); mapping 863 mm/filemap.c mapping_set_update(&xas, mapping); mapping 873 mm/filemap.c page->mapping = mapping; mapping 886 mm/filemap.c mapping->nrexceptional--; mapping 890 mm/filemap.c mapping->nrpages++; mapping 907 mm/filemap.c page->mapping = NULL; mapping 928 mm/filemap.c int add_to_page_cache_locked(struct page *page, struct address_space *mapping, mapping 931 mm/filemap.c return __add_to_page_cache_locked(page, mapping, offset, mapping 936 mm/filemap.c int add_to_page_cache_lru(struct page *page, struct address_space *mapping, mapping 943 mm/filemap.c ret = __add_to_page_cache_locked(page, mapping, offset, mapping 1363 mm/filemap.c struct address_space *mapping; mapping 1366 mm/filemap.c mapping = page_mapping(page); mapping 1367 mm/filemap.c if (mapping) mapping 1368 mm/filemap.c mapping_set_error(mapping, err); mapping 1459 mm/filemap.c pgoff_t page_cache_next_miss(struct address_space *mapping, mapping 1462 mm/filemap.c XA_STATE(xas, &mapping->i_pages, index); mapping 1495 mm/filemap.c pgoff_t page_cache_prev_miss(struct address_space *mapping, mapping 1498 mm/filemap.c XA_STATE(xas, &mapping->i_pages, index); mapping 1525 
mm/filemap.c struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) mapping 1527 mm/filemap.c XA_STATE(xas, &mapping->i_pages, offset); mapping 1579 mm/filemap.c struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) mapping 1584 mm/filemap.c page = find_get_entry(mapping, offset); mapping 1588 mm/filemap.c if (unlikely(page_mapping(page) != mapping)) { mapping 1629 mm/filemap.c struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, mapping 1635 mm/filemap.c page = find_get_entry(mapping, offset); mapping 1652 mm/filemap.c if (unlikely(compound_head(page)->mapping != mapping)) { mapping 1666 mm/filemap.c if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) mapping 1682 mm/filemap.c err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); mapping 1724 mm/filemap.c unsigned find_get_entries(struct address_space *mapping, mapping 1728 mm/filemap.c XA_STATE(xas, &mapping->i_pages, start); mapping 1791 mm/filemap.c unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, mapping 1795 mm/filemap.c XA_STATE(xas, &mapping->i_pages, *start); mapping 1857 mm/filemap.c unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, mapping 1860 mm/filemap.c XA_STATE(xas, &mapping->i_pages, index); mapping 1913 mm/filemap.c unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, mapping 1917 mm/filemap.c XA_STATE(xas, &mapping->i_pages, *index); mapping 2013 mm/filemap.c struct address_space *mapping = filp->f_mapping; mapping 2014 mm/filemap.c struct inode *inode = mapping->host; mapping 2047 mm/filemap.c page = find_get_page(mapping, index); mapping 2051 mm/filemap.c page_cache_sync_readahead(mapping, mapping 2054 mm/filemap.c page = find_get_page(mapping, index); mapping 2059 mm/filemap.c page_cache_async_readahead(mapping, mapping 2081 mm/filemap.c !mapping->a_ops->is_partially_uptodate) mapping 2089 mm/filemap.c if (!page->mapping) mapping 2091 mm/filemap.c if (!mapping->a_ops->is_partially_uptodate(page, mapping 2128 mm/filemap.c if (mapping_writably_mapped(mapping)) mapping 2168 mm/filemap.c if (!page->mapping) { mapping 2188 mm/filemap.c error = mapping->a_ops->readpage(filp, page); mapping 2204 mm/filemap.c if (page->mapping == NULL) { mapping 2232 mm/filemap.c page = page_cache_alloc(mapping); mapping 2237 mm/filemap.c error = add_to_page_cache_lru(page, mapping, index, mapping 2238 mm/filemap.c mapping_gfp_constraint(mapping, GFP_KERNEL)); mapping 2284 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 2285 mm/filemap.c struct inode *inode = mapping->host; mapping 2290 mm/filemap.c if (filemap_range_has_page(mapping, iocb->ki_pos, mapping 2294 mm/filemap.c retval = filemap_write_and_wait_range(mapping, mapping 2303 mm/filemap.c retval = mapping->a_ops->direct_IO(iocb, iter); mapping 2387 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 2399 mm/filemap.c page_cache_sync_readahead(mapping, ra, file, offset, mapping 2422 mm/filemap.c ra_submit(ra, mapping, file); mapping 2436 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 2447 mm/filemap.c page_cache_async_readahead(mapping, ra, file, mapping 2481 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 2483 mm/filemap.c struct inode *inode = mapping->host; mapping 2496 mm/filemap.c page = find_get_page(mapping, offset); mapping 2510 mm/filemap.c page = pagecache_get_page(mapping, offset, mapping 2524 mm/filemap.c if 
(unlikely(compound_head(page)->mapping != mapping)) { mapping 2571 mm/filemap.c error = mapping->a_ops->readpage(file, page); mapping 2606 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 2609 mm/filemap.c XA_STATE(xas, &mapping->i_pages, start_pgoff); mapping 2640 mm/filemap.c if (page->mapping != mapping || !PageUptodate(page)) mapping 2643 mm/filemap.c max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); mapping 2680 mm/filemap.c if (page->mapping != inode->i_mapping) { mapping 2707 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 2709 mm/filemap.c if (!mapping->a_ops->readpage) mapping 2756 mm/filemap.c static struct page *do_read_cache_page(struct address_space *mapping, mapping 2765 mm/filemap.c page = find_get_page(mapping, index); mapping 2770 mm/filemap.c err = add_to_page_cache_lru(page, mapping, index, gfp); mapping 2783 mm/filemap.c err = mapping->a_ops->readpage(data, page); mapping 2837 mm/filemap.c if (!page->mapping) { mapping 2869 mm/filemap.c struct page *read_cache_page(struct address_space *mapping, mapping 2874 mm/filemap.c return do_read_cache_page(mapping, index, filler, data, mapping 2875 mm/filemap.c mapping_gfp_mask(mapping)); mapping 2892 mm/filemap.c struct page *read_cache_page_gfp(struct address_space *mapping, mapping 2896 mm/filemap.c return do_read_cache_page(mapping, index, NULL, NULL, gfp); mapping 3122 mm/filemap.c int pagecache_write_begin(struct file *file, struct address_space *mapping, mapping 3126 mm/filemap.c const struct address_space_operations *aops = mapping->a_ops; mapping 3128 mm/filemap.c return aops->write_begin(file, mapping, pos, len, flags, mapping 3133 mm/filemap.c int pagecache_write_end(struct file *file, struct address_space *mapping, mapping 3137 mm/filemap.c const struct address_space_operations *aops = mapping->a_ops; mapping 3139 mm/filemap.c return aops->write_end(file, mapping, pos, len, copied, page, fsdata); mapping 3147 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 3148 mm/filemap.c struct inode *inode = mapping->host; mapping 3163 mm/filemap.c written = filemap_write_and_wait_range(mapping, pos, mapping 3175 mm/filemap.c written = invalidate_inode_pages2_range(mapping, mapping 3187 mm/filemap.c written = mapping->a_ops->direct_IO(iocb, from); mapping 3202 mm/filemap.c if (mapping->nrpages) mapping 3203 mm/filemap.c invalidate_inode_pages2_range(mapping, mapping 3225 mm/filemap.c struct page *grab_cache_page_write_begin(struct address_space *mapping, mapping 3234 mm/filemap.c page = pagecache_get_page(mapping, index, fgp_flags, mapping 3235 mm/filemap.c mapping_gfp_mask(mapping)); mapping 3246 mm/filemap.c struct address_space *mapping = file->f_mapping; mapping 3247 mm/filemap.c const struct address_space_operations *a_ops = mapping->a_ops; mapping 3284 mm/filemap.c status = a_ops->write_begin(file, mapping, pos, bytes, flags, mapping 3289 mm/filemap.c if (mapping_writably_mapped(mapping)) mapping 3295 mm/filemap.c status = a_ops->write_end(file, mapping, pos, bytes, copied, mapping 3320 mm/filemap.c balance_dirty_pages_ratelimited(mapping); mapping 3351 mm/filemap.c struct address_space * mapping = file->f_mapping; mapping 3352 mm/filemap.c struct inode *inode = mapping->host; mapping 3399 mm/filemap.c err = filemap_write_and_wait_range(mapping, pos, endbyte); mapping 3403 mm/filemap.c invalidate_mapping_pages(mapping, mapping 3473 mm/filemap.c struct address_space * const mapping = page->mapping; mapping 3479 mm/filemap.c if (mapping && 
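
do_read_cache_page() is the engine behind read_cache_page() and read_cache_page_gfp(): look up, allocate and insert on a miss, issue ->readpage (a NULL filler selects it, as the entries show), then wait for the page to become uptodate. A hedged usage sketch:

/*
 * Read one page through mapping->a_ops->readpage and wait for it to
 * become uptodate. On success the page is returned with a reference
 * held; on failure an ERR_PTR() is returned instead.
 */
static struct page *example_read_page(struct address_space *mapping,
				      pgoff_t index)
{
	struct page *page = read_cache_page(mapping, index, NULL, NULL);

	if (IS_ERR(page))
		return page;
	/* ... examine page contents; drop with put_page() when done ... */
	return page;
}
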
mapping->a_ops->releasepage) mapping 3480 mm/filemap.c return mapping->a_ops->releasepage(page, gfp_mask); mapping 295 mm/gup.c if (page->mapping && trylock_page(page)) { mapping 1514 mm/huge_memory.c if (PageDoubleMap(page) || !page->mapping) mapping 1519 mm/huge_memory.c if (page->mapping && !PageDoubleMap(page)) mapping 2463 mm/huge_memory.c VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, mapping 2465 mm/huge_memory.c page_tail->mapping = head->mapping; mapping 2528 mm/huge_memory.c shmem_uncharge(head->mapping->host, 1); mapping 2531 mm/huge_memory.c __xa_store(&head->mapping->i_pages, head[i].index, mapping 2555 mm/huge_memory.c xa_unlock(&head->mapping->i_pages); mapping 2700 mm/huge_memory.c struct address_space *mapping = NULL; mapping 2728 mm/huge_memory.c mapping = NULL; mapping 2731 mm/huge_memory.c mapping = head->mapping; mapping 2734 mm/huge_memory.c if (!mapping) { mapping 2740 mm/huge_memory.c i_mmap_lock_read(mapping); mapping 2749 mm/huge_memory.c end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); mapping 2772 mm/huge_memory.c if (mapping) { mapping 2773 mm/huge_memory.c XA_STATE(xas, &mapping->i_pages, page_index(head)); mapping 2779 mm/huge_memory.c xa_lock(&mapping->i_pages); mapping 2793 mm/huge_memory.c if (mapping) { mapping 2818 mm/huge_memory.c fail: if (mapping) mapping 2819 mm/huge_memory.c xa_unlock(&mapping->i_pages); mapping 2830 mm/huge_memory.c if (mapping) mapping 2831 mm/huge_memory.c i_mmap_unlock_read(mapping); mapping 2919 mm/huge_memory.c page = list_entry((void *)pos, struct page, mapping); mapping 2934 mm/huge_memory.c page = list_entry((void *)pos, struct page, mapping); mapping 760 mm/hugetlb.c struct address_space *mapping = vma->vm_file->f_mapping; mapping 761 mm/hugetlb.c struct inode *inode = mapping->host; mapping 1246 mm/hugetlb.c return (unsigned long)page[2].mapping == -1U; mapping 1251 mm/hugetlb.c page[2].mapping = (void *)-1U; mapping 1256 mm/hugetlb.c page[2].mapping = NULL; mapping 1275 mm/hugetlb.c page->mapping = NULL; mapping 1343 mm/hugetlb.c struct page, mapping); mapping 1361 mm/hugetlb.c if (llist_add((struct llist_node *)&page->mapping, mapping 3676 mm/hugetlb.c struct address_space *mapping; mapping 3686 mm/hugetlb.c mapping = vma->vm_file->f_mapping; mapping 3693 mm/hugetlb.c i_mmap_lock_write(mapping); mapping 3694 mm/hugetlb.c vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { mapping 3718 mm/hugetlb.c i_mmap_unlock_write(mapping); mapping 3855 mm/hugetlb.c struct address_space *mapping; mapping 3858 mm/hugetlb.c mapping = vma->vm_file->f_mapping; mapping 3861 mm/hugetlb.c return find_lock_page(mapping, idx); mapping 3871 mm/hugetlb.c struct address_space *mapping; mapping 3875 mm/hugetlb.c mapping = vma->vm_file->f_mapping; mapping 3878 mm/hugetlb.c page = find_get_page(mapping, idx); mapping 3884 mm/hugetlb.c int huge_add_to_page_cache(struct page *page, struct address_space *mapping, mapping 3887 mm/hugetlb.c struct inode *inode = mapping->host; mapping 3889 mm/hugetlb.c int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL); mapping 3909 mm/hugetlb.c struct address_space *mapping, pgoff_t idx, mapping 3938 mm/hugetlb.c page = find_lock_page(mapping, idx); mapping 3940 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h); mapping 3967 mm/hugetlb.c hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); mapping 4003 mm/hugetlb.c int err = huge_add_to_page_cache(page, mapping, idx); mapping 4047 mm/hugetlb.c size = i_size_read(mapping->host) >> 
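
hugetlb keeps its pagecache indexed in huge-page units, which is why the hugetlbfs_pagecache_page() entries above derive the index from the hstate rather than PAGE_SHIFT. A sketch, assuming vma_hugecache_offset() from mm/hugetlb.c is visible to the caller (example_hugetlb_lookup is hypothetical):

/*
 * Find and lock the hugetlb pagecache page backing @address in a
 * hugetlb VMA; returns NULL when nothing is cached at that index.
 */
static struct page *example_hugetlb_lookup(struct vm_area_struct *vma,
					   unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}
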
huge_page_shift(h); mapping 4094 mm/hugetlb.c u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, mapping 4100 mm/hugetlb.c key[0] = (unsigned long) mapping; mapping 4112 mm/hugetlb.c u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping, mapping 4130 mm/hugetlb.c struct address_space *mapping; mapping 4149 mm/hugetlb.c mapping = vma->vm_file->f_mapping; mapping 4157 mm/hugetlb.c hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr); mapping 4162 mm/hugetlb.c ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); mapping 4267 mm/hugetlb.c struct address_space *mapping; mapping 4306 mm/hugetlb.c mapping = dst_vma->vm_file->f_mapping; mapping 4313 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h); mapping 4324 mm/hugetlb.c ret = huge_add_to_page_cache(page, mapping, idx); mapping 4341 mm/hugetlb.c size = i_size_read(mapping->host) >> huge_page_shift(h); mapping 4882 mm/hugetlb.c struct address_space *mapping = vma->vm_file->f_mapping; mapping 4894 mm/hugetlb.c i_mmap_lock_write(mapping); mapping 4895 mm/hugetlb.c vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { mapping 4924 mm/hugetlb.c i_mmap_unlock_write(mapping); mapping 52 mm/internal.h extern unsigned int __do_page_cache_readahead(struct address_space *mapping, mapping 60 mm/internal.h struct address_space *mapping, struct file *filp) mapping 62 mm/internal.h return __do_page_cache_readahead(mapping, filp, mapping 1347 mm/khugepaged.c if (WARN_ON(hpage->mapping != vma->vm_file->f_mapping)) mapping 1417 mm/khugepaged.c static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) mapping 1423 mm/khugepaged.c i_mmap_lock_write(mapping); mapping 1424 mm/khugepaged.c vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { mapping 1471 mm/khugepaged.c i_mmap_unlock_write(mapping); mapping 1496 mm/khugepaged.c struct address_space *mapping = file->f_mapping; mapping 1502 mm/khugepaged.c XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER); mapping 1541 mm/khugepaged.c new_page->mapping = mapping; mapping 1568 mm/khugepaged.c if (!shmem_charge(mapping->host, 1)) { mapping 1580 mm/khugepaged.c if (shmem_getpage(mapping->host, index, &page, mapping 1595 mm/khugepaged.c page_cache_sync_readahead(mapping, &file->f_ra, mapping 1600 mm/khugepaged.c page = find_lock_page(mapping, index); mapping 1635 mm/khugepaged.c if (page_mapping(page) != mapping) { mapping 1663 mm/khugepaged.c unmap_mapping_pages(mapping, index, 1, false); mapping 1703 mm/khugepaged.c filemap_nr_thps_inc(mapping); mapping 1735 mm/khugepaged.c page->mapping = NULL; mapping 1763 mm/khugepaged.c retract_page_tables(mapping, start); mapping 1772 mm/khugepaged.c mapping->nrpages -= nr_none; mapping 1775 mm/khugepaged.c shmem_uncharge(mapping->host, nr_none); mapping 1806 mm/khugepaged.c new_page->mapping = NULL; mapping 1819 mm/khugepaged.c struct address_space *mapping = file->f_mapping; mapping 1820 mm/khugepaged.c XA_STATE(xas, &mapping->i_pages, start); mapping 706 mm/ksm.c if (READ_ONCE(page->mapping) != expected_mapping) mapping 733 mm/ksm.c if (READ_ONCE(page->mapping) != expected_mapping) { mapping 747 mm/ksm.c if (READ_ONCE(page->mapping) != expected_mapping) { mapping 868 mm/ksm.c page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM); mapping 2694 mm/ksm.c VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); mapping 224 mm/madvise.c struct address_space *mapping) mapping 233 mm/madvise.c page = find_get_entry(mapping, 
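
The hugetlb_fault_mutex_hash() entries show how faults on the same (mapping, index) pair are serialized: the tuple hashes into hugetlb_fault_mutex_table and the per-bucket mutex brackets the pagecache update. A sketch of that bracket (the function name is hypothetical; the arguments follow the call sites above):

/*
 * Take the same per-(mapping, index) mutex hugetlb_fault() holds, so
 * that find_lock_page()/huge_add_to_page_cache() cannot race with a
 * concurrent fault instantiating the same huge page.
 */
static void example_hugetlb_serialise(struct hstate *h,
				      struct address_space *mapping,
				      pgoff_t idx, unsigned long haddr)
{
	u32 hash = hugetlb_fault_mutex_hash(h, mapping, idx, haddr);

	mutex_lock(&hugetlb_fault_mutex_table[hash]);
	/* ... look up or insert the page for (mapping, idx) here ... */
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
}
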
index); mapping 5440 mm/memcontrol.c struct address_space *mapping; mapping 5448 mm/memcontrol.c mapping = vma->vm_file->f_mapping; mapping 5454 mm/memcontrol.c if (shmem_mapping(mapping)) { mapping 5455 mm/memcontrol.c page = find_get_entry(mapping, pgoff); mapping 5464 mm/memcontrol.c page = find_get_page(mapping, pgoff); mapping 5466 mm/memcontrol.c page = find_get_page(mapping, pgoff); mapping 5530 mm/memcontrol.c struct address_space *mapping = page_mapping(page); mapping 5532 mm/memcontrol.c if (mapping_cap_account_dirty(mapping)) { mapping 6594 mm/memcontrol.c VM_BUG_ON_PAGE(!page->mapping, page); mapping 66 mm/memfd.c static int memfd_wait_for_pins(struct address_space *mapping) mapping 68 mm/memfd.c XA_STATE(xas, &mapping->i_pages, 0); mapping 83 mm/memory-failure.c struct address_space *mapping; mapping 96 mm/memory-failure.c mapping = page_mapping(p); mapping 97 mm/memory-failure.c if (mapping == NULL || mapping->host == NULL) mapping 100 mm/memory-failure.c dev = mapping->host->i_sb->s_dev; mapping 479 mm/memory-failure.c struct address_space *mapping = page->mapping; mapping 481 mm/memory-failure.c i_mmap_lock_read(mapping); mapping 489 mm/memory-failure.c vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, mapping 503 mm/memory-failure.c i_mmap_unlock_read(mapping); mapping 517 mm/memory-failure.c if (!page->mapping) mapping 594 mm/memory-failure.c struct address_space *mapping) mapping 598 mm/memory-failure.c if (mapping->a_ops->error_remove_page) { mapping 599 mm/memory-failure.c int err = mapping->a_ops->error_remove_page(mapping, p); mapping 650 mm/memory-failure.c struct address_space *mapping; mapping 668 mm/memory-failure.c mapping = page_mapping(p); mapping 669 mm/memory-failure.c if (!mapping) { mapping 681 mm/memory-failure.c return truncate_error_page(p, pfn, mapping); mapping 691 mm/memory-failure.c struct address_space *mapping = page_mapping(p); mapping 695 mm/memory-failure.c if (mapping) { mapping 730 mm/memory-failure.c mapping_set_error(mapping, -EIO); mapping 787 mm/memory-failure.c struct address_space *mapping; mapping 792 mm/memory-failure.c mapping = page_mapping(hpage); mapping 793 mm/memory-failure.c if (mapping) { mapping 794 mm/memory-failure.c res = truncate_error_page(hpage, pfn, mapping); mapping 969 mm/memory-failure.c struct address_space *mapping; mapping 1009 mm/memory-failure.c mapping = page_mapping(hpage); mapping 1010 mm/memory-failure.c if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping && mapping 1011 mm/memory-failure.c mapping_cap_writeback_dirty(mapping)) { mapping 1218 mm/memory-failure.c unmap_mapping_range(page->mapping, start, start + size, 0); mapping 1410 mm/memory-failure.c if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) { mapping 488 mm/memory.c struct address_space *mapping; mapping 513 mm/memory.c mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL; mapping 522 mm/memory.c (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index); mapping 527 mm/memory.c mapping ? mapping->a_ops->readpage : NULL); mapping 2215 mm/memory.c if (!page->mapping) { mapping 2233 mm/memory.c struct address_space *mapping; mapping 2246 mm/memory.c mapping = page_rmapping(page); mapping 2261 mm/memory.c if ((dirtied || page_mkwrite) && mapping) { mapping 2265 mm/memory.c balance_dirty_pages_ratelimited(mapping); mapping 2691 mm/memory.c void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, mapping 2696 mm/memory.c details.check_mapping = even_cows ? 
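
unmap_mapping_pages() and unmap_mapping_range() zap every user PTE over a file range by walking mapping->i_mmap, which is what makes hole punching and truncation safe against mapped pages. A minimal sketch of the hole-punch call (offsets assumed page-aligned; the wrapper name is hypothetical):

/*
 * Remove all user mappings of the byte range being punched out,
 * before the pagecache pages themselves are truncated. even_cows == 1
 * also zaps private COW copies of the affected file pages.
 */
static void example_zap_hole(struct address_space *mapping,
			     loff_t start, loff_t len)
{
	unmap_mapping_range(mapping, start, len, 1);
}
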
NULL : mapping; mapping 2702 mm/memory.c i_mmap_lock_write(mapping); mapping 2703 mm/memory.c if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))) mapping 2704 mm/memory.c unmap_mapping_range_tree(&mapping->i_mmap, &details); mapping 2705 mm/memory.c i_mmap_unlock_write(mapping); mapping 2725 mm/memory.c void unmap_mapping_range(struct address_space *mapping, mapping 2739 mm/memory.c unmap_mapping_pages(mapping, hba, hlen, even_cows); mapping 450 mm/memremap.c page->mapping = NULL; mapping 87 mm/migrate.c struct address_space *mapping; mapping 125 mm/migrate.c mapping = page_mapping(page); mapping 126 mm/migrate.c VM_BUG_ON_PAGE(!mapping, page); mapping 128 mm/migrate.c if (!mapping->a_ops->isolate_page(page, mode)) mapping 149 mm/migrate.c struct address_space *mapping; mapping 155 mm/migrate.c mapping = page_mapping(page); mapping 156 mm/migrate.c mapping->a_ops->putback_page(page); mapping 374 mm/migrate.c static int expected_page_refs(struct address_space *mapping, struct page *page) mapping 383 mm/migrate.c if (mapping) mapping 397 mm/migrate.c int migrate_page_move_mapping(struct address_space *mapping, mapping 400 mm/migrate.c XA_STATE(xas, &mapping->i_pages, page_index(page)); mapping 403 mm/migrate.c int expected_count = expected_page_refs(mapping, page) + extra_count; mapping 405 mm/migrate.c if (!mapping) { mapping 412 mm/migrate.c newpage->mapping = page->mapping; mapping 438 mm/migrate.c newpage->mapping = page->mapping; mapping 494 mm/migrate.c if (dirty && mapping_cap_account_dirty(mapping)) { mapping 511 mm/migrate.c int migrate_huge_page_move_mapping(struct address_space *mapping, mapping 514 mm/migrate.c XA_STATE(xas, &mapping->i_pages, page_index(page)); mapping 530 mm/migrate.c newpage->mapping = page->mapping; mapping 676 mm/migrate.c int migrate_page(struct address_space *mapping, mapping 684 mm/migrate.c rc = migrate_page_move_mapping(mapping, newpage, page, 0); mapping 736 mm/migrate.c static int __buffer_migrate_page(struct address_space *mapping, mapping 745 mm/migrate.c return migrate_page(mapping, newpage, page, mode); mapping 748 mm/migrate.c expected_count = expected_page_refs(mapping, page); mapping 762 mm/migrate.c spin_lock(&mapping->private_lock); mapping 776 mm/migrate.c spin_unlock(&mapping->private_lock); mapping 783 mm/migrate.c rc = migrate_page_move_mapping(mapping, newpage, page, 0); mapping 810 mm/migrate.c spin_unlock(&mapping->private_lock); mapping 826 mm/migrate.c int buffer_migrate_page(struct address_space *mapping, mapping 829 mm/migrate.c return __buffer_migrate_page(mapping, newpage, page, mode, false); mapping 839 mm/migrate.c int buffer_migrate_page_norefs(struct address_space *mapping, mapping 842 mm/migrate.c return __buffer_migrate_page(mapping, newpage, page, mode, true); mapping 849 mm/migrate.c static int writeout(struct address_space *mapping, struct page *page) mapping 860 mm/migrate.c if (!mapping->a_ops->writepage) mapping 878 mm/migrate.c rc = mapping->a_ops->writepage(page, &wbc); mapping 890 mm/migrate.c static int fallback_migrate_page(struct address_space *mapping, mapping 902 mm/migrate.c return writeout(mapping, page); mapping 913 mm/migrate.c return migrate_page(mapping, newpage, page, mode); mapping 930 mm/migrate.c struct address_space *mapping; mapping 937 mm/migrate.c mapping = page_mapping(page); mapping 940 mm/migrate.c if (!mapping) mapping 941 mm/migrate.c rc = migrate_page(mapping, newpage, page, mode); mapping 942 mm/migrate.c else if (mapping->a_ops->migratepage) mapping 950 mm/migrate.c rc = 
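
For pages with no ->private state, migrate_page() is a complete ->migratepage implementation: migrate_page_move_mapping() switches the xarray slot and statistics, then flags and contents are copied. Filesystems in that position can plug it in directly, as sketched below (the aops instance is hypothetical):

static const struct address_space_operations example_aops = {
	/*
	 * migrate_page() is sufficient when pages carry no buffers or
	 * other private data; buffer_migrate_page() above is the
	 * buffer_head-aware counterpart.
	 */
	.migratepage	= migrate_page,
};
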
mapping->a_ops->migratepage(mapping, newpage, mapping 953 mm/migrate.c rc = fallback_migrate_page(mapping, newpage, mapping 967 mm/migrate.c rc = mapping->a_ops->migratepage(mapping, newpage, mapping 994 mm/migrate.c page->mapping = NULL; mapping 1100 mm/migrate.c if (!page->mapping) { mapping 2056 mm/migrate.c new_page->mapping = page->mapping; mapping 2290 mm/migrate.c if (!page || !page->mapping || PageTransCompound(page)) { mapping 2853 mm/migrate.c struct address_space *mapping; mapping 2881 mm/migrate.c mapping = page_mapping(page); mapping 2889 mm/migrate.c if (mapping) { mapping 2903 mm/migrate.c r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); mapping 51 mm/mincore.c static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) mapping 63 mm/mincore.c if (shmem_mapping(mapping)) { mapping 64 mm/mincore.c page = find_get_entry(mapping, pgoff); mapping 83 mm/mincore.c page = find_get_page(mapping, pgoff); mapping 85 mm/mincore.c page = find_get_page(mapping, pgoff); mapping 140 mm/mmap.c struct file *file, struct address_space *mapping) mapping 145 mm/mmap.c mapping_unmap_writable(mapping); mapping 147 mm/mmap.c flush_dcache_mmap_lock(mapping); mapping 148 mm/mmap.c vma_interval_tree_remove(vma, &mapping->i_mmap); mapping 149 mm/mmap.c flush_dcache_mmap_unlock(mapping); mapping 161 mm/mmap.c struct address_space *mapping = file->f_mapping; mapping 162 mm/mmap.c i_mmap_lock_write(mapping); mapping 163 mm/mmap.c __remove_shared_vm_struct(vma, file, mapping); mapping 164 mm/mmap.c i_mmap_unlock_write(mapping); mapping 618 mm/mmap.c struct address_space *mapping = file->f_mapping; mapping 623 mm/mmap.c atomic_inc(&mapping->i_mmap_writable); mapping 625 mm/mmap.c flush_dcache_mmap_lock(mapping); mapping 626 mm/mmap.c vma_interval_tree_insert(vma, &mapping->i_mmap); mapping 627 mm/mmap.c flush_dcache_mmap_unlock(mapping); mapping 644 mm/mmap.c struct address_space *mapping = NULL; mapping 647 mm/mmap.c mapping = vma->vm_file->f_mapping; mapping 648 mm/mmap.c i_mmap_lock_write(mapping); mapping 654 mm/mmap.c if (mapping) mapping 655 mm/mmap.c i_mmap_unlock_write(mapping); mapping 723 mm/mmap.c struct address_space *mapping = NULL; mapping 819 mm/mmap.c mapping = file->f_mapping; mapping 820 mm/mmap.c root = &mapping->i_mmap; mapping 826 mm/mmap.c i_mmap_lock_write(mapping); mapping 851 mm/mmap.c flush_dcache_mmap_lock(mapping); mapping 875 mm/mmap.c flush_dcache_mmap_unlock(mapping); mapping 897 mm/mmap.c __remove_shared_vm_struct(next, file, mapping); mapping 922 mm/mmap.c if (mapping) mapping 923 mm/mmap.c i_mmap_unlock_write(mapping); mapping 3492 mm/mmap.c static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) mapping 3494 mm/mmap.c if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { mapping 3504 mm/mmap.c if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) mapping 3506 mm/mmap.c down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); mapping 3609 mm/mmap.c static void vm_unlock_mapping(struct address_space *mapping) mapping 3611 mm/mmap.c if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { mapping 3616 mm/mmap.c i_mmap_unlock_write(mapping); mapping 3618 mm/mmap.c &mapping->flags)) mapping 589 mm/nommu.c struct address_space *mapping; mapping 599 mm/nommu.c mapping = vma->vm_file->f_mapping; mapping 601 mm/nommu.c i_mmap_lock_write(mapping); mapping 602 mm/nommu.c flush_dcache_mmap_lock(mapping); mapping 603 mm/nommu.c vma_interval_tree_insert(vma, &mapping->i_mmap); mapping 604 mm/nommu.c 
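
The mm/mmap.c and mm/nommu.c entries show the write side of the i_mmap interval tree: every file-backed VMA is linked under i_mmap_lock_write(), bracketed by the arch-specific dcache-flush hooks (no-ops on most architectures). Condensed into one hypothetical helper:

/*
 * Link a file-backed VMA into its mapping's interval tree, the way
 * __vma_link_file() does, so reverse lookups (rmap, truncation) see it.
 */
static void example_link_vma(struct vm_area_struct *vma)
{
	struct address_space *mapping = vma->vm_file->f_mapping;

	i_mmap_lock_write(mapping);
	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_insert(vma, &mapping->i_mmap);
	flush_dcache_mmap_unlock(mapping);
	i_mmap_unlock_write(mapping);
}
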
flush_dcache_mmap_unlock(mapping); mapping 605 mm/nommu.c i_mmap_unlock_write(mapping); mapping 653 mm/nommu.c struct address_space *mapping; mapping 668 mm/nommu.c mapping = vma->vm_file->f_mapping; mapping 670 mm/nommu.c i_mmap_lock_write(mapping); mapping 671 mm/nommu.c flush_dcache_mmap_lock(mapping); mapping 672 mm/nommu.c vma_interval_tree_remove(vma, &mapping->i_mmap); mapping 673 mm/nommu.c flush_dcache_mmap_unlock(mapping); mapping 674 mm/nommu.c i_mmap_unlock_write(mapping); mapping 1864 mm/page-writeback.c void balance_dirty_pages_ratelimited(struct address_space *mapping) mapping 1866 mm/page-writeback.c struct inode *inode = mapping->host; mapping 2105 mm/page-writeback.c void tag_pages_for_writeback(struct address_space *mapping, mapping 2108 mm/page-writeback.c XA_STATE(xas, &mapping->i_pages, start); mapping 2158 mm/page-writeback.c int write_cache_pages(struct address_space *mapping, mapping 2176 mm/page-writeback.c writeback_index = mapping->writeback_index; /* prev offset */ mapping 2190 mm/page-writeback.c tag_pages_for_writeback(mapping, index, end); mapping 2195 mm/page-writeback.c nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, mapping 2215 mm/page-writeback.c if (unlikely(page->mapping != mapping)) { mapping 2237 mm/page-writeback.c trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); mapping 2289 mm/page-writeback.c mapping->writeback_index = done_index; mapping 2302 mm/page-writeback.c struct address_space *mapping = data; mapping 2303 mm/page-writeback.c int ret = mapping->a_ops->writepage(page, wbc); mapping 2304 mm/page-writeback.c mapping_set_error(mapping, ret); mapping 2318 mm/page-writeback.c int generic_writepages(struct address_space *mapping, mapping 2325 mm/page-writeback.c if (!mapping->a_ops->writepage) mapping 2329 mm/page-writeback.c ret = write_cache_pages(mapping, wbc, __writepage, mapping); mapping 2336 mm/page-writeback.c int do_writepages(struct address_space *mapping, struct writeback_control *wbc) mapping 2343 mm/page-writeback.c if (mapping->a_ops->writepages) mapping 2344 mm/page-writeback.c ret = mapping->a_ops->writepages(mapping, wbc); mapping 2346 mm/page-writeback.c ret = generic_writepages(mapping, wbc); mapping 2368 mm/page-writeback.c struct address_space *mapping = page->mapping; mapping 2381 mm/page-writeback.c ret = mapping->a_ops->writepage(page, &wbc); mapping 2390 mm/page-writeback.c ret = filemap_check_errors(mapping); mapping 2412 mm/page-writeback.c void account_page_dirtied(struct page *page, struct address_space *mapping) mapping 2414 mm/page-writeback.c struct inode *inode = mapping->host; mapping 2416 mm/page-writeback.c trace_writeback_dirty_page(page, mapping); mapping 2418 mm/page-writeback.c if (mapping_cap_account_dirty(mapping)) { mapping 2442 mm/page-writeback.c void account_page_cleaned(struct page *page, struct address_space *mapping, mapping 2445 mm/page-writeback.c if (mapping_cap_account_dirty(mapping)) { mapping 2469 mm/page-writeback.c struct address_space *mapping = page_mapping(page); mapping 2472 mm/page-writeback.c if (!mapping) { mapping 2477 mm/page-writeback.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 2478 mm/page-writeback.c BUG_ON(page_mapping(page) != mapping); mapping 2480 mm/page-writeback.c account_page_dirtied(page, mapping); mapping 2481 mm/page-writeback.c __xa_set_mark(&mapping->i_pages, page_index(page), mapping 2483 mm/page-writeback.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 2486 mm/page-writeback.c if (mapping->host) { mapping 2488 
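
write_cache_pages() is the generic tagged-walk engine; generic_writepages() above is nothing more than it plus a per-page trampoline that records errors on the mapping. A sketch of a ->writepages built the same way (both function names are hypothetical):

/* Per-page callback: write one page, remembering any error. */
static int example_one_page(struct page *page,
			    struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);

	mapping_set_error(mapping, ret);
	return ret;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	return write_cache_pages(mapping, wbc, example_one_page, mapping);
}
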
mm/page-writeback.c __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); mapping 2506 mm/page-writeback.c struct address_space *mapping = page->mapping; mapping 2508 mm/page-writeback.c if (mapping && mapping_cap_account_dirty(mapping)) { mapping 2509 mm/page-writeback.c struct inode *inode = mapping->host; mapping 2551 mm/page-writeback.c struct address_space *mapping = page_mapping(page); mapping 2554 mm/page-writeback.c if (likely(mapping)) { mapping 2555 mm/page-writeback.c int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; mapping 2618 mm/page-writeback.c struct address_space *mapping = page_mapping(page); mapping 2620 mm/page-writeback.c if (mapping_cap_account_dirty(mapping)) { mapping 2621 mm/page-writeback.c struct inode *inode = mapping->host; mapping 2629 mm/page-writeback.c account_page_cleaned(page, mapping, wb); mapping 2655 mm/page-writeback.c struct address_space *mapping = page_mapping(page); mapping 2660 mm/page-writeback.c if (mapping && mapping_cap_account_dirty(mapping)) { mapping 2661 mm/page-writeback.c struct inode *inode = mapping->host; mapping 2716 mm/page-writeback.c struct address_space *mapping = page_mapping(page); mapping 2723 mm/page-writeback.c if (mapping && mapping_use_writeback_tags(mapping)) { mapping 2724 mm/page-writeback.c struct inode *inode = mapping->host; mapping 2728 mm/page-writeback.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 2731 mm/page-writeback.c __xa_clear_mark(&mapping->i_pages, page_index(page), mapping 2741 mm/page-writeback.c if (mapping->host && !mapping_tagged(mapping, mapping 2743 mm/page-writeback.c sb_clear_inode_writeback(mapping->host); mapping 2745 mm/page-writeback.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 2766 mm/page-writeback.c struct address_space *mapping = page_mapping(page); mapping 2770 mm/page-writeback.c if (mapping && mapping_use_writeback_tags(mapping)) { mapping 2771 mm/page-writeback.c XA_STATE(xas, &mapping->i_pages, page_index(page)); mapping 2772 mm/page-writeback.c struct inode *inode = mapping->host; mapping 2782 mm/page-writeback.c on_wblist = mapping_tagged(mapping, mapping 2794 mm/page-writeback.c if (mapping->host && !on_wblist) mapping 2795 mm/page-writeback.c sb_mark_inode_writeback(mapping->host); mapping 2837 mm/page-writeback.c if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host))) mapping 688 mm/page_alloc.c p->mapping = TAIL_MAPPING; mapping 1017 mm/page_alloc.c if (unlikely((unsigned long)page->mapping | mapping 1038 mm/page_alloc.c if (unlikely(page->mapping != NULL)) mapping 1092 mm/page_alloc.c if (page->mapping != TAIL_MAPPING) { mapping 1108 mm/page_alloc.c page->mapping = NULL; mapping 1153 mm/page_alloc.c page->mapping = NULL; mapping 2050 mm/page_alloc.c if (unlikely(page->mapping != NULL)) mapping 150 mm/page_io.c struct address_space *mapping = swap_file->f_mapping; mapping 151 mm/page_io.c struct inode *inode = mapping->host; mapping 288 mm/page_io.c struct address_space *mapping = swap_file->f_mapping; mapping 302 mm/page_io.c ret = mapping->a_ops->direct_IO(&kiocb, &from); mapping 369 mm/page_io.c struct address_space *mapping = swap_file->f_mapping; mapping 371 mm/page_io.c ret = mapping->a_ops->readpage(swap_file, page); mapping 429 mm/page_io.c struct address_space *mapping = sis->swap_file->f_mapping; mapping 432 mm/page_io.c return mapping->a_ops->set_page_dirty(page); mapping 33 mm/readahead.c file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) mapping 35 mm/readahead.c ra->ra_pages = 
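
The dirty/writeback accounting entries above all orbit one state machine: a ->writepage marks the page writeback while it is still locked, unlocks it, and lets I/O completion clear the flag. A skeleton of just those transitions (no real I/O is issued; everything named example_* is hypothetical):

/*
 * State transitions only: set_page_writeback() moves accounting from
 * dirty to writeback, the page is unlocked for the duration of the
 * I/O, and end_page_writeback() (normally run from the bio completion
 * handler) wakes waiters and clears PG_writeback.
 */
static int example_writepage_skel(struct page *page,
				  struct writeback_control *wbc)
{
	set_page_writeback(page);
	unlock_page(page);
	/* ... submit the bio here; completion calls end_page_writeback() ... */
	end_page_writeback(page);	/* stand-in for the async completion */
	return 0;
}
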
inode_to_bdi(mapping->host)->ra_pages; mapping 47 mm/readahead.c static void read_cache_pages_invalidate_page(struct address_space *mapping, mapping 53 mm/readahead.c page->mapping = mapping; mapping 55 mm/readahead.c page->mapping = NULL; mapping 64 mm/readahead.c static void read_cache_pages_invalidate_pages(struct address_space *mapping, mapping 72 mm/readahead.c read_cache_pages_invalidate_page(mapping, victim); mapping 88 mm/readahead.c int read_cache_pages(struct address_space *mapping, struct list_head *pages, mapping 97 mm/readahead.c if (add_to_page_cache_lru(page, mapping, page->index, mapping 98 mm/readahead.c readahead_gfp_mask(mapping))) { mapping 99 mm/readahead.c read_cache_pages_invalidate_page(mapping, page); mapping 106 mm/readahead.c read_cache_pages_invalidate_pages(mapping, pages); mapping 116 mm/readahead.c static int read_pages(struct address_space *mapping, struct file *filp, mapping 125 mm/readahead.c if (mapping->a_ops->readpages) { mapping 126 mm/readahead.c ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); mapping 135 mm/readahead.c if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) mapping 136 mm/readahead.c mapping->a_ops->readpage(filp, page); mapping 155 mm/readahead.c unsigned int __do_page_cache_readahead(struct address_space *mapping, mapping 159 mm/readahead.c struct inode *inode = mapping->host; mapping 166 mm/readahead.c gfp_t gfp_mask = readahead_gfp_mask(mapping); mapping 182 mm/readahead.c page = xa_load(&mapping->i_pages, page_offset); mapping 190 mm/readahead.c read_pages(mapping, filp, &page_pool, nr_pages, mapping 212 mm/readahead.c read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask); mapping 222 mm/readahead.c int force_page_cache_readahead(struct address_space *mapping, struct file *filp, mapping 225 mm/readahead.c struct backing_dev_info *bdi = inode_to_bdi(mapping->host); mapping 229 mm/readahead.c if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages)) mapping 243 mm/readahead.c __do_page_cache_readahead(mapping, filp, offset, this_chunk, 0); mapping 332 mm/readahead.c static pgoff_t count_history_pages(struct address_space *mapping, mapping 338 mm/readahead.c head = page_cache_prev_miss(mapping, offset - 1, max); mapping 347 mm/readahead.c static int try_context_readahead(struct address_space *mapping, mapping 355 mm/readahead.c size = count_history_pages(mapping, offset, max); mapping 382 mm/readahead.c ondemand_readahead(struct address_space *mapping, mapping 387 mm/readahead.c struct backing_dev_info *bdi = inode_to_bdi(mapping->host); mapping 427 mm/readahead.c start = page_cache_next_miss(mapping, offset + 1, max_pages); mapping 460 mm/readahead.c if (try_context_readahead(mapping, ra, offset, req_size, max_pages)) mapping 467 mm/readahead.c return __do_page_cache_readahead(mapping, filp, offset, req_size, 0); mapping 492 mm/readahead.c return ra_submit(ra, mapping, filp); mapping 509 mm/readahead.c void page_cache_sync_readahead(struct address_space *mapping, mapping 522 mm/readahead.c force_page_cache_readahead(mapping, filp, offset, req_size); mapping 527 mm/readahead.c ondemand_readahead(mapping, ra, filp, false, offset, req_size); mapping 547 mm/readahead.c page_cache_async_readahead(struct address_space *mapping, mapping 567 mm/readahead.c if (inode_read_congested(mapping->host)) mapping 574 mm/readahead.c ondemand_readahead(mapping, ra, filp, true, offset, req_size); mapping 471 mm/rmap.c anon_mapping = (unsigned long)READ_ONCE(page->mapping); mapping 515 mm/rmap.c anon_mapping = 
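
page_cache_sync_readahead() and page_cache_async_readahead() split readahead into a miss path and a marker-driven extension path; the generic_file_buffered_read() entries earlier use them exactly this way. A condensed sketch (example_readahead_lookup is a hypothetical name):

/*
 * Look up @index, ramping readahead up synchronously on a miss and
 * extending it asynchronously when we hit the PageReadahead() marker.
 */
static struct page *example_readahead_lookup(struct file *filp,
					     pgoff_t index,
					     unsigned long req_count)
{
	struct address_space *mapping = filp->f_mapping;
	struct page *page = find_get_page(mapping, index);

	if (!page) {
		page_cache_sync_readahead(mapping, &filp->f_ra, filp,
					  index, req_count);
		page = find_get_page(mapping, index);
	} else if (PageReadahead(page)) {
		page_cache_async_readahead(mapping, &filp->f_ra, filp,
					   page, index, req_count);
	}
	return page;	/* may be NULL, or present but not yet uptodate */
}
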
(unsigned long)READ_ONCE(page->mapping); mapping 700 mm/rmap.c } else if (page->mapping) { mapping 701 mm/rmap.c if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) mapping 969 mm/rmap.c struct address_space *mapping; mapping 981 mm/rmap.c mapping = page_mapping(page); mapping 982 mm/rmap.c if (!mapping) mapping 1016 mm/rmap.c WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); mapping 1045 mm/rmap.c page->mapping = (struct address_space *) anon_vma; mapping 1867 mm/rmap.c struct address_space *mapping = page_mapping(page); mapping 1879 mm/rmap.c if (!mapping) mapping 1885 mm/rmap.c i_mmap_lock_read(mapping); mapping 1886 mm/rmap.c vma_interval_tree_foreach(vma, &mapping->i_mmap, mapping 1903 mm/rmap.c i_mmap_unlock_read(mapping); mapping 353 mm/shmem.c static int shmem_replace_entry(struct address_space *mapping, mapping 356 mm/shmem.c XA_STATE(xas, &mapping->i_pages, index); mapping 375 mm/shmem.c static bool shmem_confirm_swap(struct address_space *mapping, mapping 378 mm/shmem.c return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap); mapping 607 mm/shmem.c struct address_space *mapping, mapping 610 mm/shmem.c XA_STATE_ORDER(xas, &mapping->i_pages, index, compound_order(page)); mapping 621 mm/shmem.c page->mapping = mapping; mapping 643 mm/shmem.c mapping->nrpages += nr; mapping 651 mm/shmem.c page->mapping = NULL; mapping 664 mm/shmem.c struct address_space *mapping = page->mapping; mapping 669 mm/shmem.c xa_lock_irq(&mapping->i_pages); mapping 670 mm/shmem.c error = shmem_replace_entry(mapping, page->index, page, radswap); mapping 671 mm/shmem.c page->mapping = NULL; mapping 672 mm/shmem.c mapping->nrpages--; mapping 675 mm/shmem.c xa_unlock_irq(&mapping->i_pages); mapping 683 mm/shmem.c static int shmem_free_swap(struct address_space *mapping, mapping 688 mm/shmem.c old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); mapping 702 mm/shmem.c unsigned long shmem_partial_swap_usage(struct address_space *mapping, mapping 705 mm/shmem.c XA_STATE(xas, &mapping->i_pages, start); mapping 738 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 756 mm/shmem.c return shmem_partial_swap_usage(mapping, mapping 764 mm/shmem.c void shmem_unlock_mapping(struct address_space *mapping) mapping 774 mm/shmem.c while (!mapping_unevictable(mapping)) { mapping 779 mm/shmem.c pvec.nr = find_get_entries(mapping, index, mapping 798 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 816 mm/shmem.c pvec.nr = find_get_entries(mapping, index, mapping 831 mm/shmem.c nr_swaps_freed += !shmem_free_swap(mapping, mapping 862 mm/shmem.c if (page_mapping(page) == mapping) { mapping 864 mm/shmem.c truncate_inode_page(mapping, page); mapping 907 mm/shmem.c pvec.nr = find_get_entries(mapping, index, mapping 928 mm/shmem.c if (shmem_free_swap(mapping, index, page)) { mapping 967 mm/shmem.c if (page_mapping(page) == mapping) { mapping 969 mm/shmem.c truncate_inode_page(mapping, page); mapping 1121 mm/shmem.c static int shmem_find_swap_entries(struct address_space *mapping, mapping 1126 mm/shmem.c XA_STATE(xas, &mapping->i_pages, start); mapping 1174 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 1183 mm/shmem.c mapping_gfp_mask(mapping), mapping 1203 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 1217 mm/shmem.c pvec.nr = shmem_find_swap_entries(mapping, start, nr_entries, mapping 1296 mm/shmem.c struct address_space *mapping; mapping 1303 mm/shmem.c mapping = page->mapping; mapping 1305 mm/shmem.c 
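
rmap_walk_file() (the mm/rmap.c 1867–1903 block above) is the read-side counterpart of the VMA linking shown earlier: under i_mmap_lock_read(), the interval tree yields every VMA overlapping a file offset. A sketch of that walk in isolation (the helper name is hypothetical):

/*
 * Visit every VMA that maps file offset @pgoff of @mapping; this is
 * the traversal page_mkclean() and try_to_unmap() are built on.
 */
static void example_for_each_mapper(struct address_space *mapping,
				    pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* vma->vm_start/vm_pgoff locate pgoff inside this VMA */
	}
	i_mmap_unlock_read(mapping);
}
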
inode = mapping->host; mapping 1470 mm/shmem.c struct address_space *mapping = info->vfs_inode.i_mapping; mapping 1478 mm/shmem.c if (xa_find(&mapping->i_pages, &hindex, hindex + HPAGE_PMD_NR - 1, mapping 1631 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 1663 mm/shmem.c !shmem_confirm_swap(mapping, index, swap)) { mapping 1682 mm/shmem.c error = shmem_add_to_page_cache(page, mapping, index, mapping 1719 mm/shmem.c if (!shmem_confirm_swap(mapping, index, swap)) mapping 1745 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 1770 mm/shmem.c page = find_lock_entry(mapping, index); mapping 1808 mm/shmem.c if (mapping->a_ops != &shmem_aops) mapping 1875 mm/shmem.c error = shmem_add_to_page_cache(page, mapping, hindex, mapping 2303 mm/shmem.c bool shmem_mapping(struct address_space *mapping) mapping 2305 mm/shmem.c return mapping->a_ops == &shmem_aops; mapping 2318 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 2319 mm/shmem.c gfp_t gfp = mapping_gfp_mask(mapping); mapping 2375 mm/shmem.c ret = shmem_add_to_page_cache(page, mapping, pgoff, NULL, mapping 2473 mm/shmem.c shmem_write_begin(struct file *file, struct address_space *mapping, mapping 2477 mm/shmem.c struct inode *inode = mapping->host; mapping 2494 mm/shmem.c shmem_write_end(struct file *file, struct address_space *mapping, mapping 2498 mm/shmem.c struct inode *inode = mapping->host; mapping 2533 mm/shmem.c struct address_space *mapping = inode->i_mapping; mapping 2602 mm/shmem.c if (mapping_writably_mapped(mapping)) mapping 2642 mm/shmem.c static pgoff_t shmem_seek_hole_data(struct address_space *mapping, mapping 2654 mm/shmem.c pvec.nr = find_get_entries(mapping, index, mapping 2691 mm/shmem.c struct address_space *mapping = file->f_mapping; mapping 2692 mm/shmem.c struct inode *inode = mapping->host; mapping 2707 mm/shmem.c new_offset = shmem_seek_hole_data(mapping, start, end, whence); mapping 2741 mm/shmem.c struct address_space *mapping = file->f_mapping; mapping 2760 mm/shmem.c unmap_mapping_range(mapping, unmap_start, mapping 4066 mm/shmem.c void shmem_unlock_mapping(struct address_space *mapping) mapping 4217 mm/shmem.c struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, mapping 4221 mm/shmem.c struct inode *inode = mapping->host; mapping 4225 mm/shmem.c BUG_ON(mapping->a_ops != &shmem_aops); mapping 4237 mm/shmem.c return read_cache_page_gfp(mapping, index, gfp); mapping 1396 mm/slab.c page->mapping = NULL; mapping 1727 mm/slub.c page->mapping = NULL; mapping 991 mm/swap.c struct address_space *mapping, mapping 995 mm/swap.c pvec->nr = find_get_entries(mapping, start, nr_entries, mapping 1042 mm/swap.c struct address_space *mapping, pgoff_t *start, pgoff_t end) mapping 1044 mm/swap.c pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE, mapping 1051 mm/swap.c struct address_space *mapping, pgoff_t *index, pgoff_t end, mapping 1054 mm/swap.c pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, mapping 1061 mm/swap.c struct address_space *mapping, pgoff_t *index, pgoff_t end, mapping 1064 mm/swap.c pvec->nr = find_get_pages_range_tag(mapping, index, end, tag, mapping 2296 mm/swapfile.c struct address_space *mapping = swap_file->f_mapping; mapping 2299 mm/swapfile.c if (mapping->a_ops->swap_deactivate) mapping 2300 mm/swapfile.c mapping->a_ops->swap_deactivate(swap_file); mapping 2384 mm/swapfile.c struct address_space *mapping = swap_file->f_mapping; mapping 2385 mm/swapfile.c struct inode *inode = mapping->host; mapping 
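
shmem_mapping() above is the test other subsystems (mincore, memcontrol) use to decide whether a lookup can encounter swap entries stored as xarray values. A sketch of that dual-path lookup, assuming the caller only wants present pages (example_lookup is hypothetical):

/*
 * Look up @pgoff, tolerating shmem's value entries: find_get_entry()
 * may hand back a swap entry encoded with xa_is_value(), which is not
 * a page and must not be dereferenced.
 */
static struct page *example_lookup(struct address_space *mapping,
				   pgoff_t pgoff)
{
	struct page *page;

	if (shmem_mapping(mapping)) {
		page = find_get_entry(mapping, pgoff);
		if (xa_is_value(page))	/* swapped-out shmem page */
			page = NULL;
	} else {
		page = find_get_page(mapping, pgoff);
	}
	return page;
}
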
2394 mm/swapfile.c if (mapping->a_ops->swap_activate) { mapping 2395 mm/swapfile.c ret = mapping->a_ops->swap_activate(sis, swap_file, span); mapping 2522 mm/swapfile.c struct address_space *mapping; mapping 2542 mm/swapfile.c mapping = victim->f_mapping; mapping 2546 mm/swapfile.c if (p->swap_file->f_mapping == mapping) { mapping 2660 mm/swapfile.c inode = mapping->host; mapping 3103 mm/swapfile.c struct address_space *mapping; mapping 3146 mm/swapfile.c mapping = swap_file->f_mapping; mapping 3147 mm/swapfile.c inode = mapping->host; mapping 3162 mm/swapfile.c if (!mapping->a_ops->readpage) { mapping 3166 mm/swapfile.c page = read_mapping_page(mapping, 0, swap_file); mapping 34 mm/truncate.c static inline void __clear_shadow_entry(struct address_space *mapping, mapping 37 mm/truncate.c XA_STATE(xas, &mapping->i_pages, index); mapping 43 mm/truncate.c mapping->nrexceptional--; mapping 46 mm/truncate.c static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, mapping 49 mm/truncate.c xa_lock_irq(&mapping->i_pages); mapping 50 mm/truncate.c __clear_shadow_entry(mapping, index, entry); mapping 51 mm/truncate.c xa_unlock_irq(&mapping->i_pages); mapping 59 mm/truncate.c static void truncate_exceptional_pvec_entries(struct address_space *mapping, mapping 67 mm/truncate.c if (shmem_mapping(mapping)) mapping 77 mm/truncate.c dax = dax_mapping(mapping); mapping 80 mm/truncate.c xa_lock_irq(&mapping->i_pages); mapping 95 mm/truncate.c dax_delete_mapping_entry(mapping, index); mapping 99 mm/truncate.c __clear_shadow_entry(mapping, index, page); mapping 103 mm/truncate.c xa_unlock_irq(&mapping->i_pages); mapping 111 mm/truncate.c static int invalidate_exceptional_entry(struct address_space *mapping, mapping 115 mm/truncate.c if (shmem_mapping(mapping) || dax_mapping(mapping)) mapping 117 mm/truncate.c clear_shadow_entry(mapping, index, entry); mapping 125 mm/truncate.c static int invalidate_exceptional_entry2(struct address_space *mapping, mapping 129 mm/truncate.c if (shmem_mapping(mapping)) mapping 131 mm/truncate.c if (dax_mapping(mapping)) mapping 132 mm/truncate.c return dax_invalidate_mapping_entry_sync(mapping, index); mapping 133 mm/truncate.c clear_shadow_entry(mapping, index, entry); mapping 157 mm/truncate.c invalidatepage = page->mapping->a_ops->invalidatepage; mapping 177 mm/truncate.c truncate_cleanup_page(struct address_space *mapping, struct page *page) mapping 181 mm/truncate.c unmap_mapping_pages(mapping, page->index, nr, false); mapping 205 mm/truncate.c invalidate_complete_page(struct address_space *mapping, struct page *page) mapping 209 mm/truncate.c if (page->mapping != mapping) mapping 215 mm/truncate.c ret = remove_mapping(mapping, page); mapping 220 mm/truncate.c int truncate_inode_page(struct address_space *mapping, struct page *page) mapping 224 mm/truncate.c if (page->mapping != mapping) mapping 227 mm/truncate.c truncate_cleanup_page(mapping, page); mapping 235 mm/truncate.c int generic_error_remove_page(struct address_space *mapping, struct page *page) mapping 237 mm/truncate.c if (!mapping) mapping 243 mm/truncate.c if (!S_ISREG(mapping->host->i_mode)) mapping 245 mm/truncate.c return truncate_inode_page(mapping, page); mapping 257 mm/truncate.c struct address_space *mapping = page_mapping(page); mapping 258 mm/truncate.c if (!mapping) mapping 264 mm/truncate.c return invalidate_complete_page(mapping, page); mapping 291 mm/truncate.c void truncate_inode_pages_range(struct address_space *mapping, mapping 303 mm/truncate.c if (mapping->nrpages == 0 && 
mapping->nrexceptional == 0) mapping 329 mm/truncate.c while (index < end && pagevec_lookup_entries(&pvec, mapping, index, mapping 358 mm/truncate.c if (page->mapping != mapping) { mapping 365 mm/truncate.c truncate_cleanup_page(mapping, locked_pvec.pages[i]); mapping 366 mm/truncate.c delete_from_page_cache_batch(mapping, &locked_pvec); mapping 369 mm/truncate.c truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); mapping 375 mm/truncate.c struct page *page = find_lock_page(mapping, start - 1); mapping 385 mm/truncate.c cleancache_invalidate_page(mapping, page); mapping 394 mm/truncate.c struct page *page = find_lock_page(mapping, end); mapping 398 mm/truncate.c cleancache_invalidate_page(mapping, page); mapping 416 mm/truncate.c if (!pagevec_lookup_entries(&pvec, mapping, index, mapping 449 mm/truncate.c truncate_inode_page(mapping, page); mapping 452 mm/truncate.c truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); mapping 458 mm/truncate.c cleancache_invalidate_inode(mapping); mapping 474 mm/truncate.c void truncate_inode_pages(struct address_space *mapping, loff_t lstart) mapping 476 mm/truncate.c truncate_inode_pages_range(mapping, lstart, (loff_t)-1); mapping 489 mm/truncate.c void truncate_inode_pages_final(struct address_space *mapping) mapping 501 mm/truncate.c mapping_set_exiting(mapping); mapping 508 mm/truncate.c nrpages = mapping->nrpages; mapping 510 mm/truncate.c nrexceptional = mapping->nrexceptional; mapping 519 mm/truncate.c xa_lock_irq(&mapping->i_pages); mapping 520 mm/truncate.c xa_unlock_irq(&mapping->i_pages); mapping 527 mm/truncate.c truncate_inode_pages(mapping, 0); mapping 546 mm/truncate.c unsigned long invalidate_mapping_pages(struct address_space *mapping, mapping 557 mm/truncate.c while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, mapping 569 mm/truncate.c invalidate_exceptional_entry(mapping, index, mapping 636 mm/truncate.c invalidate_complete_page2(struct address_space *mapping, struct page *page) mapping 640 mm/truncate.c if (page->mapping != mapping) mapping 646 mm/truncate.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 652 mm/truncate.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 654 mm/truncate.c if (mapping->a_ops->freepage) mapping 655 mm/truncate.c mapping->a_ops->freepage(page); mapping 660 mm/truncate.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 664 mm/truncate.c static int do_launder_page(struct address_space *mapping, struct page *page) mapping 668 mm/truncate.c if (page->mapping != mapping || mapping->a_ops->launder_page == NULL) mapping 670 mm/truncate.c return mapping->a_ops->launder_page(page); mapping 684 mm/truncate.c int invalidate_inode_pages2_range(struct address_space *mapping, mapping 695 mm/truncate.c if (mapping->nrpages == 0 && mapping->nrexceptional == 0) mapping 700 mm/truncate.c while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, mapping 712 mm/truncate.c if (!invalidate_exceptional_entry2(mapping, mapping 720 mm/truncate.c if (page->mapping != mapping) { mapping 730 mm/truncate.c unmap_mapping_pages(mapping, index, mapping 737 mm/truncate.c unmap_mapping_pages(mapping, index, mapping 742 mm/truncate.c ret2 = do_launder_page(mapping, page); mapping 744 mm/truncate.c if (!invalidate_complete_page2(mapping, page)) mapping 763 mm/truncate.c if (dax_mapping(mapping)) { mapping 764 mm/truncate.c unmap_mapping_pages(mapping, start, end - start + 1, false); mapping 767 mm/truncate.c cleancache_invalidate_inode(mapping); mapping 781 mm/truncate.c 
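
invalidate_mapping_pages() is the opportunistic sibling of the truncate path above: it drops only clean, unlocked, unmapped pages and skips everything else, so callers use it for cache shedding rather than correctness. A sketch over a byte range (alignment handling elided; the wrapper name is hypothetical):

/*
 * Try to drop clean pagecache over [start, end]; dirty, locked or
 * mapped pages survive, so no data can be lost. Returns the number
 * of pages actually released.
 */
static unsigned long example_shed_cache(struct address_space *mapping,
					loff_t start, loff_t end)
{
	return invalidate_mapping_pages(mapping, start >> PAGE_SHIFT,
					end >> PAGE_SHIFT);
}
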
int invalidate_inode_pages2(struct address_space *mapping) mapping 783 mm/truncate.c return invalidate_inode_pages2_range(mapping, 0, -1); mapping 804 mm/truncate.c struct address_space *mapping = inode->i_mapping; mapping 816 mm/truncate.c unmap_mapping_range(mapping, holebegin, 0, 1); mapping 817 mm/truncate.c truncate_inode_pages(mapping, newsize); mapping 818 mm/truncate.c unmap_mapping_range(mapping, holebegin, 0, 1); mapping 912 mm/truncate.c struct address_space *mapping = inode->i_mapping; mapping 929 mm/truncate.c unmap_mapping_range(mapping, unmap_start, mapping 931 mm/truncate.c truncate_inode_pages_range(mapping, lstart, lend); mapping 191 mm/userfaultfd.c struct address_space *mapping; mapping 271 mm/userfaultfd.c mapping = dst_vma->vm_file->f_mapping; mapping 272 mm/userfaultfd.c hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr); mapping 617 mm/util.c unsigned long mapping; mapping 619 mm/util.c mapping = (unsigned long)page->mapping; mapping 620 mm/util.c mapping &= ~PAGE_MAPPING_FLAGS; mapping 622 mm/util.c return (void *)mapping; mapping 657 mm/util.c unsigned long mapping; mapping 660 mm/util.c mapping = (unsigned long)page->mapping; mapping 661 mm/util.c if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) mapping 668 mm/util.c struct address_space *mapping; mapping 683 mm/util.c mapping = page->mapping; mapping 684 mm/util.c if ((unsigned long)mapping & PAGE_MAPPING_ANON) mapping 687 mm/util.c return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS); mapping 801 mm/vmscan.c static void handle_write_error(struct address_space *mapping, mapping 805 mm/vmscan.c if (page_mapping(page) == mapping) mapping 806 mm/vmscan.c mapping_set_error(mapping, error); mapping 826 mm/vmscan.c static pageout_t pageout(struct page *page, struct address_space *mapping, mapping 847 mm/vmscan.c if (!mapping) { mapping 861 mm/vmscan.c if (mapping->a_ops->writepage == NULL) mapping 863 mm/vmscan.c if (!may_write_to_inode(mapping->host, sc)) mapping 877 mm/vmscan.c res = mapping->a_ops->writepage(page, &wbc); mapping 879 mm/vmscan.c handle_write_error(mapping, page, res); mapping 901 mm/vmscan.c static int __remove_mapping(struct address_space *mapping, struct page *page, mapping 908 mm/vmscan.c BUG_ON(mapping != page_mapping(page)); mapping 910 mm/vmscan.c xa_lock_irqsave(&mapping->i_pages, flags); mapping 949 mm/vmscan.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 955 mm/vmscan.c freepage = mapping->a_ops->freepage; mapping 973 mm/vmscan.c !mapping_exiting(mapping) && !dax_mapping(mapping)) mapping 976 mm/vmscan.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 985 mm/vmscan.c xa_unlock_irqrestore(&mapping->i_pages, flags); mapping 995 mm/vmscan.c int remove_mapping(struct address_space *mapping, struct page *page) mapping 997 mm/vmscan.c if (__remove_mapping(mapping, page, false)) { mapping 1090 mm/vmscan.c struct address_space *mapping; mapping 1111 mm/vmscan.c mapping = page_mapping(page); mapping 1112 mm/vmscan.c if (mapping && mapping->a_ops->is_dirty_writeback) mapping 1113 mm/vmscan.c mapping->a_ops->is_dirty_writeback(page, dirty, writeback); mapping 1135 mm/vmscan.c struct address_space *mapping; mapping 1185 mm/vmscan.c mapping = page_mapping(page); mapping 1186 mm/vmscan.c if (((dirty || writeback) && mapping && mapping 1187 mm/vmscan.c inode_write_congested(mapping->host)) || mapping 1323 mm/vmscan.c mapping = page_mapping(page); mapping 1397 mm/vmscan.c switch (pageout(page, mapping, sc)) { mapping 1416 mm/vmscan.c mapping = page_mapping(page); 
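
The truncate_pagecache() entries above (mm/truncate.c 804–818) encode a subtle ordering: unmap, truncate, then unmap again, because a fault racing with truncation can reinstall PTEs after the first pass. Reproduced as a sketch:

/*
 * Shrink the pagecache to @newsize. The second unmap_mapping_range()
 * closes the race with concurrent faults that passed the first zap
 * before the pages were actually removed from the cache.
 */
static void example_truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
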
mapping 1446 mm/vmscan.c if (!mapping && page_count(page) == 1) { mapping 1475 mm/vmscan.c } else if (!mapping || !__remove_mapping(mapping, page, true)) mapping 1602 mm/vmscan.c struct address_space *mapping; mapping 1617 mm/vmscan.c mapping = page_mapping(page); mapping 1618 mm/vmscan.c migrate_dirty = !mapping || mapping->a_ops->migratepage; mapping 456 mm/workingset.c struct address_space *mapping; mapping 471 mm/workingset.c mapping = container_of(node->array, struct address_space, i_pages); mapping 474 mm/workingset.c if (!xa_trylock(&mapping->i_pages)) { mapping 494 mm/workingset.c mapping->nrexceptional -= node->nr_values; mapping 495 mm/workingset.c xas.xa_node = xa_parent_locked(&mapping->i_pages, node); mapping 507 mm/workingset.c xa_unlock_irq(&mapping->i_pages); mapping 1373 mm/z3fold.c static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage, mapping 1922 mm/zsmalloc.c struct address_space *mapping; mapping 1939 mm/zsmalloc.c mapping = page_mapping(page); mapping 1940 mm/zsmalloc.c pool = mapping->private_data; mapping 1971 mm/zsmalloc.c static int zs_page_migrate(struct address_space *mapping, struct page *newpage, mapping 2003 mm/zsmalloc.c pool = mapping->private_data; mapping 2106 mm/zsmalloc.c struct address_space *mapping; mapping 2114 mm/zsmalloc.c mapping = page_mapping(page); mapping 2115 mm/zsmalloc.c pool = mapping->private_data; mapping 115 net/bluetooth/cmtp/capi.c if (app->mapping == value) mapping 209 net/bluetooth/cmtp/capi.c application->mapping = CAPIMSG_APPID(skb->data); mapping 463 net/bluetooth/cmtp/capi.c cmtp_send_interopmsg(session, CAPI_REQ, application->mapping, application->msgnum, mapping 490 net/bluetooth/cmtp/capi.c CAPIMSG_SETAPPID(skb->data, application->mapping); mapping 518 net/bluetooth/cmtp/capi.c seq_printf(m, "appl %d -> %d\n", app->appl, app->mapping); mapping 110 net/bluetooth/cmtp/cmtp.h __u16 mapping; mapping 248 net/rds/ib_rdma.c WARN_ON(!page->mapping && irqs_disabled()); mapping 466 net/rds/rdma.c WARN_ON(!page->mapping && irqs_disabled()); mapping 180 net/sched/sch_generic.c int mapping = skb_get_queue_mapping(skb); mapping 188 net/sched/sch_generic.c if (unlikely(skb_get_queue_mapping(nskb) != mapping)) { mapping 909 net/sunrpc/cache.c static ssize_t cache_downcall(struct address_space *mapping, mapping 920 net/sunrpc/cache.c page = find_or_create_page(mapping, 0, GFP_KERNEL); mapping 938 net/sunrpc/cache.c struct address_space *mapping = filp->f_mapping; mapping 946 net/sunrpc/cache.c ret = cache_downcall(mapping, buf, count, cd); mapping 117 security/selinux/ss/services.c out_map->mapping = kcalloc(++i, sizeof(*out_map->mapping), GFP_ATOMIC); mapping 118 security/selinux/ss/services.c if (!out_map->mapping) mapping 125 security/selinux/ss/services.c struct selinux_mapping *p_out = out_map->mapping + j; mapping 173 security/selinux/ss/services.c kfree(out_map->mapping); mapping 174 security/selinux/ss/services.c out_map->mapping = NULL; mapping 185 security/selinux/ss/services.c return map->mapping[tclass].value; mapping 198 security/selinux/ss/services.c if (map->mapping[i].value == pol_value) mapping 210 security/selinux/ss/services.c struct selinux_mapping *mapping = &map->mapping[tclass]; mapping 211 security/selinux/ss/services.c unsigned int i, n = mapping->num_perms; mapping 215 security/selinux/ss/services.c if (avd->allowed & mapping->perms[i]) mapping 217 security/selinux/ss/services.c if (allow_unknown && !mapping->perms[i]) mapping 223 security/selinux/ss/services.c if (avd->auditallow & 
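
The mm/util.c entries just above show that page->mapping is a tagged pointer: anon pages store an anon_vma with PAGE_MAPPING_ANON set, KSM pages add PAGE_MAPPING_KSM on top, and only untagged (or merely movable-tagged) values resolve to a real address_space, which is what the zsmalloc/z3fold entries rely on via page_mapping(). A decode sketch mirroring those entries (the real page_mapping() also redirects swapcache pages to their swap address_space):

/*
 * Return the file address_space behind @page, or NULL for anon/KSM
 * pages whose ->mapping low bits carry PAGE_MAPPING_FLAGS tags.
 */
static struct address_space *example_file_mapping(struct page *page)
{
	unsigned long m = (unsigned long)READ_ONCE(page->mapping);

	if (m & PAGE_MAPPING_ANON)	/* anon_vma or KSM stable node */
		return NULL;
	return (struct address_space *)(m & ~PAGE_MAPPING_FLAGS);
}
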
mapping->perms[i]) mapping 228 security/selinux/ss/services.c if (avd->auditdeny & mapping->perms[i]) mapping 230 security/selinux/ss/services.c if (!allow_unknown && !mapping->perms[i]) mapping 2209 security/selinux/ss/services.c oldmapping = state->ss->map.mapping; mapping 2210 security/selinux/ss/services.c state->ss->map.mapping = newmap.mapping; mapping 2231 security/selinux/ss/services.c kfree(newmap.mapping); mapping 22 security/selinux/ss/services.h struct selinux_mapping *mapping; /* indexed by class */ mapping 530 tools/perf/util/bpf-loader.c static int map_prologue(struct perf_probe_event *pev, int *mapping, mapping 556 tools/perf/util/bpf-loader.c mapping[n] = type; mapping 562 tools/perf/util/bpf-loader.c mapping[n] = type; mapping 564 tools/perf/util/bpf-loader.c mapping[n] = ++type; mapping 566 tools/perf/util/bpf-loader.c pr_debug("mapping[%d]=%d\n", n, mapping[n]); mapping 686 tools/testing/nvdimm/test/nfit.c nd_mapping = &nd_region->mapping[nd_region->ndr_mappings - 1];