Searched refs:mapping (Results 1 - 200 of 3402) sorted by relevance


/linux-4.1.27/drivers/gpu/drm/exynos/
exynos_drm_iommu.c
15 #include <linux/dma-mapping.h>
25 * drm_create_iommu_mapping - create a mapping structure
31 struct dma_iommu_mapping *mapping = NULL; drm_create_iommu_mapping() local
40 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, drm_create_iommu_mapping()
43 if (IS_ERR(mapping)) drm_create_iommu_mapping()
44 return PTR_ERR(mapping); drm_create_iommu_mapping()
52 dev->archdata.mapping = mapping; drm_create_iommu_mapping()
56 arm_iommu_release_mapping(mapping); drm_create_iommu_mapping()
61 * drm_release_iommu_mapping - release iommu mapping structure
65 * if mapping->kref becomes 0 then all things related to iommu mapping
72 arm_iommu_release_mapping(dev->archdata.mapping); drm_release_iommu_mapping()
76 * drm_iommu_attach_device- attach device to iommu mapping
82 * mapping.
90 if (!dev->archdata.mapping) { drm_iommu_attach_device()
103 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); drm_iommu_attach_device()
112 * The dma mapping api needs device object and the api is used drm_iommu_attach_device()
124 * drm_iommu_detach_device -detach device address space mapping from device
130 * mapping
136 struct dma_iommu_mapping *mapping = dev->archdata.mapping; drm_iommu_detach_device() local
138 if (!mapping || !mapping->domain) drm_iommu_detach_device()
141 iommu_detach_device(mapping->domain, subdrv_dev); drm_iommu_detach_device()
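For context, a minimal sketch of the ARM DMA-IOMMU pattern the exynos helpers above wrap: create an IOVA space with arm_iommu_create_mapping() and attach the device so the generic DMA-mapping API routes through the IOMMU. This assumes an ARM platform with CONFIG_ARM_DMA_USE_IOMMU; the helper name, base address and size are illustrative, not taken from the driver.

/* Illustrative sketch only, not exynos driver code. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int my_setup_iommu(struct device *dev)	/* hypothetical helper */
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* Create a 256 MiB IOVA space starting at 0x20000000 (example values). */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x20000000, SZ_256M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	/* Attach the device so dma_map_*() on it goes through the IOMMU. */
	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}
	return 0;
}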
/linux-4.1.27/include/linux/
io-mapping.h
28 * The io_mapping mechanism provides an abstraction for mapping
31 * See Documentation/io-mapping.txt
45 * For small address space machines, mapping large objects
76 io_mapping_free(struct io_mapping *mapping) io_mapping_free() argument
78 iomap_free(mapping->base, mapping->size); io_mapping_free()
79 kfree(mapping); io_mapping_free()
84 io_mapping_map_atomic_wc(struct io_mapping *mapping, io_mapping_map_atomic_wc() argument
90 BUG_ON(offset >= mapping->size); io_mapping_map_atomic_wc()
91 phys_addr = mapping->base + offset; io_mapping_map_atomic_wc()
93 return iomap_atomic_prot_pfn(pfn, mapping->prot); io_mapping_map_atomic_wc()
103 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) io_mapping_map_wc() argument
107 BUG_ON(offset >= mapping->size); io_mapping_map_wc()
108 phys_addr = mapping->base + offset; io_mapping_map_wc()
134 io_mapping_free(struct io_mapping *mapping) io_mapping_free() argument
136 iounmap((void __force __iomem *) mapping); io_mapping_free()
141 io_mapping_map_atomic_wc(struct io_mapping *mapping, io_mapping_map_atomic_wc() argument
145 return ((char __force __iomem *) mapping) + offset; io_mapping_map_atomic_wc()
156 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) io_mapping_map_wc() argument
158 return ((char __force __iomem *) mapping) + offset; io_mapping_map_wc()
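A minimal sketch of the usage pattern io-mapping.h is designed for: create one write-combining mapping covering a large I/O BAR, then temporarily map single pages from it on demand. The helper name, register offset and values are made up; only the io_mapping_* calls come from the header above.

/* Sketch, assuming a WC-mappable BAR; not taken from any driver. */
#include <linux/io-mapping.h>
#include <linux/io.h>

static void my_poke_vram(resource_size_t bar_base, unsigned long bar_size,
			 unsigned long offset, u32 val)	/* hypothetical */
{
	struct io_mapping *map;
	void __iomem *page;

	map = io_mapping_create_wc(bar_base, bar_size);
	if (!map)
		return;

	/* Map only the page containing 'offset'; cheap on 64-bit, kmap_atomic-like on 32-bit. */
	page = io_mapping_map_atomic_wc(map, offset & PAGE_MASK);
	writel(val, page + (offset & ~PAGE_MASK));
	io_mapping_unmap_atomic(page);

	io_mapping_free(map);
}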
pagemap.h
19 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
30 static inline void mapping_set_error(struct address_space *mapping, int error) mapping_set_error() argument
34 set_bit(AS_ENOSPC, &mapping->flags); mapping_set_error()
36 set_bit(AS_EIO, &mapping->flags); mapping_set_error()
40 static inline void mapping_set_unevictable(struct address_space *mapping) mapping_set_unevictable() argument
42 set_bit(AS_UNEVICTABLE, &mapping->flags); mapping_set_unevictable()
45 static inline void mapping_clear_unevictable(struct address_space *mapping) mapping_clear_unevictable() argument
47 clear_bit(AS_UNEVICTABLE, &mapping->flags); mapping_clear_unevictable()
50 static inline int mapping_unevictable(struct address_space *mapping) mapping_unevictable() argument
52 if (mapping) mapping_unevictable()
53 return test_bit(AS_UNEVICTABLE, &mapping->flags); mapping_unevictable()
54 return !!mapping; mapping_unevictable()
57 static inline void mapping_set_exiting(struct address_space *mapping) mapping_set_exiting() argument
59 set_bit(AS_EXITING, &mapping->flags); mapping_set_exiting()
62 static inline int mapping_exiting(struct address_space *mapping) mapping_exiting() argument
64 return test_bit(AS_EXITING, &mapping->flags); mapping_exiting()
67 static inline gfp_t mapping_gfp_mask(struct address_space * mapping) mapping_gfp_mask() argument
69 return (__force gfp_t)mapping->flags & __GFP_BITS_MASK; mapping_gfp_mask()
73 * This is non-atomic. Only to be used before the mapping is activated.
241 pgoff_t page_cache_next_hole(struct address_space *mapping,
243 pgoff_t page_cache_prev_hole(struct address_space *mapping,
253 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
258 * @mapping: the address_space to search
261 * Looks up the page cache slot at @mapping & @offset. If there is a
266 static inline struct page *find_get_page(struct address_space *mapping, find_get_page() argument
269 return pagecache_get_page(mapping, offset, 0, 0); find_get_page()
272 static inline struct page *find_get_page_flags(struct address_space *mapping, find_get_page_flags() argument
275 return pagecache_get_page(mapping, offset, fgp_flags, 0); find_get_page_flags()
281 * @mapping: the address_space to search
284 * Looks up the page cache slot at @mapping & @offset. If there is a
292 static inline struct page *find_lock_page(struct address_space *mapping, find_lock_page() argument
295 return pagecache_get_page(mapping, offset, FGP_LOCK, 0); find_lock_page()
300 * @mapping: the page's address_space
301 * @index: the page's index into the mapping
304 * Looks up the page cache slot at @mapping & @offset. If there is a
317 static inline struct page *find_or_create_page(struct address_space *mapping, find_or_create_page() argument
320 return pagecache_get_page(mapping, offset, find_or_create_page()
327 * @mapping: target address_space
338 static inline struct page *grab_cache_page_nowait(struct address_space *mapping, grab_cache_page_nowait() argument
341 return pagecache_get_page(mapping, index, grab_cache_page_nowait()
343 mapping_gfp_mask(mapping)); grab_cache_page_nowait()
346 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
347 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
348 unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
351 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
353 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
355 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
358 struct page *grab_cache_page_write_begin(struct address_space *mapping,
364 static inline struct page *grab_cache_page(struct address_space *mapping, grab_cache_page() argument
367 return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); grab_cache_page()
370 extern struct page * read_cache_page(struct address_space *mapping,
372 extern struct page * read_cache_page_gfp(struct address_space *mapping,
374 extern int read_cache_pages(struct address_space *mapping,
377 static inline struct page *read_mapping_page(struct address_space *mapping, read_mapping_page() argument
380 filler_t *filler = (filler_t *)mapping->a_ops->readpage; read_mapping_page()
381 return read_cache_page(mapping, index, filler, data); read_mapping_page()
649 int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
651 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
662 struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) add_to_page_cache()
667 error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); add_to_page_cache()
661 add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) add_to_page_cache() argument
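As a sketch of how these lookup helpers are typically combined (assuming the 4.1-era API above; the function name is illustrative): try the cache first with find_get_page(), and fall back to read_mapping_page(), which reads the page in through the mapping's ->readpage.

#include <linux/pagemap.h>
#include <linux/mm.h>

static struct page *my_get_cached_page(struct address_space *mapping,
				       pgoff_t index)	/* hypothetical */
{
	struct page *page;

	page = find_get_page(mapping, index);	/* takes a reference if found */
	if (page && PageUptodate(page))
		return page;
	if (page)
		page_cache_release(page);

	/* Not cached (or not up to date): read it via mapping->a_ops->readpage. */
	page = read_mapping_page(mapping, index, NULL);
	return page;	/* ERR_PTR() on failure; caller drops it with page_cache_release() */
}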
cleancache.h
53 return page->mapping->host->i_sb->cleancache_poolid >= 0; cleancache_fs_enabled()
55 static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) cleancache_fs_enabled_mapping() argument
57 return mapping->host->i_sb->cleancache_poolid >= 0; cleancache_fs_enabled_mapping()
105 static inline void cleancache_invalidate_page(struct address_space *mapping, cleancache_invalidate_page() argument
108 /* careful... page->mapping is NULL sometimes when this is called */ cleancache_invalidate_page()
109 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) cleancache_invalidate_page()
110 __cleancache_invalidate_page(mapping, page); cleancache_invalidate_page()
113 static inline void cleancache_invalidate_inode(struct address_space *mapping) cleancache_invalidate_inode() argument
115 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) cleancache_invalidate_inode()
116 __cleancache_invalidate_inode(mapping); cleancache_invalidate_inode()
shmem_fs.h
56 extern bool shmem_mapping(struct address_space *mapping);
57 extern void shmem_unlock_mapping(struct address_space *mapping);
58 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
64 struct address_space *mapping, pgoff_t index) shmem_read_mapping_page()
66 return shmem_read_mapping_page_gfp(mapping, index, shmem_read_mapping_page()
67 mapping_gfp_mask(mapping)); shmem_read_mapping_page()
63 shmem_read_mapping_page( struct address_space *mapping, pgoff_t index) shmem_read_mapping_page() argument
pagevec.h
26 struct address_space *mapping,
30 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
33 struct address_space *mapping, pgoff_t *index, int tag,
migrate.h
40 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
42 extern int migrate_page_move_mapping(struct address_space *mapping,
60 static inline int migrate_huge_page_move_mapping(struct address_space *mapping, migrate_huge_page_move_mapping() argument
zsmalloc.h
20 * zsmalloc mapping modes
26 ZS_MM_RW, /* normal read-write mapping */
futex.h
19 * The key type depends on whether it's a shared or private mapping.
29 * (but private mapping on an mm, and reference taken on it)
writeback.h
36 WB_SYNC_ALL, /* Wait on every mapping */
170 void balance_dirty_pages_ratelimited(struct address_space *mapping);
175 int generic_writepages(struct address_space *mapping,
177 void tag_pages_for_writeback(struct address_space *mapping,
179 int write_cache_pages(struct address_space *mapping,
182 int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
184 void tag_pages_for_writeback(struct address_space *mapping,
/linux-4.1.27/arch/sparc/kernel/
dma.c
2 #include <linux/dma-mapping.h>
iommu_common.h
21 * These give mapping size of each iommu pte/tlb.
/linux-4.1.27/arch/arm64/mm/
Makefile
1 obj-y := dma-mapping.o extable.o fault.o init.o \
ioremap.c
42 * Page align the mapping address and size, taking account of any __ioremap_caller()
89 * of ioremap_cache() reusing a RAM mapping. __iounmap()
98 /* For normal memory we already have a cacheable mapping. */ ioremap_cache()
/linux-4.1.27/arch/unicore32/mm/
flush.c
61 void __flush_dcache_page(struct address_space *mapping, struct page *page) __flush_dcache_page() argument
64 * Writeback any data associated with the kernel mapping of this __flush_dcache_page()
66 * coherent with the kernels mapping. __flush_dcache_page()
72 * Ensure cache coherency between kernel mapping and userspace mapping
77 struct address_space *mapping; flush_dcache_page() local
86 mapping = page_mapping(page); flush_dcache_page()
88 if (mapping && !mapping_mapped(mapping)) flush_dcache_page()
91 __flush_dcache_page(mapping, page); flush_dcache_page()
92 if (mapping) flush_dcache_page()
mmu.c
181 * Try a section mapping - end, addr and phys must all be aligned alloc_init_section()
204 * page tables for the mapping specified by `md'. We
215 printk(KERN_WARNING "BUG: not creating mapping for " create_mapping()
223 printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " create_mapping()
364 * Create a mapping for the machine vectors at the high-vectors devicemaps_init()
366 * create a mapping at the low-vectors virtual address. devicemaps_init()
375 * Create a mapping for the kuser page at the special devicemaps_init()
444 * In order to soft-boot, we need to insert a 1:1 mapping in place of
492 struct address_space *mapping; update_mmu_cache() local
506 mapping = page_mapping(page); update_mmu_cache()
508 __flush_dcache_page(mapping, page); update_mmu_cache()
509 if (mapping) update_mmu_cache()
dma-swiotlb.c
14 #include <linux/dma-mapping.h>
/linux-4.1.27/arch/x86/include/asm/
init.h
8 bool kernel_mapping; /* kernel mapping or ident mapping */
numa.h
23 * __apicid_to_node[] stores the raw mapping between physical apicid and
24 * node and is used to initialize cpu_to_node mapping.
26 * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
/linux-4.1.27/drivers/media/usb/uvc/
uvc_ctrl.c
368 static __s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, uvc_ctrl_get_zoom() argument
386 static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, uvc_ctrl_set_zoom() argument
393 static __s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, uvc_ctrl_get_rel_speed() argument
396 unsigned int first = mapping->offset / 8; uvc_ctrl_get_rel_speed()
413 static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, uvc_ctrl_set_rel_speed() argument
416 unsigned int first = mapping->offset / 8; uvc_ctrl_set_rel_speed()
762 /* Extract the bit string specified by mapping->offset and mapping->size
764 * a signed 32bit integer. Sign extension will be performed if the mapping
767 static __s32 uvc_get_le_value(struct uvc_control_mapping *mapping, uvc_get_le_value() argument
770 int bits = mapping->size; uvc_get_le_value()
771 int offset = mapping->offset; uvc_get_le_value()
788 if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) uvc_get_le_value()
789 value |= -(value & (1 << (mapping->size - 1))); uvc_get_le_value()
794 /* Set the bit string specified by mapping->offset and mapping->size
797 static void uvc_set_le_value(struct uvc_control_mapping *mapping, uvc_set_le_value() argument
800 int bits = mapping->size; uvc_set_le_value()
801 int offset = mapping->offset; uvc_set_le_value()
809 if (mapping->v4l2_type == V4L2_CTRL_TYPE_BUTTON) uvc_set_le_value()
860 struct uvc_control_mapping **mapping, struct uvc_control **control, __uvc_find_control()
878 *mapping = map; __uvc_find_control()
882 if ((*mapping == NULL || (*mapping)->id > map->id) && __uvc_find_control()
885 *mapping = map; __uvc_find_control()
892 __u32 v4l2_id, struct uvc_control_mapping **mapping) uvc_find_control()
898 *mapping = NULL; uvc_find_control()
905 __uvc_find_control(entity, v4l2_id, mapping, &ctrl, next); uvc_find_control()
974 struct uvc_control *ctrl, struct uvc_control_mapping *mapping, __uvc_ctrl_get()
995 *value = mapping->get(mapping, UVC_GET_CUR, __uvc_ctrl_get()
998 if (mapping->v4l2_type == V4L2_CTRL_TYPE_MENU) { __uvc_ctrl_get()
999 menu = mapping->menu_info; __uvc_ctrl_get()
1000 for (i = 0; i < mapping->menu_count; ++i, ++menu) { __uvc_ctrl_get()
1013 struct uvc_control_mapping *mapping, __uvc_query_v4l2_ctrl()
1022 v4l2_ctrl->id = mapping->id; __uvc_query_v4l2_ctrl()
1023 v4l2_ctrl->type = mapping->v4l2_type; __uvc_query_v4l2_ctrl()
1024 strlcpy(v4l2_ctrl->name, mapping->name, sizeof v4l2_ctrl->name); __uvc_query_v4l2_ctrl()
1032 if (mapping->master_id) __uvc_query_v4l2_ctrl()
1033 __uvc_find_control(ctrl->entity, mapping->master_id, __uvc_query_v4l2_ctrl()
1041 if (val != mapping->master_manual) __uvc_query_v4l2_ctrl()
1052 v4l2_ctrl->default_value = mapping->get(mapping, UVC_GET_DEF, __uvc_query_v4l2_ctrl()
1056 switch (mapping->v4l2_type) { __uvc_query_v4l2_ctrl()
1059 v4l2_ctrl->maximum = mapping->menu_count - 1; __uvc_query_v4l2_ctrl()
1062 menu = mapping->menu_info; __uvc_query_v4l2_ctrl()
1063 for (i = 0; i < mapping->menu_count; ++i, ++menu) { __uvc_query_v4l2_ctrl()
1089 v4l2_ctrl->minimum = mapping->get(mapping, UVC_GET_MIN, __uvc_query_v4l2_ctrl()
1093 v4l2_ctrl->maximum = mapping->get(mapping, UVC_GET_MAX, __uvc_query_v4l2_ctrl()
1097 v4l2_ctrl->step = mapping->get(mapping, UVC_GET_RES, __uvc_query_v4l2_ctrl()
1107 struct uvc_control_mapping *mapping; uvc_query_v4l2_ctrl() local
1114 ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping); uvc_query_v4l2_ctrl()
1120 ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl); uvc_query_v4l2_ctrl()
1139 struct uvc_control_mapping *mapping; uvc_query_v4l2_menu() local
1153 ctrl = uvc_find_control(chain, query_menu->id, &mapping); uvc_query_v4l2_menu()
1154 if (ctrl == NULL || mapping->v4l2_type != V4L2_CTRL_TYPE_MENU) { uvc_query_v4l2_menu()
1159 if (query_menu->index >= mapping->menu_count) { uvc_query_v4l2_menu()
1164 menu_info = &mapping->menu_info[query_menu->index]; uvc_query_v4l2_menu()
1166 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && uvc_query_v4l2_menu()
1176 bitmap = mapping->get(mapping, UVC_GET_RES, uvc_query_v4l2_menu()
1198 struct uvc_control_mapping *mapping, uvc_ctrl_fill_event()
1203 __uvc_query_v4l2_ctrl(chain, ctrl, mapping, &v4l2_ctrl); uvc_ctrl_fill_event()
1219 struct uvc_control *ctrl, struct uvc_control_mapping *mapping, uvc_ctrl_send_event()
1225 if (list_empty(&mapping->ev_subs)) uvc_ctrl_send_event()
1228 uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, value, changes); uvc_ctrl_send_event()
1230 list_for_each_entry(sev, &mapping->ev_subs, node) { uvc_ctrl_send_event()
1242 struct uvc_control_mapping *mapping = NULL; uvc_ctrl_send_slave_event() local
1257 __uvc_find_control(master->entity, slave_id, &mapping, &ctrl, 0); uvc_ctrl_send_slave_event()
1261 if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0) uvc_ctrl_send_slave_event()
1264 uvc_ctrl_send_event(handle, ctrl, mapping, val, changes); uvc_ctrl_send_slave_event()
1270 struct uvc_control_mapping *mapping; uvc_ctrl_send_events() local
1277 ctrl = uvc_find_control(handle->chain, xctrls[i].id, &mapping); uvc_ctrl_send_events()
1279 for (j = 0; j < ARRAY_SIZE(mapping->slave_ids); ++j) { uvc_ctrl_send_events()
1280 if (!mapping->slave_ids[j]) uvc_ctrl_send_events()
1283 mapping->slave_ids[j], uvc_ctrl_send_events()
1291 if (mapping->master_id) { uvc_ctrl_send_events()
1293 if (xctrls[j].id == mapping->master_id) { uvc_ctrl_send_events()
1300 uvc_ctrl_send_event(handle, ctrl, mapping, xctrls[i].value, uvc_ctrl_send_events()
1308 struct uvc_control_mapping *mapping; uvc_ctrl_add_event() local
1316 ctrl = uvc_find_control(handle->chain, sev->id, &mapping); uvc_ctrl_add_event()
1322 list_add_tail(&sev->node, &mapping->ev_subs); uvc_ctrl_add_event()
1328 if (__uvc_ctrl_get(handle->chain, ctrl, mapping, &val) == 0) uvc_ctrl_add_event()
1331 uvc_ctrl_fill_event(handle->chain, &ev, ctrl, mapping, val, uvc_ctrl_add_event()
1465 struct uvc_control_mapping *mapping; uvc_ctrl_get() local
1467 ctrl = uvc_find_control(chain, xctrl->id, &mapping); uvc_ctrl_get()
1471 return __uvc_ctrl_get(chain, ctrl, mapping, &xctrl->value); uvc_ctrl_get()
1478 struct uvc_control_mapping *mapping; uvc_ctrl_set() local
1485 ctrl = uvc_find_control(chain, xctrl->id, &mapping); uvc_ctrl_set()
1492 switch (mapping->v4l2_type) { uvc_ctrl_set()
1500 min = mapping->get(mapping, UVC_GET_MIN, uvc_ctrl_set()
1502 max = mapping->get(mapping, UVC_GET_MAX, uvc_ctrl_set()
1504 step = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_set()
1511 if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) uvc_ctrl_set()
1524 if (xctrl->value < 0 || xctrl->value >= mapping->menu_count) uvc_ctrl_set()
1526 value = mapping->menu_info[xctrl->value].value; uvc_ctrl_set()
1531 if (mapping->data_type == UVC_CTRL_DATA_TYPE_BITMASK && uvc_ctrl_set()
1539 step = mapping->get(mapping, UVC_GET_RES, uvc_ctrl_set()
1552 /* If the mapping doesn't span the whole UVC control, the current value uvc_ctrl_set()
1556 if (!ctrl->loaded && (ctrl->info.size * 8) != mapping->size) { uvc_ctrl_set()
1580 mapping->set(mapping, value, uvc_ctrl_set()
1882 * Control and mapping handling
1917 * Add a control mapping to a given control.
1920 struct uvc_control *ctrl, const struct uvc_control_mapping *mapping) __uvc_ctrl_add_mapping()
1929 map = kmemdup(mapping, sizeof(*mapping), GFP_KERNEL); __uvc_ctrl_add_mapping()
1935 size = sizeof(*mapping->menu_info) * mapping->menu_count; __uvc_ctrl_add_mapping()
1936 map->menu_info = kmemdup(mapping->menu_info, size, GFP_KERNEL); __uvc_ctrl_add_mapping()
1949 "Adding mapping '%s' to control %pUl/%u.\n", __uvc_ctrl_add_mapping()
1956 const struct uvc_control_mapping *mapping) uvc_ctrl_add_mapping()
1965 if (mapping->id & ~V4L2_CTRL_ID_MASK) { uvc_ctrl_add_mapping()
1966 uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', control " uvc_ctrl_add_mapping()
1967 "id 0x%08x is invalid.\n", mapping->name, uvc_ctrl_add_mapping()
1968 mapping->id); uvc_ctrl_add_mapping()
1977 !uvc_entity_match_guid(entity, mapping->entity)) uvc_ctrl_add_mapping()
1982 if (ctrl->index == mapping->selector - 1) { uvc_ctrl_add_mapping()
2005 if (mapping->id == map->id) { uvc_ctrl_add_mapping()
2006 uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', " uvc_ctrl_add_mapping()
2008 mapping->name, mapping->id); uvc_ctrl_add_mapping()
2017 uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', maximum " uvc_ctrl_add_mapping()
2018 "mappings count (%u) exceeded.\n", mapping->name, uvc_ctrl_add_mapping()
2024 ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping); uvc_ctrl_add_mapping()
2103 const struct uvc_control_mapping *mapping = uvc_ctrl_mappings; uvc_ctrl_init_ctrl() local
2105 mapping + ARRAY_SIZE(uvc_ctrl_mappings); uvc_ctrl_init_ctrl()
2126 for (; mapping < mend; ++mapping) { uvc_ctrl_init_ctrl()
2127 if (uvc_entity_match_guid(ctrl->entity, mapping->entity) && uvc_ctrl_init_ctrl()
2128 ctrl->info.selector == mapping->selector) uvc_ctrl_init_ctrl()
2129 __uvc_ctrl_add_mapping(dev, ctrl, mapping); uvc_ctrl_init_ctrl()
2195 struct uvc_control_mapping *mapping, *nm; uvc_ctrl_cleanup_mappings() local
2197 list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) { uvc_ctrl_cleanup_mappings()
2198 list_del(&mapping->list); uvc_ctrl_cleanup_mappings()
2199 kfree(mapping->menu_info); uvc_ctrl_cleanup_mappings()
2200 kfree(mapping); uvc_ctrl_cleanup_mappings()
859 __uvc_find_control(struct uvc_entity *entity, __u32 v4l2_id, struct uvc_control_mapping **mapping, struct uvc_control **control, int next) __uvc_find_control() argument
891 uvc_find_control(struct uvc_video_chain *chain, __u32 v4l2_id, struct uvc_control_mapping **mapping) uvc_find_control() argument
973 __uvc_ctrl_get(struct uvc_video_chain *chain, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, s32 *value) __uvc_ctrl_get() argument
1011 __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, struct v4l2_queryctrl *v4l2_ctrl) __uvc_query_v4l2_ctrl() argument
1195 uvc_ctrl_fill_event(struct uvc_video_chain *chain, struct v4l2_event *ev, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, s32 value, u32 changes) uvc_ctrl_fill_event() argument
1218 uvc_ctrl_send_event(struct uvc_fh *handle, struct uvc_control *ctrl, struct uvc_control_mapping *mapping, s32 value, u32 changes) uvc_ctrl_send_event() argument
1919 __uvc_ctrl_add_mapping(struct uvc_device *dev, struct uvc_control *ctrl, const struct uvc_control_mapping *mapping) __uvc_ctrl_add_mapping() argument
1955 uvc_ctrl_add_mapping(struct uvc_video_chain *chain, const struct uvc_control_mapping *mapping) uvc_ctrl_add_mapping() argument
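The uvc_get_le_value() comment above describes extracting the bit string at mapping->offset / mapping->size from little-endian control data and sign-extending it for signed mappings. A standalone sketch of that extraction (plain C, not driver code; the buffer contents are just an example):

#include <stdint.h>
#include <stdio.h>

/* Extract a 'size'-bit little-endian field starting at bit 'offset' and
 * sign-extend it, as done for UVC_CTRL_DATA_TYPE_SIGNED mappings. */
static int32_t get_le_bits(const uint8_t *data, unsigned offset, unsigned size)
{
	uint32_t value = 0;
	unsigned i;

	for (i = 0; i < size; i++, offset++)
		value |= (uint32_t)((data[offset / 8] >> (offset % 8)) & 1) << i;

	/* Sign-extend from bit 'size - 1'. */
	if (value & (1u << (size - 1)))
		value |= ~0u << size;
	return (int32_t)value;
}

int main(void)
{
	uint8_t buf[2] = { 0xf4, 0x0f };	/* 12-bit field 0xff4 at bit offset 0 */
	printf("%d\n", get_le_bits(buf, 0, 12));	/* prints -12 */
	return 0;
}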
/linux-4.1.27/drivers/net/wireless/mwifiex/
util.h
69 struct mwifiex_dma_mapping *mapping) mwifiex_store_mapping()
73 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); mwifiex_store_mapping()
77 struct mwifiex_dma_mapping *mapping) mwifiex_get_mapping()
81 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); mwifiex_get_mapping()
86 struct mwifiex_dma_mapping mapping; MWIFIEX_SKB_DMA_ADDR() local
88 mwifiex_get_mapping(skb, &mapping); MWIFIEX_SKB_DMA_ADDR()
90 return mapping.addr; MWIFIEX_SKB_DMA_ADDR()
68 mwifiex_store_mapping(struct sk_buff *skb, struct mwifiex_dma_mapping *mapping) mwifiex_store_mapping() argument
76 mwifiex_get_mapping(struct sk_buff *skb, struct mwifiex_dma_mapping *mapping) mwifiex_get_mapping() argument
/linux-4.1.27/mm/
truncate.c
26 static void clear_exceptional_entry(struct address_space *mapping, clear_exceptional_entry() argument
33 if (shmem_mapping(mapping)) clear_exceptional_entry()
36 spin_lock_irq(&mapping->tree_lock); clear_exceptional_entry()
42 if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot)) clear_exceptional_entry()
47 mapping->nrshadows--; clear_exceptional_entry()
56 * protected by mapping->tree_lock. clear_exceptional_entry()
61 __radix_tree_delete_node(&mapping->page_tree, node); clear_exceptional_entry()
63 spin_unlock_irq(&mapping->tree_lock); clear_exceptional_entry()
86 invalidatepage = page->mapping->a_ops->invalidatepage; do_invalidatepage()
100 * We need to bale out if page->mapping is no longer equal to the original
101 * mapping. This happens a) when the VM reclaimed the page while we waited on
106 truncate_complete_page(struct address_space *mapping, struct page *page) truncate_complete_page() argument
108 if (page->mapping != mapping) truncate_complete_page()
120 account_page_cleaned(page, mapping); truncate_complete_page()
136 invalidate_complete_page(struct address_space *mapping, struct page *page) invalidate_complete_page() argument
140 if (page->mapping != mapping) invalidate_complete_page()
146 ret = remove_mapping(mapping, page); invalidate_complete_page()
151 int truncate_inode_page(struct address_space *mapping, struct page *page) truncate_inode_page() argument
154 unmap_mapping_range(mapping, truncate_inode_page()
158 return truncate_complete_page(mapping, page); truncate_inode_page()
164 int generic_error_remove_page(struct address_space *mapping, struct page *page) generic_error_remove_page() argument
166 if (!mapping) generic_error_remove_page()
172 if (!S_ISREG(mapping->host->i_mode)) generic_error_remove_page()
174 return truncate_inode_page(mapping, page); generic_error_remove_page()
179 * Safely invalidate one page from its pagecache mapping.
186 struct address_space *mapping = page_mapping(page); invalidate_inode_page() local
187 if (!mapping) invalidate_inode_page()
193 return invalidate_complete_page(mapping, page); invalidate_inode_page()
198 * @mapping: mapping to truncate
213 * mapping is large, it is probably the case that the final pages are the most
220 void truncate_inode_pages_range(struct address_space *mapping, truncate_inode_pages_range() argument
232 cleancache_invalidate_inode(mapping); truncate_inode_pages_range()
233 if (mapping->nrpages == 0 && mapping->nrshadows == 0) truncate_inode_pages_range()
259 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, truncate_inode_pages_range()
271 clear_exceptional_entry(mapping, index, page); truncate_inode_pages_range()
282 truncate_inode_page(mapping, page); truncate_inode_pages_range()
292 struct page *page = find_lock_page(mapping, start - 1); truncate_inode_pages_range()
302 cleancache_invalidate_page(mapping, page); truncate_inode_pages_range()
311 struct page *page = find_lock_page(mapping, end); truncate_inode_pages_range()
315 cleancache_invalidate_page(mapping, page); truncate_inode_pages_range()
333 if (!pagevec_lookup_entries(&pvec, mapping, index, truncate_inode_pages_range()
360 clear_exceptional_entry(mapping, index, page); truncate_inode_pages_range()
367 truncate_inode_page(mapping, page); truncate_inode_pages_range()
374 cleancache_invalidate_inode(mapping); truncate_inode_pages_range()
380 * @mapping: mapping to truncate
387 * mapping->nrpages can be non-zero when this function returns even after
388 * truncation of the whole mapping.
390 void truncate_inode_pages(struct address_space *mapping, loff_t lstart) truncate_inode_pages() argument
392 truncate_inode_pages_range(mapping, lstart, (loff_t)-1); truncate_inode_pages()
398 * @mapping: mapping to truncate
405 void truncate_inode_pages_final(struct address_space *mapping) truncate_inode_pages_final() argument
417 mapping_set_exiting(mapping); truncate_inode_pages_final()
424 nrpages = mapping->nrpages; truncate_inode_pages_final()
426 nrshadows = mapping->nrshadows; truncate_inode_pages_final()
435 spin_lock_irq(&mapping->tree_lock); truncate_inode_pages_final()
436 spin_unlock_irq(&mapping->tree_lock); truncate_inode_pages_final()
438 truncate_inode_pages(mapping, 0); truncate_inode_pages_final()
445 * @mapping: the address_space which holds the pages to invalidate
456 unsigned long invalidate_mapping_pages(struct address_space *mapping, invalidate_mapping_pages() argument
467 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, invalidate_mapping_pages()
479 clear_exceptional_entry(mapping, index, page); invalidate_mapping_pages()
513 invalidate_complete_page2(struct address_space *mapping, struct page *page) invalidate_complete_page2() argument
515 if (page->mapping != mapping) invalidate_complete_page2()
521 spin_lock_irq(&mapping->tree_lock); invalidate_complete_page2()
527 spin_unlock_irq(&mapping->tree_lock); invalidate_complete_page2()
529 if (mapping->a_ops->freepage) invalidate_complete_page2()
530 mapping->a_ops->freepage(page); invalidate_complete_page2()
535 spin_unlock_irq(&mapping->tree_lock); invalidate_complete_page2()
539 static int do_launder_page(struct address_space *mapping, struct page *page) do_launder_page() argument
543 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL) do_launder_page()
545 return mapping->a_ops->launder_page(page); do_launder_page()
550 * @mapping: the address_space
559 int invalidate_inode_pages2_range(struct address_space *mapping, invalidate_inode_pages2_range() argument
570 cleancache_invalidate_inode(mapping); invalidate_inode_pages2_range()
573 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, invalidate_inode_pages2_range()
585 clear_exceptional_entry(mapping, index, page); invalidate_inode_pages2_range()
591 if (page->mapping != mapping) { invalidate_inode_pages2_range()
601 unmap_mapping_range(mapping, invalidate_inode_pages2_range()
611 unmap_mapping_range(mapping, invalidate_inode_pages2_range()
617 ret2 = do_launder_page(mapping, page); invalidate_inode_pages2_range()
619 if (!invalidate_complete_page2(mapping, page)) invalidate_inode_pages2_range()
631 cleancache_invalidate_inode(mapping); invalidate_inode_pages2_range()
638 * @mapping: the address_space
645 int invalidate_inode_pages2(struct address_space *mapping) invalidate_inode_pages2() argument
647 return invalidate_inode_pages2_range(mapping, 0, -1); invalidate_inode_pages2()
668 struct address_space *mapping = inode->i_mapping; truncate_pagecache() local
680 unmap_mapping_range(mapping, holebegin, 0, 1); truncate_pagecache()
681 truncate_inode_pages(mapping, newsize); truncate_pagecache()
682 unmap_mapping_range(mapping, holebegin, 0, 1); truncate_pagecache()
776 struct address_space *mapping = inode->i_mapping; truncate_pagecache_range() local
793 unmap_mapping_range(mapping, unmap_start, truncate_pagecache_range()
795 truncate_inode_pages_range(mapping, lstart, lend); truncate_pagecache_range()
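The common caller pattern for these helpers, shown as a sketch (the filesystem name is made up): an ->evict_inode() implementation drops the inode's whole page cache mapping with truncate_inode_pages_final() before clearing the inode.

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_evict_inode(struct inode *inode)	/* hypothetical */
{
	/* Drop every page (and shadow entry) cached for this inode. */
	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
}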
filemap.c
67 * ->mapping->tree_lock
75 * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
85 * ->mapping->tree_lock (__sync_single_inode)
111 static void page_cache_tree_delete(struct address_space *mapping, page_cache_tree_delete() argument
122 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); page_cache_tree_delete()
125 mapping->nrshadows++; page_cache_tree_delete()
134 mapping->nrpages--; page_cache_tree_delete()
138 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; page_cache_tree_delete()
148 radix_tree_tag_clear(&mapping->page_tree, index, tag); page_cache_tree_delete()
157 if (__radix_tree_delete_node(&mapping->page_tree, node)) page_cache_tree_delete()
165 * protected by mapping->tree_lock. page_cache_tree_delete()
169 node->private_data = mapping; page_cache_tree_delete()
177 * is safe. The caller must hold the mapping's tree_lock.
181 struct address_space *mapping = page->mapping; __delete_from_page_cache() local
192 cleancache_invalidate_page(mapping, page); __delete_from_page_cache()
194 page_cache_tree_delete(mapping, page, shadow); __delete_from_page_cache()
196 page->mapping = NULL; __delete_from_page_cache()
213 account_page_cleaned(page, mapping); __delete_from_page_cache()
226 struct address_space *mapping = page->mapping; delete_from_page_cache() local
231 freepage = mapping->a_ops->freepage; delete_from_page_cache()
232 spin_lock_irq(&mapping->tree_lock); delete_from_page_cache()
234 spin_unlock_irq(&mapping->tree_lock); delete_from_page_cache()
242 static int filemap_check_errors(struct address_space *mapping) filemap_check_errors() argument
246 if (test_bit(AS_ENOSPC, &mapping->flags) && filemap_check_errors()
247 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) filemap_check_errors()
249 if (test_bit(AS_EIO, &mapping->flags) && filemap_check_errors()
250 test_and_clear_bit(AS_EIO, &mapping->flags)) filemap_check_errors()
256 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
257 * @mapping: address space structure to write
262 * Start writeback against all of a mapping's dirty pages that lie
270 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, __filemap_fdatawrite_range() argument
281 if (!mapping_cap_writeback_dirty(mapping)) __filemap_fdatawrite_range()
284 ret = do_writepages(mapping, &wbc); __filemap_fdatawrite_range()
288 static inline int __filemap_fdatawrite(struct address_space *mapping, __filemap_fdatawrite() argument
291 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); __filemap_fdatawrite()
294 int filemap_fdatawrite(struct address_space *mapping) filemap_fdatawrite() argument
296 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); filemap_fdatawrite()
300 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, filemap_fdatawrite_range() argument
303 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); filemap_fdatawrite_range()
309 * @mapping: target address_space
314 int filemap_flush(struct address_space *mapping) filemap_flush() argument
316 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); filemap_flush()
322 * @mapping: address space structure to wait for
329 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, filemap_fdatawait_range() argument
343 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, filemap_fdatawait_range()
363 ret2 = filemap_check_errors(mapping); filemap_fdatawait_range()
373 * @mapping: address space structure to wait for
378 int filemap_fdatawait(struct address_space *mapping) filemap_fdatawait() argument
380 loff_t i_size = i_size_read(mapping->host); filemap_fdatawait()
385 return filemap_fdatawait_range(mapping, 0, i_size - 1); filemap_fdatawait()
389 int filemap_write_and_wait(struct address_space *mapping) filemap_write_and_wait() argument
393 if (mapping->nrpages) { filemap_write_and_wait()
394 err = filemap_fdatawrite(mapping); filemap_write_and_wait()
402 int err2 = filemap_fdatawait(mapping); filemap_write_and_wait()
407 err = filemap_check_errors(mapping); filemap_write_and_wait()
415 * @mapping: the address_space for the pages
424 int filemap_write_and_wait_range(struct address_space *mapping, filemap_write_and_wait_range() argument
429 if (mapping->nrpages) { filemap_write_and_wait_range()
430 err = __filemap_fdatawrite_range(mapping, lstart, lend, filemap_write_and_wait_range()
434 int err2 = filemap_fdatawait_range(mapping, filemap_write_and_wait_range()
440 err = filemap_check_errors(mapping); filemap_write_and_wait_range()
467 VM_BUG_ON_PAGE(new->mapping, new); replace_page_cache_page()
471 struct address_space *mapping = old->mapping; replace_page_cache_page() local
475 freepage = mapping->a_ops->freepage; replace_page_cache_page()
478 new->mapping = mapping; replace_page_cache_page()
481 spin_lock_irq(&mapping->tree_lock); replace_page_cache_page()
483 error = radix_tree_insert(&mapping->page_tree, offset, new); replace_page_cache_page()
485 mapping->nrpages++; replace_page_cache_page()
489 spin_unlock_irq(&mapping->tree_lock); replace_page_cache_page()
501 static int page_cache_tree_insert(struct address_space *mapping, page_cache_tree_insert() argument
508 error = __radix_tree_create(&mapping->page_tree, page->index, page_cache_tree_insert()
515 p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); page_cache_tree_insert()
520 mapping->nrshadows--; page_cache_tree_insert()
525 mapping->nrpages++; page_cache_tree_insert()
534 * mapping->tree_lock. page_cache_tree_insert()
544 struct address_space *mapping, __add_to_page_cache_locked()
570 page->mapping = mapping; __add_to_page_cache_locked()
573 spin_lock_irq(&mapping->tree_lock); __add_to_page_cache_locked()
574 error = page_cache_tree_insert(mapping, page, shadowp); __add_to_page_cache_locked()
579 spin_unlock_irq(&mapping->tree_lock); __add_to_page_cache_locked()
585 page->mapping = NULL; __add_to_page_cache_locked()
587 spin_unlock_irq(&mapping->tree_lock); __add_to_page_cache_locked()
597 * @mapping: the page's address_space
604 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, add_to_page_cache_locked() argument
607 return __add_to_page_cache_locked(page, mapping, offset, add_to_page_cache_locked()
612 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, add_to_page_cache_lru() argument
619 ret = __add_to_page_cache_locked(page, mapping, offset, add_to_page_cache_lru()
795 if (page->mapping) page_endio()
796 mapping_set_error(page->mapping, err); page_endio()
870 * @mapping: mapping
889 pgoff_t page_cache_next_hole(struct address_space *mapping, page_cache_next_hole() argument
897 page = radix_tree_lookup(&mapping->page_tree, index); page_cache_next_hole()
911 * @mapping: mapping
930 pgoff_t page_cache_prev_hole(struct address_space *mapping, page_cache_prev_hole() argument
938 page = radix_tree_lookup(&mapping->page_tree, index); page_cache_prev_hole()
952 * @mapping: the address_space to search
955 * Looks up the page cache slot at @mapping & @offset. If there is a
963 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) find_get_entry() argument
971 pagep = radix_tree_lookup_slot(&mapping->page_tree, offset); find_get_entry()
1008 * @mapping: the address_space to search
1011 * Looks up the page cache slot at @mapping & @offset. If there is a
1022 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) find_lock_entry() argument
1027 page = find_get_entry(mapping, offset); find_lock_entry()
1031 if (unlikely(page->mapping != mapping)) { find_lock_entry()
1044 * @mapping: the address_space to search
1049 * Looks up the page cache slot at @mapping & @offset.
1065 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, pagecache_get_page() argument
1071 page = find_get_entry(mapping, offset); pagecache_get_page()
1088 if (unlikely(page->mapping != mapping)) { pagecache_get_page()
1102 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) pagecache_get_page()
1118 err = add_to_page_cache_lru(page, mapping, offset, pagecache_get_page()
1134 * @mapping: The address_space to search
1141 * @nr_entries entries in the mapping. The entries are placed at
1145 * The search returns a group of mapping-contiguous page cache entries
1155 unsigned find_get_entries(struct address_space *mapping, find_get_entries() argument
1168 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { find_get_entries()
1204 * @mapping: The address_space to search
1210 * @nr_pages pages in the mapping. The pages are placed at @pages.
1213 * The search returns a group of mapping-contiguous pages with ascending
1218 unsigned find_get_pages(struct address_space *mapping, pgoff_t start, find_get_pages() argument
1230 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { find_get_pages()
1275 * @mapping: The address_space to search
1285 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, find_get_pages_contig() argument
1297 radix_tree_for_each_contig(slot, &mapping->page_tree, &iter, index) { find_get_pages_contig()
1332 * must check mapping and index after taking the ref. find_get_pages_contig()
1336 if (page->mapping == NULL || page->index != iter.index) { find_get_pages_contig()
1352 * @mapping: the address_space to search
1361 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, find_get_pages_tag() argument
1373 radix_tree_for_each_tagged(slot, &mapping->page_tree, find_get_pages_tag()
1456 * mapping->a_ops->readpage() function for the actual low-level stuff.
1464 struct address_space *mapping = filp->f_mapping; do_generic_file_read() local
1465 struct inode *inode = mapping->host; do_generic_file_read()
1488 page = find_get_page(mapping, index); do_generic_file_read()
1490 page_cache_sync_readahead(mapping, do_generic_file_read()
1493 page = find_get_page(mapping, index); do_generic_file_read()
1498 page_cache_async_readahead(mapping, do_generic_file_read()
1504 !mapping->a_ops->is_partially_uptodate) do_generic_file_read()
1509 if (!page->mapping) do_generic_file_read()
1511 if (!mapping->a_ops->is_partially_uptodate(page, do_generic_file_read()
1548 if (mapping_writably_mapped(mapping)) do_generic_file_read()
1588 if (!page->mapping) { do_generic_file_read()
1608 error = mapping->a_ops->readpage(filp, page); do_generic_file_read()
1624 if (page->mapping == NULL) { do_generic_file_read()
1652 page = page_cache_alloc_cold(mapping); do_generic_file_read()
1657 error = add_to_page_cache_lru(page, mapping, do_generic_file_read()
1697 struct address_space *mapping = file->f_mapping; generic_file_read_iter() local
1698 struct inode *inode = mapping->host; generic_file_read_iter()
1705 retval = filemap_write_and_wait_range(mapping, pos, generic_file_read_iter()
1709 retval = mapping->a_ops->direct_IO(iocb, &data, pos); generic_file_read_iter()
1750 struct address_space *mapping = file->f_mapping; page_cache_read() local
1755 page = page_cache_alloc_cold(mapping); page_cache_read()
1759 ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL); page_cache_read()
1761 ret = mapping->a_ops->readpage(file, page); page_cache_read()
1784 struct address_space *mapping = file->f_mapping; do_sync_mmap_readahead() local
1793 page_cache_sync_readahead(mapping, ra, file, offset, do_sync_mmap_readahead()
1816 ra_submit(ra, mapping, file); do_sync_mmap_readahead()
1829 struct address_space *mapping = file->f_mapping; do_async_mmap_readahead() local
1837 page_cache_async_readahead(mapping, ra, file, do_async_mmap_readahead()
1869 struct address_space *mapping = file->f_mapping; filemap_fault() local
1871 struct inode *inode = mapping->host; filemap_fault()
1884 page = find_get_page(mapping, offset); filemap_fault()
1898 page = find_get_page(mapping, offset); filemap_fault()
1909 if (unlikely(page->mapping != mapping)) { filemap_fault()
1969 error = mapping->a_ops->readpage(file, page); filemap_fault()
1991 struct address_space *mapping = file->f_mapping; filemap_map_pages() local
1999 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, vmf->pgoff) { filemap_map_pages()
2029 if (page->mapping != mapping || !PageUptodate(page)) filemap_map_pages()
2032 size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); filemap_map_pages()
2067 if (page->mapping != inode->i_mapping) { filemap_page_mkwrite()
2095 struct address_space *mapping = file->f_mapping; generic_file_mmap() local
2097 if (!mapping->a_ops->readpage) generic_file_mmap()
2139 static struct page *__read_cache_page(struct address_space *mapping, __read_cache_page() argument
2148 page = find_get_page(mapping, index); __read_cache_page()
2153 err = add_to_page_cache_lru(page, mapping, index, gfp); __read_cache_page()
2172 static struct page *do_read_cache_page(struct address_space *mapping, do_read_cache_page() argument
2183 page = __read_cache_page(mapping, index, filler, data, gfp); do_read_cache_page()
2190 if (!page->mapping) { do_read_cache_page()
2215 * @mapping: the page's address_space
2225 struct page *read_cache_page(struct address_space *mapping, read_cache_page() argument
2230 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); read_cache_page()
2236 * @mapping: the page's address_space
2240 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
2245 struct page *read_cache_page_gfp(struct address_space *mapping, read_cache_page_gfp() argument
2249 filler_t *filler = (filler_t *)mapping->a_ops->readpage; read_cache_page_gfp()
2251 return do_read_cache_page(mapping, index, filler, NULL, gfp); read_cache_page_gfp()
2311 int pagecache_write_begin(struct file *file, struct address_space *mapping, pagecache_write_begin() argument
2315 const struct address_space_operations *aops = mapping->a_ops; pagecache_write_begin()
2317 return aops->write_begin(file, mapping, pos, len, flags, pagecache_write_begin()
2322 int pagecache_write_end(struct file *file, struct address_space *mapping, pagecache_write_end() argument
2326 const struct address_space_operations *aops = mapping->a_ops; pagecache_write_end()
2328 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); pagecache_write_end()
2336 struct address_space *mapping = file->f_mapping; generic_file_direct_write() local
2337 struct inode *inode = mapping->host; generic_file_direct_write()
2346 written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); generic_file_direct_write()
2356 if (mapping->nrpages) { generic_file_direct_write()
2357 written = invalidate_inode_pages2_range(mapping, generic_file_direct_write()
2371 written = mapping->a_ops->direct_IO(iocb, &data, pos); generic_file_direct_write()
2381 if (mapping->nrpages) { generic_file_direct_write()
2382 invalidate_inode_pages2_range(mapping, generic_file_direct_write()
2404 struct page *grab_cache_page_write_begin(struct address_space *mapping, grab_cache_page_write_begin() argument
2413 page = pagecache_get_page(mapping, index, fgp_flags, grab_cache_page_write_begin()
2414 mapping_gfp_mask(mapping)); grab_cache_page_write_begin()
2425 struct address_space *mapping = file->f_mapping; generic_perform_write() local
2426 const struct address_space_operations *a_ops = mapping->a_ops; generic_perform_write()
2469 status = a_ops->write_begin(file, mapping, pos, bytes, flags, generic_perform_write()
2474 if (mapping_writably_mapped(mapping)) generic_perform_write()
2480 status = a_ops->write_end(file, mapping, pos, bytes, copied, generic_perform_write()
2505 balance_dirty_pages_ratelimited(mapping); generic_perform_write()
2532 struct address_space * mapping = file->f_mapping; __generic_file_write_iter() local
2533 struct inode *inode = mapping->host; __generic_file_write_iter()
2580 err = filemap_write_and_wait_range(mapping, pos, endbyte); __generic_file_write_iter()
2584 invalidate_mapping_pages(mapping, __generic_file_write_iter()
2655 struct address_space * const mapping = page->mapping; try_to_release_page() local
2661 if (mapping && mapping->a_ops->releasepage) try_to_release_page()
2662 return mapping->a_ops->releasepage(page, gfp_mask); try_to_release_page()
543 __add_to_page_cache_locked(struct page *page, struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask, void **shadowp) __add_to_page_cache_locked() argument
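A sketch of the usual ->fsync() shape built on filemap_write_and_wait_range() above (the filesystem name is illustrative, and a real implementation would also write back inode metadata):

#include <linux/fs.h>

static int myfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;	/* hypothetical filesystem */
	int err;

	/* Write back and wait on the dirty pages in [start, end]. */
	err = filemap_write_and_wait_range(file->f_mapping, start, end);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	/* ...flush inode metadata here... */
	mutex_unlock(&inode->i_mutex);
	return 0;
}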
readahead.c
28 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) file_ra_state_init() argument
30 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; file_ra_state_init()
44 static void read_cache_pages_invalidate_page(struct address_space *mapping, read_cache_pages_invalidate_page() argument
50 page->mapping = mapping; read_cache_pages_invalidate_page()
52 page->mapping = NULL; read_cache_pages_invalidate_page()
61 static void read_cache_pages_invalidate_pages(struct address_space *mapping, read_cache_pages_invalidate_pages() argument
69 read_cache_pages_invalidate_page(mapping, victim); read_cache_pages_invalidate_pages()
75 * @mapping: the address_space
83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, read_cache_pages() argument
92 if (add_to_page_cache_lru(page, mapping, read_cache_pages()
94 read_cache_pages_invalidate_page(mapping, page); read_cache_pages()
101 read_cache_pages_invalidate_pages(mapping, pages); read_cache_pages()
111 static int read_pages(struct address_space *mapping, struct file *filp, read_pages() argument
120 if (mapping->a_ops->readpages) { read_pages()
121 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages); read_pages()
130 if (!add_to_page_cache_lru(page, mapping, read_pages()
132 mapping->a_ops->readpage(filp, page); read_pages()
152 int __do_page_cache_readahead(struct address_space *mapping, struct file *filp, __do_page_cache_readahead() argument
156 struct inode *inode = mapping->host; __do_page_cache_readahead()
179 page = radix_tree_lookup(&mapping->page_tree, page_offset); __do_page_cache_readahead()
184 page = page_cache_alloc_readahead(mapping); __do_page_cache_readahead()
200 read_pages(mapping, filp, &page_pool, ret); __do_page_cache_readahead()
210 int force_page_cache_readahead(struct address_space *mapping, struct file *filp, force_page_cache_readahead() argument
213 if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages)) force_page_cache_readahead()
224 err = __do_page_cache_readahead(mapping, filp, force_page_cache_readahead()
328 static pgoff_t count_history_pages(struct address_space *mapping, count_history_pages() argument
334 head = page_cache_prev_hole(mapping, offset - 1, max); count_history_pages()
343 static int try_context_readahead(struct address_space *mapping, try_context_readahead() argument
351 size = count_history_pages(mapping, offset, max); try_context_readahead()
378 ondemand_readahead(struct address_space *mapping, ondemand_readahead() argument
414 start = page_cache_next_hole(mapping, offset + 1, max); ondemand_readahead()
447 if (try_context_readahead(mapping, ra, offset, req_size, max)) ondemand_readahead()
454 return __do_page_cache_readahead(mapping, filp, offset, req_size, 0); ondemand_readahead()
472 return ra_submit(ra, mapping, filp); ondemand_readahead()
477 * @mapping: address_space which holds the pagecache and I/O vectors
480 * @offset: start offset into @mapping, in pagecache page-sized units
489 void page_cache_sync_readahead(struct address_space *mapping, page_cache_sync_readahead() argument
499 force_page_cache_readahead(mapping, filp, offset, req_size); page_cache_sync_readahead()
504 ondemand_readahead(mapping, ra, filp, false, offset, req_size); page_cache_sync_readahead()
510 * @mapping: address_space which holds the pagecache and I/O vectors
514 * @offset: start offset into @mapping, in pagecache page-sized units
524 page_cache_async_readahead(struct address_space *mapping, page_cache_async_readahead() argument
544 if (bdi_read_congested(inode_to_bdi(mapping->host))) page_cache_async_readahead()
548 ondemand_readahead(mapping, ra, filp, true, offset, req_size); page_cache_async_readahead()
553 do_readahead(struct address_space *mapping, struct file *filp, do_readahead() argument
556 if (!mapping || !mapping->a_ops) do_readahead()
559 return force_page_cache_readahead(mapping, filp, index, nr); do_readahead()
571 struct address_space *mapping = f.file->f_mapping; SYSCALL_DEFINE3() local
575 ret = do_readahead(mapping, f.file, start, len); SYSCALL_DEFINE3()
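The SYSCALL_DEFINE3 above implements readahead(2). From user space the call looks like this sketch (the helper name is made up); it only populates the page cache and transfers no data to the caller:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Prefetch the first 'count' bytes of a file into the page cache. */
int prefetch_file(const char *path, size_t count)	/* hypothetical helper */
{
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	/* readahead(2) blocks until the requested pages have been read. */
	if (readahead(fd, 0, count) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}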
fadvise.c
32 struct address_space *mapping; SYSCALL_DEFINE4() local
49 mapping = f.file->f_mapping; SYSCALL_DEFINE4()
50 if (!mapping || len < 0) { SYSCALL_DEFINE4()
78 bdi = inode_to_bdi(mapping->host); SYSCALL_DEFINE4()
112 force_page_cache_readahead(mapping, f.file, start_index, SYSCALL_DEFINE4()
119 __filemap_fdatawrite_range(mapping, offset, endbyte, SYSCALL_DEFINE4()
131 unsigned long count = invalidate_mapping_pages(mapping, SYSCALL_DEFINE4()
142 invalidate_mapping_pages(mapping, start_index, SYSCALL_DEFINE4()
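fadvise.c is the kernel side of posix_fadvise(2); POSIX_FADV_DONTNEED, for example, ends up in the invalidate_mapping_pages() calls above. A user-space sketch (the helper name is made up):

#include <fcntl.h>
#include <unistd.h>

/* Hint that we are done with the cached pages of a file we just wrote. */
int drop_file_cache(int fd)	/* hypothetical helper */
{
	/* Flush dirty pages first so DONTNEED can actually drop them. */
	if (fsync(fd) < 0)
		return -1;
	/* offset 0, len 0 means "the whole file"; returns 0 or an errno value. */
	return posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED);
}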
rmap.c
7 * Simple, low overhead reverse mapping scheme.
26 * mapping->i_mmap_rwsem
32 * mapping->private_lock (in __set_page_dirty_buffers)
36 * mapping->tree_lock (widely used, in set_page_dirty,
40 * anon_vma->rwsem,mapping->i_mutex (memory_failure, collect_procs_anon)
141 * This makes sure the memory mapping described by 'vma' has
146 * not we either need to find an adjacent mapping that we
450 * that the anon_vma pointer from page->mapping is valid if there is a
459 anon_mapping = (unsigned long)READ_ONCE(page->mapping); page_get_anon_vma()
503 anon_mapping = (unsigned long)READ_ONCE(page->mapping); page_lock_anon_vma_read()
578 /* page should be within @vma mapping range */ vma_address()
600 } else if (page->mapping) { page_address_in_vma()
601 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) page_address_in_vma()
773 * mapping as such. If the page has been used in page_referenced_one()
774 * another mapping, we will catch it; if this other page_referenced_one()
775 * mapping is already gone, the unmap path will have page_referenced_one()
910 struct address_space *mapping; page_mkclean() local
922 mapping = page_mapping(page); page_mkclean()
923 if (!mapping) page_mkclean()
953 page->mapping = (struct address_space *) anon_vma; page_move_anon_rmap()
960 * @address: User virtual address of the mapping
976 * page mapping! __page_set_anon_rmap()
982 page->mapping = (struct address_space *) anon_vma; __page_set_anon_rmap()
988 * @page: the page to add the mapping to
989 * @vma: the vm area in which the mapping is added
997 * The page's anon-rmap details (mapping and index) are guaranteed to __page_check_anon_rmap()
1014 * page_add_anon_rmap - add pte mapping to an anonymous page
1015 * @page: the page to add the mapping to
1016 * @vma: the vm area in which the mapping is added
1020 * the anon_vma case: to serialize mapping,index checking after setting,
1064 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1065 * @page: the page to add the mapping to
1066 * @vma: the vm area in which the mapping is added
1087 * page_add_file_rmap - add pte mapping to a file page
1088 * @page: the page to add the mapping to
1133 * page_remove_rmap - take down pte mapping from a page
1134 * @page: page to remove mapping from
1168 * It would be tidy to reset the PageAnon mapping here, page_remove_rmap()
1170 * which increments mapcount after us but sets mapping page_remove_rmap()
1305 * we now hold anon_vma->rwsem or mapping->i_mmap_rwsem. try_to_unmap_one()
1349 * Tries to remove all the page table entries which are mapping this
1354 * SWAP_AGAIN - we missed a mapping, try again later
1392 * Called from munlock code. Checks all of the VMAs mapping the page
1457 * Find all the mappings of a page using the mapping pointer and the vma chains
1499 * Find all the mappings of a page using the mapping pointer and the vma chains
1509 struct address_space *mapping = page->mapping; rmap_walk_file() local
1515 * The page lock not only makes sure that page->mapping cannot rmap_walk_file()
1517 * structure at mapping cannot be freed and reused yet, rmap_walk_file()
1518 * so we can safely take mapping->i_mmap_rwsem. rmap_walk_file()
1522 if (!mapping) rmap_walk_file()
1526 i_mmap_lock_read(mapping); rmap_walk_file()
1527 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { rmap_walk_file()
1541 i_mmap_unlock_read(mapping); rmap_walk_file()
1574 page->mapping = (struct address_space *) anon_vma; __hugepage_set_anon_rmap()
shmem.c
172 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
253 static int shmem_radix_tree_replace(struct address_space *mapping, shmem_radix_tree_replace() argument
261 pslot = radix_tree_lookup_slot(&mapping->page_tree, index); shmem_radix_tree_replace()
264 item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock); shmem_radix_tree_replace()
278 static bool shmem_confirm_swap(struct address_space *mapping, shmem_confirm_swap() argument
284 item = radix_tree_lookup(&mapping->page_tree, index); shmem_confirm_swap()
293 struct address_space *mapping, shmem_add_to_page_cache()
302 page->mapping = mapping; shmem_add_to_page_cache()
305 spin_lock_irq(&mapping->tree_lock); shmem_add_to_page_cache()
307 error = radix_tree_insert(&mapping->page_tree, index, page); shmem_add_to_page_cache()
309 error = shmem_radix_tree_replace(mapping, index, expected, shmem_add_to_page_cache()
312 mapping->nrpages++; shmem_add_to_page_cache()
315 spin_unlock_irq(&mapping->tree_lock); shmem_add_to_page_cache()
317 page->mapping = NULL; shmem_add_to_page_cache()
318 spin_unlock_irq(&mapping->tree_lock); shmem_add_to_page_cache()
329 struct address_space *mapping = page->mapping; shmem_delete_from_page_cache() local
332 spin_lock_irq(&mapping->tree_lock); shmem_delete_from_page_cache()
333 error = shmem_radix_tree_replace(mapping, page->index, page, radswap); shmem_delete_from_page_cache()
334 page->mapping = NULL; shmem_delete_from_page_cache()
335 mapping->nrpages--; shmem_delete_from_page_cache()
338 spin_unlock_irq(&mapping->tree_lock); shmem_delete_from_page_cache()
346 static int shmem_free_swap(struct address_space *mapping, shmem_free_swap() argument
351 spin_lock_irq(&mapping->tree_lock); shmem_free_swap()
352 old = radix_tree_delete_item(&mapping->page_tree, index, radswap); shmem_free_swap()
353 spin_unlock_irq(&mapping->tree_lock); shmem_free_swap()
363 void shmem_unlock_mapping(struct address_space *mapping) shmem_unlock_mapping() argument
373 while (!mapping_unevictable(mapping)) { shmem_unlock_mapping()
378 pvec.nr = find_get_entries(mapping, index, shmem_unlock_mapping()
397 struct address_space *mapping = inode->i_mapping; shmem_undo_range() local
415 pvec.nr = find_get_entries(mapping, index, shmem_undo_range()
430 nr_swaps_freed += !shmem_free_swap(mapping, shmem_undo_range()
438 if (page->mapping == mapping) { shmem_undo_range()
440 truncate_inode_page(mapping, page); shmem_undo_range()
483 pvec.nr = find_get_entries(mapping, index, shmem_undo_range()
504 if (shmem_free_swap(mapping, index, page)) { shmem_undo_range()
515 if (page->mapping == mapping) { shmem_undo_range()
517 truncate_inode_page(mapping, page); shmem_undo_range()
615 struct address_space *mapping = info->vfs_inode.i_mapping; shmem_unuse_inode() local
622 index = radix_tree_locate_item(&mapping->page_tree, radswap); shmem_unuse_inode()
635 gfp = mapping_gfp_mask(mapping); shmem_unuse_inode()
651 * inode or mapping or info to check that. However, we can shmem_unuse_inode()
668 error = shmem_add_to_page_cache(*pagep, mapping, index, shmem_unuse_inode()
747 struct address_space *mapping; shmem_writepage() local
753 mapping = page->mapping; shmem_writepage()
755 inode = mapping->host; shmem_writepage()
946 * ignorance of the mapping it belongs to. If that mapping has special
1035 struct address_space *mapping = inode->i_mapping; shmem_getpage_gfp() local
1049 page = find_lock_entry(mapping, index); shmem_getpage_gfp()
1101 !shmem_confirm_swap(mapping, index, swap)) { shmem_getpage_gfp()
1119 error = shmem_add_to_page_cache(page, mapping, index, shmem_getpage_gfp()
1185 error = shmem_add_to_page_cache(page, mapping, index, shmem_getpage_gfp()
1254 !shmem_confirm_swap(mapping, index, swap)) shmem_getpage_gfp()
1456 bool shmem_mapping(struct address_space *mapping) shmem_mapping() argument
1458 if (!mapping->host) shmem_mapping()
1461 return mapping->host->i_sb->s_op == &shmem_ops; shmem_mapping()
1475 shmem_write_begin(struct file *file, struct address_space *mapping, shmem_write_begin() argument
1479 struct inode *inode = mapping->host; shmem_write_begin()
1495 shmem_write_end(struct file *file, struct address_space *mapping, shmem_write_end() argument
1499 struct inode *inode = mapping->host; shmem_write_end()
1523 struct address_space *mapping = inode->i_mapping; shmem_file_read_iter() local
1589 if (mapping_writably_mapped(mapping)) shmem_file_read_iter()
1630 struct address_space *mapping = in->f_mapping; shmem_file_splice_read() local
1631 struct inode *inode = mapping->host; shmem_file_splice_read()
1664 spd.nr_pages = find_get_pages_contig(mapping, index, shmem_file_splice_read()
1691 if (!PageUptodate(page) || page->mapping != mapping) { shmem_file_splice_read()
1743 static pgoff_t shmem_seek_hole_data(struct address_space *mapping, shmem_seek_hole_data() argument
1755 pvec.nr = find_get_entries(mapping, index, shmem_seek_hole_data()
1792 struct address_space *mapping = file->f_mapping; shmem_file_llseek() local
1793 struct inode *inode = mapping->host; shmem_file_llseek()
1810 new_offset = shmem_seek_hole_data(mapping, start, end, whence); shmem_file_llseek()
1835 static void shmem_tag_pins(struct address_space *mapping) shmem_tag_pins() argument
1847 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) { shmem_tag_pins()
1853 spin_lock_irq(&mapping->tree_lock); shmem_tag_pins()
1854 radix_tree_tag_set(&mapping->page_tree, iter.index, shmem_tag_pins()
1856 spin_unlock_irq(&mapping->tree_lock); shmem_tag_pins()
1877 static int shmem_wait_for_pins(struct address_space *mapping) shmem_wait_for_pins() argument
1885 shmem_tag_pins(mapping); shmem_wait_for_pins()
1889 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED)) shmem_wait_for_pins()
1900 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter, shmem_wait_for_pins()
1924 spin_lock_irq(&mapping->tree_lock); shmem_wait_for_pins()
1925 radix_tree_tag_clear(&mapping->page_tree, shmem_wait_for_pins()
1927 spin_unlock_irq(&mapping->tree_lock); shmem_wait_for_pins()
2065 struct address_space *mapping = file->f_mapping; shmem_fallocate() local
2084 unmap_mapping_range(mapping, unmap_start, shmem_fallocate()
3288 void shmem_unlock_mapping(struct address_space *mapping) shmem_unlock_mapping() argument
3396 * shmem_zero_setup - setup a shared anonymous mapping
3407 * accessible to the user through its mapping, use S_PRIVATE flag to shmem_zero_setup()
3423 * @mapping: the page's address_space
3427 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3436 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, shmem_read_mapping_page_gfp() argument
3440 struct inode *inode = mapping->host; shmem_read_mapping_page_gfp()
3444 BUG_ON(mapping->a_ops != &shmem_aops); shmem_read_mapping_page_gfp()
3455 return read_cache_page_gfp(mapping, index, gfp); shmem_read_mapping_page_gfp()
292 shmem_add_to_page_cache(struct page *page, struct address_space *mapping, pgoff_t index, void *expected) shmem_add_to_page_cache() argument
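
shmem_read_mapping_page_gfp() above is the interface drivers use to pull backing pages out of a tmpfs file (the comment calls it a tmpfs read_cache_page_gfp()). A minimal usage sketch, assuming the file comes from shmem_file_setup(); the function name and the 1MB size are illustrative only.

    static int demo_pin_first_page(void)
    {
        struct file *filp = shmem_file_setup("demo", SZ_1M, VM_NORESERVE);
        struct address_space *mapping;
        struct page *page;

        if (IS_ERR(filp))
            return PTR_ERR(filp);

        mapping = file_inode(filp)->i_mapping;
        page = shmem_read_mapping_page_gfp(mapping, 0, mapping_gfp_mask(mapping));
        if (IS_ERR(page)) {
            fput(filp);
            return PTR_ERR(page);
        }

        /* ... use the page contents ... */

        page_cache_release(page);   /* drop the reference the lookup took */
        fput(filp);
        return 0;
    }
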
H A Dpage-writeback.c1341 static void balance_dirty_pages(struct address_space *mapping, balance_dirty_pages() argument
1357 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); balance_dirty_pages()
1567 * @mapping: address_space which was dirtied
1578 void balance_dirty_pages_ratelimited(struct address_space *mapping) balance_dirty_pages_ratelimited() argument
1580 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); balance_dirty_pages_ratelimited()
1620 balance_dirty_pages(mapping, current->nr_dirtied); balance_dirty_pages_ratelimited()
1778 * @mapping: address space structure to write
1793 void tag_pages_for_writeback(struct address_space *mapping, tag_pages_for_writeback() argument
1800 spin_lock_irq(&mapping->tree_lock); tag_pages_for_writeback()
1801 tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, tag_pages_for_writeback()
1804 spin_unlock_irq(&mapping->tree_lock); tag_pages_for_writeback()
1814 * @mapping: address space structure to write
1834 int write_cache_pages(struct address_space *mapping, write_cache_pages() argument
1852 writeback_index = mapping->writeback_index; /* prev offset */ write_cache_pages()
1872 tag_pages_for_writeback(mapping, index, end); write_cache_pages()
1877 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, write_cache_pages()
1887 * invalidated (changing page->mapping to NULL), or write_cache_pages()
1889 * mapping. However, page->index will not change write_cache_pages()
1913 if (unlikely(page->mapping != mapping)) { write_cache_pages()
1935 trace_wbc_writepage(wbc, inode_to_bdi(mapping->host)); write_cache_pages()
1984 mapping->writeback_index = done_index; write_cache_pages()
1992 * function and set the mapping flags on error
1997 struct address_space *mapping = data; __writepage() local
1998 int ret = mapping->a_ops->writepage(page, wbc); __writepage()
1999 mapping_set_error(mapping, ret); __writepage()
2005 * @mapping: address space structure to write
2011 int generic_writepages(struct address_space *mapping, generic_writepages() argument
2018 if (!mapping->a_ops->writepage) generic_writepages()
2022 ret = write_cache_pages(mapping, wbc, __writepage, mapping); generic_writepages()
2029 int do_writepages(struct address_space *mapping, struct writeback_control *wbc) do_writepages() argument
2035 if (mapping->a_ops->writepages) do_writepages()
2036 ret = mapping->a_ops->writepages(mapping, wbc); do_writepages()
2038 ret = generic_writepages(mapping, wbc); do_writepages()
2053 struct address_space *mapping = page->mapping; write_one_page() local
2067 ret = mapping->a_ops->writepage(page, &wbc); write_one_page()
2095 void account_page_dirtied(struct page *page, struct address_space *mapping) account_page_dirtied() argument
2097 trace_writeback_dirty_page(page, mapping); account_page_dirtied()
2099 if (mapping_cap_account_dirty(mapping)) { account_page_dirtied()
2100 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); account_page_dirtied()
2122 void account_page_cleaned(struct page *page, struct address_space *mapping) account_page_cleaned() argument
2124 if (mapping_cap_account_dirty(mapping)) { account_page_cleaned()
2126 dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE); account_page_cleaned()
2147 struct address_space *mapping = page_mapping(page); __set_page_dirty_nobuffers() local
2150 if (!mapping) __set_page_dirty_nobuffers()
2153 spin_lock_irqsave(&mapping->tree_lock, flags); __set_page_dirty_nobuffers()
2154 BUG_ON(page_mapping(page) != mapping); __set_page_dirty_nobuffers()
2156 account_page_dirtied(page, mapping); __set_page_dirty_nobuffers()
2157 radix_tree_tag_set(&mapping->page_tree, page_index(page), __set_page_dirty_nobuffers()
2159 spin_unlock_irqrestore(&mapping->tree_lock, flags); __set_page_dirty_nobuffers()
2160 if (mapping->host) { __set_page_dirty_nobuffers()
2162 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); __set_page_dirty_nobuffers()
2179 struct address_space *mapping = page->mapping; account_page_redirty() local
2180 if (mapping && mapping_cap_account_dirty(mapping)) { account_page_redirty()
2183 dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED); account_page_redirty()
2207 * For pages with a mapping this should be done under the page lock
2212 * If the mapping doesn't provide a set_page_dirty a_op, then
2217 struct address_space *mapping = page_mapping(page); set_page_dirty() local
2219 if (likely(mapping)) { set_page_dirty()
2220 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; set_page_dirty()
2249 * page->mapping->host, and if the page is unlocked. This is because another
2250 * CPU could truncate the page off the mapping and then free the mapping.
2284 struct address_space *mapping = page_mapping(page); clear_page_dirty_for_io() local
2288 if (mapping && mapping_cap_account_dirty(mapping)) { clear_page_dirty_for_io()
2326 dec_bdi_stat(inode_to_bdi(mapping->host), clear_page_dirty_for_io()
2338 struct address_space *mapping = page_mapping(page); test_clear_page_writeback() local
2343 if (mapping) { test_clear_page_writeback()
2344 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); test_clear_page_writeback()
2347 spin_lock_irqsave(&mapping->tree_lock, flags); test_clear_page_writeback()
2350 radix_tree_tag_clear(&mapping->page_tree, test_clear_page_writeback()
2358 spin_unlock_irqrestore(&mapping->tree_lock, flags); test_clear_page_writeback()
2373 struct address_space *mapping = page_mapping(page); __test_set_page_writeback() local
2378 if (mapping) { __test_set_page_writeback()
2379 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); __test_set_page_writeback()
2382 spin_lock_irqsave(&mapping->tree_lock, flags); __test_set_page_writeback()
2385 radix_tree_tag_set(&mapping->page_tree, __test_set_page_writeback()
2392 radix_tree_tag_clear(&mapping->page_tree, __test_set_page_writeback()
2396 radix_tree_tag_clear(&mapping->page_tree, __test_set_page_writeback()
2399 spin_unlock_irqrestore(&mapping->tree_lock, flags); __test_set_page_writeback()
2414 * Return true if any of the pages in the mapping are marked with the
2417 int mapping_tagged(struct address_space *mapping, int tag) mapping_tagged() argument
2419 return radix_tree_tagged(&mapping->page_tree, tag); mapping_tagged()
2433 if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host))) wait_for_stable_page()
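
write_cache_pages() above already handles tagging, pagevec lookups and cycling writeback_index, so a filesystem's ->writepages normally just supplies the per-page callback, exactly as the generic_writepages()/__writepage() pair shown here does. A condensed sketch of that wiring; names prefixed demo_ are placeholders.

    static int demo_writepage_cb(struct page *page,
                                 struct writeback_control *wbc, void *data)
    {
        struct address_space *mapping = data;
        int ret = mapping->a_ops->writepage(page, wbc);

        mapping_set_error(mapping, ret);    /* remember -EIO/-ENOSPC for a later fsync */
        return ret;
    }

    static int demo_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
    {
        if (!mapping->a_ops->writepage)
            return 0;                       /* nothing to do, e.g. an in-memory fs */
        return write_cache_pages(mapping, wbc, demo_writepage_cb, mapping);
    }
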
H A Dutil.c330 unsigned long mapping; __page_rmapping() local
332 mapping = (unsigned long)page->mapping; __page_rmapping()
333 mapping &= ~PAGE_MAPPING_FLAGS; __page_rmapping()
335 return (void *)mapping; __page_rmapping()
338 /* Neutral page->mapping pointer to address_space or anon_vma or other */ page_rmapping()
347 unsigned long mapping; page_anon_vma() local
350 mapping = (unsigned long)page->mapping; page_anon_vma()
351 if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) page_anon_vma()
358 unsigned long mapping; page_mapping() local
371 mapping = (unsigned long)page->mapping; page_mapping()
372 if (mapping & PAGE_MAPPING_FLAGS) page_mapping()
374 return page->mapping; page_mapping()
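
__page_rmapping(), page_anon_vma() and page_mapping() above all decode the same encoding: the low PAGE_MAPPING_FLAGS bits of page->mapping say whether the pointer really is an address_space (file page), an anon_vma, or KSM private data. A simplified sketch of that decoding; the real page_mapping() additionally redirects swap-cache pages to the swap address_space, which is omitted here.

    static bool demo_page_has_anon_vma(struct page *page)
    {
        unsigned long mapping = (unsigned long)page->mapping;

        /* same test page_anon_vma() uses: anonymous, but not a KSM page */
        return (mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
    }

    static struct address_space *demo_file_mapping(struct page *page)
    {
        unsigned long mapping = (unsigned long)page->mapping;

        if (mapping & PAGE_MAPPING_FLAGS)   /* anon or KSM: not an address_space */
            return NULL;
        return page->mapping;
    }
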
H A Dworkingset.c207 * @mapping: address space the page was backing
210 * Returns a shadow entry to be stored in @mapping->page_tree in place
213 void *workingset_eviction(struct address_space *mapping, struct page *page) workingset_eviction() argument
276 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ count_shadow_nodes()
309 struct address_space *mapping; shadow_lru_isolate() local
316 * the shadow node LRU under the mapping->tree_lock and the shadow_lru_isolate()
321 * We can then safely transition to the mapping->tree_lock to shadow_lru_isolate()
327 mapping = node->private_data; shadow_lru_isolate()
330 if (!spin_trylock(&mapping->tree_lock)) { shadow_lru_isolate()
354 BUG_ON(!mapping->nrshadows); shadow_lru_isolate()
355 mapping->nrshadows--; shadow_lru_isolate()
360 if (!__radix_tree_delete_node(&mapping->page_tree, node)) shadow_lru_isolate()
363 spin_unlock(&mapping->tree_lock); shadow_lru_isolate()
378 /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ scan_shadow_nodes()
395 * mapping->tree_lock.
H A Dcleancache.c189 pool_id = page->mapping->host->i_sb->cleancache_poolid; __cleancache_get_page()
193 if (cleancache_get_key(page->mapping->host, &key) < 0) __cleancache_get_page()
227 pool_id = page->mapping->host->i_sb->cleancache_poolid; __cleancache_put_page()
229 cleancache_get_key(page->mapping->host, &key) >= 0) { __cleancache_put_page()
244 void __cleancache_invalidate_page(struct address_space *mapping, __cleancache_invalidate_page() argument
247 /* careful... page->mapping is NULL sometimes when this is called */ __cleancache_invalidate_page()
248 int pool_id = mapping->host->i_sb->cleancache_poolid; __cleancache_invalidate_page()
256 if (cleancache_get_key(mapping->host, &key) >= 0) { __cleancache_invalidate_page()
274 void __cleancache_invalidate_inode(struct address_space *mapping) __cleancache_invalidate_inode() argument
276 int pool_id = mapping->host->i_sb->cleancache_poolid; __cleancache_invalidate_inode()
282 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) __cleancache_invalidate_inode()
H A Dmigrate.c301 * Replace the page in the mapping.
304 * 1 for anonymous pages without a mapping
305 * 2 for pages with a mapping
306 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
308 int migrate_page_move_mapping(struct address_space *mapping, migrate_page_move_mapping() argument
316 if (!mapping) { migrate_page_move_mapping()
317 /* Anonymous page without mapping */ migrate_page_move_mapping()
323 spin_lock_irq(&mapping->tree_lock); migrate_page_move_mapping()
325 pslot = radix_tree_lookup_slot(&mapping->page_tree, migrate_page_move_mapping()
330 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { migrate_page_move_mapping()
331 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
336 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
342 * buffers using trylock before the mapping is moved. If the mapping migrate_page_move_mapping()
344 * the mapping back due to an elevated page count, we would have to migrate_page_move_mapping()
350 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
388 spin_unlock_irq(&mapping->tree_lock); migrate_page_move_mapping()
397 int migrate_huge_page_move_mapping(struct address_space *mapping, migrate_huge_page_move_mapping() argument
403 if (!mapping) { migrate_huge_page_move_mapping()
409 spin_lock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
411 pslot = radix_tree_lookup_slot(&mapping->page_tree, migrate_huge_page_move_mapping()
416 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { migrate_huge_page_move_mapping()
417 spin_unlock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
422 spin_unlock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
432 spin_unlock_irq(&mapping->tree_lock); migrate_huge_page_move_mapping()
563 int migrate_page(struct address_space *mapping, migrate_page() argument
571 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); migrate_page()
587 int buffer_migrate_page(struct address_space *mapping, buffer_migrate_page() argument
594 return migrate_page(mapping, newpage, page, mode); buffer_migrate_page()
598 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); buffer_migrate_page()
644 static int writeout(struct address_space *mapping, struct page *page) writeout() argument
655 if (!mapping->a_ops->writepage) writeout()
673 rc = mapping->a_ops->writepage(page, &wbc); writeout()
685 static int fallback_migrate_page(struct address_space *mapping, fallback_migrate_page() argument
692 return writeout(mapping, page); fallback_migrate_page()
703 return migrate_page(mapping, newpage, page, mode); fallback_migrate_page()
720 struct address_space *mapping; move_to_new_page() local
731 /* Prepare mapping for the new page.*/ move_to_new_page()
733 newpage->mapping = page->mapping; move_to_new_page()
737 mapping = page_mapping(page); move_to_new_page()
738 if (!mapping) move_to_new_page()
739 rc = migrate_page(mapping, newpage, page, mode); move_to_new_page()
740 else if (mapping->a_ops->migratepage) move_to_new_page()
742 * Most pages have a mapping and most filesystems provide a move_to_new_page()
747 rc = mapping->a_ops->migratepage(mapping, move_to_new_page()
750 rc = fallback_migrate_page(mapping, newpage, page, mode); move_to_new_page()
753 newpage->mapping = NULL; move_to_new_page()
758 page->mapping = NULL; move_to_new_page()
850 * physical to virtual reverse mapping procedures. __unmap_and_move()
863 * Calling try_to_unmap() against a page->mapping==NULL page will __unmap_and_move()
871 if (!page->mapping) { __unmap_and_move()
1758 /* anon mapping, we can simply copy page->mapping to the new page: */ migrate_misplaced_transhuge_page()
1759 new_page->mapping = page->mapping; migrate_misplaced_transhuge_page()
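
As the move_to_new_page() comment above notes, most filesystems provide a ->migratepage callback, and for pages without buffer_heads the generic migrate_page() shown here is all that is needed (block-backed filesystems typically point it at buffer_migrate_page() instead). A sketch of the usual opt-in; the aops name and the omitted methods are hypothetical.

    static const struct address_space_operations demo_aops = {
        /* .readpage, .writepage, ... filesystem-specific, omitted */
        .migratepage = migrate_page,    /* relocate the page cache entry and copy page state */
    };
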
H A Dmincore.c49 static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) mincore_page() argument
55 * When tmpfs swaps out a page from a file, any process mapping that mincore_page()
57 * any other file mapping (ie. marked !present and faulted in with mincore_page()
61 if (shmem_mapping(mapping)) { mincore_page()
62 page = find_get_entry(mapping, pgoff); mincore_page()
72 page = find_get_page(mapping, pgoff); mincore_page()
74 page = find_get_page(mapping, pgoff); mincore_page()
H A Dnommu.c253 * @vma: memory mapping
744 struct address_space *mapping; add_vma_to_mm() local
756 /* add the VMA to the mapping */ add_vma_to_mm()
758 mapping = vma->vm_file->f_mapping; add_vma_to_mm()
760 i_mmap_lock_write(mapping); add_vma_to_mm()
761 flush_dcache_mmap_lock(mapping); add_vma_to_mm()
762 vma_interval_tree_insert(vma, &mapping->i_mmap); add_vma_to_mm()
763 flush_dcache_mmap_unlock(mapping); add_vma_to_mm()
764 i_mmap_unlock_write(mapping); add_vma_to_mm()
812 struct address_space *mapping; delete_vma_from_mm() local
829 /* remove the VMA from the mapping */ delete_vma_from_mm()
831 mapping = vma->vm_file->f_mapping; delete_vma_from_mm()
833 i_mmap_lock_write(mapping); delete_vma_from_mm()
834 flush_dcache_mmap_lock(mapping); delete_vma_from_mm()
835 vma_interval_tree_remove(vma, &mapping->i_mmap); delete_vma_from_mm()
836 flush_dcache_mmap_unlock(mapping); delete_vma_from_mm()
837 i_mmap_unlock_write(mapping); delete_vma_from_mm()
945 * determine whether a mapping should be permitted and, if so, what sort of
946 * mapping we're capable of supporting
1050 /* we don't permit a private writable mapping to be validate_mmap_request()
1111 * we've determined that we can make the mapping, now translate what we
1130 /* overlay a shareable mapping on the backing device or inode determine_vm_flags()
1149 * set up a shared mapping on a file (the driver or filesystem provides and
1171 * set up a private mapping or an anonymous shared mapping
1182 /* invoke the file's mapping function so that it can keep track of do_mmap_private()
1203 /* allocate some memory to hold the mapping do_mmap_private()
1272 * handle mapping creation for uClinux
1292 /* decide whether we should attempt the mapping, and if so what sort of do_mmap_pgoff()
1293 * mapping */ do_mmap_pgoff()
1305 /* we've determined that we can make the mapping, now translate what we do_mmap_pgoff()
1309 /* we're going to need to record the mapping */ do_mmap_pgoff()
1334 * mmap() calls that overlap with our proposed mapping do_mmap_pgoff()
1372 /* new mapping is not a subset of the region */ do_mmap_pgoff()
1408 /* obtain the address at which to make a shared mapping do_mmap_pgoff()
1410 * tell us the location of a shared mapping do_mmap_pgoff()
1421 * the mapping so we'll have to attempt to copy do_mmap_pgoff()
1437 /* set up the mapping do_mmap_pgoff()
1453 /* okay... we have a mapping; now we have to register it */ do_mmap_pgoff()
1462 * mapping of it is made */ do_mmap_pgoff()
1662 * release a mapping
1789 * expand (or shrink) an existing mapping, potentially moving it at the same
1792 * under NOMMU conditions, we only permit changing a mapping's size, and only
1896 void unmap_mapping_range(struct address_space *mapping, unmap_mapping_range() argument
1905 * mapping. 0 means there is enough memory for the allocation to
2020 /* don't overrun this mapping */ __access_remote_vm()
/linux-4.1.27/fs/hpfs/
H A Dfile.c122 static int hpfs_readpages(struct file *file, struct address_space *mapping, hpfs_readpages() argument
125 return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); hpfs_readpages()
128 static int hpfs_writepages(struct address_space *mapping, hpfs_writepages() argument
131 return mpage_writepages(mapping, wbc, hpfs_get_block); hpfs_writepages()
134 static void hpfs_write_failed(struct address_space *mapping, loff_t to) hpfs_write_failed() argument
136 struct inode *inode = mapping->host; hpfs_write_failed()
148 static int hpfs_write_begin(struct file *file, struct address_space *mapping, hpfs_write_begin() argument
155 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, hpfs_write_begin()
157 &hpfs_i(mapping->host)->mmu_private); hpfs_write_begin()
159 hpfs_write_failed(mapping, pos + len); hpfs_write_begin()
164 static int hpfs_write_end(struct file *file, struct address_space *mapping, hpfs_write_end() argument
168 struct inode *inode = mapping->host; hpfs_write_end()
170 err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata); hpfs_write_end()
172 hpfs_write_failed(mapping, pos + len); hpfs_write_end()
182 static sector_t _hpfs_bmap(struct address_space *mapping, sector_t block) _hpfs_bmap() argument
184 return generic_block_bmap(mapping,block,hpfs_get_block); _hpfs_bmap()
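
hpfs_write_failed() above is one instance of a pattern shared by most cont_write_begin() users: if write_begin/write_end fails after blocks may already have been instantiated beyond i_size, drop the stale page cache and trim the blocks back. A generic sketch, with a hypothetical fs_truncate_blocks() standing in for the filesystem's own truncation helper.

    static void demo_write_failed(struct address_space *mapping, loff_t to)
    {
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
            truncate_pagecache(inode, inode->i_size);
            fs_truncate_blocks(inode, inode->i_size);   /* hypothetical helper */
        }
    }
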
/linux-4.1.27/include/trace/events/
H A Dfilemap.h29 __entry->i_ino = page->mapping->host->i_ino;
31 if (page->mapping->host->i_sb)
32 __entry->s_dev = page->mapping->host->i_sb->s_dev;
34 __entry->s_dev = page->mapping->host->i_rdev;
/linux-4.1.27/arch/ia64/include/asm/
H A Dswiotlb.h4 #include <linux/dma-mapping.h>
H A Dcacheflush.h34 #define flush_dcache_mmap_lock(mapping) do { } while (0)
35 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
/linux-4.1.27/arch/arm/include/asm/
H A Didmap.h7 /* Tag a function as requiring to be executed via an identity mapping. */
H A Ddevice.h18 struct dma_iommu_mapping *mapping; member in struct:dev_archdata
32 #define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
H A Ddma-iommu.h30 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
33 struct dma_iommu_mapping *mapping);
/linux-4.1.27/arch/arm/mm/
H A Dflush.c187 void __flush_dcache_page(struct address_space *mapping, struct page *page) __flush_dcache_page() argument
190 * Writeback any data associated with the kernel mapping of this __flush_dcache_page()
192 * coherent with the kernels mapping. __flush_dcache_page()
221 if (mapping && cache_is_vipt_aliasing()) __flush_dcache_page()
226 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) __flush_dcache_aliases() argument
236 * - aliasing VIPT: we only need to find one mapping of this page. __flush_dcache_aliases()
240 flush_dcache_mmap_lock(mapping); __flush_dcache_aliases()
241 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { __flush_dcache_aliases()
254 flush_dcache_mmap_unlock(mapping); __flush_dcache_aliases()
262 struct address_space *mapping; __sync_icache_dcache() local
273 mapping = page_mapping(page); __sync_icache_dcache()
275 mapping = NULL; __sync_icache_dcache()
278 __flush_dcache_page(mapping, page); __sync_icache_dcache()
286 * Ensure cache coherency between kernel mapping and userspace mapping
306 struct address_space *mapping; flush_dcache_page() local
315 mapping = page_mapping(page); flush_dcache_page()
318 mapping && !page_mapped(page)) flush_dcache_page()
321 __flush_dcache_page(mapping, page); flush_dcache_page()
322 if (mapping && cache_is_vivt()) flush_dcache_page()
323 __flush_dcache_aliases(mapping, page); flush_dcache_page()
324 else if (mapping) flush_dcache_page()
332 * Ensure cache coherency for the kernel mapping of this page. We can
343 struct address_space *mapping; flush_kernel_dcache_page() local
345 mapping = page_mapping(page); flush_kernel_dcache_page()
347 if (!mapping || mapping_mapped(mapping)) { flush_kernel_dcache_page()
382 * Write back and invalidate userspace mapping. __flush_anon_page()
397 * Invalidate kernel mapping. No data should be contained __flush_anon_page()
398 * in this mapping of the page. FIXME: this is overkill __flush_anon_page()
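
The flush_dcache_page()/__flush_dcache_aliases() machinery above exists because, on VIVT and aliasing-VIPT caches, the kernel mapping and user mappings of the same page can hit different cache lines. The driver-side rule it implies: after writing a potentially user-mapped page through its kernel mapping, call flush_dcache_page(). A minimal sketch; the function and parameter names are illustrative.

    static void demo_fill_page(struct page *page, const void *src, size_t len)
    {
        void *dst = kmap_atomic(page);

        memcpy(dst, src, min(len, (size_t)PAGE_SIZE));
        kunmap_atomic(dst);
        flush_dcache_page(page);    /* make the update visible through user mappings */
    }
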
H A Ddma-mapping.c2 * linux/arch/arm/mm/dma-mapping.c
10 * DMA uncached mapping support.
21 #include <linux/dma-mapping.h>
430 * Clear previous low-memory mapping to ensure that the dma_contiguous_remap()
694 * Create userspace mapping for the DMA-coherent memory.
724 * Free a buffer as defined by the above mapping.
833 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
1020 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
1022 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, __alloc_iova() argument
1028 size_t mapping_size = mapping->bits << PAGE_SHIFT; __alloc_iova()
1039 spin_lock_irqsave(&mapping->lock, flags); __alloc_iova()
1040 for (i = 0; i < mapping->nr_bitmaps; i++) { __alloc_iova()
1041 start = bitmap_find_next_zero_area(mapping->bitmaps[i], __alloc_iova()
1042 mapping->bits, 0, count, align); __alloc_iova()
1044 if (start > mapping->bits) __alloc_iova()
1047 bitmap_set(mapping->bitmaps[i], start, count); __alloc_iova()
1052 * No unused range found. Try to extend the existing mapping __alloc_iova()
1056 if (i == mapping->nr_bitmaps) { __alloc_iova()
1057 if (extend_iommu_mapping(mapping)) { __alloc_iova()
1058 spin_unlock_irqrestore(&mapping->lock, flags); __alloc_iova()
1062 start = bitmap_find_next_zero_area(mapping->bitmaps[i], __alloc_iova()
1063 mapping->bits, 0, count, align); __alloc_iova()
1065 if (start > mapping->bits) { __alloc_iova()
1066 spin_unlock_irqrestore(&mapping->lock, flags); __alloc_iova()
1070 bitmap_set(mapping->bitmaps[i], start, count); __alloc_iova()
1072 spin_unlock_irqrestore(&mapping->lock, flags); __alloc_iova()
1074 iova = mapping->base + (mapping_size * i); __alloc_iova()
1080 static inline void __free_iova(struct dma_iommu_mapping *mapping, __free_iova() argument
1084 size_t mapping_size = mapping->bits << PAGE_SHIFT; __free_iova()
1092 bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size; __free_iova()
1093 BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions); __free_iova()
1095 bitmap_base = mapping->base + mapping_size * bitmap_index; __free_iova()
1110 spin_lock_irqsave(&mapping->lock, flags); __free_iova()
1111 bitmap_clear(mapping->bitmaps[bitmap_index], start, count); __free_iova()
1112 spin_unlock_irqrestore(&mapping->lock, flags); __free_iova()
1223 * Create a CPU mapping for a specified pages
1234 * Create a mapping in device IO address space for specified pages
1239 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); __iommu_create_mapping() local
1244 dma_addr = __alloc_iova(mapping, size); __iommu_create_mapping()
1259 ret = iommu_map(mapping->domain, iova, phys, len, __iommu_create_mapping()
1268 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); __iommu_create_mapping()
1269 __free_iova(mapping, dma_addr, size); __iommu_create_mapping()
1275 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); __iommu_remove_mapping() local
1284 iommu_unmap(mapping->domain, iova, size); __iommu_remove_mapping()
1285 __free_iova(mapping, iova, size); __iommu_remove_mapping()
1425 * free a page as defined by the above mapping.
1497 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); __map_sg_chunk() local
1507 iova_base = iova = __alloc_iova(mapping, size); __map_sg_chunk()
1521 ret = iommu_map(mapping->domain, iova, phys, len, prot); __map_sg_chunk()
1531 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); __map_sg_chunk()
1532 __free_iova(mapping, iova_base, size); __map_sg_chunk()
1718 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); arm_coherent_iommu_map_page() local
1722 dma_addr = __alloc_iova(mapping, len); arm_coherent_iommu_map_page()
1728 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); arm_coherent_iommu_map_page()
1734 __free_iova(mapping, dma_addr, len); arm_coherent_iommu_map_page()
1771 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); arm_coherent_iommu_unmap_page() local
1779 iommu_unmap(mapping->domain, iova, len); arm_coherent_iommu_unmap_page()
1780 __free_iova(mapping, iova, len); arm_coherent_iommu_unmap_page()
1796 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); arm_iommu_unmap_page() local
1798 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); arm_iommu_unmap_page()
1808 iommu_unmap(mapping->domain, iova, len); arm_iommu_unmap_page()
1809 __free_iova(mapping, iova, len); arm_iommu_unmap_page()
1815 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); arm_iommu_sync_single_for_cpu() local
1817 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); arm_iommu_sync_single_for_cpu()
1829 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); arm_iommu_sync_single_for_device() local
1831 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); arm_iommu_sync_single_for_device()
1880 * Creates a mapping structure which holds information about used/unused
1882 * mapping with IOMMU aware functions.
1884 * The client device need to be attached to the mapping with
1892 struct dma_iommu_mapping *mapping; arm_iommu_create_mapping() local
1908 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); arm_iommu_create_mapping()
1909 if (!mapping) arm_iommu_create_mapping()
1912 mapping->bitmap_size = bitmap_size; arm_iommu_create_mapping()
1913 mapping->bitmaps = kzalloc(extensions * sizeof(unsigned long *), arm_iommu_create_mapping()
1915 if (!mapping->bitmaps) arm_iommu_create_mapping()
1918 mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL); arm_iommu_create_mapping()
1919 if (!mapping->bitmaps[0]) arm_iommu_create_mapping()
1922 mapping->nr_bitmaps = 1; arm_iommu_create_mapping()
1923 mapping->extensions = extensions; arm_iommu_create_mapping()
1924 mapping->base = base; arm_iommu_create_mapping()
1925 mapping->bits = BITS_PER_BYTE * bitmap_size; arm_iommu_create_mapping()
1927 spin_lock_init(&mapping->lock); arm_iommu_create_mapping()
1929 mapping->domain = iommu_domain_alloc(bus); arm_iommu_create_mapping()
1930 if (!mapping->domain) arm_iommu_create_mapping()
1933 kref_init(&mapping->kref); arm_iommu_create_mapping()
1934 return mapping; arm_iommu_create_mapping()
1936 kfree(mapping->bitmaps[0]); arm_iommu_create_mapping()
1938 kfree(mapping->bitmaps); arm_iommu_create_mapping()
1940 kfree(mapping); arm_iommu_create_mapping()
1949 struct dma_iommu_mapping *mapping = release_iommu_mapping() local
1952 iommu_domain_free(mapping->domain); release_iommu_mapping()
1953 for (i = 0; i < mapping->nr_bitmaps; i++) release_iommu_mapping()
1954 kfree(mapping->bitmaps[i]); release_iommu_mapping()
1955 kfree(mapping->bitmaps); release_iommu_mapping()
1956 kfree(mapping); release_iommu_mapping()
1959 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping) extend_iommu_mapping() argument
1963 if (mapping->nr_bitmaps >= mapping->extensions) extend_iommu_mapping()
1966 next_bitmap = mapping->nr_bitmaps; extend_iommu_mapping()
1967 mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size, extend_iommu_mapping()
1969 if (!mapping->bitmaps[next_bitmap]) extend_iommu_mapping()
1972 mapping->nr_bitmaps++; extend_iommu_mapping()
1977 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) arm_iommu_release_mapping() argument
1979 if (mapping) arm_iommu_release_mapping()
1980 kref_put(&mapping->kref, release_iommu_mapping); arm_iommu_release_mapping()
1985 struct dma_iommu_mapping *mapping) __arm_iommu_attach_device()
1989 err = iommu_attach_device(mapping->domain, dev); __arm_iommu_attach_device()
1993 kref_get(&mapping->kref); __arm_iommu_attach_device()
1994 to_dma_iommu_mapping(dev) = mapping; __arm_iommu_attach_device()
2003 * @mapping: io address space mapping structure (returned from
2006 * Attaches specified io address space mapping to the provided device.
2011 * mapping.
2014 struct dma_iommu_mapping *mapping) arm_iommu_attach_device()
2018 err = __arm_iommu_attach_device(dev, mapping); arm_iommu_attach_device()
2029 struct dma_iommu_mapping *mapping; __arm_iommu_detach_device() local
2031 mapping = to_dma_iommu_mapping(dev); __arm_iommu_detach_device()
2032 if (!mapping) { __arm_iommu_detach_device()
2037 iommu_detach_device(mapping->domain, dev); __arm_iommu_detach_device()
2038 kref_put(&mapping->kref, release_iommu_mapping); __arm_iommu_detach_device()
2066 struct dma_iommu_mapping *mapping; arm_setup_iommu_dma_ops() local
2071 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); arm_setup_iommu_dma_ops()
2072 if (IS_ERR(mapping)) { arm_setup_iommu_dma_ops()
2073 pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", arm_setup_iommu_dma_ops()
2078 if (__arm_iommu_attach_device(dev, mapping)) { arm_setup_iommu_dma_ops()
2081 arm_iommu_release_mapping(mapping); arm_setup_iommu_dma_ops()
2090 struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev); arm_teardown_iommu_dma_ops() local
2092 if (!mapping) arm_teardown_iommu_dma_ops()
2096 arm_iommu_release_mapping(mapping); arm_teardown_iommu_dma_ops()
1984 __arm_iommu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping) __arm_iommu_attach_device() argument
2013 arm_iommu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping) arm_iommu_attach_device() argument
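
Taken together, the functions above form the ARM IOMMU mapping life cycle: arm_iommu_create_mapping() sets up the IO virtual address space and its bitmap, arm_iommu_attach_device() binds a device to it, and arm_iommu_detach_device()/arm_iommu_release_mapping() tear it down. A sketch of that sequence; the 0x80000000 base and 64MB size are arbitrary illustrative values.

    static int demo_setup_iommu(struct device *dev,
                                struct dma_iommu_mapping **out)
    {
        struct dma_iommu_mapping *mapping;
        int ret;

        mapping = arm_iommu_create_mapping(&platform_bus_type,
                                           0x80000000, SZ_64M);
        if (IS_ERR(mapping))
            return PTR_ERR(mapping);

        ret = arm_iommu_attach_device(dev, mapping);
        if (ret) {
            arm_iommu_release_mapping(mapping);
            return ret;
        }
        *out = mapping;
        return 0;
    }

    static void demo_teardown_iommu(struct device *dev,
                                    struct dma_iommu_mapping *mapping)
    {
        arm_iommu_detach_device(dev);
        arm_iommu_release_mapping(mapping);
    }
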
H A Dmm.h51 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
63 /* empty mapping */
66 /* mapping type (attributes) for permanent static mappings */
H A Dfault-armv.c132 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, make_coherent() argument
148 flush_dcache_mmap_lock(mapping); make_coherent()
149 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { make_coherent()
162 flush_dcache_mmap_unlock(mapping); make_coherent()
184 struct address_space *mapping; update_mmu_cache() local
198 mapping = page_mapping(page); update_mmu_cache()
200 __flush_dcache_page(mapping, page); update_mmu_cache()
201 if (mapping) { update_mmu_cache()
203 make_coherent(mapping, vma, addr, ptep, pfn); update_mmu_cache()
H A Dioremap.c19 * two 2GB chunks and mapping only one at a time into processor memory.
195 * Remove and free any PTE-based mapping, and remap_area_sections()
196 * sync the current kernel mapping. remap_area_sections()
227 * Remove and free any PTE-based mapping, and remap_area_supersections()
228 * sync the current kernel mapping. remap_area_supersections()
280 * Page align the mapping size, taking account of any offset. __arm_ioremap_pfn_caller()
285 * Try to reuse one of the static mapping whenever possible. __arm_ioremap_pfn_caller()
358 * have to convert them into an offset in a page-aligned mapping, but the
408 /* If this is a static mapping, we must leave it alone */ __iounmap()
420 * If this is a section based mapping we need to handle it __iounmap()
H A Dcopypage-v6.c31 * attack the kernel's existing mapping of these pages.
47 * attack the kernel's existing mapping of this page.
57 * Discard data in the kernel mapping for the new page.
H A Didmap.c118 * In order to soft-boot, we need to switch to a 1:1 mapping for the
124 /* Switch to the identity mapping. */ setup_mm_for_reboot()
130 * We don't have a clean ASID for the identity mapping, which setup_mm_for_reboot()
H A Dcache-xsc3l2.c86 * using virtual addresses only, we must put a mapping l2_map_va()
107 vaddr = -1; /* to force the first mapping */ xsc3_l2_inv_range()
146 vaddr = -1; /* to force the first mapping */ xsc3_l2_clean_range()
189 vaddr = -1; /* to force the first mapping */ xsc3_l2_flush_range()
/linux-4.1.27/arch/nios2/mm/
H A Dcacheflush.c89 static void flush_aliases(struct address_space *mapping, struct page *page) flush_aliases() argument
97 flush_dcache_mmap_lock(mapping); flush_aliases()
98 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { flush_aliases()
110 flush_dcache_mmap_unlock(mapping); flush_aliases()
176 void __flush_dcache_page(struct address_space *mapping, struct page *page) __flush_dcache_page() argument
179 * Writeback any data associated with the kernel mapping of this __flush_dcache_page()
181 * coherent with the kernels mapping. __flush_dcache_page()
190 struct address_space *mapping; flush_dcache_page() local
199 mapping = page_mapping(page); flush_dcache_page()
202 if (mapping && !mapping_mapped(mapping)) { flush_dcache_page()
205 __flush_dcache_page(mapping, page); flush_dcache_page()
206 if (mapping) { flush_dcache_page()
208 flush_aliases(mapping, page); flush_dcache_page()
221 struct address_space *mapping; update_mmu_cache() local
234 mapping = page_mapping(page); update_mmu_cache()
236 __flush_dcache_page(mapping, page); update_mmu_cache()
238 if(mapping) update_mmu_cache()
240 flush_aliases(mapping, page); update_mmu_cache()
/linux-4.1.27/drivers/sh/clk/
H A Dcore.c339 struct clk_mapping *mapping = clk->mapping; clk_establish_mapping() local
344 if (!mapping) { clk_establish_mapping()
348 * dummy mapping for root clocks with no specified ranges clk_establish_mapping()
351 clk->mapping = &dummy_mapping; clk_establish_mapping()
356 * If we're on a child clock and it provides no mapping of its clk_establish_mapping()
357 * own, inherit the mapping from its root clock. clk_establish_mapping()
360 mapping = clkp->mapping; clk_establish_mapping()
361 BUG_ON(!mapping); clk_establish_mapping()
365 * Establish initial mapping. clk_establish_mapping()
367 if (!mapping->base && mapping->phys) { clk_establish_mapping()
368 kref_init(&mapping->ref); clk_establish_mapping()
370 mapping->base = ioremap_nocache(mapping->phys, mapping->len); clk_establish_mapping()
371 if (unlikely(!mapping->base)) clk_establish_mapping()
373 } else if (mapping->base) { clk_establish_mapping()
375 * Bump the refcount for an existing mapping clk_establish_mapping()
377 kref_get(&mapping->ref); clk_establish_mapping()
380 clk->mapping = mapping; clk_establish_mapping()
382 clk->mapped_reg = clk->mapping->base; clk_establish_mapping()
383 clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys; clk_establish_mapping()
389 struct clk_mapping *mapping; clk_destroy_mapping() local
391 mapping = container_of(kref, struct clk_mapping, ref); clk_destroy_mapping()
393 iounmap(mapping->base); clk_destroy_mapping()
398 struct clk_mapping *mapping = clk->mapping; clk_teardown_mapping() local
401 if (mapping == &dummy_mapping) clk_teardown_mapping()
404 kref_put(&mapping->ref, clk_destroy_mapping); clk_teardown_mapping()
405 clk->mapping = NULL; clk_teardown_mapping()
/linux-4.1.27/arch/m32r/include/asm/
H A Dcacheflush.h17 #define flush_dcache_mmap_lock(mapping) do { } while (0)
18 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
39 #define flush_dcache_mmap_lock(mapping) do { } while (0)
40 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
53 #define flush_dcache_mmap_lock(mapping) do { } while (0)
54 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
/linux-4.1.27/fs/gfs2/
H A Dmeta_io.h43 static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) gfs2_mapping2sbd() argument
45 struct inode *inode = mapping->host; gfs2_mapping2sbd()
46 if (mapping->a_ops == &gfs2_meta_aops) gfs2_mapping2sbd()
47 return (((struct gfs2_glock *)mapping) - 1)->gl_sbd; gfs2_mapping2sbd()
48 else if (mapping->a_ops == &gfs2_rgrp_aops) gfs2_mapping2sbd()
49 return container_of(mapping, struct gfs2_sbd, sd_aspace); gfs2_mapping2sbd()
H A Daops.c100 struct inode *inode = page->mapping->host; gfs2_writepage_common()
114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); gfs2_writepage_common()
156 struct inode *inode = page->mapping->host; __gfs2_jdata_writepage()
181 struct inode *inode = page->mapping->host; gfs2_jdata_writepage()
209 * @mapping: The mapping to write
214 static int gfs2_writepages(struct address_space *mapping, gfs2_writepages() argument
217 return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); gfs2_writepages()
222 * @mapping: The mapping
231 static int gfs2_write_jdata_pagevec(struct address_space *mapping, gfs2_write_jdata_pagevec() argument
237 struct inode *inode = mapping->host; gfs2_write_jdata_pagevec()
252 * invalidated (changing page->mapping to NULL), or gfs2_write_jdata_pagevec()
254 * mapping. However, page->index will not change gfs2_write_jdata_pagevec()
270 if (unlikely(page->mapping != mapping)) { gfs2_write_jdata_pagevec()
334 * @mapping: The mapping to write
344 static int gfs2_write_cache_jdata(struct address_space *mapping, gfs2_write_cache_jdata() argument
361 writeback_index = mapping->writeback_index; /* prev offset */ gfs2_write_cache_jdata()
382 tag_pages_for_writeback(mapping, index, end); gfs2_write_cache_jdata()
385 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, gfs2_write_cache_jdata()
390 ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index); gfs2_write_cache_jdata()
412 mapping->writeback_index = done_index; gfs2_write_cache_jdata()
420 * @mapping: The mapping to write
425 static int gfs2_jdata_writepages(struct address_space *mapping, gfs2_jdata_writepages() argument
428 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_jdata_writepages()
429 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); gfs2_jdata_writepages()
432 ret = gfs2_write_cache_jdata(mapping, wbc); gfs2_jdata_writepages()
435 ret = gfs2_write_cache_jdata(mapping, wbc); gfs2_jdata_writepages()
497 struct gfs2_inode *ip = GFS2_I(page->mapping->host); __gfs2_readpage()
498 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); __gfs2_readpage()
526 struct address_space *mapping = page->mapping; gfs2_readpage() local
527 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_readpage()
538 if (page->mapping == mapping && !PageUptodate(page)) gfs2_readpage()
562 struct address_space *mapping = ip->i_inode.i_mapping; gfs2_internal_read() local
574 page = read_cache_page(mapping, index, __gfs2_readpage, NULL); gfs2_internal_read()
603 static int gfs2_readpages(struct file *file, struct address_space *mapping, gfs2_readpages() argument
606 struct inode *inode = mapping->host; gfs2_readpages()
617 ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map); gfs2_readpages()
629 * @mapping: The mapping in which to write
639 static int gfs2_write_begin(struct file *file, struct address_space *mapping, gfs2_write_begin() argument
643 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_write_begin()
644 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); gfs2_write_begin()
702 page = grab_cache_page_write_begin(mapping, index, flags); gfs2_write_begin()
853 * @mapping: The address space to write to
867 static int gfs2_write_end(struct file *file, struct address_space *mapping, gfs2_write_end() argument
871 struct inode *inode = page->mapping->host; gfs2_write_end()
897 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); gfs2_write_end()
939 * @mapping: Address space info
945 static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock) gfs2_bmap() argument
947 struct gfs2_inode *ip = GFS2_I(mapping->host); gfs2_bmap()
957 dblock = generic_block_bmap(mapping, lblock, gfs2_block_map); gfs2_bmap()
989 struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); gfs2_invalidatepage()
1046 struct address_space *mapping = inode->i_mapping; gfs2_direct_IO() local
1078 * the first place, mapping->nr_pages will always be zero. gfs2_direct_IO()
1080 if (mapping->nrpages) { gfs2_direct_IO()
1090 rv = filemap_write_and_wait_range(mapping, lstart, end); gfs2_direct_IO()
1094 truncate_inode_pages_range(mapping, lstart, end); gfs2_direct_IO()
1118 struct address_space *mapping = page->mapping; gfs2_releasepage() local
1119 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); gfs2_releasepage()
H A Dglops.c35 fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n", gfs2_ail_error()
37 bh->b_page->mapping, bh->b_page->flags); gfs2_ail_error()
38 fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n", gfs2_ail_error()
143 struct address_space *mapping = &sdp->sd_aspace; rgrp_go_sync() local
152 filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); rgrp_go_sync()
153 error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); rgrp_go_sync()
154 mapping_set_error(mapping, error); rgrp_go_sync()
177 struct address_space *mapping = &sdp->sd_aspace; rgrp_go_inval() local
181 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); rgrp_go_inval()
216 struct address_space *mapping = ip->i_inode.i_mapping; inode_go_sync() local
217 filemap_fdatawrite(mapping); inode_go_sync()
218 error = filemap_fdatawait(mapping); inode_go_sync()
219 mapping_set_error(mapping, error); inode_go_sync()
225 * Writeback of the data mapping may cause the dirty flag to be set inode_go_sync()
250 struct address_space *mapping = gfs2_glock2aspace(gl); inode_go_inval() local
251 truncate_inode_pages(mapping, 0); inode_go_inval()
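
The rgrp_go_sync()/inode_go_sync()/inode_go_inval() fragments above follow a generic "sync then invalidate" pattern that is not gfs2-specific: start writeback on the mapping, wait for it, record any error on the mapping so a later fsync() sees it, then drop the now-stale page cache. Condensed sketch:

    static void demo_sync_and_invalidate(struct address_space *mapping)
    {
        int error;

        filemap_fdatawrite(mapping);        /* start writeback of dirty pages */
        error = filemap_fdatawait(mapping); /* wait for it to complete */
        mapping_set_error(mapping, error);  /* preserve -EIO/-ENOSPC for fsync() */
        truncate_inode_pages(mapping, 0);   /* invalidate the cached pages */
    }
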
/linux-4.1.27/fs/9p/
H A Dvfs_addr.c54 struct inode *inode = page->mapping->host; v9fs_fid_readpage()
105 * @mapping: the address space
111 static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, v9fs_vfs_readpages() argument
117 inode = mapping->host; v9fs_vfs_readpages()
120 ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); v9fs_vfs_readpages()
124 ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp); v9fs_vfs_readpages()
162 struct inode *inode = page->mapping->host; v9fs_vfs_writepage_locked()
203 mapping_set_error(page->mapping, retval); v9fs_vfs_writepage()
220 struct inode *inode = page->mapping->host; v9fs_launder_page()
267 static int v9fs_write_begin(struct file *filp, struct address_space *mapping, v9fs_write_begin() argument
275 struct inode *inode = mapping->host; v9fs_write_begin()
278 p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); v9fs_write_begin()
282 page = grab_cache_page_write_begin(mapping, index, flags); v9fs_write_begin()
303 static int v9fs_write_end(struct file *filp, struct address_space *mapping, v9fs_write_end() argument
308 struct inode *inode = page->mapping->host; v9fs_write_end()
310 p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); v9fs_write_end()
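
v9fs_write_begin() above shows the minimal shape of a ->write_begin implementation: convert the file position to a page index, pin a locked page with grab_cache_page_write_begin(), and hand it back through *pagep. A stripped-down sketch; a real filesystem must also bring the page uptodate when the write does not cover it completely.

    static int demo_write_begin(struct file *filp, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        struct page *page;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
            return -ENOMEM;

        *pagep = page;  /* returned locked; ->write_end unlocks and releases it */
        return 0;
    }
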
H A Dcache.h49 struct address_space *mapping,
74 struct address_space *mapping, v9fs_readpages_from_fscache()
78 return __v9fs_readpages_from_fscache(inode, mapping, pages, v9fs_readpages_from_fscache()
130 struct address_space *mapping, v9fs_readpages_from_fscache()
73 v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) v9fs_readpages_from_fscache() argument
129 v9fs_readpages_from_fscache(struct inode *inode, struct address_space *mapping, struct list_head *pages, unsigned *nr_pages) v9fs_readpages_from_fscache() argument
/linux-4.1.27/arch/sparc/lib/
H A Diomap.c8 /* Create a virtual mapping cookie for an IO port range */ ioport_map()
/linux-4.1.27/arch/ia64/kernel/
H A Ddma-mapping.c1 #include <linux/dma-mapping.h>
H A Dpci-swiotlb.c7 #include <linux/dma-mapping.h>
/linux-4.1.27/security/selinux/include/
H A Davc_ss.h13 /* Class/perm mapping support */
H A Dnetnode.h4 * SELinux must keep a mapping of network nodes to labels/SIDs. This
5 * mapping is maintained as part of the normal policy but a fast cache is
H A Dnetport.h4 * SELinux must keep a mapping of network ports to labels/SIDs. This
5 * mapping is maintained as part of the normal policy but a fast cache is
/linux-4.1.27/include/drm/
H A Dati_pcigart.h21 struct drm_local_map mapping; member in struct:drm_ati_pcigart_info
/linux-4.1.27/include/linux/crush/
H A Dmapper.h5 * CRUSH functions for find rules and then mapping an input to an
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/
H A Dlustre_patchless_compat.h50 truncate_complete_page(struct address_space *mapping, struct page *page) truncate_complete_page() argument
52 if (page->mapping != mapping) truncate_complete_page()
56 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); truncate_complete_page()
59 account_page_cleaned(page, mapping); truncate_complete_page()
/linux-4.1.27/drivers/mtd/maps/
H A Dscx200_docflash.c32 MODULE_PARM_DESC(probe, "Probe for a BIOS mapping");
34 MODULE_PARM_DESC(size, "Size of the flash mapping");
36 MODULE_PARM_DESC(width, "Data width of the flash mapping (8/16)");
97 /* Try to use the present flash mapping if any */ init_scx200_docflash()
125 printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n"); init_scx200_docflash()
133 printk(KERN_ERR NAME ": invalid size for flash mapping\n"); init_scx200_docflash()
138 printk(KERN_ERR NAME ": invalid bus width for flash mapping\n"); init_scx200_docflash()
146 printk(KERN_ERR NAME ": unable to allocate memory for flash mapping\n"); init_scx200_docflash()
194 printk(KERN_WARNING NAME ": warning, flash mapping is smaller than flash size\n"); init_scx200_docflash()
/linux-4.1.27/fs/hfsplus/
H A Dbitmap.c23 struct address_space *mapping; hfsplus_block_allocate() local
35 mapping = sbi->alloc_file->i_mapping; hfsplus_block_allocate()
36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); hfsplus_block_allocate()
80 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, hfsplus_block_allocate()
131 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, hfsplus_block_allocate()
167 struct address_space *mapping; hfsplus_block_free() local
182 mapping = sbi->alloc_file->i_mapping; hfsplus_block_free()
184 page = read_mapping_page(mapping, pnr, NULL); hfsplus_block_free()
218 page = read_mapping_page(mapping, ++pnr, NULL); hfsplus_block_free()
H A Dinode.c34 static void hfsplus_write_failed(struct address_space *mapping, loff_t to) hfsplus_write_failed() argument
36 struct inode *inode = mapping->host; hfsplus_write_failed()
44 static int hfsplus_write_begin(struct file *file, struct address_space *mapping, hfsplus_write_begin() argument
51 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, hfsplus_write_begin()
53 &HFSPLUS_I(mapping->host)->phys_size); hfsplus_write_begin()
55 hfsplus_write_failed(mapping, pos + len); hfsplus_write_begin()
60 static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block) hfsplus_bmap() argument
62 return generic_block_bmap(mapping, block, hfsplus_get_block); hfsplus_bmap()
67 struct inode *inode = page->mapping->host; hfsplus_releasepage()
129 struct address_space *mapping = file->f_mapping; hfsplus_direct_IO() local
145 hfsplus_write_failed(mapping, end); hfsplus_direct_IO()
151 static int hfsplus_writepages(struct address_space *mapping, hfsplus_writepages() argument
154 return mpage_writepages(mapping, wbc, hfsplus_get_block); hfsplus_writepages()
/linux-4.1.27/fs/freevxfs/
H A Dvxfs_subr.c68 vxfs_get_page(struct address_space *mapping, u_long n) vxfs_get_page() argument
72 pp = read_mapping_page(mapping, n, NULL); vxfs_get_page()
165 * vxfs_bmap - perform logical to physical block mapping
166 * @mapping: logical to physical mapping to use
167 * @block: logical block (relative to @mapping).
171 * @mapping, @block pair.
180 vxfs_bmap(struct address_space *mapping, sector_t block) vxfs_bmap() argument
182 return generic_block_bmap(mapping, block, vxfs_getblk); vxfs_bmap()
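
vxfs_get_page() above wraps the generic read_mapping_page() call: read (or find) page @n of an address_space, wait for it to become uptodate, and return it with an elevated reference count. A small sketch of that pattern outside vxfs; the function name is illustrative.

    static int demo_peek_first_byte(struct address_space *mapping, pgoff_t n, u8 *out)
    {
        struct page *page = read_mapping_page(mapping, n, NULL);
        u8 *kaddr;

        if (IS_ERR(page))
            return PTR_ERR(page);

        kaddr = kmap_atomic(page);
        *out = kaddr[0];
        kunmap_atomic(kaddr);

        page_cache_release(page);   /* drop the reference read_mapping_page() took */
        return 0;
    }
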
/linux-4.1.27/arch/c6x/platforms/
H A Dmegamod-pic.c55 /* hw mux mapping */
165 * Parse the MUX mapping, if one exists.
178 int *mapping, int size) parse_priority_map()
194 mapping[i] = val; parse_priority_map()
204 int mapping[NR_MUX_OUTPUTS]; init_megamod_pic() local
232 for (i = 0; i < ARRAY_SIZE(mapping); i++) init_megamod_pic()
233 mapping[i] = IRQ_UNMAPPED; init_megamod_pic()
235 parse_priority_map(pic, mapping, ARRAY_SIZE(mapping)); init_megamod_pic()
272 /* record the mapping */ init_megamod_pic()
273 mapping[hwirq - 4] = i; init_megamod_pic()
291 if (mapping[i] != IRQ_UNMAPPED) { init_megamod_pic()
293 np->full_name, mapping[i], i + 4); init_megamod_pic()
294 set_megamod_mux(pic, mapping[i], i); init_megamod_pic()
177 parse_priority_map(struct megamod_pic *pic, int *mapping, int size) parse_priority_map() argument
/linux-4.1.27/fs/nilfs2/
H A Dpage.c62 struct address_space *mapping, nilfs_grab_buffer()
71 page = grab_cache_page(mapping, index); nilfs_grab_buffer()
178 m = page->mapping; nilfs_page_bug()
182 "mapping=%p ino=%lu\n", nilfs_page_bug()
352 page->mapping = NULL; nilfs_copy_back_pages()
355 page->mapping = dmap; nilfs_copy_back_pages()
374 * @mapping: address space with dirty pages for discarding
377 void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) nilfs_clear_dirty_pages() argument
385 while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, nilfs_clear_dirty_pages()
406 struct inode *inode = page->mapping->host; nilfs_clear_dirty_page()
460 void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) nilfs_mapping_init() argument
462 mapping->host = inode; nilfs_mapping_init()
463 mapping->flags = 0; nilfs_mapping_init()
464 mapping_set_gfp_mask(mapping, GFP_NOFS); nilfs_mapping_init()
465 mapping->private_data = NULL; nilfs_mapping_init()
466 mapping->a_ops = &empty_aops; nilfs_mapping_init()
482 struct address_space *mapping = page->mapping; __nilfs_clear_page_dirty() local
484 if (mapping) { __nilfs_clear_page_dirty()
485 spin_lock_irq(&mapping->tree_lock); __nilfs_clear_page_dirty()
487 radix_tree_tag_clear(&mapping->page_tree, __nilfs_clear_page_dirty()
490 spin_unlock_irq(&mapping->tree_lock); __nilfs_clear_page_dirty()
493 spin_unlock_irq(&mapping->tree_lock); __nilfs_clear_page_dirty()
61 nilfs_grab_buffer(struct inode *inode, struct address_space *mapping, unsigned long blkoff, unsigned long b_state) nilfs_grab_buffer() argument
H A Ddir.c90 struct address_space *mapping, nilfs_commit_chunk()
93 struct inode *dir = mapping->host; nilfs_commit_chunk()
100 copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); nilfs_commit_chunk()
112 struct inode *dir = page->mapping->host; nilfs_check_page()
187 struct address_space *mapping = dir->i_mapping; nilfs_get_page() local
188 struct page *page = read_mapping_page(mapping, n, NULL); nilfs_get_page()
420 struct address_space *mapping = page->mapping; nilfs_set_link() local
428 nilfs_commit_chunk(page, mapping, from, to); nilfs_set_link()
519 nilfs_commit_chunk(page, page->mapping, from, to); nilfs_add_link()
538 struct address_space *mapping = page->mapping; nilfs_delete_entry() local
539 struct inode *inode = mapping->host; nilfs_delete_entry()
566 nilfs_commit_chunk(page, mapping, from, to); nilfs_delete_entry()
578 struct address_space *mapping = inode->i_mapping; nilfs_make_empty() local
579 struct page *page = grab_cache_page(mapping, 0); nilfs_make_empty()
609 nilfs_commit_chunk(page, mapping, 0, chunk_size); nilfs_make_empty()
89 nilfs_commit_chunk(struct page *page, struct address_space *mapping, unsigned from, unsigned to) nilfs_commit_chunk() argument
/linux-4.1.27/drivers/mfd/
H A Dhtc-pasic3.c25 void __iomem *mapping; member in struct:pasic3_data
41 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); pasic3_write_register()
42 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); pasic3_write_register()
56 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); pasic3_read_register()
57 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); pasic3_read_register()
156 asic->mapping = ioremap(r->start, resource_size(r)); pasic3_probe()
157 if (!asic->mapping) { pasic3_probe()
194 iounmap(asic->mapping); pasic3_remove()
/linux-4.1.27/drivers/scsi/
H A Dscsi_lib_dma.c16 * scsi_dma_map - perform DMA mapping against command's sg lists
20 * is NULL, or -ENOMEM if the mapping failed.
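The scsi_lib_dma.c kernel-doc quoted above only states the return-value contract of scsi_dma_map(). A minimal sketch of how a low-level driver might consume that contract follows; it is not code from the tree, and program_segment() is a hypothetical stand-in for whatever descriptor setup a real driver performs.

#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

static void program_segment(dma_addr_t addr, unsigned int len)
{
        /* hypothetical: write one DMA descriptor for this mapped segment */
}

static int example_map_command(struct scsi_cmnd *cmd)
{
        struct scatterlist *sg;
        int i, nents;

        nents = scsi_dma_map(cmd);
        if (nents < 0)
                return SCSI_MLQUEUE_HOST_BUSY;  /* mapping failed, ask the midlayer to retry */

        scsi_for_each_sg(cmd, sg, nents, i)
                program_segment(sg_dma_address(sg), sg_dma_len(sg));

        /* the completion path later calls scsi_dma_unmap(cmd) */
        return 0;
}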
/linux-4.1.27/drivers/staging/android/uapi/
H A Dashmem.h21 /* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
25 /* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
/linux-4.1.27/arch/sh/mm/
H A Duncached.c11 * uncached segments by making use of the 1:1 mapping relationship in
12 * 512MB lowmem, others via a special uncached mapping.
H A Dioremap.c34 * have to convert them into an offset in a page-aligned mapping, but the
112 * Nothing to do if there is no translatable mapping. __iounmap()
118 * There's no VMA if it's from an early fixed mapping. __iounmap()
/linux-4.1.27/arch/sparc/include/asm/
H A Dpci_32.h6 #include <linux/dma-mapping.h>
19 /* Dynamic DMA mapping stuff.
H A Dcacheflush_32.h43 #define flush_dcache_mmap_lock(mapping) do { } while (0)
44 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
H A Dpci_64.h6 #include <linux/dma-mapping.h>
25 /* PCI IOMMU mapping bypass support. */
/linux-4.1.27/arch/sparc/include/uapi/asm/
H A Dmman.h11 #define MAP_LOCKED 0x100 /* lock the mapping */
24 #define MAP_HUGETLB 0x40000 /* create a huge page mapping */
/linux-4.1.27/arch/mips/sgi-ip27/
H A Dip27-hubio.c6 * Support functions for the HUB ASIC - mostly PIO mapping related.
20 * hub_pio_map - establish a HUB PIO mapping
22 * @hub: hub to perform PIO mapping on
23 * @widget: widget ID to perform PIO mapping for
25 * @size: size of the PIO mapping
34 /* use small-window mapping if possible */ hub_pio_map()
39 printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx" hub_pio_map()
71 printk(KERN_WARNING "unable to establish PIO mapping at" hub_pio_map()
/linux-4.1.27/arch/cris/arch-v32/drivers/pci/
H A Ddma.c2 * Dynamic DMA mapping support.
6 * The rest of the dynamic DMA mapping interface is implemented
/linux-4.1.27/arch/cris/include/asm/
H A Dcacheflush.h17 #define flush_dcache_mmap_lock(mapping) do { } while (0)
18 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
/linux-4.1.27/security/apparmor/
H A Dsid.c30 /* TODO FIXME: add sid to profile mapping, and sid recycling */
40 * TODO FIXME: sid recycling - part of profile mapping table aa_alloc_sid()
/linux-4.1.27/include/asm-generic/
H A Dcacheflush.h18 #define flush_dcache_mmap_lock(mapping) do { } while (0)
19 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
H A Diomap.h20 * encoded in the hardware mapping set up by the mapping functions
60 /* Create a virtual mapping cookie for an IO port range */
70 /* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
H A Dpci_iomap.h16 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
21 /* Create a virtual mapping cookie for a port on a given PCI device.
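The asm-generic iomap.h/pci_iomap.h entries above only describe the "virtual mapping cookie" returned for a PCI BAR. A minimal sketch of the usual map/read/unmap pattern follows; the BAR number and register offset are arbitrary and example_peek_bar0() is not a function from the tree.

#include <linux/pci.h>

static int example_peek_bar0(struct pci_dev *pdev)
{
        void __iomem *regs;
        u32 val;

        regs = pci_iomap(pdev, 0, 0);   /* length 0 = map the whole BAR */
        if (!regs)
                return -ENOMEM;

        val = ioread32(regs);           /* register at offset 0, chosen arbitrarily */
        dev_info(&pdev->dev, "BAR0[0] = %#x\n", val);

        pci_iounmap(pdev, regs);
        return 0;
}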
/linux-4.1.27/arch/unicore32/include/mach/
H A Dmap.h12 * Page table mapping constructs and function prototypes
H A DPKUnity.h13 /* Be sure that virtual mapping is defined right */
31 * 0x90000000 - 0x97FFFFFF 128MB PCI AHB-PCI MEM-mapping
32 * 0x98000000 - 0x9FFFFFFF 128MB PCI PCI-AHB MEM-mapping
/linux-4.1.27/drivers/acpi/
H A Dgsi.c50 * the mapping corresponding to default domain by passing NULL acpi_gsi_to_irq()
55 * *irq == 0 means no mapping, that should acpi_gsi_to_irq()
80 * hence always create mapping referring to the default domain acpi_register_gsi()
96 * acpi_unregister_gsi() - Free a GSI<->linux IRQ number mapping
H A Dacpi_lpat.c24 * @lpat_table: the temperature_raw mapping table structure
26 * above mapping table
58 * @lpat: the temperature_raw mapping table
60 * above mapping table
146 * @lpat_table: the temperature_raw mapping table structure
/linux-4.1.27/drivers/base/
H A DMakefile11 obj-$(CONFIG_HAS_DMA) += dma-mapping.o
/linux-4.1.27/fs/romfs/
H A Dmmap-nommu.c17 * try to determine where a shared mapping can be made
36 /* the mapping mustn't extend beyond the EOF */ romfs_get_unmapped_area()
54 /* the mapping mustn't extend beyond the EOF */ romfs_get_unmapped_area()
65 * permit a R/O mapping to be made directly through onto an MTD device if
/linux-4.1.27/arch/arm/include/asm/mach/
H A Dpci.h50 u64 mem_offset; /* bus->cpu memory mapping offset */
51 unsigned long io_offset; /* bus->cpu IO mapping offset */
58 /* IRQ mapping */
84 * Setup early fixed I/O mapping.
/linux-4.1.27/sound/firewire/
H A Dpackets-buffer.h4 #include <linux/dma-mapping.h>
/linux-4.1.27/drivers/hid/
H A Dhid-tivo.c46 /* Enter/Last (default mapping: KEY_LAST) */ tivo_input_mapping()
48 /* Info (default mapping: KEY_PROPS) */ tivo_input_mapping()
58 /* This means we found a matching mapping here, else, look in the tivo_input_mapping()
/linux-4.1.27/arch/powerpc/kernel/
H A Dfsl_booke_entry_mapping.S72 /* 3. Setup a temp mapping and jump to it */
101 /* Just modify the entry ID and EPN for the temp mapping */
105 xori r6,r4,1 /* Setup TMP mapping in the other Address space */
140 /* 5. Invalidate mapping we started in */
155 /* The mapping only needs to be cache-coherent on SMP */
164 /* 6. Setup KERNELBASE mapping in TLB1[0] */
176 /* 7. Jump to KERNELBASE mapping */
184 * 6. Setup a 1:1 mapping in TLB1. Esel 0 is unused, 1 or 2 contains the tmp
185 * mapping so we start at 3. We setup 8 mappings, each 256MiB in size. This
209 /* 7. Jump to our 1:1 mapping */
212 #error You need to specify the mapping or not use this at all.
225 /* 8. Clear out the temp mapping */
/linux-4.1.27/arch/arm/plat-samsung/include/plat/
H A Dmap-s3c.h39 * physical address space, as the initial mapping for the IO
40 * is done as a 1:1 mapping. This puts it (currently) at
41 * 0xFA800000, which is not in the way of any current mapping
/linux-4.1.27/fs/ecryptfs/
H A Dmmap.c148 page_virt, page->mapping->host); ecryptfs_copy_up_encrypted_with_header()
170 crypt_stat->extent_size, page->mapping->host); ecryptfs_copy_up_encrypted_with_header()
188 * @page: Page from eCryptfs inode mapping into which to stick the read data
197 &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; ecryptfs_readpage()
203 page->mapping->host); ecryptfs_readpage()
220 page->mapping->host); ecryptfs_readpage()
251 struct inode *inode = page->mapping->host; fill_zeros_to_end_of_page()
267 * @mapping: The eCryptfs object
279 struct address_space *mapping, ecryptfs_write_begin()
288 page = grab_cache_page_write_begin(mapping, index, flags); ecryptfs_write_begin()
296 &ecryptfs_inode_to_private(mapping->host)->crypt_stat; ecryptfs_write_begin()
300 page, index, 0, PAGE_CACHE_SIZE, mapping->host); ecryptfs_write_begin()
327 mapping->host); ecryptfs_write_begin()
339 >= i_size_read(page->mapping->host)) { ecryptfs_write_begin()
359 if (prev_page_end_size > i_size_read(page->mapping->host)) { ecryptfs_write_begin()
373 if ((i_size_read(mapping->host) == prev_page_end_size) ecryptfs_write_begin()
471 * @mapping: The eCryptfs object
479 struct address_space *mapping, ecryptfs_write_end()
486 struct inode *ecryptfs_inode = mapping->host; ecryptfs_write_end()
541 static sector_t ecryptfs_bmap(struct address_space *mapping, sector_t block) ecryptfs_bmap() argument
547 inode = (struct inode *)mapping->host; ecryptfs_bmap()
278 ecryptfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) ecryptfs_write_begin() argument
478 ecryptfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) ecryptfs_write_end() argument
/linux-4.1.27/arch/alpha/include/asm/
H A Dtlbflush.h80 /* Flush current user mapping. */
87 /* Flush someone else's user mapping. */
98 /* Flush everything (kernel mapping may also have changed
105 /* Flush a specified user mapping. */
127 /* Flush a specified range of user mapping. On the Alpha we flush
/linux-4.1.27/fs/omfs/
H A Dfile.c292 static int omfs_readpages(struct file *file, struct address_space *mapping, omfs_readpages() argument
295 return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); omfs_readpages()
304 omfs_writepages(struct address_space *mapping, struct writeback_control *wbc) omfs_writepages() argument
306 return mpage_writepages(mapping, wbc, omfs_get_block); omfs_writepages()
309 static void omfs_write_failed(struct address_space *mapping, loff_t to) omfs_write_failed() argument
311 struct inode *inode = mapping->host; omfs_write_failed()
319 static int omfs_write_begin(struct file *file, struct address_space *mapping, omfs_write_begin() argument
325 ret = block_write_begin(mapping, pos, len, flags, pagep, omfs_write_begin()
328 omfs_write_failed(mapping, pos + len); omfs_write_begin()
333 static sector_t omfs_bmap(struct address_space *mapping, sector_t block) omfs_bmap() argument
335 return generic_block_bmap(mapping, block, omfs_get_block); omfs_bmap()
/linux-4.1.27/fs/afs/
H A Dfile.c27 static int afs_readpages(struct file *filp, struct address_space *mapping,
127 struct inode *inode = page->mapping->host; afs_page_filler()
225 struct inode *inode = page->mapping->host; afs_readpage()
240 static int afs_readpages(struct file *file, struct address_space *mapping, afs_readpages() argument
248 key_serial(key), mapping->host->i_ino, nr_pages); afs_readpages()
252 vnode = AFS_FS_I(mapping->host); afs_readpages()
261 mapping, afs_readpages()
266 mapping_gfp_mask(mapping)); afs_readpages()
291 ret = read_cache_pages(mapping, pages, afs_page_filler, key); afs_readpages()
325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); afs_invalidatepage()
352 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); afs_releasepage()
H A Dwrite.c118 int afs_write_begin(struct file *file, struct address_space *mapping, afs_write_begin() argument
146 page = grab_cache_page_write_begin(mapping, index, flags); afs_write_begin()
244 int afs_write_end(struct file *file, struct address_space *mapping, afs_write_end() argument
463 static int afs_writepages_region(struct address_space *mapping, afs_writepages_region() argument
474 n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY, afs_writepages_region()
488 /* at this point we hold neither mapping->tree_lock nor lock on afs_writepages_region()
490 * (changing page->mapping to NULL), or even swizzled back from afs_writepages_region()
491 * swapper_space to tmpfs file mapping afs_writepages_region()
495 if (page->mapping != mapping) { afs_writepages_region()
537 int afs_writepages(struct address_space *mapping, afs_writepages() argument
546 start = mapping->writeback_index; afs_writepages()
548 ret = afs_writepages_region(mapping, wbc, start, end, &next); afs_writepages()
550 ret = afs_writepages_region(mapping, wbc, 0, start, afs_writepages()
552 mapping->writeback_index = next; afs_writepages()
555 ret = afs_writepages_region(mapping, wbc, 0, end, &next); afs_writepages()
557 mapping->writeback_index = next; afs_writepages()
561 ret = afs_writepages_region(mapping, wbc, start, end, &next); afs_writepages()
592 pv.nr = find_get_pages_contig(call->mapping, first, count, afs_pages_written_back()
660 struct address_space *mapping = vnode->vfs_inode.i_mapping; afs_writeback_all() local
670 ret = mapping->a_ops->writepages(mapping, &wbc); afs_writeback_all()
671 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); afs_writeback_all()
/linux-4.1.27/fs/ntfs/
H A Daops.h48 * @mapping: address space for which to obtain the page
49 * @index: index into the page cache for @mapping of the page to map
51 * Read a page from the page cache of the address space @mapping at position
55 * method defined in the address space operations of @mapping and the page is
56 * added to the page cache of @mapping in the process.
86 static inline struct page *ntfs_map_page(struct address_space *mapping, ntfs_map_page() argument
89 struct page *page = read_mapping_page(mapping, index, NULL); ntfs_map_page()
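The ntfs aops.h comment above describes reading a page through the page cache of an address_space with read_mapping_page(). A minimal, tree-independent sketch of that pattern (read, kmap, release) is shown here; example_read_one_page() is hypothetical.

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

static int example_read_one_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page;
        void *kaddr;

        page = read_mapping_page(mapping, index, NULL);
        if (IS_ERR(page))
                return PTR_ERR(page);   /* read or allocation failed */

        kaddr = kmap(page);
        /* consume the cached data at kaddr here */
        kunmap(page);

        page_cache_release(page);       /* drop the reference taken above */
        return 0;
}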
H A Drunlist.h32 * runlist_element - in memory vcn to lcn mapping array element
42 typedef struct { /* In memory vcn to lcn mapping structure element. */
49 * runlist - in memory vcn to lcn mapping array including a read/write lock
/linux-4.1.27/include/linux/pinctrl/
H A Dmachine.h28 * struct pinctrl_map_mux - mapping table content for MAP_TYPE_MUX_GROUP
40 * struct pinctrl_map_configs - mapping table content for MAP_TYPE_CONFIGS_*
56 * @dev_name: the name of the device using this specific mapping, the name
62 * @type: the type of mapping table entry
63 * @ctrl_dev_name: the name of the device controlling this specific mapping,
66 * @data: Data specific to the mapping type
79 /* Convenience macros to create mapping table entries */
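The pinctrl machine.h excerpt above describes mapping table entries and the convenience macros used to build them. Below is a minimal board-file sketch using one of those macros; the device, controller, group and function names ("foo-uart.0", "pinctrl-foo", and so on) are made-up placeholders, and a real board file would arrange for the registration to run before the consumer device probes.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map board_pinmux_map[] __initconst = {
        PIN_MAP_MUX_GROUP_DEFAULT("foo-uart.0", "pinctrl-foo",
                                  "uart0_grp", "uart0"),
};

static int __init board_pinmux_init(void)
{
        return pinctrl_register_mappings(board_pinmux_map,
                                         ARRAY_SIZE(board_pinmux_map));
}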
/linux-4.1.27/fs/bfs/
H A Dfile.c162 static void bfs_write_failed(struct address_space *mapping, loff_t to) bfs_write_failed() argument
164 struct inode *inode = mapping->host; bfs_write_failed()
170 static int bfs_write_begin(struct file *file, struct address_space *mapping, bfs_write_begin() argument
176 ret = block_write_begin(mapping, pos, len, flags, pagep, bfs_write_begin()
179 bfs_write_failed(mapping, pos + len); bfs_write_begin()
184 static sector_t bfs_bmap(struct address_space *mapping, sector_t block) bfs_bmap() argument
186 return generic_block_bmap(mapping, block, bfs_get_block); bfs_bmap()
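bfs, like omfs and jfs further down, builds its address_space operations from the generic buffer-layer helpers (block_write_begin, generic_write_end, generic_block_bmap) plus its own get_block callback. The sketch below shows the shape of that wiring with a hypothetical example_get_block(); it is illustrative only, and a real filesystem would also truncate back excess blocks when write_begin fails, as bfs_write_failed() above does.

#include <linux/buffer_head.h>
#include <linux/fs.h>

/* hypothetical: a real get_block translates the logical block and may allocate */
static int example_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create)
{
        map_bh(bh_result, inode->i_sb, iblock); /* pretend logical == physical */
        return 0;
}

static int example_write_begin(struct file *file, struct address_space *mapping,
                               loff_t pos, unsigned len, unsigned flags,
                               struct page **pagep, void **fsdata)
{
        return block_write_begin(mapping, pos, len, flags, pagep,
                                 example_get_block);
}

static sector_t example_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, example_get_block);
}

static const struct address_space_operations example_aops = {
        .write_begin    = example_write_begin,
        .write_end      = generic_write_end,
        .bmap           = example_bmap,
};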
/linux-4.1.27/drivers/gpu/drm/rockchip/
H A Drockchip_drm_drv.c22 #include <linux/dma-mapping.h>
39 * Attach a (component) device to the shared drm dma mapping from master drm
41 * mapping.
46 struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; rockchip_drm_dma_attach_device() local
55 return arm_iommu_attach_device(dev, mapping); rockchip_drm_dma_attach_device()
130 struct dma_iommu_mapping *mapping; rockchip_drm_load() local
152 /* TODO(djkurtz): fetch the mapping start/size from somewhere */ rockchip_drm_load()
153 mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000, rockchip_drm_load()
155 if (IS_ERR(mapping)) { rockchip_drm_load()
156 ret = PTR_ERR(mapping); rockchip_drm_load()
166 ret = arm_iommu_attach_device(dev, mapping); rockchip_drm_load()
226 arm_iommu_release_mapping(dev->archdata.mapping); rockchip_drm_load()
242 arm_iommu_release_mapping(dev->archdata.mapping); rockchip_drm_unload()
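The rockchip_drm_drv.c hits above create an ARM dma_iommu_mapping and attach devices to it. A minimal sketch of the create/attach/release sequence on that ARM-only API follows; the IOVA base and 256MiB window size are arbitrary choices and example_attach_iommu() is not from the tree.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_attach_iommu(struct device *dev)
{
        struct dma_iommu_mapping *mapping;
        int ret;

        /* one IOVA window starting at 0; base and size are arbitrary here */
        mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
                                           SZ_256M);
        if (IS_ERR(mapping))
                return PTR_ERR(mapping);

        ret = arm_iommu_attach_device(dev, mapping);
        if (ret)
                arm_iommu_release_mapping(mapping);
        return ret;
}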
/linux-4.1.27/include/xen/interface/
H A Dmemory.h116 * mapping table. Architectures which do not have a m2p table do not implement
145 * mapping table. Architectures which do not have a m2p table, or which do not
171 /* Which domain to change the mapping for. */
177 /* Source mapping space. */
180 /* Index into source mapping space. */
183 /* GPFN where the source mapping page should appear. */
194 /* Which domain to change the mapping for. */
205 /* GPFN in domid where the source mapping page should appear. */
258 /* Which domain to change the mapping for. */
261 /* GPFN of the current mapping of the page. */
/linux-4.1.27/fs/jfs/
H A Dinode.c287 static int jfs_writepages(struct address_space *mapping, jfs_writepages() argument
290 return mpage_writepages(mapping, wbc, jfs_get_block); jfs_writepages()
298 static int jfs_readpages(struct file *file, struct address_space *mapping, jfs_readpages() argument
301 return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); jfs_readpages()
304 static void jfs_write_failed(struct address_space *mapping, loff_t to) jfs_write_failed() argument
306 struct inode *inode = mapping->host; jfs_write_failed()
314 static int jfs_write_begin(struct file *file, struct address_space *mapping, jfs_write_begin() argument
320 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, jfs_write_begin()
323 jfs_write_failed(mapping, pos + len); jfs_write_begin()
328 static sector_t jfs_bmap(struct address_space *mapping, sector_t block) jfs_bmap() argument
330 return generic_block_bmap(mapping, block, jfs_get_block); jfs_bmap()
337 struct address_space *mapping = file->f_mapping; jfs_direct_IO() local
353 jfs_write_failed(mapping, end); jfs_direct_IO()
/linux-4.1.27/fs/ocfs2/
H A Dmmap.c66 struct address_space *mapping = inode->i_mapping; __ocfs2_page_mkwrite() local
78 * mapping. __ocfs2_page_mkwrite()
84 * then re-extended the file. We'll re-check the page mapping after __ocfs2_page_mkwrite()
89 if ((page->mapping != inode->i_mapping) || __ocfs2_page_mkwrite()
107 ret = ocfs2_write_begin_nolock(file, mapping, pos, len, 0, &locked_page, __ocfs2_page_mkwrite()
123 ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, __ocfs2_page_mkwrite()
/linux-4.1.27/arch/m68k/sun3x/
H A Ddvma.c64 /* code to print out a dvma mapping for debugging purposes */ dvma_print()
80 /* create a virtual mapping for a page assigned within the IOMMU
95 printk("dvma: mapping kern %08lx to virt %08lx\n", dvma_map_cpu()
130 printk("mapping %08lx phys to %08lx\n", dvma_map_cpu()
165 // printk("mapping pa %lx to ba %lx\n", __pa(kaddr), index << DVMA_PAGE_SHIFT); dvma_map_iommu()
194 printk("freeing bus mapping %08x\n", index << DVMA_PAGE_SHIFT); dvma_unmap_iommu()
/linux-4.1.27/net/netlabel/
H A Dnetlabel_cipso_v4.h39 * Sent by an application to add a new DOI mapping table.
56 * Sent by an application to remove a specific DOI mapping table from the
71 * The valid response message format depends on the type of the DOI mapping,
118 * the mapping table type (defined in the cipso_ipv4.h header as
137 * a MLS sensitivity level mapping, must contain only one attribute of
152 * a MLS category mapping, must contain only one attribute of each of
H A Dnetlabel_mgmt.h40 * Sent by an application to add a domain mapping to the NetLabel system.
64 * Sent by an application to remove a domain mapping from the NetLabel
91 * If the mapping is using the NETLBL_NLTYPE_CIPSOV4 type then the following
96 * If the mapping is using the NETLBL_NLTYPE_UNLABELED type no other
100 * Sent by an application to set the default domain mapping for the NetLabel
114 * Sent by an application to remove the default domain mapping from the
133 * If the mapping is using the NETLBL_NLTYPE_CIPSOV4 type then the following
138 * If the mapping is using the NETLBL_NLTYPE_UNLABELED type no other
/linux-4.1.27/net/rds/
H A Diw_rdma.c52 struct rds_iw_mapping mapping; member in struct:rds_iw_mr
390 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list); rds_iw_reuse_fmr()
391 list_del_init(&ibmr->mapping.m_list); rds_iw_reuse_fmr()
439 spin_lock_init(&ibmr->mapping.m_lock); rds_iw_alloc_mr()
440 INIT_LIST_HEAD(&ibmr->mapping.m_list); rds_iw_alloc_mr()
441 ibmr->mapping.m_mr = ibmr; rds_iw_alloc_mr()
466 ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list, rds_iw_sync_mr()
467 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL); rds_iw_sync_mr()
470 ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list, rds_iw_sync_mr()
471 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL); rds_iw_sync_mr()
504 * actually members of an ibmr (ibmr->mapping). They either rds_iw_flush_mr_pool()
520 list_for_each_entry_safe(ibmr, next, &kill_list, mapping.m_list) { rds_iw_flush_mr_pool()
522 list_del(&ibmr->mapping.m_list); rds_iw_flush_mr_pool()
556 rdsdebug("RDS/IW: free_mr nents %u\n", ibmr->mapping.m_sg.len); rds_iw_free_mr()
658 * The application can request that a mapping is invalidated in FREE_MR.
678 /* FIXME - this is overkill, but mapping->m_sg.dma_len/mapping->m_sg.dma_npages rds_iw_init_fastreg()
695 static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping) rds_iw_rdma_build_fastreg() argument
697 struct rds_iw_mr *ibmr = mapping->m_mr; rds_iw_rdma_build_fastreg()
708 mapping->m_rkey = ibmr->mr->rkey; rds_iw_rdma_build_fastreg()
713 f_wr.wr.fast_reg.length = mapping->m_sg.bytes; rds_iw_rdma_build_fastreg()
714 f_wr.wr.fast_reg.rkey = mapping->m_rkey; rds_iw_rdma_build_fastreg()
716 f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len; rds_iw_rdma_build_fastreg()
764 struct rds_iw_mapping *mapping = &ibmr->mapping; rds_iw_map_fastreg() local
768 rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len); rds_iw_map_fastreg()
770 dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg); rds_iw_map_fastreg()
777 if (mapping->m_sg.dma_len > pool->max_message_size) { rds_iw_map_fastreg()
782 for (i = 0; i < mapping->m_sg.dma_npages; ++i) rds_iw_map_fastreg()
785 ret = rds_iw_rdma_build_fastreg(mapping); rds_iw_map_fastreg()
806 if (!ibmr->mapping.m_sg.dma_len) rds_iw_free_fastreg()
816 list_add_tail(&ibmr->mapping.m_list, &pool->dirty_list); rds_iw_free_fastreg()
817 atomic_add(ibmr->mapping.m_sg.len, &pool->free_pinned); rds_iw_free_fastreg()
828 struct rds_iw_mapping *mapping, *next; rds_iw_unmap_fastreg_list() local
852 list_for_each_entry_safe(mapping, next, unmap_list, m_list) { list_for_each_entry_safe()
853 *unpinned += mapping->m_sg.len; list_for_each_entry_safe()
854 list_move(&mapping->m_list, &laundered); list_for_each_entry_safe()
/linux-4.1.27/arch/arm/mach-imx/
H A Dmm-imx21.c35 * this fixed mapping covers:
44 * this fixed mapping covers:
50 * this fixed mapping covers:
H A Dmm-imx27.c35 * this fixed mapping covers:
44 * this fixed mapping covers:
50 * this fixed mapping covers:
/linux-4.1.27/drivers/iommu/
H A Dshmobile-iommu.c10 #include <linux/dma-mapping.h>
346 struct dma_iommu_mapping *mapping; shmobile_iommu_add_device() local
350 mapping = archdata->iommu_mapping; shmobile_iommu_add_device()
351 if (!mapping) { shmobile_iommu_add_device()
352 mapping = arm_iommu_create_mapping(&platform_bus_type, 0, shmobile_iommu_add_device()
354 if (IS_ERR(mapping)) shmobile_iommu_add_device()
355 return PTR_ERR(mapping); shmobile_iommu_add_device()
356 archdata->iommu_mapping = mapping; shmobile_iommu_add_device()
359 if (arm_iommu_attach_device(dev, mapping)) shmobile_iommu_add_device()
H A Dipmmu-vmsa.c12 #include <linux/dma-mapping.h>
36 struct dma_iommu_mapping *mapping; member in struct:ipmmu_vmsa_device
452 if (!mmu->mapping) ipmmu_irq()
455 io_domain = mmu->mapping->domain; ipmmu_irq()
686 * Create the ARM mapping, used by the ARM DMA mapping core to allocate ipmmu_add_device()
690 * - Create one mapping per context (TLB). ipmmu_add_device()
691 * - Make the mapping size configurable ? We currently use a 2GB mapping ipmmu_add_device()
694 if (!mmu->mapping) { ipmmu_add_device()
695 struct dma_iommu_mapping *mapping; ipmmu_add_device() local
697 mapping = arm_iommu_create_mapping(&platform_bus_type, ipmmu_add_device()
699 if (IS_ERR(mapping)) { ipmmu_add_device()
700 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); ipmmu_add_device()
701 ret = PTR_ERR(mapping); ipmmu_add_device()
705 mmu->mapping = mapping; ipmmu_add_device()
708 /* Attach the ARM VA mapping to the device. */ ipmmu_add_device()
709 ret = arm_iommu_attach_device(dev, mmu->mapping); ipmmu_add_device()
711 dev_err(dev, "Failed to attach device to VA mapping\n"); ipmmu_add_device()
718 arm_iommu_release_mapping(mmu->mapping); ipmmu_add_device()
828 * We can't create the ARM mapping here as it requires the bus to have ipmmu_probe()
850 arm_iommu_release_mapping(mmu->mapping); ipmmu_remove()
/linux-4.1.27/drivers/crypto/caam/
H A Dcompat.h17 #include <linux/dma-mapping.h>
/linux-4.1.27/arch/metag/include/asm/
H A Duser_gateway.h20 /* Kernel mapping of the gateway page. */
/linux-4.1.27/fs/quota/
H A Dkqid.c63 * There is always a mapping into the initial user_namespace.
65 * If @kqid has no mapping in @targ (qid_t)-1 is returned.
90 * There is always a mapping into the initial user_namespace.
97 * If @kqid has no mapping in @targ the kqid.type specific
/linux-4.1.27/fs/squashfs/
H A Dfile_cache.c25 struct inode *i = page->mapping->host; squashfs_readpage_block()
/linux-4.1.27/fs/jffs2/
H A Dfile.c24 static int jffs2_write_end(struct file *filp, struct address_space *mapping,
27 static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
124 struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); jffs2_readpage()
128 ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); jffs2_readpage()
133 static int jffs2_write_begin(struct file *filp, struct address_space *mapping, jffs2_write_begin() argument
138 struct inode *inode = mapping->host; jffs2_write_begin()
144 pg = grab_cache_page_write_begin(mapping, index, flags); jffs2_write_begin()
237 static int jffs2_write_end(struct file *filp, struct address_space *mapping, jffs2_write_end() argument
244 struct inode *inode = mapping->host; jffs2_write_end()
/linux-4.1.27/include/media/
H A Dvideobuf-dma-contig.h16 #include <linux/dma-mapping.h>
H A Dvideobuf2-dma-contig.h17 #include <linux/dma-mapping.h>
H A Di2c-addr.h6 * Based on a previous mapping by
/linux-4.1.27/include/uapi/asm-generic/
H A Dmman.h14 #define MAP_HUGETLB 0x40000 /* create a huge page mapping */
/linux-4.1.27/include/linux/platform_data/
H A Dmax3421-hcd.h12 * This structure defines the mapping of certain auxiliary functions to the
H A Dpinctrl-adi2.h28 * @pint_map: GPIO bank mapping code in PINT device
/linux-4.1.27/include/uapi/linux/
H A Djoystick.h67 #define JSIOCSAXMAP _IOW('j', 0x31, __u8[ABS_CNT]) /* set axis mapping */
68 #define JSIOCGAXMAP _IOR('j', 0x32, __u8[ABS_CNT]) /* get axis mapping */
69 #define JSIOCSBTNMAP _IOW('j', 0x33, __u16[KEY_MAX - BTN_MISC + 1]) /* set button mapping */
70 #define JSIOCGBTNMAP _IOR('j', 0x34, __u16[KEY_MAX - BTN_MISC + 1]) /* get button mapping */
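The JSIOCGAXMAP and JSIOCGBTNMAP ioctls above are queried from user space. A small sketch of reading the axis mapping follows; it assumes a joystick node at /dev/input/js0 and is not part of the kernel sources.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/joystick.h>

int main(void)
{
        __u8 axmap[ABS_CNT];
        int fd = open("/dev/input/js0", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, JSIOCGAXMAP, axmap) < 0) {
                close(fd);
                return 1;
        }
        printf("axis 0 is reported as input event code %u\n", axmap[0]);
        close(fd);
        return 0;
}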
/linux-4.1.27/arch/sh/drivers/pci/
H A Dfixups-sh03.c17 printk(KERN_ERR "PCI: Bad IRQ mapping request " pcibios_map_platform_irq()
/linux-4.1.27/arch/arm64/include/asm/
H A Ddmi.h22 * request a virtual mapping for configuration tables such as SMBIOS.
H A Dpci.h7 #include <linux/dma-mapping.h>
H A Defi.h58 * service to communicate the new mapping to the firmware (Note that the new
59 * mapping is not live at this time)
H A Dcacheflush.h114 * the dcache entries associated with the kernel mapping.
129 #define flush_dcache_mmap_lock(mapping) \
130 spin_lock_irq(&(mapping)->tree_lock)
131 #define flush_dcache_mmap_unlock(mapping) \
132 spin_unlock_irq(&(mapping)->tree_lock)
H A Dsmp_plat.h38 * Logical CPU mapping.
H A Dvdso.h23 * Since we randomise the VDSO mapping, there's little point in trying
/linux-4.1.27/arch/arm/plat-iop/
H A Dsetup.c19 * Standard IO mapping for all IOP3xx based systems. Note that
/linux-4.1.27/arch/arc/include/asm/
H A Dcacheflush.h49 #define flush_dcache_mmap_lock(mapping) do { } while (0)
50 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
74 * To make sure that userspace mapping is flushed to memory before
75 * get_user_pages() uses a kernel mapping to access the page
/linux-4.1.27/fs/f2fs/
H A Ddata.c61 set_bit(AS_EIO, &page->mapping->flags); bio_for_each_segment_all()
910 struct address_space *mapping = inode->i_mapping; find_data_page() local
928 page = find_get_page(mapping, index); find_data_page()
952 page = grab_cache_page(mapping, index); find_data_page()
983 struct address_space *mapping = inode->i_mapping; get_lock_data_page() local
993 page = grab_cache_page(mapping, index); get_lock_data_page()
1041 if (unlikely(page->mapping != mapping)) { get_lock_data_page()
1059 struct address_space *mapping = inode->i_mapping; get_new_data_page() local
1069 page = grab_cache_page(mapping, index); get_new_data_page()
1097 if (unlikely(page->mapping != mapping)) { get_new_data_page()
1344 struct inode *inode = page->mapping->host; f2fs_read_data_page()
1359 struct address_space *mapping, f2fs_read_data_pages()
1368 return mpage_readpages(mapping, pages, nr_pages, get_data_block); f2fs_read_data_pages()
1373 struct inode *inode = page->mapping->host; do_write_data_page()
1419 struct inode *inode = page->mapping->host; f2fs_write_data_page()
1505 struct address_space *mapping = data; __f2fs_writepage() local
1506 int ret = mapping->a_ops->writepage(page, wbc); __f2fs_writepage()
1507 mapping_set_error(mapping, ret); __f2fs_writepage()
1511 static int f2fs_write_data_pages(struct address_space *mapping, f2fs_write_data_pages() argument
1514 struct inode *inode = mapping->host; f2fs_write_data_pages()
1520 trace_f2fs_writepages(mapping->host, wbc, DATA); f2fs_write_data_pages()
1523 if (!mapping->a_ops->writepage) f2fs_write_data_pages()
1541 ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping); f2fs_write_data_pages()
1557 static void f2fs_write_failed(struct address_space *mapping, loff_t to) f2fs_write_failed() argument
1559 struct inode *inode = mapping->host; f2fs_write_failed()
1567 static int f2fs_write_begin(struct file *file, struct address_space *mapping, f2fs_write_begin() argument
1571 struct inode *inode = mapping->host; f2fs_write_begin()
1593 page = grab_cache_page_write_begin(mapping, index, flags); f2fs_write_begin()
1662 if (unlikely(page->mapping != mapping)) { f2fs_write_begin()
1678 f2fs_write_failed(mapping, pos + len); f2fs_write_begin()
1683 struct address_space *mapping, f2fs_write_end()
1687 struct inode *inode = page->mapping->host; f2fs_write_end()
1724 struct address_space *mapping = file->f_mapping; f2fs_direct_IO() local
1725 struct inode *inode = mapping->host; f2fs_direct_IO()
1746 f2fs_write_failed(mapping, offset + count); f2fs_direct_IO()
1756 struct inode *inode = page->mapping->host; f2fs_invalidate_page()
1786 struct address_space *mapping = page->mapping; f2fs_set_data_page_dirty() local
1787 struct inode *inode = mapping->host; f2fs_set_data_page_dirty()
1808 static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) f2fs_bmap() argument
1810 struct inode *inode = mapping->host; f2fs_bmap()
1818 return generic_block_bmap(mapping, block, get_data_block); f2fs_bmap()
1358 f2fs_read_data_pages(struct file *file, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) f2fs_read_data_pages() argument
1682 f2fs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) f2fs_write_end() argument
/linux-4.1.27/drivers/net/ethernet/dec/tulip/
H A Dinterrupt.c70 dma_addr_t mapping; tulip_refill_rx() local
77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, tulip_refill_rx()
79 if (dma_mapping_error(&tp->pdev->dev, mapping)) { tulip_refill_rx()
85 tp->rx_buffers[entry].mapping = mapping; tulip_refill_rx()
87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); tulip_refill_rx()
214 tp->rx_buffers[entry].mapping, tulip_poll()
226 tp->rx_buffers[entry].mapping, tulip_poll()
233 if (tp->rx_buffers[entry].mapping != tulip_poll()
238 (unsigned long long)tp->rx_buffers[entry].mapping, tulip_poll()
243 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, tulip_poll()
247 tp->rx_buffers[entry].mapping = 0; tulip_poll()
440 tp->rx_buffers[entry].mapping, tulip_rx()
452 tp->rx_buffers[entry].mapping, tulip_rx()
459 if (tp->rx_buffers[entry].mapping != tulip_rx()
464 (long long)tp->rx_buffers[entry].mapping, tulip_rx()
469 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, tulip_rx()
473 tp->rx_buffers[entry].mapping = 0; tulip_rx()
599 if (tp->tx_buffers[entry].mapping) tulip_interrupt()
601 tp->tx_buffers[entry].mapping, tulip_interrupt()
632 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping, tulip_interrupt()
639 tp->tx_buffers[entry].mapping = 0; tulip_interrupt()
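The tulip refill and completion paths above follow the standard streaming DMA pattern: map a buffer, verify the mapping before handing it to hardware, and unmap it afterwards. A device-agnostic sketch of that pattern using the generic dma_map_single() API is shown below; the helper names are invented.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_rx_buffer(struct device *dev, void *buf, size_t len,
                                 dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM; /* never hand an unchecked address to hardware */
        return 0;
}

static void example_unmap_rx_buffer(struct device *dev, dma_addr_t handle,
                                    size_t len)
{
        dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}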
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
H A Dipoib_ib.c38 #include <linux/dma-mapping.h>
95 u64 mapping[IPOIB_UD_RX_SG]) ipoib_ud_dma_unmap_rx()
97 ib_dma_unmap_single(priv->ca, mapping[0], ipoib_ud_dma_unmap_rx()
109 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; ipoib_ib_post_receive()
110 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; ipoib_ib_post_receive()
116 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); ipoib_ib_post_receive()
129 u64 *mapping; ipoib_alloc_rx_skb() local
144 mapping = priv->rx_ring[id].mapping; ipoib_alloc_rx_skb()
145 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, ipoib_alloc_rx_skb()
147 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) ipoib_alloc_rx_skb()
181 u64 mapping[IPOIB_UD_RX_SG]; ipoib_ib_handle_rx_wc() local
200 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); ipoib_ib_handle_rx_wc()
213 memcpy(mapping, priv->rx_ring[wr_id].mapping, ipoib_ib_handle_rx_wc()
214 IPOIB_UD_RX_SG * sizeof *mapping); ipoib_ib_handle_rx_wc()
228 ipoib_ud_dma_unmap_rx(priv, mapping); ipoib_ib_handle_rx_wc()
270 u64 *mapping = tx_req->mapping; ipoib_dma_map_tx() local
275 mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb), ipoib_dma_map_tx()
277 if (unlikely(ib_dma_mapping_error(ca, mapping[0]))) ipoib_dma_map_tx()
286 mapping[i + off] = ib_dma_map_page(ca, ipoib_dma_map_tx()
290 if (unlikely(ib_dma_mapping_error(ca, mapping[i + off]))) ipoib_dma_map_tx()
299 ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE); ipoib_dma_map_tx()
303 ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); ipoib_dma_map_tx()
312 u64 *mapping = tx_req->mapping; ipoib_dma_unmap_tx() local
317 ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); ipoib_dma_unmap_tx()
325 ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), ipoib_dma_unmap_tx()
521 u64 *mapping = tx_req->mapping; post_send() local
524 priv->tx_sge[0].addr = mapping[0]; post_send()
531 priv->tx_sge[i + off].addr = mapping[i + off]; post_send()
884 priv->rx_ring[i].mapping); ipoib_ib_dev_stop()
94 ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, u64 mapping[IPOIB_UD_RX_SG]) ipoib_ud_dma_unmap_rx() argument
/linux-4.1.27/fs/exofs/
H A Ddir.c65 struct address_space *mapping = page->mapping; exofs_commit_chunk() local
66 struct inode *dir = mapping->host; exofs_commit_chunk()
90 struct inode *dir = page->mapping->host; exofs_check_page()
165 struct address_space *mapping = dir->i_mapping; exofs_get_page() local
166 struct page *page = read_mapping_page(mapping, n, NULL); exofs_get_page()
414 err = exofs_write_begin(NULL, page->mapping, pos, len, exofs_set_link()
496 err = exofs_write_begin(NULL, page->mapping, pos, rec_len, 0, exofs_add_link()
527 struct address_space *mapping = page->mapping; exofs_delete_entry() local
528 struct inode *inode = mapping->host; exofs_delete_entry()
553 err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0, exofs_delete_entry()
577 struct address_space *mapping = inode->i_mapping; exofs_make_empty() local
578 struct page *page = grab_cache_page(mapping, 0); exofs_make_empty()
587 err = exofs_write_begin(NULL, page->mapping, 0, chunk_size, 0, exofs_make_empty()
/linux-4.1.27/fs/minix/
H A Ddir.c55 struct address_space *mapping = page->mapping; dir_commit_chunk() local
56 struct inode *dir = mapping->host; dir_commit_chunk()
58 block_write_end(NULL, mapping, pos, len, len, page, NULL); dir_commit_chunk()
73 struct address_space *mapping = dir->i_mapping; dir_get_page() local
74 struct page *page = read_mapping_page(mapping, n, NULL); dir_get_page()
295 struct inode *inode = page->mapping->host; minix_delete_entry()
419 struct inode *dir = page->mapping->host; minix_set_link()
462 struct address_space *mapping = page->mapping; minix_inode_by_name() local
463 struct inode *inode = mapping->host; minix_inode_by_name()
/linux-4.1.27/fs/
H A Ddax.c202 struct address_space *mapping = inode->i_mapping; dax_do_io() local
204 retval = filemap_write_and_wait_range(mapping, pos, end - 1); dax_do_io()
236 static int dax_load_hole(struct address_space *mapping, struct page *page, dax_load_hole() argument
240 struct inode *inode = mapping->host; dax_load_hole()
242 page = find_or_create_page(mapping, vmf->pgoff, dax_load_hole()
273 struct address_space *mapping = inode->i_mapping; dax_insert_mapping() local
281 i_mmap_lock_read(mapping); dax_insert_mapping()
310 i_mmap_unlock_read(mapping); dax_insert_mapping()
319 struct address_space *mapping = file->f_mapping; do_dax_fault() local
320 struct inode *inode = mapping->host; do_dax_fault()
339 page = find_get_page(mapping, vmf->pgoff); do_dax_fault()
345 if (unlikely(page->mapping != mapping)) { do_dax_fault()
378 return dax_load_hole(mapping, page, vmf); do_dax_fault()
392 i_mmap_lock_read(mapping); do_dax_fault()
397 i_mmap_unlock_read(mapping); do_dax_fault()
407 page = find_lock_page(mapping, vmf->pgoff); do_dax_fault()
410 unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, do_dax_fault()
418 * If we successfully insert the new mapping over an unwritten extent, do_dax_fault()
420 * error inserting the mapping, the filesystem needs to leave it as do_dax_fault()
423 * the private resources on the mapping buffer can be released. We do_dax_fault()
H A Dmpage.c100 struct inode *inode = page->mapping->host; map_buffer_to_page()
131 * This is the worker routine which does all the work of mapping the disk
136 * represent the validity of its disk mapping and to decide when to do the next
144 struct inode *inode = page->mapping->host; do_mpage_readpage()
314 * @mapping: the address_space
349 * BH_Boundary when it maps block 11. BH_Boundary says: mapping of the block
356 mpage_readpages(struct address_space *mapping, struct list_head *pages, mpage_readpages() argument
372 if (!add_to_page_cache_lru(page, mapping, mpage_readpages()
413 * mapping. We only support pages which are fully mapped-and-dirty, with a
419 * BIO. Otherwise fall back to the mapping's writepage().
467 struct address_space *mapping = page->mapping; __mpage_writepage() local
468 struct inode *inode = page->mapping->host; __mpage_writepage()
642 ret = mapping->a_ops->writepage(page, wbc); __mpage_writepage()
648 * The caller has a ref on the inode, so *mapping is stable __mpage_writepage()
650 mapping_set_error(mapping, ret); __mpage_writepage()
658 * @mapping: address space structure to write
676 mpage_writepages(struct address_space *mapping, mpage_writepages() argument
685 ret = generic_writepages(mapping, wbc); mpage_writepages()
694 ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd); mpage_writepages()
H A Dbuffer.c189 * we get exclusion from try_to_free_buffers with the blockdev mapping's
192 * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
195 * private_lock is contended then so is mapping->tree_lock).
354 set_bit(AS_EIO, &page->mapping->flags); end_buffer_async_write()
440 * try_to_free_buffers() will be operating against the *blockdev* mapping
445 * mapping->private_lock does *not* protect mapping->private_list! In fact,
446 * mapping->private_list will always be protected by the backing blockdev's
560 * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
561 * @mapping: the mapping which wants those buffers written
563 * Starts I/O against the buffers at mapping->private_list, and waits upon
567 * @mapping is a file or directory which needs those buffers to be written for
570 int sync_mapping_buffers(struct address_space *mapping) sync_mapping_buffers() argument
572 struct address_space *buffer_mapping = mapping->private_data; sync_mapping_buffers()
574 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) sync_mapping_buffers()
578 &mapping->private_list); sync_mapping_buffers()
601 struct address_space *mapping = inode->i_mapping; mark_buffer_dirty_inode() local
602 struct address_space *buffer_mapping = bh->b_page->mapping; mark_buffer_dirty_inode()
605 if (!mapping->private_data) { mark_buffer_dirty_inode()
606 mapping->private_data = buffer_mapping; mark_buffer_dirty_inode()
608 BUG_ON(mapping->private_data != buffer_mapping); mark_buffer_dirty_inode()
613 &mapping->private_list); mark_buffer_dirty_inode()
614 bh->b_assoc_map = mapping; mark_buffer_dirty_inode()
628 struct address_space *mapping, int warn) __set_page_dirty()
632 spin_lock_irqsave(&mapping->tree_lock, flags); __set_page_dirty()
633 if (page->mapping) { /* Race with truncate? */ __set_page_dirty()
635 account_page_dirtied(page, mapping); __set_page_dirty()
636 radix_tree_tag_set(&mapping->page_tree, __set_page_dirty()
639 spin_unlock_irqrestore(&mapping->tree_lock, flags); __set_page_dirty()
640 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); __set_page_dirty()
671 struct address_space *mapping = page_mapping(page); __set_page_dirty_buffers() local
673 if (unlikely(!mapping)) __set_page_dirty_buffers()
676 spin_lock(&mapping->private_lock); __set_page_dirty_buffers()
687 spin_unlock(&mapping->private_lock); __set_page_dirty_buffers()
690 __set_page_dirty(page, mapping, 1); __set_page_dirty_buffers()
718 struct address_space *mapping; fsync_buffers_list() local
728 mapping = bh->b_assoc_map; fsync_buffers_list()
735 bh->b_assoc_map = mapping; fsync_buffers_list()
749 * Kick off IO for the previous mapping. Note fsync_buffers_list()
750 * that we will not run the very last mapping, fsync_buffers_list()
767 mapping = bh->b_assoc_map; fsync_buffers_list()
774 &mapping->private_list); fsync_buffers_list()
775 bh->b_assoc_map = mapping; fsync_buffers_list()
798 * NOTE: we take the inode's blockdev's mapping's private_lock. Which
805 struct address_space *mapping = &inode->i_data; invalidate_inode_buffers() local
806 struct list_head *list = &mapping->private_list; invalidate_inode_buffers()
807 struct address_space *buffer_mapping = mapping->private_data; invalidate_inode_buffers()
828 struct address_space *mapping = &inode->i_data; remove_inode_buffers() local
829 struct list_head *list = &mapping->private_list; remove_inode_buffers()
830 struct address_space *buffer_mapping = mapping->private_data; remove_inode_buffers()
1138 * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
1139 * mapping->tree_lock and mapping->host->i_lock.
1162 struct address_space *mapping = page_mapping(page); mark_buffer_dirty() local
1163 if (mapping) mark_buffer_dirty()
1164 __set_page_dirty(page, mapping, 0); mark_buffer_dirty()
1195 struct address_space *buffer_mapping = bh->b_page->mapping; __bforget()
1578 spin_lock(&page->mapping->private_lock); create_empty_buffers()
1590 spin_unlock(&page->mapping->private_lock); create_empty_buffers()
1712 * handle any aliases from the underlying blockdev's mapping. __block_write_full_page()
1825 mapping_set_error(page->mapping, err); __block_write_full_page()
1887 struct inode *inode = page->mapping->host; __block_write_begin()
2008 int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, block_write_begin() argument
2015 page = grab_cache_page_write_begin(mapping, index, flags); block_write_begin()
2031 int block_write_end(struct file *file, struct address_space *mapping, block_write_end() argument
2035 struct inode *inode = mapping->host; block_write_end()
2067 int generic_write_end(struct file *file, struct address_space *mapping, generic_write_end() argument
2071 struct inode *inode = mapping->host; generic_write_end()
2075 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); generic_write_end()
2161 struct inode *inode = page->mapping->host; block_read_full_page()
2251 struct address_space *mapping = inode->i_mapping; generic_cont_expand_simple() local
2260 err = pagecache_write_begin(NULL, mapping, size, 0, generic_cont_expand_simple()
2266 err = pagecache_write_end(NULL, mapping, size, 0, 0, page, fsdata); generic_cont_expand_simple()
2274 static int cont_expand_zero(struct file *file, struct address_space *mapping, cont_expand_zero() argument
2277 struct inode *inode = mapping->host; cont_expand_zero()
2297 err = pagecache_write_begin(file, mapping, curpos, len, cont_expand_zero()
2303 err = pagecache_write_end(file, mapping, curpos, len, len, cont_expand_zero()
2310 balance_dirty_pages_ratelimited(mapping); cont_expand_zero()
2331 err = pagecache_write_begin(file, mapping, curpos, len, cont_expand_zero()
2337 err = pagecache_write_end(file, mapping, curpos, len, len, cont_expand_zero()
2352 int cont_write_begin(struct file *file, struct address_space *mapping, cont_write_begin() argument
2357 struct inode *inode = mapping->host; cont_write_begin()
2362 err = cont_expand_zero(file, mapping, pos, bytes); cont_write_begin()
2372 return block_write_begin(mapping, pos, len, flags, pagep, get_block); cont_write_begin()
2378 struct inode *inode = page->mapping->host; block_commit_write()
2389 * holes and correct delalloc and unwritten extent mapping on filesystems that
2413 if ((page->mapping != inode->i_mapping) || __block_page_mkwrite()
2482 spin_lock(&page->mapping->private_lock); attach_nobh_buffers()
2492 spin_unlock(&page->mapping->private_lock); attach_nobh_buffers()
2500 int nobh_write_begin(struct address_space *mapping, nobh_write_begin() argument
2505 struct inode *inode = mapping->host; nobh_write_begin()
2523 page = grab_cache_page_write_begin(mapping, index, flags); nobh_write_begin()
2641 int nobh_write_end(struct file *file, struct address_space *mapping, nobh_write_end() argument
2645 struct inode *inode = page->mapping->host; nobh_write_end()
2653 return generic_write_end(file, mapping, pos, len, nobh_write_end()
2684 struct inode * const inode = page->mapping->host; nobh_writepage()
2704 if (page->mapping->a_ops->invalidatepage) nobh_writepage()
2705 page->mapping->a_ops->invalidatepage(page, offset); nobh_writepage()
2728 int nobh_truncate_page(struct address_space *mapping, nobh_truncate_page() argument
2736 struct inode *inode = mapping->host; nobh_truncate_page()
2751 page = grab_cache_page(mapping, index); nobh_truncate_page()
2760 return block_truncate_page(mapping, from, get_block); nobh_truncate_page()
2781 err = mapping->a_ops->readpage(NULL, page); nobh_truncate_page()
2806 int block_truncate_page(struct address_space *mapping, block_truncate_page() argument
2814 struct inode *inode = mapping->host; block_truncate_page()
2829 page = grab_cache_page(mapping, index); block_truncate_page()
2888 struct inode * const inode = page->mapping->host; block_write_full_page()
2924 sector_t generic_block_bmap(struct address_space *mapping, sector_t block, generic_block_bmap() argument
2928 struct inode *inode = mapping->host; generic_block_bmap()
3018 * from here on down, it's all bio -- do the initial mapping, _submit_bh()
3164 * locking the page or by holding its mapping's private_lock.
3193 if (buffer_write_io_error(bh) && page->mapping) drop_buffers()
3194 set_bit(AS_EIO, &page->mapping->flags); drop_buffers()
3216 struct address_space * const mapping = page->mapping; try_to_free_buffers() local
3224 if (mapping == NULL) { /* can this still happen? */ try_to_free_buffers()
3229 spin_lock(&mapping->private_lock); try_to_free_buffers()
3247 account_page_cleaned(page, mapping); try_to_free_buffers()
3248 spin_unlock(&mapping->private_lock); try_to_free_buffers()
627 __set_page_dirty(struct page *page, struct address_space *mapping, int warn) __set_page_dirty() argument
/linux-4.1.27/fs/ceph/
H A Daddr.c71 struct address_space *mapping = page->mapping; ceph_set_page_dirty() local
77 if (unlikely(!mapping)) ceph_set_page_dirty()
82 mapping->host, page, page->index); ceph_set_page_dirty()
87 inode = mapping->host; ceph_set_page_dirty()
106 mapping->host, page, page->index, ceph_set_page_dirty()
122 WARN_ON(!page->mapping); ceph_set_page_dirty()
139 inode = page->mapping->host; ceph_invalidatepage()
174 struct inode *inode = page->mapping ? page->mapping->host : NULL; ceph_releasepage()
397 static int ceph_readpages(struct file *file, struct address_space *mapping, ceph_readpages() argument
408 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, ceph_readpages()
486 if (!page->mapping || !page->mapping->host) { writepage_nounlock()
487 dout("writepage %p - no mapping\n", page); writepage_nounlock()
490 inode = page->mapping->host; writepage_nounlock()
544 dout("writepage setting page/mapping error %d %p\n", err, page); writepage_nounlock()
565 struct inode *inode = page->mapping->host; ceph_writepage()
595 * If we get an error, set the mapping error bit, but not the individual
609 struct address_space *mapping = inode->i_mapping; writepages_finish() local
630 mapping_set_error(mapping, rc); writepages_finish()
680 static int ceph_writepages_start(struct address_space *mapping, ceph_writepages_start() argument
683 struct inode *inode = mapping->host; ceph_writepages_start()
728 start = mapping->writeback_index; /* Start from prev offset */ ceph_writepages_start()
795 pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index, ceph_writepages_start()
811 unlikely(page->mapping != mapping)) { ceph_writepages_start()
812 dout("!dirty or !mapping %p\n", page); ceph_writepages_start()
996 mapping->writeback_index = index; ceph_writepages_start()
1129 static int ceph_write_begin(struct file *file, struct address_space *mapping, ceph_write_begin() argument
1140 page = grab_cache_page_write_begin(mapping, index, 0); ceph_write_begin()
1163 static int ceph_write_end(struct file *file, struct address_space *mapping, ceph_write_end() argument
1279 struct address_space *mapping = inode->i_mapping; ceph_filemap_fault() local
1280 struct page *page = find_or_create_page(mapping, 0, ceph_filemap_fault()
1281 mapping_gfp_mask(mapping) & ceph_filemap_fault()
1369 (page->mapping != inode->i_mapping)) ceph_page_mkwrite()
1408 struct address_space *mapping = inode->i_mapping; ceph_fill_inline_data() local
1416 page = find_or_create_page(mapping, 0, ceph_fill_inline_data()
1417 mapping_gfp_mask(mapping) & ~__GFP_FS); ceph_fill_inline_data()
1592 struct address_space *mapping = file->f_mapping; ceph_mmap() local
1594 if (!mapping->a_ops->readpage) ceph_mmap()
/linux-4.1.27/arch/sh/kernel/
H A Ddma-nommu.c2 * DMA mapping support for platforms lacking IOMMUs.
10 #include <linux/dma-mapping.h>
/linux-4.1.27/fs/sysv/
H A Ddir.c43 struct address_space *mapping = page->mapping; dir_commit_chunk() local
44 struct inode *dir = mapping->host; dir_commit_chunk()
47 block_write_end(NULL, mapping, pos, len, len, page, NULL); dir_commit_chunk()
61 struct address_space *mapping = dir->i_mapping; dir_get_page() local
62 struct page *page = read_mapping_page(mapping, n, NULL); dir_get_page()
236 struct inode *inode = page->mapping->host; sysv_delete_entry()
334 struct inode *dir = page->mapping->host; sysv_set_link()
/linux-4.1.27/fs/logfs/
H A Ddev_mtd.c76 struct address_space *mapping = super->s_mapping_inode->i_mapping; logfs_mtd_erase_mapping() local
81 page = find_get_page(mapping, index); logfs_mtd_erase_mapping()
151 struct address_space *mapping = super->s_mapping_inode->i_mapping; logfs_mtd_find_first_sb() local
162 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); logfs_mtd_find_first_sb()
168 struct address_space *mapping = super->s_mapping_inode->i_mapping; logfs_mtd_find_last_sb() local
180 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); logfs_mtd_find_last_sb()
187 struct address_space *mapping = super->s_mapping_inode->i_mapping; __logfs_mtd_writeseg() local
192 page = find_lock_page(mapping, index + i); __logfs_mtd_writeseg()
H A Dfile.c12 static int logfs_write_begin(struct file *file, struct address_space *mapping, logfs_write_begin() argument
16 struct inode *inode = mapping->host; logfs_write_begin()
20 page = grab_cache_page_write_begin(mapping, index, flags); logfs_write_begin()
38 static int logfs_write_end(struct file *file, struct address_space *mapping, logfs_write_end() argument
42 struct inode *inode = mapping->host; logfs_write_end()
105 struct inode *inode = page->mapping->host; __logfs_writepage()
119 struct inode *inode = page->mapping->host; logfs_writepage()
168 struct super_block *sb = page->mapping->host->i_sb; logfs_invalidatepage()
/linux-4.1.27/arch/score/include/asm/
H A Dcacheflush.h23 #define flush_dcache_mmap_lock(mapping) do {} while (0)
24 #define flush_dcache_mmap_unlock(mapping) do {} while (0)
/linux-4.1.27/arch/sh/boards/mach-sdk7786/
H A Dfpga.c23 * Once the FPGA is located, the rest of the mapping data for the other
24 * components can be determined dynamically from its section mapping
H A Dsram.c35 * FPGA_SEL determines the area mapping fpga_sram_init()
51 * up a mapping prior to inserting it in to the pool. fpga_sram_init()
/linux-4.1.27/arch/nios2/include/asm/
H A Dcacheflush.h49 #define flush_dcache_mmap_lock(mapping) do { } while (0)
50 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
/linux-4.1.27/arch/c6x/include/asm/
H A Dcacheflush.h34 #define flush_dcache_mmap_lock(mapping) do {} while (0)
35 #define flush_dcache_mmap_unlock(mapping) do {} while (0)
/linux-4.1.27/arch/frv/include/asm/
H A Dmmu.h20 unsigned long itlb_ptd_mapping; /* [DAMR4] PTD mapping for itlb cached PGE */
22 unsigned long dtlb_ptd_mapping; /* [DAMR5] PTD mapping for dtlb cached PGE */
/linux-4.1.27/arch/frv/mb93090-mb00/
H A Dpci-dma.c1 /* pci-dma.c: Dynamic DMA mapping support for the FRV CPUs that have MMUs
13 #include <linux/dma-mapping.h>
/linux-4.1.27/arch/arm/mach-s3c64xx/
H A Dmach-s3c64xx-dt.c25 * IO mapping for shared system controller IP.
27 * FIXME: Make remaining drivers use dynamic mapping.
/linux-4.1.27/arch/arm/mach-hisi/
H A Dhisilicon.c26 * the same mapping if it's defined as static IO mapping.
/linux-4.1.27/arch/parisc/kernel/
H A Dcache.c288 struct address_space *mapping = page_mapping(page); flush_dcache_page() local
294 if (mapping && !mapping_mapped(mapping)) { flush_dcache_page()
301 if (!mapping) flush_dcache_page()
311 flush_dcache_mmap_lock(mapping); flush_dcache_page()
312 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { flush_dcache_page()
318 * mapping, so here we kill the mapping then flush the flush_dcache_page()
319 * page along a special flush only alias mapping. flush_dcache_page()
334 flush_dcache_mmap_unlock(mapping); flush_dcache_page()
427 /* Copy using kernel mapping. No coherency is needed (all in copy_user_page()
429 be flushed through a mapping equivalent to the user mapping copy_user_page()
430 before it can be accessed through the kernel mapping. */ copy_user_page()
/linux-4.1.27/fs/xfs/
H A Dxfs_aops.c650 if (!page->mapping) xfs_check_page_type()
680 * that the page has no mapping at all.
705 if (page->mapping != inode->i_mapping) xfs_convert_page()
859 trace_xfs_invalidatepage(page->mapping->host, page, offset, xfs_vm_invalidatepage()
866 * invalidate the page. If we don't, we leave a stale delalloc mapping on the
884 struct inode *inode = page->mapping->host; xfs_aops_discard_page()
914 "page discard unable to remove delalloc mapping."); xfs_aops_discard_page()
942 struct inode *inode = page->mapping->host; xfs_vm_writepage()
989 * | file mapping | <EOF> | xfs_vm_writepage()
1003 * | file mapping | <EOF> | xfs_vm_writepage()
1107 * If we didn't have a valid mapping then we need to xfs_vm_writepage()
1108 * put the new mapping into a separate ioend structure. xfs_vm_writepage()
1202 struct address_space *mapping, xfs_vm_writepages()
1205 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); xfs_vm_writepages()
1206 return generic_writepages(mapping, wbc); xfs_vm_writepages()
1223 trace_xfs_releasepage(page->mapping->host, page, 0, 0); xfs_vm_releasepage()
1238 * operations it needs to perform. If the mapping is for an overwrite wholly
1243 * If we get multiple mappings in a single IO, we might be mapping different
1307 * If this is O_DIRECT or the mpage code calling tell them how large the mapping
1310 * If the mapping spans EOF, then we have to break the mapping up as the mapping
1312 * correctly zeroed. We can't do this for mappings within EOF unless the mapping
1314 * existing data with zeros. Hence we have to split the mapping into a range up
1315 * to and including EOF, and a second mapping for beyond EOF.
1336 /* limit mapping to block that spans EOF */ xfs_map_trim_size()
1377 * a block mapping without an exclusive lock first. For buffered __xfs_get_blocks()
1447 /* trim mapping down to size requested */ __xfs_get_blocks()
1740 struct address_space *mapping, xfs_vm_write_begin()
1753 page = grab_cache_page_write_begin(mapping, index, flags); xfs_vm_write_begin()
1759 struct inode *inode = mapping->host; xfs_vm_write_begin()
1795 struct address_space *mapping, xfs_vm_write_end()
1806 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); xfs_vm_write_end()
1808 struct inode *inode = mapping->host; xfs_vm_write_end()
1825 struct address_space *mapping, xfs_vm_bmap()
1828 struct inode *inode = (struct inode *)mapping->host; xfs_vm_bmap()
1833 filemap_write_and_wait(mapping); xfs_vm_bmap()
1835 return generic_block_bmap(mapping, block, xfs_get_blocks); xfs_vm_bmap()
1849 struct address_space *mapping, xfs_vm_readpages()
1853 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); xfs_vm_readpages()
1872 struct address_space *mapping = page->mapping; xfs_vm_set_page_dirty() local
1873 struct inode *inode = mapping->host; xfs_vm_set_page_dirty()
1878 if (unlikely(!mapping)) xfs_vm_set_page_dirty()
1884 spin_lock(&mapping->private_lock); xfs_vm_set_page_dirty()
1897 spin_unlock(&mapping->private_lock); xfs_vm_set_page_dirty()
1903 spin_lock_irqsave(&mapping->tree_lock, flags); xfs_vm_set_page_dirty()
1904 if (page->mapping) { /* Race with truncate? */ xfs_vm_set_page_dirty()
1906 account_page_dirtied(page, mapping); xfs_vm_set_page_dirty()
1907 radix_tree_tag_set(&mapping->page_tree, xfs_vm_set_page_dirty()
1910 spin_unlock_irqrestore(&mapping->tree_lock, flags); xfs_vm_set_page_dirty()
1911 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); xfs_vm_set_page_dirty()
1201 xfs_vm_writepages( struct address_space *mapping, struct writeback_control *wbc) xfs_vm_writepages() argument
1738 xfs_vm_write_begin( struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) xfs_vm_write_begin() argument
1793 xfs_vm_write_end( struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) xfs_vm_write_end() argument
1824 xfs_vm_bmap( struct address_space *mapping, sector_t block) xfs_vm_bmap() argument
1847 xfs_vm_readpages( struct file *unused, struct address_space *mapping, struct list_head *pages, unsigned nr_pages) xfs_vm_readpages() argument
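[Editor's note] The xfs_aops.c hits above are the callbacks XFS installs in its address_space_operations. A minimal sketch of how such a table is wired into inode->i_mapping->a_ops follows; the field names come from struct address_space_operations in include/linux/fs.h of this kernel, the callback names are the ones excerpted above, and this is an illustrative fragment rather than the actual xfs_address_space_operations definition (which also fills in .readpage, .direct_IO and others).

	#include <linux/fs.h>

	/* Sketch only: wiring the excerpted callbacks into an aops table. */
	static const struct address_space_operations example_aops = {
		.writepage	= xfs_vm_writepage,
		.writepages	= xfs_vm_writepages,
		.write_begin	= xfs_vm_write_begin,
		.write_end	= xfs_vm_write_end,
		.set_page_dirty	= xfs_vm_set_page_dirty,
		.readpages	= xfs_vm_readpages,
		.releasepage	= xfs_vm_releasepage,
		.invalidatepage	= xfs_vm_invalidatepage,
		.bmap		= xfs_vm_bmap,
	};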
/linux-4.1.27/lib/
H A Dscatterlist.c299 * If this is the first mapping, assign the sg table header. __sg_alloc_table()
300 * If this is not the first mapping, chain previous part. __sg_alloc_table()
445 * sg_miter_start - start mapping iteration over a sg list
446 * @miter: sg mapping iter to be started
451 * Starts mapping iterator @miter.
490 * sg_miter_skip - reposition mapping iterator
491 * @miter: sg mapping iter to be skipped
496 * If mapping iterator @miter has been proceeded by sg_miter_next(), this
504 * true if @miter contains the valid mapping. false if end of sg
528 * sg_miter_next - proceed mapping iterator to the next mapping
529 * @miter: sg mapping iter to proceed
532 * Proceeds @miter to the next mapping. @miter should have been started
534 * @miter->addr and @miter->length point to the current mapping.
541 * true if @miter contains the next mapping. false if end of sg
568 * sg_miter_stop - stop mapping iteration
569 * @miter: sg mapping iter to be stopped
572 * Stops mapping iterator @miter. @miter should have been started
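[Editor's note] The lib/scatterlist.c hits above document the sg mapping iterator. A short hedged sketch of its intended use follows; sg_miter_start()/sg_miter_next()/sg_miter_stop() and the SG_MITER_* flags are the real API, while example_zero_sg() and the zero-fill loop are just an illustration.

	#include <linux/scatterlist.h>
	#include <linux/string.h>

	/* Zero every byte covered by an sg list, one mapping at a time. */
	static void example_zero_sg(struct scatterlist *sgl, unsigned int nents)
	{
		struct sg_mapping_iter miter;

		/* SG_MITER_ATOMIC makes sg_miter_next() use atomic kmaps */
		sg_miter_start(&miter, sgl, nents,
			       SG_MITER_TO_SG | SG_MITER_ATOMIC);
		while (sg_miter_next(&miter)) {
			/* miter.addr/miter.length describe the current mapping */
			memset(miter.addr, 0, miter.length);
		}
		sg_miter_stop(&miter);	/* drop the last mapping, flush if dirty */
	}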
/linux-4.1.27/arch/xtensa/mm/
H A Dcache.c135 struct address_space *mapping = page_mapping(page); flush_dcache_page() local
138 * If we have a mapping but the page is not mapped to user-space flush_dcache_page()
143 if (mapping && !mapping_mapped(mapping)) { flush_dcache_page()
159 * if we have a mapping. flush_dcache_page()
162 if (!alias && !mapping) flush_dcache_page()
173 if (mapping) flush_dcache_page()
/linux-4.1.27/fs/ufs/
H A Dutil.c234 * @mapping: the address_space to search
243 struct page *ufs_get_locked_page(struct address_space *mapping, ufs_get_locked_page() argument
248 page = find_lock_page(mapping, index); ufs_get_locked_page()
250 page = read_mapping_page(mapping, index, NULL); ufs_get_locked_page()
255 mapping->host->i_ino, index); ufs_get_locked_page()
261 if (unlikely(page->mapping == NULL)) { ufs_get_locked_page()
275 mapping->host->i_ino, index); ufs_get_locked_page()
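[Editor's note] ufs_get_locked_page() above combines a page-cache lookup with a read fallback and a truncate-race check. A hedged sketch of that general pattern follows; find_lock_page(), read_mapping_page(), lock_page() and page_cache_release() are the real 4.1-era APIs, example_get_locked_page() is a made-up name and the error handling is trimmed relative to the ufs helper.

	#include <linux/pagemap.h>
	#include <linux/err.h>

	static struct page *example_get_locked_page(struct address_space *mapping,
						    pgoff_t index)
	{
		struct page *page;

		page = find_lock_page(mapping, index);
		if (!page) {
			/* not cached: read it in, then lock it ourselves */
			page = read_mapping_page(mapping, index, NULL);
			if (IS_ERR(page))
				return page;
			lock_page(page);
			if (unlikely(page->mapping == NULL)) {
				/* truncate raced with us while unlocked */
				unlock_page(page);
				page_cache_release(page);
				return NULL;
			}
		}
		return page;
	}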
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
H A Drw26.c93 inode = vmpage->mapping->host; ll_invalidatepage()
123 struct address_space *mapping; ll_releasepage() local
130 mapping = vmpage->mapping; ll_releasepage()
131 if (mapping == NULL) ll_releasepage()
134 obj = ll_i2info(mapping->host)->lli_clob; ll_releasepage()
168 struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host); ll_set_page_dirty()
335 struct address_space *mapping, ll_direct_IO_26_seg()
472 static int ll_write_begin(struct file *file, struct address_space *mapping, ll_write_begin() argument
481 page = grab_cache_page_write_begin(mapping, index, flags); ll_write_begin()
495 static int ll_write_end(struct file *file, struct address_space *mapping, ll_write_end() argument
510 static int ll_migratepage(struct address_space *mapping, ll_migratepage() argument
333 ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, int rw, struct inode *inode, struct address_space *mapping, size_t size, loff_t file_offset, struct page **pages, int page_count) ll_direct_IO_26_seg() argument
/linux-4.1.27/arch/arm64/kernel/
H A Dsmp_spin_table.c76 * The cpu-release-addr may or may not be inside the linear mapping. smp_spin_table_cpu_prepare()
77 * As ioremap_cache will either give us a new mapping or reuse the smp_spin_table_cpu_prepare()
78 * existing linear mapping, we can use it to cover both cases. In smp_spin_table_cpu_prepare()
/linux-4.1.27/arch/c6x/mm/
H A Ddma-coherent.c11 * DMA uncached mapping support.
22 #include <linux/dma-mapping.h>
100 * Free DMA coherent memory as defined by the above mapping.
/linux-4.1.27/arch/arm/mach-omap1/
H A Dio.c4 * OMAP1 I/O mapping code
28 * The machine specific code may provide the extra mapping besides the
29 * default mapping provided here.
/linux-4.1.27/arch/alpha/include/uapi/asm/
H A Dmman.h14 #define MAP_TYPE 0x0f /* Mask for type of mapping (OSF/1 is _wrong_) */
27 #define MAP_LOCKED 0x08000 /* lock the mapping */
32 #define MAP_HUGETLB 0x100000 /* create a huge page mapping */
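[Editor's note] The uapi mman.h hits above define per-architecture MAP_* flag values; callers use the symbolic names, so the differing numeric values do not matter to portable code. A small userspace illustration follows; it is a hedged example in that MAP_HUGETLB only succeeds when huge pages are configured on the running system.

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2 * 1024 * 1024;	/* one 2 MiB huge page, typically */
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap(MAP_HUGETLB)");
			return 1;
		}
		munmap(p, len);
		return 0;
	}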
/linux-4.1.27/arch/arc/mm/
H A Dcache_arc700.c465 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
467 * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 471  * If the U-mapping is not congruent to K-mapping, the former needs flushing.
475 struct address_space *mapping; flush_dcache_page() local
483 mapping = page_mapping(page); flush_dcache_page()
484 if (!mapping) flush_dcache_page()
489 * Make a note that K-mapping is dirty flush_dcache_page()
491 if (!mapping_mapped(mapping)) { flush_dcache_page()
495 /* kernel reading from page with U-mapping */ flush_dcache_page()
655 /* TBD: do we really need to clear the kernel mapping */ flush_anon_page()
671 * If SRC page was already mapped in userspace AND it's U-mapping is copy_user_highpage()
672 * not congruent with K-mapping, sync former to physical page so that copy_user_highpage()
673 * K-mapping in memcpy below, sees the right data copy_user_highpage()
686 * Mark DST page K-mapping as dirty for a later finalization by copy_user_highpage()
696 * if SRC was already usermapped and non-congruent to kernel mapping copy_user_highpage()
697 * sync the kernel mapping back to physical page copy_user_highpage()
/linux-4.1.27/drivers/sh/intc/
H A Dvirq-debugfs.c2 * Support for virtual IRQ subgroups debugfs mapping.
/linux-4.1.27/drivers/staging/lustre/lustre/include/lustre/
H A Dll_fiemap.h62 * which to start mapping (in) */
63 __u64 fm_length; /* logical length of mapping which
110 #define FIEMAP_FLAG_DEVICE_ORDER 0x40000000 /* return device ordered mapping */
117 #define FIEMAP_EXTENT_NO_DIRECT 0x40000000 /* Data mapping undefined */
/linux-4.1.27/arch/x86/include/asm/xen/
H A Dpage-coherent.h6 #include <linux/dma-mapping.h>
/linux-4.1.27/arch/x86/platform/intel-mid/device_libs/
H A Dplatform_wdt.c35 /* IOAPIC builds identity mapping between GSI and IRQ on MID */ tangier_probe()
/linux-4.1.27/arch/xtensa/boot/boot-elf/
H A Dboot.lds.S48 * to the temporary mapping used while setting up
/linux-4.1.27/arch/xtensa/include/asm/
H A Dinitialize_mmu.h7 * to the standard Linux mapping used in earlier MMU's.
84 /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */
92 * and jump to the new mapping.
152 /* Step 5: remove temporary mapping. */
/linux-4.1.27/arch/unicore32/kernel/
H A Dsys.c31 /* Provide the actual syscall number to call mapping. */
/linux-4.1.27/drivers/gpu/drm/nouveau/include/nvif/
H A Dos.h16 #include <linux/io-mapping.h>
/linux-4.1.27/arch/mips/bcm63xx/
H A Ddev-usb-usbd.c14 #include <linux/dma-mapping.h>
/linux-4.1.27/arch/mips/include/asm/mach-bcm63xx/
H A Dboard_bcm963xx.h12 * flash mapping
/linux-4.1.27/include/linux/mtd/
H A Dphysmap.h3 * drivers/mtd/maps/physmap.c mapping driver.
/linux-4.1.27/include/linux/spi/
H A Dspi_bitbang.h24 /* txrx_bufs() may handle dma mapping for transfers that don't
/linux-4.1.27/arch/powerpc/platforms/cell/
H A Diommu.c55 * to the DMA mapping functions
177 * driver - check mapping directions later, but allow read & write by tce_build_cell()
226 /* spider bridge does PCI reads after freeing - insert a mapping tce_free_cell()
870 * Fixed IOMMU mapping support
872 * This code adds support for setting up a fixed IOMMU mapping on certain
874 * mapping and unmapping pages at runtime. 32-bit devices are unable to use
875 * the fixed mapping.
877 * The fixed mapping is established at boot, and maps all of physical memory
879 * we setup the fixed mapping immediately above the normal IOMMU window.
882 * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
888 * mapping above the normal IOMMU window as we would run out of address space.
890 * table, this region does not need to be part of the fixed mapping as no
891 * device should ever be DMA'ing to it. We then setup the fixed mapping
1010 pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase); cell_iommu_setup_fixed_ptab()
1016 pr_info("IOMMU: Using weak ordering for fixed mapping\n"); cell_iommu_setup_fixed_ptab()
1018 pr_info("IOMMU: Using strong ordering for fixed mapping\n"); cell_iommu_setup_fixed_ptab()
1042 /* The fixed mapping is only supported on axon machines */ cell_iommu_fixed_mapping_init()
1047 pr_debug("iommu: fixed mapping disabled, no axons found\n"); cell_iommu_fixed_mapping_init()
1051 /* We must have dma-ranges properties for fixed mapping to work */ cell_iommu_fixed_mapping_init()
1056 pr_debug("iommu: no dma-ranges found, no fixed mapping\n"); cell_iommu_fixed_mapping_init()
1060 /* The default setup is to have the fixed mapping sit after the cell_iommu_fixed_mapping_init()
1078 * RAM with the fixed mapping, and also fit the dynamic cell_iommu_fixed_mapping_init()
1081 * need a fixed mapping for that area. cell_iommu_fixed_mapping_init()
1152 * we're on a triblade or a CAB so by default the fixed mapping setup_iommu_fixed()
1192 * to enable it, we setup a direct mapping. cell_iommu_init()
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
H A Dlscsa_alloc.c44 /* Set LS pages reserved to allow for user-space mapping. */ spu_alloc_lscsa_std()
103 /* Now we need to create a vmalloc mapping of these for the kernel spu_alloc_lscsa()
105 * normal kernel vmalloc mapping, which in our case will be 4K spu_alloc_lscsa()
126 /* Set LS pages reserved to allow for user-space mapping. spu_alloc_lscsa()
/linux-4.1.27/arch/sh/include/uapi/asm/
H A Dcpu-features.h9 * mapping of the processor flags has a chance of being
/linux-4.1.27/arch/mips/lantiq/xway/
H A Dvmmc.c12 #include <linux/dma-mapping.h>
/linux-4.1.27/arch/mips/lib/
H A Diomap.c15 * encoded in the hardware mapping set up by the mapping functions
198 * Create a virtual mapping cookie for an IO port range
200 * This uses the same mapping are as the in/out family which has to be setup
/linux-4.1.27/arch/parisc/include/asm/
H A Dcacheflush.h77 #define flush_dcache_mmap_lock(mapping) \
78 spin_lock_irq(&(mapping)->tree_lock)
79 #define flush_dcache_mmap_unlock(mapping) \
80 spin_unlock_irq(&(mapping)->tree_lock)
/linux-4.1.27/arch/powerpc/include/uapi/asm/
H A Dmman.h29 #define MAP_HUGETLB 0x40000 /* create a huge page mapping */
/linux-4.1.27/arch/blackfin/kernel/
H A DMakefile9 sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \
/linux-4.1.27/arch/arm/mach-pxa/
H A Dpxa2xx.c26 /* RESET_STATUS_* has a 1:1 mapping with RCSR */ pxa2xx_clear_reset_status()
/linux-4.1.27/arch/arm/include/debug/
H A Dexynos.S19 * mapping the head code makes. We keep the UART virtual address
H A Ds5pv210.S18 * mapping the head code makes. We keep the UART virtual address
/linux-4.1.27/arch/arm/mach-davinci/include/mach/
H A Dhardware.h23 * I/O mapping
/linux-4.1.27/arch/arm/mach-footbridge/
H A Dcommon.c136 * Common mapping for all systems. Note that the outbound write flush is
137 * commented out since there is a "No Fix" problem with it. Not mapping
150 * The mapping when the footbridge is in host mode. We don't map any of
182 * Set up the common mapping first; we need this to footbridge_map_io()
