/linux-4.4.14/mm/ |
D | truncate.c | 26 static void clear_exceptional_entry(struct address_space *mapping, in clear_exceptional_entry() argument 33 if (shmem_mapping(mapping)) in clear_exceptional_entry() 36 spin_lock_irq(&mapping->tree_lock); in clear_exceptional_entry() 42 if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot)) in clear_exceptional_entry() 47 mapping->nrshadows--; in clear_exceptional_entry() 61 __radix_tree_delete_node(&mapping->page_tree, node); in clear_exceptional_entry() 63 spin_unlock_irq(&mapping->tree_lock); in clear_exceptional_entry() 86 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage() 106 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument 108 if (page->mapping != mapping) in truncate_complete_page() [all …]
|
D | readahead.c | 28 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument 30 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init() 44 static void read_cache_pages_invalidate_page(struct address_space *mapping, in read_cache_pages_invalidate_page() argument 50 page->mapping = mapping; in read_cache_pages_invalidate_page() 52 page->mapping = NULL; in read_cache_pages_invalidate_page() 61 static void read_cache_pages_invalidate_pages(struct address_space *mapping, in read_cache_pages_invalidate_pages() argument 69 read_cache_pages_invalidate_page(mapping, victim); in read_cache_pages_invalidate_pages() 83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument 92 if (add_to_page_cache_lru(page, mapping, page->index, in read_cache_pages() 93 mapping_gfp_constraint(mapping, GFP_KERNEL))) { in read_cache_pages() [all …]
|
D | filemap.c | 112 static void page_cache_tree_delete(struct address_space *mapping, in page_cache_tree_delete() argument 123 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); in page_cache_tree_delete() 126 mapping->nrshadows++; in page_cache_tree_delete() 135 mapping->nrpages--; in page_cache_tree_delete() 139 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; in page_cache_tree_delete() 149 radix_tree_tag_clear(&mapping->page_tree, index, tag); in page_cache_tree_delete() 158 if (__radix_tree_delete_node(&mapping->page_tree, node)) in page_cache_tree_delete() 170 node->private_data = mapping; in page_cache_tree_delete() 184 struct address_space *mapping = page->mapping; in __delete_from_page_cache() local 195 cleancache_invalidate_page(mapping, page); in __delete_from_page_cache() [all …]
|
D | page-writeback.c | 1519 static void balance_dirty_pages(struct address_space *mapping, in balance_dirty_pages() argument 1820 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited() argument 1822 struct inode *inode = mapping->host; in balance_dirty_pages_ratelimited() 1869 balance_dirty_pages(mapping, wb, current->nr_dirtied); in balance_dirty_pages_ratelimited() 2103 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument 2110 spin_lock_irq(&mapping->tree_lock); in tag_pages_for_writeback() 2111 tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, in tag_pages_for_writeback() 2114 spin_unlock_irq(&mapping->tree_lock); in tag_pages_for_writeback() 2144 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument 2162 writeback_index = mapping->writeback_index; /* prev offset */ in write_cache_pages() [all …]
|
D | fadvise.c | 32 struct address_space *mapping; in SYSCALL_DEFINE4() local 49 mapping = f.file->f_mapping; in SYSCALL_DEFINE4() 50 if (!mapping || len < 0) { in SYSCALL_DEFINE4() 78 bdi = inode_to_bdi(mapping->host); in SYSCALL_DEFINE4() 112 force_page_cache_readahead(mapping, f.file, start_index, in SYSCALL_DEFINE4() 118 if (!inode_write_congested(mapping->host)) in SYSCALL_DEFINE4() 119 __filemap_fdatawrite_range(mapping, offset, endbyte, in SYSCALL_DEFINE4() 131 unsigned long count = invalidate_mapping_pages(mapping, in SYSCALL_DEFINE4() 142 invalidate_mapping_pages(mapping, start_index, in SYSCALL_DEFINE4()
|
D | cleancache.c | 189 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_get_page() 193 if (cleancache_get_key(page->mapping->host, &key) < 0) in __cleancache_get_page() 227 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_put_page() 229 cleancache_get_key(page->mapping->host, &key) >= 0) { in __cleancache_put_page() 244 void __cleancache_invalidate_page(struct address_space *mapping, in __cleancache_invalidate_page() argument 248 int pool_id = mapping->host->i_sb->cleancache_poolid; in __cleancache_invalidate_page() 256 if (cleancache_get_key(mapping->host, &key) >= 0) { in __cleancache_invalidate_page() 274 void __cleancache_invalidate_inode(struct address_space *mapping) in __cleancache_invalidate_inode() argument 276 int pool_id = mapping->host->i_sb->cleancache_poolid; in __cleancache_invalidate_inode() 282 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) in __cleancache_invalidate_inode()
|
D | migrate.c | 312 int migrate_page_move_mapping(struct address_space *mapping, in migrate_page_move_mapping() argument 322 if (!mapping) { in migrate_page_move_mapping() 330 newpage->mapping = page->mapping; in migrate_page_move_mapping() 340 spin_lock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 342 pslot = radix_tree_lookup_slot(&mapping->page_tree, in migrate_page_move_mapping() 347 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { in migrate_page_move_mapping() 348 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 353 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 367 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 377 newpage->mapping = page->mapping; in migrate_page_move_mapping() [all …]
|
D | util.c | 330 unsigned long mapping; in __page_rmapping() local 332 mapping = (unsigned long)page->mapping; in __page_rmapping() 333 mapping &= ~PAGE_MAPPING_FLAGS; in __page_rmapping() 335 return (void *)mapping; in __page_rmapping() 347 unsigned long mapping; in page_anon_vma() local 350 mapping = (unsigned long)page->mapping; in page_anon_vma() 351 if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) in page_anon_vma() 358 unsigned long mapping; in page_mapping() local 371 mapping = (unsigned long)page->mapping; in page_mapping() 372 if (mapping & PAGE_MAPPING_FLAGS) in page_mapping() [all …]
|
D | workingset.c | 213 void *workingset_eviction(struct address_space *mapping, struct page *page) in workingset_eviction() argument 309 struct address_space *mapping; in shadow_lru_isolate() local 327 mapping = node->private_data; in shadow_lru_isolate() 330 if (!spin_trylock(&mapping->tree_lock)) { in shadow_lru_isolate() 354 BUG_ON(!mapping->nrshadows); in shadow_lru_isolate() 355 mapping->nrshadows--; in shadow_lru_isolate() 360 if (!__radix_tree_delete_node(&mapping->page_tree, node)) in shadow_lru_isolate() 363 spin_unlock(&mapping->tree_lock); in shadow_lru_isolate()
|
D | shmem.c | 255 static int shmem_radix_tree_replace(struct address_space *mapping, in shmem_radix_tree_replace() argument 263 pslot = radix_tree_lookup_slot(&mapping->page_tree, index); in shmem_radix_tree_replace() 266 item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock); in shmem_radix_tree_replace() 280 static bool shmem_confirm_swap(struct address_space *mapping, in shmem_confirm_swap() argument 286 item = radix_tree_lookup(&mapping->page_tree, index); in shmem_confirm_swap() 295 struct address_space *mapping, in shmem_add_to_page_cache() argument 304 page->mapping = mapping; in shmem_add_to_page_cache() 307 spin_lock_irq(&mapping->tree_lock); in shmem_add_to_page_cache() 309 error = radix_tree_insert(&mapping->page_tree, index, page); in shmem_add_to_page_cache() 311 error = shmem_radix_tree_replace(mapping, index, expected, in shmem_add_to_page_cache() [all …]
|
D | memory-failure.c | 84 struct address_space *mapping; in hwpoison_filter_dev() local 97 mapping = page_mapping(p); in hwpoison_filter_dev() 98 if (mapping == NULL || mapping->host == NULL) in hwpoison_filter_dev() 101 dev = mapping->host->i_sb->s_dev; in hwpoison_filter_dev() 446 struct address_space *mapping = page->mapping; in collect_procs_file() local 448 i_mmap_lock_read(mapping); in collect_procs_file() 456 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, in collect_procs_file() 470 i_mmap_unlock_read(mapping); in collect_procs_file() 484 if (!page->mapping) in collect_procs() 577 struct address_space *mapping; in me_pagecache_clean() local [all …]
|
D | page_io.c | 136 struct address_space *mapping = swap_file->f_mapping; in generic_swapfile_activate() local 137 struct inode *inode = mapping->host; in generic_swapfile_activate() 262 struct address_space *mapping = swap_file->f_mapping; in __swap_writepage() local 276 ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos); in __swap_writepage() 340 struct address_space *mapping = swap_file->f_mapping; in swap_readpage() local 342 ret = mapping->a_ops->readpage(swap_file, page); in swap_readpage() 372 struct address_space *mapping = sis->swap_file->f_mapping; in swap_set_page_dirty() local 373 return mapping->a_ops->set_page_dirty(page); in swap_set_page_dirty()
|
D | vmscan.c | 509 static void handle_write_error(struct address_space *mapping, in handle_write_error() argument 513 if (page_mapping(page) == mapping) in handle_write_error() 514 mapping_set_error(mapping, error); in handle_write_error() 534 static pageout_t pageout(struct page *page, struct address_space *mapping, in pageout() argument 555 if (!mapping) { in pageout() 569 if (mapping->a_ops->writepage == NULL) in pageout() 571 if (!may_write_to_inode(mapping->host, sc)) in pageout() 585 res = mapping->a_ops->writepage(page, &wbc); in pageout() 587 handle_write_error(mapping, page, res); in pageout() 609 static int __remove_mapping(struct address_space *mapping, struct page *page, in __remove_mapping() argument [all …]
|
D | mincore.c | 49 static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) in mincore_page() argument 61 if (shmem_mapping(mapping)) { in mincore_page() 62 page = find_get_entry(mapping, pgoff); in mincore_page() 72 page = find_get_page(mapping, pgoff); in mincore_page() 74 page = find_get_page(mapping, pgoff); in mincore_page()
|
D | rmap.c | 464 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma() 508 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read() 706 } else if (page->mapping) { in page_address_in_vma() 707 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma() 1021 struct address_space *mapping; in page_mkclean() local 1033 mapping = page_mapping(page); in page_mkclean() 1034 if (!mapping) in page_mkclean() 1069 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap() 1098 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap() 1625 struct address_space *mapping = page->mapping; in rmap_walk_file() local [all …]
|
D | mmap.c | 239 struct file *file, struct address_space *mapping) in __remove_shared_vm_struct() argument 244 mapping_unmap_writable(mapping); in __remove_shared_vm_struct() 246 flush_dcache_mmap_lock(mapping); in __remove_shared_vm_struct() 247 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct() 248 flush_dcache_mmap_unlock(mapping); in __remove_shared_vm_struct() 260 struct address_space *mapping = file->f_mapping; in unlink_file_vma() local 261 i_mmap_lock_write(mapping); in unlink_file_vma() 262 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma() 263 i_mmap_unlock_write(mapping); in unlink_file_vma() 646 struct address_space *mapping = file->f_mapping; in __vma_link_file() local [all …]
|
/linux-4.4.14/include/linux/ |
D | pagemap.h | 30 static inline void mapping_set_error(struct address_space *mapping, int error) in mapping_set_error() argument 34 set_bit(AS_ENOSPC, &mapping->flags); in mapping_set_error() 36 set_bit(AS_EIO, &mapping->flags); in mapping_set_error() 40 static inline void mapping_set_unevictable(struct address_space *mapping) in mapping_set_unevictable() argument 42 set_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_set_unevictable() 45 static inline void mapping_clear_unevictable(struct address_space *mapping) in mapping_clear_unevictable() argument 47 clear_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_clear_unevictable() 50 static inline int mapping_unevictable(struct address_space *mapping) in mapping_unevictable() argument 52 if (mapping) in mapping_unevictable() 53 return test_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_unevictable() [all …]
|
D | io-mapping.h | 76 io_mapping_free(struct io_mapping *mapping) in io_mapping_free() argument 78 iomap_free(mapping->base, mapping->size); in io_mapping_free() 79 kfree(mapping); in io_mapping_free() 84 io_mapping_map_atomic_wc(struct io_mapping *mapping, in io_mapping_map_atomic_wc() argument 90 BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc() 91 phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc() 93 return iomap_atomic_prot_pfn(pfn, mapping->prot); in io_mapping_map_atomic_wc() 103 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) in io_mapping_map_wc() argument 107 BUG_ON(offset >= mapping->size); in io_mapping_map_wc() 108 phys_addr = mapping->base + offset; in io_mapping_map_wc() [all …]
|
D | cleancache.h | 53 return page->mapping->host->i_sb->cleancache_poolid >= 0; in cleancache_fs_enabled() 55 static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) in cleancache_fs_enabled_mapping() argument 57 return mapping->host->i_sb->cleancache_poolid >= 0; in cleancache_fs_enabled_mapping() 105 static inline void cleancache_invalidate_page(struct address_space *mapping, in cleancache_invalidate_page() argument 109 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) in cleancache_invalidate_page() 110 __cleancache_invalidate_page(mapping, page); in cleancache_invalidate_page() 113 static inline void cleancache_invalidate_inode(struct address_space *mapping) in cleancache_invalidate_inode() argument 115 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) in cleancache_invalidate_inode() 116 __cleancache_invalidate_inode(mapping); in cleancache_invalidate_inode()
|
D | shmem_fs.h | 53 extern bool shmem_mapping(struct address_space *mapping); 54 extern void shmem_unlock_mapping(struct address_space *mapping); 55 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 61 struct address_space *mapping, pgoff_t index) in shmem_read_mapping_page() argument 63 return shmem_read_mapping_page_gfp(mapping, index, in shmem_read_mapping_page() 64 mapping_gfp_mask(mapping)); in shmem_read_mapping_page()
|
D | pagevec.h | 26 struct address_space *mapping, 30 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, 33 struct address_space *mapping, pgoff_t *index, int tag,
|
D | writeback.h | 367 void balance_dirty_pages_ratelimited(struct address_space *mapping); 373 int generic_writepages(struct address_space *mapping, 375 void tag_pages_for_writeback(struct address_space *mapping, 377 int write_cache_pages(struct address_space *mapping, 380 int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 382 void tag_pages_for_writeback(struct address_space *mapping,
|
D | mpage.h | 16 int mpage_readpages(struct address_space *mapping, struct list_head *pages, 19 int mpage_writepages(struct address_space *mapping,
|
D | migrate.h | 40 extern int migrate_huge_page_move_mapping(struct address_space *mapping, 42 extern int migrate_page_move_mapping(struct address_space *mapping, 60 static inline int migrate_huge_page_move_mapping(struct address_space *mapping, in migrate_huge_page_move_mapping() argument
|
D | fs.h | 378 int (*readpages)(struct file *filp, struct address_space *mapping, 381 int (*write_begin)(struct file *, struct address_space *mapping, 384 int (*write_end)(struct file *, struct address_space *mapping, 418 int pagecache_write_begin(struct file *, struct address_space *mapping, 422 int pagecache_write_end(struct file *, struct address_space *mapping, 495 int mapping_tagged(struct address_space *mapping, int tag); 497 static inline void i_mmap_lock_write(struct address_space *mapping) in i_mmap_lock_write() argument 499 down_write(&mapping->i_mmap_rwsem); in i_mmap_lock_write() 502 static inline void i_mmap_unlock_write(struct address_space *mapping) in i_mmap_unlock_write() argument 504 up_write(&mapping->i_mmap_rwsem); in i_mmap_unlock_write() [all …]
|
/linux-4.4.14/drivers/gpu/drm/exynos/ |
D | exynos_drm_iommu.c | 31 struct dma_iommu_mapping *mapping = NULL; in drm_create_iommu_mapping() local 40 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, in drm_create_iommu_mapping() 43 if (IS_ERR(mapping)) in drm_create_iommu_mapping() 44 return PTR_ERR(mapping); in drm_create_iommu_mapping() 52 dev->archdata.mapping = mapping; in drm_create_iommu_mapping() 56 arm_iommu_release_mapping(mapping); in drm_create_iommu_mapping() 72 arm_iommu_release_mapping(dev->archdata.mapping); in drm_release_iommu_mapping() 90 if (!dev->archdata.mapping) in drm_iommu_attach_device() 101 if (subdrv_dev->archdata.mapping) in drm_iommu_attach_device() 104 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); in drm_iommu_attach_device() [all …]
|
/linux-4.4.14/drivers/media/usb/uvc/ |
D | uvc_ctrl.c | 368 static __s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_get_zoom() argument 386 static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_set_zoom() argument 393 static __s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_get_rel_speed() argument 396 unsigned int first = mapping->offset / 8; in uvc_ctrl_get_rel_speed() 413 static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_set_rel_speed() argument 416 unsigned int first = mapping->offset / 8; in uvc_ctrl_set_rel_speed() 767 static __s32 uvc_get_le_value(struct uvc_control_mapping *mapping, in uvc_get_le_value() argument 770 int bits = mapping->size; in uvc_get_le_value() 771 int offset = mapping->offset; in uvc_get_le_value() 788 if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) in uvc_get_le_value() [all …]
|
/linux-4.4.14/arch/arm/mm/ |
D | dma-mapping.c | 1030 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); 1032 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument 1038 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova() 1049 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova() 1050 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova() 1051 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova() 1052 mapping->bits, 0, count, align); in __alloc_iova() 1054 if (start > mapping->bits) in __alloc_iova() 1057 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova() 1066 if (i == mapping->nr_bitmaps) { in __alloc_iova() [all …]
|
D | flush.c | 202 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 236 if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_page() 241 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) in __flush_dcache_aliases() argument 255 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases() 256 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in __flush_dcache_aliases() 269 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases() 277 struct address_space *mapping; in __sync_icache_dcache() local 288 mapping = page_mapping(page); in __sync_icache_dcache() 290 mapping = NULL; in __sync_icache_dcache() 293 __flush_dcache_page(mapping, page); in __sync_icache_dcache() [all …]
|
D | fault-armv.c | 132 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, in make_coherent() argument 148 flush_dcache_mmap_lock(mapping); in make_coherent() 149 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in make_coherent() 162 flush_dcache_mmap_unlock(mapping); in make_coherent() 184 struct address_space *mapping; in update_mmu_cache() local 198 mapping = page_mapping(page); in update_mmu_cache() 200 __flush_dcache_page(mapping, page); in update_mmu_cache() 201 if (mapping) { in update_mmu_cache() 203 make_coherent(mapping, vma, addr, ptep, pfn); in update_mmu_cache()
|
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vm.c | 705 struct amdgpu_bo_va_mapping *mapping, in amdgpu_vm_bo_update_mapping() argument 719 if (!(mapping->flags & AMDGPU_PTE_READABLE)) in amdgpu_vm_bo_update_mapping() 721 if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) in amdgpu_vm_bo_update_mapping() 724 trace_amdgpu_vm_bo_update(mapping); in amdgpu_vm_bo_update_mapping() 726 nptes = mapping->it.last - mapping->it.start + 1; in amdgpu_vm_bo_update_mapping() 772 r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start, in amdgpu_vm_bo_update_mapping() 773 mapping->it.last + 1, addr + mapping->offset, in amdgpu_vm_bo_update_mapping() 826 struct amdgpu_bo_va_mapping *mapping; in amdgpu_vm_bo_update() local 846 list_for_each_entry(mapping, &bo_va->invalids, list) { in amdgpu_vm_bo_update() 847 r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr, in amdgpu_vm_bo_update() [all …]
|
D | amdgpu_trace.h | 119 struct amdgpu_bo_va_mapping *mapping), 120 TP_ARGS(bo_va, mapping), 131 __entry->start = mapping->it.start; 132 __entry->last = mapping->it.last; 133 __entry->offset = mapping->offset; 134 __entry->flags = mapping->flags; 143 struct amdgpu_bo_va_mapping *mapping), 144 TP_ARGS(bo_va, mapping), 155 __entry->start = mapping->it.start; 156 __entry->last = mapping->it.last; [all …]
|
/linux-4.4.14/drivers/net/wireless/mwifiex/ |
D | util.h | 69 struct mwifiex_dma_mapping *mapping) in mwifiex_store_mapping() argument 73 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); in mwifiex_store_mapping() 77 struct mwifiex_dma_mapping *mapping) in mwifiex_get_mapping() argument 81 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); in mwifiex_get_mapping() 86 struct mwifiex_dma_mapping mapping; in MWIFIEX_SKB_DMA_ADDR() local 88 mwifiex_get_mapping(skb, &mapping); in MWIFIEX_SKB_DMA_ADDR() 90 return mapping.addr; in MWIFIEX_SKB_DMA_ADDR()
|
/linux-4.4.14/arch/nios2/mm/ |
D | cacheflush.c | 73 static void flush_aliases(struct address_space *mapping, struct page *page) in flush_aliases() argument 81 flush_dcache_mmap_lock(mapping); in flush_aliases() 82 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in flush_aliases() 94 flush_dcache_mmap_unlock(mapping); in flush_aliases() 160 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 174 struct address_space *mapping; in flush_dcache_page() local 183 mapping = page_mapping(page); in flush_dcache_page() 186 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page() 189 __flush_dcache_page(mapping, page); in flush_dcache_page() 190 if (mapping) { in flush_dcache_page() [all …]
|
/linux-4.4.14/fs/gfs2/ |
D | aops.c | 100 struct inode *inode = page->mapping->host; in gfs2_writepage_common() 114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in gfs2_writepage_common() 156 struct inode *inode = page->mapping->host; in __gfs2_jdata_writepage() 182 struct inode *inode = page->mapping->host; in gfs2_jdata_writepage() 215 static int gfs2_writepages(struct address_space *mapping, in gfs2_writepages() argument 218 return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); in gfs2_writepages() 233 static int gfs2_write_jdata_pagevec(struct address_space *mapping, in gfs2_write_jdata_pagevec() argument 239 struct inode *inode = mapping->host; in gfs2_write_jdata_pagevec() 272 if (unlikely(page->mapping != mapping)) { in gfs2_write_jdata_pagevec() 344 static int gfs2_write_cache_jdata(struct address_space *mapping, in gfs2_write_cache_jdata() argument [all …]
|
D | meta_io.h | 43 static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) in gfs2_mapping2sbd() argument 45 struct inode *inode = mapping->host; in gfs2_mapping2sbd() 46 if (mapping->a_ops == &gfs2_meta_aops) in gfs2_mapping2sbd() 47 return (((struct gfs2_glock *)mapping) - 1)->gl_name.ln_sbd; in gfs2_mapping2sbd() 48 else if (mapping->a_ops == &gfs2_rgrp_aops) in gfs2_mapping2sbd() 49 return container_of(mapping, struct gfs2_sbd, sd_aspace); in gfs2_mapping2sbd()
|
D | meta_io.c | 116 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_getbuf() local 124 if (mapping == NULL) in gfs2_getbuf() 125 mapping = &sdp->sd_aspace; in gfs2_getbuf() 133 page = grab_cache_page(mapping, index); in gfs2_getbuf() 139 page = find_get_page_flags(mapping, index, in gfs2_getbuf() 266 struct address_space *mapping = bh->b_page->mapping; in gfs2_remove_from_journal() local 267 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); in gfs2_remove_from_journal()
|
D | glops.c | 39 bh->b_page->mapping, bh->b_page->flags); in gfs2_ail_error() 145 struct address_space *mapping = &sdp->sd_aspace; in rgrp_go_sync() local 160 filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync() 161 error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync() 162 mapping_set_error(mapping, error); in rgrp_go_sync() 185 struct address_space *mapping = &sdp->sd_aspace; in rgrp_go_inval() local 193 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_inval() 226 struct address_space *mapping = ip->i_inode.i_mapping; in inode_go_sync() local 227 filemap_fdatawrite(mapping); in inode_go_sync() 228 error = filemap_fdatawait(mapping); in inode_go_sync() [all …]
|
/linux-4.4.14/fs/hpfs/ |
D | file.c | 127 static int hpfs_readpages(struct file *file, struct address_space *mapping, in hpfs_readpages() argument 130 return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); in hpfs_readpages() 133 static int hpfs_writepages(struct address_space *mapping, in hpfs_writepages() argument 136 return mpage_writepages(mapping, wbc, hpfs_get_block); in hpfs_writepages() 139 static void hpfs_write_failed(struct address_space *mapping, loff_t to) in hpfs_write_failed() argument 141 struct inode *inode = mapping->host; in hpfs_write_failed() 153 static int hpfs_write_begin(struct file *file, struct address_space *mapping, in hpfs_write_begin() argument 160 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in hpfs_write_begin() 162 &hpfs_i(mapping->host)->mmu_private); in hpfs_write_begin() 164 hpfs_write_failed(mapping, pos + len); in hpfs_write_begin() [all …]
|
/linux-4.4.14/arch/unicore32/mm/ |
D | flush.c | 61 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 77 struct address_space *mapping; in flush_dcache_page() local 86 mapping = page_mapping(page); in flush_dcache_page() 88 if (mapping && !mapping_mapped(mapping)) in flush_dcache_page() 91 __flush_dcache_page(mapping, page); in flush_dcache_page() 92 if (mapping) in flush_dcache_page()
|
/linux-4.4.14/drivers/sh/clk/ |
D | core.c | 339 struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local 344 if (!mapping) { in clk_establish_mapping() 351 clk->mapping = &dummy_mapping; in clk_establish_mapping() 360 mapping = clkp->mapping; in clk_establish_mapping() 361 BUG_ON(!mapping); in clk_establish_mapping() 367 if (!mapping->base && mapping->phys) { in clk_establish_mapping() 368 kref_init(&mapping->ref); in clk_establish_mapping() 370 mapping->base = ioremap_nocache(mapping->phys, mapping->len); in clk_establish_mapping() 371 if (unlikely(!mapping->base)) in clk_establish_mapping() 373 } else if (mapping->base) { in clk_establish_mapping() [all …]
|
D | cpg.c | 414 value = __raw_readl(clk->mapping->base); in fsidiv_recalc() 430 __raw_writel(0, clk->mapping->base); in fsidiv_disable() 437 value = __raw_readl(clk->mapping->base) >> 16; in fsidiv_enable() 441 __raw_writel((value << 16) | 0x3, clk->mapping->base); in fsidiv_enable() 452 __raw_writel(0, clk->mapping->base); in fsidiv_set_rate() 454 __raw_writel(idx << 16, clk->mapping->base); in fsidiv_set_rate() 486 clks[i].mapping = map; in sh_clk_fsidiv_register()
|
/linux-4.4.14/fs/9p/ |
D | vfs_addr.c | 54 struct inode *inode = page->mapping->host; in v9fs_fid_readpage() 111 static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, in v9fs_vfs_readpages() argument 117 inode = mapping->host; in v9fs_vfs_readpages() 120 ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); in v9fs_vfs_readpages() 124 ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp); in v9fs_vfs_readpages() 162 struct inode *inode = page->mapping->host; in v9fs_vfs_writepage_locked() 203 mapping_set_error(page->mapping, retval); in v9fs_vfs_writepage() 220 struct inode *inode = page->mapping->host; in v9fs_launder_page() 267 static int v9fs_write_begin(struct file *filp, struct address_space *mapping, in v9fs_write_begin() argument 275 struct inode *inode = mapping->host; in v9fs_write_begin() [all …]
|
D | cache.h | 50 struct address_space *mapping, 75 struct address_space *mapping, in v9fs_readpages_from_fscache() argument 79 return __v9fs_readpages_from_fscache(inode, mapping, pages, in v9fs_readpages_from_fscache() 131 struct address_space *mapping, in v9fs_readpages_from_fscache() argument
|
D | cache.c | 282 struct inode *inode = page->mapping->host; in __v9fs_fscache_release_page() 292 struct inode *inode = page->mapping->host; in __v9fs_fscache_invalidate_page() 356 struct address_space *mapping, in __v9fs_readpages_from_fscache() argument 368 mapping, pages, nr_pages, in __v9fs_readpages_from_fscache() 371 mapping_gfp_mask(mapping)); in __v9fs_readpages_from_fscache()
|
/linux-4.4.14/fs/ |
D | buffer.c | 358 set_bit(AS_EIO, &page->mapping->flags); in end_buffer_async_write() 574 int sync_mapping_buffers(struct address_space *mapping) in sync_mapping_buffers() argument 576 struct address_space *buffer_mapping = mapping->private_data; in sync_mapping_buffers() 578 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) in sync_mapping_buffers() 582 &mapping->private_list); in sync_mapping_buffers() 605 struct address_space *mapping = inode->i_mapping; in mark_buffer_dirty_inode() local 606 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode() 609 if (!mapping->private_data) { in mark_buffer_dirty_inode() 610 mapping->private_data = buffer_mapping; in mark_buffer_dirty_inode() 612 BUG_ON(mapping->private_data != buffer_mapping); in mark_buffer_dirty_inode() [all …]
|
D | dax.c | 220 struct address_space *mapping = inode->i_mapping; in dax_do_io() local 222 retval = filemap_write_and_wait_range(mapping, pos, end - 1); in dax_do_io() 256 static int dax_load_hole(struct address_space *mapping, struct page *page, in dax_load_hole() argument 260 struct inode *inode = mapping->host; in dax_load_hole() 262 page = find_or_create_page(mapping, vmf->pgoff, in dax_load_hole() 295 struct address_space *mapping = inode->i_mapping; in dax_insert_mapping() local 303 i_mmap_lock_read(mapping); in dax_insert_mapping() 334 i_mmap_unlock_read(mapping); in dax_insert_mapping() 359 struct address_space *mapping = file->f_mapping; in __dax_fault() local 360 struct inode *inode = mapping->host; in __dax_fault() [all …]
|
D | mpage.c | 100 struct inode *inode = page->mapping->host; in map_buffer_to_page() 145 struct inode *inode = page->mapping->host; in do_mpage_readpage() 356 mpage_readpages(struct address_space *mapping, struct list_head *pages, in mpage_readpages() argument 364 gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); in mpage_readpages() 373 if (!add_to_page_cache_lru(page, mapping, in mpage_readpages() 400 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); in mpage_readpage() 470 struct address_space *mapping = page->mapping; in __mpage_writepage() local 471 struct inode *inode = page->mapping->host; in __mpage_writepage() 649 ret = mapping->a_ops->writepage(page, wbc); in __mpage_writepage() 657 mapping_set_error(mapping, ret); in __mpage_writepage() [all …]
|
D | sync.c | 287 struct address_space *mapping; in SYSCALL_DEFINE4() local 337 mapping = f.file->f_mapping; in SYSCALL_DEFINE4() 338 if (!mapping) { in SYSCALL_DEFINE4() 345 ret = filemap_fdatawait_range(mapping, offset, endbyte); in SYSCALL_DEFINE4() 351 ret = __filemap_fdatawrite_range(mapping, offset, endbyte, in SYSCALL_DEFINE4() 358 ret = filemap_fdatawait_range(mapping, offset, endbyte); in SYSCALL_DEFINE4()
|
D | inode.c | 133 struct address_space *const mapping = &inode->i_data; in inode_init_always() local 167 mapping->a_ops = &empty_aops; in inode_init_always() 168 mapping->host = inode; in inode_init_always() 169 mapping->flags = 0; in inode_init_always() 170 atomic_set(&mapping->i_mmap_writable, 0); in inode_init_always() 171 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); in inode_init_always() 172 mapping->private_data = NULL; in inode_init_always() 173 mapping->writeback_index = 0; in inode_init_always() 175 inode->i_mapping = mapping; in inode_init_always() 338 void address_space_init_once(struct address_space *mapping) in address_space_init_once() argument [all …]
|
D | block_dev.c | 76 struct address_space *mapping = bdev->bd_inode->i_mapping; in kill_bdev() local 78 if (mapping->nrpages == 0 && mapping->nrshadows == 0) in kill_bdev() 82 truncate_inode_pages(mapping, 0); in kill_bdev() 89 struct address_space *mapping = bdev->bd_inode->i_mapping; in invalidate_bdev() local 91 if (mapping->nrpages == 0) in invalidate_bdev() 96 invalidate_mapping_pages(mapping, 0, -1); in invalidate_bdev() 100 cleancache_invalidate_inode(mapping); in invalidate_bdev() 307 static int blkdev_readpages(struct file *file, struct address_space *mapping, in blkdev_readpages() argument 310 return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block); in blkdev_readpages() 313 static int blkdev_write_begin(struct file *file, struct address_space *mapping, in blkdev_write_begin() argument [all …]
|
D | splice.c | 47 struct address_space *mapping; in page_cache_pipe_buf_steal() local 51 mapping = page_mapping(page); in page_cache_pipe_buf_steal() 52 if (mapping) { in page_cache_pipe_buf_steal() 73 if (remove_mapping(mapping, page)) { in page_cache_pipe_buf_steal() 112 if (!page->mapping) { in page_cache_pipe_buf_confirm() 311 struct address_space *mapping = in->f_mapping; in __generic_file_splice_read() local 339 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages); in __generic_file_splice_read() 347 page_cache_sync_readahead(mapping, &in->f_ra, in, in __generic_file_splice_read() 356 page = find_get_page(mapping, index); in __generic_file_splice_read() 361 page = page_cache_alloc_cold(mapping); in __generic_file_splice_read() [all …]
|
/linux-4.4.14/net/rds/ |
D | iw_rdma.c | 51 struct rds_iw_mapping mapping; member 371 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list); 372 list_del_init(&ibmr->mapping.m_list); 420 spin_lock_init(&ibmr->mapping.m_lock); 421 INIT_LIST_HEAD(&ibmr->mapping.m_list); 422 ibmr->mapping.m_mr = ibmr; 447 ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list, 448 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL); 451 ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list, 452 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL); [all …]
|
/linux-4.4.14/Documentation/ |
D | io-mapping.txt | 1 The io_mapping functions in linux/io-mapping.h provide an abstraction for 2 efficiently mapping small regions of an I/O device to the CPU. The initial 7 A mapping object is created during driver initialization using 13 mappable, while 'size' indicates how large a mapping region to 16 This _wc variant provides a mapping which may only be used 19 With this mapping object, individual pages can be mapped either atomically 23 void *io_mapping_map_atomic_wc(struct io_mapping *mapping, 26 'offset' is the offset within the defined mapping region. 48 void *io_mapping_map_wc(struct io_mapping *mapping, 61 void io_mapping_free(struct io_mapping *mapping) [all …]
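A minimal usage sketch of the io_mapping interface quoted above, following the prototypes listed in io-mapping.txt and io-mapping.h; the driver-side names (fb_map, example_*) and the BAR parameters are invented for illustration:

    #include <linux/io.h>
    #include <linux/io-mapping.h>

    /* Hypothetical driver state: one write-combining mapping of a device BAR. */
    static struct io_mapping *fb_map;

    static int example_init(resource_size_t bar_base, unsigned long bar_size)
    {
        fb_map = io_mapping_create_wc(bar_base, bar_size);
        return fb_map ? 0 : -ENOMEM;
    }

    static void example_write_dword(unsigned long offset, u32 val)
    {
        /* Atomic (non-sleeping) mapping of the page containing 'offset'. */
        void __iomem *vaddr = io_mapping_map_atomic_wc(fb_map, offset & PAGE_MASK);

        writel(val, vaddr + (offset & ~PAGE_MASK));
        io_mapping_unmap_atomic(vaddr);
    }

    static void example_exit(void)
    {
        io_mapping_free(fb_map);
    }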
|
D | nommu-mmap.txt | 5 The kernel has limited support for memory mapping under no-MMU conditions, such 7 mapping is made use of in conjunction with the mmap() system call, the shmat() 9 mapping is actually performed by the binfmt drivers, which call back into the 12 Memory mapping behaviour also involves the way fork(), vfork(), clone() and 19 (*) Anonymous mapping, MAP_PRIVATE 27 (*) Anonymous mapping, MAP_SHARED 37 the underlying file are reflected in the mapping; copied across fork. 41 - If one exists, the kernel will re-use an existing mapping to the 45 - If possible, the file mapping will be directly on the backing device 47 appropriate mapping protection capabilities. Ramfs, romfs, cramfs [all …]
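For orientation, the private anonymous mapping case discussed in this document is requested from userspace with an ordinary mmap() call; a minimal hosted (non-kernel) example, with the length and contents chosen arbitrarily:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;

        /* Private anonymous mapping (the MAP_PRIVATE case from the text above). */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        strcpy(p, "hello");
        printf("%s\n", p);

        munmap(p, len);
        return 0;
    }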
|
D | IRQ-domain.txt | 1 irq_domain interrupt number mapping library 26 irq numbers, but they don't provide any support for reverse mapping of 30 The irq_domain library adds mapping between hwirq and IRQ numbers on 31 top of the irq_alloc_desc*() API. An irq_domain to manage mapping is 33 reverse mapping scheme. 41 calling one of the irq_domain_add_*() functions (each mapping method 49 hwirq number as arguments. If a mapping for the hwirq doesn't already 66 There are several mechanisms available for reverse mapping from hwirq 98 Very few drivers should need this mapping. 103 The No Map mapping is to be used when the hwirq number is [all …]
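A hedged sketch of the hwirq-to-Linux-IRQ mapping flow described above, using the linear reverse-map variant; irq_domain_add_linear(), irq_create_mapping() and the .map callback are the standard irq_domain entry points, while the controller name, chip and hwirq count are made up for the example:

    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/of.h>

    #define DEMO_NR_HWIRQS  32              /* illustrative hwirq count */

    static struct irq_chip demo_irq_chip;   /* assume mask/unmask ops elsewhere */

    /* Called when a mapping for a hwirq is first created in this domain. */
    static int demo_irq_map(struct irq_domain *d, unsigned int virq,
                            irq_hw_number_t hwirq)
    {
        irq_set_chip_and_handler(virq, &demo_irq_chip, handle_level_irq);
        return 0;
    }

    static const struct irq_domain_ops demo_domain_ops = {
        .map   = demo_irq_map,
        .xlate = irq_domain_xlate_onecell,
    };

    static int demo_probe(struct device_node *np)
    {
        struct irq_domain *domain;
        unsigned int virq;

        domain = irq_domain_add_linear(np, DEMO_NR_HWIRQS,
                                       &demo_domain_ops, NULL);
        if (!domain)
            return -ENOMEM;

        /* Forward map: hwirq 5 -> dynamically allocated Linux irq number. */
        virq = irq_create_mapping(domain, 5);

        return virq ? 0 : -EINVAL;
    }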
|
D | DMA-attributes.txt | 28 DMA_ATTR_WEAK_ORDERING specifies that reads and writes to the mapping 38 DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be 57 virtual mapping for the allocated buffer. On some architectures creating 58 such mapping is non-trivial task and consumes very limited resources 77 having a mapping created separately for each device and is usually 92 device domain after releasing a mapping for it. Use this attribute with 98 By default DMA-mapping subsystem is allowed to assemble the buffer
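A sketch of how one of these attributes is passed down in this kernel generation (4.4 still takes a struct dma_attrs rather than a plain flags word); the wrapper names and the choice of DMA_ATTR_WRITE_COMBINE are illustrative only:

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static void *demo_alloc_wc(struct device *dev, size_t size, dma_addr_t *handle)
    {
        DEFINE_DMA_ATTRS(attrs);

        /* Ask for a write-combining CPU mapping, as described above. */
        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

        return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }

    static void demo_free_wc(struct device *dev, size_t size, void *cpu_addr,
                             dma_addr_t handle)
    {
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
        dma_free_attrs(dev, size, cpu_addr, handle, &attrs);
    }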
|
D | DMA-API-HOWTO.txt | 1 Dynamic DMA mapping Guide 51 | | mapping | | by host | | 59 | | mapping | RAM | by IOMMU 84 mapping and returns the DMA address Z. The driver then tells the device to 88 So that Linux can use the dynamic DMA mapping, it needs some help from the 103 #include <linux/dma-mapping.h> 107 everywhere you hold a DMA address returned from the DMA mapping functions. 112 be used with the DMA mapping facilities. There has been an unwritten 369 The interfaces for using this type of mapping were designed in 374 Neither type of DMA mapping has alignment restrictions that come from [all …]
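The HOWTO above covers the streaming and coherent mapping interfaces from <linux/dma-mapping.h>; a minimal streaming-mapping sketch, with a hypothetical device/buffer pair and the hardware-specific transfer step omitted, looks like this:

    #include <linux/dma-mapping.h>

    /* Map a driver-owned buffer for a single device-bound transfer. */
    static int demo_send(struct device *dev, void *buf, size_t len)
    {
        dma_addr_t dma_handle;

        dma_handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma_handle))
            return -ENOMEM;

        /*
         * Hand dma_handle to the device here (hardware-specific, omitted),
         * then tear the mapping down once the transfer has completed.
         */

        dma_unmap_single(dev, dma_handle, len, DMA_TO_DEVICE);
        return 0;
    }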
|
/linux-4.4.14/fs/ecryptfs/ |
D | mmap.c | 148 page_virt, page->mapping->host); in ecryptfs_copy_up_encrypted_with_header() 170 crypt_stat->extent_size, page->mapping->host); in ecryptfs_copy_up_encrypted_with_header() 197 &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; in ecryptfs_readpage() 203 page->mapping->host); in ecryptfs_readpage() 220 page->mapping->host); in ecryptfs_readpage() 251 struct inode *inode = page->mapping->host; in fill_zeros_to_end_of_page() 279 struct address_space *mapping, in ecryptfs_write_begin() argument 288 page = grab_cache_page_write_begin(mapping, index, flags); in ecryptfs_write_begin() 296 &ecryptfs_inode_to_private(mapping->host)->crypt_stat; in ecryptfs_write_begin() 300 page, index, 0, PAGE_CACHE_SIZE, mapping->host); in ecryptfs_write_begin() [all …]
|
/linux-4.4.14/fs/f2fs/ |
D | data.c | 71 set_bit(AS_EIO, &page->mapping->flags); in f2fs_write_end_io() 281 struct address_space *mapping = inode->i_mapping; in get_read_data_page() local 294 return read_mapping_page(mapping, index, NULL); in get_read_data_page() 296 page = f2fs_grab_cache_page(mapping, index, for_write); in get_read_data_page() 348 struct address_space *mapping = inode->i_mapping; in find_data_page() local 351 page = find_get_page(mapping, index); in find_data_page() 379 struct address_space *mapping = inode->i_mapping; in get_lock_data_page() local 392 if (unlikely(page->mapping != mapping)) { in get_lock_data_page() 411 struct address_space *mapping = inode->i_mapping; in get_new_data_page() local 416 page = f2fs_grab_cache_page(mapping, index, true); in get_new_data_page() [all …]
|
D | checkpoint.c | 34 struct address_space *mapping = META_MAPPING(sbi); in grab_meta_page() local 37 page = grab_cache_page(mapping, index); in grab_meta_page() 53 struct address_space *mapping = META_MAPPING(sbi); in __get_meta_page() local 66 page = grab_cache_page(mapping, index); in __get_meta_page() 82 if (unlikely(page->mapping != mapping)) { in __get_meta_page() 249 static int f2fs_write_meta_pages(struct address_space *mapping, in f2fs_write_meta_pages() argument 252 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_meta_pages() 255 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages() 278 struct address_space *mapping = META_MAPPING(sbi); in sync_meta_pages() local 290 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, in sync_meta_pages() [all …]
|
/linux-4.4.14/fs/nilfs2/ |
D | page.c | 62 struct address_space *mapping, in nilfs_grab_buffer() argument 71 page = grab_cache_page(mapping, index); in nilfs_grab_buffer() 178 m = page->mapping; in nilfs_page_bug() 352 page->mapping = NULL; in nilfs_copy_back_pages() 355 page->mapping = dmap; in nilfs_copy_back_pages() 377 void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) in nilfs_clear_dirty_pages() argument 385 while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, in nilfs_clear_dirty_pages() 406 struct inode *inode = page->mapping->host; in nilfs_clear_dirty_page() 460 void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) in nilfs_mapping_init() argument 462 mapping->host = inode; in nilfs_mapping_init() [all …]
|
D | dir.c | 85 struct address_space *mapping, in nilfs_commit_chunk() argument 88 struct inode *dir = mapping->host; in nilfs_commit_chunk() 95 copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); in nilfs_commit_chunk() 107 struct inode *dir = page->mapping->host; in nilfs_check_page() 182 struct address_space *mapping = dir->i_mapping; in nilfs_get_page() local 183 struct page *page = read_mapping_page(mapping, n, NULL); in nilfs_get_page() 415 struct address_space *mapping = page->mapping; in nilfs_set_link() local 423 nilfs_commit_chunk(page, mapping, from, to); in nilfs_set_link() 514 nilfs_commit_chunk(page, page->mapping, from, to); in nilfs_add_link() 533 struct address_space *mapping = page->mapping; in nilfs_delete_entry() local [all …]
|
D | btnode.c | 144 struct address_space *mapping; in nilfs_btnode_delete() local 155 mapping = page->mapping; in nilfs_btnode_delete() 159 if (!still_dirty && mapping) in nilfs_btnode_delete() 160 invalidate_inode_pages2_range(mapping, index, index); in nilfs_btnode_delete()
|
D | inode.c | 169 static int nilfs_readpages(struct file *file, struct address_space *mapping, in nilfs_readpages() argument 172 return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); in nilfs_readpages() 175 static int nilfs_writepages(struct address_space *mapping, in nilfs_writepages() argument 178 struct inode *inode = mapping->host; in nilfs_writepages() 182 nilfs_clear_dirty_pages(mapping, false); in nilfs_writepages() 195 struct inode *inode = page->mapping->host; in nilfs_writepage() 225 struct inode *inode = page->mapping->host; in nilfs_set_page_dirty() 259 void nilfs_write_failed(struct address_space *mapping, loff_t to) in nilfs_write_failed() argument 261 struct inode *inode = mapping->host; in nilfs_write_failed() 269 static int nilfs_write_begin(struct file *file, struct address_space *mapping, in nilfs_write_begin() argument [all …]
|
/linux-4.4.14/fs/jfs/ |
D | inode.c | 288 static int jfs_writepages(struct address_space *mapping, in jfs_writepages() argument 291 return mpage_writepages(mapping, wbc, jfs_get_block); in jfs_writepages() 299 static int jfs_readpages(struct file *file, struct address_space *mapping, in jfs_readpages() argument 302 return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); in jfs_readpages() 305 static void jfs_write_failed(struct address_space *mapping, loff_t to) in jfs_write_failed() argument 307 struct inode *inode = mapping->host; in jfs_write_failed() 315 static int jfs_write_begin(struct file *file, struct address_space *mapping, in jfs_write_begin() argument 321 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, in jfs_write_begin() 324 jfs_write_failed(mapping, pos + len); in jfs_write_begin() 329 static sector_t jfs_bmap(struct address_space *mapping, sector_t block) in jfs_bmap() argument [all …]
|
D | jfs_metapage.c | 119 l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; in insert_metapage() 131 int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; in remove_metapage() 352 struct inode *inode = page->mapping->host; in metapage_writepage() 485 struct inode *inode = page->mapping->host; in metapage_readpage() 592 struct address_space *mapping; in __get_metapage() local 612 mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping; in __get_metapage() 621 mapping = inode->i_mapping; in __get_metapage() 625 page = grab_cache_page(mapping, page_index); in __get_metapage() 632 page = read_mapping_page(mapping, page_index, NULL); in __get_metapage() 776 struct address_space *mapping = in __invalidate_metapages() local [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/include/linux/ |
D | lustre_patchless_compat.h | 49 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument 51 if (page->mapping != mapping) in truncate_complete_page() 55 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page()
|
/linux-4.4.14/arch/powerpc/boot/dts/fsl/ |
D | t4240si-pre.dtsi | 93 fsl,portid-mapping = <0x80000000>; 100 fsl,portid-mapping = <0x80000000>; 107 fsl,portid-mapping = <0x80000000>; 114 fsl,portid-mapping = <0x80000000>; 121 fsl,portid-mapping = <0x40000000>; 128 fsl,portid-mapping = <0x40000000>; 135 fsl,portid-mapping = <0x40000000>; 142 fsl,portid-mapping = <0x40000000>; 149 fsl,portid-mapping = <0x20000000>; 156 fsl,portid-mapping = <0x20000000>; [all …]
|
D | p4080si-pre.dtsi | 99 fsl,portid-mapping = <0x80000000>; 109 fsl,portid-mapping = <0x40000000>; 119 fsl,portid-mapping = <0x20000000>; 129 fsl,portid-mapping = <0x10000000>; 139 fsl,portid-mapping = <0x08000000>; 149 fsl,portid-mapping = <0x04000000>; 159 fsl,portid-mapping = <0x02000000>; 169 fsl,portid-mapping = <0x01000000>;
|
D | t208xsi-pre.dtsi | 86 fsl,portid-mapping = <0x80000000>; 93 fsl,portid-mapping = <0x80000000>; 100 fsl,portid-mapping = <0x80000000>; 107 fsl,portid-mapping = <0x80000000>;
|
D | b4860si-pre.dtsi | 79 fsl,portid-mapping = <0x80000000>; 86 fsl,portid-mapping = <0x80000000>; 93 fsl,portid-mapping = <0x80000000>; 100 fsl,portid-mapping = <0x80000000>;
|
D | p2041si-pre.dtsi | 94 fsl,portid-mapping = <0x80000000>; 104 fsl,portid-mapping = <0x40000000>; 114 fsl,portid-mapping = <0x20000000>; 124 fsl,portid-mapping = <0x10000000>;
|
D | p3041si-pre.dtsi | 95 fsl,portid-mapping = <0x80000000>; 105 fsl,portid-mapping = <0x40000000>; 115 fsl,portid-mapping = <0x20000000>; 125 fsl,portid-mapping = <0x10000000>;
|
D | p5040si-pre.dtsi | 101 fsl,portid-mapping = <0x80000000>; 111 fsl,portid-mapping = <0x40000000>; 121 fsl,portid-mapping = <0x20000000>; 131 fsl,portid-mapping = <0x10000000>;
|
/linux-4.4.14/drivers/mfd/ |
D | htc-pasic3.c | 25 void __iomem *mapping; member 41 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); in pasic3_write_register() 42 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); in pasic3_write_register() 56 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); in pasic3_read_register() 57 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); in pasic3_read_register() 156 asic->mapping = ioremap(r->start, resource_size(r)); in pasic3_probe() 157 if (!asic->mapping) { in pasic3_probe() 194 iounmap(asic->mapping); in pasic3_remove()
|
/linux-4.4.14/fs/afs/ |
D | file.c | 27 static int afs_readpages(struct file *filp, struct address_space *mapping, 127 struct inode *inode = page->mapping->host; in afs_page_filler() 225 struct inode *inode = page->mapping->host; in afs_readpage() 240 static int afs_readpages(struct file *file, struct address_space *mapping, in afs_readpages() argument 248 key_serial(key), mapping->host->i_ino, nr_pages); in afs_readpages() 252 vnode = AFS_FS_I(mapping->host); in afs_readpages() 261 mapping, in afs_readpages() 266 mapping_gfp_mask(mapping)); in afs_readpages() 291 ret = read_cache_pages(mapping, pages, afs_page_filler, key); in afs_readpages() 325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); in afs_invalidatepage() [all …]
|
D | write.c | 118 int afs_write_begin(struct file *file, struct address_space *mapping, in afs_write_begin() argument 146 page = grab_cache_page_write_begin(mapping, index, flags); in afs_write_begin() 244 int afs_write_end(struct file *file, struct address_space *mapping, in afs_write_end() argument 463 static int afs_writepages_region(struct address_space *mapping, in afs_writepages_region() argument 474 n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY, in afs_writepages_region() 495 if (page->mapping != mapping) { in afs_writepages_region() 537 int afs_writepages(struct address_space *mapping, in afs_writepages() argument 546 start = mapping->writeback_index; in afs_writepages() 548 ret = afs_writepages_region(mapping, wbc, start, end, &next); in afs_writepages() 550 ret = afs_writepages_region(mapping, wbc, 0, start, in afs_writepages() [all …]
|
/linux-4.4.14/arch/c6x/platforms/ |
D | megamod-pic.c | 179 int *mapping, int size) in parse_priority_map() argument 195 mapping[i] = val; in parse_priority_map() 205 int mapping[NR_MUX_OUTPUTS]; in init_megamod_pic() local 233 for (i = 0; i < ARRAY_SIZE(mapping); i++) in init_megamod_pic() 234 mapping[i] = IRQ_UNMAPPED; in init_megamod_pic() 236 parse_priority_map(pic, mapping, ARRAY_SIZE(mapping)); in init_megamod_pic() 274 mapping[hwirq - 4] = i; in init_megamod_pic() 292 if (mapping[i] != IRQ_UNMAPPED) { in init_megamod_pic() 294 np->full_name, mapping[i], i + 4); in init_megamod_pic() 295 set_megamod_mux(pic, mapping[i], i); in init_megamod_pic()
|
/linux-4.4.14/arch/m32r/include/asm/ |
D | cacheflush.h | 17 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 18 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument 39 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 40 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument 53 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 54 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/drivers/net/ethernet/dec/tulip/ |
D | interrupt.c | 70 dma_addr_t mapping; in tulip_refill_rx() local 77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, in tulip_refill_rx() 79 if (dma_mapping_error(&tp->pdev->dev, mapping)) { in tulip_refill_rx() 85 tp->rx_buffers[entry].mapping = mapping; in tulip_refill_rx() 87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); in tulip_refill_rx() 214 tp->rx_buffers[entry].mapping, in tulip_poll() 226 tp->rx_buffers[entry].mapping, in tulip_poll() 233 if (tp->rx_buffers[entry].mapping != in tulip_poll() 238 (unsigned long long)tp->rx_buffers[entry].mapping, in tulip_poll() 243 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, in tulip_poll() [all …]
|
D | tulip_core.c | 355 dma_addr_t mapping; in tulip_up() local 364 mapping = pci_map_single(tp->pdev, tp->setup_frame, in tulip_up() 368 tp->tx_buffers[tp->cur_tx].mapping = mapping; in tulip_up() 372 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping); in tulip_up() 630 tp->rx_buffers[i].mapping = 0; in tulip_init_ring() 637 dma_addr_t mapping; in tulip_init_ring() local 646 mapping = pci_map_single(tp->pdev, skb->data, in tulip_init_ring() 648 tp->rx_buffers[i].mapping = mapping; in tulip_init_ring() 650 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping); in tulip_init_ring() 658 tp->tx_buffers[i].mapping = 0; in tulip_init_ring() [all …]
|
D | de2104x.c | 294 dma_addr_t mapping; member 411 dma_addr_t mapping; in de_rx() local 423 mapping = de->rx_skb[rx_tail].mapping; in de_rx() 451 pci_unmap_single(de->pdev, mapping, in de_rx() 455 mapping = in de_rx() 456 de->rx_skb[rx_tail].mapping = in de_rx() 461 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); in de_rx() 465 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); in de_rx() 485 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping); in de_rx() 562 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, in de_tx() [all …]
|
/linux-4.4.14/fs/hfsplus/ |
D | bitmap.c | 23 struct address_space *mapping; in hfsplus_block_allocate() local 35 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate() 36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate() 80 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, in hfsplus_block_allocate() 131 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, in hfsplus_block_allocate() 167 struct address_space *mapping; in hfsplus_block_free() local 182 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_free() 184 page = read_mapping_page(mapping, pnr, NULL); in hfsplus_block_free() 218 page = read_mapping_page(mapping, ++pnr, NULL); in hfsplus_block_free()
|
D | inode.c | 34 static void hfsplus_write_failed(struct address_space *mapping, loff_t to) in hfsplus_write_failed() argument 36 struct inode *inode = mapping->host; in hfsplus_write_failed() 44 static int hfsplus_write_begin(struct file *file, struct address_space *mapping, in hfsplus_write_begin() argument 51 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in hfsplus_write_begin() 53 &HFSPLUS_I(mapping->host)->phys_size); in hfsplus_write_begin() 55 hfsplus_write_failed(mapping, pos + len); in hfsplus_write_begin() 60 static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block) in hfsplus_bmap() argument 62 return generic_block_bmap(mapping, block, hfsplus_get_block); in hfsplus_bmap() 67 struct inode *inode = page->mapping->host; in hfsplus_releasepage() 129 struct address_space *mapping = file->f_mapping; in hfsplus_direct_IO() local [all …]
|
/linux-4.4.14/fs/nfs/ |
D | file.c | 347 static int nfs_write_begin(struct file *file, struct address_space *mapping, in nfs_write_begin() argument 357 file, mapping->host->i_ino, len, (long long) pos); in nfs_write_begin() 364 ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING, in nfs_write_begin() 371 nfs_inode_dio_wait(mapping->host); in nfs_write_begin() 373 page = grab_cache_page_write_begin(mapping, index, flags); in nfs_write_begin() 393 static int nfs_write_end(struct file *file, struct address_space *mapping, in nfs_write_end() argument 402 file, mapping->host->i_ino, len, (long long) pos); in nfs_write_end() 431 NFS_I(mapping->host)->write_io += copied; in nfs_write_end() 434 status = nfs_wb_all(mapping->host); in nfs_write_end() 460 nfs_fscache_invalidate_page(page, page->mapping->host); in nfs_invalidate_page() [all …]
|
D | fscache.c | 263 struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host); in nfs_fscache_release_page() 267 cookie, page, NFS_I(page->mapping->host)); in nfs_fscache_release_page() 272 nfs_inc_fscache_stats(page->mapping->host, in nfs_fscache_release_page() 296 nfs_inc_fscache_stats(page->mapping->host, in __nfs_fscache_invalidate_page() 318 error = nfs_readpage_async(context, page->mapping->host, page); in nfs_readpage_from_fscache_complete() 368 struct address_space *mapping, in __nfs_readpages_from_fscache() argument 379 mapping, pages, nr_pages, in __nfs_readpages_from_fscache() 382 mapping_gfp_mask(mapping)); in __nfs_readpages_from_fscache()
|
D | direct.c | 574 struct address_space *mapping = file->f_mapping; in nfs_file_direct_read() local 575 struct inode *inode = mapping->host; in nfs_file_direct_read() 580 nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count); in nfs_file_direct_read() 590 result = nfs_sync_mapping(mapping); in nfs_file_direct_read() 965 struct address_space *mapping = file->f_mapping; in nfs_file_direct_write() local 966 struct inode *inode = mapping->host; in nfs_file_direct_write() 974 nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, in nfs_file_direct_write() 982 result = nfs_sync_mapping(mapping); in nfs_file_direct_write() 986 if (mapping->nrpages) { in nfs_file_direct_write() 987 result = invalidate_inode_pages2_range(mapping, in nfs_file_direct_write() [all …]
|
D | fscache.h | 131 struct address_space *mapping, in nfs_readpages_from_fscache() argument 136 return __nfs_readpages_from_fscache(ctx, inode, mapping, pages, in nfs_readpages_from_fscache() 210 struct address_space *mapping, in nfs_readpages_from_fscache() argument
|
/linux-4.4.14/Documentation/devicetree/bindings/sound/ |
D | st,sta32x.txt | 30 - st,ch1-output-mapping: Channel 1 output mapping 31 - st,ch2-output-mapping: Channel 2 output mapping 32 - st,ch3-output-mapping: Channel 3 output mapping 84 st,ch1-output-mapping = /bits/ 8 <0>; // set channel 1 output ch 1 85 st,ch2-output-mapping = /bits/ 8 <0>; // set channel 2 output ch 1 86 st,ch3-output-mapping = /bits/ 8 <0>; // set channel 3 output ch 1
|
D | st,sta350.txt | 30 - st,ch1-output-mapping: Channel 1 output mapping 31 - st,ch2-output-mapping: Channel 2 output mapping 32 - st,ch3-output-mapping: Channel 3 output mapping 123 st,ch1-output-mapping = /bits/ 8 <0>; // set channel 1 output ch 1 124 st,ch2-output-mapping = /bits/ 8 <0>; // set channel 2 output ch 1 125 st,ch3-output-mapping = /bits/ 8 <0>; // set channel 3 output ch 1
|
/linux-4.4.14/Documentation/devicetree/bindings/powerpc/4xx/ |
D | ppc440spe-adma.txt | 16 - reg : <registers mapping> 35 - reg : <registers mapping> 37 - interrupts : <interrupt mapping for DMA0/1 interrupts sources: 41 - interrupt-parent : needed for interrupt mapping 66 - reg : <registers mapping> 67 - interrupts : <interrupt mapping for XOR interrupt source> 68 - interrupt-parent : for interrupt mapping
|
D | hsta.txt | 15 - reg : register mapping for the HSTA MSI space 16 - interrupt-parent : parent controller for mapping interrupts 17 - interrupts : ordered interrupt mapping for each MSI in the register
|
D | emac.txt | 20 - interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ> 21 - interrupt-parent : optional, if needed for interrupt mapping 22 - reg : <registers mapping> 116 - interrupts : <interrupt mapping for the MAL interrupts sources: 120 and rxeob. Thus we end up with mapping those 5 MPIC 135 - reg : <registers mapping> 144 - reg : <registers mapping>
|
/linux-4.4.14/fs/bfs/ |
D | file.c | 162 static void bfs_write_failed(struct address_space *mapping, loff_t to) in bfs_write_failed() argument 164 struct inode *inode = mapping->host; in bfs_write_failed() 170 static int bfs_write_begin(struct file *file, struct address_space *mapping, in bfs_write_begin() argument 176 ret = block_write_begin(mapping, pos, len, flags, pagep, in bfs_write_begin() 179 bfs_write_failed(mapping, pos + len); in bfs_write_begin() 184 static sector_t bfs_bmap(struct address_space *mapping, sector_t block) in bfs_bmap() argument 186 return generic_block_bmap(mapping, block, bfs_get_block); in bfs_bmap()
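Note: bfs shares its write_begin/write_failed shape with several other filesystems in this listing (adfs, hfs, fat, sysv, omfs): when block_write_begin() or cont_write_begin() fails on a write that would have extended the file, the pagecache past i_size is trimmed back. A minimal sketch of that pattern, assuming a hypothetical foo_get_block callback; real filesystems usually also release any blocks they instantiated.

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* hypothetical fs-specific block-mapping callback */
static int foo_get_block(struct inode *inode, sector_t block,
			 struct buffer_head *bh_result, int create);

static void foo_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	/* the failed write may have created pagecache beyond i_size; trim it */
	if (to > inode->i_size)
		truncate_pagecache(inode, inode->i_size);
}

static int foo_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned flags,
			   struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep, foo_get_block);
	if (unlikely(ret))
		foo_write_failed(mapping, pos + len);

	return ret;
}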
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib_ib.c | 95 u64 mapping[IPOIB_UD_RX_SG]) in ipoib_ud_dma_unmap_rx() 97 ib_dma_unmap_single(priv->ca, mapping[0], in ipoib_ud_dma_unmap_rx() 109 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; in ipoib_ib_post_receive() 110 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; in ipoib_ib_post_receive() 116 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); in ipoib_ib_post_receive() 129 u64 *mapping; in ipoib_alloc_rx_skb() local 144 mapping = priv->rx_ring[id].mapping; in ipoib_alloc_rx_skb() 145 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, in ipoib_alloc_rx_skb() 147 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) in ipoib_alloc_rx_skb() 181 u64 mapping[IPOIB_UD_RX_SG]; in ipoib_ib_handle_rx_wc() local [all …]
|
D | ipoib_cm.c | 81 u64 mapping[IPOIB_CM_RX_SG]) in ipoib_cm_dma_unmap_rx() 85 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx() 88 ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx() 100 priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; in ipoib_cm_post_receive_srq() 106 priv->cm.srq_ring[id].mapping); in ipoib_cm_post_receive_srq() 126 sge[i].addr = rx->rx_ring[id].mapping[i]; in ipoib_cm_post_receive_nonsrq() 132 rx->rx_ring[id].mapping); in ipoib_cm_post_receive_nonsrq() 143 u64 mapping[IPOIB_CM_RX_SG], in ipoib_cm_alloc_rx_skb() 160 mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, in ipoib_cm_alloc_rx_skb() 162 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { in ipoib_cm_alloc_rx_skb() [all …]
|
D | ipoib.h | 171 u64 mapping[IPOIB_UD_RX_SG]; member 176 u64 mapping[MAX_SKB_FRAGS + 1]; member 181 u64 mapping; member 251 u64 mapping[IPOIB_CM_RX_SG]; member 518 u64 *mapping = tx_req->mapping; in ipoib_build_sge() local 521 priv->tx_sge[0].addr = mapping[0]; in ipoib_build_sge() 528 priv->tx_sge[i + off].addr = mapping[i + off]; in ipoib_build_sge()
|
/linux-4.4.14/arch/arm64/include/asm/ |
D | cacheflush.h | 131 #define flush_dcache_mmap_lock(mapping) \ argument 132 spin_lock_irq(&(mapping)->tree_lock) 133 #define flush_dcache_mmap_unlock(mapping) \ argument 134 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.4.14/include/trace/events/ |
D | filemap.h | 29 __entry->i_ino = page->mapping->host->i_ino; 31 if (page->mapping->host->i_sb) 32 __entry->s_dev = page->mapping->host->i_sb->s_dev; 34 __entry->s_dev = page->mapping->host->i_rdev;
|
/linux-4.4.14/fs/omfs/ |
D | file.c | 292 static int omfs_readpages(struct file *file, struct address_space *mapping, in omfs_readpages() argument 295 return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); in omfs_readpages() 304 omfs_writepages(struct address_space *mapping, struct writeback_control *wbc) in omfs_writepages() argument 306 return mpage_writepages(mapping, wbc, omfs_get_block); in omfs_writepages() 309 static void omfs_write_failed(struct address_space *mapping, loff_t to) in omfs_write_failed() argument 311 struct inode *inode = mapping->host; in omfs_write_failed() 319 static int omfs_write_begin(struct file *file, struct address_space *mapping, in omfs_write_begin() argument 325 ret = block_write_begin(mapping, pos, len, flags, pagep, in omfs_write_begin() 328 omfs_write_failed(mapping, pos + len); in omfs_write_begin() 333 static sector_t omfs_bmap(struct address_space *mapping, sector_t block) in omfs_bmap() argument [all …]
|
/linux-4.4.14/Documentation/ia64/ |
D | aliasing.txt | 25 page with both a cacheable mapping and an uncacheable mapping[1]. 52 in the system because of constraints imposed by the identity mapping 65 identity mapping only when the entire granule supports cacheable 69 can be referenced safely by an identity mapping. 92 by a kernel identity mapping, the user mapping must use the same 93 attribute as the kernel mapping. 95 If the region is not in kern_memmap, the user mapping should use 99 machines, this should use an uncacheable mapping as a fallback. 122 the WC mapping is allowed. 124 Otherwise, the user mapping must use the same attribute as the [all …]
|
/linux-4.4.14/arch/unicore32/include/asm/ |
D | cacheflush.h | 182 #define flush_dcache_mmap_lock(mapping) \ argument 183 spin_lock_irq(&(mapping)->tree_lock) 184 #define flush_dcache_mmap_unlock(mapping) \ argument 185 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.4.14/fs/freevxfs/ |
D | vxfs_subr.c | 68 vxfs_get_page(struct address_space *mapping, u_long n) in vxfs_get_page() argument 72 pp = read_mapping_page(mapping, n, NULL); in vxfs_get_page() 180 vxfs_bmap(struct address_space *mapping, sector_t block) in vxfs_bmap() argument 182 return generic_block_bmap(mapping, block, vxfs_getblk); in vxfs_bmap()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | rw26.c | 93 inode = vmpage->mapping->host; in ll_invalidatepage() 123 struct address_space *mapping; in ll_releasepage() local 130 mapping = vmpage->mapping; in ll_releasepage() 131 if (mapping == NULL) in ll_releasepage() 134 obj = ll_i2info(mapping->host)->lli_clob; in ll_releasepage() 168 struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host); in ll_set_page_dirty() 335 struct address_space *mapping, in ll_direct_IO_26_seg() argument 473 static int ll_write_begin(struct file *file, struct address_space *mapping, in ll_write_begin() argument 482 page = grab_cache_page_write_begin(mapping, index, flags); in ll_write_begin() 496 static int ll_write_end(struct file *file, struct address_space *mapping, in ll_write_end() argument [all …]
|
D | rw.c | 99 clob = ll_i2info(vmpage->mapping->host)->lli_clob; in ll_cl_init() 115 struct inode *inode = vmpage->mapping->host; in ll_cl_init() 357 void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which) in ll_ra_stats_inc() argument 359 struct ll_sb_info *sbi = ll_i2sbi(mapping->host); in ll_ra_stats_inc() 466 pgoff_t index, struct address_space *mapping) in ll_read_ahead_page() argument 469 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob; in ll_read_ahead_page() 475 vmpage = grab_cache_page_nowait(mapping, index); in ll_read_ahead_page() 478 if (vmpage->mapping == mapping) { in ll_read_ahead_page() 504 ll_ra_stats_inc(mapping, which); in ll_read_ahead_page() 613 struct address_space *mapping, in ll_read_ahead_pages() argument [all …]
|
D | vvp_page.c | 139 struct address_space *mapping; in vvp_page_discard() local 145 mapping = vmpage->mapping; in vvp_page_discard() 148 ll_ra_stats_inc(mapping, RA_STAT_DISCARDED); in vvp_page_discard() 154 truncate_complete_page(mapping, vmpage); in vvp_page_discard() 172 ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); in vvp_page_unmap() 180 struct inode *inode = vmpage->mapping->host; in vvp_page_delete()
|
/linux-4.4.14/fs/ceph/ |
D | addr.c | 71 struct address_space *mapping = page->mapping; in ceph_set_page_dirty() local 77 if (unlikely(!mapping)) in ceph_set_page_dirty() 82 mapping->host, page, page->index); in ceph_set_page_dirty() 87 inode = mapping->host; in ceph_set_page_dirty() 110 mapping->host, page, page->index, in ceph_set_page_dirty() 126 WARN_ON(!page->mapping); in ceph_set_page_dirty() 143 inode = page->mapping->host; in ceph_invalidatepage() 178 struct inode *inode = page->mapping ? page->mapping->host : NULL; in ceph_releasepage() 401 static int ceph_readpages(struct file *file, struct address_space *mapping, in ceph_readpages() argument 412 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, in ceph_readpages() [all …]
|
D | cache.h | 44 struct address_space *mapping, 71 struct inode* inode = page->mapping->host; in ceph_release_fscache_page() 132 struct address_space *mapping, in ceph_readpages_from_fscache() argument
|
/linux-4.4.14/Documentation/arm/ |
D | memory.txt | 22 set up a minicache mapping. 38 fffe8000 fffeffff DTCM mapping area for platforms with 41 fffe0000 fffe7fff ITCM mapping area for platforms with 44 ffc00000 ffefffff Fixmap mapping region. Addresses provided 48 mapping within the vmalloc space. 64 One way of mapping HIGHMEM pages into kernel 79 space are also caught via this mapping. 85 Since future CPUs may impact the kernel mapping layout, user programs
|
/linux-4.4.14/fs/logfs/ |
D | file.c | 12 static int logfs_write_begin(struct file *file, struct address_space *mapping, in logfs_write_begin() argument 16 struct inode *inode = mapping->host; in logfs_write_begin() 20 page = grab_cache_page_write_begin(mapping, index, flags); in logfs_write_begin() 38 static int logfs_write_end(struct file *file, struct address_space *mapping, in logfs_write_end() argument 42 struct inode *inode = mapping->host; in logfs_write_end() 105 struct inode *inode = page->mapping->host; in __logfs_writepage() 119 struct inode *inode = page->mapping->host; in logfs_writepage() 168 struct super_block *sb = page->mapping->host->i_sb; in logfs_invalidatepage()
|
D | dev_mtd.c | 76 struct address_space *mapping = super->s_mapping_inode->i_mapping; in logfs_mtd_erase_mapping() local 81 page = find_get_page(mapping, index); in logfs_mtd_erase_mapping() 151 struct address_space *mapping = super->s_mapping_inode->i_mapping; in logfs_mtd_find_first_sb() local 162 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); in logfs_mtd_find_first_sb() 168 struct address_space *mapping = super->s_mapping_inode->i_mapping; in logfs_mtd_find_last_sb() local 180 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); in logfs_mtd_find_last_sb() 187 struct address_space *mapping = super->s_mapping_inode->i_mapping; in __logfs_mtd_writeseg() local 192 page = find_lock_page(mapping, index + i); in __logfs_mtd_writeseg()
|
D | dev_bdev.c | 78 struct address_space *mapping = super->s_mapping_inode->i_mapping; in __bdev_writeseg() local 109 page = find_lock_page(mapping, index + i); in __bdev_writeseg() 248 struct address_space *mapping = super->s_mapping_inode->i_mapping; in bdev_find_first_sb() local 252 return read_cache_page(mapping, 0, filler, sb); in bdev_find_first_sb() 258 struct address_space *mapping = super->s_mapping_inode->i_mapping; in bdev_find_last_sb() local 264 return read_cache_page(mapping, index, filler, sb); in bdev_find_last_sb()
|
/linux-4.4.14/fs/xfs/ |
D | xfs_aops.c | 655 if (!page->mapping) in xfs_check_page_type() 710 if (page->mapping != inode->i_mapping) in xfs_convert_page() 864 trace_xfs_invalidatepage(page->mapping->host, page, offset, in xfs_vm_invalidatepage() 889 struct inode *inode = page->mapping->host; in xfs_aops_discard_page() 947 struct inode *inode = page->mapping->host; in xfs_vm_writepage() 1207 struct address_space *mapping, in xfs_vm_writepages() argument 1210 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); in xfs_vm_writepages() 1211 return generic_writepages(mapping, wbc); in xfs_vm_writepages() 1228 trace_xfs_releasepage(page->mapping->host, page, 0, 0); in xfs_vm_releasepage() 1817 struct address_space *mapping, in xfs_vm_write_begin() argument [all …]
|
D | xfs_file.c | 100 struct address_space *mapping; in xfs_iozero() local 104 mapping = VFS_I(ip)->i_mapping; in xfs_iozero() 120 status = pagecache_write_begin(NULL, mapping, pos, bytes, in xfs_iozero() 128 status = pagecache_write_end(NULL, mapping, pos, bytes, in xfs_iozero() 706 struct address_space *mapping = file->f_mapping; in xfs_file_dio_aio_write() local 707 struct inode *inode = mapping->host; in xfs_file_dio_aio_write() 735 if (unaligned_io || mapping->nrpages) in xfs_file_dio_aio_write() 746 if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { in xfs_file_dio_aio_write() 762 if (mapping->nrpages) { in xfs_file_dio_aio_write() 790 ret = mapping->a_ops->direct_IO(iocb, &data, pos); in xfs_file_dio_aio_write() [all …]
|
/linux-4.4.14/fs/jffs2/ |
D | file.c | 24 static int jffs2_write_end(struct file *filp, struct address_space *mapping, 27 static int jffs2_write_begin(struct file *filp, struct address_space *mapping, 124 struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); in jffs2_readpage() 128 ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); in jffs2_readpage() 133 static int jffs2_write_begin(struct file *filp, struct address_space *mapping, in jffs2_write_begin() argument 138 struct inode *inode = mapping->host; in jffs2_write_begin() 144 pg = grab_cache_page_write_begin(mapping, index, flags); in jffs2_write_begin() 237 static int jffs2_write_end(struct file *filp, struct address_space *mapping, in jffs2_write_end() argument 244 struct inode *inode = mapping->host; in jffs2_write_end()
|
/linux-4.4.14/fs/exofs/ |
D | dir.c | 59 struct address_space *mapping = page->mapping; in exofs_commit_chunk() local 60 struct inode *dir = mapping->host; in exofs_commit_chunk() 84 struct inode *dir = page->mapping->host; in exofs_check_page() 159 struct address_space *mapping = dir->i_mapping; in exofs_get_page() local 160 struct page *page = read_mapping_page(mapping, n, NULL); in exofs_get_page() 408 err = exofs_write_begin(NULL, page->mapping, pos, len, in exofs_set_link() 490 err = exofs_write_begin(NULL, page->mapping, pos, rec_len, 0, in exofs_add_link() 521 struct address_space *mapping = page->mapping; in exofs_delete_entry() local 522 struct inode *inode = mapping->host; in exofs_delete_entry() 547 err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0, in exofs_delete_entry() [all …]
|
D | inode.c | 186 mapping_set_error(page->mapping, ret); in update_write_page() 216 struct inode *inode = page->mapping->host; in __readpages_done() 473 static int exofs_readpages(struct file *file, struct address_space *mapping, in exofs_readpages() argument 479 _pcol_init(&pcol, nr_pages, mapping->host); in exofs_readpages() 481 ret = read_cache_pages(mapping, pages, readpage_strip, &pcol); in exofs_readpages() 499 _pcol_init(&pcol, 1, page->mapping->host); in _readpage() 544 struct inode *inode = page->mapping->host; in writepages_done() 781 set_bit(AS_EIO, &page->mapping->flags); in writepage_strip() 786 static int exofs_writepages(struct address_space *mapping, in exofs_writepages() argument 795 start + mapping->nrpages : in exofs_writepages() [all …]
|
/linux-4.4.14/fs/adfs/ |
D | inode.c | 48 static void adfs_write_failed(struct address_space *mapping, loff_t to) in adfs_write_failed() argument 50 struct inode *inode = mapping->host; in adfs_write_failed() 56 static int adfs_write_begin(struct file *file, struct address_space *mapping, in adfs_write_begin() argument 63 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in adfs_write_begin() 65 &ADFS_I(mapping->host)->mmu_private); in adfs_write_begin() 67 adfs_write_failed(mapping, pos + len); in adfs_write_begin() 72 static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) in _adfs_bmap() argument 74 return generic_block_bmap(mapping, block, adfs_get_block); in _adfs_bmap()
|
/linux-4.4.14/drivers/gpu/drm/rockchip/ |
D | rockchip_drm_drv.c | 47 struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; in rockchip_drm_dma_attach_device() local 56 return arm_iommu_attach_device(dev, mapping); in rockchip_drm_dma_attach_device() 133 struct dma_iommu_mapping *mapping; in rockchip_drm_load() local 156 mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000, in rockchip_drm_load() 158 if (IS_ERR(mapping)) { in rockchip_drm_load() 159 ret = PTR_ERR(mapping); in rockchip_drm_load() 169 ret = arm_iommu_attach_device(dev, mapping); in rockchip_drm_load() 229 arm_iommu_release_mapping(dev->archdata.mapping); in rockchip_drm_load() 245 arm_iommu_release_mapping(dev->archdata.mapping); in rockchip_drm_unload()
|
/linux-4.4.14/fs/ext4/ |
D | inode.c | 919 struct inode *inode = page->mapping->host; in ext4_block_write_begin() 1001 static int ext4_write_begin(struct file *file, struct address_space *mapping, in ext4_write_begin() argument 1005 struct inode *inode = mapping->host; in ext4_write_begin() 1024 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, in ext4_write_begin() 1040 page = grab_cache_page_write_begin(mapping, index, flags); in ext4_write_begin() 1053 if (page->mapping != mapping) { in ext4_write_begin() 1139 struct address_space *mapping, in ext4_write_end() argument 1144 struct inode *inode = mapping->host; in ext4_write_end() 1166 copied = block_write_end(file, mapping, pos, in ext4_write_end() 1245 struct address_space *mapping, in ext4_journalled_write_end() argument [all …]
|
D | move_extent.c | 137 struct address_space *mapping[2]; in mext_page_double_lock() local 142 mapping[0] = inode1->i_mapping; in mext_page_double_lock() 143 mapping[1] = inode2->i_mapping; in mext_page_double_lock() 148 mapping[0] = inode2->i_mapping; in mext_page_double_lock() 149 mapping[1] = inode1->i_mapping; in mext_page_double_lock() 152 page[0] = grab_cache_page_write_begin(mapping[0], index1, fl); in mext_page_double_lock() 156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); in mext_page_double_lock() 179 struct inode *inode = page->mapping->host; in mext_page_mkuptodate()
|
D | readpage.c | 133 int ext4_mpage_readpages(struct address_space *mapping, in ext4_mpage_readpages() argument 141 struct inode *inode = mapping->host; in ext4_mpage_readpages() 168 if (add_to_page_cache_lru(page, mapping, page->index, in ext4_mpage_readpages() 169 mapping_gfp_constraint(mapping, GFP_KERNEL))) in ext4_mpage_readpages()
|
/linux-4.4.14/fs/hugetlbfs/ |
D | inode.c | 246 struct address_space *mapping = file->f_mapping; in hugetlbfs_read_iter() local 247 struct inode *inode = mapping->host; in hugetlbfs_read_iter() 274 page = find_lock_page(mapping, index); in hugetlbfs_read_iter() 305 struct address_space *mapping, in hugetlbfs_write_begin() argument 312 static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, in hugetlbfs_write_end() argument 353 struct address_space *mapping = &inode->i_data; in remove_inode_hugepages() local 377 if (!pagevec_lookup(&pvec, mapping, next, lookup_nr)) in remove_inode_hugepages() 395 mapping, next, 0); in remove_inode_hugepages() 496 struct address_space *mapping = inode->i_mapping; in hugetlb_vmtruncate() local 503 i_mmap_lock_write(mapping); in hugetlb_vmtruncate() [all …]
|
/linux-4.4.14/fs/ntfs/ |
D | bitmap.c | 52 struct address_space *mapping; in __ntfs_bitmap_set_bits_in_run() local 74 mapping = vi->i_mapping; in __ntfs_bitmap_set_bits_in_run() 75 page = ntfs_map_page(mapping, index); in __ntfs_bitmap_set_bits_in_run() 127 page = ntfs_map_page(mapping, ++index); in __ntfs_bitmap_set_bits_in_run()
|
D | aops.h | 86 static inline struct page *ntfs_map_page(struct address_space *mapping, in ntfs_map_page() argument 89 struct page *page = read_mapping_page(mapping, index, NULL); in ntfs_map_page()
|
D | aops.c | 68 vi = page->mapping->host; in ntfs_end_buffer_async_read() 201 vi = page->mapping->host; in ntfs_read_block() 412 vi = page->mapping->host; in ntfs_readpage() 568 vi = page->mapping->host; in ntfs_write_block() 923 struct inode *vi = page->mapping->host; in ntfs_write_mst_block() 1356 struct inode *vi = page->mapping->host; in ntfs_writepage() 1563 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) in ntfs_bmap() argument 1569 ntfs_inode *ni = NTFS_I(mapping->host); in ntfs_bmap() 1730 struct address_space *mapping = page->mapping; in mark_ntfs_record_dirty() local 1731 ntfs_inode *ni = NTFS_I(mapping->host); in mark_ntfs_record_dirty() [all …]
|
/linux-4.4.14/drivers/misc/cxl/ |
D | context.c | 38 struct address_space *mapping) in cxl_context_init() argument 47 ctx->mapping = mapping; in cxl_context_init() 260 if (ctx->mapping) in cxl_context_detach_all() 261 unmap_mapping_range(ctx->mapping, 0, 0, 1); in cxl_context_detach_all() 276 kfree(ctx->mapping); in reclaim_ctx()
|
D | api.c | 21 struct address_space *mapping; in cxl_dev_context_init() local 43 mapping = kmalloc(sizeof(struct address_space), GFP_KERNEL); in cxl_dev_context_init() 44 if (!mapping) { in cxl_dev_context_init() 48 address_space_init_once(mapping); in cxl_dev_context_init() 51 rc = cxl_context_init(ctx, afu, false, mapping); in cxl_dev_context_init() 60 kfree(mapping); in cxl_dev_context_init() 286 file->f_mapping = ctx->mapping; in cxl_get_fd()
|
/linux-4.4.14/fs/minix/ |
D | dir.c | 50 struct address_space *mapping = page->mapping; in dir_commit_chunk() local 51 struct inode *dir = mapping->host; in dir_commit_chunk() 53 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk() 68 struct address_space *mapping = dir->i_mapping; in dir_get_page() local 69 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() 290 struct inode *inode = page->mapping->host; in minix_delete_entry() 414 struct inode *dir = page->mapping->host; in minix_set_link() 457 struct address_space *mapping = page->mapping; in minix_inode_by_name() local 458 struct inode *inode = mapping->host; in minix_inode_by_name()
|
/linux-4.4.14/drivers/iommu/ |
D | ipmmu-vmsa.c | 36 struct dma_iommu_mapping *mapping; member 443 if (!mmu->mapping) in ipmmu_irq() 446 io_domain = mmu->mapping->domain; in ipmmu_irq() 685 if (!mmu->mapping) { in ipmmu_add_device() 686 struct dma_iommu_mapping *mapping; in ipmmu_add_device() local 688 mapping = arm_iommu_create_mapping(&platform_bus_type, in ipmmu_add_device() 690 if (IS_ERR(mapping)) { in ipmmu_add_device() 692 ret = PTR_ERR(mapping); in ipmmu_add_device() 696 mmu->mapping = mapping; in ipmmu_add_device() 700 ret = arm_iommu_attach_device(dev, mmu->mapping); in ipmmu_add_device() [all …]
|
D | shmobile-iommu.c | 346 struct dma_iommu_mapping *mapping; in shmobile_iommu_add_device() local 350 mapping = archdata->iommu_mapping; in shmobile_iommu_add_device() 351 if (!mapping) { in shmobile_iommu_add_device() 352 mapping = arm_iommu_create_mapping(&platform_bus_type, 0, in shmobile_iommu_add_device() 354 if (IS_ERR(mapping)) in shmobile_iommu_add_device() 355 return PTR_ERR(mapping); in shmobile_iommu_add_device() 356 archdata->iommu_mapping = mapping; in shmobile_iommu_add_device() 359 if (arm_iommu_attach_device(dev, mapping)) in shmobile_iommu_add_device()
|
/linux-4.4.14/drivers/input/joystick/ |
D | xpad.c | 119 u8 mapping; member 342 int mapping; /* map d-pad to buttons or to axes */ member 361 if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { in xpad_process_packet() 376 if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { in xpad_process_packet() 385 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { in xpad_process_packet() 433 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { in xpad360_process_packet() 447 if (!(xpad->mapping & MAP_DPAD_TO_BUTTONS) || in xpad360_process_packet() 472 if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { in xpad360_process_packet() 487 if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { in xpad360_process_packet() 556 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { in xpadone_process_buttons() [all …]
|
/linux-4.4.14/fs/affs/ |
D | file.c | 381 static void affs_write_failed(struct address_space *mapping, loff_t to) in affs_write_failed() argument 383 struct inode *inode = mapping->host; in affs_write_failed() 395 struct address_space *mapping = file->f_mapping; in affs_direct_IO() local 396 struct inode *inode = mapping->host; in affs_direct_IO() 409 affs_write_failed(mapping, offset + count); in affs_direct_IO() 413 static int affs_write_begin(struct file *file, struct address_space *mapping, in affs_write_begin() argument 420 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in affs_write_begin() 422 &AFFS_I(mapping->host)->mmu_private); in affs_write_begin() 424 affs_write_failed(mapping, pos + len); in affs_write_begin() 429 static sector_t _affs_bmap(struct address_space *mapping, sector_t block) in _affs_bmap() argument [all …]
|
/linux-4.4.14/fs/ocfs2/ |
D | mmap.c | 66 struct address_space *mapping = inode->i_mapping; in __ocfs2_page_mkwrite() local 89 if ((page->mapping != inode->i_mapping) || in __ocfs2_page_mkwrite() 107 ret = ocfs2_write_begin_nolock(file, mapping, pos, len, 0, &locked_page, in __ocfs2_page_mkwrite() 123 ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, in __ocfs2_page_mkwrite()
|
D | aops.c | 283 struct inode *inode = page->mapping->host; in ocfs2_readpage() 354 static int ocfs2_readpages(struct file *filp, struct address_space *mapping, in ocfs2_readpages() argument 358 struct inode *inode = mapping->host; in ocfs2_readpages() 392 err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block); in ocfs2_readpages() 415 (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno, in ocfs2_writepage() 457 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) in ocfs2_bmap() argument 462 struct inode *inode = mapping->host; in ocfs2_bmap() 1497 static int ocfs2_grab_pages_for_write(struct address_space *mapping, in ocfs2_grab_pages_for_write() argument 1505 struct inode *inode = mapping->host; in ocfs2_grab_pages_for_write() 1547 if (mmap_page->mapping != mapping) { in ocfs2_grab_pages_for_write() [all …]
|
D | aops.h | 46 int ocfs2_write_end_nolock(struct address_space *mapping, 51 struct address_space *mapping,
|
/linux-4.4.14/Documentation/x86/x86_64/ |
D | mm.txt | 9 ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory 19 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0 20 ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space 24 The direct mapping covers all memory in the system up to the highest
|
/linux-4.4.14/Documentation/devicetree/bindings/ata/ |
D | fsl-sata.txt | 11 - interrupts : <interrupt mapping for SATA IRQ> 19 - interrupt-parent : optional, if needed for interrupt mapping 20 - reg : <registers mapping>
|
D | exynos-sata.txt | 8 - interrupts : <interrupt mapping for SATA IRQ> 9 - reg : <registers mapping>
|
/linux-4.4.14/arch/parisc/include/asm/ |
D | cacheflush.h | 77 #define flush_dcache_mmap_lock(mapping) \ argument 78 spin_lock_irq(&(mapping)->tree_lock) 79 #define flush_dcache_mmap_unlock(mapping) \ argument 80 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.4.14/drivers/net/ethernet/alteon/ |
D | acenic.c | 640 dma_addr_t mapping; in acenic_remove_one() local 643 mapping = dma_unmap_addr(ringp, mapping); in acenic_remove_one() 644 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one() 660 dma_addr_t mapping; in acenic_remove_one() local 663 mapping = dma_unmap_addr(ringp,mapping); in acenic_remove_one() 664 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one() 679 dma_addr_t mapping; in acenic_remove_one() local 682 mapping = dma_unmap_addr(ringp, mapping); in acenic_remove_one() 683 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one() 1642 dma_addr_t mapping; in ace_load_std_rx_ring() local [all …]
|
/linux-4.4.14/drivers/mtd/devices/ |
D | block2mtd.c | 47 static struct page *page_read(struct address_space *mapping, int index) in page_read() argument 49 return read_mapping_page(mapping, index, NULL); in page_read() 55 struct address_space *mapping = dev->blkdev->bd_inode->i_mapping; in _block2mtd_erase() local 63 page = page_read(mapping, index); in _block2mtd_erase() 74 balance_dirty_pages_ratelimited(mapping); in _block2mtd_erase() 144 struct address_space *mapping = dev->blkdev->bd_inode->i_mapping; in _block2mtd_write() local 156 page = page_read(mapping, index); in _block2mtd_write() 165 balance_dirty_pages_ratelimited(mapping); in _block2mtd_write()
|
/linux-4.4.14/fs/sysv/ |
D | dir.c | 38 struct address_space *mapping = page->mapping; in dir_commit_chunk() local 39 struct inode *dir = mapping->host; in dir_commit_chunk() 42 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk() 56 struct address_space *mapping = dir->i_mapping; in dir_get_page() local 57 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() 231 struct inode *inode = page->mapping->host; in sysv_delete_entry() 329 struct inode *dir = page->mapping->host; in sysv_set_link()
|
D | itree.c | 467 static void sysv_write_failed(struct address_space *mapping, loff_t to) in sysv_write_failed() argument 469 struct inode *inode = mapping->host; in sysv_write_failed() 477 static int sysv_write_begin(struct file *file, struct address_space *mapping, in sysv_write_begin() argument 483 ret = block_write_begin(mapping, pos, len, flags, pagep, get_block); in sysv_write_begin() 485 sysv_write_failed(mapping, pos + len); in sysv_write_begin() 490 static sector_t sysv_bmap(struct address_space *mapping, sector_t block) in sysv_bmap() argument 492 return generic_block_bmap(mapping,block,get_block); in sysv_bmap()
|
/linux-4.4.14/arch/xtensa/mm/ |
D | cache.c | 135 struct address_space *mapping = page_mapping(page); in flush_dcache_page() local 143 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page() 162 if (!alias && !mapping) in flush_dcache_page() 173 if (mapping) in flush_dcache_page()
|
/linux-4.4.14/Documentation/vm/ |
D | remap_file_pages.txt | 1 The remap_file_pages() system call is used to create a nonlinear mapping, 2 that is, a mapping in which the pages of the file are mapped into a 8 Supporting nonlinear mappings requires a significant amount of non-trivial 10 nonlinear mapping work the kernel needs a way to distinguish normal page table
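Note: for reference, this is roughly what a nonlinear mapping looks like from user space; a hedged sketch assuming a file "data.bin" of at least four pages. On kernels of this vintage (including 4.4) the call is emulated with ordinary per-page mappings, which is exactly the point the document above makes.

/* Userspace sketch: build a nonlinear view of data.bin. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	int fd = open("data.bin", O_RDWR);
	char *map;

	if (fd < 0)
		return 1;

	/* linear MAP_SHARED mapping of the first four pages of the file */
	map = mmap(NULL, 4 * psz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	/* make the second VMA page show file page index 3 (the fourth page):
	 * the mapping is now nonlinear, VMA offset != file offset */
	if (remap_file_pages(map + psz, psz, 0, 3, 0))
		perror("remap_file_pages");

	printf("first byte of file page 3: %c\n", map[psz]);

	munmap(map, 4 * psz);
	close(fd);
	return 0;
}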
|
D | highmem.txt | 31 The part of (physical) memory not covered by a permanent mapping is what we 67 (*) vmap(). This can be used to make a long duration mapping of multiple 71 (*) kmap(). This permits a short duration mapping of a single page. It needs 76 (*) kmap_atomic(). This permits a very short duration mapping of a single 77 page. Since the mapping is restricted to the CPU that issued it, it 97 struct page *page = find_get_page(mapping, offset); 130 If CONFIG_HIGHMEM is not set, then the kernel will try and create a mapping 147 pageframes need to live in the permanent mapping, which means:
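Note: a minimal sketch of the short-duration mapping the text describes, extending the document's own find_get_page() snippet. The function name and destination buffer are hypothetical; no sleeping is allowed while the atomic mapping is held.

#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch only: copy one page-cache page into a kernel buffer. */
static int copy_cached_page(struct address_space *mapping, pgoff_t offset,
			    void *buf)
{
	struct page *page = find_get_page(mapping, offset);
	void *vaddr;

	if (!page)
		return -ENOENT;

	vaddr = kmap_atomic(page);	/* per-CPU, very short duration mapping */
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);

	page_cache_release(page);	/* drop find_get_page()'s reference */
	return 0;
}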
|
/linux-4.4.14/arch/metag/include/asm/ |
D | cacheflush.h | 67 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 68 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument 95 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 96 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/fs/ufs/ |
D | util.c | 243 struct page *ufs_get_locked_page(struct address_space *mapping, in ufs_get_locked_page() argument 248 page = find_lock_page(mapping, index); in ufs_get_locked_page() 250 page = read_mapping_page(mapping, index, NULL); in ufs_get_locked_page() 255 mapping->host->i_ino, index); in ufs_get_locked_page() 261 if (unlikely(page->mapping == NULL)) { in ufs_get_locked_page() 275 mapping->host->i_ino, index); in ufs_get_locked_page()
|
D | dir.c | 45 struct address_space *mapping = page->mapping; in ufs_commit_chunk() local 46 struct inode *dir = mapping->host; in ufs_commit_chunk() 50 block_write_end(NULL, mapping, pos, len, len, page, NULL); in ufs_commit_chunk() 110 struct inode *dir = page->mapping->host; in ufs_check_page() 189 struct address_space *mapping = dir->i_mapping; in ufs_get_page() local 190 struct page *page = read_mapping_page(mapping, n, NULL); in ufs_get_page() 559 struct address_space *mapping = inode->i_mapping; in ufs_make_empty() local 560 struct page *page = grab_cache_page(mapping, 0); in ufs_make_empty()
|
/linux-4.4.14/arch/arm/include/asm/ |
D | device.h | 18 struct dma_iommu_mapping *mapping; member 32 #define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
|
D | dma-iommu.h | 30 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); 33 struct dma_iommu_mapping *mapping);
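Note: the rockchip_drm, ipmmu-vmsa and shmobile-iommu entries above all follow the same arm_iommu_* flow declared here: create a per-bus IOVA window, attach the device, release on failure. A hedged sketch, with the 256 MiB window at IOVA 0 chosen arbitrarily.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_iommu_attach(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_256M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/* from here on, dma_map_single() etc. allocate IOVAs from the window */
	return 0;
}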
|
D | cacheflush.h | 321 #define flush_dcache_mmap_lock(mapping) \ argument 322 spin_lock_irq(&(mapping)->tree_lock) 323 #define flush_dcache_mmap_unlock(mapping) \ argument 324 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.4.14/fs/ubifs/ |
D | file.c | 109 struct inode *inode = page->mapping->host; in do_readpage() 220 static int write_begin_slow(struct address_space *mapping, in write_begin_slow() argument 224 struct inode *inode = mapping->host; in write_begin_slow() 250 page = grab_cache_page_write_begin(mapping, index, flags); in write_begin_slow() 424 static int ubifs_write_begin(struct file *file, struct address_space *mapping, in ubifs_write_begin() argument 428 struct inode *inode = mapping->host; in ubifs_write_begin() 443 page = grab_cache_page_write_begin(mapping, index, flags); in ubifs_write_begin() 499 return write_begin_slow(mapping, pos, len, pagep, flags); in ubifs_write_begin() 539 static int ubifs_write_end(struct file *file, struct address_space *mapping, in ubifs_write_end() argument 543 struct inode *inode = mapping->host; in ubifs_write_end() [all …]
|
/linux-4.4.14/fs/hfs/ |
D | inode.c | 39 static void hfs_write_failed(struct address_space *mapping, loff_t to) in hfs_write_failed() argument 41 struct inode *inode = mapping->host; in hfs_write_failed() 49 static int hfs_write_begin(struct file *file, struct address_space *mapping, in hfs_write_begin() argument 56 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in hfs_write_begin() 58 &HFS_I(mapping->host)->phys_size); in hfs_write_begin() 60 hfs_write_failed(mapping, pos + len); in hfs_write_begin() 65 static sector_t hfs_bmap(struct address_space *mapping, sector_t block) in hfs_bmap() argument 67 return generic_block_bmap(mapping, block, hfs_get_block); in hfs_bmap() 72 struct inode *inode = page->mapping->host; in hfs_releasepage() 131 struct address_space *mapping = file->f_mapping; in hfs_direct_IO() local [all …]
|
/linux-4.4.14/drivers/remoteproc/ |
D | remoteproc_core.c | 490 struct rproc_mem_entry *mapping; in rproc_handle_devmem() local 509 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); in rproc_handle_devmem() 510 if (!mapping) in rproc_handle_devmem() 526 mapping->da = rsc->da; in rproc_handle_devmem() 527 mapping->len = rsc->len; in rproc_handle_devmem() 528 list_add_tail(&mapping->node, &rproc->mappings); in rproc_handle_devmem() 536 kfree(mapping); in rproc_handle_devmem() 563 struct rproc_mem_entry *carveout, *mapping; in rproc_handle_carveout() local 615 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); in rproc_handle_carveout() 616 if (!mapping) { in rproc_handle_carveout() [all …]
|
/linux-4.4.14/arch/mips/pci/ |
D | pci-rc32434.c | 155 rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START); in rc32434_pcibridge_init() 163 rc32434_pci->pcilba[1].mapping = 0x60000000; in rc32434_pcibridge_init() 170 rc32434_pci->pcilba[2].mapping = 0x18FFFFFF; in rc32434_pcibridge_init() 179 rc32434_pci->pcilba[3].mapping = 0x18800000; in rc32434_pcibridge_init()
|
/linux-4.4.14/fs/cifs/ |
D | fscache.h | 78 struct address_space *mapping, in cifs_readpages_from_fscache() argument 83 return __cifs_readpages_from_fscache(inode, mapping, pages, in cifs_readpages_from_fscache() 132 struct address_space *mapping, in cifs_readpages_from_fscache() argument
|
D | file.c | 1835 struct address_space *mapping = page->mapping; in cifs_partialpagewrite() local 1843 if (!mapping || !mapping->host) in cifs_partialpagewrite() 1846 inode = page->mapping->host; in cifs_partialpagewrite() 1858 if (offset > mapping->host->i_size) { in cifs_partialpagewrite() 1864 if (mapping->host->i_size - offset < (loff_t)to) in cifs_partialpagewrite() 1865 to = (unsigned)(mapping->host->i_size - offset); in cifs_partialpagewrite() 1867 open_file = find_writable_file(CIFS_I(mapping->host), false); in cifs_partialpagewrite() 1888 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping, in wdata_alloc_and_fillpages() argument 1910 nr_pages = find_get_pages_tag(mapping, index, in wdata_alloc_and_fillpages() 1923 struct address_space *mapping, in wdata_prepare_pages() argument [all …]
|
D | fscache.c | 131 struct inode *inode = page->mapping->host; in cifs_fscache_release_page() 185 struct address_space *mapping, in __cifs_readpages_from_fscache() argument 193 ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, in __cifs_readpages_from_fscache() 197 mapping_gfp_mask(mapping)); in __cifs_readpages_from_fscache()
|
/linux-4.4.14/drivers/net/ethernet/broadcom/ |
D | b44.c | 633 rp->mapping, in b44_tx() 664 dma_addr_t mapping; in b44_alloc_rx_skb() local 677 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, in b44_alloc_rx_skb() 683 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || in b44_alloc_rx_skb() 684 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { in b44_alloc_rx_skb() 686 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) in b44_alloc_rx_skb() 687 dma_unmap_single(bp->sdev->dma_dev, mapping, in b44_alloc_rx_skb() 693 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, in b44_alloc_rx_skb() 696 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || in b44_alloc_rx_skb() 697 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { in b44_alloc_rx_skb() [all …]
|
/linux-4.4.14/fs/btrfs/ |
D | compression.c | 192 page->mapping = NULL; in end_compressed_bio_read() 288 cb->compressed_pages[0]->mapping = cb->inode->i_mapping; in end_compressed_bio_write() 294 cb->compressed_pages[0]->mapping = NULL; in end_compressed_bio_write() 306 page->mapping = NULL; in end_compressed_bio_write() 374 page->mapping = inode->i_mapping; in btrfs_submit_compressed_write() 382 page->mapping = NULL; in btrfs_submit_compressed_write() 453 struct address_space *mapping = inode->i_mapping; in add_ra_bio_pages() local 476 page = radix_tree_lookup(&mapping->page_tree, pg_index); in add_ra_bio_pages() 485 page = __page_cache_alloc(mapping_gfp_constraint(mapping, in add_ra_bio_pages() 490 if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { in add_ra_bio_pages() [all …]
|
D | extent_io.c | 93 if (!tree->mapping) in __btrfs_debug_check_extent_io_range() 96 inode = tree->mapping->host; in __btrfs_debug_check_extent_io_range() 157 if (!tree->mapping) in tree_fs_info() 159 return btrfs_sb(tree->mapping->host->i_sb); in tree_fs_info() 218 struct address_space *mapping) in extent_io_tree_init() argument 224 tree->mapping = mapping; in extent_io_tree_init() 369 tree->ops->merge_extent_hook(tree->mapping->host, new, in merge_cb() 421 tree->ops->set_bit_hook(tree->mapping->host, state, bits); in set_state_cb() 428 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); in clear_state_cb() 478 tree->ops->split_extent_hook(tree->mapping->host, orig, split); in split_cb() [all …]
|
D | compression.h | 25 int btrfs_compress_pages(int type, struct address_space *mapping, 57 struct address_space *mapping,
|
/linux-4.4.14/fs/ext2/ |
D | inode.c | 56 static void ext2_write_failed(struct address_space *mapping, loff_t to) in ext2_write_failed() argument 58 struct inode *inode = mapping->host; in ext2_write_failed() 797 ext2_readpages(struct file *file, struct address_space *mapping, in ext2_readpages() argument 800 return mpage_readpages(mapping, pages, nr_pages, ext2_get_block); in ext2_readpages() 804 ext2_write_begin(struct file *file, struct address_space *mapping, in ext2_write_begin() argument 810 ret = block_write_begin(mapping, pos, len, flags, pagep, in ext2_write_begin() 813 ext2_write_failed(mapping, pos + len); in ext2_write_begin() 817 static int ext2_write_end(struct file *file, struct address_space *mapping, in ext2_write_end() argument 823 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); in ext2_write_end() 825 ext2_write_failed(mapping, pos + len); in ext2_write_end() [all …]
|
/linux-4.4.14/drivers/net/ethernet/smsc/ |
D | smsc9420.c | 53 dma_addr_t mapping; member 557 BUG_ON(!pd->tx_buffers[i].mapping); in smsc9420_free_tx_ring() 558 pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping, in smsc9420_free_tx_ring() 590 if (pd->rx_buffers[i].mapping) in smsc9420_free_rx_ring() 591 pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping, in smsc9420_free_rx_ring() 811 pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, in smsc9420_rx_handoff() 813 pd->rx_buffers[index].mapping = 0; in smsc9420_rx_handoff() 836 dma_addr_t mapping; in smsc9420_alloc_rx_buffer() local 839 BUG_ON(pd->rx_buffers[index].mapping); in smsc9420_alloc_rx_buffer() 844 mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), in smsc9420_alloc_rx_buffer() [all …]
|
/linux-4.4.14/arch/sh/mm/ |
D | cache-sh7705.c | 139 struct address_space *mapping = page_mapping(page); in sh7705_flush_dcache_page() local 141 if (mapping && !mapping_mapped(mapping)) in sh7705_flush_dcache_page()
|
/linux-4.4.14/include/asm-generic/ |
D | cacheflush.h | 18 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 19 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/arch/cris/include/asm/ |
D | cacheflush.h | 17 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 18 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/arch/nios2/include/asm/ |
D | cacheflush.h | 49 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 50 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/arch/hexagon/include/asm/ |
D | cacheflush.h | 48 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 49 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/drivers/staging/comedi/drivers/ |
D | serial2002.c | 340 unsigned char *mapping, in serial2002_setup_subdevice() argument 373 if (mapping) in serial2002_setup_subdevice() 374 mapping[chan] = j; in serial2002_setup_subdevice() 479 unsigned char *mapping = NULL; in serial2002_setup_subdevs() local 488 mapping = devpriv->digital_in_mapping; in serial2002_setup_subdevs() 493 mapping = devpriv->digital_out_mapping; in serial2002_setup_subdevs() 498 mapping = devpriv->analog_in_mapping; in serial2002_setup_subdevs() 504 mapping = devpriv->analog_out_mapping; in serial2002_setup_subdevs() 510 mapping = devpriv->encoder_in_mapping; in serial2002_setup_subdevs() 516 if (serial2002_setup_subdevice(s, cfg, range, mapping, kind)) in serial2002_setup_subdevs()
|
/linux-4.4.14/arch/ia64/include/asm/ |
D | cacheflush.h | 34 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 35 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/arch/c6x/include/asm/ |
D | cacheflush.h | 34 #define flush_dcache_mmap_lock(mapping) do {} while (0) argument 35 #define flush_dcache_mmap_unlock(mapping) do {} while (0) argument
|
/linux-4.4.14/Documentation/devicetree/bindings/powerpc/fsl/ |
D | dma.txt | 12 - ranges : describes the mapping between the address space of the 16 - interrupt-parent : optional, if needed for interrupt mapping 28 - interrupt-parent : optional, if needed for interrupt mapping 82 - ranges : describes the mapping between the address space of the 91 - interrupt-parent : optional, if needed for interrupt mapping 142 - ranges : describes the mapping between the address space of the 149 - interrupt-parent : optional, if needed for interrupt mapping
|
/linux-4.4.14/arch/parisc/kernel/ |
D | cache.c | 288 struct address_space *mapping = page_mapping(page); in flush_dcache_page() local 294 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page() 301 if (!mapping) in flush_dcache_page() 311 flush_dcache_mmap_lock(mapping); in flush_dcache_page() 312 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in flush_dcache_page() 334 flush_dcache_mmap_unlock(mapping); in flush_dcache_page()
|
/linux-4.4.14/fs/fuse/ |
D | file.c | 702 struct inode *inode = page->mapping->host; in fuse_do_readpage() 749 struct inode *inode = page->mapping->host; in fuse_readpage() 768 struct address_space *mapping = NULL; in fuse_readpages_end() local 770 for (i = 0; mapping == NULL && i < req->num_pages; i++) in fuse_readpages_end() 771 mapping = req->pages[i]->mapping; in fuse_readpages_end() 773 if (mapping) { in fuse_readpages_end() 774 struct inode *inode = mapping->host; in fuse_readpages_end() 869 static int fuse_readpages(struct file *file, struct address_space *mapping, in fuse_readpages() argument 872 struct inode *inode = mapping->host; in fuse_readpages() 893 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); in fuse_readpages() [all …]
|
/linux-4.4.14/arch/score/include/asm/ |
D | cacheflush.h | 23 #define flush_dcache_mmap_lock(mapping) do {} while (0) argument 24 #define flush_dcache_mmap_unlock(mapping) do {} while (0) argument
|
/linux-4.4.14/fs/fat/ |
D | inode.c | 182 static int fat_writepages(struct address_space *mapping, in fat_writepages() argument 185 return mpage_writepages(mapping, wbc, fat_get_block); in fat_writepages() 193 static int fat_readpages(struct file *file, struct address_space *mapping, in fat_readpages() argument 196 return mpage_readpages(mapping, pages, nr_pages, fat_get_block); in fat_readpages() 199 static void fat_write_failed(struct address_space *mapping, loff_t to) in fat_write_failed() argument 201 struct inode *inode = mapping->host; in fat_write_failed() 209 static int fat_write_begin(struct file *file, struct address_space *mapping, in fat_write_begin() argument 216 err = cont_write_begin(file, mapping, pos, len, flags, in fat_write_begin() 218 &MSDOS_I(mapping->host)->mmu_private); in fat_write_begin() 220 fat_write_failed(mapping, pos + len); in fat_write_begin() [all …]
|
/linux-4.4.14/fs/fscache/ |
D | page.c | 287 struct address_space *mapping, in fscache_alloc_retrieval() argument 307 op->mapping = mapping; in fscache_alloc_retrieval() 439 op = fscache_alloc_retrieval(cookie, page->mapping, in __fscache_read_or_alloc_page() 539 struct address_space *mapping, in __fscache_read_or_alloc_pages() argument 570 op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context); in __fscache_read_or_alloc_pages() 681 op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL); in __fscache_alloc_page() 1133 op->mapping, page); in fscache_mark_page_cached() 1164 struct address_space *mapping = inode->i_mapping; in __fscache_uncache_all_inode_pages() local 1171 if (!mapping || mapping->nrpages == 0) { in __fscache_uncache_all_inode_pages() 1179 if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) in __fscache_uncache_all_inode_pages()
|
/linux-4.4.14/arch/alpha/include/asm/ |
D | cacheflush.h | 14 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 15 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | cacheflush.h | 30 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 31 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/arch/m68k/include/asm/ |
D | cacheflush_no.h | 18 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 19 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/fs/udf/ |
D | file.c | 43 struct inode *inode = page->mapping->host; in __udf_adinicb_readpage() 67 struct inode *inode = page->mapping->host; in udf_adinicb_writepage() 84 struct address_space *mapping, loff_t pos, in udf_adinicb_write_begin() argument 92 page = grab_cache_page_write_begin(mapping, 0, flags); in udf_adinicb_write_begin()
|
/linux-4.4.14/drivers/nvdimm/ |
D | region_devs.c | 31 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_release() 119 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_to_nstype() 146 struct nd_mapping *nd_mapping = &nd_region->mapping[0]; in size_show() 199 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_available_dpa() 398 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; in nd_region_notify_driver_action() 454 nd_mapping = &nd_region->mapping[n]; in mappingN() 462 static ssize_t mapping##idx##_show(struct device *dev, \ 467 static DEVICE_ATTR_RO(mapping##idx) 693 memcpy(nd_region->mapping, ndr_desc->nd_mapping, in nd_region_create()
|
/linux-4.4.14/drivers/perf/ |
D | arm_pmu.c | 61 int mapping; in armpmu_map_hw_event() local 66 mapping = (*event_map)[config]; in armpmu_map_hw_event() 67 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; in armpmu_map_hw_event() 396 int mapping; in __hw_perf_event_init() local 398 mapping = armpmu->map_event(event); in __hw_perf_event_init() 400 if (mapping < 0) { in __hw_perf_event_init() 403 return mapping; in __hw_perf_event_init() 431 hwc->config_base |= (unsigned long)mapping; in __hw_perf_event_init()
|
/linux-4.4.14/fs/qnx6/ |
D | inode.c | 101 static int qnx6_readpages(struct file *file, struct address_space *mapping, in qnx6_readpages() argument 104 return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block); in qnx6_readpages() 186 struct address_space *mapping = root->i_mapping; in qnx6_checkroot() local 187 struct page *page = read_mapping_page(mapping, 0, NULL); in qnx6_checkroot() 493 static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) in qnx6_bmap() argument 495 return generic_block_bmap(mapping, block, qnx6_get_block); in qnx6_bmap() 525 struct address_space *mapping; in qnx6_iget() local 547 mapping = sbi->inodes->i_mapping; in qnx6_iget() 548 page = read_mapping_page(mapping, n, NULL); in qnx6_iget()
|
D | dir.c | 28 struct address_space *mapping = dir->i_mapping; in qnx6_get_page() local 29 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_get_page() 53 struct address_space *mapping = sbi->longfile->i_mapping; in qnx6_longname() local 54 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_longname()
|
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/ |
D | bnxt.c | 171 dma_addr_t mapping; in bnxt_start_xmit() local 290 mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE); in bnxt_start_xmit() 292 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) { in bnxt_start_xmit() 298 dma_unmap_addr_set(tx_buf, mapping, mapping); in bnxt_start_xmit() 302 txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_start_xmit() 345 mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len, in bnxt_start_xmit() 348 if (unlikely(dma_mapping_error(&pdev->dev, mapping))) in bnxt_start_xmit() 352 dma_unmap_addr_set(tx_buf, mapping, mapping); in bnxt_start_xmit() 354 txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_start_xmit() 401 dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), in bnxt_start_xmit() [all …]
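Note: bnxt, b44, smsc9420 and acenic above all use the same streaming-DMA bookkeeping for transmit buffers: map, check for a mapping error, remember the handle with dma_unmap_addr_set(), and undo it on completion. A minimal sketch under those assumptions; the ex_* names are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical per-descriptor bookkeeping, as kept by the drivers above. */
struct ex_tx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* Map an skb head for transmit and remember how to unmap it later. */
static int ex_map_tx(struct device *dev, struct ex_tx_buf *tx_buf,
		     struct sk_buff *skb)
{
	dma_addr_t mapping;

	mapping = dma_map_single(dev, skb->data, skb_headlen(skb),
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, mapping)))
		return -ENOMEM;		/* caller drops the packet */

	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, skb_headlen(skb));
	return 0;
}

/* On TX completion, undo the mapping before freeing the skb. */
static void ex_unmap_tx(struct device *dev, struct ex_tx_buf *tx_buf)
{
	dma_unmap_single(dev, dma_unmap_addr(tx_buf, mapping),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
}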
|
/linux-4.4.14/fs/reiserfs/ |
D | ioctl.c | 176 struct address_space *mapping; in reiserfs_unpack() local 207 mapping = inode->i_mapping; in reiserfs_unpack() 208 page = grab_cache_page(mapping, index); in reiserfs_unpack()
|
/linux-4.4.14/arch/sparc/include/asm/ |
D | cacheflush_32.h | 43 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 44 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
D | cacheflush_64.h | 71 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 72 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/Documentation/devicetree/bindings/net/ |
D | ipq806x-dwmac.txt | 12 - qcom,nss-common: should contain a phandle to a syscon device mapping the 15 - qcom,qsgmii-csr: should contain a phandle to a syscon device mapping the
|
/linux-4.4.14/arch/arc/include/asm/ |
D | cacheflush.h | 47 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 48 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.4.14/arch/frv/include/asm/ |
D | cacheflush.h | 28 #define flush_dcache_mmap_lock(mapping) do {} while(0) argument 29 #define flush_dcache_mmap_unlock(mapping) do {} while(0) argument
|
/linux-4.4.14/fs/squashfs/ |
D | file_direct.c | 30 struct inode *inode = target_page->mapping->host; in squashfs_readpage_block() 62 grab_cache_page_nowait(target_page->mapping, n); in squashfs_readpage_block() 143 struct inode *i = target_page->mapping->host; in squashfs_read_cache()
|
/linux-4.4.14/arch/score/mm/ |
D | cache.c | 57 struct address_space *mapping = page_mapping(page); in flush_dcache_page() local 62 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page()
|
/linux-4.4.14/Documentation/devicetree/bindings/mips/cavium/ |
D | uctl.txt | 14 - ranges: Empty to signify direct mapping of the children. 26 ranges; /* Direct mapping */
|
/linux-4.4.14/drivers/net/ethernet/adaptec/ |
D | starfire.c | 522 dma_addr_t mapping; member 526 dma_addr_t mapping; member 1155 …np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); in init_ring() 1157 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); in init_ring() 1166 np->rx_info[i].mapping = 0; in init_ring() 1227 np->tx_info[entry].mapping = in start_tx() 1232 np->tx_info[entry].mapping = in start_tx() 1239 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); in start_tx() 1341 np->tx_info[entry].mapping, in intr_handler() 1344 np->tx_info[entry].mapping = 0; in intr_handler() [all …]
|