/linux-4.1.27/mm/
D | truncate.c | 26 static void clear_exceptional_entry(struct address_space *mapping, in clear_exceptional_entry() argument 33 if (shmem_mapping(mapping)) in clear_exceptional_entry() 36 spin_lock_irq(&mapping->tree_lock); in clear_exceptional_entry() 42 if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot)) in clear_exceptional_entry() 47 mapping->nrshadows--; in clear_exceptional_entry() 61 __radix_tree_delete_node(&mapping->page_tree, node); in clear_exceptional_entry() 63 spin_unlock_irq(&mapping->tree_lock); in clear_exceptional_entry() 86 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage() 106 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument 108 if (page->mapping != mapping) in truncate_complete_page() [all …]
|
D | filemap.c | 111 static void page_cache_tree_delete(struct address_space *mapping, in page_cache_tree_delete() argument 122 __radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot); in page_cache_tree_delete() 125 mapping->nrshadows++; in page_cache_tree_delete() 134 mapping->nrpages--; in page_cache_tree_delete() 138 mapping->page_tree.gfp_mask &= __GFP_BITS_MASK; in page_cache_tree_delete() 148 radix_tree_tag_clear(&mapping->page_tree, index, tag); in page_cache_tree_delete() 157 if (__radix_tree_delete_node(&mapping->page_tree, node)) in page_cache_tree_delete() 169 node->private_data = mapping; in page_cache_tree_delete() 181 struct address_space *mapping = page->mapping; in __delete_from_page_cache() local 192 cleancache_invalidate_page(mapping, page); in __delete_from_page_cache() [all …]
|
D | readahead.c | 28 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping) in file_ra_state_init() argument 30 ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages; in file_ra_state_init() 44 static void read_cache_pages_invalidate_page(struct address_space *mapping, in read_cache_pages_invalidate_page() argument 50 page->mapping = mapping; in read_cache_pages_invalidate_page() 52 page->mapping = NULL; in read_cache_pages_invalidate_page() 61 static void read_cache_pages_invalidate_pages(struct address_space *mapping, in read_cache_pages_invalidate_pages() argument 69 read_cache_pages_invalidate_page(mapping, victim); in read_cache_pages_invalidate_pages() 83 int read_cache_pages(struct address_space *mapping, struct list_head *pages, in read_cache_pages() argument 92 if (add_to_page_cache_lru(page, mapping, in read_cache_pages() 94 read_cache_pages_invalidate_page(mapping, page); in read_cache_pages() [all …]
|
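read_cache_pages() listed above hands each not-yet-cached page to a caller-supplied filler. A minimal sketch of that pattern, assuming example_filler() stands in for the filesystem's real per-page read routine (both helpers are hypothetical):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static int example_filler(void *data, struct page *page)
    {
            struct file *filp = data;

            /* ... read the page contents from 'filp' here ... */
            SetPageUptodate(page);
            unlock_page(page);      /* filler must leave the page unlocked */
            return 0;
    }

    static int example_readpages(struct file *filp, struct address_space *mapping,
                                 struct list_head *pages, unsigned nr_pages)
    {
            /* Pages that fail add_to_page_cache_lru() are dropped for us. */
            return read_cache_pages(mapping, pages, example_filler, filp);
    }
|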
D | page-writeback.c | 1341 static void balance_dirty_pages(struct address_space *mapping, in balance_dirty_pages() argument 1357 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); in balance_dirty_pages() 1578 void balance_dirty_pages_ratelimited(struct address_space *mapping) in balance_dirty_pages_ratelimited() argument 1580 struct backing_dev_info *bdi = inode_to_bdi(mapping->host); in balance_dirty_pages_ratelimited() 1620 balance_dirty_pages(mapping, current->nr_dirtied); in balance_dirty_pages_ratelimited() 1793 void tag_pages_for_writeback(struct address_space *mapping, in tag_pages_for_writeback() argument 1800 spin_lock_irq(&mapping->tree_lock); in tag_pages_for_writeback() 1801 tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree, in tag_pages_for_writeback() 1804 spin_unlock_irq(&mapping->tree_lock); in tag_pages_for_writeback() 1834 int write_cache_pages(struct address_space *mapping, in write_cache_pages() argument [all …]
|
D | fadvise.c | 32 struct address_space *mapping; in SYSCALL_DEFINE4() local 49 mapping = f.file->f_mapping; in SYSCALL_DEFINE4() 50 if (!mapping || len < 0) { in SYSCALL_DEFINE4() 78 bdi = inode_to_bdi(mapping->host); in SYSCALL_DEFINE4() 112 force_page_cache_readahead(mapping, f.file, start_index, in SYSCALL_DEFINE4() 119 __filemap_fdatawrite_range(mapping, offset, endbyte, in SYSCALL_DEFINE4() 131 unsigned long count = invalidate_mapping_pages(mapping, in SYSCALL_DEFINE4() 142 invalidate_mapping_pages(mapping, start_index, in SYSCALL_DEFINE4()
|
D | cleancache.c | 189 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_get_page() 193 if (cleancache_get_key(page->mapping->host, &key) < 0) in __cleancache_get_page() 227 pool_id = page->mapping->host->i_sb->cleancache_poolid; in __cleancache_put_page() 229 cleancache_get_key(page->mapping->host, &key) >= 0) { in __cleancache_put_page() 244 void __cleancache_invalidate_page(struct address_space *mapping, in __cleancache_invalidate_page() argument 248 int pool_id = mapping->host->i_sb->cleancache_poolid; in __cleancache_invalidate_page() 256 if (cleancache_get_key(mapping->host, &key) >= 0) { in __cleancache_invalidate_page() 274 void __cleancache_invalidate_inode(struct address_space *mapping) in __cleancache_invalidate_inode() argument 276 int pool_id = mapping->host->i_sb->cleancache_poolid; in __cleancache_invalidate_inode() 282 if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) in __cleancache_invalidate_inode()
|
D | util.c | 330 unsigned long mapping; in __page_rmapping() local 332 mapping = (unsigned long)page->mapping; in __page_rmapping() 333 mapping &= ~PAGE_MAPPING_FLAGS; in __page_rmapping() 335 return (void *)mapping; in __page_rmapping() 347 unsigned long mapping; in page_anon_vma() local 350 mapping = (unsigned long)page->mapping; in page_anon_vma() 351 if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON) in page_anon_vma() 358 unsigned long mapping; in page_mapping() local 371 mapping = (unsigned long)page->mapping; in page_mapping() 372 if (mapping & PAGE_MAPPING_FLAGS) in page_mapping() [all …]
|
D | migrate.c | 308 int migrate_page_move_mapping(struct address_space *mapping, in migrate_page_move_mapping() argument 316 if (!mapping) { in migrate_page_move_mapping() 323 spin_lock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 325 pslot = radix_tree_lookup_slot(&mapping->page_tree, in migrate_page_move_mapping() 330 radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) { in migrate_page_move_mapping() 331 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 336 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 350 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 388 spin_unlock_irq(&mapping->tree_lock); in migrate_page_move_mapping() 397 int migrate_huge_page_move_mapping(struct address_space *mapping, in migrate_huge_page_move_mapping() argument [all …]
|
D | workingset.c | 213 void *workingset_eviction(struct address_space *mapping, struct page *page) in workingset_eviction() argument 309 struct address_space *mapping; in shadow_lru_isolate() local 327 mapping = node->private_data; in shadow_lru_isolate() 330 if (!spin_trylock(&mapping->tree_lock)) { in shadow_lru_isolate() 354 BUG_ON(!mapping->nrshadows); in shadow_lru_isolate() 355 mapping->nrshadows--; in shadow_lru_isolate() 360 if (!__radix_tree_delete_node(&mapping->page_tree, node)) in shadow_lru_isolate() 363 spin_unlock(&mapping->tree_lock); in shadow_lru_isolate()
|
D | shmem.c | 253 static int shmem_radix_tree_replace(struct address_space *mapping, in shmem_radix_tree_replace() argument 261 pslot = radix_tree_lookup_slot(&mapping->page_tree, index); in shmem_radix_tree_replace() 264 item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock); in shmem_radix_tree_replace() 278 static bool shmem_confirm_swap(struct address_space *mapping, in shmem_confirm_swap() argument 284 item = radix_tree_lookup(&mapping->page_tree, index); in shmem_confirm_swap() 293 struct address_space *mapping, in shmem_add_to_page_cache() argument 302 page->mapping = mapping; in shmem_add_to_page_cache() 305 spin_lock_irq(&mapping->tree_lock); in shmem_add_to_page_cache() 307 error = radix_tree_insert(&mapping->page_tree, index, page); in shmem_add_to_page_cache() 309 error = shmem_radix_tree_replace(mapping, index, expected, in shmem_add_to_page_cache() [all …]
|
D | memory-failure.c | 81 struct address_space *mapping; in hwpoison_filter_dev() local 94 mapping = page_mapping(p); in hwpoison_filter_dev() 95 if (mapping == NULL || mapping->host == NULL) in hwpoison_filter_dev() 98 dev = mapping->host->i_sb->s_dev; in hwpoison_filter_dev() 455 struct address_space *mapping = page->mapping; in collect_procs_file() local 457 i_mmap_lock_read(mapping); in collect_procs_file() 465 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, in collect_procs_file() 479 i_mmap_unlock_read(mapping); in collect_procs_file() 493 if (!page->mapping) in collect_procs() 620 struct address_space *mapping; in me_pagecache_clean() local [all …]
|
D | page_io.c | 140 struct address_space *mapping = swap_file->f_mapping; in generic_swapfile_activate() local 141 struct inode *inode = mapping->host; in generic_swapfile_activate() 266 struct address_space *mapping = swap_file->f_mapping; in __swap_writepage() local 280 ret = mapping->a_ops->direct_IO(&kiocb, &from, kiocb.ki_pos); in __swap_writepage() 344 struct address_space *mapping = swap_file->f_mapping; in swap_readpage() local 346 ret = mapping->a_ops->readpage(swap_file, page); in swap_readpage() 376 struct address_space *mapping = sis->swap_file->f_mapping; in swap_set_page_dirty() local 377 return mapping->a_ops->set_page_dirty(page); in swap_set_page_dirty()
|
D | vmscan.c | 479 static void handle_write_error(struct address_space *mapping, in handle_write_error() argument 483 if (page_mapping(page) == mapping) in handle_write_error() 484 mapping_set_error(mapping, error); in handle_write_error() 504 static pageout_t pageout(struct page *page, struct address_space *mapping, in pageout() argument 525 if (!mapping) { in pageout() 539 if (mapping->a_ops->writepage == NULL) in pageout() 541 if (!may_write_to_queue(inode_to_bdi(mapping->host), sc)) in pageout() 555 res = mapping->a_ops->writepage(page, &wbc); in pageout() 557 handle_write_error(mapping, page, res); in pageout() 579 static int __remove_mapping(struct address_space *mapping, struct page *page, in __remove_mapping() argument [all …]
|
D | mincore.c | 49 static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) in mincore_page() argument 61 if (shmem_mapping(mapping)) { in mincore_page() 62 page = find_get_entry(mapping, pgoff); in mincore_page() 72 page = find_get_page(mapping, pgoff); in mincore_page() 74 page = find_get_page(mapping, pgoff); in mincore_page()
|
D | rmap.c | 459 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma() 503 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read() 600 } else if (page->mapping) { in page_address_in_vma() 601 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma() 910 struct address_space *mapping; in page_mkclean() local 922 mapping = page_mapping(page); in page_mkclean() 923 if (!mapping) in page_mkclean() 953 page->mapping = (struct address_space *) anon_vma; in page_move_anon_rmap() 982 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap() 1509 struct address_space *mapping = page->mapping; in rmap_walk_file() local [all …]
|
D | mmap.c | 238 struct file *file, struct address_space *mapping) in __remove_shared_vm_struct() argument 243 mapping_unmap_writable(mapping); in __remove_shared_vm_struct() 245 flush_dcache_mmap_lock(mapping); in __remove_shared_vm_struct() 246 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct() 247 flush_dcache_mmap_unlock(mapping); in __remove_shared_vm_struct() 259 struct address_space *mapping = file->f_mapping; in unlink_file_vma() local 260 i_mmap_lock_write(mapping); in unlink_file_vma() 261 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma() 262 i_mmap_unlock_write(mapping); in unlink_file_vma() 645 struct address_space *mapping = file->f_mapping; in __vma_link_file() local [all …]
|
D | hugetlb.c | 513 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map() local 514 struct inode *inode = mapping->host; in vma_resv_map() 970 page->mapping = NULL; in free_huge_page() 2877 struct address_space *mapping; in unmap_ref_private() local 2887 mapping = file_inode(vma->vm_file)->i_mapping; in unmap_ref_private() 2894 i_mmap_lock_write(mapping); in unmap_ref_private() 2895 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) { in unmap_ref_private() 2919 i_mmap_unlock_write(mapping); in unmap_ref_private() 3053 struct address_space *mapping; in hugetlbfs_pagecache_page() local 3056 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page() [all …]
|
/linux-4.1.27/include/linux/ |
D | pagemap.h | 30 static inline void mapping_set_error(struct address_space *mapping, int error) in mapping_set_error() argument 34 set_bit(AS_ENOSPC, &mapping->flags); in mapping_set_error() 36 set_bit(AS_EIO, &mapping->flags); in mapping_set_error() 40 static inline void mapping_set_unevictable(struct address_space *mapping) in mapping_set_unevictable() argument 42 set_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_set_unevictable() 45 static inline void mapping_clear_unevictable(struct address_space *mapping) in mapping_clear_unevictable() argument 47 clear_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_clear_unevictable() 50 static inline int mapping_unevictable(struct address_space *mapping) in mapping_unevictable() argument 52 if (mapping) in mapping_unevictable() 53 return test_bit(AS_UNEVICTABLE, &mapping->flags); in mapping_unevictable() [all …]
|
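mapping_set_error() shown above is how asynchronous writeback reports I/O failures: the error is latched in mapping->flags and surfaced by a later fsync()/filemap_fdatawait(). A hedged sketch; example_end_writeback() is a hypothetical completion helper:

    #include <linux/pagemap.h>

    static void example_end_writeback(struct page *page, int err)
    {
            if (err)
                    /* Sets AS_ENOSPC for -ENOSPC, AS_EIO otherwise. */
                    mapping_set_error(page->mapping, err);
            end_page_writeback(page);
    }
|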
D | io-mapping.h | 76 io_mapping_free(struct io_mapping *mapping) in io_mapping_free() argument 78 iomap_free(mapping->base, mapping->size); in io_mapping_free() 79 kfree(mapping); in io_mapping_free() 84 io_mapping_map_atomic_wc(struct io_mapping *mapping, in io_mapping_map_atomic_wc() argument 90 BUG_ON(offset >= mapping->size); in io_mapping_map_atomic_wc() 91 phys_addr = mapping->base + offset; in io_mapping_map_atomic_wc() 93 return iomap_atomic_prot_pfn(pfn, mapping->prot); in io_mapping_map_atomic_wc() 103 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) in io_mapping_map_wc() argument 107 BUG_ON(offset >= mapping->size); in io_mapping_map_wc() 108 phys_addr = mapping->base + offset; in io_mapping_map_wc() [all …]
|
D | cleancache.h | 53 return page->mapping->host->i_sb->cleancache_poolid >= 0; in cleancache_fs_enabled() 55 static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) in cleancache_fs_enabled_mapping() argument 57 return mapping->host->i_sb->cleancache_poolid >= 0; in cleancache_fs_enabled_mapping() 105 static inline void cleancache_invalidate_page(struct address_space *mapping, in cleancache_invalidate_page() argument 109 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) in cleancache_invalidate_page() 110 __cleancache_invalidate_page(mapping, page); in cleancache_invalidate_page() 113 static inline void cleancache_invalidate_inode(struct address_space *mapping) in cleancache_invalidate_inode() argument 115 if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) in cleancache_invalidate_inode() 116 __cleancache_invalidate_inode(mapping); in cleancache_invalidate_inode()
|
D | shmem_fs.h | 56 extern bool shmem_mapping(struct address_space *mapping); 57 extern void shmem_unlock_mapping(struct address_space *mapping); 58 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, 64 struct address_space *mapping, pgoff_t index) in shmem_read_mapping_page() argument 66 return shmem_read_mapping_page_gfp(mapping, index, in shmem_read_mapping_page() 67 mapping_gfp_mask(mapping)); in shmem_read_mapping_page()
|
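shmem_read_mapping_page() declared above is the usual way drivers (GEM objects, for instance) pull a referenced, uptodate page out of a shmem-backed file. A small sketch, with example_get_shmem_page() as a hypothetical helper:

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/shmem_fs.h>

    static struct page *example_get_shmem_page(struct file *filp, pgoff_t index)
    {
            struct address_space *mapping = file_inode(filp)->i_mapping;

            if (!shmem_mapping(mapping))
                    return ERR_PTR(-EINVAL);

            /* Returns a page with an elevated refcount, or an ERR_PTR. */
            return shmem_read_mapping_page(mapping, index);
    }
|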
D | writeback.h | 170 void balance_dirty_pages_ratelimited(struct address_space *mapping); 175 int generic_writepages(struct address_space *mapping, 177 void tag_pages_for_writeback(struct address_space *mapping, 179 int write_cache_pages(struct address_space *mapping, 182 int do_writepages(struct address_space *mapping, struct writeback_control *wbc); 184 void tag_pages_for_writeback(struct address_space *mapping,
|
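write_cache_pages() declared above does the tag-and-iterate work for a filesystem's ->writepages(); the caller only supplies a per-page callback. A minimal sketch, assuming example_writepage() is the filesystem's real page writer:

    #include <linux/pagemap.h>
    #include <linux/writeback.h>

    static int example_writepage(struct page *page,
                                 struct writeback_control *wbc, void *data)
    {
            /* ... submit the page for I/O ... */
            unlock_page(page);
            return 0;
    }

    static int example_writepages(struct address_space *mapping,
                                  struct writeback_control *wbc)
    {
            return write_cache_pages(mapping, wbc, example_writepage, NULL);
    }
|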
D | pagevec.h | 26 struct address_space *mapping, 30 unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, 33 struct address_space *mapping, pgoff_t *index, int tag,
|
D | mpage.h | 16 int mpage_readpages(struct address_space *mapping, struct list_head *pages, 19 int mpage_writepages(struct address_space *mapping,
|
D | migrate.h | 40 extern int migrate_huge_page_move_mapping(struct address_space *mapping, 42 extern int migrate_page_move_mapping(struct address_space *mapping, 60 static inline int migrate_huge_page_move_mapping(struct address_space *mapping, in migrate_huge_page_move_mapping() argument
|
D | fs.h | 377 int (*readpages)(struct file *filp, struct address_space *mapping, 380 int (*write_begin)(struct file *, struct address_space *mapping, 383 int (*write_end)(struct file *, struct address_space *mapping, 417 int pagecache_write_begin(struct file *, struct address_space *mapping, 421 int pagecache_write_end(struct file *, struct address_space *mapping, 494 int mapping_tagged(struct address_space *mapping, int tag); 496 static inline void i_mmap_lock_write(struct address_space *mapping) in i_mmap_lock_write() argument 498 down_write(&mapping->i_mmap_rwsem); in i_mmap_lock_write() 501 static inline void i_mmap_unlock_write(struct address_space *mapping) in i_mmap_unlock_write() argument 503 up_write(&mapping->i_mmap_rwsem); in i_mmap_unlock_write() [all …]
|
D | backing-dev.h | 305 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping) in mapping_cap_writeback_dirty() argument 307 return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host)); in mapping_cap_writeback_dirty() 310 static inline bool mapping_cap_account_dirty(struct address_space *mapping) in mapping_cap_account_dirty() argument 312 return bdi_cap_account_dirty(inode_to_bdi(mapping->host)); in mapping_cap_account_dirty()
|
/linux-4.1.27/drivers/gpu/drm/exynos/ |
D | exynos_drm_iommu.c | 31 struct dma_iommu_mapping *mapping = NULL; in drm_create_iommu_mapping() local 40 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start, in drm_create_iommu_mapping() 43 if (IS_ERR(mapping)) in drm_create_iommu_mapping() 44 return PTR_ERR(mapping); in drm_create_iommu_mapping() 52 dev->archdata.mapping = mapping; in drm_create_iommu_mapping() 56 arm_iommu_release_mapping(mapping); in drm_create_iommu_mapping() 72 arm_iommu_release_mapping(dev->archdata.mapping); in drm_release_iommu_mapping() 90 if (!dev->archdata.mapping) { in drm_iommu_attach_device() 103 ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping); in drm_iommu_attach_device() 136 struct dma_iommu_mapping *mapping = dev->archdata.mapping; in drm_iommu_detach_device() local [all …]
|
/linux-4.1.27/drivers/media/usb/uvc/ |
D | uvc_ctrl.c | 368 static __s32 uvc_ctrl_get_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_get_zoom() argument 386 static void uvc_ctrl_set_zoom(struct uvc_control_mapping *mapping, in uvc_ctrl_set_zoom() argument 393 static __s32 uvc_ctrl_get_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_get_rel_speed() argument 396 unsigned int first = mapping->offset / 8; in uvc_ctrl_get_rel_speed() 413 static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping, in uvc_ctrl_set_rel_speed() argument 416 unsigned int first = mapping->offset / 8; in uvc_ctrl_set_rel_speed() 767 static __s32 uvc_get_le_value(struct uvc_control_mapping *mapping, in uvc_get_le_value() argument 770 int bits = mapping->size; in uvc_get_le_value() 771 int offset = mapping->offset; in uvc_get_le_value() 788 if (mapping->data_type == UVC_CTRL_DATA_TYPE_SIGNED) in uvc_get_le_value() [all …]
|
/linux-4.1.27/arch/arm/mm/ |
D | dma-mapping.c | 1020 static int extend_iommu_mapping(struct dma_iommu_mapping *mapping); 1022 static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, in __alloc_iova() argument 1028 size_t mapping_size = mapping->bits << PAGE_SHIFT; in __alloc_iova() 1039 spin_lock_irqsave(&mapping->lock, flags); in __alloc_iova() 1040 for (i = 0; i < mapping->nr_bitmaps; i++) { in __alloc_iova() 1041 start = bitmap_find_next_zero_area(mapping->bitmaps[i], in __alloc_iova() 1042 mapping->bits, 0, count, align); in __alloc_iova() 1044 if (start > mapping->bits) in __alloc_iova() 1047 bitmap_set(mapping->bitmaps[i], start, count); in __alloc_iova() 1056 if (i == mapping->nr_bitmaps) { in __alloc_iova() [all …]
|
D | flush.c | 187 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 221 if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_page() 226 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) in __flush_dcache_aliases() argument 240 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases() 241 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in __flush_dcache_aliases() 254 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases() 262 struct address_space *mapping; in __sync_icache_dcache() local 273 mapping = page_mapping(page); in __sync_icache_dcache() 275 mapping = NULL; in __sync_icache_dcache() 278 __flush_dcache_page(mapping, page); in __sync_icache_dcache() [all …]
|
D | fault-armv.c | 132 make_coherent(struct address_space *mapping, struct vm_area_struct *vma, in make_coherent() argument 148 flush_dcache_mmap_lock(mapping); in make_coherent() 149 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in make_coherent() 162 flush_dcache_mmap_unlock(mapping); in make_coherent() 184 struct address_space *mapping; in update_mmu_cache() local 198 mapping = page_mapping(page); in update_mmu_cache() 200 __flush_dcache_page(mapping, page); in update_mmu_cache() 201 if (mapping) { in update_mmu_cache() 203 make_coherent(mapping, vma, addr, ptep, pfn); in update_mmu_cache()
|
/linux-4.1.27/drivers/net/wireless/mwifiex/ |
D | util.h | 69 struct mwifiex_dma_mapping *mapping) in mwifiex_store_mapping() argument 73 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping)); in mwifiex_store_mapping() 77 struct mwifiex_dma_mapping *mapping) in mwifiex_get_mapping() argument 81 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping)); in mwifiex_get_mapping() 86 struct mwifiex_dma_mapping mapping; in MWIFIEX_SKB_DMA_ADDR() local 88 mwifiex_get_mapping(skb, &mapping); in MWIFIEX_SKB_DMA_ADDR() 90 return mapping.addr; in MWIFIEX_SKB_DMA_ADDR()
|
/linux-4.1.27/fs/gfs2/ |
D | aops.c | 100 struct inode *inode = page->mapping->host; in gfs2_writepage_common() 114 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in gfs2_writepage_common() 156 struct inode *inode = page->mapping->host; in __gfs2_jdata_writepage() 181 struct inode *inode = page->mapping->host; in gfs2_jdata_writepage() 214 static int gfs2_writepages(struct address_space *mapping, in gfs2_writepages() argument 217 return mpage_writepages(mapping, wbc, gfs2_get_block_noalloc); in gfs2_writepages() 231 static int gfs2_write_jdata_pagevec(struct address_space *mapping, in gfs2_write_jdata_pagevec() argument 237 struct inode *inode = mapping->host; in gfs2_write_jdata_pagevec() 270 if (unlikely(page->mapping != mapping)) { in gfs2_write_jdata_pagevec() 344 static int gfs2_write_cache_jdata(struct address_space *mapping, in gfs2_write_cache_jdata() argument [all …]
|
D | meta_io.h | 43 static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping) in gfs2_mapping2sbd() argument 45 struct inode *inode = mapping->host; in gfs2_mapping2sbd() 46 if (mapping->a_ops == &gfs2_meta_aops) in gfs2_mapping2sbd() 47 return (((struct gfs2_glock *)mapping) - 1)->gl_sbd; in gfs2_mapping2sbd() 48 else if (mapping->a_ops == &gfs2_rgrp_aops) in gfs2_mapping2sbd() 49 return container_of(mapping, struct gfs2_sbd, sd_aspace); in gfs2_mapping2sbd()
|
D | meta_io.c | 116 struct address_space *mapping = gfs2_glock2aspace(gl); in gfs2_getbuf() local 124 if (mapping == NULL) in gfs2_getbuf() 125 mapping = &sdp->sd_aspace; in gfs2_getbuf() 133 page = grab_cache_page(mapping, index); in gfs2_getbuf() 139 page = find_get_page_flags(mapping, index, in gfs2_getbuf() 266 struct address_space *mapping = bh->b_page->mapping; in gfs2_remove_from_journal() local 267 struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); in gfs2_remove_from_journal()
|
D | glops.c | 37 bh->b_page->mapping, bh->b_page->flags); in gfs2_ail_error() 143 struct address_space *mapping = &sdp->sd_aspace; in rgrp_go_sync() local 152 filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync() 153 error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_sync() 154 mapping_set_error(mapping, error); in rgrp_go_sync() 177 struct address_space *mapping = &sdp->sd_aspace; in rgrp_go_inval() local 181 truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end); in rgrp_go_inval() 216 struct address_space *mapping = ip->i_inode.i_mapping; in inode_go_sync() local 217 filemap_fdatawrite(mapping); in inode_go_sync() 218 error = filemap_fdatawait(mapping); in inode_go_sync() [all …]
|
/linux-4.1.27/arch/nios2/mm/ |
D | cacheflush.c | 89 static void flush_aliases(struct address_space *mapping, struct page *page) in flush_aliases() argument 97 flush_dcache_mmap_lock(mapping); in flush_aliases() 98 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in flush_aliases() 110 flush_dcache_mmap_unlock(mapping); in flush_aliases() 176 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 190 struct address_space *mapping; in flush_dcache_page() local 199 mapping = page_mapping(page); in flush_dcache_page() 202 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page() 205 __flush_dcache_page(mapping, page); in flush_dcache_page() 206 if (mapping) { in flush_dcache_page() [all …]
|
/linux-4.1.27/fs/hpfs/ |
D | file.c | 122 static int hpfs_readpages(struct file *file, struct address_space *mapping, in hpfs_readpages() argument 125 return mpage_readpages(mapping, pages, nr_pages, hpfs_get_block); in hpfs_readpages() 128 static int hpfs_writepages(struct address_space *mapping, in hpfs_writepages() argument 131 return mpage_writepages(mapping, wbc, hpfs_get_block); in hpfs_writepages() 134 static void hpfs_write_failed(struct address_space *mapping, loff_t to) in hpfs_write_failed() argument 136 struct inode *inode = mapping->host; in hpfs_write_failed() 148 static int hpfs_write_begin(struct file *file, struct address_space *mapping, in hpfs_write_begin() argument 155 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in hpfs_write_begin() 157 &hpfs_i(mapping->host)->mmu_private); in hpfs_write_begin() 159 hpfs_write_failed(mapping, pos + len); in hpfs_write_begin() [all …]
|
/linux-4.1.27/arch/unicore32/mm/ |
D | flush.c | 61 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument 77 struct address_space *mapping; in flush_dcache_page() local 86 mapping = page_mapping(page); in flush_dcache_page() 88 if (mapping && !mapping_mapped(mapping)) in flush_dcache_page() 91 __flush_dcache_page(mapping, page); in flush_dcache_page() 92 if (mapping) in flush_dcache_page()
|
/linux-4.1.27/drivers/sh/clk/ |
D | core.c | 339 struct clk_mapping *mapping = clk->mapping; in clk_establish_mapping() local 344 if (!mapping) { in clk_establish_mapping() 351 clk->mapping = &dummy_mapping; in clk_establish_mapping() 360 mapping = clkp->mapping; in clk_establish_mapping() 361 BUG_ON(!mapping); in clk_establish_mapping() 367 if (!mapping->base && mapping->phys) { in clk_establish_mapping() 368 kref_init(&mapping->ref); in clk_establish_mapping() 370 mapping->base = ioremap_nocache(mapping->phys, mapping->len); in clk_establish_mapping() 371 if (unlikely(!mapping->base)) in clk_establish_mapping() 373 } else if (mapping->base) { in clk_establish_mapping() [all …]
|
D | cpg.c | 414 value = __raw_readl(clk->mapping->base); in fsidiv_recalc() 430 __raw_writel(0, clk->mapping->base); in fsidiv_disable() 437 value = __raw_readl(clk->mapping->base) >> 16; in fsidiv_enable() 441 __raw_writel((value << 16) | 0x3, clk->mapping->base); in fsidiv_enable() 452 __raw_writel(0, clk->mapping->base); in fsidiv_set_rate() 454 __raw_writel(idx << 16, clk->mapping->base); in fsidiv_set_rate() 486 clks[i].mapping = map; in sh_clk_fsidiv_register()
|
/linux-4.1.27/fs/9p/ |
D | vfs_addr.c | 54 struct inode *inode = page->mapping->host; in v9fs_fid_readpage() 111 static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, in v9fs_vfs_readpages() argument 117 inode = mapping->host; in v9fs_vfs_readpages() 120 ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); in v9fs_vfs_readpages() 124 ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp); in v9fs_vfs_readpages() 162 struct inode *inode = page->mapping->host; in v9fs_vfs_writepage_locked() 203 mapping_set_error(page->mapping, retval); in v9fs_vfs_writepage() 220 struct inode *inode = page->mapping->host; in v9fs_launder_page() 267 static int v9fs_write_begin(struct file *filp, struct address_space *mapping, in v9fs_write_begin() argument 275 struct inode *inode = mapping->host; in v9fs_write_begin() [all …]
|
D | cache.h | 49 struct address_space *mapping, 74 struct address_space *mapping, in v9fs_readpages_from_fscache() argument 78 return __v9fs_readpages_from_fscache(inode, mapping, pages, in v9fs_readpages_from_fscache() 130 struct address_space *mapping, in v9fs_readpages_from_fscache() argument
|
D | cache.c | 282 struct inode *inode = page->mapping->host; in __v9fs_fscache_release_page() 292 struct inode *inode = page->mapping->host; in __v9fs_fscache_invalidate_page() 356 struct address_space *mapping, in __v9fs_readpages_from_fscache() argument 368 mapping, pages, nr_pages, in __v9fs_readpages_from_fscache() 371 mapping_gfp_mask(mapping)); in __v9fs_readpages_from_fscache()
|
/linux-4.1.27/fs/ |
D | buffer.c | 354 set_bit(AS_EIO, &page->mapping->flags); in end_buffer_async_write() 570 int sync_mapping_buffers(struct address_space *mapping) in sync_mapping_buffers() argument 572 struct address_space *buffer_mapping = mapping->private_data; in sync_mapping_buffers() 574 if (buffer_mapping == NULL || list_empty(&mapping->private_list)) in sync_mapping_buffers() 578 &mapping->private_list); in sync_mapping_buffers() 601 struct address_space *mapping = inode->i_mapping; in mark_buffer_dirty_inode() local 602 struct address_space *buffer_mapping = bh->b_page->mapping; in mark_buffer_dirty_inode() 605 if (!mapping->private_data) { in mark_buffer_dirty_inode() 606 mapping->private_data = buffer_mapping; in mark_buffer_dirty_inode() 608 BUG_ON(mapping->private_data != buffer_mapping); in mark_buffer_dirty_inode() [all …]
|
D | dax.c | 202 struct address_space *mapping = inode->i_mapping; in dax_do_io() local 204 retval = filemap_write_and_wait_range(mapping, pos, end - 1); in dax_do_io() 236 static int dax_load_hole(struct address_space *mapping, struct page *page, in dax_load_hole() argument 240 struct inode *inode = mapping->host; in dax_load_hole() 242 page = find_or_create_page(mapping, vmf->pgoff, in dax_load_hole() 273 struct address_space *mapping = inode->i_mapping; in dax_insert_mapping() local 281 i_mmap_lock_read(mapping); in dax_insert_mapping() 310 i_mmap_unlock_read(mapping); in dax_insert_mapping() 319 struct address_space *mapping = file->f_mapping; in do_dax_fault() local 320 struct inode *inode = mapping->host; in do_dax_fault() [all …]
|
D | mpage.c | 100 struct inode *inode = page->mapping->host; in map_buffer_to_page() 144 struct inode *inode = page->mapping->host; in do_mpage_readpage() 356 mpage_readpages(struct address_space *mapping, struct list_head *pages, in mpage_readpages() argument 372 if (!add_to_page_cache_lru(page, mapping, in mpage_readpages() 467 struct address_space *mapping = page->mapping; in __mpage_writepage() local 468 struct inode *inode = page->mapping->host; in __mpage_writepage() 642 ret = mapping->a_ops->writepage(page, wbc); in __mpage_writepage() 650 mapping_set_error(mapping, ret); in __mpage_writepage() 676 mpage_writepages(struct address_space *mapping, in mpage_writepages() argument 685 ret = generic_writepages(mapping, wbc); in mpage_writepages() [all …]
|
D | sync.c | 282 struct address_space *mapping; in SYSCALL_DEFINE4() local 332 mapping = f.file->f_mapping; in SYSCALL_DEFINE4() 333 if (!mapping) { in SYSCALL_DEFINE4() 340 ret = filemap_fdatawait_range(mapping, offset, endbyte); in SYSCALL_DEFINE4() 346 ret = filemap_fdatawrite_range(mapping, offset, endbyte); in SYSCALL_DEFINE4() 352 ret = filemap_fdatawait_range(mapping, offset, endbyte); in SYSCALL_DEFINE4()
|
D | inode.c | 135 struct address_space *const mapping = &inode->i_data; in inode_init_always() local 168 mapping->a_ops = &empty_aops; in inode_init_always() 169 mapping->host = inode; in inode_init_always() 170 mapping->flags = 0; in inode_init_always() 171 atomic_set(&mapping->i_mmap_writable, 0); in inode_init_always() 172 mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); in inode_init_always() 173 mapping->private_data = NULL; in inode_init_always() 174 mapping->writeback_index = 0; in inode_init_always() 176 inode->i_mapping = mapping; in inode_init_always() 338 void address_space_init_once(struct address_space *mapping) in address_space_init_once() argument [all …]
|
D | block_dev.c | 65 struct address_space *mapping = bdev->bd_inode->i_mapping; in kill_bdev() local 67 if (mapping->nrpages == 0 && mapping->nrshadows == 0) in kill_bdev() 71 truncate_inode_pages(mapping, 0); in kill_bdev() 78 struct address_space *mapping = bdev->bd_inode->i_mapping; in invalidate_bdev() local 80 if (mapping->nrpages == 0) in invalidate_bdev() 85 invalidate_mapping_pages(mapping, 0, -1); in invalidate_bdev() 89 cleancache_invalidate_inode(mapping); in invalidate_bdev() 293 static int blkdev_readpages(struct file *file, struct address_space *mapping, in blkdev_readpages() argument 296 return mpage_readpages(mapping, pages, nr_pages, blkdev_get_block); in blkdev_readpages() 299 static int blkdev_write_begin(struct file *file, struct address_space *mapping, in blkdev_write_begin() argument [all …]
|
D | splice.c | 47 struct address_space *mapping; in page_cache_pipe_buf_steal() local 51 mapping = page_mapping(page); in page_cache_pipe_buf_steal() 52 if (mapping) { in page_cache_pipe_buf_steal() 73 if (remove_mapping(mapping, page)) { in page_cache_pipe_buf_steal() 112 if (!page->mapping) { in page_cache_pipe_buf_confirm() 310 struct address_space *mapping = in->f_mapping; in __generic_file_splice_read() local 338 spd.nr_pages = find_get_pages_contig(mapping, index, nr_pages, spd.pages); in __generic_file_splice_read() 346 page_cache_sync_readahead(mapping, &in->f_ra, in, in __generic_file_splice_read() 355 page = find_get_page(mapping, index); in __generic_file_splice_read() 360 page = page_cache_alloc_cold(mapping); in __generic_file_splice_read() [all …]
|
/linux-4.1.27/net/rds/ |
D | iw_rdma.c | 52 struct rds_iw_mapping mapping; member 390 ibmr = list_entry(pool->clean_list.next, struct rds_iw_mr, mapping.m_list); 391 list_del_init(&ibmr->mapping.m_list); 439 spin_lock_init(&ibmr->mapping.m_lock); 440 INIT_LIST_HEAD(&ibmr->mapping.m_list); 441 ibmr->mapping.m_mr = ibmr; 466 ib_dma_sync_sg_for_cpu(rds_iwdev->dev, ibmr->mapping.m_sg.list, 467 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL); 470 ib_dma_sync_sg_for_device(rds_iwdev->dev, ibmr->mapping.m_sg.list, 471 ibmr->mapping.m_sg.dma_len, DMA_BIDIRECTIONAL); [all …]
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/ |
D | lustre_patchless_compat.h | 50 truncate_complete_page(struct address_space *mapping, struct page *page) in truncate_complete_page() argument 52 if (page->mapping != mapping) in truncate_complete_page() 56 page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); in truncate_complete_page() 59 account_page_cleaned(page, mapping); in truncate_complete_page()
|
/linux-4.1.27/Documentation/ |
D | io-mapping.txt | 1 The io_mapping functions in linux/io-mapping.h provide an abstraction for 2 efficiently mapping small regions of an I/O device to the CPU. The initial 7 A mapping object is created during driver initialization using 13 mappable, while 'size' indicates how large a mapping region to 16 This _wc variant provides a mapping which may only be used 19 With this mapping object, individual pages can be mapped either atomically 23 void *io_mapping_map_atomic_wc(struct io_mapping *mapping, 26 'offset' is the offset within the defined mapping region. 48 void *io_mapping_map_wc(struct io_mapping *mapping, 61 void io_mapping_free(struct io_mapping *mapping) [all …]
|
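Putting the io-mapping.txt fragments above together, the lifecycle is create / map / unmap / free. A sketch against the 4.1 signatures, where the BAR base, size and page offset are assumed to come from the driver:

    #include <linux/io-mapping.h>

    static int example_use_bar(resource_size_t base, unsigned long size,
                               unsigned long offset)
    {
            struct io_mapping *map;
            void *vaddr;

            map = io_mapping_create_wc(base, size);         /* driver init */
            if (!map)
                    return -ENOMEM;

            vaddr = io_mapping_map_atomic_wc(map, offset);  /* per page */
            /* ... touch one page of the BAR through vaddr ... */
            io_mapping_unmap_atomic(vaddr);

            io_mapping_free(map);                           /* teardown */
            return 0;
    }
|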
D | nommu-mmap.txt | 5 The kernel has limited support for memory mapping under no-MMU conditions, such 7 mapping is made use of in conjunction with the mmap() system call, the shmat() 9 mapping is actually performed by the binfmt drivers, which call back into the 12 Memory mapping behaviour also involves the way fork(), vfork(), clone() and 19 (*) Anonymous mapping, MAP_PRIVATE 27 (*) Anonymous mapping, MAP_SHARED 37 the underlying file are reflected in the mapping; copied across fork. 41 - If one exists, the kernel will re-use an existing mapping to the 45 - If possible, the file mapping will be directly on the backing device 47 appropriate mapping protection capabilities. Ramfs, romfs, cramfs [all …]
|
D | IRQ-domain.txt | 1 irq_domain interrupt number mapping library 26 irq numbers, but they don't provide any support for reverse mapping of 30 The irq_domain library adds mapping between hwirq and IRQ numbers on 31 top of the irq_alloc_desc*() API. An irq_domain to manage mapping is 33 reverse mapping scheme. 41 calling one of the irq_domain_add_*() functions (each mapping method 49 hwirq number as arguments. If a mapping for the hwirq doesn't already 66 There are several mechanisms available for reverse mapping from hwirq 98 Very few drivers should need this mapping. 103 The No Map mapping is to be used when the hwirq number is [all …]
|
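The linear reverse map that IRQ-domain.txt recommends is the common case. A sketch of registering a domain and resolving one hwirq; the ops structure, domain size and hwirq number here are purely illustrative:

    #include <linux/irqdomain.h>
    #include <linux/of.h>

    static struct irq_domain *example_domain;

    static int example_pic_init(struct device_node *np,
                                const struct irq_domain_ops *ops)
    {
            unsigned int virq;

            example_domain = irq_domain_add_linear(np, 32, ops, NULL);
            if (!example_domain)
                    return -ENOMEM;

            /* hwirq 5 -> dynamically allocated Linux irq number */
            virq = irq_create_mapping(example_domain, 5);
            return virq ? 0 : -EINVAL;
    }
|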
D | DMA-attributes.txt | 28 DMA_ATTR_WEAK_ORDERING specifies that reads and writes to the mapping 38 DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be 57 virtual mapping for the allocated buffer. On some architectures creating 58 such mapping is non-trivial task and consumes very limited resources 77 having a mapping created separately for each device and is usually 92 device domain after releasing a mapping for it. Use this attribute with 98 By default DMA-mapping subsystem is allowed to assemble the buffer
|
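DMA_ATTR_WRITE_COMBINE above is passed through the 4.1-era struct dma_attrs interface. A hedged sketch (example_alloc_wc() is hypothetical; dma_alloc_writecombine() is the shorthand on architectures that provide it):

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    static void *example_alloc_wc(struct device *dev, size_t size,
                                  dma_addr_t *handle)
    {
            DEFINE_DMA_ATTRS(attrs);

            dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
            return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
    }
|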
D | DMA-API-HOWTO.txt | 1 Dynamic DMA mapping Guide 51 | | mapping | | by host | | 59 | | mapping | RAM | by IOMMU 84 mapping and returns the DMA address Z. The driver then tells the device to 88 So that Linux can use the dynamic DMA mapping, it needs some help from the 103 #include <linux/dma-mapping.h> 107 everywhere you hold a DMA address returned from the DMA mapping functions. 112 be used with the DMA mapping facilities. There has been an unwritten 369 The interfaces for using this type of mapping were designed in 374 Neither type of DMA mapping has alignment restrictions that come from [all …]
|
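The streaming ("dynamic") mapping that DMA-API-HOWTO.txt describes boils down to map, check, use, unmap. A minimal sketch, with dev/buf/len assumed to come from the driver:

    #include <linux/dma-mapping.h>

    static int example_dma_to_device(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... program 'handle' into the hardware, wait for completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
|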
/linux-4.1.27/fs/ecryptfs/ |
D | mmap.c | 148 page_virt, page->mapping->host); in ecryptfs_copy_up_encrypted_with_header() 170 crypt_stat->extent_size, page->mapping->host); in ecryptfs_copy_up_encrypted_with_header() 197 &ecryptfs_inode_to_private(page->mapping->host)->crypt_stat; in ecryptfs_readpage() 203 page->mapping->host); in ecryptfs_readpage() 220 page->mapping->host); in ecryptfs_readpage() 251 struct inode *inode = page->mapping->host; in fill_zeros_to_end_of_page() 279 struct address_space *mapping, in ecryptfs_write_begin() argument 288 page = grab_cache_page_write_begin(mapping, index, flags); in ecryptfs_write_begin() 296 &ecryptfs_inode_to_private(mapping->host)->crypt_stat; in ecryptfs_write_begin() 300 page, index, 0, PAGE_CACHE_SIZE, mapping->host); in ecryptfs_write_begin() [all …]
|
/linux-4.1.27/fs/nilfs2/ |
D | page.c | 62 struct address_space *mapping, in nilfs_grab_buffer() argument 71 page = grab_cache_page(mapping, index); in nilfs_grab_buffer() 178 m = page->mapping; in nilfs_page_bug() 352 page->mapping = NULL; in nilfs_copy_back_pages() 355 page->mapping = dmap; in nilfs_copy_back_pages() 377 void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent) in nilfs_clear_dirty_pages() argument 385 while (pagevec_lookup_tag(&pvec, mapping, &index, PAGECACHE_TAG_DIRTY, in nilfs_clear_dirty_pages() 406 struct inode *inode = page->mapping->host; in nilfs_clear_dirty_page() 460 void nilfs_mapping_init(struct address_space *mapping, struct inode *inode) in nilfs_mapping_init() argument 462 mapping->host = inode; in nilfs_mapping_init() [all …]
|
D | dir.c | 90 struct address_space *mapping, in nilfs_commit_chunk() argument 93 struct inode *dir = mapping->host; in nilfs_commit_chunk() 100 copied = block_write_end(NULL, mapping, pos, len, len, page, NULL); in nilfs_commit_chunk() 112 struct inode *dir = page->mapping->host; in nilfs_check_page() 187 struct address_space *mapping = dir->i_mapping; in nilfs_get_page() local 188 struct page *page = read_mapping_page(mapping, n, NULL); in nilfs_get_page() 420 struct address_space *mapping = page->mapping; in nilfs_set_link() local 428 nilfs_commit_chunk(page, mapping, from, to); in nilfs_set_link() 519 nilfs_commit_chunk(page, page->mapping, from, to); in nilfs_add_link() 538 struct address_space *mapping = page->mapping; in nilfs_delete_entry() local [all …]
|
D | inode.c | 169 static int nilfs_readpages(struct file *file, struct address_space *mapping, in nilfs_readpages() argument 172 return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); in nilfs_readpages() 175 static int nilfs_writepages(struct address_space *mapping, in nilfs_writepages() argument 178 struct inode *inode = mapping->host; in nilfs_writepages() 182 nilfs_clear_dirty_pages(mapping, false); in nilfs_writepages() 195 struct inode *inode = page->mapping->host; in nilfs_writepage() 225 struct inode *inode = page->mapping->host; in nilfs_set_page_dirty() 259 void nilfs_write_failed(struct address_space *mapping, loff_t to) in nilfs_write_failed() argument 261 struct inode *inode = mapping->host; in nilfs_write_failed() 269 static int nilfs_write_begin(struct file *file, struct address_space *mapping, in nilfs_write_begin() argument [all …]
|
D | btnode.c | 144 struct address_space *mapping; in nilfs_btnode_delete() local 155 mapping = page->mapping; in nilfs_btnode_delete() 159 if (!still_dirty && mapping) in nilfs_btnode_delete() 160 invalidate_inode_pages2_range(mapping, index, index); in nilfs_btnode_delete()
|
/linux-4.1.27/arch/powerpc/boot/dts/fsl/ |
D | t4240si-pre.dtsi | 73 fsl,portid-mapping = <0x80000000>; 80 fsl,portid-mapping = <0x80000000>; 87 fsl,portid-mapping = <0x80000000>; 94 fsl,portid-mapping = <0x80000000>; 101 fsl,portid-mapping = <0x40000000>; 108 fsl,portid-mapping = <0x40000000>; 115 fsl,portid-mapping = <0x40000000>; 122 fsl,portid-mapping = <0x40000000>; 129 fsl,portid-mapping = <0x20000000>; 136 fsl,portid-mapping = <0x20000000>; [all …]
|
D | p4080si-pre.dtsi | 86 fsl,portid-mapping = <0x80000000>; 96 fsl,portid-mapping = <0x40000000>; 106 fsl,portid-mapping = <0x20000000>; 116 fsl,portid-mapping = <0x10000000>; 126 fsl,portid-mapping = <0x08000000>; 136 fsl,portid-mapping = <0x04000000>; 146 fsl,portid-mapping = <0x02000000>; 156 fsl,portid-mapping = <0x01000000>;
|
D | t208xsi-pre.dtsi | 75 fsl,portid-mapping = <0x80000000>; 82 fsl,portid-mapping = <0x80000000>; 89 fsl,portid-mapping = <0x80000000>; 96 fsl,portid-mapping = <0x80000000>;
|
D | b4860si-pre.dtsi | 69 fsl,portid-mapping = <0x80000000>; 76 fsl,portid-mapping = <0x80000000>; 83 fsl,portid-mapping = <0x80000000>; 90 fsl,portid-mapping = <0x80000000>;
|
D | p2041si-pre.dtsi | 86 fsl,portid-mapping = <0x80000000>; 96 fsl,portid-mapping = <0x40000000>; 106 fsl,portid-mapping = <0x20000000>; 116 fsl,portid-mapping = <0x10000000>;
|
D | p3041si-pre.dtsi | 87 fsl,portid-mapping = <0x80000000>; 97 fsl,portid-mapping = <0x40000000>; 107 fsl,portid-mapping = <0x20000000>; 117 fsl,portid-mapping = <0x10000000>;
|
D | p5040si-pre.dtsi | 86 fsl,portid-mapping = <0x80000000>; 96 fsl,portid-mapping = <0x40000000>; 106 fsl,portid-mapping = <0x20000000>; 116 fsl,portid-mapping = <0x10000000>;
|
/linux-4.1.27/fs/jfs/ |
D | inode.c | 287 static int jfs_writepages(struct address_space *mapping, in jfs_writepages() argument 290 return mpage_writepages(mapping, wbc, jfs_get_block); in jfs_writepages() 298 static int jfs_readpages(struct file *file, struct address_space *mapping, in jfs_readpages() argument 301 return mpage_readpages(mapping, pages, nr_pages, jfs_get_block); in jfs_readpages() 304 static void jfs_write_failed(struct address_space *mapping, loff_t to) in jfs_write_failed() argument 306 struct inode *inode = mapping->host; in jfs_write_failed() 314 static int jfs_write_begin(struct file *file, struct address_space *mapping, in jfs_write_begin() argument 320 ret = nobh_write_begin(mapping, pos, len, flags, pagep, fsdata, in jfs_write_begin() 323 jfs_write_failed(mapping, pos + len); in jfs_write_begin() 328 static sector_t jfs_bmap(struct address_space *mapping, sector_t block) in jfs_bmap() argument [all …]
|
D | jfs_metapage.c | 119 l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; in insert_metapage() 131 int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits; in remove_metapage() 352 struct inode *inode = page->mapping->host; in metapage_writepage() 485 struct inode *inode = page->mapping->host; in metapage_readpage() 592 struct address_space *mapping; in __get_metapage() local 612 mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping; in __get_metapage() 621 mapping = inode->i_mapping; in __get_metapage() 625 page = grab_cache_page(mapping, page_index); in __get_metapage() 632 page = read_mapping_page(mapping, page_index, NULL); in __get_metapage() 776 struct address_space *mapping = in __invalidate_metapages() local [all …]
|
/linux-4.1.27/drivers/mfd/ |
D | htc-pasic3.c | 25 void __iomem *mapping; member 41 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); in pasic3_write_register() 42 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); in pasic3_write_register() 56 void __iomem *addr = asic->mapping + (REG_ADDR << bus_shift); in pasic3_read_register() 57 void __iomem *data = asic->mapping + (REG_DATA << bus_shift); in pasic3_read_register() 156 asic->mapping = ioremap(r->start, resource_size(r)); in pasic3_probe() 157 if (!asic->mapping) { in pasic3_probe() 194 iounmap(asic->mapping); in pasic3_remove()
|
/linux-4.1.27/arch/c6x/platforms/ |
D | megamod-pic.c | 178 int *mapping, int size) in parse_priority_map() argument 194 mapping[i] = val; in parse_priority_map() 204 int mapping[NR_MUX_OUTPUTS]; in init_megamod_pic() local 232 for (i = 0; i < ARRAY_SIZE(mapping); i++) in init_megamod_pic() 233 mapping[i] = IRQ_UNMAPPED; in init_megamod_pic() 235 parse_priority_map(pic, mapping, ARRAY_SIZE(mapping)); in init_megamod_pic() 273 mapping[hwirq - 4] = i; in init_megamod_pic() 291 if (mapping[i] != IRQ_UNMAPPED) { in init_megamod_pic() 293 np->full_name, mapping[i], i + 4); in init_megamod_pic() 294 set_megamod_mux(pic, mapping[i], i); in init_megamod_pic()
|
/linux-4.1.27/fs/afs/ |
D | file.c | 27 static int afs_readpages(struct file *filp, struct address_space *mapping, 127 struct inode *inode = page->mapping->host; in afs_page_filler() 225 struct inode *inode = page->mapping->host; in afs_readpage() 240 static int afs_readpages(struct file *file, struct address_space *mapping, in afs_readpages() argument 248 key_serial(key), mapping->host->i_ino, nr_pages); in afs_readpages() 252 vnode = AFS_FS_I(mapping->host); in afs_readpages() 261 mapping, in afs_readpages() 266 mapping_gfp_mask(mapping)); in afs_readpages() 291 ret = read_cache_pages(mapping, pages, afs_page_filler, key); in afs_readpages() 325 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); in afs_invalidatepage() [all …]
|
D | write.c | 118 int afs_write_begin(struct file *file, struct address_space *mapping, in afs_write_begin() argument 146 page = grab_cache_page_write_begin(mapping, index, flags); in afs_write_begin() 244 int afs_write_end(struct file *file, struct address_space *mapping, in afs_write_end() argument 463 static int afs_writepages_region(struct address_space *mapping, in afs_writepages_region() argument 474 n = find_get_pages_tag(mapping, &index, PAGECACHE_TAG_DIRTY, in afs_writepages_region() 495 if (page->mapping != mapping) { in afs_writepages_region() 537 int afs_writepages(struct address_space *mapping, in afs_writepages() argument 546 start = mapping->writeback_index; in afs_writepages() 548 ret = afs_writepages_region(mapping, wbc, start, end, &next); in afs_writepages() 550 ret = afs_writepages_region(mapping, wbc, 0, start, in afs_writepages() [all …]
|
/linux-4.1.27/arch/m32r/include/asm/ |
D | cacheflush.h | 17 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 18 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument 39 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 40 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument 53 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 54 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
D | ipoib_ib.c | 95 u64 mapping[IPOIB_UD_RX_SG]) in ipoib_ud_dma_unmap_rx() 97 ib_dma_unmap_single(priv->ca, mapping[0], in ipoib_ud_dma_unmap_rx() 109 priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; in ipoib_ib_post_receive() 110 priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; in ipoib_ib_post_receive() 116 ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); in ipoib_ib_post_receive() 129 u64 *mapping; in ipoib_alloc_rx_skb() local 144 mapping = priv->rx_ring[id].mapping; in ipoib_alloc_rx_skb() 145 mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, in ipoib_alloc_rx_skb() 147 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) in ipoib_alloc_rx_skb() 181 u64 mapping[IPOIB_UD_RX_SG]; in ipoib_ib_handle_rx_wc() local [all …]
|
D | ipoib_cm.c | 81 u64 mapping[IPOIB_CM_RX_SG]) in ipoib_cm_dma_unmap_rx() 85 ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx() 88 ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE); in ipoib_cm_dma_unmap_rx() 100 priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i]; in ipoib_cm_post_receive_srq() 106 priv->cm.srq_ring[id].mapping); in ipoib_cm_post_receive_srq() 126 sge[i].addr = rx->rx_ring[id].mapping[i]; in ipoib_cm_post_receive_nonsrq() 132 rx->rx_ring[id].mapping); in ipoib_cm_post_receive_nonsrq() 143 u64 mapping[IPOIB_CM_RX_SG], in ipoib_cm_alloc_rx_skb() 160 mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE, in ipoib_cm_alloc_rx_skb() 162 if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) { in ipoib_cm_alloc_rx_skb() [all …]
|
/linux-4.1.27/drivers/net/ethernet/dec/tulip/ |
D | interrupt.c | 70 dma_addr_t mapping; in tulip_refill_rx() local 77 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ, in tulip_refill_rx() 79 if (dma_mapping_error(&tp->pdev->dev, mapping)) { in tulip_refill_rx() 85 tp->rx_buffers[entry].mapping = mapping; in tulip_refill_rx() 87 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping); in tulip_refill_rx() 214 tp->rx_buffers[entry].mapping, in tulip_poll() 226 tp->rx_buffers[entry].mapping, in tulip_poll() 233 if (tp->rx_buffers[entry].mapping != in tulip_poll() 238 (unsigned long long)tp->rx_buffers[entry].mapping, in tulip_poll() 243 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping, in tulip_poll() [all …]
|
D | tulip_core.c | 356 dma_addr_t mapping; in tulip_up() local 365 mapping = pci_map_single(tp->pdev, tp->setup_frame, in tulip_up() 369 tp->tx_buffers[tp->cur_tx].mapping = mapping; in tulip_up() 373 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping); in tulip_up() 631 tp->rx_buffers[i].mapping = 0; in tulip_init_ring() 638 dma_addr_t mapping; in tulip_init_ring() local 647 mapping = pci_map_single(tp->pdev, skb->data, in tulip_init_ring() 649 tp->rx_buffers[i].mapping = mapping; in tulip_init_ring() 651 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping); in tulip_init_ring() 659 tp->tx_buffers[i].mapping = 0; in tulip_init_ring() [all …]
|
D | de2104x.c | 294 dma_addr_t mapping; member 411 dma_addr_t mapping; in de_rx() local 423 mapping = de->rx_skb[rx_tail].mapping; in de_rx() 451 pci_unmap_single(de->pdev, mapping, in de_rx() 455 mapping = in de_rx() 456 de->rx_skb[rx_tail].mapping = in de_rx() 461 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); in de_rx() 465 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); in de_rx() 485 de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping); in de_rx() 562 pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping, in de_tx() [all …]
|
/linux-4.1.27/fs/hfsplus/ |
D | bitmap.c | 23 struct address_space *mapping; in hfsplus_block_allocate() local 35 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_allocate() 36 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, NULL); in hfsplus_block_allocate() 80 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, in hfsplus_block_allocate() 131 page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS, in hfsplus_block_allocate() 167 struct address_space *mapping; in hfsplus_block_free() local 182 mapping = sbi->alloc_file->i_mapping; in hfsplus_block_free() 184 page = read_mapping_page(mapping, pnr, NULL); in hfsplus_block_free() 218 page = read_mapping_page(mapping, ++pnr, NULL); in hfsplus_block_free()
|
D | inode.c | 34 static void hfsplus_write_failed(struct address_space *mapping, loff_t to) in hfsplus_write_failed() argument 36 struct inode *inode = mapping->host; in hfsplus_write_failed() 44 static int hfsplus_write_begin(struct file *file, struct address_space *mapping, in hfsplus_write_begin() argument 51 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in hfsplus_write_begin() 53 &HFSPLUS_I(mapping->host)->phys_size); in hfsplus_write_begin() 55 hfsplus_write_failed(mapping, pos + len); in hfsplus_write_begin() 60 static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block) in hfsplus_bmap() argument 62 return generic_block_bmap(mapping, block, hfsplus_get_block); in hfsplus_bmap() 67 struct inode *inode = page->mapping->host; in hfsplus_releasepage() 129 struct address_space *mapping = file->f_mapping; in hfsplus_direct_IO() local [all …]
|
/linux-4.1.27/fs/nfs/ |
D | file.c | 354 static int nfs_write_begin(struct file *file, struct address_space *mapping, in nfs_write_begin() argument 364 file, mapping->host->i_ino, len, (long long) pos); in nfs_write_begin() 371 ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING, in nfs_write_begin() 378 nfs_inode_dio_wait(mapping->host); in nfs_write_begin() 380 page = grab_cache_page_write_begin(mapping, index, flags); in nfs_write_begin() 400 static int nfs_write_end(struct file *file, struct address_space *mapping, in nfs_write_end() argument 409 file, mapping->host->i_ino, len, (long long) pos); in nfs_write_end() 438 NFS_I(mapping->host)->write_io += copied; in nfs_write_end() 441 status = nfs_wb_all(mapping->host); in nfs_write_end() 467 nfs_fscache_invalidate_page(page, page->mapping->host); in nfs_invalidate_page() [all …]
|
D | fscache.c | 263 struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host); in nfs_fscache_release_page() 267 cookie, page, NFS_I(page->mapping->host)); in nfs_fscache_release_page() 272 nfs_inc_fscache_stats(page->mapping->host, in nfs_fscache_release_page() 296 nfs_inc_fscache_stats(page->mapping->host, in __nfs_fscache_invalidate_page() 318 error = nfs_readpage_async(context, page->mapping->host, page); in nfs_readpage_from_fscache_complete() 368 struct address_space *mapping, in __nfs_readpages_from_fscache() argument 379 mapping, pages, nr_pages, in __nfs_readpages_from_fscache() 382 mapping_gfp_mask(mapping)); in __nfs_readpages_from_fscache()
|
D | direct.c | 571 struct address_space *mapping = file->f_mapping; in nfs_file_direct_read() local 572 struct inode *inode = mapping->host; in nfs_file_direct_read() 577 nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count); in nfs_file_direct_read() 587 result = nfs_sync_mapping(mapping); in nfs_file_direct_read() 962 struct address_space *mapping = file->f_mapping; in nfs_file_direct_write() local 963 struct inode *inode = mapping->host; in nfs_file_direct_write() 971 nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, in nfs_file_direct_write() 979 result = nfs_sync_mapping(mapping); in nfs_file_direct_write() 983 if (mapping->nrpages) { in nfs_file_direct_write() 984 result = invalidate_inode_pages2_range(mapping, in nfs_file_direct_write() [all …]
|
D | fscache.h | 131 struct address_space *mapping, in nfs_readpages_from_fscache() argument 136 return __nfs_readpages_from_fscache(ctx, inode, mapping, pages, in nfs_readpages_from_fscache() 210 struct address_space *mapping, in nfs_readpages_from_fscache() argument
|
/linux-4.1.27/Documentation/devicetree/bindings/sound/ |
D | st,sta32x.txt | 30 - st,ch1-output-mapping: Channel 1 output mapping 31 - st,ch2-output-mapping: Channel 2 output mapping 32 - st,ch3-output-mapping: Channel 3 output mapping 84 st,ch1-output-mapping = /bits/ 8 <0>; // set channel 1 output ch 1 85 st,ch2-output-mapping = /bits/ 8 <0>; // set channel 2 output ch 1 86 st,ch3-output-mapping = /bits/ 8 <0>; // set channel 3 output ch 1
|
D | st,sta350.txt | 30 - st,ch1-output-mapping: Channel 1 output mapping 31 - st,ch2-output-mapping: Channel 2 output mapping 32 - st,ch3-output-mapping: Channel 3 output mapping 123 st,ch1-output-mapping = /bits/ 8 <0>; // set channel 1 output ch 1 124 st,ch2-output-mapping = /bits/ 8 <0>; // set channel 2 output ch 1 125 st,ch3-output-mapping = /bits/ 8 <0>; // set channel 3 output ch 1
|
/linux-4.1.27/fs/bfs/ |
D | file.c | 162 static void bfs_write_failed(struct address_space *mapping, loff_t to) in bfs_write_failed() argument 164 struct inode *inode = mapping->host; in bfs_write_failed() 170 static int bfs_write_begin(struct file *file, struct address_space *mapping, in bfs_write_begin() argument 176 ret = block_write_begin(mapping, pos, len, flags, pagep, in bfs_write_begin() 179 bfs_write_failed(mapping, pos + len); in bfs_write_begin() 184 static sector_t bfs_bmap(struct address_space *mapping, sector_t block) in bfs_bmap() argument 186 return generic_block_bmap(mapping, block, bfs_get_block); in bfs_bmap()
|
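bfs_write_begin() above shows the usual pairing of block_write_begin() with a small "write failed" helper that trims pagecache past i_size when the begin step errors out; hfsplus, adfs, hfs, affs and fat in this listing do the same with cont_write_begin(). A hedged sketch of the pattern, with placeholder names rather than the bfs ones:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/buffer_head.h>

/* Placeholder get_block; a real filesystem supplies its own. */
extern int example_get_block(struct inode *inode, sector_t block,
			     struct buffer_head *bh, int create);

static void example_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	/* Undo pagecache instantiated beyond the current file size. */
	if (to > inode->i_size)
		truncate_pagecache(inode, inode->i_size);
}

static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	int ret;

	ret = block_write_begin(mapping, pos, len, flags, pagep,
				example_get_block);
	if (unlikely(ret))
		example_write_failed(mapping, pos + len);
	return ret;
}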
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/4xx/ |
D | ppc440spe-adma.txt | 16 - reg : <registers mapping> 35 - reg : <registers mapping> 37 - interrupts : <interrupt mapping for DMA0/1 interrupts sources: 41 - interrupt-parent : needed for interrupt mapping 66 - reg : <registers mapping> 67 - interrupts : <interrupt mapping for XOR interrupt source> 68 - interrupt-parent : for interrupt mapping
|
D | hsta.txt | 15 - reg : register mapping for the HSTA MSI space 16 - interrupt-parent : parent controller for mapping interrupts 17 - interrupts : ordered interrupt mapping for each MSI in the register
|
D | emac.txt | 20 - interrupts : <interrupt mapping for EMAC IRQ and WOL IRQ> 21 - interrupt-parent : optional, if needed for interrupt mapping 22 - reg : <registers mapping> 116 - interrupts : <interrupt mapping for the MAL interrupts sources: 120 and rxeob. Thus we end up with mapping those 5 MPIC 135 - reg : <registers mapping> 144 - reg : <registers mapping>
|
/linux-4.1.27/include/trace/events/ |
D | filemap.h | 29 __entry->i_ino = page->mapping->host->i_ino; 31 if (page->mapping->host->i_sb) 32 __entry->s_dev = page->mapping->host->i_sb->s_dev; 34 __entry->s_dev = page->mapping->host->i_rdev;
|
/linux-4.1.27/arch/arm64/include/asm/ |
D | cacheflush.h | 129 #define flush_dcache_mmap_lock(mapping) \ argument 130 spin_lock_irq(&(mapping)->tree_lock) 131 #define flush_dcache_mmap_unlock(mapping) \ argument 132 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.1.27/fs/omfs/ |
D | file.c | 292 static int omfs_readpages(struct file *file, struct address_space *mapping, in omfs_readpages() argument 295 return mpage_readpages(mapping, pages, nr_pages, omfs_get_block); in omfs_readpages() 304 omfs_writepages(struct address_space *mapping, struct writeback_control *wbc) in omfs_writepages() argument 306 return mpage_writepages(mapping, wbc, omfs_get_block); in omfs_writepages() 309 static void omfs_write_failed(struct address_space *mapping, loff_t to) in omfs_write_failed() argument 311 struct inode *inode = mapping->host; in omfs_write_failed() 319 static int omfs_write_begin(struct file *file, struct address_space *mapping, in omfs_write_begin() argument 325 ret = block_write_begin(mapping, pos, len, flags, pagep, in omfs_write_begin() 328 omfs_write_failed(mapping, pos + len); in omfs_write_begin() 333 static sector_t omfs_bmap(struct address_space *mapping, sector_t block) in omfs_bmap() argument [all …]
|
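omfs wires ->readpages and ->writepages straight through to the mpage helpers with its get_block callback, as many simple block-based filesystems do. A sketch of that wiring under placeholder names:

#include <linux/fs.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>

extern int example_get_block(struct inode *inode, sector_t block,
			     struct buffer_head *bh, int create);

static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	/* Batched read-ahead: build as few large BIOs as possible. */
	return mpage_readpages(mapping, pages, nr_pages, example_get_block);
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	/* Writeback path used by the flusher threads and sync. */
	return mpage_writepages(mapping, wbc, example_get_block);
}

static const struct address_space_operations example_aops = {
	.readpages	= example_readpages,
	.writepages	= example_writepages,
	/* .readpage, .writepage, .write_begin, .write_end, .bmap ... */
};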
/linux-4.1.27/fs/ceph/ |
D | addr.c | 71 struct address_space *mapping = page->mapping; in ceph_set_page_dirty() local 77 if (unlikely(!mapping)) in ceph_set_page_dirty() 82 mapping->host, page, page->index); in ceph_set_page_dirty() 87 inode = mapping->host; in ceph_set_page_dirty() 106 mapping->host, page, page->index, in ceph_set_page_dirty() 122 WARN_ON(!page->mapping); in ceph_set_page_dirty() 139 inode = page->mapping->host; in ceph_invalidatepage() 174 struct inode *inode = page->mapping ? page->mapping->host : NULL; in ceph_releasepage() 397 static int ceph_readpages(struct file *file, struct address_space *mapping, in ceph_readpages() argument 408 rc = ceph_readpages_from_fscache(mapping->host, mapping, page_list, in ceph_readpages() [all …]
|
D | cache.h | 44 struct address_space *mapping, 71 struct inode* inode = page->mapping->host; in ceph_release_fscache_page() 132 struct address_space *mapping, in ceph_readpages_from_fscache() argument
|
/linux-4.1.27/Documentation/ia64/ |
D | aliasing.txt | 25 page with both a cacheable mapping and an uncacheable mapping[1]. 52 in the system because of constraints imposed by the identity mapping 65 identity mapping only when the entire granule supports cacheable 69 can be referenced safely by an identity mapping. 92 by a kernel identity mapping, the user mapping must use the same 93 attribute as the kernel mapping. 95 If the region is not in kern_memmap, the user mapping should use 99 machines, this should use an uncacheable mapping as a fallback. 122 the WC mapping is allowed. 124 Otherwise, the user mapping must use the same attribute as the [all …]
|
/linux-4.1.27/fs/freevxfs/ |
D | vxfs_subr.c | 68 vxfs_get_page(struct address_space *mapping, u_long n) in vxfs_get_page() argument 72 pp = read_mapping_page(mapping, n, NULL); in vxfs_get_page() 180 vxfs_bmap(struct address_space *mapping, sector_t block) in vxfs_bmap() argument 182 return generic_block_bmap(mapping, block, vxfs_getblk); in vxfs_bmap()
|
/linux-4.1.27/arch/unicore32/include/asm/ |
D | cacheflush.h | 182 #define flush_dcache_mmap_lock(mapping) \ argument 183 spin_lock_irq(&(mapping)->tree_lock) 184 #define flush_dcache_mmap_unlock(mapping) \ argument 185 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.1.27/fs/xfs/ |
D | xfs_aops.c | 650 if (!page->mapping) in xfs_check_page_type() 705 if (page->mapping != inode->i_mapping) in xfs_convert_page() 859 trace_xfs_invalidatepage(page->mapping->host, page, offset, in xfs_vm_invalidatepage() 884 struct inode *inode = page->mapping->host; in xfs_aops_discard_page() 942 struct inode *inode = page->mapping->host; in xfs_vm_writepage() 1202 struct address_space *mapping, in xfs_vm_writepages() argument 1205 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); in xfs_vm_writepages() 1206 return generic_writepages(mapping, wbc); in xfs_vm_writepages() 1223 trace_xfs_releasepage(page->mapping->host, page, 0, 0); in xfs_vm_releasepage() 1740 struct address_space *mapping, in xfs_vm_write_begin() argument [all …]
|
D | xfs_file.c | 98 struct address_space *mapping; in xfs_iozero() local 101 mapping = VFS_I(ip)->i_mapping; in xfs_iozero() 111 status = pagecache_write_begin(NULL, mapping, pos, bytes, in xfs_iozero() 119 status = pagecache_write_end(NULL, mapping, pos, bytes, bytes, in xfs_iozero() 660 struct address_space *mapping = file->f_mapping; in xfs_file_dio_aio_write() local 661 struct inode *inode = mapping->host; in xfs_file_dio_aio_write() 689 if (unaligned_io || mapping->nrpages) in xfs_file_dio_aio_write() 700 if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { in xfs_file_dio_aio_write() 713 if (mapping->nrpages) { in xfs_file_dio_aio_write() 744 ret = mapping->a_ops->direct_IO(iocb, &data, pos); in xfs_file_dio_aio_write() [all …]
|
/linux-4.1.27/fs/f2fs/ |
D | data.c | 61 set_bit(AS_EIO, &page->mapping->flags); in f2fs_write_end_io() 910 struct address_space *mapping = inode->i_mapping; in find_data_page() local 928 page = find_get_page(mapping, index); in find_data_page() 952 page = grab_cache_page(mapping, index); in find_data_page() 983 struct address_space *mapping = inode->i_mapping; in get_lock_data_page() local 993 page = grab_cache_page(mapping, index); in get_lock_data_page() 1041 if (unlikely(page->mapping != mapping)) { in get_lock_data_page() 1059 struct address_space *mapping = inode->i_mapping; in get_new_data_page() local 1069 page = grab_cache_page(mapping, index); in get_new_data_page() 1097 if (unlikely(page->mapping != mapping)) { in get_new_data_page() [all …]
|
D | checkpoint.c | 34 struct address_space *mapping = META_MAPPING(sbi); in grab_meta_page() local 37 page = grab_cache_page(mapping, index); in grab_meta_page() 52 struct address_space *mapping = META_MAPPING(sbi); in get_meta_page() local 60 page = grab_cache_page(mapping, index); in get_meta_page() 72 if (unlikely(page->mapping != mapping)) { in get_meta_page() 214 static int f2fs_write_meta_pages(struct address_space *mapping, in f2fs_write_meta_pages() argument 217 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); in f2fs_write_meta_pages() 220 trace_f2fs_writepages(mapping->host, wbc, META); in f2fs_write_meta_pages() 243 struct address_space *mapping = META_MAPPING(sbi); in sync_meta_pages() local 255 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, in sync_meta_pages() [all …]
|
/linux-4.1.27/fs/logfs/ |
D | file.c | 12 static int logfs_write_begin(struct file *file, struct address_space *mapping, in logfs_write_begin() argument 16 struct inode *inode = mapping->host; in logfs_write_begin() 20 page = grab_cache_page_write_begin(mapping, index, flags); in logfs_write_begin() 38 static int logfs_write_end(struct file *file, struct address_space *mapping, in logfs_write_end() argument 42 struct inode *inode = mapping->host; in logfs_write_end() 105 struct inode *inode = page->mapping->host; in __logfs_writepage() 119 struct inode *inode = page->mapping->host; in logfs_writepage() 168 struct super_block *sb = page->mapping->host->i_sb; in logfs_invalidatepage()
|
D | dev_mtd.c | 76 struct address_space *mapping = super->s_mapping_inode->i_mapping; in logfs_mtd_erase_mapping() local 81 page = find_get_page(mapping, index); in logfs_mtd_erase_mapping() 151 struct address_space *mapping = super->s_mapping_inode->i_mapping; in logfs_mtd_find_first_sb() local 162 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); in logfs_mtd_find_first_sb() 168 struct address_space *mapping = super->s_mapping_inode->i_mapping; in logfs_mtd_find_last_sb() local 180 return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb); in logfs_mtd_find_last_sb() 187 struct address_space *mapping = super->s_mapping_inode->i_mapping; in __logfs_mtd_writeseg() local 192 page = find_lock_page(mapping, index + i); in __logfs_mtd_writeseg()
|
D | dev_bdev.c | 80 struct address_space *mapping = super->s_mapping_inode->i_mapping; in __bdev_writeseg() local 111 page = find_lock_page(mapping, index + i); in __bdev_writeseg() 252 struct address_space *mapping = super->s_mapping_inode->i_mapping; in bdev_find_first_sb() local 256 return read_cache_page(mapping, 0, filler, sb); in bdev_find_first_sb() 262 struct address_space *mapping = super->s_mapping_inode->i_mapping; in bdev_find_last_sb() local 268 return read_cache_page(mapping, index, filler, sb); in bdev_find_last_sb()
|
/linux-4.1.27/Documentation/arm/ |
D | memory.txt | 22 set up a minicache mapping. 38 fffe8000 fffeffff DTCM mapping area for platforms with 41 fffe0000 fffe7fff ITCM mapping area for platforms with 44 ffc00000 ffefffff Fixmap mapping region. Addresses provided 48 mapping within the vmalloc space. 64 One way of mapping HIGHMEM pages into kernel 79 space are also caught via this mapping. 85 Since future CPUs may impact the kernel mapping layout, user programs
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | rw26.c | 93 inode = vmpage->mapping->host; in ll_invalidatepage() 123 struct address_space *mapping; in ll_releasepage() local 130 mapping = vmpage->mapping; in ll_releasepage() 131 if (mapping == NULL) in ll_releasepage() 134 obj = ll_i2info(mapping->host)->lli_clob; in ll_releasepage() 168 struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host); in ll_set_page_dirty() 335 struct address_space *mapping, in ll_direct_IO_26_seg() argument 472 static int ll_write_begin(struct file *file, struct address_space *mapping, in ll_write_begin() argument 481 page = grab_cache_page_write_begin(mapping, index, flags); in ll_write_begin() 495 static int ll_write_end(struct file *file, struct address_space *mapping, in ll_write_end() argument [all …]
|
D | rw.c | 99 clob = ll_i2info(vmpage->mapping->host)->lli_clob; in ll_cl_init() 115 struct inode *inode = vmpage->mapping->host; in ll_cl_init() 363 void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which) in ll_ra_stats_inc() argument 365 struct ll_sb_info *sbi = ll_i2sbi(mapping->host); in ll_ra_stats_inc() 495 pgoff_t index, struct address_space *mapping) in ll_read_ahead_page() argument 498 struct cl_object *clob = ll_i2info(mapping->host)->lli_clob; in ll_read_ahead_page() 504 vmpage = grab_cache_page_nowait(mapping, index); in ll_read_ahead_page() 507 if (vmpage->mapping == mapping) { in ll_read_ahead_page() 533 ll_ra_stats_inc(mapping, which); in ll_read_ahead_page() 641 struct address_space *mapping, in ll_read_ahead_pages() argument [all …]
|
D | vvp_page.c | 140 struct address_space *mapping; in vvp_page_discard() local 146 mapping = vmpage->mapping; in vvp_page_discard() 149 ll_ra_stats_inc(mapping, RA_STAT_DISCARDED); in vvp_page_discard() 155 truncate_complete_page(mapping, vmpage); in vvp_page_discard() 173 ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); in vvp_page_unmap() 181 struct inode *inode = vmpage->mapping->host; in vvp_page_delete()
|
/linux-4.1.27/fs/jffs2/ |
D | file.c | 24 static int jffs2_write_end(struct file *filp, struct address_space *mapping, 27 static int jffs2_write_begin(struct file *filp, struct address_space *mapping, 124 struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host); in jffs2_readpage() 128 ret = jffs2_do_readpage_unlock(pg->mapping->host, pg); in jffs2_readpage() 133 static int jffs2_write_begin(struct file *filp, struct address_space *mapping, in jffs2_write_begin() argument 138 struct inode *inode = mapping->host; in jffs2_write_begin() 144 pg = grab_cache_page_write_begin(mapping, index, flags); in jffs2_write_begin() 237 static int jffs2_write_end(struct file *filp, struct address_space *mapping, in jffs2_write_end() argument 244 struct inode *inode = mapping->host; in jffs2_write_end()
|
/linux-4.1.27/fs/exofs/ |
D | dir.c | 65 struct address_space *mapping = page->mapping; in exofs_commit_chunk() local 66 struct inode *dir = mapping->host; in exofs_commit_chunk() 90 struct inode *dir = page->mapping->host; in exofs_check_page() 165 struct address_space *mapping = dir->i_mapping; in exofs_get_page() local 166 struct page *page = read_mapping_page(mapping, n, NULL); in exofs_get_page() 414 err = exofs_write_begin(NULL, page->mapping, pos, len, in exofs_set_link() 496 err = exofs_write_begin(NULL, page->mapping, pos, rec_len, 0, in exofs_add_link() 527 struct address_space *mapping = page->mapping; in exofs_delete_entry() local 528 struct inode *inode = mapping->host; in exofs_delete_entry() 553 err = exofs_write_begin(NULL, page->mapping, pos, to - from, 0, in exofs_delete_entry() [all …]
|
D | inode.c | 186 mapping_set_error(page->mapping, ret); in update_write_page() 216 struct inode *inode = page->mapping->host; in __readpages_done() 473 static int exofs_readpages(struct file *file, struct address_space *mapping, in exofs_readpages() argument 479 _pcol_init(&pcol, nr_pages, mapping->host); in exofs_readpages() 481 ret = read_cache_pages(mapping, pages, readpage_strip, &pcol); in exofs_readpages() 499 _pcol_init(&pcol, 1, page->mapping->host); in _readpage() 544 struct inode *inode = page->mapping->host; in writepages_done() 784 set_bit(AS_EIO, &page->mapping->flags); in writepage_strip() 789 static int exofs_writepages(struct address_space *mapping, in exofs_writepages() argument 798 start + mapping->nrpages : in exofs_writepages() [all …]
|
/linux-4.1.27/fs/adfs/ |
D | inode.c | 48 static void adfs_write_failed(struct address_space *mapping, loff_t to) in adfs_write_failed() argument 50 struct inode *inode = mapping->host; in adfs_write_failed() 56 static int adfs_write_begin(struct file *file, struct address_space *mapping, in adfs_write_begin() argument 63 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in adfs_write_begin() 65 &ADFS_I(mapping->host)->mmu_private); in adfs_write_begin() 67 adfs_write_failed(mapping, pos + len); in adfs_write_begin() 72 static sector_t _adfs_bmap(struct address_space *mapping, sector_t block) in _adfs_bmap() argument 74 return generic_block_bmap(mapping, block, adfs_get_block); in _adfs_bmap()
|
/linux-4.1.27/drivers/gpu/drm/rockchip/ |
D | rockchip_drm_drv.c | 46 struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping; in rockchip_drm_dma_attach_device() local 55 return arm_iommu_attach_device(dev, mapping); in rockchip_drm_dma_attach_device() 130 struct dma_iommu_mapping *mapping; in rockchip_drm_load() local 153 mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000, in rockchip_drm_load() 155 if (IS_ERR(mapping)) { in rockchip_drm_load() 156 ret = PTR_ERR(mapping); in rockchip_drm_load() 166 ret = arm_iommu_attach_device(dev, mapping); in rockchip_drm_load() 226 arm_iommu_release_mapping(dev->archdata.mapping); in rockchip_drm_load() 242 arm_iommu_release_mapping(dev->archdata.mapping); in rockchip_drm_unload()
|
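The Rockchip DRM driver above (and the ipmmu-vmsa/shmobile IOMMU drivers later in this listing) create a dma_iommu_mapping for the platform bus and attach devices to it, so that subsequent dma_map_* calls allocate IOVAs through the IOMMU. A hedged sketch of that sequence against the 32-bit ARM API; the base address and size are made-up example values:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/err.h>
#include <asm/dma-iommu.h>

/*
 * Create a 256 MiB IOVA space starting at 0x10000000 (arbitrary example
 * values) and route 'dev' through it. Returns 0 or a negative errno.
 */
static int example_attach_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x10000000, SZ_256M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/*
	 * From here on, dma_map_single()/dma_map_sg() for this device
	 * allocate IOVAs from the mapping and program the IOMMU.
	 */
	return 0;
}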
/linux-4.1.27/fs/ext4/ |
D | inode.c | 918 struct inode *inode = page->mapping->host; in ext4_block_write_begin() 1000 static int ext4_write_begin(struct file *file, struct address_space *mapping, in ext4_write_begin() argument 1004 struct inode *inode = mapping->host; in ext4_write_begin() 1023 ret = ext4_try_to_write_inline_data(mapping, inode, pos, len, in ext4_write_begin() 1039 page = grab_cache_page_write_begin(mapping, index, flags); in ext4_write_begin() 1052 if (page->mapping != mapping) { in ext4_write_begin() 1138 struct address_space *mapping, in ext4_write_end() argument 1143 struct inode *inode = mapping->host; in ext4_write_end() 1165 copied = block_write_end(file, mapping, pos, in ext4_write_end() 1212 struct address_space *mapping, in ext4_journalled_write_end() argument [all …]
|
D | move_extent.c | 137 struct address_space *mapping[2]; in mext_page_double_lock() local 142 mapping[0] = inode1->i_mapping; in mext_page_double_lock() 143 mapping[1] = inode2->i_mapping; in mext_page_double_lock() 148 mapping[0] = inode2->i_mapping; in mext_page_double_lock() 149 mapping[1] = inode1->i_mapping; in mext_page_double_lock() 152 page[0] = grab_cache_page_write_begin(mapping[0], index1, fl); in mext_page_double_lock() 156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); in mext_page_double_lock() 182 struct inode *inode = page->mapping->host; in mext_page_mkuptodate()
|
/linux-4.1.27/fs/ntfs/ |
D | bitmap.c | 52 struct address_space *mapping; in __ntfs_bitmap_set_bits_in_run() local 74 mapping = vi->i_mapping; in __ntfs_bitmap_set_bits_in_run() 75 page = ntfs_map_page(mapping, index); in __ntfs_bitmap_set_bits_in_run() 127 page = ntfs_map_page(mapping, ++index); in __ntfs_bitmap_set_bits_in_run()
|
D | aops.h | 86 static inline struct page *ntfs_map_page(struct address_space *mapping, in ntfs_map_page() argument 89 struct page *page = read_mapping_page(mapping, index, NULL); in ntfs_map_page()
|
D | aops.c | 68 vi = page->mapping->host; in ntfs_end_buffer_async_read() 201 vi = page->mapping->host; in ntfs_read_block() 412 vi = page->mapping->host; in ntfs_readpage() 568 vi = page->mapping->host; in ntfs_write_block() 923 struct inode *vi = page->mapping->host; in ntfs_write_mst_block() 1356 struct inode *vi = page->mapping->host; in ntfs_writepage() 1563 static sector_t ntfs_bmap(struct address_space *mapping, sector_t block) in ntfs_bmap() argument 1569 ntfs_inode *ni = NTFS_I(mapping->host); in ntfs_bmap() 1730 struct address_space *mapping = page->mapping; in mark_ntfs_record_dirty() local 1731 ntfs_inode *ni = NTFS_I(mapping->host); in mark_ntfs_record_dirty() [all …]
|
/linux-4.1.27/drivers/iommu/ |
D | ipmmu-vmsa.c | 36 struct dma_iommu_mapping *mapping; member 452 if (!mmu->mapping) in ipmmu_irq() 455 io_domain = mmu->mapping->domain; in ipmmu_irq() 694 if (!mmu->mapping) { in ipmmu_add_device() 695 struct dma_iommu_mapping *mapping; in ipmmu_add_device() local 697 mapping = arm_iommu_create_mapping(&platform_bus_type, in ipmmu_add_device() 699 if (IS_ERR(mapping)) { in ipmmu_add_device() 701 ret = PTR_ERR(mapping); in ipmmu_add_device() 705 mmu->mapping = mapping; in ipmmu_add_device() 709 ret = arm_iommu_attach_device(dev, mmu->mapping); in ipmmu_add_device() [all …]
|
D | shmobile-iommu.c | 346 struct dma_iommu_mapping *mapping; in shmobile_iommu_add_device() local 350 mapping = archdata->iommu_mapping; in shmobile_iommu_add_device() 351 if (!mapping) { in shmobile_iommu_add_device() 352 mapping = arm_iommu_create_mapping(&platform_bus_type, 0, in shmobile_iommu_add_device() 354 if (IS_ERR(mapping)) in shmobile_iommu_add_device() 355 return PTR_ERR(mapping); in shmobile_iommu_add_device() 356 archdata->iommu_mapping = mapping; in shmobile_iommu_add_device() 359 if (arm_iommu_attach_device(dev, mapping)) in shmobile_iommu_add_device()
|
/linux-4.1.27/fs/affs/ |
D | file.c | 381 static void affs_write_failed(struct address_space *mapping, loff_t to) in affs_write_failed() argument 383 struct inode *inode = mapping->host; in affs_write_failed() 395 struct address_space *mapping = file->f_mapping; in affs_direct_IO() local 396 struct inode *inode = mapping->host; in affs_direct_IO() 409 affs_write_failed(mapping, offset + count); in affs_direct_IO() 413 static int affs_write_begin(struct file *file, struct address_space *mapping, in affs_write_begin() argument 420 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in affs_write_begin() 422 &AFFS_I(mapping->host)->mmu_private); in affs_write_begin() 424 affs_write_failed(mapping, pos + len); in affs_write_begin() 429 static sector_t _affs_bmap(struct address_space *mapping, sector_t block) in _affs_bmap() argument [all …]
|
/linux-4.1.27/Documentation/x86/x86_64/ |
D | mm.txt | 9 ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory 19 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0 20 ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space 24 The direct mapping covers all memory in the system up to the highest
|
/linux-4.1.27/fs/ocfs2/ |
D | mmap.c | 66 struct address_space *mapping = inode->i_mapping; in __ocfs2_page_mkwrite() local 89 if ((page->mapping != inode->i_mapping) || in __ocfs2_page_mkwrite() 107 ret = ocfs2_write_begin_nolock(file, mapping, pos, len, 0, &locked_page, in __ocfs2_page_mkwrite() 123 ret = ocfs2_write_end_nolock(mapping, pos, len, len, locked_page, in __ocfs2_page_mkwrite()
|
D | aops.c | 283 struct inode *inode = page->mapping->host; in ocfs2_readpage() 354 static int ocfs2_readpages(struct file *filp, struct address_space *mapping, in ocfs2_readpages() argument 358 struct inode *inode = mapping->host; in ocfs2_readpages() 392 err = mpage_readpages(mapping, pages, nr_pages, ocfs2_get_block); in ocfs2_readpages() 415 (unsigned long long)OCFS2_I(page->mapping->host)->ip_blkno, in ocfs2_writepage() 457 static sector_t ocfs2_bmap(struct address_space *mapping, sector_t block) in ocfs2_bmap() argument 462 struct inode *inode = mapping->host; in ocfs2_bmap() 1463 static int ocfs2_grab_pages_for_write(struct address_space *mapping, in ocfs2_grab_pages_for_write() argument 1471 struct inode *inode = mapping->host; in ocfs2_grab_pages_for_write() 1513 if (mmap_page->mapping != mapping) { in ocfs2_grab_pages_for_write() [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/ata/ |
D | fsl-sata.txt | 11 - interrupts : <interrupt mapping for SATA IRQ> 19 - interrupt-parent : optional, if needed for interrupt mapping 20 - reg : <registers mapping>
|
D | exynos-sata.txt | 8 - interrupts : <interrupt mapping for SATA IRQ> 9 - reg : <registers mapping>
|
/linux-4.1.27/fs/minix/ |
D | dir.c | 55 struct address_space *mapping = page->mapping; in dir_commit_chunk() local 56 struct inode *dir = mapping->host; in dir_commit_chunk() 58 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk() 73 struct address_space *mapping = dir->i_mapping; in dir_get_page() local 74 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() 295 struct inode *inode = page->mapping->host; in minix_delete_entry() 419 struct inode *dir = page->mapping->host; in minix_set_link() 462 struct address_space *mapping = page->mapping; in minix_inode_by_name() local 463 struct inode *inode = mapping->host; in minix_inode_by_name()
|
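The minix dir_commit_chunk() shown above (sysv, ufs and ext2 carry near-identical helpers) commits a directory chunk with block_write_end() on the still-locked page, grows i_size if needed, and writes the page out synchronously for DIRSYNC directories. A simplified, hedged sketch of the idea:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>

/*
 * Commit 'len' bytes written at 'pos' into a locked directory page.
 * Sketch only; real filesystems also mark the directory inode dirty
 * and propagate write-out errors to the caller.
 */
static int example_dir_commit_chunk(struct page *page, loff_t pos, unsigned len)
{
	struct address_space *mapping = page->mapping;
	struct inode *dir = mapping->host;
	int err = 0;

	block_write_end(NULL, mapping, pos, len, len, page, NULL);

	if (pos + len > dir->i_size) {
		i_size_write(dir, pos + len);
		mark_inode_dirty(dir);
	}

	if (IS_DIRSYNC(dir))
		err = write_one_page(page, 1);	/* writes and unlocks the page */
	else
		unlock_page(page);

	return err;
}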
/linux-4.1.27/drivers/misc/cxl/ |
D | context.c | 38 struct address_space *mapping) in cxl_context_init() argument 47 ctx->mapping = mapping; in cxl_context_init() 228 if (ctx->mapping) in cxl_context_detach_all() 229 unmap_mapping_range(ctx->mapping, 0, 0, 1); in cxl_context_detach_all()
|
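cxl_context_detach_all() above tears down any user mmaps of a context with unmap_mapping_range() on the backing address_space. A minimal sketch of that call:

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Zap every user PTE that maps any offset of 'mapping'. A hole length
 * of 0 means "to the end of the file", and even_cows = 1 also removes
 * private COW copies, which is what a forced detach wants.
 */
static void example_zap_all_user_mappings(struct address_space *mapping)
{
	if (mapping)
		unmap_mapping_range(mapping, 0, 0, 1);
}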
/linux-4.1.27/arch/arm/kernel/ |
D | perf_event.c | 55 int mapping; in armpmu_map_hw_event() local 60 mapping = (*event_map)[config]; in armpmu_map_hw_event() 61 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; in armpmu_map_hw_event() 394 int mapping; in __hw_perf_event_init() local 396 mapping = armpmu->map_event(event); in __hw_perf_event_init() 398 if (mapping < 0) { in __hw_perf_event_init() 401 return mapping; in __hw_perf_event_init() 429 hwc->config_base |= (unsigned long)mapping; in __hw_perf_event_init()
|
/linux-4.1.27/arch/parisc/include/asm/ |
D | cacheflush.h | 77 #define flush_dcache_mmap_lock(mapping) \ argument 78 spin_lock_irq(&(mapping)->tree_lock) 79 #define flush_dcache_mmap_unlock(mapping) \ argument 80 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.1.27/drivers/input/joystick/ |
D | xpad.c | 119 u8 mapping; member 345 int mapping; /* map d-pad to buttons or to axes */ member 363 if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { in xpad_process_packet() 378 if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { in xpad_process_packet() 387 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { in xpad_process_packet() 435 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { in xpad360_process_packet() 465 if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { in xpad360_process_packet() 480 if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { in xpad360_process_packet() 544 if (xpad->mapping & MAP_DPAD_TO_BUTTONS) { in xpadone_process_buttons() 565 if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { in xpadone_process_buttons() [all …]
|
/linux-4.1.27/drivers/net/ethernet/alteon/ |
D | acenic.c | 640 dma_addr_t mapping; in acenic_remove_one() local 643 mapping = dma_unmap_addr(ringp, mapping); in acenic_remove_one() 644 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one() 660 dma_addr_t mapping; in acenic_remove_one() local 663 mapping = dma_unmap_addr(ringp,mapping); in acenic_remove_one() 664 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one() 679 dma_addr_t mapping; in acenic_remove_one() local 682 mapping = dma_unmap_addr(ringp, mapping); in acenic_remove_one() 683 pci_unmap_page(ap->pdev, mapping, in acenic_remove_one() 1642 dma_addr_t mapping; in ace_load_std_rx_ring() local [all …]
|
/linux-4.1.27/drivers/mtd/devices/ |
D | block2mtd.c | 46 static struct page *page_read(struct address_space *mapping, int index) in page_read() argument 48 return read_mapping_page(mapping, index, NULL); in page_read() 54 struct address_space *mapping = dev->blkdev->bd_inode->i_mapping; in _block2mtd_erase() local 62 page = page_read(mapping, index); in _block2mtd_erase() 73 balance_dirty_pages_ratelimited(mapping); in _block2mtd_erase() 143 struct address_space *mapping = dev->blkdev->bd_inode->i_mapping; in _block2mtd_write() local 155 page = page_read(mapping, index); in _block2mtd_write() 164 balance_dirty_pages_ratelimited(mapping); in _block2mtd_write()
|
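block2mtd above edits block-device pagecache pages in place and then calls balance_dirty_pages_ratelimited() so that long erase/write loops cannot flood memory with dirty pages. A hedged sketch of one iteration of such a loop (block2mtd itself also skips pages that already read back as all 0xff):

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/writeback.h>

/*
 * Fill one pagecache page of a block device with 0xff (as an erase
 * would), dirty it, and let the writeback throttle run. Illustrative
 * only.
 */
static int example_erase_one_page(struct address_space *mapping, pgoff_t index)
{
	struct page *page;
	void *addr;

	page = read_mapping_page(mapping, index, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	lock_page(page);
	addr = kmap(page);
	memset(addr, 0xff, PAGE_CACHE_SIZE);
	kunmap(page);
	set_page_dirty(page);
	unlock_page(page);

	page_cache_release(page);
	balance_dirty_pages_ratelimited(mapping);
	return 0;
}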
/linux-4.1.27/fs/sysv/ |
D | dir.c | 43 struct address_space *mapping = page->mapping; in dir_commit_chunk() local 44 struct inode *dir = mapping->host; in dir_commit_chunk() 47 block_write_end(NULL, mapping, pos, len, len, page, NULL); in dir_commit_chunk() 61 struct address_space *mapping = dir->i_mapping; in dir_get_page() local 62 struct page *page = read_mapping_page(mapping, n, NULL); in dir_get_page() 236 struct inode *inode = page->mapping->host; in sysv_delete_entry() 334 struct inode *dir = page->mapping->host; in sysv_set_link()
|
D | itree.c | 467 static void sysv_write_failed(struct address_space *mapping, loff_t to) in sysv_write_failed() argument 469 struct inode *inode = mapping->host; in sysv_write_failed() 477 static int sysv_write_begin(struct file *file, struct address_space *mapping, in sysv_write_begin() argument 483 ret = block_write_begin(mapping, pos, len, flags, pagep, get_block); in sysv_write_begin() 485 sysv_write_failed(mapping, pos + len); in sysv_write_begin() 490 static sector_t sysv_bmap(struct address_space *mapping, sector_t block) in sysv_bmap() argument 492 return generic_block_bmap(mapping,block,get_block); in sysv_bmap()
|
/linux-4.1.27/arch/xtensa/mm/ |
D | cache.c | 135 struct address_space *mapping = page_mapping(page); in flush_dcache_page() local 143 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page() 162 if (!alias && !mapping) in flush_dcache_page() 173 if (mapping) in flush_dcache_page()
|
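The xtensa flush_dcache_page() above (score, mips and sh7705 elsewhere in this listing follow the same shape) checks page_mapping() and mapping_mapped() so that flushes for pagecache pages with no user mappings can be deferred. A hedged sketch of that shape; both helpers are placeholders, since each architecture keeps its own deferred-flush marker and flush primitive:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Placeholder for an arch-private "needs flush later" marker. */
extern void example_mark_dcache_dirty(struct page *page);
/* Placeholder for the real cache-line flush of one page. */
extern void example_flush_page_dcache(struct page *page);

static void example_flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * Pagecache page with no user mappings: no alias exists yet, so
	 * just remember that a flush is owed before the page is mapped
	 * into userspace (update_mmu_cache() pays the debt later).
	 */
	if (mapping && !mapping_mapped(mapping)) {
		example_mark_dcache_dirty(page);
		return;
	}

	/* Otherwise flush now to keep user and kernel views coherent. */
	example_flush_page_dcache(page);
}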
/linux-4.1.27/drivers/remoteproc/ |
D | remoteproc_core.c | 472 struct rproc_mem_entry *mapping; in rproc_handle_devmem() local 491 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); in rproc_handle_devmem() 492 if (!mapping) { in rproc_handle_devmem() 510 mapping->da = rsc->da; in rproc_handle_devmem() 511 mapping->len = rsc->len; in rproc_handle_devmem() 512 list_add_tail(&mapping->node, &rproc->mappings); in rproc_handle_devmem() 520 kfree(mapping); in rproc_handle_devmem() 547 struct rproc_mem_entry *carveout, *mapping; in rproc_handle_carveout() local 601 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); in rproc_handle_carveout() 602 if (!mapping) { in rproc_handle_carveout() [all …]
|
/linux-4.1.27/Documentation/vm/ |
D | remap_file_pages.txt | 1 The remap_file_pages() system call is used to create a nonlinear mapping, 2 that is, a mapping in which the pages of the file are mapped into a 8 Supporting nonlinear mappings requires a significant amount of non-trivial 10 nonlinear mapping work, the kernel needs a way to distinguish normal page table
|
D | highmem.txt | 31 The part of (physical) memory not covered by a permanent mapping is what we 67 (*) vmap(). This can be used to make a long duration mapping of multiple 71 (*) kmap(). This permits a short duration mapping of a single page. It needs 76 (*) kmap_atomic(). This permits a very short duration mapping of a single 77 page. Since the mapping is restricted to the CPU that issued it, it 97 struct page *page = find_get_page(mapping, offset); 130 If CONFIG_HIGHMEM is not set, then the kernel will try to create a mapping 147 pageframes need to live in the permanent mapping, which means:
|
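The highmem.txt excerpt references find_get_page() alongside the kmap_atomic() discussion; the usual pattern is a very short-lived atomic mapping to touch a possibly-highmem pagecache page. A hedged sketch:

#include <linux/pagemap.h>
#include <linux/highmem.h>

/*
 * Copy the first byte out of a pagecache page, if it is present and
 * up to date. kmap_atomic() mappings are per-CPU and the code must
 * not sleep between map and unmap.
 */
static int example_peek_first_byte(struct address_space *mapping,
				   pgoff_t offset, u8 *out)
{
	struct page *page = find_get_page(mapping, offset);
	void *vaddr;

	if (!page)
		return -ENOENT;
	if (!PageUptodate(page)) {
		page_cache_release(page);
		return -EIO;
	}

	vaddr = kmap_atomic(page);
	*out = *(u8 *)vaddr;
	kunmap_atomic(vaddr);

	page_cache_release(page);
	return 0;
}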
/linux-4.1.27/arch/metag/include/asm/ |
D | cacheflush.h | 67 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 68 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument 95 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 96 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/fs/ufs/ |
D | util.c | 243 struct page *ufs_get_locked_page(struct address_space *mapping, in ufs_get_locked_page() argument 248 page = find_lock_page(mapping, index); in ufs_get_locked_page() 250 page = read_mapping_page(mapping, index, NULL); in ufs_get_locked_page() 255 mapping->host->i_ino, index); in ufs_get_locked_page() 261 if (unlikely(page->mapping == NULL)) { in ufs_get_locked_page() 275 mapping->host->i_ino, index); in ufs_get_locked_page()
|
D | dir.c | 45 struct address_space *mapping = page->mapping; in ufs_commit_chunk() local 46 struct inode *dir = mapping->host; in ufs_commit_chunk() 50 block_write_end(NULL, mapping, pos, len, len, page, NULL); in ufs_commit_chunk() 113 struct inode *dir = page->mapping->host; in ufs_check_page() 192 struct address_space *mapping = dir->i_mapping; in ufs_get_page() local 193 struct page *page = read_mapping_page(mapping, n, NULL); in ufs_get_page() 562 struct address_space *mapping = inode->i_mapping; in ufs_make_empty() local 563 struct page *page = grab_cache_page(mapping, 0); in ufs_make_empty()
|
/linux-4.1.27/arch/arm/include/asm/ |
D | device.h | 18 struct dma_iommu_mapping *mapping; member 32 #define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
|
D | dma-iommu.h | 30 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping); 33 struct dma_iommu_mapping *mapping);
|
D | cacheflush.h | 325 #define flush_dcache_mmap_lock(mapping) \ argument 326 spin_lock_irq(&(mapping)->tree_lock) 327 #define flush_dcache_mmap_unlock(mapping) \ argument 328 spin_unlock_irq(&(mapping)->tree_lock)
|
/linux-4.1.27/fs/ubifs/ |
D | file.c | 110 struct inode *inode = page->mapping->host; in do_readpage() 221 static int write_begin_slow(struct address_space *mapping, in write_begin_slow() argument 225 struct inode *inode = mapping->host; in write_begin_slow() 251 page = grab_cache_page_write_begin(mapping, index, flags); in write_begin_slow() 425 static int ubifs_write_begin(struct file *file, struct address_space *mapping, in ubifs_write_begin() argument 429 struct inode *inode = mapping->host; in ubifs_write_begin() 444 page = grab_cache_page_write_begin(mapping, index, flags); in ubifs_write_begin() 500 return write_begin_slow(mapping, pos, len, pagep, flags); in ubifs_write_begin() 540 static int ubifs_write_end(struct file *file, struct address_space *mapping, in ubifs_write_end() argument 544 struct inode *inode = mapping->host; in ubifs_write_end() [all …]
|
/linux-4.1.27/fs/hfs/ |
D | inode.c | 39 static void hfs_write_failed(struct address_space *mapping, loff_t to) in hfs_write_failed() argument 41 struct inode *inode = mapping->host; in hfs_write_failed() 49 static int hfs_write_begin(struct file *file, struct address_space *mapping, in hfs_write_begin() argument 56 ret = cont_write_begin(file, mapping, pos, len, flags, pagep, fsdata, in hfs_write_begin() 58 &HFS_I(mapping->host)->phys_size); in hfs_write_begin() 60 hfs_write_failed(mapping, pos + len); in hfs_write_begin() 65 static sector_t hfs_bmap(struct address_space *mapping, sector_t block) in hfs_bmap() argument 67 return generic_block_bmap(mapping, block, hfs_get_block); in hfs_bmap() 72 struct inode *inode = page->mapping->host; in hfs_releasepage() 131 struct address_space *mapping = file->f_mapping; in hfs_direct_IO() local [all …]
|
/linux-4.1.27/arch/mips/pci/ |
D | pci-rc32434.c | 155 rc32434_pci->pcilba[0].mapping = (unsigned int) (PCI_ADDR_START); in rc32434_pcibridge_init() 163 rc32434_pci->pcilba[1].mapping = 0x60000000; in rc32434_pcibridge_init() 170 rc32434_pci->pcilba[2].mapping = 0x18FFFFFF; in rc32434_pcibridge_init() 179 rc32434_pci->pcilba[3].mapping = 0x18800000; in rc32434_pcibridge_init()
|
/linux-4.1.27/fs/cifs/ |
D | fscache.h | 78 struct address_space *mapping, in cifs_readpages_from_fscache() argument 83 return __cifs_readpages_from_fscache(inode, mapping, pages, in cifs_readpages_from_fscache() 132 struct address_space *mapping, in cifs_readpages_from_fscache() argument
|
D | file.c | 1835 struct address_space *mapping = page->mapping; in cifs_partialpagewrite() local 1843 if (!mapping || !mapping->host) in cifs_partialpagewrite() 1846 inode = page->mapping->host; in cifs_partialpagewrite() 1858 if (offset > mapping->host->i_size) { in cifs_partialpagewrite() 1864 if (mapping->host->i_size - offset < (loff_t)to) in cifs_partialpagewrite() 1865 to = (unsigned)(mapping->host->i_size - offset); in cifs_partialpagewrite() 1867 open_file = find_writable_file(CIFS_I(mapping->host), false); in cifs_partialpagewrite() 1888 wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping, in wdata_alloc_and_fillpages() argument 1910 nr_pages = find_get_pages_tag(mapping, index, in wdata_alloc_and_fillpages() 1923 struct address_space *mapping, in wdata_prepare_pages() argument [all …]
|
D | fscache.c | 131 struct inode *inode = page->mapping->host; in cifs_fscache_release_page() 185 struct address_space *mapping, in __cifs_readpages_from_fscache() argument 193 ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, in __cifs_readpages_from_fscache() 197 mapping_gfp_mask(mapping)); in __cifs_readpages_from_fscache()
|
/linux-4.1.27/drivers/net/ethernet/broadcom/ |
D | b44.c | 633 rp->mapping, in b44_tx() 664 dma_addr_t mapping; in b44_alloc_rx_skb() local 677 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, in b44_alloc_rx_skb() 683 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || in b44_alloc_rx_skb() 684 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { in b44_alloc_rx_skb() 686 if (!dma_mapping_error(bp->sdev->dma_dev, mapping)) in b44_alloc_rx_skb() 687 dma_unmap_single(bp->sdev->dma_dev, mapping, in b44_alloc_rx_skb() 693 mapping = dma_map_single(bp->sdev->dma_dev, skb->data, in b44_alloc_rx_skb() 696 if (dma_mapping_error(bp->sdev->dma_dev, mapping) || in b44_alloc_rx_skb() 697 mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) { in b44_alloc_rx_skb() [all …]
|
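b44_alloc_rx_skb() above maps a freshly allocated RX buffer with dma_map_single() and refuses to hand the address to hardware if dma_mapping_error() (or the device's 30-bit addressing limit) trips. A hedged sketch of the generic part of that check, leaving out the b44-specific address-limit test:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/*
 * Map an RX skb for the device to DMA into. On success the caller
 * owns 'mapping' and must dma_unmap_single() it once the packet has
 * been received (or the buffer is freed).
 */
static int example_map_rx_skb(struct device *dev, struct sk_buff *skb,
			      unsigned int buf_len, dma_addr_t *mapping)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, skb->data, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* never give the hardware a bad address */

	*mapping = addr;
	return 0;
}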
/linux-4.1.27/fs/btrfs/ |
D | extent_io.c | 93 if (!tree->mapping) in __btrfs_debug_check_extent_io_range() 96 inode = tree->mapping->host; in __btrfs_debug_check_extent_io_range() 138 if (!tree->mapping) in tree_fs_info() 140 return btrfs_sb(tree->mapping->host->i_sb); in tree_fs_info() 199 struct address_space *mapping) in extent_io_tree_init() argument 205 tree->mapping = mapping; in extent_io_tree_init() 350 tree->ops->merge_extent_hook(tree->mapping->host, new, in merge_cb() 402 tree->ops->set_bit_hook(tree->mapping->host, state, bits); in set_state_cb() 409 tree->ops->clear_bit_hook(tree->mapping->host, state, bits); in clear_state_cb() 458 tree->ops->split_extent_hook(tree->mapping->host, orig, split); in split_cb() [all …]
|
D | compression.c | 195 page->mapping = NULL; in end_compressed_bio_read() 291 cb->compressed_pages[0]->mapping = cb->inode->i_mapping; in end_compressed_bio_write() 297 cb->compressed_pages[0]->mapping = NULL; in end_compressed_bio_write() 309 page->mapping = NULL; in end_compressed_bio_write() 377 page->mapping = inode->i_mapping; in btrfs_submit_compressed_write() 385 page->mapping = NULL; in btrfs_submit_compressed_write() 456 struct address_space *mapping = inode->i_mapping; in add_ra_bio_pages() local 479 page = radix_tree_lookup(&mapping->page_tree, pg_index); in add_ra_bio_pages() 488 page = __page_cache_alloc(mapping_gfp_mask(mapping) & in add_ra_bio_pages() 493 if (add_to_page_cache_lru(page, mapping, pg_index, in add_ra_bio_pages() [all …]
|
D | compression.h | 25 int btrfs_compress_pages(int type, struct address_space *mapping, 57 struct address_space *mapping,
|
/linux-4.1.27/fs/hugetlbfs/ |
D | inode.c | 223 struct address_space *mapping = file->f_mapping; in hugetlbfs_read_iter() local 224 struct inode *inode = mapping->host; in hugetlbfs_read_iter() 251 page = find_lock_page(mapping, index); in hugetlbfs_read_iter() 282 struct address_space *mapping, in hugetlbfs_write_begin() argument 289 static int hugetlbfs_write_end(struct file *file, struct address_space *mapping, in hugetlbfs_write_end() argument 307 struct address_space *mapping = &inode->i_data; in truncate_hugepages() local 316 if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { in truncate_hugepages() 336 BUG_ON(!lstart && mapping->nrpages); in truncate_hugepages() 379 struct address_space *mapping = inode->i_mapping; in hugetlb_vmtruncate() local 386 i_mmap_lock_write(mapping); in hugetlb_vmtruncate() [all …]
|
/linux-4.1.27/fs/ext2/ |
D | inode.c | 55 static void ext2_write_failed(struct address_space *mapping, loff_t to) in ext2_write_failed() argument 57 struct inode *inode = mapping->host; in ext2_write_failed() 796 ext2_readpages(struct file *file, struct address_space *mapping, in ext2_readpages() argument 799 return mpage_readpages(mapping, pages, nr_pages, ext2_get_block); in ext2_readpages() 803 ext2_write_begin(struct file *file, struct address_space *mapping, in ext2_write_begin() argument 809 ret = block_write_begin(mapping, pos, len, flags, pagep, in ext2_write_begin() 812 ext2_write_failed(mapping, pos + len); in ext2_write_begin() 816 static int ext2_write_end(struct file *file, struct address_space *mapping, in ext2_write_end() argument 822 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); in ext2_write_end() 824 ext2_write_failed(mapping, pos + len); in ext2_write_end() [all …]
|
D | file.c | 78 struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; in ext2_fsync() local 81 if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) { in ext2_fsync()
|
D | dir.c | 95 struct address_space *mapping = page->mapping; in ext2_commit_chunk() local 96 struct inode *dir = mapping->host; in ext2_commit_chunk() 100 block_write_end(NULL, mapping, pos, len, len, page, NULL); in ext2_commit_chunk() 120 struct inode *dir = page->mapping->host; in ext2_check_page() 205 struct address_space *mapping = dir->i_mapping; in ext2_get_page() local 206 struct page *page = read_mapping_page(mapping, n, NULL); in ext2_get_page() 588 struct inode *inode = page->mapping->host; in ext2_delete_entry()
|
/linux-4.1.27/drivers/net/ethernet/smsc/ |
D | smsc9420.c | 53 dma_addr_t mapping; member 557 BUG_ON(!pd->tx_buffers[i].mapping); in smsc9420_free_tx_ring() 558 pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping, in smsc9420_free_tx_ring() 590 if (pd->rx_buffers[i].mapping) in smsc9420_free_rx_ring() 591 pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping, in smsc9420_free_rx_ring() 811 pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping, in smsc9420_rx_handoff() 813 pd->rx_buffers[index].mapping = 0; in smsc9420_rx_handoff() 836 dma_addr_t mapping; in smsc9420_alloc_rx_buffer() local 839 BUG_ON(pd->rx_buffers[index].mapping); in smsc9420_alloc_rx_buffer() 844 mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), in smsc9420_alloc_rx_buffer() [all …]
|
/linux-4.1.27/arch/sh/mm/ |
D | cache-sh7705.c | 139 struct address_space *mapping = page_mapping(page); in sh7705_flush_dcache_page() local 141 if (mapping && !mapping_mapped(mapping)) in sh7705_flush_dcache_page()
|
/linux-4.1.27/arch/cris/include/asm/ |
D | cacheflush.h | 17 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 18 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/include/asm-generic/ |
D | cacheflush.h | 18 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 19 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/arch/nios2/include/asm/ |
D | cacheflush.h | 49 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 50 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/arch/hexagon/include/asm/ |
D | cacheflush.h | 48 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 49 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/drivers/staging/comedi/drivers/ |
D | serial2002.c | 341 unsigned char *mapping, in serial2002_setup_subdevice() argument 374 if (mapping) in serial2002_setup_subdevice() 375 mapping[chan] = j; in serial2002_setup_subdevice() 480 unsigned char *mapping = NULL; in serial2002_setup_subdevs() local 489 mapping = devpriv->digital_in_mapping; in serial2002_setup_subdevs() 494 mapping = devpriv->digital_out_mapping; in serial2002_setup_subdevs() 499 mapping = devpriv->analog_in_mapping; in serial2002_setup_subdevs() 505 mapping = devpriv->analog_out_mapping; in serial2002_setup_subdevs() 511 mapping = devpriv->encoder_in_mapping; in serial2002_setup_subdevs() 517 if (serial2002_setup_subdevice(s, cfg, range, mapping, kind)) in serial2002_setup_subdevs()
|
/linux-4.1.27/arch/c6x/include/asm/ |
D | cacheflush.h | 34 #define flush_dcache_mmap_lock(mapping) do {} while (0) argument 35 #define flush_dcache_mmap_unlock(mapping) do {} while (0) argument
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | cacheflush.h | 34 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 35 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/fsl/ |
D | dma.txt | 12 - ranges : describes the mapping between the address space of the 16 - interrupt-parent : optional, if needed for interrupt mapping 28 - interrupt-parent : optional, if needed for interrupt mapping 82 - ranges : describes the mapping between the address space of the 91 - interrupt-parent : optional, if needed for interrupt mapping 142 - ranges : describes the mapping between the address space of the 149 - interrupt-parent : optional, if needed for interrupt mapping
|
/linux-4.1.27/arch/parisc/kernel/ |
D | cache.c | 288 struct address_space *mapping = page_mapping(page); in flush_dcache_page() local 294 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page() 301 if (!mapping) in flush_dcache_page() 311 flush_dcache_mmap_lock(mapping); in flush_dcache_page() 312 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in flush_dcache_page() 334 flush_dcache_mmap_unlock(mapping); in flush_dcache_page()
|
/linux-4.1.27/fs/fuse/ |
D | file.c | 702 struct inode *inode = page->mapping->host; in fuse_do_readpage() 749 struct inode *inode = page->mapping->host; in fuse_readpage() 768 struct address_space *mapping = NULL; in fuse_readpages_end() local 770 for (i = 0; mapping == NULL && i < req->num_pages; i++) in fuse_readpages_end() 771 mapping = req->pages[i]->mapping; in fuse_readpages_end() 773 if (mapping) { in fuse_readpages_end() 774 struct inode *inode = mapping->host; in fuse_readpages_end() 869 static int fuse_readpages(struct file *file, struct address_space *mapping, in fuse_readpages() argument 872 struct inode *inode = mapping->host; in fuse_readpages() 893 err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); in fuse_readpages() [all …]
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | cacheflush.h | 30 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 31 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/arch/score/include/asm/ |
D | cacheflush.h | 23 #define flush_dcache_mmap_lock(mapping) do {} while (0) argument 24 #define flush_dcache_mmap_unlock(mapping) do {} while (0) argument
|
/linux-4.1.27/fs/fat/ |
D | inode.c | 181 static int fat_writepages(struct address_space *mapping, in fat_writepages() argument 184 return mpage_writepages(mapping, wbc, fat_get_block); in fat_writepages() 192 static int fat_readpages(struct file *file, struct address_space *mapping, in fat_readpages() argument 195 return mpage_readpages(mapping, pages, nr_pages, fat_get_block); in fat_readpages() 198 static void fat_write_failed(struct address_space *mapping, loff_t to) in fat_write_failed() argument 200 struct inode *inode = mapping->host; in fat_write_failed() 208 static int fat_write_begin(struct file *file, struct address_space *mapping, in fat_write_begin() argument 215 err = cont_write_begin(file, mapping, pos, len, flags, in fat_write_begin() 217 &MSDOS_I(mapping->host)->mmu_private); in fat_write_begin() 219 fat_write_failed(mapping, pos + len); in fat_write_begin() [all …]
|
/linux-4.1.27/fs/fscache/ |
D | page.c | 275 struct address_space *mapping, in fscache_alloc_retrieval() argument 292 op->mapping = mapping; in fscache_alloc_retrieval() 428 op = fscache_alloc_retrieval(cookie, page->mapping, in __fscache_read_or_alloc_page() 533 struct address_space *mapping, in __fscache_read_or_alloc_pages() argument 564 op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context); in __fscache_read_or_alloc_pages() 680 op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL); in __fscache_alloc_page() 1133 op->mapping, page); in fscache_mark_page_cached() 1164 struct address_space *mapping = inode->i_mapping; in __fscache_uncache_all_inode_pages() local 1171 if (!mapping || mapping->nrpages == 0) { in __fscache_uncache_all_inode_pages() 1179 if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) in __fscache_uncache_all_inode_pages()
|
/linux-4.1.27/arch/alpha/include/asm/ |
D | cacheflush.h | 14 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 15 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/arch/m68k/include/asm/ |
D | cacheflush_no.h | 18 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 19 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/fs/udf/ |
D | file.c | 43 struct inode *inode = page->mapping->host; in __udf_adinicb_readpage() 67 struct inode *inode = page->mapping->host; in udf_adinicb_writepage() 84 struct address_space *mapping, loff_t pos, in udf_adinicb_write_begin() argument 92 page = grab_cache_page_write_begin(mapping, 0, flags); in udf_adinicb_write_begin()
|
/linux-4.1.27/fs/qnx6/ |
D | inode.c | 101 static int qnx6_readpages(struct file *file, struct address_space *mapping, in qnx6_readpages() argument 104 return mpage_readpages(mapping, pages, nr_pages, qnx6_get_block); in qnx6_readpages() 186 struct address_space *mapping = root->i_mapping; in qnx6_checkroot() local 187 struct page *page = read_mapping_page(mapping, 0, NULL); in qnx6_checkroot() 493 static sector_t qnx6_bmap(struct address_space *mapping, sector_t block) in qnx6_bmap() argument 495 return generic_block_bmap(mapping, block, qnx6_get_block); in qnx6_bmap() 525 struct address_space *mapping; in qnx6_iget() local 547 mapping = sbi->inodes->i_mapping; in qnx6_iget() 548 page = read_mapping_page(mapping, n, NULL); in qnx6_iget()
|
D | dir.c | 28 struct address_space *mapping = dir->i_mapping; in qnx6_get_page() local 29 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_get_page() 58 struct address_space *mapping = sbi->longfile->i_mapping; in qnx6_longname() local 59 struct page *page = read_mapping_page(mapping, n, NULL); in qnx6_longname()
|
/linux-4.1.27/fs/reiserfs/ |
D | ioctl.c | 176 struct address_space *mapping; in reiserfs_unpack() local 207 mapping = inode->i_mapping; in reiserfs_unpack() 208 page = grab_cache_page(mapping, index); in reiserfs_unpack()
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | cacheflush_32.h | 43 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 44 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
D | cacheflush_64.h | 71 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 72 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/arch/frv/include/asm/ |
D | cacheflush.h | 28 #define flush_dcache_mmap_lock(mapping) do {} while(0) argument 29 #define flush_dcache_mmap_unlock(mapping) do {} while(0) argument
|
/linux-4.1.27/fs/squashfs/ |
D | file_direct.c | 30 struct inode *inode = target_page->mapping->host; in squashfs_readpage_block() 62 grab_cache_page_nowait(target_page->mapping, n); in squashfs_readpage_block() 143 struct inode *i = target_page->mapping->host; in squashfs_read_cache()
|
/linux-4.1.27/arch/score/mm/ |
D | cache.c | 57 struct address_space *mapping = page_mapping(page); in flush_dcache_page() local 62 if (mapping && !mapping_mapped(mapping)) { in flush_dcache_page()
|
/linux-4.1.27/kernel/irq/ |
D | Kconfig | 77 bool "Expose hardware/virtual IRQ mapping via debugfs" 80 This option will show the mapping relationship between hardware irq 81 numbers and Linux irq numbers. The mapping is exposed via debugfs
|
/linux-4.1.27/Documentation/devicetree/bindings/mips/cavium/ |
D | uctl.txt | 14 - ranges: Empty to signify direct mapping of the children. 26 ranges; /* Direct mapping */
|
/linux-4.1.27/arch/arc/include/asm/ |
D | cacheflush.h | 49 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 50 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/arch/arm/mach-shmobile/ |
D | clock-sh73a0.c | 444 value = __raw_readl(clk->mapping->base); in dsiphy_recalc() 471 value = __raw_readl(clk->mapping->base); in dsiphy_disable() 474 __raw_writel(value , clk->mapping->base); in dsiphy_disable() 482 value = __raw_readl(clk->mapping->base); in dsiphy_enable() 488 __raw_writel(value | 0x000B8000, clk->mapping->base); in dsiphy_enable() 504 value = __raw_readl(clk->mapping->base); in dsiphy_set_rate() 507 __raw_writel(value, clk->mapping->base); in dsiphy_set_rate() 533 .mapping = &dsi0phy_clk_mapping, 539 .mapping = &dsi1phy_clk_mapping,
|
/linux-4.1.27/drivers/net/ethernet/adaptec/ |
D | starfire.c | 522 dma_addr_t mapping; member 526 dma_addr_t mapping; member 1155 …np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE); in init_ring() 1157 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); in init_ring() 1166 np->rx_info[i].mapping = 0; in init_ring() 1227 np->tx_info[entry].mapping = in start_tx() 1232 np->tx_info[entry].mapping = in start_tx() 1239 np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping); in start_tx() 1341 np->tx_info[entry].mapping, in intr_handler() 1344 np->tx_info[entry].mapping = 0; in intr_handler() [all …]
|
/linux-4.1.27/arch/mips/mm/ |
D | cache.c | 84 struct address_space *mapping = page_mapping(page); in __flush_dcache_page() local 87 if (mapping && !mapping_mapped(mapping)) { in __flush_dcache_page()
|
/linux-4.1.27/arch/sh/include/asm/ |
D | cacheflush.h | 92 #define flush_dcache_mmap_lock(mapping) do { } while (0) argument 93 #define flush_dcache_mmap_unlock(mapping) do { } while (0) argument
|
/linux-4.1.27/drivers/net/wireless/p54/ |
D | p54pci.c | 154 dma_addr_t mapping; in p54p_refill_rx_ring() local 159 mapping = pci_map_single(priv->pdev, in p54p_refill_rx_ring() 164 if (pci_dma_mapping_error(priv->pdev, mapping)) { in p54p_refill_rx_ring() 171 desc->host_addr = cpu_to_le32(mapping); in p54p_refill_rx_ring() 333 dma_addr_t mapping; in p54p_tx() local 340 mapping = pci_map_single(priv->pdev, skb->data, skb->len, in p54p_tx() 342 if (pci_dma_mapping_error(priv->pdev, mapping)) { in p54p_tx() 351 desc->host_addr = cpu_to_le32(mapping); in p54p_tx()
|