/linux-4.1.27/fs/ocfs2/
D | reservations.c |
      53  struct ocfs2_alloc_reservation *resv)  in ocfs2_resv_window_bits() argument
      58  if (!(resv->r_flags & OCFS2_RESV_FLAG_DIR)) {  in ocfs2_resv_window_bits()
      67  static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv)  in ocfs2_resv_end() argument
      69  if (resv->r_len)  in ocfs2_resv_end()
      70  return resv->r_start + resv->r_len - 1;  in ocfs2_resv_end()
      71  return resv->r_start;  in ocfs2_resv_end()
      74  static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv)  in ocfs2_resv_empty() argument
      76  return !!(resv->r_len == 0);  in ocfs2_resv_empty()
      90  struct ocfs2_alloc_reservation *resv;  in ocfs2_dump_resv() local
      98  resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node);  in ocfs2_dump_resv()
      [all …]
|
D | reservations.h |
      64  void ocfs2_resv_init_once(struct ocfs2_alloc_reservation *resv);
      67  void ocfs2_resv_set_type(struct ocfs2_alloc_reservation *resv,
      81  struct ocfs2_alloc_reservation *resv);
     137  struct ocfs2_alloc_reservation *resv,
     156  struct ocfs2_alloc_reservation *resv,
|
D | localalloc.c |
      56  struct ocfs2_alloc_reservation *resv);
     840  struct ocfs2_alloc_reservation *resv)  in ocfs2_local_alloc_find_clear_bits() argument
     853  if (!resv) {  in ocfs2_local_alloc_find_clear_bits()
     857  resv = &r;  in ocfs2_local_alloc_find_clear_bits()
     861  if (ocfs2_resmap_resv_bits(resmap, resv, &bitoff, &numfound) == 0) {  in ocfs2_local_alloc_find_clear_bits()
     917  ocfs2_resv_discard(resmap, resv);  in ocfs2_local_alloc_find_clear_bits()
|
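Note: the reservations.c excerpts above contain the complete bodies of ocfs2's window helpers. A minimal standalone sketch reconstructed from them, assuming the struct layout and the flag value (the real struct also carries rb-tree and LRU linkage):

    /* Reconstructed from the excerpts above; layout and flag value assumed. */
    struct ocfs2_alloc_reservation {
            unsigned int r_start;   /* first bit of the reserved window */
            unsigned int r_len;     /* 0 means the reservation is empty */
            unsigned int r_flags;
    };

    #define OCFS2_RESV_FLAG_DIR 0x02        /* value assumed for illustration */

    /* Last bit covered by the window; an empty window reports r_start. */
    static inline unsigned int ocfs2_resv_end(struct ocfs2_alloc_reservation *resv)
    {
            if (resv->r_len)
                    return resv->r_start + resv->r_len - 1;
            return resv->r_start;
    }

    static inline int ocfs2_resv_empty(struct ocfs2_alloc_reservation *resv)
    {
            return !!(resv->r_len == 0);
    }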
/linux-4.1.27/drivers/gpu/drm/radeon/ |
D | radeon_prime.c |
      64  struct reservation_object *resv = attach->dmabuf->resv;  in radeon_gem_prime_import_sg_table() local
      69  ww_mutex_lock(&resv->lock, NULL);  in radeon_gem_prime_import_sg_table()
      71  RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);  in radeon_gem_prime_import_sg_table()
      72  ww_mutex_unlock(&resv->lock);  in radeon_gem_prime_import_sg_table()
     116  return bo->tbo.resv;  in radeon_gem_prime_res_obj()
|
D | radeon_benchmark.c |
      38  struct reservation_object *resv)  in radeon_benchmark_do_move() argument
      51  resv);  in radeon_benchmark_do_move()
      56  resv);  in radeon_benchmark_do_move()
     125  dobj->tbo.resv);  in radeon_benchmark_move()
     136  dobj->tbo.resv);  in radeon_benchmark_move()
|
D | radeon_sync.c |
      91  struct reservation_object *resv,  in radeon_sync_resv() argument
     101  f = reservation_object_get_excl(resv);  in radeon_sync_resv()
     108  flist = reservation_object_get_list(resv);  in radeon_sync_resv()
     114  reservation_object_held(resv));  in radeon_sync_resv()
|
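Note: radeon_sync_resv() is the hub of this section; every asic copy hook below (rv770_copy_dma, evergreen_copy_dma, si_copy_dma, r600_copy_dma, cik_copy_dma, ...) calls it with the buffer's reservation object before emitting the copy. A hedged sketch of that shared skeleton, with ring locking, packet emission and error unwinding elided; the helper names are the driver's, but the fence filtering done by the bool flag is internal to radeon_sync_resv() and not shown:

    struct radeon_fence *copy_skeleton(struct radeon_device *rdev,
                                       uint64_t src_offset, uint64_t dst_offset,
                                       unsigned num_gpu_pages,
                                       struct reservation_object *resv)
    {
            int ring = rdev->asic->copy.dma_ring_index;
            struct radeon_fence *fence;
            struct radeon_sync sync;

            radeon_sync_create(&sync);

            /* pull the buffer's reservation fences into the sync object,
             * then make this ring wait for them */
            radeon_sync_resv(rdev, &sync, resv, false);
            radeon_sync_rings(rdev, &sync, ring);

            /* ... emit the engine-specific COPY packets moving
             * num_gpu_pages pages from src_offset to dst_offset ... */

            radeon_fence_emit(rdev, &fence, ring);
            radeon_sync_free(rdev, &sync, fence);
            return fence;
    }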
D | rv770_dma.c |
      45  struct reservation_object *resv)  in rv770_copy_dma() argument
      66  radeon_sync_resv(rdev, &sync, resv, false);  in rv770_copy_dma()
|
D | radeon_object.c |
     181  struct reservation_object *resv,  in radeon_bo_create() argument
     255  acc_size, sg, resv, &radeon_ttm_bo_destroy);  in radeon_bo_create()
     594  lockdep_assert_held(&bo->tbo.resv->lock.base);  in radeon_bo_get_surface_reg()
     720  lockdep_assert_held(&bo->tbo.resv->lock.base);  in radeon_bo_get_tiling_flags()
     732  lockdep_assert_held(&bo->tbo.resv->lock.base);  in radeon_bo_check_tiling()
     848  struct reservation_object *resv = bo->tbo.resv;  in radeon_bo_fence() local
     851  reservation_object_add_shared_fence(resv, &fence->base);  in radeon_bo_fence()
     853  reservation_object_add_excl_fence(resv, &fence->base);  in radeon_bo_fence()
|
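Note: the radeon_bo_fence() excerpts show the canonical way a driver publishes a fence after a submission. A minimal sketch of the same pattern; the reservation lock must be held, and a shared slot must have been reserved beforehand with reservation_object_reserve_shared():

    static void bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
                         bool shared)
    {
            struct reservation_object *resv = bo->tbo.resv;

            if (shared)
                    /* one of several readers */
                    reservation_object_add_shared_fence(resv, &fence->base);
            else
                    /* exclusive writer: supersedes the shared list */
                    reservation_object_add_excl_fence(resv, &fence->base);
    }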
D | evergreen_dma.c |
     111  struct reservation_object *resv)  in evergreen_copy_dma() argument
     132  radeon_sync_resv(rdev, &sync, resv, false);  in evergreen_copy_dma()
|
D | radeon_test.c |
     122  vram_obj->tbo.resv);  in radeon_do_test_moves()
     126  vram_obj->tbo.resv);  in radeon_do_test_moves()
     173  vram_obj->tbo.resv);  in radeon_do_test_moves()
     177  vram_obj->tbo.resv);  in radeon_do_test_moves()
|
D | si_dma.c |
     234  struct reservation_object *resv)  in si_copy_dma() argument
     255  radeon_sync_resv(rdev, &sync, resv, false);  in si_copy_dma()
|
D | radeon_asic.h |
      89  struct reservation_object *resv);
     160  struct reservation_object *resv);
     350  struct reservation_object *resv);
     354  struct reservation_object *resv);
     475  struct reservation_object *resv);
     549  struct reservation_object *resv);
     726  struct reservation_object *resv);
     796  struct reservation_object *resv);
     800  struct reservation_object *resv);
|
D | r600_dma.c |
     442  struct reservation_object *resv)  in r600_copy_dma() argument
     463  radeon_sync_resv(rdev, &sync, resv, false);  in r600_copy_dma()
|
D | radeon_cs.c |
     239  struct reservation_object *resv;  in radeon_cs_sync_rings() local
     241  resv = reloc->robj->tbo.resv;  in radeon_cs_sync_rings()
     242  r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,  in radeon_cs_sync_rings()
|
D | radeon_object.h | 129 struct reservation_object *resv,
|
D | radeon.h |
     620  struct reservation_object *resv,
    1920  struct reservation_object *resv);
    1926  struct reservation_object *resv);
    1933  struct reservation_object *resv);
    2908  #define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (res…  argument
    2909  #define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))  argument
    2910  #define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))  argument
|
D | radeon_vm.c |
     706  radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);  in radeon_vm_update_page_directory()
     834  radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);  in radeon_vm_update_ptes()
     835  r = reservation_object_reserve_shared(pt->tbo.resv);  in radeon_vm_update_ptes()
|
D | radeon_mn.c | 154 r = reservation_object_wait_timeout_rcu(bo->tbo.resv, in radeon_mn_invalidate_range_start()
|
D | radeon_gem.c |
     114  r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);  in radeon_gem_set_domain()
     468  ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);  in radeon_gem_wait_idle_ioctl()
|
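Note: both hits above wait on every fence in the BO's reservation object without taking its lock. A sketch of the call as it appears in the excerpts; the arguments are (resv, wait_all, interruptible, timeout) and the return convention is the usual one for the wait_timeout family:

    static int wait_bo_idle(struct radeon_bo *robj)
    {
            long r;

            r = reservation_object_wait_timeout_rcu(robj->tbo.resv,
                                                    true, true, 30 * HZ);
            if (r == 0)             /* timed out */
                    return -EBUSY;
            if (r < 0)              /* signal or error */
                    return r;
            return 0;               /* all fences signaled in time */
    }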
D | cik_sdma.c |
     582  struct reservation_object *resv)  in cik_copy_dma() argument
     603  radeon_sync_resv(rdev, &sync, resv, false);  in cik_copy_dma()
|
D | r200.c | 87 struct reservation_object *resv) in r200_copy_dma() argument
|
D | radeon_uvd.c | 436 f = reservation_object_get_excl(bo->tbo.resv); in radeon_uvd_cs_msg()
|
D | radeon_ttm.c | 299 fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv); in radeon_move_blit()
|
D | radeon_display.c | 506 work->fence = fence_get(reservation_object_get_excl(new_rbo->tbo.resv)); in radeon_crtc_page_flip()
|
D | r600.c |
    2917  struct reservation_object *resv)  in r600_copy_cpdma() argument
    2938  radeon_sync_resv(rdev, &sync, resv, false);  in r600_copy_cpdma()
|
D | cik.c |
    4045  struct reservation_object *resv)  in cik_copy_cpdma() argument
    4066  radeon_sync_resv(rdev, &sync, resv, false);  in cik_copy_cpdma()
|
D | r100.c | 886 struct reservation_object *resv) in r100_copy_blit() argument
|
/linux-4.1.27/net/sunrpc/ |
D | svc.c |
    1072  svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)  in svc_process_common() argument
    1099  svc_putu32(resv, rqstp->rq_xid);  in svc_process_common()
    1104  svc_putnl(resv, 1);		/* REPLY */  in svc_process_common()
    1110  reply_statp = resv->iov_base + resv->iov_len;  in svc_process_common()
    1112  svc_putnl(resv, 0);		/* ACCEPT */  in svc_process_common()
    1168  statp = resv->iov_base +resv->iov_len;  in svc_process_common()
    1169  svc_putnl(resv, RPC_SUCCESS);  in svc_process_common()
    1201  !xdr(rqstp, resv->iov_base+resv->iov_len, rqstp->rq_resp)) {  in svc_process_common()
    1218  resv->iov_len = ((void*)statp) - resv->iov_base + 4;  in svc_process_common()
    1245  svc_putnl(resv, 1);		/* REJECT */  in svc_process_common()
    [all …]
|
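Note: the svc_process_common() excerpts trace the RPC reply header being appended word by word to the response kvec; svc_putu32() stores a 32-bit value as-is, while svc_putnl() converts from host to network byte order first. A hedged sketch of the opening sequence (constants per RFC 5531; the verifier and accept status that follow are elided):

    static void start_accepted_reply(struct svc_rqst *rqstp)
    {
            struct kvec *resv = &rqstp->rq_res.head[0];

            svc_putu32(resv, rqstp->rq_xid);  /* xid, already big-endian */
            svc_putnl(resv, 1);               /* msg_type: REPLY */
            svc_putnl(resv, 0);               /* reply_stat: MSG_ACCEPTED */
            /* auth verifier and accept_stat follow in the real code */
    }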
D | svcauth_unix.c |
     728  struct kvec *resv = &rqstp->rq_res.head[0];  in svcauth_null_accept() local
     757  svc_putnl(resv, RPC_AUTH_NULL);  in svcauth_null_accept()
     758  svc_putnl(resv, 0);  in svcauth_null_accept()
     792  struct kvec *resv = &rqstp->rq_res.head[0];  in svcauth_unix_accept() local
     836  svc_putnl(resv, RPC_AUTH_NULL);  in svcauth_unix_accept()
     837  svc_putnl(resv, 0);  in svcauth_unix_accept()
|
D | svcsock.c |
    1220  struct kvec *resv = &rqstp->rq_res.head[0];  in svc_tcp_prep_reply_hdr() local
    1223  svc_putnl(resv, 0);  in svc_tcp_prep_reply_hdr()
|
/linux-4.1.27/drivers/dma-buf/ |
D | dma-buf.c |
      72  if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])  in dma_buf_release()
      73  reservation_object_fini(dmabuf->resv);  in dma_buf_release()
     136  struct reservation_object *resv;  in dma_buf_poll() local
     143  if (!dmabuf || !dmabuf->resv)  in dma_buf_poll()
     146  resv = dmabuf->resv;  in dma_buf_poll()
     155  seq = read_seqcount_begin(&resv->seq);  in dma_buf_poll()
     158  fobj = rcu_dereference(resv->fence);  in dma_buf_poll()
     163  fence_excl = rcu_dereference(resv->fence_excl);  in dma_buf_poll()
     164  if (read_seqcount_retry(&resv->seq, seq)) {  in dma_buf_poll()
     285  struct reservation_object *resv = exp_info->resv;  in dma_buf_export() local
     [all …]
|
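Note: the dma_buf_poll() excerpts are a textbook seqcount/RCU snapshot: sample resv->seq, dereference both fence pointers under RCU, and retry if a writer raced in between. A simplified sketch of just that loop (the real function goes on to install poll callbacks on the fences it found):

    static void snapshot_fences(struct reservation_object *resv)
    {
            struct reservation_object_list *fobj;
            struct fence *fence_excl;
            unsigned seq;

    retry:
            seq = read_seqcount_begin(&resv->seq);
            rcu_read_lock();

            fobj = rcu_dereference(resv->fence);
            fence_excl = rcu_dereference(resv->fence_excl);

            if (read_seqcount_retry(&resv->seq, seq)) {
                    rcu_read_unlock();
                    goto retry;
            }

            /* fence_excl and fobj->shared[0..shared_count) are now a
             * consistent snapshot; take references before unlocking */
            rcu_read_unlock();
    }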
/linux-4.1.27/drivers/gpu/drm/ttm/ |
D | ttm_execbuf_util.c |
     134  ret = reservation_object_reserve_shared(bo->resv);  in ttm_eu_reserve_buffers()
     146  ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,  in ttm_eu_reserve_buffers()
     149  ww_mutex_lock_slow(&bo->resv->lock, ticket);  in ttm_eu_reserve_buffers()
     154  ret = reservation_object_reserve_shared(bo->resv);  in ttm_eu_reserve_buffers()
     204  reservation_object_add_shared_fence(bo->resv, fence);  in ttm_eu_fence_buffer_objects()
     206  reservation_object_add_excl_fence(bo->resv, fence);  in ttm_eu_fence_buffer_objects()
|
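Note: ttm_eu_reserve_buffers() is where the wound/wait protocol pays off: on -EDEADLK it drops every lock it holds and re-takes the contended one with the _slow variant before retrying. A two-buffer sketch of that back-off, assuming reservation_ww_class from linux/reservation.h; a real implementation retries in a loop instead of giving up on a second -EDEADLK:

    static int lock_both(struct ww_acquire_ctx *ctx,
                         struct ww_mutex *a, struct ww_mutex *b)
    {
            int ret;

            ww_acquire_init(ctx, &reservation_ww_class);

            ret = ww_mutex_lock_interruptible(a, ctx);
            if (ret)
                    goto err;

            ret = ww_mutex_lock_interruptible(b, ctx);
            if (ret == -EDEADLK) {
                    /* wounded: back off, then sleep until the current
                     * owner of the contended lock is finished */
                    ww_mutex_unlock(a);
                    ret = ww_mutex_lock_slow_interruptible(b, ctx);
                    if (ret)
                            goto err;
                    ret = ww_mutex_lock_interruptible(a, ctx);
                    if (ret) {
                            ww_mutex_unlock(b);
                            goto err;
                    }
            } else if (ret) {
                    ww_mutex_unlock(a);
                    goto err;
            }

            ww_acquire_done(ctx);
            return 0;
    err:
            ww_acquire_fini(ctx);
            return ret;
    }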
D | ttm_bo.c |
     153  if (bo->resv == &bo->ttm_resv)  in ttm_bo_release_list()
     169  lockdep_assert_held(&bo->resv->lock.base);  in ttm_bo_add_to_lru()
     403  ww_mutex_unlock (&bo->resv->lock);  in ttm_bo_cleanup_memtype_use()
     412  fobj = reservation_object_get_list(bo->resv);  in ttm_bo_flush_all_fences()
     413  fence = reservation_object_get_excl(bo->resv);  in ttm_bo_flush_all_fences()
     419  reservation_object_held(bo->resv));  in ttm_bo_flush_all_fences()
     494  ww_mutex_unlock(&bo->resv->lock);  in ttm_bo_cleanup_refs_and_unlock()
     497  lret = reservation_object_wait_timeout_rcu(bo->resv,  in ttm_bo_cleanup_refs_and_unlock()
     676  lockdep_assert_held(&bo->resv->lock.base);  in ttm_bo_evict()
     973  lockdep_assert_held(&bo->resv->lock.base);  in ttm_bo_move_buffer()
     [all …]
|
D | ttm_bo_util.c |
     472  fbo->resv = &fbo->ttm_resv;  in ttm_buffer_object_transfer()
     473  reservation_object_init(fbo->resv);  in ttm_buffer_object_transfer()
     474  ret = ww_mutex_trylock(&fbo->resv->lock);  in ttm_buffer_object_transfer()
     645  reservation_object_add_excl_fence(bo->resv, fence);  in ttm_bo_move_accel_cleanup()
     673  reservation_object_add_excl_fence(ghost_obj->resv, fence);  in ttm_bo_move_accel_cleanup()
|
/linux-4.1.27/net/sunrpc/auth_gss/ |
D | svcauth_gss.c |
     637  svc_safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)  in svc_safe_putnetobj() argument
     641  if (resv->iov_len + 4 > PAGE_SIZE)  in svc_safe_putnetobj()
     643  svc_putnl(resv, o->len);  in svc_safe_putnetobj()
     644  p = resv->iov_base + resv->iov_len;  in svc_safe_putnetobj()
     645  resv->iov_len += round_up_to_quad(o->len);  in svc_safe_putnetobj()
     646  if (resv->iov_len > PAGE_SIZE)  in svc_safe_putnetobj()
    1074  gss_write_resv(struct kvec *resv, size_t size_limit,  in gss_write_resv() argument
    1078  if (resv->iov_len + 4 > size_limit)  in gss_write_resv()
    1080  svc_putnl(resv, RPC_SUCCESS);  in gss_write_resv()
    1081  if (svc_safe_putnetobj(resv, out_handle))  in gss_write_resv()
    [all …]
|
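Note: svc_safe_putnetobj() shows how an XDR opaque is appended safely: a 4-byte length word, the payload, and padding up to a 4-byte boundary, with overflow checks against PAGE_SIZE before and after. A sketch filling in the memcpy the excerpts skip over; round_up_to_quad() is a helper in the same file:

    static int safe_putnetobj(struct kvec *resv, struct xdr_netobj *o)
    {
            u8 *p;

            if (resv->iov_len + 4 > PAGE_SIZE)
                    return -1;
            svc_putnl(resv, o->len);                /* length word */
            p = resv->iov_base + resv->iov_len;
            resv->iov_len += round_up_to_quad(o->len);
            if (resv->iov_len > PAGE_SIZE)
                    return -1;
            memcpy(p, o->data, o->len);             /* payload, quad-padded */
            return 0;
    }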
/linux-4.1.27/include/drm/ttm/ |
D | ttm_bo_driver.h |
     790  success = ww_mutex_trylock(&bo->resv->lock);  in __ttm_bo_reserve()
     795  ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);  in __ttm_bo_reserve()
     797  ret = ww_mutex_lock(&bo->resv->lock, ticket);  in __ttm_bo_reserve()
     883  ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,  in ttm_bo_reserve_slowpath()
     886  ww_mutex_lock_slow(&bo->resv->lock, ticket);  in ttm_bo_reserve_slowpath()
     905  ww_mutex_unlock(&bo->resv->lock);  in __ttm_bo_unreserve()
|
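Note: __ttm_bo_reserve() picks between three ww_mutex entry points depending on the caller's flags. A hedged sketch of that selection, including TTM's -EINTR to -ERESTARTSYS translation (WARN and ticket corner cases elided):

    static int bo_reserve(struct ttm_buffer_object *bo, bool interruptible,
                          bool no_wait, struct ww_acquire_ctx *ticket)
    {
            int ret;

            if (no_wait)
                    return ww_mutex_trylock(&bo->resv->lock) ? 0 : -EBUSY;

            if (interruptible)
                    ret = ww_mutex_lock_interruptible(&bo->resv->lock, ticket);
            else
                    ret = ww_mutex_lock(&bo->resv->lock, ticket);

            if (ret == -EINTR)
                    ret = -ERESTARTSYS;
            return ret;
    }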
D | ttm_bo_api.h |
     257  struct reservation_object *resv;  member
     491  struct reservation_object *resv,
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | vio.h |
      43  u64 resv[6];  member
      69  u16 resv;  member
      77  u64 resv[5];  member
     177  u32 resv;  member
     198  u16 resv;  member
     240  u32 resv;  member
|
D | hypervisor.h | 473 unsigned long resv; member
|
/linux-4.1.27/include/uapi/linux/ |
D | igmp.h |
      70  resv:4;  member
      72  __u8 resv:4,  member
|
D | vfio.h | 189 __u32 resv; /* Reserved for alignment */ member
|
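Note: in the uapi headers here, resv is not a lock but explicit padding: it keeps struct layouts identical for 32- and 64-bit userspace and reserves room for future flags. An invented example struct (not part of any real ABI) showing the convention:

    struct example_ioctl_arg {
            __u32 argsz;    /* caller fills in sizeof(), for forward compat */
            __u32 flags;
            __u64 addr;     /* stays naturally aligned on all ABIs */
            __u32 count;
            __u32 resv;     /* reserved for alignment; must be zero */
    };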
/linux-4.1.27/mm/ |
D | hugetlb.c |
     229  static long region_add(struct resv_map *resv, long f, long t)  in region_add() argument
     231  struct list_head *head = &resv->regions;  in region_add()
     234  spin_lock(&resv->lock);  in region_add()
     264  spin_unlock(&resv->lock);  in region_add()
     268  static long region_chg(struct resv_map *resv, long f, long t)  in region_chg() argument
     270  struct list_head *head = &resv->regions;  in region_chg()
     275  spin_lock(&resv->lock);  in region_chg()
     286  spin_unlock(&resv->lock);  in region_chg()
     325  spin_unlock(&resv->lock);  in region_chg()
     330  spin_unlock(&resv->lock);  in region_chg()
     [all …]
|
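Note: the hugetlb excerpts are the two halves of a prepare/commit scheme over the resv_map region list, both serialized by resv->lock: region_chg() only computes how many pages reserving [f, t) would add, so quota can be charged first, and region_add() then commits the same range. An illustrative caller shape (both functions are static to mm/hugetlb.c, so this is a sketch, not a usable API):

    static long reserve_range(struct resv_map *resv, long from, long to)
    {
            long chg;

            chg = region_chg(resv, from, to);   /* pages the range would add */
            if (chg < 0)
                    return chg;

            /* ... charge the subpool/quota for 'chg' pages here ... */

            return region_add(resv, from, to);  /* commit the reservation */
    }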
/linux-4.1.27/drivers/gpu/drm/nouveau/ |
D | nouveau_prime.c |
      64  struct reservation_object *robj = attach->dmabuf->resv;  in nouveau_gem_prime_import_sg_table()
     114  return nvbo->bo.resv;  in nouveau_gem_prime_res_obj()
|
D | nouveau_fence.c |
     395  struct reservation_object *resv = nvbo->bo.resv;  in nouveau_fence_sync() local
     401  ret = reservation_object_reserve_shared(resv);  in nouveau_fence_sync()
     407  fobj = reservation_object_get_list(resv);  in nouveau_fence_sync()
     408  fence = reservation_object_get_excl(resv);  in nouveau_fence_sync()
     437  reservation_object_held(resv));  in nouveau_fence_sync()
|
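Note: nouveau_fence_sync() walks everything the reservation object tracks: the exclusive fence first, then the shared array, dereferenced with reservation_object_held() to satisfy RCU lockdep since the reservation lock is held. A generic sketch of that traversal with the per-fence policy (wait vs. pipelined sync) abstracted away:

    static int walk_resv_fences(struct reservation_object *resv,
                                int (*handle)(struct fence *f))
    {
            struct reservation_object_list *fobj;
            struct fence *fence;
            unsigned i;
            int ret;

            fence = reservation_object_get_excl(resv);
            if (fence) {
                    ret = handle(fence);
                    if (ret)
                            return ret;
            }

            fobj = reservation_object_get_list(resv);
            if (!fobj)
                    return 0;

            for (i = 0; i < fobj->shared_count; ++i) {
                    fence = rcu_dereference_protected(fobj->shared[i],
                                            reservation_object_held(resv));
                    ret = handle(fence);
                    if (ret)
                            return ret;
            }
            return 0;
    }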
D | nouveau_gem.c |
     118  struct reservation_object *resv = nvbo->bo.resv;  in nouveau_gem_object_unmap() local
     122  fobj = reservation_object_get_list(resv);  in nouveau_gem_object_unmap()
     130  reservation_object_held(resv));  in nouveau_gem_object_unmap()
     132  fence = reservation_object_get_excl(nvbo->bo.resv);  in nouveau_gem_object_unmap()
     874  ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;  in nouveau_gem_ioctl_cpu_prep()
     878  lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);  in nouveau_gem_ioctl_cpu_prep()
|
D | nouveau_bo.c |
    1282  struct fence *fence = reservation_object_get_excl(bo->resv);  in nouveau_bo_vm_cleanup()
    1595  struct reservation_object *resv = nvbo->bo.resv;  in nouveau_bo_fence() local
    1598  reservation_object_add_excl_fence(resv, &fence->base);  in nouveau_bo_fence()
    1600  reservation_object_add_shared_fence(resv, &fence->base);  in nouveau_bo_fence()
|
/linux-4.1.27/include/rdma/ |
D | ib_smi.h |
      55  __be16 resv;  member
     118  u8 resv;  member
|
D | ib_mad.h | 149 __be16 resv; member
|
/linux-4.1.27/include/linux/ |
D | dma-buf.h |
     134  struct reservation_object *resv;  member
     182  struct reservation_object *resv;  member
|
/linux-4.1.27/drivers/gpu/drm/msm/ |
D | msm_gem_submit.c |
     130  ww_mutex_unlock(&msm_obj->resv->lock);  in submit_unlock_unpin_bo()
     156  ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,  in submit_validate_objects()
     202  ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,  in submit_validate_objects()
|
D | msm_gem.h | 61 struct reservation_object *resv; member
|
D | msm_gem.c |
     548  if (msm_obj->resv == &msm_obj->_resv)  in msm_gem_free_object()
     549  reservation_object_fini(msm_obj->resv);  in msm_gem_free_object()
     623  msm_obj->resv = &msm_obj->_resv;  in msm_gem_new_impl()
     624  reservation_object_init(msm_obj->resv);  in msm_gem_new_impl()
|
/linux-4.1.27/fs/nfsd/ |
D | nfscache.c |
     541  struct kvec *resv = &rqstp->rq_res.head[0], *cachv;  in nfsd_cache_update() local
     553  len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);  in nfsd_cache_update()
|
/linux-4.1.27/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_resource.c |
     580  return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;  in vmw_user_dmabuf_synccpu_grab()
     582  lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);  in vmw_user_dmabuf_synccpu_grab()
    1187  lockdep_assert_held(&res->backup->base.resv->lock.base);  in vmw_resource_unreserve()
    1193  lockdep_assert_held(&new_backup->base.resv->lock.base);  in vmw_resource_unreserve()
    1450  reservation_object_add_excl_fence(bo->resv, &fence->base);  in vmw_fence_single_bo()
    1453  reservation_object_add_excl_fence(bo->resv, &fence->base);  in vmw_fence_single_bo()
|
D | vmwgfx_dmabuf.c | 307 lockdep_assert_held(&bo->resv->lock.base); in vmw_bo_pin()
|
/linux-4.1.27/drivers/gpu/drm/qxl/ |
D | qxl_debugfs.c | 65 fobj = rcu_dereference(bo->tbo.resv->fence); in qxl_debugfs_buffers_info()
|
D | qxl_release.c |
     246  ret = reservation_object_reserve_shared(bo->tbo.resv);  in qxl_release_validate_bo()
     469  reservation_object_add_shared_fence(bo->resv, &release->base);  in qxl_release_fence_buffer_objects()
|
/linux-4.1.27/drivers/net/ethernet/rocker/ |
D | rocker.h | 110 u16 resv[5]; member
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_mad.h | 259 __be16 resv; member
|
/linux-4.1.27/arch/sparc/kernel/ |
D | pci_fire.c | 99 u64 resv[6]; member
|
D | mdesc.c | 53 u16 resv; member
|
/linux-4.1.27/arch/sparc/mm/ |
D | tsb.c | 257 hp->resv = 0; in setup_tsb_params()
|
D | init_64.c |
    1867  ktsb_descr[0].resv = 0;  in sun4v_ktsb_init()
    1884  ktsb_descr[1].resv = 0;  in sun4v_ktsb_init()
|
/linux-4.1.27/drivers/gpu/drm/ |
D | drm_prime.c | 350 exp_info.resv = dev->driver->gem_prime_res_obj(obj); in drm_gem_prime_export()
|
/linux-4.1.27/drivers/infiniband/hw/ehca/ |
D | ehca_reqs.c | 134 mad_hdr->resv, in trace_send_wr_ud()
|
/linux-4.1.27/drivers/scsi/bfa/ |
D | bfa_defs_svc.h | 1054 u8 resv[2]; member
|
/linux-4.1.27/drivers/scsi/lpfc/ |
D | lpfc_hw.h |
     969  uint8_t resv;  member
     977  uint8_t resv;
|
D | lpfc_els.c | 4704 switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) { in lpfc_rscn_payload_check()
|
/linux-4.1.27/include/linux/mlx4/ |
D | device.h | 937 __be16 resv; member
|
/linux-4.1.27/drivers/block/ |
D | nvme-scsi.c |
    2880  u32 resv;  member
    2886  u32 resv;  member
|
/linux-4.1.27/drivers/scsi/ |
D | ipr.h | 1233 u8 resv; member
|
/linux-4.1.27/drivers/iommu/ |
D | amd_iommu_init.c | 126 u64 resv; member
|
/linux-4.1.27/drivers/net/ethernet/sun/ |
D | niu.h | 2821 __le64 resv; member
|
D | niu.c | 6678 tp->resv = 0; in niu_start_xmit()
|