Lines Matching refs:man
145 struct vmw_cmdbuf_man *man; member
187 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
196 static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible) in vmw_cmdbuf_cur_lock() argument
199 if (mutex_lock_interruptible(&man->cur_mutex)) in vmw_cmdbuf_cur_lock()
202 mutex_lock(&man->cur_mutex); in vmw_cmdbuf_cur_lock()
213 static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_cur_unlock() argument
215 mutex_unlock(&man->cur_mutex); in vmw_cmdbuf_cur_unlock()
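
The two helpers above are the manager's current-buffer locking primitives. Pieced together from the fragments alone (only the -ERESTARTSYS return value is an assumption, following the usual mutex_lock_interruptible() convention), they amount to:

static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}
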
234 dma_pool_free(header->man->dheaders, dheader, header->handle); in vmw_cmdbuf_header_inline_free()
248 struct vmw_cmdbuf_man *man = header->man; in __vmw_cmdbuf_header_free() local
250 lockdep_assert_held_once(&man->lock); in __vmw_cmdbuf_header_free()
258 wake_up_all(&man->alloc_queue); in __vmw_cmdbuf_header_free()
260 dma_pool_free(man->headers, header->cb_header, in __vmw_cmdbuf_header_free()
273 struct vmw_cmdbuf_man *man = header->man; in vmw_cmdbuf_header_free() local
280 spin_lock_bh(&man->lock); in vmw_cmdbuf_header_free()
282 spin_unlock_bh(&man->lock); in vmw_cmdbuf_header_free()
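
Freeing has two paths: inline headers are returned straight to the dheaders DMA pool, while pool-backed headers must be freed under man->lock so the allocator state and the alloc_queue wakeup stay consistent. A minimal sketch of the public wrapper; the inline_space flag used to pick the path is an assumption:

void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Assumed flag: inline headers carry no pool state, skip the lock. */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	spin_lock_bh(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock_bh(&man->lock);
}
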
293 struct vmw_cmdbuf_man *man = header->man; in vmw_cmdbuf_header_submit() local
300 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); in vmw_cmdbuf_header_submit()
304 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); in vmw_cmdbuf_header_submit()
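
Submission is just two register writes: the 64-bit DMA handle of the command-buffer header is split across SVGA_REG_COMMAND_HIGH and SVGA_REG_COMMAND_LOW, and the low write is what actually kicks the device. A sketch; packing the context id into the low word is an assumption based on the register pairing:

static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	/* Assumed: the low word also carries the target context id. */
	val = lower_32_bits(header->handle) | header->cb_context;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}
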
332 static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_submit() argument
335 while (ctx->num_hw_submitted < man->max_hw_submitted && in vmw_cmdbuf_ctx_submit()
369 static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_process() argument
375 vmw_cmdbuf_ctx_submit(man, ctx); in vmw_cmdbuf_ctx_process()
384 wake_up_all(&man->idle_queue); in vmw_cmdbuf_ctx_process()
392 list_add_tail(&entry->list, &man->error); in vmw_cmdbuf_ctx_process()
393 schedule_work(&man->work); in vmw_cmdbuf_ctx_process()
405 vmw_cmdbuf_ctx_submit(man, ctx); in vmw_cmdbuf_ctx_process()
420 static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_man_process() argument
428 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_man_process()
429 vmw_cmdbuf_ctx_process(man, ctx, &notempty); in vmw_cmdbuf_man_process()
431 if (man->irq_on && !notempty) { in vmw_cmdbuf_man_process()
432 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_man_process()
434 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_man_process()
435 man->irq_on = false; in vmw_cmdbuf_man_process()
436 } else if (!man->irq_on && notempty) { in vmw_cmdbuf_man_process()
437 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_man_process()
439 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_man_process()
440 man->irq_on = true; in vmw_cmdbuf_man_process()
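
The fragments above give the manager's main processing loop almost verbatim: process every context, then reconcile the command-buffer IRQ waiter with whether any context still has work pending. Only the recursive re-run after arming the IRQ is an assumption (it closes the race with an interrupt that fired before the waiter was registered):

static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_context *ctx;
	bool notempty = false;
	int i;

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		/* All contexts drained: disarm the IRQ. */
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		/* Work pending: arm the IRQ, then re-run once. */
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		vmw_cmdbuf_man_process(man);
	}
}
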
460 static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_ctx_add() argument
467 list_add_tail(&header->list, &man->ctx[cb_context].submitted); in vmw_cmdbuf_ctx_add()
469 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_ctx_add()
485 struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data; in vmw_cmdbuf_man_tasklet() local
487 spin_lock(&man->lock); in vmw_cmdbuf_man_tasklet()
488 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_man_tasklet()
489 spin_unlock(&man->lock); in vmw_cmdbuf_man_tasklet()
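
The tasklet body is fully visible in the matches; it only grabs the manager lock and processes. Note the plain spin_lock() here versus spin_lock_bh() elsewhere: the tasklet already runs in softirq context, so bottom halves need not be disabled again. Reconstructed:

static void vmw_cmdbuf_man_tasklet(unsigned long data)
{
	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}
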
503 struct vmw_cmdbuf_man *man = in vmw_cmdbuf_work_func() local
509 spin_lock_bh(&man->lock); in vmw_cmdbuf_work_func()
510 list_for_each_entry_safe(entry, next, &man->error, list) { in vmw_cmdbuf_work_func()
516 wake_up_all(&man->idle_queue); in vmw_cmdbuf_work_func()
518 spin_unlock_bh(&man->lock); in vmw_cmdbuf_work_func()
520 if (restart && vmw_cmdbuf_startstop(man, true)) in vmw_cmdbuf_work_func()
524 vmw_fifo_send_fence(man->dev_priv, &dummy); in vmw_cmdbuf_work_func()
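
The error worker drains man->error under the lock, wakes idle waiters, then restarts context 0 and sends a fresh fence so nothing waits on a fence the error swallowed. A hedged sketch; the container_of() derivation of man and the DRM_ERROR messages are assumptions:

static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	bool restart = false;
	uint32_t dummy;

	spin_lock_bh(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		restart = true;
		DRM_ERROR("Command buffer error.\n");

		list_del(&entry->list);
		__vmw_cmdbuf_header_free(entry);
		wake_up_all(&man->idle_queue);
	}
	spin_unlock_bh(&man->lock);

	if (restart && vmw_cmdbuf_startstop(man, true))
		DRM_ERROR("Failed restarting command buffer context 0.\n");

	/* Send a new fence in case one was removed by the error. */
	vmw_fifo_send_fence(man->dev_priv, &dummy);
}
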
534 static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_man_idle() argument
541 spin_lock_bh(&man->lock); in vmw_cmdbuf_man_idle()
542 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_man_idle()
543 for_each_cmdbuf_ctx(man, i, ctx) { in vmw_cmdbuf_man_idle()
550 idle = list_empty(&man->error); in vmw_cmdbuf_man_idle()
553 spin_unlock_bh(&man->lock); in vmw_cmdbuf_man_idle()
567 static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man) in __vmw_cmdbuf_cur_flush() argument
569 struct vmw_cmdbuf_header *cur = man->cur; in __vmw_cmdbuf_cur_flush()
571 WARN_ON(!mutex_is_locked(&man->cur_mutex)); in __vmw_cmdbuf_cur_flush()
576 spin_lock_bh(&man->lock); in __vmw_cmdbuf_cur_flush()
577 if (man->cur_pos == 0) { in __vmw_cmdbuf_cur_flush()
582 man->cur->cb_header->length = man->cur_pos; in __vmw_cmdbuf_cur_flush()
583 vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0); in __vmw_cmdbuf_cur_flush()
585 spin_unlock_bh(&man->lock); in __vmw_cmdbuf_cur_flush()
586 man->cur = NULL; in __vmw_cmdbuf_cur_flush()
587 man->cur_pos = 0; in __vmw_cmdbuf_cur_flush()
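
The unlocked flush hands the partially filled current buffer over to context 0's submitted queue; if nothing was written (cur_pos == 0) the buffer is freed instead of submitted. Reconstruction from the fragments; the early return when there is no current buffer and the goto label are assumptions:

static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	WARN_ON(!mutex_is_locked(&man->cur_mutex));

	if (!cur)
		return;

	spin_lock_bh(&man->lock);
	if (man->cur_pos == 0) {
		/* Nothing written; just recycle the buffer. */
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock_bh(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}
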
600 int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_cur_flush() argument
603 int ret = vmw_cmdbuf_cur_lock(man, interruptible); in vmw_cmdbuf_cur_flush()
608 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_cur_flush()
609 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_cur_flush()
625 int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, in vmw_cmdbuf_idle() argument
630 ret = vmw_cmdbuf_cur_flush(man, interruptible); in vmw_cmdbuf_idle()
631 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_idle()
633 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_idle()
637 (man->idle_queue, vmw_cmdbuf_man_idle(man, true), in vmw_cmdbuf_idle()
641 (man->idle_queue, vmw_cmdbuf_man_idle(man, true), in vmw_cmdbuf_idle()
644 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_idle()
646 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_idle()
648 if (!vmw_cmdbuf_man_idle(man, true)) in vmw_cmdbuf_idle()
669 static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_try_alloc() argument
678 spin_lock_bh(&man->lock); in vmw_cmdbuf_try_alloc()
679 ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size, in vmw_cmdbuf_try_alloc()
684 vmw_cmdbuf_man_process(man); in vmw_cmdbuf_try_alloc()
685 ret = drm_mm_insert_node_generic(&man->mm, info->node, in vmw_cmdbuf_try_alloc()
691 spin_unlock_bh(&man->lock); in vmw_cmdbuf_try_alloc()
709 static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_alloc_space() argument
725 if (mutex_lock_interruptible(&man->space_mutex)) in vmw_cmdbuf_alloc_space()
728 mutex_lock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
732 if (vmw_cmdbuf_try_alloc(man, &info)) in vmw_cmdbuf_alloc_space()
735 vmw_generic_waiter_add(man->dev_priv, in vmw_cmdbuf_alloc_space()
737 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
743 (man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); in vmw_cmdbuf_alloc_space()
746 (man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER, in vmw_cmdbuf_alloc_space()
747 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
748 mutex_unlock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
752 wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info)); in vmw_cmdbuf_alloc_space()
754 vmw_generic_waiter_remove(man->dev_priv, in vmw_cmdbuf_alloc_space()
756 &man->dev_priv->cmdbuf_waiters); in vmw_cmdbuf_alloc_space()
759 mutex_unlock(&man->space_mutex); in vmw_cmdbuf_alloc_space()
773 static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_space_pool() argument
782 if (!man->has_pool) in vmw_cmdbuf_space_pool()
785 ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible); in vmw_cmdbuf_space_pool()
790 header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, in vmw_cmdbuf_space_pool()
800 header->cmd = man->map + offset; in vmw_cmdbuf_space_pool()
802 if (man->using_mob) { in vmw_cmdbuf_space_pool()
804 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; in vmw_cmdbuf_space_pool()
807 cb_hdr->ptr.pa = (u64)man->handle + (u64)offset; in vmw_cmdbuf_space_pool()
813 spin_lock_bh(&man->lock); in vmw_cmdbuf_space_pool()
815 spin_unlock_bh(&man->lock); in vmw_cmdbuf_space_pool()
828 static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_space_inline() argument
838 dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, in vmw_cmdbuf_space_inline()
870 void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_alloc() argument
884 ret = vmw_cmdbuf_space_inline(man, header, size); in vmw_cmdbuf_alloc()
886 ret = vmw_cmdbuf_space_pool(man, header, size, interruptible); in vmw_cmdbuf_alloc()
893 header->man = man; in vmw_cmdbuf_alloc()
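
The public allocator dispatches on size: anything up to VMW_CMDBUF_INLINE_SIZE gets an inline header from the dheaders pool, larger requests come from the main pool. Sketch; the kzalloc/ERR_PTR error handling is an assumption:

void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	*p_header = header;

	return header->cmd;
}
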
913 static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_reserve_cur() argument
921 if (vmw_cmdbuf_cur_lock(man, interruptible)) in vmw_cmdbuf_reserve_cur()
924 cur = man->cur; in vmw_cmdbuf_reserve_cur()
925 if (cur && (size + man->cur_pos > cur->size || in vmw_cmdbuf_reserve_cur()
928 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_reserve_cur()
930 if (!man->cur) { in vmw_cmdbuf_reserve_cur()
931 ret = vmw_cmdbuf_alloc(man, in vmw_cmdbuf_reserve_cur()
932 max_t(size_t, size, man->default_size), in vmw_cmdbuf_reserve_cur()
933 interruptible, &man->cur); in vmw_cmdbuf_reserve_cur()
935 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_reserve_cur()
939 cur = man->cur; in vmw_cmdbuf_reserve_cur()
949 return (void *) (man->cur->cmd + man->cur_pos); in vmw_cmdbuf_reserve_cur()
959 static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_commit_cur() argument
962 struct vmw_cmdbuf_header *cur = man->cur; in vmw_cmdbuf_commit_cur()
964 WARN_ON(!mutex_is_locked(&man->cur_mutex)); in vmw_cmdbuf_commit_cur()
967 man->cur_pos += size; in vmw_cmdbuf_commit_cur()
971 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit_cur()
972 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_commit_cur()
988 void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size, in vmw_cmdbuf_reserve() argument
993 return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible); in vmw_cmdbuf_reserve()
1016 void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size, in vmw_cmdbuf_commit() argument
1020 vmw_cmdbuf_commit_cur(man, size, flush); in vmw_cmdbuf_commit()
1024 (void) vmw_cmdbuf_cur_lock(man, false); in vmw_cmdbuf_commit()
1025 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit()
1027 man->cur = header; in vmw_cmdbuf_commit()
1028 man->cur_pos = size; in vmw_cmdbuf_commit()
1032 __vmw_cmdbuf_cur_flush(man); in vmw_cmdbuf_commit()
1033 vmw_cmdbuf_cur_unlock(man); in vmw_cmdbuf_commit()
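
A typical caller pairs vmw_cmdbuf_reserve() with vmw_cmdbuf_commit(): reserve space in the current buffer, fill in the command, then commit the bytes actually written. A purely hypothetical usage fragment (struct my_cmd, the error-handling convention, and passing a NULL header to use the manager's current buffer are all illustrative assumptions):

	struct my_cmd *cmd;	/* hypothetical command struct */

	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), ctx_id, true, NULL);
	if (IS_ERR_OR_NULL(cmd))
		return cmd ? PTR_ERR(cmd) : -ERESTARTSYS;

	/* ... fill in *cmd ... */

	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);
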
1041 void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_tasklet_schedule() argument
1043 if (!man) in vmw_cmdbuf_tasklet_schedule()
1046 tasklet_schedule(&man->tasklet); in vmw_cmdbuf_tasklet_schedule()
1058 static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_send_device_command() argument
1064 void *cmd = vmw_cmdbuf_alloc(man, size, false, &header); in vmw_cmdbuf_send_device_command()
1072 spin_lock_bh(&man->lock); in vmw_cmdbuf_send_device_command()
1074 spin_unlock_bh(&man->lock); in vmw_cmdbuf_send_device_command()
1095 static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_startstop() argument
1107 return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd)); in vmw_cmdbuf_startstop()
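
Starting and stopping a context is itself a device command, sent synchronously through vmw_cmdbuf_send_device_command(). A sketch; the SVGA_DC_CMD_START_STOP_CONTEXT id and the SVGADCCmdStartStop body are assumed from the SVGA header naming conventions:

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable)
{
	struct {
		uint32 id;			/* assumed device-command id field */
		SVGADCCmdStartStop body;	/* assumed body struct */
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}
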
1125 int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, in vmw_cmdbuf_set_pool_size() argument
1128 struct vmw_private *dev_priv = man->dev_priv; in vmw_cmdbuf_set_pool_size()
1132 if (man->has_pool) in vmw_cmdbuf_set_pool_size()
1137 man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size, in vmw_cmdbuf_set_pool_size()
1138 &man->handle, GFP_KERNEL); in vmw_cmdbuf_set_pool_size()
1139 if (man->map) { in vmw_cmdbuf_set_pool_size()
1140 man->using_mob = false; in vmw_cmdbuf_set_pool_size()
1153 &man->cmd_space); in vmw_cmdbuf_set_pool_size()
1157 man->using_mob = true; in vmw_cmdbuf_set_pool_size()
1158 ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT, in vmw_cmdbuf_set_pool_size()
1159 &man->map_obj); in vmw_cmdbuf_set_pool_size()
1163 man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy); in vmw_cmdbuf_set_pool_size()
1166 man->size = size; in vmw_cmdbuf_set_pool_size()
1167 drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT); in vmw_cmdbuf_set_pool_size()
1169 man->has_pool = true; in vmw_cmdbuf_set_pool_size()
1177 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_set_pool_size()
1179 (man->using_mob) ? "MOB" : "DMA"); in vmw_cmdbuf_set_pool_size()
1184 if (man->using_mob) in vmw_cmdbuf_set_pool_size()
1185 ttm_bo_unref(&man->cmd_space); in vmw_cmdbuf_set_pool_size()
1202 struct vmw_cmdbuf_man *man; in vmw_cmdbuf_man_create() local
1210 man = kzalloc(sizeof(*man), GFP_KERNEL); in vmw_cmdbuf_man_create()
1211 if (!man) in vmw_cmdbuf_man_create()
1214 man->headers = dma_pool_create("vmwgfx cmdbuf", in vmw_cmdbuf_man_create()
1218 if (!man->headers) { in vmw_cmdbuf_man_create()
1223 man->dheaders = dma_pool_create("vmwgfx inline cmdbuf", in vmw_cmdbuf_man_create()
1227 if (!man->dheaders) { in vmw_cmdbuf_man_create()
1232 for_each_cmdbuf_ctx(man, i, ctx) in vmw_cmdbuf_man_create()
1235 INIT_LIST_HEAD(&man->error); in vmw_cmdbuf_man_create()
1236 spin_lock_init(&man->lock); in vmw_cmdbuf_man_create()
1237 mutex_init(&man->cur_mutex); in vmw_cmdbuf_man_create()
1238 mutex_init(&man->space_mutex); in vmw_cmdbuf_man_create()
1239 tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet, in vmw_cmdbuf_man_create()
1240 (unsigned long) man); in vmw_cmdbuf_man_create()
1241 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_man_create()
1242 init_waitqueue_head(&man->alloc_queue); in vmw_cmdbuf_man_create()
1243 init_waitqueue_head(&man->idle_queue); in vmw_cmdbuf_man_create()
1244 man->dev_priv = dev_priv; in vmw_cmdbuf_man_create()
1245 man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1; in vmw_cmdbuf_man_create()
1246 INIT_WORK(&man->work, &vmw_cmdbuf_work_func); in vmw_cmdbuf_man_create()
1249 ret = vmw_cmdbuf_startstop(man, true); in vmw_cmdbuf_man_create()
1252 vmw_cmdbuf_man_destroy(man); in vmw_cmdbuf_man_create()
1256 return man; in vmw_cmdbuf_man_create()
1259 dma_pool_destroy(man->headers); in vmw_cmdbuf_man_create()
1261 kfree(man); in vmw_cmdbuf_man_create()
1277 void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_remove_pool() argument
1279 if (!man->has_pool) in vmw_cmdbuf_remove_pool()
1282 man->has_pool = false; in vmw_cmdbuf_remove_pool()
1283 man->default_size = VMW_CMDBUF_INLINE_SIZE; in vmw_cmdbuf_remove_pool()
1284 (void) vmw_cmdbuf_idle(man, false, 10*HZ); in vmw_cmdbuf_remove_pool()
1285 if (man->using_mob) { in vmw_cmdbuf_remove_pool()
1286 (void) ttm_bo_kunmap(&man->map_obj); in vmw_cmdbuf_remove_pool()
1287 ttm_bo_unref(&man->cmd_space); in vmw_cmdbuf_remove_pool()
1289 dma_free_coherent(&man->dev_priv->dev->pdev->dev, in vmw_cmdbuf_remove_pool()
1290 man->size, man->map, man->handle); in vmw_cmdbuf_remove_pool()
1301 void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man) in vmw_cmdbuf_man_destroy() argument
1303 WARN_ON_ONCE(man->has_pool); in vmw_cmdbuf_man_destroy()
1304 (void) vmw_cmdbuf_idle(man, false, 10*HZ); in vmw_cmdbuf_man_destroy()
1305 if (vmw_cmdbuf_startstop(man, false)) in vmw_cmdbuf_man_destroy()
1308 vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR, in vmw_cmdbuf_man_destroy()
1309 &man->dev_priv->error_waiters); in vmw_cmdbuf_man_destroy()
1310 tasklet_kill(&man->tasklet); in vmw_cmdbuf_man_destroy()
1311 (void) cancel_work_sync(&man->work); in vmw_cmdbuf_man_destroy()
1312 dma_pool_destroy(man->dheaders); in vmw_cmdbuf_man_destroy()
1313 dma_pool_destroy(man->headers); in vmw_cmdbuf_man_destroy()
1314 mutex_destroy(&man->cur_mutex); in vmw_cmdbuf_man_destroy()
1315 mutex_destroy(&man->space_mutex); in vmw_cmdbuf_man_destroy()
1316 kfree(man); in vmw_cmdbuf_man_destroy()