Lines matching refs: dev_priv
Each entry gives the source line number, the matching source text, and the enclosing function; a trailing "local" or "argument" marks lines where dev_priv is declared rather than merely used.
36 struct vmw_private *dev_priv = vmw_priv(dev); in vmw_irq_handler() local
39 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); in vmw_irq_handler()
40 masked_status = status & READ_ONCE(dev_priv->irq_mask); in vmw_irq_handler()
43 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); in vmw_irq_handler()
50 vmw_fences_update(dev_priv->fman); in vmw_irq_handler()
51 wake_up_all(&dev_priv->fence_queue); in vmw_irq_handler()
55 wake_up_all(&dev_priv->fifo_queue); in vmw_irq_handler()
59 vmw_cmdbuf_tasklet_schedule(dev_priv->cman); in vmw_irq_handler()
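
Read together, the handler lines above follow the usual top-half pattern: latch the status port, mask it against the currently enabled interrupts, ack by writing the status back, then wake whichever wait queue the masked bits select. A minimal sketch of that shape; the SVGA_IRQFLAG_* tests and the return values are assumptions filled in around the listed dev_priv lines:

    irqreturn_t vmw_irq_handler(int irq, void *arg)
    {
            struct drm_device *dev = (struct drm_device *)arg;
            struct vmw_private *dev_priv = vmw_priv(dev);
            uint32_t status, masked_status;

            /* Latch pending interrupt bits from the device I/O port. */
            status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
            masked_status = status & READ_ONCE(dev_priv->irq_mask);

            /* Ack everything we saw by writing the bits back. */
            if (likely(status))
                    outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

            if (!masked_status)
                    return IRQ_NONE;

            /* Fence progress: refresh fence state, wake fence waiters. */
            if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                                 SVGA_IRQFLAG_FENCE_GOAL)) {
                    vmw_fences_update(dev_priv->fman);
                    wake_up_all(&dev_priv->fence_queue);
            }

            /* FIFO made progress: wake tasks blocked on FIFO space. */
            if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                    wake_up_all(&dev_priv->fifo_queue);

            /* Command-buffer completion: defer heavy work to a tasklet. */
            if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
                                 SVGA_IRQFLAG_ERROR))
                    vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

            return IRQ_HANDLED;
    }
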
64 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) in vmw_fifo_idle() argument
67 return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); in vmw_fifo_idle()
70 void vmw_update_seqno(struct vmw_private *dev_priv, in vmw_update_seqno() argument
73 u32 *fifo_mem = dev_priv->mmio_virt; in vmw_update_seqno()
76 if (dev_priv->last_read_seqno != seqno) { in vmw_update_seqno()
77 dev_priv->last_read_seqno = seqno; in vmw_update_seqno()
79 vmw_fences_update(dev_priv->fman); in vmw_update_seqno()
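
vmw_fifo_idle is a one-line register test, and vmw_update_seqno pulls the latest fence sequence number out of FIFO MMIO memory before notifying the fence manager. A sketch assembling the fragments; the SVGA_FIFO_FENCE index and the vmw_mmio_read() accessor are assumptions beyond what the listing shows:

    static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
    {
            /* The device reports global idleness, not per-seqno progress. */
            return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
    }

    void vmw_update_seqno(struct vmw_private *dev_priv,
                          struct vmw_fifo_state *fifo_state)
    {
            u32 *fifo_mem = dev_priv->mmio_virt;
            uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

            if (dev_priv->last_read_seqno != seqno) {
                    dev_priv->last_read_seqno = seqno;
                    /* Propagate the new value to pending fence objects. */
                    vmw_fences_update(dev_priv->fman);
            }
    }
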
83 bool vmw_seqno_passed(struct vmw_private *dev_priv, in vmw_seqno_passed() argument
89 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) in vmw_seqno_passed()
92 fifo_state = &dev_priv->fifo; in vmw_seqno_passed()
93 vmw_update_seqno(dev_priv, fifo_state); in vmw_seqno_passed()
94 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) in vmw_seqno_passed()
98 vmw_fifo_idle(dev_priv, seqno)) in vmw_seqno_passed()
106 ret = ((atomic_read(&dev_priv->marker_seq) - seqno) in vmw_seqno_passed()
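
The two identical VMW_FENCE_WRAP comparisons are the cheap path: unsigned subtraction makes the test wrap-safe as long as the distance between the cached last_read_seqno and the queried seqno stays below VMW_FENCE_WRAP. Only when both fail does the code fall back to the idle test and, last, to comparing against the highest seqno ever emitted (marker_seq), since anything beyond that must be stale. A sketch; the SVGA_FIFO_CAP_FENCE capability test is an assumption:

    bool vmw_seqno_passed(struct vmw_private *dev_priv, uint32_t seqno)
    {
            struct vmw_fifo_state *fifo_state;

            /* Fast path: wrap-safe unsigned comparison against the cache. */
            if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                    return true;

            /* Refresh the cached seqno from the device and retry. */
            fifo_state = &dev_priv->fifo;
            vmw_update_seqno(dev_priv, fifo_state);
            if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                    return true;

            /* No fence support: a fully idle device implies completion. */
            if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
                vmw_fifo_idle(dev_priv, seqno))
                    return true;

            /* A seqno beyond anything ever emitted is stale, so signaled. */
            return ((atomic_read(&dev_priv->marker_seq) - seqno)
                    > VMW_FENCE_WRAP);
    }
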
112 int vmw_fallback_wait(struct vmw_private *dev_priv, in vmw_fallback_wait() argument
119 struct vmw_fifo_state *fifo_state = &dev_priv->fifo; in vmw_fallback_wait()
137 if (dev_priv->cman) { in vmw_fallback_wait()
138 ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible, in vmw_fallback_wait()
145 signal_seq = atomic_read(&dev_priv->marker_seq); in vmw_fallback_wait()
149 prepare_to_wait(&dev_priv->fence_queue, &__wait, in vmw_fallback_wait()
152 if (wait_condition(dev_priv, seqno)) in vmw_fallback_wait()
177 finish_wait(&dev_priv->fence_queue, &__wait); in vmw_fallback_wait()
179 u32 *fifo_mem = dev_priv->mmio_virt; in vmw_fallback_wait()
183 wake_up_all(&dev_priv->fence_queue); in vmw_fallback_wait()
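
vmw_fallback_wait is the polling path used when the device cannot interrupt: an open-coded prepare_to_wait()/finish_wait() loop that re-tests the completion condition, honors the timeout and pending signals, and finally publishes the signaled seqno back into FIFO memory. A condensed sketch; the FIFO locking and the non-lazy busy-poll throttling of the original are omitted, and vmw_mmio_write() plus the exact error paths are assumptions:

    int vmw_fallback_wait(struct vmw_private *dev_priv, bool lazy,
                          bool fifo_idle, uint32_t seqno,
                          bool interruptible, unsigned long timeout)
    {
            unsigned long end_jiffies = jiffies + timeout;
            bool (*wait_condition)(struct vmw_private *, uint32_t);
            uint32_t signal_seq;
            int ret = 0;
            DEFINE_WAIT(__wait);

            /* Pick the completion test: global idle vs. seqno progress. */
            wait_condition = fifo_idle ? &vmw_fifo_idle : &vmw_seqno_passed;

            /* An idle wait must first drain the command-buffer manager. */
            if (fifo_idle && dev_priv->cman) {
                    ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
                                          10 * HZ);
                    if (ret)
                            return ret;
            }

            signal_seq = atomic_read(&dev_priv->marker_seq);

            for (;;) {
                    prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                    interruptible ? TASK_INTERRUPTIBLE :
                                                    TASK_UNINTERRUPTIBLE);
                    if (wait_condition(dev_priv, seqno))
                            break;
                    if (time_after_eq(jiffies, end_jiffies))
                            break;               /* treat as device lockup */
                    if (lazy)
                            schedule_timeout(1); /* poll at jiffy rate */
                    if (interruptible && signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }
            }
            finish_wait(&dev_priv->fence_queue, &__wait);

            /* On a successful idle wait, publish the seqno to FIFO memory. */
            if (ret == 0 && fifo_idle) {
                    u32 *fifo_mem = dev_priv->mmio_virt;

                    vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
            }
            wake_up_all(&dev_priv->fence_queue);
            return ret;
    }
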
191 void vmw_generic_waiter_add(struct vmw_private *dev_priv, in vmw_generic_waiter_add() argument
194 spin_lock_bh(&dev_priv->waiter_lock); in vmw_generic_waiter_add()
196 outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); in vmw_generic_waiter_add()
197 dev_priv->irq_mask |= flag; in vmw_generic_waiter_add()
198 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); in vmw_generic_waiter_add()
200 spin_unlock_bh(&dev_priv->waiter_lock); in vmw_generic_waiter_add()
203 void vmw_generic_waiter_remove(struct vmw_private *dev_priv, in vmw_generic_waiter_remove() argument
206 spin_lock_bh(&dev_priv->waiter_lock); in vmw_generic_waiter_remove()
208 dev_priv->irq_mask &= ~flag; in vmw_generic_waiter_remove()
209 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); in vmw_generic_waiter_remove()
211 spin_unlock_bh(&dev_priv->waiter_lock); in vmw_generic_waiter_remove()
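
The listing skips the guard lines (195 and 207), but the visible shape strongly suggests a reference count per interrupt flag: only the first waiter clears stale status bits and enables the flag in SVGA_REG_IRQMASK, and only the last one disables it, all under the bottom-half-safe waiter_lock. A sketch under that assumption, with the flag/waiter_count parameter names inferred from the call sites below:

    void vmw_generic_waiter_add(struct vmw_private *dev_priv,
                                u32 flag, int *waiter_count)
    {
            spin_lock_bh(&dev_priv->waiter_lock);
            if ((*waiter_count)++ == 0) {
                    /* Ack any stale status, then unmask the interrupt. */
                    outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                    dev_priv->irq_mask |= flag;
                    vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
            }
            spin_unlock_bh(&dev_priv->waiter_lock);
    }

    void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                                   u32 flag, int *waiter_count)
    {
            spin_lock_bh(&dev_priv->waiter_lock);
            if (--(*waiter_count) == 0) {
                    /* Last waiter gone: mask the interrupt again. */
                    dev_priv->irq_mask &= ~flag;
                    vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
            }
            spin_unlock_bh(&dev_priv->waiter_lock);
    }
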
214 void vmw_seqno_waiter_add(struct vmw_private *dev_priv) in vmw_seqno_waiter_add() argument
216 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE, in vmw_seqno_waiter_add()
217 &dev_priv->fence_queue_waiters); in vmw_seqno_waiter_add()
220 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv) in vmw_seqno_waiter_remove() argument
222 vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE, in vmw_seqno_waiter_remove()
223 &dev_priv->fence_queue_waiters); in vmw_seqno_waiter_remove()
226 void vmw_goal_waiter_add(struct vmw_private *dev_priv) in vmw_goal_waiter_add() argument
228 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL, in vmw_goal_waiter_add()
229 &dev_priv->goal_queue_waiters); in vmw_goal_waiter_add()
232 void vmw_goal_waiter_remove(struct vmw_private *dev_priv) in vmw_goal_waiter_remove() argument
234 vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL, in vmw_goal_waiter_remove()
235 &dev_priv->goal_queue_waiters); in vmw_goal_waiter_remove()
238 int vmw_wait_seqno(struct vmw_private *dev_priv, in vmw_wait_seqno() argument
243 struct vmw_fifo_state *fifo = &dev_priv->fifo; in vmw_wait_seqno()
245 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) in vmw_wait_seqno()
248 if (likely(vmw_seqno_passed(dev_priv, seqno))) in vmw_wait_seqno()
251 vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); in vmw_wait_seqno()
254 return vmw_fallback_wait(dev_priv, lazy, true, seqno, in vmw_wait_seqno()
257 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) in vmw_wait_seqno()
258 return vmw_fallback_wait(dev_priv, lazy, false, seqno, in vmw_wait_seqno()
261 vmw_seqno_waiter_add(dev_priv); in vmw_wait_seqno()
265 (dev_priv->fence_queue, in vmw_wait_seqno()
266 vmw_seqno_passed(dev_priv, seqno), in vmw_wait_seqno()
270 (dev_priv->fence_queue, in vmw_wait_seqno()
271 vmw_seqno_passed(dev_priv, seqno), in vmw_wait_seqno()
274 vmw_seqno_waiter_remove(dev_priv); in vmw_wait_seqno()
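
vmw_wait_seqno ties the pieces together: cheap seqno checks first, a ping so the host actually processes the FIFO, the polling fallback when fences or IRQ masking are unsupported, and otherwise an interruptible or uninterruptible wait_event bracketed by the waiter add/remove so the fence interrupt is enabled only while someone needs it. A sketch; the SVGA_FIFO_CAP_FENCE test and the return-value mapping are assumptions:

    int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
                       uint32_t seqno, bool interruptible,
                       unsigned long timeout)
    {
            struct vmw_fifo_state *fifo = &dev_priv->fifo;
            long ret;

            if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                    return 0;
            if (likely(vmw_seqno_passed(dev_priv, seqno)))
                    return 0;

            /* Make sure the host looks at the FIFO at all. */
            vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

            if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                    return vmw_fallback_wait(dev_priv, lazy, true, seqno,
                                             interruptible, timeout);
            if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                    return vmw_fallback_wait(dev_priv, lazy, false, seqno,
                                             interruptible, timeout);

            /* Enable the fence interrupt only while we actually wait. */
            vmw_seqno_waiter_add(dev_priv);
            if (interruptible)
                    ret = wait_event_interruptible_timeout(
                            dev_priv->fence_queue,
                            vmw_seqno_passed(dev_priv, seqno), timeout);
            else
                    ret = wait_event_timeout(
                            dev_priv->fence_queue,
                            vmw_seqno_passed(dev_priv, seqno), timeout);
            vmw_seqno_waiter_remove(dev_priv);

            if (unlikely(ret == 0))
                    return -EBUSY;          /* timed out */
            return (ret > 0) ? 0 : ret;     /* done, or -ERESTARTSYS */
    }
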
286 struct vmw_private *dev_priv = vmw_priv(dev); in vmw_irq_preinstall() local
289 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) in vmw_irq_preinstall()
292 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); in vmw_irq_preinstall()
293 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); in vmw_irq_preinstall()
303 struct vmw_private *dev_priv = vmw_priv(dev); in vmw_irq_uninstall() local
306 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) in vmw_irq_uninstall()
309 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); in vmw_irq_uninstall()
311 status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); in vmw_irq_uninstall()
312 outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); in vmw_irq_uninstall()
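
Finally, the install/uninstall hooks are symmetric housekeeping: both bail out when the device cannot mask interrupts, and both clear any pending status by reading the port and writing the value back; uninstall additionally writes 0 to SVGA_REG_IRQMASK so nothing fires after teardown. A sketch of the pair; the drm_device-based signatures are assumptions:

    void vmw_irq_preinstall(struct drm_device *dev)
    {
            struct vmw_private *dev_priv = vmw_priv(dev);
            u32 status;

            if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                    return;

            /* Ack anything already pending before the handler is live. */
            status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
            outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
    }

    void vmw_irq_uninstall(struct drm_device *dev)
    {
            struct vmw_private *dev_priv = vmw_priv(dev);
            u32 status;

            if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                    return;

            /* Mask everything, then ack whatever was still pending. */
            vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
            status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
            outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
    }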