Lines matching refs: dev_priv (i915 hotplug / HPD handling code)
121 static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, in intel_hpd_irq_storm_detect() argument
124 unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies; in intel_hpd_irq_storm_detect()
129 dev_priv->hotplug.stats[pin].last_jiffies = jiffies; in intel_hpd_irq_storm_detect()
130 dev_priv->hotplug.stats[pin].count = 0; in intel_hpd_irq_storm_detect()
132 } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) { in intel_hpd_irq_storm_detect()
133 dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED; in intel_hpd_irq_storm_detect()
137 dev_priv->hotplug.stats[pin].count++; in intel_hpd_irq_storm_detect()
139 dev_priv->hotplug.stats[pin].count); in intel_hpd_irq_storm_detect()
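
The fragment above (lines 121-139) is the per-pin storm detector: an HPD interrupt either restarts the counting window when the previous event recorded in last_jiffies is already outside the detection period, trips HPD_MARK_DISABLED once count exceeds HPD_STORM_THRESHOLD inside one window, or simply increments count. Below is a minimal user-space C sketch of that time-window counter; the period and threshold values, the struct layout, and the helper names are stand-ins, not the driver's definitions.

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define STORM_DETECT_PERIOD_MS 1000   /* stand-in for the detection window  */
    #define STORM_THRESHOLD        5      /* stand-in for HPD_STORM_THRESHOLD   */

    struct hpd_stats {                    /* models dev_priv->hotplug.stats[pin] */
        long long last_ms;                /* models last_jiffies                */
        int count;
        bool marked_disabled;             /* models state = HPD_MARK_DISABLED   */
    };

    static long long now_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
    }

    /* Returns true when this interrupt is judged to be part of a storm. */
    static bool hpd_storm_detect(struct hpd_stats *s)
    {
        long long now = now_ms();

        if (now - s->last_ms > STORM_DETECT_PERIOD_MS) {
            /* Window expired: restart counting from this event (lines 129-130). */
            s->last_ms = now;
            s->count = 0;
        } else if (s->count > STORM_THRESHOLD) {
            /* Too many events inside one window: flag the pin (lines 132-133). */
            s->marked_disabled = true;
            return true;
        } else {
            s->count++;                   /* line 137 */
        }
        return false;
    }

    int main(void)
    {
        struct hpd_stats s = { .last_ms = now_ms() };

        /* Feed a burst of events; the storm fires once the count passes the threshold. */
        for (int i = 0; i < 10; i++)
            if (hpd_storm_detect(&s))
                printf("storm detected after %d interrupts\n", i + 1);
        return 0;
    }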
145 static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) in intel_hpd_irq_storm_disable() argument
147 struct drm_device *dev = dev_priv->dev; in intel_hpd_irq_storm_disable()
155 assert_spin_locked(&dev_priv->irq_lock); in intel_hpd_irq_storm_disable()
168 dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED) in intel_hpd_irq_storm_disable()
175 dev_priv->hotplug.stats[pin].state = HPD_DISABLED; in intel_hpd_irq_storm_disable()
184 mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work, in intel_hpd_irq_storm_disable()
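
Lines 145-184 run with irq_lock held (asserted at line 155): every pin the detector flagged HPD_MARK_DISABLED is demoted to HPD_DISABLED, and a one-shot re-enable is (re)armed on system_wq via mod_delayed_work(). The walk in the driver appears to go over connectors (it needs dev at line 147); the sketch below loops over pins directly, and the pin count, the delay, and the scheduling stand-in are assumptions.

    #include <stdbool.h>

    #define NUM_PINS 6                    /* assumption: number of HPD pins */

    enum hpd_state { HPD_ENABLED, HPD_MARK_DISABLED, HPD_DISABLED };

    struct hotplug_model {
        enum hpd_state state[NUM_PINS];
        bool reenable_scheduled;          /* models the delayed reenable_work */
    };

    /*
     * Model of intel_hpd_irq_storm_disable(): called with the interrupt lock
     * already held, it finishes what the detector started and arms the
     * delayed re-enable.  The actual delay value is not visible in the
     * listing.
     */
    static void storm_disable(struct hotplug_model *hp)
    {
        bool disabled_any = false;

        for (int pin = 0; pin < NUM_PINS; pin++) {
            if (hp->state[pin] != HPD_MARK_DISABLED)   /* line 168 */
                continue;
            hp->state[pin] = HPD_DISABLED;             /* line 175 */
            disabled_any = true;
        }

        if (disabled_any)
            hp->reenable_scheduled = true;             /* models line 184 */
    }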
191 struct drm_i915_private *dev_priv = in intel_hpd_irq_storm_reenable_work() local
192 container_of(work, typeof(*dev_priv), in intel_hpd_irq_storm_reenable_work()
194 struct drm_device *dev = dev_priv->dev; in intel_hpd_irq_storm_reenable_work()
198 intel_runtime_pm_get(dev_priv); in intel_hpd_irq_storm_reenable_work()
200 spin_lock_irq(&dev_priv->irq_lock); in intel_hpd_irq_storm_reenable_work()
204 if (dev_priv->hotplug.stats[i].state != HPD_DISABLED) in intel_hpd_irq_storm_reenable_work()
207 dev_priv->hotplug.stats[i].state = HPD_ENABLED; in intel_hpd_irq_storm_reenable_work()
222 if (dev_priv->display.hpd_irq_setup) in intel_hpd_irq_storm_reenable_work()
223 dev_priv->display.hpd_irq_setup(dev); in intel_hpd_irq_storm_reenable_work()
224 spin_unlock_irq(&dev_priv->irq_lock); in intel_hpd_irq_storm_reenable_work()
226 intel_runtime_pm_put(dev_priv); in intel_hpd_irq_storm_reenable_work()
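
Lines 191-226 are the delayed re-enable: container_of() recovers dev_priv from the embedded work item, a runtime-PM reference brackets the whole operation, and under irq_lock every HPD_DISABLED pin is flipped back to HPD_ENABLED before the hardware is reprogrammed through display.hpd_irq_setup(). The least obvious piece is the container_of() recovery at line 192, so the sketch below isolates it; the struct layout and the work-item type are simplified stand-ins. The work functions at lines 252 and 303 presumably recover dev_priv the same way.

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal container_of(), as used at line 192 to get from the embedded
     * work item back to the structure that contains it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct delayed_work_model { int pending; };     /* stand-in for delayed_work */

    struct i915_model {                             /* stand-in for drm_i915_private */
        int id;
        struct {
            struct delayed_work_model reenable_work;
        } hotplug;
    };

    static void reenable_work_fn(struct delayed_work_model *work)
    {
        /* Same recovery as intel_hpd_irq_storm_reenable_work(): the callback
         * only receives the embedded member, container_of() yields the parent. */
        struct i915_model *i915 =
            container_of(work, struct i915_model, hotplug.reenable_work);

        printf("re-enabling HPD pins for device %d\n", i915->id);
        /* ...in the driver: runtime_pm_get, lock, set pins HPD_ENABLED,
         * hpd_irq_setup(), unlock, runtime_pm_put (lines 198-226)... */
    }

    int main(void)
    {
        struct i915_model dev = { .id = 0 };
        reenable_work_fn(&dev.hotplug.reenable_work);
        return 0;
    }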
252 struct drm_i915_private *dev_priv = in i915_digport_work_func() local
259 spin_lock_irq(&dev_priv->irq_lock); in i915_digport_work_func()
260 long_port_mask = dev_priv->hotplug.long_port_mask; in i915_digport_work_func()
261 dev_priv->hotplug.long_port_mask = 0; in i915_digport_work_func()
262 short_port_mask = dev_priv->hotplug.short_port_mask; in i915_digport_work_func()
263 dev_priv->hotplug.short_port_mask = 0; in i915_digport_work_func()
264 spin_unlock_irq(&dev_priv->irq_lock); in i915_digport_work_func()
269 intel_dig_port = dev_priv->hotplug.irq_port[i]; in i915_digport_work_func()
291 spin_lock_irq(&dev_priv->irq_lock); in i915_digport_work_func()
292 dev_priv->hotplug.event_bits |= old_bits; in i915_digport_work_func()
293 spin_unlock_irq(&dev_priv->irq_lock); in i915_digport_work_func()
294 schedule_work(&dev_priv->hotplug.hotplug_work); in i915_digport_work_func()
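
Lines 252-294 show the digital-port worker's handoff: both pending port masks are snapshotted and cleared in one critical section (lines 259-264), the ports are then serviced with the lock dropped, and whatever could not be handled here is folded back into hotplug.event_bits before the generic hotplug work is scheduled (lines 291-294). The sketch below reproduces that snapshot-and-clear pattern with a pthread mutex standing in for the spinlock; the per-port handler, the port count, and the mapping of unhandled ports back onto event bits are placeholders, since none of that detail is visible in the listing.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for dev_priv->irq_lock and the hotplug masks/bits. */
    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t long_port_mask, short_port_mask, event_bits;

    /* Hypothetical per-port handler; returns false when the event could not
     * be serviced here and must fall back to the generic hotplug work. */
    static bool handle_port(int port, bool long_pulse)
    {
        printf("port %d: %s pulse\n", port, long_pulse ? "long" : "short");
        return true;
    }

    static void digport_work(void)
    {
        uint32_t long_mask, short_mask, old_bits = 0;

        /* Snapshot and clear under the lock, exactly once (lines 259-264)... */
        pthread_mutex_lock(&irq_lock);
        long_mask = long_port_mask;   long_port_mask = 0;
        short_mask = short_port_mask; short_port_mask = 0;
        pthread_mutex_unlock(&irq_lock);

        /* ...then process with the lock dropped. */
        for (int port = 0; port < 5; port++) {      /* port count: assumption */
            bool long_pulse = long_mask & (1u << port);
            bool short_pulse = short_mask & (1u << port);

            if (!long_pulse && !short_pulse)
                continue;
            if (!handle_port(port, long_pulse))
                old_bits |= 1u << port;             /* defer to generic work */
        }

        /* Hand unhandled ports back to the generic hotplug path (lines 291-294). */
        if (old_bits) {
            pthread_mutex_lock(&irq_lock);
            event_bits |= old_bits;
            pthread_mutex_unlock(&irq_lock);
            /* in the driver: schedule_work(&hotplug.hotplug_work); */
        }
    }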
303 struct drm_i915_private *dev_priv = in i915_hotplug_work_func() local
305 struct drm_device *dev = dev_priv->dev; in i915_hotplug_work_func()
316 spin_lock_irq(&dev_priv->irq_lock); in i915_hotplug_work_func()
318 hpd_event_bits = dev_priv->hotplug.event_bits; in i915_hotplug_work_func()
319 dev_priv->hotplug.event_bits = 0; in i915_hotplug_work_func()
322 intel_hpd_irq_storm_disable(dev_priv); in i915_hotplug_work_func()
324 spin_unlock_irq(&dev_priv->irq_lock); in i915_hotplug_work_func()
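
Lines 303-324 apply the same handoff to hotplug.event_bits, with one extra step: intel_hpd_irq_storm_disable() is called at line 322 while irq_lock is still held, so pins flagged by the IRQ path are demoted before anything else runs. A short self-contained sketch of that ordering follows; the storm-disable body is stubbed out (see the earlier sketch) and the connector processing after the unlock is only indicated in a comment.

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t event_bits;              /* models hotplug.event_bits */

    /* Models intel_hpd_irq_storm_disable(); must be called with irq_lock held. */
    static void storm_disable_locked(void)
    {
        /* demote pins flagged HPD_MARK_DISABLED; see the earlier sketch */
    }

    static void hotplug_work(void)
    {
        uint32_t pending;

        pthread_mutex_lock(&irq_lock);
        pending = event_bits;                /* snapshot (line 318)...          */
        event_bits = 0;                      /* ...and clear under lock (319)   */
        storm_disable_locked();              /* line 322: still holding the lock */
        pthread_mutex_unlock(&irq_lock);

        /* With the lock dropped: act on the pins recorded in 'pending'
         * (connector probing / uevent in the driver, not shown here). */
        (void)pending;
    }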
366 struct drm_i915_private *dev_priv = dev->dev_private; in intel_hpd_irq_handler() local
376 spin_lock(&dev_priv->irq_lock); in intel_hpd_irq_handler()
382 dev_priv->hotplug.irq_port[port]; in intel_hpd_irq_handler()
395 dev_priv->hotplug.long_port_mask |= (1 << port); in intel_hpd_irq_handler()
398 dev_priv->hotplug.short_port_mask |= (1 << port); in intel_hpd_irq_handler()
403 if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) { in intel_hpd_irq_handler()
415 if (dev_priv->hotplug.stats[i].state != HPD_ENABLED) in intel_hpd_irq_handler()
419 dev_priv->hotplug.event_bits |= BIT(i); in intel_hpd_irq_handler()
423 if (intel_hpd_irq_storm_detect(dev_priv, i)) { in intel_hpd_irq_handler()
424 dev_priv->hotplug.event_bits &= ~BIT(i); in intel_hpd_irq_handler()
430 dev_priv->display.hpd_irq_setup(dev); in intel_hpd_irq_handler()
431 spin_unlock(&dev_priv->irq_lock); in intel_hpd_irq_handler()
440 queue_work(dev_priv->hotplug.dp_wq, &dev_priv->hotplug.dig_port_work); in intel_hpd_irq_handler()
442 schedule_work(&dev_priv->hotplug.hotplug_work); in intel_hpd_irq_handler()
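
Lines 366-442 are the interrupt-context half. Under a plain spin_lock() (the handler already runs with interrupts disabled), it records long/short pulses per digital port (lines 395-398), sets one event bit per enabled pin (line 419), and immediately clears that bit again for any pin the storm detector trips (lines 423-424), reprogramming the hardware via hpd_irq_setup() before unlocking; the slow work is deferred to the two work items queued at lines 440-442. The sketch below models that top-half/bottom-half split with the per-port and per-pin loops collapsed into a single call per event; the hardware decode, the pin count, and the queueing primitives are placeholders.

    #include <stdbool.h>
    #include <stdint.h>

    #define NUM_PINS 6                    /* assumption */

    enum hpd_state { HPD_ENABLED, HPD_MARK_DISABLED, HPD_DISABLED };

    struct hpd_model {
        enum hpd_state state[NUM_PINS];
        uint32_t long_port_mask, short_port_mask, event_bits;
        bool queue_dig, queue_hp;         /* model the two deferred work items */
    };

    /* Placeholder for intel_hpd_irq_storm_detect(); see the first sketch. */
    static bool storm_detect(struct hpd_model *hp, int pin)
    {
        (void)hp; (void)pin;
        return false;
    }

    /*
     * IRQ-context half modelled on intel_hpd_irq_handler(); locking is
     * omitted, in the driver this all happens under spin_lock(&irq_lock).
     */
    static void hpd_irq(struct hpd_model *hp, int port, int pin,
                        bool long_pulse, bool is_dig_port)
    {
        if (is_dig_port) {
            if (long_pulse)
                hp->long_port_mask |= 1u << port;      /* line 395 */
            else
                hp->short_port_mask |= 1u << port;     /* line 398 */
            hp->queue_dig = true;                      /* models line 440 */
        }

        if (hp->state[pin] != HPD_ENABLED)             /* lines 403/415 */
            return;

        hp->event_bits |= 1u << pin;                   /* line 419 */
        if (storm_detect(hp, pin))
            hp->event_bits &= ~(1u << pin);            /* line 424 */

        hp->queue_hp = true;                           /* models line 442 */
    }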
457 void intel_hpd_init(struct drm_i915_private *dev_priv) in intel_hpd_init() argument
459 struct drm_device *dev = dev_priv->dev; in intel_hpd_init()
465 dev_priv->hotplug.stats[i].count = 0; in intel_hpd_init()
466 dev_priv->hotplug.stats[i].state = HPD_ENABLED; in intel_hpd_init()
486 spin_lock_irq(&dev_priv->irq_lock); in intel_hpd_init()
487 if (dev_priv->display.hpd_irq_setup) in intel_hpd_init()
488 dev_priv->display.hpd_irq_setup(dev); in intel_hpd_init()
489 spin_unlock_irq(&dev_priv->irq_lock); in intel_hpd_init()
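
Lines 457-489 reset every pin to a clean slate (count 0, HPD_ENABLED) and then reprogram the interrupt hardware under irq_lock, which is why intel_hpd_init() also works as the recovery path after a reset or resume. A minimal sketch of that reset step follows; the pin count and the setup callback are stand-ins.

    #define NUM_PINS 6                      /* assumption */

    enum hpd_state { HPD_ENABLED, HPD_MARK_DISABLED, HPD_DISABLED };

    struct hpd_stats { int count; enum hpd_state state; };

    /* Models lines 465-466 plus the locked hpd_irq_setup() call at 486-489. */
    static void hpd_init(struct hpd_stats stats[NUM_PINS], void (*hw_setup)(void))
    {
        for (int i = 0; i < NUM_PINS; i++) {
            stats[i].count = 0;
            stats[i].state = HPD_ENABLED;
        }

        /* in the driver: spin_lock_irq(&irq_lock) is held around this call */
        if (hw_setup)
            hw_setup();
    }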
492 void intel_hpd_init_work(struct drm_i915_private *dev_priv) in intel_hpd_init_work() argument
494 INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); in intel_hpd_init_work()
495 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); in intel_hpd_init_work()
496 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, in intel_hpd_init_work()
500 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) in intel_hpd_cancel_work() argument
502 spin_lock_irq(&dev_priv->irq_lock); in intel_hpd_cancel_work()
504 dev_priv->hotplug.long_port_mask = 0; in intel_hpd_cancel_work()
505 dev_priv->hotplug.short_port_mask = 0; in intel_hpd_cancel_work()
506 dev_priv->hotplug.event_bits = 0; in intel_hpd_cancel_work()
508 spin_unlock_irq(&dev_priv->irq_lock); in intel_hpd_cancel_work()
510 cancel_work_sync(&dev_priv->hotplug.dig_port_work); in intel_hpd_cancel_work()
511 cancel_work_sync(&dev_priv->hotplug.hotplug_work); in intel_hpd_cancel_work()
512 cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); in intel_hpd_cancel_work()
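
Teardown at lines 500-512 is two-phase: pending masks and event bits are wiped while holding irq_lock, so a work item that is already running finds nothing left to do, and only then are the three work items cancelled synchronously with the lock dropped (cancel_work_sync() and cancel_delayed_work_sync() can sleep, so they cannot be called under the spinlock). The sketch below mirrors that ordering with pthread and no-op stand-ins.

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static uint32_t long_port_mask, short_port_mask, event_bits;

    /* Placeholder for cancel_work_sync() / cancel_delayed_work_sync(): wait
     * for the item to finish and prevent requeueing.  The real primitives
     * may sleep, which is why they run with irq_lock already dropped. */
    static void cancel_sync(const char *name) { (void)name; }

    static void hpd_cancel_work(void)
    {
        /* Phase 1 (lines 502-508): wipe pending state under the lock so any
         * work item that is already executing sees empty masks and exits. */
        pthread_mutex_lock(&irq_lock);
        long_port_mask = 0;
        short_port_mask = 0;
        event_bits = 0;
        pthread_mutex_unlock(&irq_lock);

        /* Phase 2 (lines 510-512): with the lock dropped, wait for each
         * worker to finish. */
        cancel_sync("dig_port_work");
        cancel_sync("hotplug_work");
        cancel_sync("reenable_work");
    }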