/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_TIMELINE_TYPES_H__
#define __I915_TIMELINE_TYPES_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"

struct drm_i915_private;
struct i915_vma;
struct intel_gt;
struct intel_timeline_cacheline;
struct i915_syncmap;

struct intel_timeline {
	u64 fence_context; /* dma-fence context for requests on this timeline */
	u32 seqno; /* last seqno assigned on this timeline */

	struct mutex mutex; /* protects the flow of requests */

	/*
	 * pin_count and active_count track essentially the same thing:
	 * how many requests are in flight or may be under construction.
	 *
	 * We need two distinct counters so that we can assign different
	 * lifetimes to the events for different use-cases. For example,
	 * we want to permanently keep the timeline pinned for the kernel
	 * context so that we can issue requests at any time without having
	 * to acquire space in the GGTT. However, we want to keep tracking
	 * the activity (to be able to detect when we become idle) along that
	 * permanently pinned timeline and so end up requiring two counters.
	 *
	 * Note that the active_count is protected by the intel_timeline.mutex,
	 * but the pin_count is protected by a combination of serialisation
	 * from the intel_context caller plus internal atomicity.
	 *
	 * A compiled-out sketch of this locking discipline appears at the
	 * end of this file.
	 */
	atomic_t pin_count;
	unsigned int active_count;

	const u32 *hwsp_seqno; /* CPU map of our seqno slot in the HWSP */
	struct i915_vma *hwsp_ggtt; /* GGTT vma of the Hardware Status Page */
	u32 hwsp_offset; /* offset of our seqno slot within hwsp_ggtt */

	struct intel_timeline_cacheline *hwsp_cacheline;

	bool has_initial_breadcrumb;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head requests;

	/*
	 * Contains an RCU guarded pointer to the last request. No reference
	 * is held to the request; users must carefully acquire a reference
	 * to the request using i915_active_request_get_request_rcu(), or
	 * hold the struct_mutex.
	 */
	struct i915_active_request last_request;

	/**
	 * We track the most recent seqno that we wait on in every context so
	 * that we only have to emit a new await and dependency on a more
	 * recent sync point. As the contexts may be executed out-of-order, we
	 * have to track each individually and cannot rely on an absolute
	 * global_seqno. When we know that all tracked fences are completed
	 * (i.e. when the driver is idle), we know that the syncmap is
	 * redundant and we can discard it without loss of generality.
	 */
	struct i915_syncmap *sync;

	struct list_head link; /* entry in the owning intel_gt's timeline list */
	struct intel_gt *gt;

	struct kref kref;
};

#endif /* __I915_TIMELINE_TYPES_H__ */
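
/*
 * Illustrative sketch, not part of the header proper and compiled out:
 * one way a caller might follow the locking rules documented on
 * pin_count and active_count above. The function names are hypothetical
 * and the bodies simplified; only the discipline itself (pin_count is
 * atomic and may be adjusted without the lock, active_count must only
 * be touched under tl->mutex) is taken from the comments in this file.
 */
#if 0
static void example_timeline_enter(struct intel_timeline *tl)
{
	/*
	 * pin_count is atomic: an already pinned timeline may gain
	 * another pin without taking tl->mutex; serialisation against
	 * the final unpin comes from the intel_context caller.
	 */
	atomic_inc(&tl->pin_count);

	/* active_count, by contrast, is only valid under tl->mutex */
	mutex_lock(&tl->mutex);
	tl->active_count++;
	mutex_unlock(&tl->mutex);
}

static void example_timeline_exit(struct intel_timeline *tl)
{
	mutex_lock(&tl->mutex);
	if (!--tl->active_count) {
		/*
		 * Idle: every fence tracked in tl->sync has completed,
		 * so the syncmap is redundant and could be discarded.
		 */
	}
	mutex_unlock(&tl->mutex);

	atomic_dec(&tl->pin_count);
}
#endif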