This source file includes the following definitions:
- igt_fill_blt
- igt_copy_blt
- i915_gem_object_blt_live_selftests
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "selftests/igt_flush_test.h"
#include "selftests/mock_drm.h"
#include "huge_gem_object.h"
#include "mock_context.h"

static int igt_fill_blt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	struct drm_i915_gem_object *obj;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

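	/*
	 * Repeatedly pick a randomly sized object, fill it on the blitter
	 * and then check the backing store through the CPU, until the
	 * selftest timeout expires.
	 */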
	do {
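		/*
		 * Cap the physical backing store at S16_MAX pages: the
		 * blitter commands use signed 16-bit fields for their
		 * extents, which presumably bounds what a single command
		 * can cover.
		 */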
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

		obj = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(obj)) {
			err = PTR_ERR(obj);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put;
		}

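		/*
		 * Scribble a pattern that differs from val over the whole
		 * backing store, so a blit that never lands is detected.
		 */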
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(obj) / sizeof(u32));

		if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			obj->cache_dirty = true;

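		/* Fill the entire object with val on the blitter engine. */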
		mutex_lock(&i915->drm.struct_mutex);
		err = i915_gem_object_fill_blt(obj, ce, val);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err)
			goto err_unpin;

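		/*
		 * Flush the GPU write to the backing store before reading
		 * it back through the CPU.
		 */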
		i915_gem_object_lock(obj);
		err = i915_gem_object_set_to_cpu_domain(obj, false);
		i915_gem_object_unlock(obj);
		if (err)
			goto err_unpin;

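		/* Every u32 of the backing store must now read back as val. */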
		for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(obj);
		i915_gem_object_put(obj);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(obj);
err_put:
	i915_gem_object_put(obj);
err_flush:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

static int igt_copy_blt(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_context *ce = i915->engine[BCS0]->kernel_context;
	struct drm_i915_gem_object *src, *dst;
	struct rnd_state prng;
	IGT_TIMEOUT(end);
	u32 *vaddr;
	int err = 0;

	prandom_seed_state(&prng, i915_selftest.random_seed);

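	/*
	 * Repeatedly copy between two randomly sized objects on the
	 * blitter and verify the destination through the CPU, until the
	 * selftest timeout expires.
	 */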
	do {
		const u32 max_block_size = S16_MAX * PAGE_SIZE;
		u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
		u32 phys_sz = sz % (max_block_size + 1);
		u32 val = prandom_u32_state(&prng);
		u32 i;

		sz = round_up(sz, PAGE_SIZE);
		phys_sz = round_up(phys_sz, PAGE_SIZE);

		pr_debug("%s with phys_sz=%x, sz=%x, val=%x\n", __func__,
			 phys_sz, sz, val);

		src = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(src)) {
			err = PTR_ERR(src);
			goto err_flush;
		}

		vaddr = i915_gem_object_pin_map(src, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put_src;
		}

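		/* Seed the source with the expected value through the CPU. */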
		memset32(vaddr, val,
			 huge_gem_object_phys_size(src) / sizeof(u32));

		i915_gem_object_unpin_map(src);

		if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
			src->cache_dirty = true;

		dst = huge_gem_object(i915, phys_sz, sz);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto err_put_src;
		}

		vaddr = i915_gem_object_pin_map(dst, I915_MAP_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_put_dst;
		}

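		/*
		 * Poison the destination with a different pattern, so a
		 * copy that never lands is detected.
		 */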
		memset32(vaddr, val ^ 0xdeadbeaf,
			 huge_gem_object_phys_size(dst) / sizeof(u32));

		if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
			dst->cache_dirty = true;

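		/* Copy src into dst on the blitter engine. */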
		mutex_lock(&i915->drm.struct_mutex);
		err = i915_gem_object_copy_blt(src, dst, ce);
		mutex_unlock(&i915->drm.struct_mutex);
		if (err)
			goto err_unpin;

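		/*
		 * Flush the GPU write to dst before checking the copy
		 * through the CPU.
		 */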
		i915_gem_object_lock(dst);
		err = i915_gem_object_set_to_cpu_domain(dst, false);
		i915_gem_object_unlock(dst);
		if (err)
			goto err_unpin;

		for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
			if (vaddr[i] != val) {
				pr_err("vaddr[%u]=%x, expected=%x\n", i,
				       vaddr[i], val);
				err = -EINVAL;
				goto err_unpin;
			}
		}

		i915_gem_object_unpin_map(dst);

		i915_gem_object_put(src);
		i915_gem_object_put(dst);
	} while (!time_after(jiffies, end));

	goto err_flush;

err_unpin:
	i915_gem_object_unpin_map(dst);
err_put_dst:
	i915_gem_object_put(dst);
err_put_src:
	i915_gem_object_put(src);
err_flush:
	if (err == -ENOMEM)
		err = 0;

	return err;
}

int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_fill_blt),
		SUBTEST(igt_copy_blt),
	};

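	/*
	 * These tests need a functioning GPU and a blitter engine;
	 * skip quietly when either is missing.
	 */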
	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	if (!HAS_ENGINE(i915, BCS0))
		return 0;

	return i915_live_subtests(tests, i915);
}