/*
 * CPU blit and diff helpers for the vmwgfx driver. This source file
 * includes the following definitions:
 * - vmw_find_first_diff
 * - vmw_find_last_diff
 * - vmw_memcpy
 * - vmw_adjust_rect
 * - vmw_diff_memcpy
 * - vmw_bo_cpu_blit_line
 * - vmw_bo_cpu_blit
 */

#include "vmwgfx_drv.h"
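
/**
 * VMW_FIND_FIRST_DIFF - generate a forward diff-scan helper
 * @_type: The unsigned scalar type to compare with.
 *
 * Expands to vmw_find_first_diff_@_type, which scans @dst and @src
 * forward and returns the byte offset of the first element that
 * differs, or @size if the buffers are identical. @size is assumed
 * to be a multiple of sizeof(@_type).
 */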
#define VMW_FIND_FIRST_DIFF(_type) \
static size_t vmw_find_first_diff_ ## _type \
        (const _type *dst, const _type *src, size_t size) \
{ \
        size_t i; \
\
        for (i = 0; i < size; i += sizeof(_type)) { \
                if (*dst++ != *src++) \
                        break; \
        } \
\
        return i; \
}
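
/**
 * VMW_FIND_LAST_DIFF - generate a backward diff-scan helper
 * @_type: The unsigned scalar type to compare with.
 *
 * Expands to vmw_find_last_diff_@_type, which expects @dst and @src
 * to point just past the end of the buffers and scans backward.
 * Returns the number of bytes from the start of the buffers up to and
 * including the last element that differs, or zero if the buffers are
 * identical.
 */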
#define VMW_FIND_LAST_DIFF(_type) \
static ssize_t vmw_find_last_diff_ ## _type( \
        const _type *dst, const _type *src, size_t size) \
{ \
        while (size) { \
                if (*--dst != *--src) \
                        break; \
\
                size -= sizeof(_type); \
        } \
        return size; \
}
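
/* Instantiate the diff-scan helpers for each supported scalar width. */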
VMW_FIND_FIRST_DIFF(u8);
VMW_FIND_LAST_DIFF(u8);

VMW_FIND_FIRST_DIFF(u16);
VMW_FIND_LAST_DIFF(u16);

VMW_FIND_FIRST_DIFF(u32);
VMW_FIND_LAST_DIFF(u32);

#ifdef CONFIG_64BIT
VMW_FIND_FIRST_DIFF(u64);
VMW_FIND_LAST_DIFF(u64);
#endif
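
/* SPILL - the number of bytes @_var sits past a sizeof(@_type) boundary. */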
#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
93
94
95
96
97
98
99
100
101
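
/**
 * VMW_TRY_FIND_FIRST_DIFF - find the first difference using @_type accesses
 * @_type: The scalar type used for the wide comparison.
 *
 * Expands inside the body of vmw_find_first_diff() and uses its local
 * variables. If @dst and @src are equally misaligned for @_type, a
 * byte-wise scan first consumes the unaligned head; the bulk of the
 * buffers is then compared sizeof(@_type) bytes at a time. Returns from
 * the enclosing function as soon as the first difference is pinned down
 * to @granularity; otherwise it advances dst, src, size and offset so
 * that a narrower type can continue the scan.
 */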
#define VMW_TRY_FIND_FIRST_DIFF(_type) \
do { \
        unsigned int spill = SPILL(dst, _type); \
        size_t diff_offs; \
\
        if (spill && spill == SPILL(src, _type) && \
            sizeof(_type) - spill <= size) { \
                spill = sizeof(_type) - spill; \
                diff_offs = vmw_find_first_diff_u8(dst, src, spill); \
                if (diff_offs < spill) \
                        return round_down(offset + diff_offs, granularity); \
\
                dst += spill; \
                src += spill; \
                size -= spill; \
                offset += spill; \
                spill = 0; \
        } \
        if (!spill && !SPILL(src, _type)) { \
                size_t to_copy = size & ~(sizeof(_type) - 1); \
\
                diff_offs = vmw_find_first_diff_ ## _type \
                        ((_type *) dst, (_type *) src, to_copy); \
                if (diff_offs >= size || granularity == sizeof(_type)) \
                        return (offset + diff_offs); \
\
                dst += diff_offs; \
                src += diff_offs; \
                size -= diff_offs; \
                offset += diff_offs; \
        } \
} while (0)
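
/**
 * vmw_find_first_diff - find the first difference between @dst and @src
 * @dst: The destination buffer.
 * @src: The source buffer.
 * @size: Number of bytes to compare.
 * @granularity: The granularity (in bytes) to which the result is
 * rounded down, typically the pixel size.
 *
 * Tries progressively narrower scalar types, starting with the widest
 * the architecture supports, and returns the offset of the first
 * difference rounded down to @granularity, or @size if the buffers are
 * identical (assuming @size is a multiple of @granularity).
 */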
static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
                                  size_t granularity)
{
        size_t offset = 0;

#ifdef CONFIG_64BIT
        VMW_TRY_FIND_FIRST_DIFF(u64);
#endif
        VMW_TRY_FIND_FIRST_DIFF(u32);
        VMW_TRY_FIND_FIRST_DIFF(u16);

        return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
                          granularity);
}
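
/**
 * VMW_TRY_FIND_LAST_DIFF - find the last difference using @_type accesses
 * @_type: The scalar type used for the wide comparison.
 *
 * Expands inside the body of vmw_find_last_diff() and uses its local
 * variables; @dst and @src point just past the end of the buffers.
 * Mirrors VMW_TRY_FIND_FIRST_DIFF: a byte-wise scan first consumes the
 * unaligned tail, then the bulk is compared sizeof(@_type) bytes at a
 * time, returning from the enclosing function once the last difference
 * is pinned down to @granularity.
 */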
#define VMW_TRY_FIND_LAST_DIFF(_type) \
do { \
        unsigned int spill = SPILL(dst, _type); \
        ssize_t location; \
        ssize_t diff_offs; \
\
        if (spill && spill <= size && spill == SPILL(src, _type)) { \
                diff_offs = vmw_find_last_diff_u8(dst, src, spill); \
                if (diff_offs) { \
                        location = size - spill + diff_offs - 1; \
                        return round_down(location, granularity); \
                } \
\
                dst -= spill; \
                src -= spill; \
                size -= spill; \
                spill = 0; \
        } \
        if (!spill && !SPILL(src, _type)) { \
                size_t to_copy = round_down(size, sizeof(_type)); \
\
                diff_offs = vmw_find_last_diff_ ## _type \
                        ((_type *) dst, (_type *) src, to_copy); \
                location = size - to_copy + diff_offs - sizeof(_type); \
                if (location < 0 || granularity == sizeof(_type)) \
                        return location; \
\
                dst -= to_copy - diff_offs; \
                src -= to_copy - diff_offs; \
                size -= to_copy - diff_offs; \
        } \
} while (0)
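
/**
 * vmw_find_last_diff - find the last difference between @dst and @src
 * @dst: The destination buffer.
 * @src: The source buffer.
 * @size: Number of bytes to compare.
 * @granularity: The granularity (in bytes) to which the result is
 * rounded down, typically the pixel size.
 *
 * Returns the offset of the last difference rounded down to
 * @granularity, or a negative value if the buffers are identical.
 */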
static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
                                  size_t granularity)
{
        dst += size;
        src += size;

#ifdef CONFIG_64BIT
        VMW_TRY_FIND_LAST_DIFF(u64);
#endif
        VMW_TRY_FIND_LAST_DIFF(u32);
        VMW_TRY_FIND_LAST_DIFF(u16);

        return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
                          granularity);
}
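
/**
 * vmw_memcpy - a memcpy wrapper with a vmw_diff_cpy compatible signature
 * @diff: The struct vmw_diff_cpy closure argument. Unused; present only
 * to match the do_cpy callback prototype used by vmw_bo_cpu_blit().
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 */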
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
{
        memcpy(dest, src, n);
}
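
/**
 * vmw_adjust_rect - extend a dirty rect to include a detected difference
 * @diff: The struct vmw_diff_cpy tracking the modified bounding box.
 * @diff_offs: The byte offset of the difference within the current line.
 *
 * Converts the byte offset to pixel units using @diff->cpp and the
 * current line offset, then grows @diff->rect so it covers that pixel
 * on the current line.
 */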
static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
{
        size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
        struct drm_rect *rect = &diff->rect;

        rect->x1 = min_t(int, rect->x1, offs);
        rect->x2 = max_t(int, rect->x2, offs + 1);
        rect->y1 = min_t(int, rect->y1, diff->line);
        rect->y2 = max_t(int, rect->y2, diff->line + 1);
}
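
/**
 * vmw_diff_memcpy - memcpy that only copies the modified middle of a line
 * @diff: The struct vmw_diff_cpy tracking the modified bounding box.
 * @dest: The copy destination.
 * @src: The copy source.
 * @n: Number of bytes to copy.
 *
 * Locates the first and last differing bytes between @dest and @src,
 * copies only the span between them, and updates @diff->rect with the
 * pixels that changed. @n must be a multiple of the pixel size
 * @diff->cpp.
 */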
void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
                     size_t n)
{
        ssize_t csize, byte_len;

        if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
                return;

        csize = vmw_find_first_diff(dest, src, n, diff->cpp);
        if (csize < n) {
                vmw_adjust_rect(diff, csize);
                byte_len = diff->cpp;

                /*
                 * Starting from where the first difference was found,
                 * find the last difference, then copy the span between
                 * the two.
                 */
                diff->line_offset += csize;
                dest += csize;
                src += csize;
                n -= csize;
                csize = vmw_find_last_diff(dest, src, n, diff->cpp);
                if (csize >= 0) {
                        byte_len += csize;
                        vmw_adjust_rect(diff, csize);
                }
                memcpy(dest, src, byte_len);
        }
        diff->line_offset += n;
}
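
/**
 * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line()
 * @mapped_dst: Index of the currently mapped destination page, if any.
 * @dst_addr: Kernel virtual address of the mapped destination page.
 * @dst_pages: Array of destination buffer object pages.
 * @dst_num_pages: Number of destination buffer object pages.
 * @dst_prot: Destination page protection.
 * @mapped_src: Index of the currently mapped source page, if any.
 * @src_addr: Kernel virtual address of the mapped source page.
 * @src_pages: Array of source buffer object pages.
 * @src_num_pages: Number of source buffer object pages.
 * @src_prot: Source page protection.
 * @diff: The struct vmw_diff_cpy whose do_cpy callback performs the
 * actual per-chunk copy.
 */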
struct vmw_bo_blit_line_data {
        u32 mapped_dst;
        u8 *dst_addr;
        struct page **dst_pages;
        u32 dst_num_pages;
        pgprot_t dst_prot;
        u32 mapped_src;
        u8 *src_addr;
        struct page **src_pages;
        u32 src_num_pages;
        pgprot_t src_prot;
        struct vmw_diff_cpy *diff;
};
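
/**
 * vmw_bo_cpu_blit_line - copy one line, mapping pages as needed
 * @d: The blit data, including the page arrays and current mappings.
 * @dst_offset: Destination offset of the line start in bytes.
 * @src_offset: Source offset of the line start in bytes.
 * @bytes_to_copy: Number of bytes to copy for this line.
 *
 * Splits the line into chunks that do not cross page boundaries,
 * (re)maps the source and destination pages with atomic kmaps when the
 * page index changes, and hands each chunk to the do_cpy callback.
 * Returns zero on success or a negative error code on failure.
 */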
static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
                                u32 dst_offset,
                                u32 src_offset,
                                u32 bytes_to_copy)
{
        struct vmw_diff_cpy *diff = d->diff;

        while (bytes_to_copy) {
                u32 copy_size = bytes_to_copy;
                u32 dst_page = dst_offset >> PAGE_SHIFT;
                u32 src_page = src_offset >> PAGE_SHIFT;
                u32 dst_page_offset = dst_offset & ~PAGE_MASK;
                u32 src_page_offset = src_offset & ~PAGE_MASK;
                /*
                 * Atomic kmaps nest like a stack: the source mapping was
                 * established last, so it must also be torn down whenever
                 * the destination mapping changes.
                 */
                bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
                bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
                                                 unmap_dst);

                /* Copy no further than the end of the current page. */
                copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
                copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);

                if (unmap_src) {
                        ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
                        d->src_addr = NULL;
                }

                if (unmap_dst) {
                        ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
                        d->dst_addr = NULL;
                }

                if (!d->dst_addr) {
                        if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
                                return -EINVAL;

                        d->dst_addr =
                                ttm_kmap_atomic_prot(d->dst_pages[dst_page],
                                                     d->dst_prot);
                        if (!d->dst_addr)
                                return -ENOMEM;

                        d->mapped_dst = dst_page;
                }

                if (!d->src_addr) {
                        if (WARN_ON_ONCE(src_page >= d->src_num_pages))
                                return -EINVAL;

                        d->src_addr =
                                ttm_kmap_atomic_prot(d->src_pages[src_page],
                                                     d->src_prot);
                        if (!d->src_addr)
                                return -ENOMEM;

                        d->mapped_src = src_page;
                }
                diff->do_cpy(diff, d->dst_addr + dst_page_offset,
                             d->src_addr + src_page_offset, copy_size);

                bytes_to_copy -= copy_size;
                dst_offset += copy_size;
                src_offset += copy_size;
        }

        return 0;
}
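
/**
 * vmw_bo_cpu_blit - in-kernel cpu blit
 * @dst: Destination buffer object.
 * @dst_offset: Destination offset of blit start in bytes.
 * @dst_stride: Destination stride in bytes.
 * @src: Source buffer object.
 * @src_offset: Source offset of blit start in bytes.
 * @src_stride: Source stride in bytes.
 * @w: Width of blit in bytes.
 * @h: Height of blit in lines.
 * @diff: The struct vmw_diff_cpy supplying the copy routine, which may
 * also track the modified bounding box.
 *
 * Performs a CPU blit from one buffer object to another, avoiding a
 * full buffer object vmap: pages are mapped one at a time with atomic
 * kmaps. Both buffer objects must be either pinned or reserved by the
 * caller.
 *
 * Returns zero on success, or a negative error code on failure.
 */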
int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
                    u32 dst_offset, u32 dst_stride,
                    struct ttm_buffer_object *src,
                    u32 src_offset, u32 src_stride,
                    u32 w, u32 h,
                    struct vmw_diff_cpy *diff)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };
        u32 j, initial_line = dst_offset / dst_stride;
        struct vmw_bo_blit_line_data d;
        int ret = 0;

        /* Buffer objects need to be either pinned or reserved. */
        if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
                dma_resv_assert_held(dst->base.resv);
        if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
                dma_resv_assert_held(src->base.resv);

        if (dst->ttm->state == tt_unpopulated) {
                ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
                if (ret)
                        return ret;
        }

        if (src->ttm->state == tt_unpopulated) {
                ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
                if (ret)
                        return ret;
        }

        d.mapped_dst = 0;
        d.mapped_src = 0;
        d.dst_addr = NULL;
        d.src_addr = NULL;
        d.dst_pages = dst->ttm->pages;
        d.src_pages = src->ttm->pages;
        d.dst_num_pages = dst->num_pages;
        d.src_num_pages = src->num_pages;
        d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
        d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
        d.diff = diff;

        for (j = 0; j < h; ++j) {
                diff->line = j + initial_line;
                diff->line_offset = dst_offset % dst_stride;
                ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
                if (ret)
                        goto out;

                dst_offset += dst_stride;
                src_offset += src_stride;
        }
out:
        if (d.src_addr)
                ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
        if (d.dst_addr)
                ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);

        return ret;
}