This source file includes the following definitions:
- gasket_page_table_init
- gasket_is_pte_range_free
- gasket_free_extended_subtable
- gasket_page_table_garbage_collect_nolock
- gasket_page_table_garbage_collect
- gasket_page_table_cleanup
- gasket_page_table_partition
- is_coherent
- gasket_release_page
- gasket_perform_mapping
- gasket_simple_page_idx
- gasket_extended_lvl0_page_idx
- gasket_extended_lvl1_page_idx
- gasket_alloc_simple_entries
- gasket_perform_unmapping
- gasket_unmap_simple_pages
- gasket_unmap_extended_pages
- gasket_addr_is_simple
- gasket_components_to_dev_address
- gasket_is_simple_dev_addr_bad
- gasket_is_extended_dev_addr_bad
- gasket_page_table_unmap_nolock
- gasket_map_simple_pages
- gasket_alloc_extended_subtable
- gasket_alloc_extended_entries
- gasket_map_extended_pages
- gasket_page_table_map
- gasket_page_table_unmap
- gasket_page_table_unmap_all_nolock
- gasket_page_table_unmap_all
- gasket_page_table_reset
- gasket_page_table_lookup_page
- gasket_page_table_are_addrs_bad
- gasket_page_table_is_dev_addr_bad
- gasket_page_table_max_size
- gasket_page_table_num_entries
- gasket_page_table_num_simple_entries
- gasket_page_table_num_active_pages
- gasket_page_table_system_status
- gasket_set_user_virt
- gasket_alloc_coherent_memory
- gasket_free_coherent_memory
- gasket_free_coherent_memory_all

#include "gasket_page_table.h"

#include <linux/device.h>
#include <linux/file.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>

#include "gasket_constants.h"
#include "gasket_core.h"

/* Constants and utility macros. */

/* The number of pages that can be mapped into each second-level page table. */
#define GASKET_PAGES_PER_SUBTABLE 512

/* The starting position of the page index in a simple device address. */
#define GASKET_SIMPLE_PAGE_SHIFT 12

/* Flag indicating that a [sub]table slot is present/valid. */
#define GASKET_VALID_SLOT_FLAG 1

/*
 * The starting position of the level 0 page index (i.e., the entry in the
 * device's extended address registers) in an extended address:
 * log2(PAGE_SIZE) + log2(GASKET_PAGES_PER_SUBTABLE) = 12 + 9.
 */
#define GASKET_EXTENDED_LVL0_SHIFT 21

/* The width, in bits, of the level 0 page index in an extended address. */
#define GASKET_EXTENDED_LVL0_WIDTH 13

/*
 * The starting position of the level 1 page index (i.e., the entry in the
 * host-resident second-level table) in an extended address.
 */
#define GASKET_EXTENDED_LVL1_SHIFT 12
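
/*
 * Worked example (editor's illustration, not part of the driver): assuming
 * the device's extended-address flag lives in bit 63 (extended_bit == 63 in
 * the gasket_page_table_config) and total_entries is a power of two, an
 * extended device address decomposes as
 *
 *   bit  63      extended-address flag (pg_tbl->extended_flag)
 *   bits 33..21  level 0 index (GASKET_EXTENDED_LVL0_WIDTH = 13 bits)
 *   bits 20..12  level 1 index (9 bits; 512 slots per subtable)
 *   bits 11..0   byte offset within the 4 KiB page
 *
 * so dev_addr = (1ULL << 63) | 0x403008 has level 0 index 2, level 1 index 3,
 * and page offset 8.
 */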

/* Valid states for a struct gasket_page_table_entry. */
enum pte_status {
        PTE_FREE,
        PTE_INUSE,
};

/* Metadata for each page table entry. */
struct gasket_page_table_entry {
        /* The status of this entry/slot: free or in use. */
        enum pte_status status;

        /*
         * Index of the host address into its containing page, i.e.
         * host_addr & (PAGE_SIZE - 1). User-supplied host addresses need not
         * be page-aligned, so this is needed to translate between the
         * user-specified address and the page-aligned address actually mapped.
         */
        int offset;

        /* Address of the page in DMA space. */
        dma_addr_t dma_addr;

        /* Linux page descriptor for the page described by this structure. */
        struct page *page;

        /*
         * If this is an extended, first-level entry, sublevel points to the
         * second-level entries underneath this entry.
         */
        struct gasket_page_table_entry *sublevel;
};

/*
 * Mapping metadata for one coherent page allocated by this module on behalf
 * of a device.
 */
struct gasket_coherent_page_entry {
        /* Physical (DMA) address of the coherent page. */
        dma_addr_t paddr;

        /* User virtual address the page was mmapped to, if any. */
        u64 user_virt;

        /* Kernel virtual address of the coherent page. */
        u64 kernel_virt;

        /* Whether the page has been mapped into a user process. */
        u32 in_use;
};

/*
 * The top-level page table structure: configuration, per-entry metadata, and
 * pointers into the device's address translation registers.
 */
struct gasket_page_table {
        /* The config used to create this page table. */
        struct gasket_page_table_config config;

        /* The number of simple (single-level) entries in the page table. */
        uint num_simple_entries;

        /* The number of extended (two-level) entries in the page table. */
        uint num_extended_entries;

        /* Array of page table entries. */
        struct gasket_page_table_entry *entries;

        /* Number of actively mapped kernel pages in this table. */
        uint num_active_pages;

        /* Device register: base of/first slot in the page table. */
        u64 __iomem *base_slot;

        /*
         * Device register: holds the offset indicating the start of the
         * extended address region of the device's address translation table.
         */
        u64 __iomem *extended_offset_reg;

        /* Device structure for the underlying device. Only used for logging. */
        struct device *device;

        /* PCI system descriptor for the underlying device. */
        struct pci_dev *pci_dev;

        /* Location of the extended address bit for this Gasket device. */
        u64 extended_flag;

        /* Mutex protecting page table internals. */
        struct mutex mutex;

        /* Number of coherent pages accessible through this page table. */
        int num_coherent_pages;

        /*
         * One entry per coherent page allocated for this device, tracking the
         * DMA, kernel, and user addresses of each page.
         */
        struct gasket_coherent_page_entry *coherent_pages;
};

/* Allocate and initialize a page table for the given device. */
int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
                           const struct gasket_bar_data *bar_data,
                           const struct gasket_page_table_config *page_table_config,
                           struct device *device, struct pci_dev *pci_dev)
{
        ulong bytes;
        struct gasket_page_table *pg_tbl;
        ulong total_entries = page_table_config->total_entries;

        /*
         * TODO: Verify config->total_entries against the value read from the
         * hardware register that contains the page table size.
         */
        if (total_entries == ULONG_MAX) {
                dev_dbg(device,
                        "Error reading page table size. Initializing page table with size 0\n");
                total_entries = 0;
        }

        dev_dbg(device,
                "Attempting to initialize page table of size 0x%lx\n",
                total_entries);

        dev_dbg(device,
                "Table has base reg 0x%x, extended offset reg 0x%x\n",
                page_table_config->base_reg,
                page_table_config->extended_reg);

        *ppg_tbl = kzalloc(sizeof(**ppg_tbl), GFP_KERNEL);
        if (!*ppg_tbl) {
                dev_dbg(device, "No memory for page table\n");
                return -ENOMEM;
        }

        pg_tbl = *ppg_tbl;
        bytes = total_entries * sizeof(struct gasket_page_table_entry);
        if (bytes != 0) {
                pg_tbl->entries = vzalloc(bytes);
                if (!pg_tbl->entries) {
                        dev_dbg(device,
                                "No memory for address translation metadata\n");
                        kfree(pg_tbl);
                        *ppg_tbl = NULL;
                        return -ENOMEM;
                }
        }

        mutex_init(&pg_tbl->mutex);
        memcpy(&pg_tbl->config, page_table_config, sizeof(*page_table_config));
        if (pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_NORMAL ||
            pg_tbl->config.mode == GASKET_PAGE_TABLE_MODE_SIMPLE) {
                pg_tbl->num_simple_entries = total_entries;
                pg_tbl->num_extended_entries = 0;
                pg_tbl->extended_flag = 1ull << page_table_config->extended_bit;
        } else {
                pg_tbl->num_simple_entries = 0;
                pg_tbl->num_extended_entries = total_entries;
                pg_tbl->extended_flag = 0;
        }
        pg_tbl->num_active_pages = 0;
        pg_tbl->base_slot =
                (u64 __iomem *)&bar_data->virt_base[page_table_config->base_reg];
        pg_tbl->extended_offset_reg =
                (u64 __iomem *)&bar_data->virt_base[page_table_config->extended_reg];
        pg_tbl->device = get_device(device);
        pg_tbl->pci_dev = pci_dev;

        dev_dbg(device, "Page table initialized successfully\n");

        return 0;
}
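
/*
 * Illustrative only (not from this file): a hypothetical configuration for a
 * device exposing 8192 entries in normal mode might look like the following.
 * The register offsets and flag bit are device-specific values invented for
 * the example; the field names come from struct gasket_page_table_config as
 * used above.
 *
 *        static const struct gasket_page_table_config example_pt_config = {
 *                .mode = GASKET_PAGE_TABLE_MODE_NORMAL,
 *                .total_entries = 8192,
 *                .base_reg = 0x48000,      // device page-table base register
 *                .extended_reg = 0x48008,  // extended-offset register
 *                .extended_bit = 63,       // extended-address flag bit
 *        };
 *
 * gasket_page_table_init() would then be called with this config, the BAR
 * data, and the device pointers during device setup.
 */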

/*
 * Check if a range of PTEs is free.
 * The page table mutex must be held by the caller.
 */
static bool gasket_is_pte_range_free(struct gasket_page_table_entry *ptes,
                                     uint num_entries)
{
        int i;

        for (i = 0; i < num_entries; i++) {
                if (ptes[i].status != PTE_FREE)
                        return false;
        }

        return true;
}

/*
 * Free a second-level page [sub]table.
 * The page table mutex must be held before this call.
 */
static void gasket_free_extended_subtable(struct gasket_page_table *pg_tbl,
                                          struct gasket_page_table_entry *pte,
                                          u64 __iomem *slot)
{
        /* Release the page table from the driver. */
        pte->status = PTE_FREE;

        /* Release the page table from the device. */
        writeq(0, slot);

        if (pte->dma_addr)
                dma_unmap_page(pg_tbl->device, pte->dma_addr, PAGE_SIZE,
                               DMA_TO_DEVICE);

        vfree(pte->sublevel);

        if (pte->page)
                free_page((ulong)page_address(pte->page));

        memset(pte, 0, sizeof(struct gasket_page_table_entry));
}

/*
 * Release any extended subtables whose second-level entries are all free.
 * The page table mutex must be held by the caller.
 */
static void
gasket_page_table_garbage_collect_nolock(struct gasket_page_table *pg_tbl)
{
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot;

        /*
         * Walk the level 0 entries of the extended region; free any subtable
         * with no second-level entries in use.
         */
        for (pte = pg_tbl->entries + pg_tbl->num_simple_entries,
             slot = pg_tbl->base_slot + pg_tbl->num_simple_entries;
             pte < pg_tbl->entries + pg_tbl->config.total_entries;
             pte++, slot++) {
                if (pte->status == PTE_INUSE) {
                        if (gasket_is_pte_range_free(pte->sublevel,
                                                     GASKET_PAGES_PER_SUBTABLE))
                                gasket_free_extended_subtable(pg_tbl, pte,
                                                              slot);
                }
        }
}

/* Lock-taking wrapper around the collector above. */
void gasket_page_table_garbage_collect(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_garbage_collect_nolock(pg_tbl);
        mutex_unlock(&pg_tbl->mutex);
}

/* Tear down the page table and free its metadata. */
void gasket_page_table_cleanup(struct gasket_page_table *pg_tbl)
{
        /* Deallocate free second-level tables. */
        gasket_page_table_garbage_collect(pg_tbl);

        /* TODO: Check that all PTEs have been freed? */

        vfree(pg_tbl->entries);
        pg_tbl->entries = NULL;

        put_device(pg_tbl->device);
        kfree(pg_tbl);
}

/*
 * Repartition the table into num_simple_entries simple slots, with the
 * remainder extended. Fails if any slot in the affected range is in use.
 */
int gasket_page_table_partition(struct gasket_page_table *pg_tbl,
                                uint num_simple_entries)
{
        int i, start;

        mutex_lock(&pg_tbl->mutex);
        if (num_simple_entries > pg_tbl->config.total_entries) {
                mutex_unlock(&pg_tbl->mutex);
                return -EINVAL;
        }

        gasket_page_table_garbage_collect_nolock(pg_tbl);

        start = min(pg_tbl->num_simple_entries, num_simple_entries);

        for (i = start; i < pg_tbl->config.total_entries; i++) {
                if (pg_tbl->entries[i].status != PTE_FREE) {
                        dev_err(pg_tbl->device, "entry %d is not free\n", i);
                        mutex_unlock(&pg_tbl->mutex);
                        return -EBUSY;
                }
        }

        pg_tbl->num_simple_entries = num_simple_entries;
        pg_tbl->num_extended_entries =
                pg_tbl->config.total_entries - num_simple_entries;
        writeq(num_simple_entries, pg_tbl->extended_offset_reg);

        mutex_unlock(&pg_tbl->mutex);
        return 0;
}
EXPORT_SYMBOL(gasket_page_table_partition);

/*
 * Return whether a host buffer was mapped as coherent memory.
 *
 * A Gasket page table currently supports one contiguous DMA range, mapped to
 * one contiguous virtual memory range. Check whether host_addr lies within
 * that range.
 */
static int is_coherent(struct gasket_page_table *pg_tbl, ulong host_addr)
{
        u64 min, max;

        /* If no coherent pages have been allocated, nothing can match. */
        if (!pg_tbl->coherent_pages)
                return 0;

        min = (u64)pg_tbl->coherent_pages[0].user_virt;
        max = min + PAGE_SIZE * pg_tbl->num_coherent_pages;

        return min <= host_addr && host_addr < max;
}

/* Safely return a page to the OS. Returns true if a page was released. */
static bool gasket_release_page(struct page *page)
{
        if (!page)
                return false;

        if (!PageReserved(page))
                SetPageDirty(page);
        put_page(page);

        return true;
}

/*
 * Get and map last-level page table buffers.
 *
 * slots is the location(s) to write device-mapped page addresses. If this is
 * a simple mapping, these will be address translation registers. If this is
 * an extended mapping, these will be within a second-level page table
 * allocated on the host, and so must have their __iomem attribute casted
 * away.
 */
static int gasket_perform_mapping(struct gasket_page_table *pg_tbl,
                                  struct gasket_page_table_entry *ptes,
                                  u64 __iomem *slots, ulong host_addr,
                                  uint num_pages, int is_simple_mapping)
{
        int ret;
        ulong offset;
        struct page *page;
        dma_addr_t dma_addr;
        ulong page_addr;
        int i;

        for (i = 0; i < num_pages; i++) {
                page_addr = host_addr + i * PAGE_SIZE;
                offset = page_addr & (PAGE_SIZE - 1);
                if (is_coherent(pg_tbl, host_addr)) {
                        u64 off =
                                (u64)host_addr -
                                (u64)pg_tbl->coherent_pages[0].user_virt;
                        ptes[i].page = NULL;
                        ptes[i].offset = offset;
                        ptes[i].dma_addr = pg_tbl->coherent_pages[0].paddr +
                                           off + i * PAGE_SIZE;
                } else {
                        ret = get_user_pages_fast(page_addr - offset, 1,
                                                  FOLL_WRITE, &page);

                        if (ret <= 0) {
                                dev_err(pg_tbl->device,
                                        "get user pages failed for addr=0x%lx, offset=0x%lx [ret=%d]\n",
                                        page_addr, offset, ret);
                                return ret ? ret : -ENOMEM;
                        }
                        ++pg_tbl->num_active_pages;

                        ptes[i].page = page;
                        ptes[i].offset = offset;

                        /* Map the page into DMA space. */
                        ptes[i].dma_addr =
                                dma_map_page(pg_tbl->device, page, 0, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL);

                        if (dma_mapping_error(pg_tbl->device,
                                              ptes[i].dma_addr)) {
                                if (gasket_release_page(ptes[i].page))
                                        --pg_tbl->num_active_pages;

                                memset(&ptes[i], 0,
                                       sizeof(struct gasket_page_table_entry));
                                return -EINVAL;
                        }
                }

                /* Make the page's DMA address available to the device. */
                dma_addr = (ptes[i].dma_addr + offset) | GASKET_VALID_SLOT_FLAG;

                if (is_simple_mapping) {
                        writeq(dma_addr, &slots[i]);
                } else {
                        ((u64 __force *)slots)[i] = dma_addr;

                        /*
                         * Extended page table vectors are in DRAM, and so
                         * need to be synced each time they are updated.
                         */
                        dma_map_single(pg_tbl->device,
                                       (void *)&((u64 __force *)slots)[i],
                                       sizeof(u64), DMA_TO_DEVICE);
                }
                ptes[i].status = PTE_INUSE;
        }
        return 0;
}
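
/*
 * Slot-value example (editor's illustration; the addresses are invented): a
 * page whose DMA address is 0x2a000000, mapped from a host address with page
 * offset 0x10, is published to the device as
 * (0x2a000000 + 0x10) | GASKET_VALID_SLOT_FLAG == 0x2a000011, where the low
 * bit marks the slot as present.
 */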

/*
 * Return the index of the page for the address in the simple table.
 * Does not perform validity checking.
 */
static int gasket_simple_page_idx(struct gasket_page_table *pg_tbl,
                                  ulong dev_addr)
{
        return (dev_addr >> GASKET_SIMPLE_PAGE_SHIFT) &
                (pg_tbl->config.total_entries - 1);
}

/*
 * Return the level 0 page index for the given address.
 * Does not perform validity checking.
 */
static ulong gasket_extended_lvl0_page_idx(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr)
{
        return (dev_addr >> GASKET_EXTENDED_LVL0_SHIFT) &
                (pg_tbl->config.total_entries - 1);
}

/*
 * Return the level 1 page index for the given address.
 * Does not perform validity checking.
 */
static ulong gasket_extended_lvl1_page_idx(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr)
{
        return (dev_addr >> GASKET_EXTENDED_LVL1_SHIFT) &
               (GASKET_PAGES_PER_SUBTABLE - 1);
}

/*
 * Allocate page table entries in a simple table.
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_simple_entries(struct gasket_page_table *pg_tbl,
                                       ulong dev_addr, uint num_pages)
{
        if (!gasket_is_pte_range_free(pg_tbl->entries +
                                      gasket_simple_page_idx(pg_tbl, dev_addr),
                                      num_pages))
                return -EBUSY;

        return 0;
}

/*
 * Unmap and release mapped pages.
 * The page table mutex must be held by the caller.
 */
static void gasket_perform_unmapping(struct gasket_page_table *pg_tbl,
                                     struct gasket_page_table_entry *ptes,
                                     u64 __iomem *slots, uint num_pages,
                                     int is_simple_mapping)
{
        int i;

        /*
         * For each page table entry and corresponding entry in the device's
         * address translation table:
         */
        for (i = 0; i < num_pages; i++) {
                /* release the address from the device, */
                if (is_simple_mapping || ptes[i].status == PTE_INUSE) {
                        writeq(0, &slots[i]);
                } else {
                        ((u64 __force *)slots)[i] = 0;
                        /* sync above PTE update before releasing the page */
                        wmb();
                }

                /* release the address from the driver, */
                if (ptes[i].status == PTE_INUSE) {
                        if (ptes[i].page && ptes[i].dma_addr) {
                                dma_unmap_page(pg_tbl->device, ptes[i].dma_addr,
                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
                        }
                        if (gasket_release_page(ptes[i].page))
                                --pg_tbl->num_active_pages;
                }

                /* and clear the PTE. */
                memset(&ptes[i], 0, sizeof(struct gasket_page_table_entry));
        }
}

/*
 * Unmap and release pages mapped to simple addresses.
 * The page table mutex must be held by the caller.
 */
static void gasket_unmap_simple_pages(struct gasket_page_table *pg_tbl,
                                      ulong dev_addr, uint num_pages)
{
        uint slot = gasket_simple_page_idx(pg_tbl, dev_addr);

        gasket_perform_unmapping(pg_tbl, pg_tbl->entries + slot,
                                 pg_tbl->base_slot + slot, num_pages, 1);
}

/*
 * Unmap and release pages mapped to extended addresses.
 * The page table mutex must be held by the caller.
 */
static void gasket_unmap_extended_pages(struct gasket_page_table *pg_tbl,
                                        ulong dev_addr, uint num_pages)
{
        uint slot_idx, remain, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot_base;

        remain = num_pages;
        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                /* Process at most the rest of this subtable. */
                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);

                if (pte->status == PTE_INUSE) {
                        slot_base = (u64 __iomem *)(page_address(pte->page) +
                                                    pte->offset);
                        gasket_perform_unmapping(pg_tbl,
                                                 pte->sublevel + slot_idx,
                                                 slot_base + slot_idx, len, 0);
                }

                remain -= len;
                slot_idx = 0;
                pte++;
        }
}

/* Evaluate to nonzero if the specified device address is simple. */
static inline bool gasket_addr_is_simple(struct gasket_page_table *pg_tbl,
                                         ulong addr)
{
        return !((addr) & (pg_tbl)->extended_flag);
}

/*
 * Convert (simple, page, offset) into a device address: the page index is
 * shifted into place and the offset OR'd in; for an extended address, the
 * extended flag bit is set as well. This is the inverse of the decomposition
 * performed by the *_page_idx() helpers above.
 */
static ulong gasket_components_to_dev_address(struct gasket_page_table *pg_tbl,
                                              int is_simple, uint page_index,
                                              uint offset)
{
        ulong dev_addr = (page_index << GASKET_SIMPLE_PAGE_SHIFT) | offset;

        return is_simple ? dev_addr : (pg_tbl->extended_flag | dev_addr);
}
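
/*
 * Example round trip (editor's illustration, assuming total_entries is a
 * power of two larger than the index): for simple page_index 1000 and offset
 * 0x1FF, gasket_components_to_dev_address(pg_tbl, 1, 1000, 0x1FF) yields
 * (1000 << 12) | 0x1FF == 0x3E81FF, and gasket_simple_page_idx() recovers
 * 1000 from that address. The validity checkers below rely on this round
 * trip: an address whose reconstruction does not match the original must
 * have had stray bits set.
 */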

/*
 * Validity checking for simple addresses.
 *
 * Verify that address translation commutes (from address to/from page +
 * offset) and that the requested page range starts and ends within the set
 * of currently-partitioned simple pages.
 */
static bool gasket_is_simple_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                          ulong dev_addr, uint num_pages)
{
        ulong page_offset = dev_addr & (PAGE_SIZE - 1);
        ulong page_index =
                (dev_addr / PAGE_SIZE) & (pg_tbl->config.total_entries - 1);

        if (gasket_components_to_dev_address(pg_tbl, 1, page_index,
                                             page_offset) != dev_addr) {
                dev_err(pg_tbl->device, "address is invalid, 0x%lX\n",
                        dev_addr);
                return true;
        }

        if (page_index >= pg_tbl->num_simple_entries) {
                dev_err(pg_tbl->device,
                        "starting slot at %lu is too large, max is < %u\n",
                        page_index, pg_tbl->num_simple_entries);
                return true;
        }

        if (page_index + num_pages > pg_tbl->num_simple_entries) {
                dev_err(pg_tbl->device,
                        "ending slot at %lu is too large, max is <= %u\n",
                        page_index + num_pages, pg_tbl->num_simple_entries);
                return true;
        }

        return false;
}

/*
 * Validity checking for extended addresses.
 *
 * Verify that address translation commutes (from address to/from page +
 * offset) and that the requested page range starts and ends within the set
 * of currently-partitioned extended pages.
 */
static bool gasket_is_extended_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                            ulong dev_addr, uint num_pages)
{
        /* Starting byte index of dev_addr into the first mapped page. */
        ulong page_offset = dev_addr & (PAGE_SIZE - 1);
        ulong page_global_idx, page_lvl0_idx;
        ulong num_lvl0_pages;
        ulong addr;

        /* Check whether the device address is out of bounds. */
        addr = dev_addr & ~((pg_tbl)->extended_flag);
        if (addr >> (GASKET_EXTENDED_LVL0_WIDTH + GASKET_EXTENDED_LVL0_SHIFT)) {
                dev_err(pg_tbl->device, "device address out of bounds: 0x%lx\n",
                        dev_addr);
                return true;
        }

        /* Find the starting sub-page index in the space of all sub-pages. */
        page_global_idx = (dev_addr / PAGE_SIZE) &
                (pg_tbl->config.total_entries * GASKET_PAGES_PER_SUBTABLE - 1);

        /* Find the starting level 0 index. */
        page_lvl0_idx = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        /* Get the count of affected level 0 pages. */
        num_lvl0_pages = DIV_ROUND_UP(num_pages, GASKET_PAGES_PER_SUBTABLE);

        if (gasket_components_to_dev_address(pg_tbl, 0, page_global_idx,
                                             page_offset) != dev_addr) {
                dev_err(pg_tbl->device, "address is invalid: 0x%lx\n",
                        dev_addr);
                return true;
        }

        if (page_lvl0_idx >= pg_tbl->num_extended_entries) {
                dev_err(pg_tbl->device,
                        "starting level 0 slot at %lu is too large, max is < %u\n",
                        page_lvl0_idx, pg_tbl->num_extended_entries);
                return true;
        }

        if (page_lvl0_idx + num_lvl0_pages > pg_tbl->num_extended_entries) {
                dev_err(pg_tbl->device,
                        "ending level 0 slot at %lu is too large, max is <= %u\n",
                        page_lvl0_idx + num_lvl0_pages,
                        pg_tbl->num_extended_entries);
                return true;
        }

        return false;
}

/*
 * Non-locking entry to the unmapping routines.
 * The page table mutex must be held by the caller.
 */
static void gasket_page_table_unmap_nolock(struct gasket_page_table *pg_tbl,
                                           ulong dev_addr, uint num_pages)
{
        if (!num_pages)
                return;

        if (gasket_addr_is_simple(pg_tbl, dev_addr))
                gasket_unmap_simple_pages(pg_tbl, dev_addr, num_pages);
        else
                gasket_unmap_extended_pages(pg_tbl, dev_addr, num_pages);
}

/*
 * Allocate and map pages to simple addresses.
 * If there is an error, no pages are left mapped.
 */
static int gasket_map_simple_pages(struct gasket_page_table *pg_tbl,
                                   ulong host_addr, ulong dev_addr,
                                   uint num_pages)
{
        int ret;
        uint slot_idx = gasket_simple_page_idx(pg_tbl, dev_addr);

        ret = gasket_alloc_simple_entries(pg_tbl, dev_addr, num_pages);
        if (ret) {
                dev_err(pg_tbl->device,
                        "page table slots %u (@ 0x%lx) to %u are not available\n",
                        slot_idx, dev_addr, slot_idx + num_pages - 1);
                return ret;
        }

        ret = gasket_perform_mapping(pg_tbl, pg_tbl->entries + slot_idx,
                                     pg_tbl->base_slot + slot_idx, host_addr,
                                     num_pages, 1);

        if (ret) {
                gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
                dev_err(pg_tbl->device, "gasket_perform_mapping %d\n", ret);
        }
        return ret;
}

/*
 * Allocate a second-level page table.
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_extended_subtable(struct gasket_page_table *pg_tbl,
                                          struct gasket_page_table_entry *pte,
                                          u64 __iomem *slot)
{
        ulong page_addr, subtable_bytes;
        dma_addr_t dma_addr;

        /*
         * The GFP_DMA flag must be passed for architectures on which part of
         * the memory range is not considered DMA'able.
         */
        page_addr = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!page_addr)
                return -ENOMEM;
        pte->page = virt_to_page((void *)page_addr);
        pte->offset = 0;

        subtable_bytes = sizeof(struct gasket_page_table_entry) *
                GASKET_PAGES_PER_SUBTABLE;
        pte->sublevel = vzalloc(subtable_bytes);
        if (!pte->sublevel) {
                free_page(page_addr);
                memset(pte, 0, sizeof(struct gasket_page_table_entry));
                return -ENOMEM;
        }

        /* Map the subtable page into DMA space. */
        pte->dma_addr = dma_map_page(pg_tbl->device, pte->page, 0, PAGE_SIZE,
                                     DMA_TO_DEVICE);
        if (dma_mapping_error(pg_tbl->device, pte->dma_addr)) {
                free_page(page_addr);
                vfree(pte->sublevel);
                memset(pte, 0, sizeof(struct gasket_page_table_entry));
                return -ENOMEM;
        }

        /* Make the subtable's DMA address available to the device. */
        dma_addr = (pte->dma_addr + pte->offset) | GASKET_VALID_SLOT_FLAG;
        writeq(dma_addr, slot);

        pte->status = PTE_INUSE;

        return 0;
}

/*
 * Allocate slots in an extended page table. Check to see if a range of page
 * table slots is available; if necessary, memory is allocated for second-level
 * page tables.
 *
 * Note that memory for second-level page tables is allocated as needed, but
 * that memory is only freed on the final close of the device file, when the
 * page tables are repartitioned, or when the device is removed.
 *
 * The page table mutex must be held by the caller.
 */
static int gasket_alloc_extended_entries(struct gasket_page_table *pg_tbl,
                                         ulong dev_addr, uint num_entries)
{
        int ret = 0;
        uint remain, subtable_slot_idx, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot;

        remain = num_entries;
        subtable_slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
        slot = pg_tbl->base_slot + pg_tbl->num_simple_entries +
               gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                len = min(remain,
                          GASKET_PAGES_PER_SUBTABLE - subtable_slot_idx);

                if (pte->status == PTE_FREE) {
                        ret = gasket_alloc_extended_subtable(pg_tbl, pte, slot);
                        if (ret) {
                                dev_err(pg_tbl->device,
                                        "no memory for extended addr subtable\n");
                                return ret;
                        }
                } else {
                        if (!gasket_is_pte_range_free(pte->sublevel +
                                                      subtable_slot_idx, len))
                                return -EBUSY;
                }

                remain -= len;
                subtable_slot_idx = 0;
                pte++;
                slot++;
        }

        return 0;
}

/*
 * Get and map buffers to extended addresses.
 * If there is an error, no pages are left mapped.
 */
static int gasket_map_extended_pages(struct gasket_page_table *pg_tbl,
                                     ulong host_addr, ulong dev_addr,
                                     uint num_pages)
{
        int ret;
        ulong dev_addr_end;
        uint slot_idx, remain, len;
        struct gasket_page_table_entry *pte;
        u64 __iomem *slot_base;

        ret = gasket_alloc_extended_entries(pg_tbl, dev_addr, num_pages);
        if (ret) {
                dev_addr_end = dev_addr + num_pages * PAGE_SIZE - 1;
                dev_err(pg_tbl->device,
                        "page table slots (%lu,%lu) (@ 0x%lx) to (%lu,%lu) are not available\n",
                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr),
                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr),
                        dev_addr,
                        gasket_extended_lvl0_page_idx(pg_tbl, dev_addr_end),
                        gasket_extended_lvl1_page_idx(pg_tbl, dev_addr_end));
                return ret;
        }

        remain = num_pages;
        slot_idx = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
        pte = pg_tbl->entries + pg_tbl->num_simple_entries +
              gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);

        while (remain > 0) {
                len = min(remain, GASKET_PAGES_PER_SUBTABLE - slot_idx);

                slot_base =
                        (u64 __iomem *)(page_address(pte->page) + pte->offset);
                ret = gasket_perform_mapping(pg_tbl, pte->sublevel + slot_idx,
                                             slot_base + slot_idx, host_addr,
                                             len, 0);
                if (ret) {
                        gasket_page_table_unmap_nolock(pg_tbl, dev_addr,
                                                       num_pages);
                        return ret;
                }

                remain -= len;
                slot_idx = 0;
                pte++;
                host_addr += len * PAGE_SIZE;
        }

        return 0;
}

/*
 * See gasket_page_table.h for general description.
 *
 * gasket_page_table_map calls either gasket_map_simple_pages() or
 * gasket_map_extended_pages() to actually perform the mapping.
 *
 * The page table mutex is held for the entire operation.
 */
int gasket_page_table_map(struct gasket_page_table *pg_tbl, ulong host_addr,
                          ulong dev_addr, uint num_pages)
{
        int ret;

        if (!num_pages)
                return 0;

        mutex_lock(&pg_tbl->mutex);

        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
                ret = gasket_map_simple_pages(pg_tbl, host_addr, dev_addr,
                                              num_pages);
        } else {
                ret = gasket_map_extended_pages(pg_tbl, host_addr, dev_addr,
                                                num_pages);
        }

        mutex_unlock(&pg_tbl->mutex);
        return ret;
}
EXPORT_SYMBOL(gasket_page_table_map);
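
/*
 * Usage sketch (editor's illustration; the caller and the variables
 * host_addr, dev_addr, and bytes are hypothetical, not part of this file).
 * A driver servicing a map-buffer request might validate and then map a
 * page-aligned user buffer like so:
 *
 *        if (gasket_page_table_are_addrs_bad(pg_tbl, host_addr, dev_addr,
 *                                            bytes))
 *                return -EINVAL;
 *        ret = gasket_page_table_map(pg_tbl, host_addr, dev_addr,
 *                                    bytes / PAGE_SIZE);
 *        if (ret)
 *                return ret;
 *        ...
 *        gasket_page_table_unmap(pg_tbl, dev_addr, bytes / PAGE_SIZE);
 */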

/*
 * See gasket_page_table.h for general description.
 *
 * gasket_page_table_unmap takes the page table lock and calls either
 * gasket_unmap_simple_pages() or gasket_unmap_extended_pages() to actually
 * unmap the pages from device space.
 *
 * The page table mutex is held for the entire operation.
 */
void gasket_page_table_unmap(struct gasket_page_table *pg_tbl, ulong dev_addr,
                             uint num_pages)
{
        if (!num_pages)
                return;

        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_nolock(pg_tbl, dev_addr, num_pages);
        mutex_unlock(&pg_tbl->mutex);
}
EXPORT_SYMBOL(gasket_page_table_unmap);

static void gasket_page_table_unmap_all_nolock(struct gasket_page_table *pg_tbl)
{
        gasket_unmap_simple_pages(pg_tbl,
                                  gasket_components_to_dev_address(pg_tbl, 1, 0,
                                                                   0),
                                  pg_tbl->num_simple_entries);
        gasket_unmap_extended_pages(pg_tbl,
                                    gasket_components_to_dev_address(pg_tbl, 0,
                                                                     0, 0),
                                    pg_tbl->num_extended_entries *
                                    GASKET_PAGES_PER_SUBTABLE);
}

/* Unmap every simple and extended page in the table. */
void gasket_page_table_unmap_all(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_all_nolock(pg_tbl);
        mutex_unlock(&pg_tbl->mutex);
}
EXPORT_SYMBOL(gasket_page_table_unmap_all);

/*
 * Unmap everything, then mark the whole table simple by writing
 * total_entries to the extended offset register.
 */
void gasket_page_table_reset(struct gasket_page_table *pg_tbl)
{
        mutex_lock(&pg_tbl->mutex);
        gasket_page_table_unmap_all_nolock(pg_tbl);
        writeq(pg_tbl->config.total_entries, pg_tbl->extended_offset_reg);
        mutex_unlock(&pg_tbl->mutex);
}

/* Look up the host page and offset backing dev_addr, if mapped. */
int gasket_page_table_lookup_page(struct gasket_page_table *pg_tbl,
                                  ulong dev_addr, struct page **ppage,
                                  ulong *poffset)
{
        uint page_num;
        struct gasket_page_table_entry *pte;

        mutex_lock(&pg_tbl->mutex);
        if (gasket_addr_is_simple(pg_tbl, dev_addr)) {
                page_num = gasket_simple_page_idx(pg_tbl, dev_addr);
                if (page_num >= pg_tbl->num_simple_entries)
                        goto fail;

                pte = pg_tbl->entries + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;
        } else {
                /* Find the level 0 entry, */
                page_num = gasket_extended_lvl0_page_idx(pg_tbl, dev_addr);
                if (page_num >= pg_tbl->num_extended_entries)
                        goto fail;

                pte = pg_tbl->entries + pg_tbl->num_simple_entries + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;

                /* and its contained level 1 entry. */
                page_num = gasket_extended_lvl1_page_idx(pg_tbl, dev_addr);
                pte = pte->sublevel + page_num;
                if (pte->status != PTE_INUSE)
                        goto fail;
        }

        *ppage = pte->page;
        *poffset = pte->offset;
        mutex_unlock(&pg_tbl->mutex);
        return 0;

fail:
        *ppage = NULL;
        *poffset = 0;
        mutex_unlock(&pg_tbl->mutex);
        return -EINVAL;
}

/* Validate a (host_addr, dev_addr, bytes) mapping request. */
bool gasket_page_table_are_addrs_bad(struct gasket_page_table *pg_tbl,
                                     ulong host_addr, ulong dev_addr,
                                     ulong bytes)
{
        if (host_addr & (PAGE_SIZE - 1)) {
                dev_err(pg_tbl->device,
                        "host mapping address 0x%lx must be page aligned\n",
                        host_addr);
                return true;
        }

        return gasket_page_table_is_dev_addr_bad(pg_tbl, dev_addr, bytes);
}
EXPORT_SYMBOL(gasket_page_table_are_addrs_bad);

/* Validate a device address range for mapping. */
bool gasket_page_table_is_dev_addr_bad(struct gasket_page_table *pg_tbl,
                                       ulong dev_addr, ulong bytes)
{
        uint num_pages = bytes / PAGE_SIZE;

        if (bytes & (PAGE_SIZE - 1)) {
                dev_err(pg_tbl->device,
                        "mapping size 0x%lX must be page aligned\n", bytes);
                return true;
        }

        if (num_pages == 0) {
                dev_err(pg_tbl->device,
                        "requested mapping is less than one page: %lu / %lu\n",
                        bytes, PAGE_SIZE);
                return true;
        }

        if (gasket_addr_is_simple(pg_tbl, dev_addr))
                return gasket_is_simple_dev_addr_bad(pg_tbl, dev_addr,
                                                     num_pages);
        return gasket_is_extended_dev_addr_bad(pg_tbl, dev_addr, num_pages);
}
EXPORT_SYMBOL(gasket_page_table_is_dev_addr_bad);

/* Return the total number of entries the table was configured with. */
uint gasket_page_table_max_size(struct gasket_page_table *page_table)
{
        if (!page_table)
                return 0;
        return page_table->config.total_entries;
}
EXPORT_SYMBOL(gasket_page_table_max_size);

/* Return the current number of simple plus extended entries. */
uint gasket_page_table_num_entries(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_simple_entries + pg_tbl->num_extended_entries;
}
EXPORT_SYMBOL(gasket_page_table_num_entries);

/* Return the current number of simple entries. */
uint gasket_page_table_num_simple_entries(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_simple_entries;
}
EXPORT_SYMBOL(gasket_page_table_num_simple_entries);

/* Return the number of actively mapped pages. */
uint gasket_page_table_num_active_pages(struct gasket_page_table *pg_tbl)
{
        if (!pg_tbl)
                return 0;
        return pg_tbl->num_active_pages;
}
EXPORT_SYMBOL(gasket_page_table_num_active_pages);

/* Report whether the page table is usable. */
int gasket_page_table_system_status(struct gasket_page_table *page_table)
{
        if (!page_table)
                return GASKET_STATUS_LAMED;

        if (gasket_page_table_num_entries(page_table) == 0) {
                dev_dbg(page_table->device, "Page table size is 0\n");
                return GASKET_STATUS_LAMED;
        }

        return GASKET_STATUS_ALIVE;
}

/* Record the user virtual addresses of the coherent DMA mapping. */
int gasket_set_user_virt(struct gasket_dev *gasket_dev, u64 size,
                         dma_addr_t dma_address, ulong vma)
{
        int j;
        struct gasket_page_table *pg_tbl;

        unsigned int num_pages = size / PAGE_SIZE;

        /*
         * TODO: Handle devices with multiple page tables; for now only the
         * first page table is supported.
         */
        pg_tbl = gasket_dev->page_table[0];
        if (!pg_tbl) {
                dev_dbg(gasket_dev->dev, "%s: invalid page table index\n",
                        __func__);
                return 0;
        }
        for (j = 0; j < num_pages; j++) {
                pg_tbl->coherent_pages[j].user_virt =
                        (u64)vma + j * PAGE_SIZE;
        }
        return 0;
}

/* Allocate a block of coherent memory and set up its per-page tracking. */
int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
                                 dma_addr_t *dma_address, u64 index)
{
        dma_addr_t handle;
        void *mem;
        int j;
        unsigned int num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        const struct gasket_driver_desc *driver_desc =
                gasket_get_driver_desc(gasket_dev);

        if (!gasket_dev->page_table[index])
                return -EFAULT;

        if (num_pages == 0)
                return -EINVAL;

        mem = dma_alloc_coherent(gasket_get_device(gasket_dev),
                                 num_pages * PAGE_SIZE, &handle, GFP_KERNEL);
        if (!mem)
                goto nomem;

        gasket_dev->page_table[index]->num_coherent_pages = num_pages;

        /* Allocate the per-page tracking metadata. */
        gasket_dev->page_table[index]->coherent_pages =
                kcalloc(num_pages,
                        sizeof(*gasket_dev->page_table[index]->coherent_pages),
                        GFP_KERNEL);
        if (!gasket_dev->page_table[index]->coherent_pages)
                goto nomem;

        gasket_dev->coherent_buffer.length_bytes = PAGE_SIZE * num_pages;
        gasket_dev->coherent_buffer.phys_base = handle;
        gasket_dev->coherent_buffer.virt_base = mem;

        *dma_address = driver_desc->coherent_buffer_description.base;
        for (j = 0; j < num_pages; j++) {
                gasket_dev->page_table[index]->coherent_pages[j].paddr =
                        handle + j * PAGE_SIZE;
                gasket_dev->page_table[index]->coherent_pages[j].kernel_virt =
                        (u64)mem + j * PAGE_SIZE;
        }

        return 0;

nomem:
        if (mem) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  num_pages * PAGE_SIZE, mem, handle);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }

        kfree(gasket_dev->page_table[index]->coherent_pages);
        gasket_dev->page_table[index]->coherent_pages = NULL;
        gasket_dev->page_table[index]->num_coherent_pages = 0;
        return -ENOMEM;
}

/* Free a block of coherent memory previously allocated above. */
int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, u64 size,
                                dma_addr_t dma_address, u64 index)
{
        const struct gasket_driver_desc *driver_desc;

        if (!gasket_dev->page_table[index])
                return -EFAULT;

        driver_desc = gasket_get_driver_desc(gasket_dev);

        if (driver_desc->coherent_buffer_description.base != dma_address)
                return -EADDRNOTAVAIL;

        if (gasket_dev->coherent_buffer.length_bytes) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  gasket_dev->coherent_buffer.length_bytes,
                                  gasket_dev->coherent_buffer.virt_base,
                                  gasket_dev->coherent_buffer.phys_base);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }

        kfree(gasket_dev->page_table[index]->coherent_pages);
        gasket_dev->page_table[index]->coherent_pages = NULL;
        gasket_dev->page_table[index]->num_coherent_pages = 0;

        return 0;
}

/* Release all coherent memory. */
void gasket_free_coherent_memory_all(struct gasket_dev *gasket_dev, u64 index)
{
        if (!gasket_dev->page_table[index])
                return;

        if (gasket_dev->coherent_buffer.length_bytes) {
                dma_free_coherent(gasket_get_device(gasket_dev),
                                  gasket_dev->coherent_buffer.length_bytes,
                                  gasket_dev->coherent_buffer.virt_base,
                                  gasket_dev->coherent_buffer.phys_base);
                gasket_dev->coherent_buffer.length_bytes = 0;
                gasket_dev->coherent_buffer.virt_base = NULL;
                gasket_dev->coherent_buffer.phys_base = 0;
        }
}