This source file includes the following definitions:
- __genwqe_writeq
- __genwqe_readq
- __genwqe_writel
- __genwqe_readl
- genwqe_read_app_id
- genwqe_init_crc32
- genwqe_crc32
- __genwqe_alloc_consistent
- __genwqe_free_consistent
- genwqe_unmap_pages
- genwqe_map_pages
- genwqe_sgl_size
- genwqe_alloc_sync_sgl
- genwqe_setup_sgl
- genwqe_free_sync_sgl
- genwqe_free_user_pages
- genwqe_user_vmap
- genwqe_user_vunmap
- genwqe_card_type
- genwqe_card_reset
- genwqe_read_softreset
- genwqe_set_interrupt_capability
- genwqe_reset_interrupt_capability
- set_reg_idx
- set_reg
- genwqe_read_ffdc_regs
- genwqe_ffdc_buff_size
- genwqe_ffdc_buff_read
- genwqe_write_vreg
- genwqe_read_vreg
- genwqe_base_clock_frequency
- genwqe_stop_traps
- genwqe_start_traps
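
/*
 * IBM Accelerator Family 'GenWQE' - miscellaneous helper functions
 * used by the other GenWQE driver parts: MMIO register access, CRC32,
 * DMA buffer and scatter-gather list handling, user-memory mapping
 * and FFDC register dumps.
 */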
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mm.h>		/* get_user_pages_fast() */
#include <linux/uaccess.h>	/* copy_from_user()/copy_to_user() */
#include <asm/pgtable.h>

#include "genwqe_driver.h"
#include "card_base.h"
#include "card_ddcb.h"
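
/**
 * __genwqe_writeq() - Write 64-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  byte offset within BAR
 * @val:        64-bit value
 *
 * Return: 0 if success; < 0 if error
 */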
int __genwqe_writeq(struct genwqe_dev *cd, u64 byte_offs, u64 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writeq((__force u64)cpu_to_be64(val), cd->mmio + byte_offs);
	return 0;
}
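
/**
 * __genwqe_readq() - Read 64-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  offset within BAR
 *
 * Return: value from register; all-ones if the card is not accessible
 *         or hardware failure injection is active
 */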
u64 __genwqe_readq(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffffffffffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_FATAL) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x000000000000ffffull;

	if ((cd->err_inject & GENWQE_INJECT_GFIR_INFO) &&
	    (byte_offs == IO_SLC_CFGREG_GFIR))
		return 0x00000000ffff0000ull;

	if (cd->mmio == NULL)
		return 0xffffffffffffffffull;

	return be64_to_cpu((__force __be64)__raw_readq(cd->mmio + byte_offs));
}
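
/**
 * __genwqe_writel() - Write 32-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  byte offset within BAR
 * @val:        32-bit value
 *
 * Return: 0 if success; < 0 if error
 */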
int __genwqe_writel(struct genwqe_dev *cd, u64 byte_offs, u32 val)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return -EIO;

	if (cd->mmio == NULL)
		return -EIO;

	if (pci_channel_offline(pci_dev))
		return -EIO;

	__raw_writel((__force u32)cpu_to_be32(val), cd->mmio + byte_offs);
	return 0;
}
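
/**
 * __genwqe_readl() - Read 32-bit register
 * @cd:         genwqe device descriptor
 * @byte_offs:  offset within BAR
 *
 * Return: value from register
 */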
u32 __genwqe_readl(struct genwqe_dev *cd, u64 byte_offs)
{
	if (cd->err_inject & GENWQE_INJECT_HARDWARE_FAILURE)
		return 0xffffffff;

	if (cd->mmio == NULL)
		return 0xffffffff;

	return be32_to_cpu((__force __be32)__raw_readl(cd->mmio + byte_offs));
}
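
/**
 * genwqe_read_app_id() - Extract app_id
 * @cd:         genwqe device descriptor
 * @app_name:   carrier used to fetch the 4 characters of the app id
 * @len:        length of the destination buffer
 *
 * cd->app_unitcfg must be filled with valid data first. Blanks are
 * skipped; non-printable characters are replaced by 'X'.
 *
 * Return: number of characters copied
 */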
int genwqe_read_app_id(struct genwqe_dev *cd, char *app_name, int len)
{
	int i, j;
	u32 app_id = (u32)cd->app_unitcfg;

	memset(app_name, 0, len);
	for (i = 0, j = 0; j < min(len, 4); j++) {
		char ch = (char)((app_id >> (24 - j*8)) & 0xff);

		if (ch == ' ')
			continue;
		app_name[i++] = isprint(ch) ? ch : 'X';
	}
	return i;
}
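
/**
 * genwqe_init_crc32() - Prepare a lookup table for fast crc32 calculations
 *
 * The kernel's generic crc32 helpers use a different polynomial, so
 * the GenWQE polynomial 0x20044009 gets its own table here.
 */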
#define CRC32_POLYNOMIAL	0x20044009
static u32 crc32_tab[256];	/* crc32 lookup table */

void genwqe_init_crc32(void)
{
	int i, j;
	u32 crc;

	for (i = 0; i < 256; i++) {
		crc = i << 24;
		for (j = 0; j < 8; j++) {
			if (crc & 0x80000000)
				crc = (crc << 1) ^ CRC32_POLYNOMIAL;
			else
				crc = (crc << 1);
		}
		crc32_tab[i] = crc;
	}
}
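
/**
 * genwqe_crc32() - Generate 32-bit crc as required for DDCBs
 * @buff:       pointer to data buffer
 * @len:        length of data for calculation
 * @init:       initial crc (0xffffffff at start)
 *
 * polynomial = x^32 + x^29 + x^18 + x^14 + x^3 + 1 (0x20044009)
 *
 * Return: crc32 checksum
 */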
u32 genwqe_crc32(u8 *buff, size_t len, u32 init)
{
	int i;
	u32 crc;

	crc = init;
	while (len--) {
		i = ((crc >> 24) ^ *buff++) & 0xFF;
		crc = (crc << 8) ^ crc32_tab[i];
	}
	return crc;
}

void *__genwqe_alloc_consistent(struct genwqe_dev *cd, size_t size,
				dma_addr_t *dma_handle)
{
	if (get_order(size) >= MAX_ORDER)
		return NULL;

	return dma_alloc_coherent(&cd->pci_dev->dev, size, dma_handle,
				  GFP_KERNEL);
}

void __genwqe_free_consistent(struct genwqe_dev *cd, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
	if (vaddr == NULL)
		return;

	dma_free_coherent(&cd->pci_dev->dev, size, vaddr, dma_handle);
}

static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
			       int num_pages)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
		pci_unmap_page(pci_dev, dma_list[i],
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		dma_list[i] = 0x0;
	}
}

static int genwqe_map_pages(struct genwqe_dev *cd,
			    struct page **page_list, int num_pages,
			    dma_addr_t *dma_list)
{
	int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	/* establish DMA mapping for requested pages */
	for (i = 0; i < num_pages; i++) {
		dma_addr_t daddr;

		dma_list[i] = 0x0;
		daddr = pci_map_page(pci_dev, page_list[i],
				     0,	/* map fully aligned pages */
				     PAGE_SIZE,
				     PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(pci_dev, daddr)) {
			dev_err(&pci_dev->dev,
				"[%s] err: no dma addr daddr=%016llx!\n",
				__func__, (long long)daddr);
			goto err;
		}

		dma_list[i] = daddr;
	}
	return 0;

 err:
	genwqe_unmap_pages(cd, dma_list, num_pages);
	return -EIO;
}

static int genwqe_sgl_size(int num_pages)
{
	/* 8 entries per block: 7 data entries + 1 chaining entry */
	int len, num_tlb = num_pages / 7;

	len = sizeof(struct sg_entry) * (num_pages + num_tlb + 1);
	return roundup(len, PAGE_SIZE);
}
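
/**
 * genwqe_alloc_sync_sgl() - Allocate memory for sgl and overlapping pages
 * @cd:         genwqe device descriptor
 * @sgl:        scatter gather list to be set up
 * @user_addr:  user-space address of the memory block
 * @user_size:  size of the memory block
 * @write:      set when data is to be copied back to user-space after DMA
 *
 * Pages which might overlap other user-space memory blocks (an
 * incomplete first page and an incomplete last page) are buffered in
 * separately allocated pages, such that DMA does not touch bytes
 * outside of the requested block. Data is copied from user-space into
 * the buffered pages.
 */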
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
			  void __user *user_addr, size_t user_size, int write)
{
	int ret = -ENOMEM;
	struct pci_dev *pci_dev = cd->pci_dev;

	sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
	sgl->fpage_size = min_t(size_t, PAGE_SIZE - sgl->fpage_offs, user_size);
	sgl->nr_pages = DIV_ROUND_UP(sgl->fpage_offs + user_size, PAGE_SIZE);
	sgl->lpage_size = (user_size - sgl->fpage_size) % PAGE_SIZE;

	dev_dbg(&pci_dev->dev, "[%s] uaddr=%p usize=%8ld nr_pages=%ld fpage_offs=%lx fpage_size=%ld lpage_size=%ld\n",
		__func__, user_addr, user_size, sgl->nr_pages,
		sgl->fpage_offs, sgl->fpage_size, sgl->lpage_size);

	sgl->user_addr = user_addr;
	sgl->user_size = user_size;
	sgl->write = write;
	sgl->sgl_size = genwqe_sgl_size(sgl->nr_pages);

	if (get_order(sgl->sgl_size) >= MAX_ORDER) {
		dev_err(&pci_dev->dev,
			"[%s] err: too much memory requested!\n", __func__);
		return ret;
	}

	sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
					     &sgl->sgl_dma_addr);
	if (sgl->sgl == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: no memory available!\n", __func__);
		return ret;
	}

	/* Only use buffering on incomplete pages */
	if ((sgl->fpage_size != 0) && (sgl->fpage_size != PAGE_SIZE)) {
		sgl->fpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->fpage_dma_addr);
		if (sgl->fpage == NULL)
			goto err_out;

		/* Sync with user memory */
		if (copy_from_user(sgl->fpage + sgl->fpage_offs,
				   user_addr, sgl->fpage_size)) {
			ret = -EFAULT;
			goto err_out;
		}
	}
	if (sgl->lpage_size != 0) {
		sgl->lpage = __genwqe_alloc_consistent(cd, PAGE_SIZE,
						       &sgl->lpage_dma_addr);
		if (sgl->lpage == NULL)
			goto err_out1;

		/* Sync with user memory */
		if (copy_from_user(sgl->lpage, user_addr + user_size -
				   sgl->lpage_size, sgl->lpage_size)) {
			ret = -EFAULT;
			goto err_out2;
		}
	}
	return 0;

 err_out2:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
				 sgl->lpage_dma_addr);
	sgl->lpage = NULL;
	sgl->lpage_dma_addr = 0;
 err_out1:
	__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
				 sgl->fpage_dma_addr);
	sgl->fpage = NULL;
	sgl->fpage_dma_addr = 0;
 err_out:
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);
	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0;
	sgl->sgl_size = 0;

	return ret;
}
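
/**
 * genwqe_setup_sgl() - Setup scatter gather list for user data
 * @cd:         genwqe device descriptor
 * @sgl:        scatter gather list describing the user-space memory
 * @dma_list:   list of dma addresses, one per page
 *
 * Each 128-byte block holds up to 7 data entries plus one chaining
 * entry pointing to the next block. Physically contiguous chunks are
 * merged into a single entry; the list is terminated with an
 * SG_END_LIST entry.
 */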
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
		     dma_addr_t *dma_list)
{
	int i = 0, j = 0, p;
	unsigned long dma_offs, map_offs;
	dma_addr_t prev_daddr = 0;
	struct sg_entry *s, *last_s = NULL;
	size_t size = sgl->user_size;

	dma_offs = 128;		/* next block if chained */
	map_offs = sgl->fpage_offs; /* offset in first page */

	s = &sgl->sgl[0];	/* first set of 8 entries */
	p = 0;			/* page */
	while (p < sgl->nr_pages) {
		dma_addr_t daddr;
		unsigned int size_to_map;

		/* always write the chaining entry, cleanup is done later */
		j = 0;
		s[j].target_addr = cpu_to_be64(sgl->sgl_dma_addr + dma_offs);
		s[j].len = cpu_to_be32(128);
		s[j].flags = cpu_to_be32(SG_CHAINED);
		j++;

		while (j < 8) {
			/* DMA mapping for requested page, offs, size */
			size_to_map = min(size, PAGE_SIZE - map_offs);

			if ((p == 0) && (sgl->fpage != NULL)) {
				daddr = sgl->fpage_dma_addr + map_offs;

			} else if ((p == sgl->nr_pages - 1) &&
				   (sgl->lpage != NULL)) {
				daddr = sgl->lpage_dma_addr;
			} else {
				daddr = dma_list[p] + map_offs;
			}

			size -= size_to_map;
			map_offs = 0;

			if (prev_daddr == daddr) {
				u32 prev_len = be32_to_cpu(last_s->len);

				/*
				 * Chunk is physically contiguous with
				 * the previous one: extend the last
				 * entry instead of starting a new one.
				 */
				last_s->len = cpu_to_be32(prev_len +
							  size_to_map);

				p++; /* process next page */
				if (p == sgl->nr_pages)
					goto fixup;  /* nothing to do */

				prev_daddr = daddr + size_to_map;
				continue;
			}

			/* start new entry */
			s[j].target_addr = cpu_to_be64(daddr);
			s[j].len = cpu_to_be32(size_to_map);
			s[j].flags = cpu_to_be32(SG_DATA);
			prev_daddr = daddr + size_to_map;
			last_s = &s[j];
			j++;

			p++; /* process next page */
			if (p == sgl->nr_pages)
				goto fixup;  /* nothing to do */
		}
		dma_offs += 128;
		s += 8;
	}
 fixup:
	if (j == 1) {		/* combining happened on last entry! */
		s -= 8;		/* full shift needed on previous sgl block */
		j = 7;		/* shift all elements */
	}

	for (i = 0; i < j; i++)	/* move elements 1 up */
		s[i] = s[i + 1];

	s[i].target_addr = cpu_to_be64(0);
	s[i].len = cpu_to_be32(0);
	s[i].flags = cpu_to_be32(SG_END_LIST);
	return 0;
}
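
/**
 * genwqe_free_sync_sgl() - Free memory for sgl and overlapping pages
 * @cd:         genwqe device descriptor
 * @sgl:        scatter gather list describing the user-space memory
 *
 * After the DMA transfer has been completed we free the memory for
 * the sgl and the buffered pages. Data is transferred from the
 * buffered pages back into the user-space buffers.
 */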
int genwqe_free_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl)
{
	int rc = 0;
	size_t offset;
	unsigned long res;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (sgl->fpage) {
		if (sgl->write) {
			res = copy_to_user(sgl->user_addr,
				sgl->fpage + sgl->fpage_offs, sgl->fpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying fpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
					 sgl->fpage_dma_addr);
		sgl->fpage = NULL;
		sgl->fpage_dma_addr = 0;
	}
	if (sgl->lpage) {
		if (sgl->write) {
			offset = sgl->user_size - sgl->lpage_size;
			res = copy_to_user(sgl->user_addr + offset, sgl->lpage,
					   sgl->lpage_size);
			if (res) {
				dev_err(&pci_dev->dev,
					"[%s] err: copying lpage! (res=%lu)\n",
					__func__, res);
				rc = -EFAULT;
			}
		}
		__genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
					 sgl->lpage_dma_addr);
		sgl->lpage = NULL;
		sgl->lpage_dma_addr = 0;
	}
	__genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
				 sgl->sgl_dma_addr);

	sgl->sgl = NULL;
	sgl->sgl_dma_addr = 0x0;
	sgl->sgl_size = 0;
	return rc;
}
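
/**
 * genwqe_free_user_pages() - Give pinned pages back
 * @page_list:  list of pages to be freed
 * @nr_pages:   number of pages
 * @dirty:      used to make sure modified pages are written back
 *
 * Pages which were written to must be marked dirty with
 * set_page_dirty_lock() before put_page() releases them.
 */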
static int genwqe_free_user_pages(struct page **page_list,
				  unsigned int nr_pages, int dirty)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (page_list[i] != NULL) {
			if (dirty)
				set_page_dirty_lock(page_list[i]);
			put_page(page_list[i]);
		}
	}
	return 0;
}
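
/**
 * genwqe_user_vmap() - Map user-space memory for DMA
 * @cd:         pointer to genwqe device
 * @m:          mapping params
 * @uaddr:      user virtual address
 * @size:       size of memory to be mapped
 *
 * Pins the user pages with get_user_pages_fast() and establishes a
 * DMA mapping per page. When an error occurs on the data path,
 * everything mapped so far is unmapped and freed again.
 */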
int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
		     unsigned long size)
{
	int rc = -EINVAL;
	unsigned long data, offs;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((uaddr == NULL) || (size == 0)) {
		m->size = 0;
		return -EINVAL;
	}
	m->u_vaddr = uaddr;
	m->size = size;

	/* determine space needed for page_list */
	data = (unsigned long)uaddr;
	offs = offset_in_page(data);
	if (size > ULONG_MAX - PAGE_SIZE - offs) {
		m->size = 0;	/* mark unused and not added */
		return -EINVAL;
	}
	m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);

	m->page_list = kcalloc(m->nr_pages,
			       sizeof(struct page *) + sizeof(dma_addr_t),
			       GFP_KERNEL);
	if (!m->page_list) {
		dev_err(&pci_dev->dev, "err: alloc page_list failed\n");
		m->nr_pages = 0;
		m->u_vaddr = NULL;
		m->size = 0;	/* mark unused and not added */
		return -ENOMEM;
	}
	m->dma_list = (dma_addr_t *)(m->page_list + m->nr_pages);

	/* pin user pages in memory */
	rc = get_user_pages_fast(data & PAGE_MASK, /* page aligned addr */
				 m->nr_pages,
				 m->write ? FOLL_WRITE : 0,
				 m->page_list);	/* ptrs to pages */
	if (rc < 0)
		goto fail_get_user_pages;

	/* assumption: get_user_pages can be killed by fatal signals */
	if (rc < m->nr_pages) {
		genwqe_free_user_pages(m->page_list, rc, m->write);
		rc = -EFAULT;
		goto fail_get_user_pages;
	}

	rc = genwqe_map_pages(cd, m->page_list, m->nr_pages, m->dma_list);
	if (rc != 0)
		goto fail_free_user_pages;

	return 0;

 fail_free_user_pages:
	genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);

 fail_get_user_pages:
	kfree(m->page_list);
	m->page_list = NULL;
	m->dma_list = NULL;
	m->nr_pages = 0;
	m->u_vaddr = NULL;
	m->size = 0;	/* mark unused and not added */
	return rc;
}
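
/**
 * genwqe_user_vunmap() - Undo mapping of user-space memory for DMA
 * @cd:         pointer to genwqe device
 * @m:          mapping params
 */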
int genwqe_user_vunmap(struct genwqe_dev *cd, struct dma_mapping *m)
{
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!dma_mapping_used(m)) {
		dev_err(&pci_dev->dev, "[%s] err: mapping %p not used!\n",
			__func__, m);
		return -EINVAL;
	}

	if (m->dma_list)
		genwqe_unmap_pages(cd, m->dma_list, m->nr_pages);

	if (m->page_list) {
		genwqe_free_user_pages(m->page_list, m->nr_pages, m->write);

		kfree(m->page_list);
		m->page_list = NULL;
		m->dma_list = NULL;
		m->nr_pages = 0;
	}

	m->u_vaddr = NULL;
	m->size = 0;
	return 0;
}
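
/**
 * genwqe_card_type() - Get chip type from the SLU Configuration Register
 * @cd:         pointer to the genwqe device descriptor
 *
 * Return: card type field extracted from IO_SLU_UNITCFG
 */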
u8 genwqe_card_type(struct genwqe_dev *cd)
{
	u64 card_type = cd->slu_unitcfg;

	return (u8)((card_type & IO_SLU_UNITCFG_TYPE_MASK) >> 20);
}
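
/**
 * genwqe_card_reset() - Reset the card
 * @cd:         pointer to the genwqe device descriptor
 *
 * Only the PF (privileged mode) is allowed to do this.
 */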
int genwqe_card_reset(struct genwqe_dev *cd)
{
	u64 softrst;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	/* trigger the softreset and clear the FIR registers */
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, 0x1ull);
	msleep(1000);
	__genwqe_readq(cd, IO_HSU_FIR_CLR);
	__genwqe_readq(cd, IO_APP_FIR_CLR);
	__genwqe_readq(cd, IO_SLU_FIR_CLR);

	/*
	 * Read-modify-write to preserve the configuration bits in the
	 * softreset register (mask 0x3c), then trigger the error reset
	 * (bit 0x2).
	 */
	softrst = __genwqe_readq(cd, IO_SLC_CFGREG_SOFTRESET) & 0x3cull;
	__genwqe_writeq(cd, IO_SLC_CFGREG_SOFTRESET, softrst | 0x2ull);

	/* give ERRORRESET some time to finish */
	msleep(50);

	if (genwqe_need_err_masking(cd)) {
		dev_info(&pci_dev->dev,
			 "[%s] masking errors for old bitstreams\n", __func__);
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
	}
	return 0;
}
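
/**
 * genwqe_read_softreset() - Determine the softreset value to be used
 * @cd:         pointer to the genwqe device descriptor
 *
 * Depending on IO_SLU_BITSTREAM, cd->softreset is set to the value
 * used for subsequent resets. Only the PF may do this.
 */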
int genwqe_read_softreset(struct genwqe_dev *cd)
{
	u64 bitstream;

	if (!genwqe_is_privileged(cd))
		return -ENODEV;

	bitstream = __genwqe_readq(cd, IO_SLU_BITSTREAM) & 0x1;
	cd->softreset = (bitstream == 0) ? 0x8ull : 0xcull;
	return 0;
}
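
/**
 * genwqe_set_interrupt_capability() - Configure MSI capability structure
 * @cd:         pointer to the device
 * @count:      number of vectors to allocate
 *
 * Return: 0 if no error
 */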
int genwqe_set_interrupt_capability(struct genwqe_dev *cd, int count)
{
	int rc;

	rc = pci_alloc_irq_vectors(cd->pci_dev, 1, count, PCI_IRQ_MSI);
	if (rc < 0)
		return rc;
	return 0;
}
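
/**
 * genwqe_reset_interrupt_capability() - Undo genwqe_set_interrupt_capability()
 * @cd:         pointer to the device
 */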
void genwqe_reset_interrupt_capability(struct genwqe_dev *cd)
{
	pci_free_irq_vectors(cd->pci_dev);
}
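
/**
 * set_reg_idx() - Fill array with debug register data
 * @cd:         card device
 * @r:          debug register array
 * @i:          index to desired entry
 * @m:          maximum possible entries
 * @addr:       addr which is read
 * @idx:        index in debug array
 * @val:        read value
 */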
static int set_reg_idx(struct genwqe_dev *cd, struct genwqe_reg *r,
		       unsigned int *i, unsigned int m, u32 addr, u32 idx,
		       u64 val)
{
	if (WARN_ON_ONCE(*i >= m))
		return -EFAULT;

	r[*i].addr = addr;
	r[*i].idx = idx;
	r[*i].val = val;
	++*i;
	return 0;
}

static int set_reg(struct genwqe_dev *cd, struct genwqe_reg *r,
		   unsigned int *i, unsigned int m, u32 addr, u64 val)
{
	return set_reg_idx(cd, r, i, m, addr, 0, val);
}

int genwqe_read_ffdc_regs(struct genwqe_dev *cd, struct genwqe_reg *regs,
			  unsigned int max_regs, int all)
{
	unsigned int i, j, idx = 0;
	u32 ufir_addr, ufec_addr, sfir_addr, sfec_addr;
	u64 gfir, sluid, appid, ufir, ufec, sfir, sfec;

	/* Global FIR */
	gfir = __genwqe_readq(cd, IO_SLC_CFGREG_GFIR);
	set_reg(cd, regs, &idx, max_regs, IO_SLC_CFGREG_GFIR, gfir);

	/* UnitCfg for SLU */
	sluid = __genwqe_readq(cd, IO_SLU_UNITCFG);
	set_reg(cd, regs, &idx, max_regs, IO_SLU_UNITCFG, sluid);

	/* UnitCfg for APP */
	appid = __genwqe_readq(cd, IO_APP_UNITCFG);
	set_reg(cd, regs, &idx, max_regs, IO_APP_UNITCFG, appid);

	/* Check all chip Units */
	for (i = 0; i < GENWQE_MAX_UNITS; i++) {

		/* Unit FIR */
		ufir_addr = (i << 24) | 0x008;
		ufir = __genwqe_readq(cd, ufir_addr);
		set_reg(cd, regs, &idx, max_regs, ufir_addr, ufir);

		/* Unit FEC */
		ufec_addr = (i << 24) | 0x018;
		ufec = __genwqe_readq(cd, ufec_addr);
		set_reg(cd, regs, &idx, max_regs, ufec_addr, ufec);

		for (j = 0; j < 64; j++) {

			/* wherever there is a primary 1, read the 2ndary */
			if (!all && (!(ufir & (1ull << j))))
				continue;

			sfir_addr = (i << 24) | (0x100 + 8 * j);
			sfir = __genwqe_readq(cd, sfir_addr);
			set_reg(cd, regs, &idx, max_regs, sfir_addr, sfir);

			sfec_addr = (i << 24) | (0x300 + 8 * j);
			sfec = __genwqe_readq(cd, sfec_addr);
			set_reg(cd, regs, &idx, max_regs, sfec_addr, sfec);
		}
	}

	/* fill with invalid data until end */
	for (i = idx; i < max_regs; i++) {
		regs[i].addr = 0xffffffff;
		regs[i].val = 0xffffffffffffffffull;
	}
	return idx;
}
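
/**
 * genwqe_ffdc_buff_size() - Calculate the number of dump registers
 * @cd:         genwqe device descriptor
 * @uid:        unit ID
 */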
int genwqe_ffdc_buff_size(struct genwqe_dev *cd, int uid)
{
	int entries = 0, ring, traps, traces, trace_entries;
	u32 eevptr_addr, l_addr, d_len, d_type;
	u64 eevptr, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != -1ull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;

		while (1) {
			val = __genwqe_readq(cd, l_addr);

			if ((val == 0x0) || (val == -1ull))
				break;

			/* 38:24 */
			d_len = (val & 0x0000007fff000000ull) >> 24;

			/* 39 */
			d_type = (val & 0x0000008000000000ull) >> 36;

			if (d_type) {	/* repeat */
				entries += d_len;
			} else {	/* size in bytes! */
				entries += d_len >> 3;
			}

			l_addr += 8;
		}
	}

	for (ring = 0; ring < 8; ring++) {
		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;
		traces = (val >> 16) & 0xff;
		trace_entries = val & 0xffff;

		entries += traps + (traces * trace_entries);
	}
	return entries;
}
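
/**
 * genwqe_ffdc_buff_read() - Implements LogoutExtendedErrorRegisters procedure
 * @cd:         genwqe device descriptor
 * @uid:        unit ID
 * @regs:       register information
 * @max_regs:   number of register entries
 */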
int genwqe_ffdc_buff_read(struct genwqe_dev *cd, int uid,
			  struct genwqe_reg *regs, unsigned int max_regs)
{
	int i, traps, traces, trace, trace_entries, trace_entry, ring;
	unsigned int idx = 0;
	u32 eevptr_addr, l_addr, d_addr, d_len, d_type;
	u64 eevptr, e, val, addr;

	eevptr_addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_ERROR_POINTER;
	eevptr = __genwqe_readq(cd, eevptr_addr);

	if ((eevptr != 0x0) && (eevptr != 0xffffffffffffffffull)) {
		l_addr = GENWQE_UID_OFFS(uid) | eevptr;
		while (1) {
			e = __genwqe_readq(cd, l_addr);
			if ((e == 0x0) || (e == 0xffffffffffffffffull))
				break;

			d_addr = (e & 0x0000000000ffffffull);	    /* 23:0 */
			d_len  = (e & 0x0000007fff000000ull) >> 24; /* 38:24 */
			d_type = (e & 0x0000008000000000ull) >> 36; /* 39 */
			d_addr |= GENWQE_UID_OFFS(uid);

			if (d_type) {	/* read same addr multiple times */
				for (i = 0; i < (int)d_len; i++) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, i, val);
				}
			} else {
				d_len >>= 3; /* size in bytes! */
				for (i = 0; i < (int)d_len; i++, d_addr += 8) {
					val = __genwqe_readq(cd, d_addr);
					set_reg_idx(cd, regs, &idx, max_regs,
						    d_addr, 0, val);
				}
			}
			l_addr += 8;
		}
	}

	/* dump the trace buffers of all rings */
	for (ring = 0; ring < 8; ring++) {

		addr = GENWQE_UID_OFFS(uid) | IO_EXTENDED_DIAG_MAP(ring);
		val = __genwqe_readq(cd, addr);

		if ((val == 0x0ull) || (val == -1ull))
			continue;

		traps = (val >> 24) & 0xff;	/* number of traps */
		traces = (val >> 16) & 0xff;	/* number of traces */
		trace_entries = val & 0xffff;	/* entries per trace */

		/*
		 * Combined loop: dumps the traps (the trace == 0 case)
		 * as well as the traces 1 to 'traces'.
		 */
		for (trace = 0; trace <= traces; trace++) {
			u32 diag_sel =
				GENWQE_EXTENDED_DIAG_SELECTOR(ring, trace);

			addr = (GENWQE_UID_OFFS(uid) |
				IO_EXTENDED_DIAG_SELECTOR);
			__genwqe_writeq(cd, addr, diag_sel);

			for (trace_entry = 0;
			     trace_entry < (trace ? trace_entries : traps);
			     trace_entry++) {
				addr = (GENWQE_UID_OFFS(uid) |
					IO_EXTENDED_DIAG_READ_MBX);
				val = __genwqe_readq(cd, addr);
				set_reg_idx(cd, regs, &idx, max_regs, addr,
					    (diag_sel<<16) | trace_entry, val);
			}
		}
	}
	return 0;
}
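
/**
 * genwqe_write_vreg() - Write register in virtual window
 * @cd:         genwqe device descriptor
 * @reg:        register offset
 * @val:        value to write
 * @func:       PCI virtual function
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. It is not intended for the VF to access them.
 */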
int genwqe_write_vreg(struct genwqe_dev *cd, u32 reg, u64 val, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	__genwqe_writeq(cd, reg, val);
	return 0;
}
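
/**
 * genwqe_read_vreg() - Read register in virtual window
 * @cd:         genwqe device descriptor
 * @reg:        register offset
 * @func:       PCI virtual function
 *
 * Note, these registers are only accessible to the PF through the
 * VF-window. It is not intended for the VF to access them.
 */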
u64 genwqe_read_vreg(struct genwqe_dev *cd, u32 reg, int func)
{
	__genwqe_writeq(cd, IO_PF_SLC_VIRTUAL_WINDOW, func & 0xf);
	return __genwqe_readq(cd, reg);
}
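
/**
 * genwqe_base_clock_frequency() - Determine base clock frequency of the card
 * @cd:         genwqe device descriptor
 *
 * The speed grade is encoded in IO_SLU_UNITCFG bits 31:28.
 *
 * Return: base clock frequency in MHz, or 0 if the speed grade is unknown
 */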
int genwqe_base_clock_frequency(struct genwqe_dev *cd)
{
	u16 speed;
	static const int speed_grade[] = { 250, 200, 166, 175 };

	speed = (u16)((cd->slu_unitcfg >> 28) & 0x0full);
	if (speed >= ARRAY_SIZE(speed_grade))
		return 0;	/* illegal value */

	return speed_grade[speed];
}
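
/**
 * genwqe_stop_traps() - Stop traps
 * @cd:         genwqe device descriptor
 *
 * Before reading out the analysis data, the traps must be stopped.
 */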
void genwqe_stop_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_SET, 0xcull);
}
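
/**
 * genwqe_start_traps() - Enable traps for execution of debugging procedure
 * @cd:         genwqe device descriptor
 *
 * After the analysis data has been read out, the traps are activated
 * again.
 */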
void genwqe_start_traps(struct genwqe_dev *cd)
{
	__genwqe_writeq(cd, IO_SLC_MISC_DEBUG_CLR, 0xcull);

	if (genwqe_need_err_masking(cd))
		__genwqe_writeq(cd, IO_SLC_MISC_DEBUG, 0x0aull);
}