This source file includes the following definitions:
- genwqe_open_files
- genwqe_add_file
- genwqe_del_file
- genwqe_add_pin
- genwqe_del_pin
- genwqe_search_pin
- __genwqe_add_mapping
- __genwqe_del_mapping
- __genwqe_search_mapping
- genwqe_remove_mappings
- genwqe_remove_pinnings
- genwqe_kill_fasync
- genwqe_terminate
- genwqe_open
- genwqe_fasync
- genwqe_release
- genwqe_vma_open
- genwqe_vma_close
- genwqe_mmap
- do_flash_update
- do_flash_read
- genwqe_pin_mem
- genwqe_unpin_mem
- ddcb_cmd_cleanup
- ddcb_cmd_fixups
- genwqe_execute_ddcb
- do_execute_ddcb
- genwqe_ioctl
- genwqe_compat_ioctl
- genwqe_device_initialized
- genwqe_device_create
- genwqe_inform_and_stop_processes
- genwqe_device_remove
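/*
 * IBM Accelerator Family 'GenWQE'
 *
 * (C) Copyright IBM Corp. 2013
 *
 * Character device representation of the GenWQE device. This allows
 * user-space applications to communicate with the card.
 */
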
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/atomic.h>

#include "card_base.h"
#include "card_ddcb.h"

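/* returns non-zero if the device is currently opened by at least one process */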
static int genwqe_open_files(struct genwqe_dev *cd)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	rc = list_empty(&cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return !rc;
}

static void genwqe_add_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	cfile->opener = get_pid(task_tgid(current));
	spin_lock_irqsave(&cd->file_lock, flags);
	list_add(&cfile->list, &cd->file_list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
}

static int genwqe_del_file(struct genwqe_dev *cd, struct genwqe_file *cfile)
{
	unsigned long flags;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_del(&cfile->list);
	spin_unlock_irqrestore(&cd->file_lock, flags);
	put_pid(cfile->opener);

	return 0;
}

static void genwqe_add_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_add(&m->pin_list, &cfile->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
}

static int genwqe_del_pin(struct genwqe_file *cfile, struct dma_mapping *m)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->pin_lock, flags);
	list_del(&m->pin_list);
	spin_unlock_irqrestore(&cfile->pin_lock, flags);

	return 0;
}

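/**
 * genwqe_search_pin() - Search for the mapping for a userspace address
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @virt_addr:	Virtual address to be updated
 *
 * Return: Pointer to the corresponding mapping, or NULL if not found
 */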
static struct dma_mapping *genwqe_search_pin(struct genwqe_file *cfile,
					     unsigned long u_addr,
					     unsigned int size,
					     void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;

	spin_lock_irqsave(&cfile->pin_lock, flags);

	list_for_each_entry(m, &cfile->pin_list, pin_list) {
		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->pin_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->pin_lock, flags);
	return NULL;
}

static void __genwqe_add_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_add(&dma_map->card_list, &cfile->map_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

static void __genwqe_del_mapping(struct genwqe_file *cfile,
				 struct dma_mapping *dma_map)
{
	unsigned long flags;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_del(&dma_map->card_list);
	spin_unlock_irqrestore(&cfile->map_lock, flags);
}

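/**
 * __genwqe_search_mapping() - Search a virtual address in the mapping list
 * @cfile:	Descriptor of opened file
 * @u_addr:	User virtual address
 * @size:	Size of buffer
 * @dma_addr:	DMA address to be updated
 * @virt_addr:	Virtual address to be updated
 *
 * Search the list of DMAable buffers (dma_mappings) for the given address.
 */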
static struct dma_mapping *__genwqe_search_mapping(struct genwqe_file *cfile,
						   unsigned long u_addr,
						   unsigned int size,
						   dma_addr_t *dma_addr,
						   void **virt_addr)
{
	unsigned long flags;
	struct dma_mapping *m;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	spin_lock_irqsave(&cfile->map_lock, flags);
	list_for_each_entry(m, &cfile->map_list, card_list) {

		if ((((u64)m->u_vaddr) <= (u_addr)) &&
		    (((u64)m->u_vaddr + m->size) >= (u_addr + size))) {
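			/* match found: current is as expected and
			   addr is in range */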
			if (dma_addr)
				*dma_addr = m->dma_addr +
					(u_addr - (u64)m->u_vaddr);

			if (virt_addr)
				*virt_addr = m->k_vaddr +
					(u_addr - (u64)m->u_vaddr);

			spin_unlock_irqrestore(&cfile->map_lock, flags);
			return m;
		}
	}
	spin_unlock_irqrestore(&cfile->map_lock, flags);

	dev_err(&pci_dev->dev,
		"[%s] Entry not found: u_addr=%lx, size=%x\n",
		__func__, u_addr, size);

	return NULL;
}

static void genwqe_remove_mappings(struct genwqe_file *cfile)
{
	int i = 0;
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;

	list_for_each_safe(node, next, &cfile->map_list) {
		dma_map = list_entry(node, struct dma_mapping, card_list);

		list_del_init(&dma_map->card_list);
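
		/*
		 * This is really a bug, because those things should
		 * have been already tidied up.
		 *
		 * GENWQE_MAPPING_RAW should have been removed via munmap().
		 * GENWQE_MAPPING_SGL_TEMP should be removed by tidy up code.
		 */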
		dev_err(&pci_dev->dev,
			"[%s] %d. cleanup mapping: u_vaddr=%p u_kaddr=%016lx dma_addr=%lx\n",
			__func__, i++, dma_map->u_vaddr,
			(unsigned long)dma_map->k_vaddr,
			(unsigned long)dma_map->dma_addr);

		if (dma_map->type == GENWQE_MAPPING_RAW) {
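			/* we allocated this dynamically */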
			__genwqe_free_consistent(cd, dma_map->size,
						 dma_map->k_vaddr,
						 dma_map->dma_addr);
			kfree(dma_map);
		} else if (dma_map->type == GENWQE_MAPPING_SGL_TEMP) {
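			/* we use dma_map statically from the request */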
			genwqe_user_vunmap(cd, dma_map);
		}
	}
}

static void genwqe_remove_pinnings(struct genwqe_file *cfile)
{
	struct list_head *node, *next;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	list_for_each_safe(node, next, &cfile->pin_list) {
		dma_map = list_entry(node, struct dma_mapping, pin_list);
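
		/*
		 * This is not a bug, because a killed process might
		 * not call the unpin ioctl, which is supposed to free
		 * the resources.
		 *
		 * Pinnings are dynamically allocated and need to be
		 * deleted.
		 */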
		list_del_init(&dma_map->pin_list);
		genwqe_user_vunmap(cd, dma_map);
		kfree(dma_map);
	}
}

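/**
 * genwqe_kill_fasync() - Send signal to all processes with open GenWQE files
 *
 * E.g. genwqe_kill_fasync(cd, SIGIO);
 */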
static int genwqe_kill_fasync(struct genwqe_dev *cd, int sig)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		if (cfile->async_queue)
			kill_fasync(&cfile->async_queue, sig, POLL_HUP);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

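/* Send SIGKILL to all processes which still have the device file open. */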
static int genwqe_terminate(struct genwqe_dev *cd)
{
	unsigned int files = 0;
	unsigned long flags;
	struct genwqe_file *cfile;

	spin_lock_irqsave(&cd->file_lock, flags);
	list_for_each_entry(cfile, &cd->file_list, list) {
		kill_pid(cfile->opener, SIGKILL, 1);
		files++;
	}
	spin_unlock_irqrestore(&cd->file_lock, flags);
	return files;
}

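/**
 * genwqe_open() - file open
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls
 * open("/dev/genwqe",..).
 *
 * Return: 0 if successful or <0 if errors
 */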
static int genwqe_open(struct inode *inode, struct file *filp)
{
	struct genwqe_dev *cd;
	struct genwqe_file *cfile;

	cfile = kzalloc(sizeof(*cfile), GFP_KERNEL);
	if (cfile == NULL)
		return -ENOMEM;

	cd = container_of(inode->i_cdev, struct genwqe_dev, cdev_genwqe);
	cfile->cd = cd;
	cfile->filp = filp;
	cfile->client = NULL;

	spin_lock_init(&cfile->map_lock);
	INIT_LIST_HEAD(&cfile->map_list);

	spin_lock_init(&cfile->pin_lock);
	INIT_LIST_HEAD(&cfile->pin_list);

	filp->private_data = cfile;

	genwqe_add_file(cd, cfile);
	return 0;
}

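/**
 * genwqe_fasync() - Setup process to receive SIGIO.
 * @fd:	file descriptor
 * @filp:	file handle
 * @mode:	file mode
 *
 * Sending a signal is working as following:
 *
 * if (cdev->async_queue)
 *         kill_fasync(&cdev->async_queue, SIGIO, POLL_IN);
 *
 * Some devices also implement asynchronous notification to indicate
 * when the device can be written; in this case, of course,
 * kill_fasync must be called with a mode of POLL_OUT.
 */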
static int genwqe_fasync(int fd, struct file *filp, int mode)
{
	struct genwqe_file *cdev = (struct genwqe_file *)filp->private_data;

	return fasync_helper(fd, filp, mode, &cdev->async_queue);
}

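/**
 * genwqe_release() - file close
 * @inode:	file system information
 * @filp:	file handle
 *
 * This function is executed whenever an application calls 'close(fd_genwqe)'
 *
 * Return: always 0
 */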
static int genwqe_release(struct inode *inode, struct file *filp)
{
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
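
	/* there must be no entries in these lists! */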
	genwqe_remove_mappings(cfile);
	genwqe_remove_pinnings(cfile);
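
	/* remove this filp from the asynchronously notified filp's */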
	genwqe_fasync(-1, filp, 0);
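
	/*
	 * For this to work we must not release cd when this cfile is
	 * not yet released, otherwise the list entry is invalid,
	 * because the list itself gets reinstantiated!
	 */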
	genwqe_del_file(cd, cfile);
	kfree(cfile);
	return 0;
}

static void genwqe_vma_open(struct vm_area_struct *vma)
{
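	/* nothing ... */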
}

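/**
 * genwqe_vma_close() - Called each time when vma is unmapped
 *
 * Free memory which got allocated by GenWQE mmap().
 */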
static void genwqe_vma_close(struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	struct inode *inode = file_inode(vma->vm_file);
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = container_of(inode->i_cdev, struct genwqe_dev,
					    cdev_genwqe);
	struct pci_dev *pci_dev = cd->pci_dev;
	dma_addr_t d_addr = 0;
	struct genwqe_file *cfile = vma->vm_private_data;

	dma_map = __genwqe_search_mapping(cfile, vma->vm_start, vsize,
					 &d_addr, NULL);
	if (dma_map == NULL) {
		dev_err(&pci_dev->dev,
			" [%s] err: mapping not found: v=%lx, p=%lx s=%lx\n",
			__func__, vma->vm_start, vma->vm_pgoff << PAGE_SHIFT,
			vsize);
		return;
	}
	__genwqe_del_mapping(cfile, dma_map);
	__genwqe_free_consistent(cd, dma_map->size, dma_map->k_vaddr,
				 dma_map->dma_addr);
	kfree(dma_map);
}

static const struct vm_operations_struct genwqe_vma_ops = {
	.open	= genwqe_vma_open,
	.close	= genwqe_vma_close,
};

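/**
 * genwqe_mmap() - Provide contiguous buffers to userspace
 *
 * We use mmap() to allocate contiguous buffers used for DMA
 * transfers. After the buffer is allocated we remap it to user-space
 * and remember a reference to our dma_mapping data structure, where
 * we store the associated DMA address and allocated size.
 *
 * When we receive a DDCB execution request with the ATS bits set to
 * 0x0, we check if the buffer is contiguous and stems from our
 * mmap() ioctl.
 */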
static int genwqe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int rc;
	unsigned long pfn, vsize = vma->vm_end - vma->vm_start;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;

	if (vsize == 0)
		return -EINVAL;

	if (get_order(vsize) > MAX_ORDER)
		return -ENOMEM;

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_RAW);
	dma_map->u_vaddr = (void *)vma->vm_start;
	dma_map->size = vsize;
	dma_map->nr_pages = DIV_ROUND_UP(vsize, PAGE_SIZE);
	dma_map->k_vaddr = __genwqe_alloc_consistent(cd, vsize,
						     &dma_map->dma_addr);
	if (dma_map->k_vaddr == NULL) {
		rc = -ENOMEM;
		goto free_dma_map;
	}

	if (capable(CAP_SYS_ADMIN) && (vsize > sizeof(dma_addr_t)))
		*(dma_addr_t *)dma_map->k_vaddr = dma_map->dma_addr;

	pfn = virt_to_phys(dma_map->k_vaddr) >> PAGE_SHIFT;
	rc = remap_pfn_range(vma,
			     vma->vm_start,
			     pfn,
			     vsize,
			     vma->vm_page_prot);
	if (rc != 0) {
		rc = -EFAULT;
		goto free_dma_mem;
	}

	vma->vm_private_data = cfile;
	vma->vm_ops = &genwqe_vma_ops;
	__genwqe_add_mapping(cfile, dma_map);

	return 0;

 free_dma_mem:
	__genwqe_free_consistent(cd, dma_map->size,
				dma_map->k_vaddr,
				dma_map->dma_addr);
 free_dma_map:
	kfree(dma_map);
	return rc;
}

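/**
 * do_flash_update() - Execute flash update (write image or CVPD)
 * @cfile:	Descriptor of opened file
 * @load:	details about image load
 *
 * Return: 0 if successful
 */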
#define FLASH_BLOCK 0x40000

static int do_flash_update(struct genwqe_file *cfile,
			   struct genwqe_bitstream *load)
{
	int rc = 0;
	int blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u32 crc;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;
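
	/* convert the partition label ('0', '1', 'v') into DDCB cmdopts */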
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x14;
		break;
	case '1':
		cmdopts = 0x1C;
		break;
	case 'v':
		cmdopts = 0x0C;
		break;
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
		struct genwqe_ddcb_cmd *req;
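
		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */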
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		rc = copy_from_user(xbuf, buf, tocopy);
		if (rc) {
			rc = -EFAULT;
			goto free_buffer;
		}
		crc = genwqe_crc32(xbuf, tocopy, 0xffffffff);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx CRC: %08x SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, crc, tocopy,
			blocks_to_flash);
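
		/* prepare DDCB for SLU process */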
		req = ddcb_requ_alloc();
		if (req == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}

		req->cmd = SLCMD_MOVE_FLASH;
		req->cmdopts = cmdopts;
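
		/* the ASIV layout differs between old and new service layer */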
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&req->__asiv[0] = cpu_to_be64(dma_addr);
			*(__be64 *)&req->__asiv[8] = cpu_to_be64(tocopy);
			*(__be64 *)&req->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->__asiv[24] = cpu_to_be32(0);
			req->__asiv[24] = load->uid;
			*(__be32 *)&req->__asiv[28] = cpu_to_be32(crc);
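
			/* for simulation only */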
			*(__be64 *)&req->__asiv[88] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->__asiv[96] = cpu_to_be64(load->app_id);
			req->asiv_length = 32;
		} else {
			*(__be64 *)&req->asiv[0] = cpu_to_be64(dma_addr);
			*(__be32 *)&req->asiv[8] = cpu_to_be32(tocopy);
			*(__be32 *)&req->asiv[12] = cpu_to_be32(0);
			*(__be64 *)&req->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&req->asiv[24] = cpu_to_be32(load->uid << 24);
			*(__be32 *)&req->asiv[28] = cpu_to_be32(crc);
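
			/* for simulation only */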
			*(__be64 *)&req->asiv[80] = cpu_to_be64(load->slu_id);
			*(__be64 *)&req->asiv[88] = cpu_to_be64(load->app_id);
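
			/* Rd only */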
			req->ats = 0x4ULL << 44;
			req->asiv_length = 40;
		}
		req->asv_length = 8;
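
		/* For Genwqe5 we get back the calculated CRC */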
		*(u64 *)&req->asv[0] = 0ULL;

		rc = __genwqe_execute_raw_ddcb(cd, req, filp->f_flags);

		load->retc = req->retc;
		load->attn = req->attn;
		load->progress = req->progress;

		if (rc < 0) {
			ddcb_requ_free(req);
			goto free_buffer;
		}

		if (req->retc != DDCB_RETC_COMPLETE) {
			rc = -EIO;
			ddcb_requ_free(req);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(req);
	}

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

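/* Read flash contents back into a user buffer, block by block. */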
static int do_flash_read(struct genwqe_file *cfile,
			 struct genwqe_bitstream *load)
{
	int rc, blocks_to_flash;
	dma_addr_t dma_addr;
	u64 flash = 0;
	size_t tocopy = 0;
	u8 __user *buf;
	u8 *xbuf;
	u8 cmdopts;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_ddcb_cmd *cmd;

	if ((load->size & 0x3) != 0)
		return -EINVAL;

	if (((unsigned long)(load->data_addr) & ~PAGE_MASK) != 0)
		return -EINVAL;
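
	/* convert the partition label ('0', '1', 'v') into DDCB cmdopts */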
	switch ((char)load->partition) {
	case '0':
		cmdopts = 0x12;
		break;
	case '1':
		cmdopts = 0x1A;
		break;
	case 'v':
		cmdopts = 0x0A;
		break;
	default:
		return -EINVAL;
	}

	buf = (u8 __user *)load->data_addr;
	xbuf = __genwqe_alloc_consistent(cd, FLASH_BLOCK, &dma_addr);
	if (xbuf == NULL)
		return -ENOMEM;

	blocks_to_flash = load->size / FLASH_BLOCK;
	while (load->size) {
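		/*
		 * We must be 4 byte aligned. Buffer must be 0 appended
		 * to have defined values when calculating CRC.
		 */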
		tocopy = min_t(size_t, load->size, FLASH_BLOCK);

		dev_dbg(&pci_dev->dev,
			"[%s] DMA: %lx SZ: %ld %d\n",
			__func__, (unsigned long)dma_addr, tocopy,
			blocks_to_flash);
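
		/* prepare DDCB for SLU process */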
		cmd = ddcb_requ_alloc();
		if (cmd == NULL) {
			rc = -ENOMEM;
			goto free_buffer;
		}
		cmd->cmd = SLCMD_MOVE_FLASH;
		cmd->cmdopts = cmdopts;
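
		/* the ASIV layout differs between old and new service layer */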
		if (genwqe_get_slu_id(cd) <= 0x2) {
			*(__be64 *)&cmd->__asiv[0] = cpu_to_be64(dma_addr);
			*(__be64 *)&cmd->__asiv[8] = cpu_to_be64(tocopy);
			*(__be64 *)&cmd->__asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->__asiv[24] = cpu_to_be32(0);
			cmd->__asiv[24] = load->uid;
			*(__be32 *)&cmd->__asiv[28] = cpu_to_be32(0);
			cmd->asiv_length = 32;
		} else {
			*(__be64 *)&cmd->asiv[0] = cpu_to_be64(dma_addr);
			*(__be32 *)&cmd->asiv[8] = cpu_to_be32(tocopy);
			*(__be32 *)&cmd->asiv[12] = cpu_to_be32(0);
			*(__be64 *)&cmd->asiv[16] = cpu_to_be64(flash);
			*(__be32 *)&cmd->asiv[24] = cpu_to_be32(load->uid << 24);
			*(__be32 *)&cmd->asiv[28] = cpu_to_be32(0);
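
			/* rd/wr */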
			cmd->ats = 0x5ULL << 44;
			cmd->asiv_length = 40;
		}
		cmd->asv_length = 8;
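
		/* we only get back the calculated CRC */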
		*(u64 *)&cmd->asv[0] = 0ULL;

		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);

		load->retc = cmd->retc;
		load->attn = cmd->attn;
		load->progress = cmd->progress;

		if ((rc < 0) && (rc != -EBADMSG)) {
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		rc = copy_to_user(buf, xbuf, tocopy);
		if (rc) {
			rc = -EFAULT;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}
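
		/* We know that we can get retc 0x104 with CRC err */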
		if (((cmd->retc == DDCB_RETC_FAULT) &&
		     (cmd->attn != 0x02)) ||
		    ((cmd->retc == DDCB_RETC_COMPLETE) &&
		     (cmd->attn != 0x00))) {
			rc = -EIO;
			ddcb_requ_free(cmd);
			goto free_buffer;
		}

		load->size -= tocopy;
		flash += tocopy;
		buf += tocopy;
		blocks_to_flash--;
		ddcb_requ_free(cmd);
	}
	rc = 0;

 free_buffer:
	__genwqe_free_consistent(cd, FLASH_BLOCK, xbuf, dma_addr);
	return rc;
}

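/* Pin a user-space buffer so that it can be referenced by DDCB requests. */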
static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cfile->cd->pci_dev;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if ((m->addr == 0x0) || (m->size == 0))
		return -EINVAL;
	if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = kzalloc(sizeof(struct dma_mapping), GFP_KERNEL);
	if (dma_map == NULL)
		return -ENOMEM;

	genwqe_mapping_init(dma_map, GENWQE_MAPPING_SGL_PINNED);
	rc = genwqe_user_vmap(cd, dma_map, (void *)map_addr, map_size);
	if (rc != 0) {
		dev_err(&pci_dev->dev,
			"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
		kfree(dma_map);
		return rc;
	}

	genwqe_add_pin(cfile, dma_map);
	return 0;
}

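/* Undo a genwqe_pin_mem() operation and release the pinned buffer. */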
static int genwqe_unpin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
{
	struct genwqe_dev *cd = cfile->cd;
	struct dma_mapping *dma_map;
	unsigned long map_addr;
	unsigned long map_size;

	if (m->addr == 0x0)
		return -EINVAL;

	map_addr = (m->addr & PAGE_MASK);
	map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);

	dma_map = genwqe_search_pin(cfile, map_addr, map_size, NULL);
	if (dma_map == NULL)
		return -ENOENT;

	genwqe_del_pin(cfile, dma_map);
	genwqe_user_vunmap(cd, dma_map);
	kfree(dma_map);
	return 0;
}

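/**
 * ddcb_cmd_cleanup() - Remove dynamically created fixup entries
 *
 * Only if there are any. Pinnings are not removed.
 */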
static int ddcb_cmd_cleanup(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	unsigned int i;
	struct dma_mapping *dma_map;
	struct genwqe_dev *cd = cfile->cd;

	for (i = 0; i < DDCB_FIXUPS; i++) {
		dma_map = &req->dma_mappings[i];

		if (dma_mapping_used(dma_map)) {
			__genwqe_del_mapping(cfile, dma_map);
			genwqe_user_vunmap(cd, dma_map);
		}
		if (req->sgls[i].sgl != NULL)
			genwqe_free_sync_sgl(cd, &req->sgls[i]);
	}
	return 0;
}

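/**
 * ddcb_cmd_fixups() - Establish DMA fixups/sglists for user memory references
 *
 * Before the DDCB gets executed we need to handle the fixups. We
 * replace the user-space addresses with DMA addresses or do
 * additional setup work e.g. generating a scatter-gather list which
 * is used to describe the memory referred to in the fixup.
 */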
static int ddcb_cmd_fixups(struct genwqe_file *cfile, struct ddcb_requ *req)
{
	int rc;
	unsigned int asiv_offs, i;
	struct genwqe_dev *cd = cfile->cd;
	struct genwqe_ddcb_cmd *cmd = &req->cmd;
	struct dma_mapping *m;

	for (i = 0, asiv_offs = 0x00; asiv_offs <= 0x58;
	     i++, asiv_offs += 0x08) {

		u64 u_addr;
		dma_addr_t d_addr;
		u32 u_size = 0;
		u64 ats_flags;

		ats_flags = ATS_GET_FLAGS(cmd->ats, asiv_offs);

		switch (ats_flags) {

		case ATS_TYPE_DATA:
			break;

		case ATS_TYPE_FLAT_RDWR:
		case ATS_TYPE_FLAT_RD: {
			u_addr = be64_to_cpu(*((__be64 *)&cmd->
					       asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)&cmd->
					       asiv[asiv_offs + 0x08]));
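
			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */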
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = __genwqe_search_mapping(cfile, u_addr, u_size,
						    &d_addr, NULL);
			if (m == NULL) {
				rc = -EFAULT;
				goto err_out;
			}

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(d_addr);
			break;
		}

		case ATS_TYPE_SGL_RDWR:
		case ATS_TYPE_SGL_RD: {
			int page_offs;

			u_addr = be64_to_cpu(*((__be64 *)
					       &cmd->asiv[asiv_offs]));
			u_size = be32_to_cpu(*((__be32 *)
					       &cmd->asiv[asiv_offs + 0x08]));
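
			/*
			 * No data available. Ignore u_addr in this
			 * case and set addr to 0. Hardware must not
			 * fetch the buffer.
			 */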
			if (u_size == 0x0) {
				*((__be64 *)&cmd->asiv[asiv_offs]) =
					cpu_to_be64(0x0);
				break;
			}

			m = genwqe_search_pin(cfile, u_addr, u_size, NULL);
			if (m != NULL) {
				page_offs = (u_addr -
					     (u64)m->u_vaddr)/PAGE_SIZE;
			} else {
				m = &req->dma_mappings[i];

				genwqe_mapping_init(m,
						    GENWQE_MAPPING_SGL_TEMP);

				if (ats_flags == ATS_TYPE_SGL_RD)
					m->write = 0;

				rc = genwqe_user_vmap(cd, m, (void *)u_addr,
						      u_size);
				if (rc != 0)
					goto err_out;

				__genwqe_add_mapping(cfile, m);
				page_offs = 0;
			}
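
			/* create genwqe style scatter gather list */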
			rc = genwqe_alloc_sync_sgl(cd, &req->sgls[i],
						   (void __user *)u_addr,
						   u_size, m->write);
			if (rc != 0)
				goto err_out;

			genwqe_setup_sgl(cd, &req->sgls[i],
					 &m->dma_list[page_offs]);

			*((__be64 *)&cmd->asiv[asiv_offs]) =
				cpu_to_be64(req->sgls[i].sgl_dma_addr);

			break;
		}
		default:
			rc = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

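/**
 * genwqe_execute_ddcb() - Execute DDCB using userspace address fixups
 *
 * The code will build up the translation tables or lookup the
 * contiguous memory allocation table to find the right translations
 * and DMA addresses.
 */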
static int genwqe_execute_ddcb(struct genwqe_file *cfile,
			       struct genwqe_ddcb_cmd *cmd)
{
	int rc;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;
	struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd);

	rc = ddcb_cmd_fixups(cfile, req);
	if (rc != 0)
		return rc;

	rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
	ddcb_cmd_cleanup(cfile, req);
	return rc;
}

static int do_execute_ddcb(struct genwqe_file *cfile,
			   unsigned long arg, int raw)
{
	int rc;
	struct genwqe_ddcb_cmd *cmd;
	struct genwqe_dev *cd = cfile->cd;
	struct file *filp = cfile->filp;

	cmd = ddcb_requ_alloc();
	if (cmd == NULL)
		return -ENOMEM;

	if (copy_from_user(cmd, (void __user *)arg, sizeof(*cmd))) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	if (!raw)
		rc = genwqe_execute_ddcb(cfile, cmd);
	else
		rc = __genwqe_execute_raw_ddcb(cd, cmd, filp->f_flags);
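
	/* Copy back only the modified fields. Do not copy ASIV
	   back since the copy got modified by the driver. */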
	if (copy_to_user((void __user *)arg, cmd,
			 sizeof(*cmd) - DDCB_ASIV_LENGTH)) {
		ddcb_requ_free(cmd);
		return -EFAULT;
	}

	ddcb_requ_free(cmd);
	return rc;
}

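/**
 * genwqe_ioctl() - IO control
 * @filp:	file handle
 * @cmd:	command identifier (passed from user)
 * @arg:	argument (passed from user)
 *
 * Return: 0 success
 */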
static long genwqe_ioctl(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	int rc = 0;
	struct genwqe_file *cfile = (struct genwqe_file *)filp->private_data;
	struct genwqe_dev *cd = cfile->cd;
	struct pci_dev *pci_dev = cd->pci_dev;
	struct genwqe_reg_io __user *io;
	u64 val;
	u32 reg_offs;
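
	/* Return -EIO if card hit EEH */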
	if (pci_channel_offline(pci_dev))
		return -EIO;

	if (_IOC_TYPE(cmd) != GENWQE_IOC_CODE)
		return -EINVAL;

	switch (cmd) {

	case GENWQE_GET_CARD_STATE:
		put_user(cd->card_state, (enum genwqe_card_state __user *)arg);
		return 0;
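
		/* Register access */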
	case GENWQE_READ_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		val = __genwqe_readq(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG64: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x7))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writeq(cd, reg_offs, val);
		return 0;
	}

	case GENWQE_READ_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		val = __genwqe_readl(cd, reg_offs);
		put_user(val, &io->val64);
		return 0;
	}

	case GENWQE_WRITE_REG32: {
		io = (struct genwqe_reg_io __user *)arg;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
			return -EPERM;

		if (get_user(reg_offs, &io->num))
			return -EFAULT;

		if ((reg_offs >= cd->mmio_len) || (reg_offs & 0x3))
			return -EINVAL;

		if (get_user(val, &io->val64))
			return -EFAULT;

		__genwqe_writel(cd, reg_offs, val);
		return 0;
	}
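
		/* Flash update/reading */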
1140 struct genwqe_bitstream load;
1141
1142 if (!genwqe_is_privileged(cd))
1143 return -EPERM;
1144
1145 if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
1146 return -EPERM;
1147
1148 if (copy_from_user(&load, (void __user *)arg,
1149 sizeof(load)))
1150 return -EFAULT;
1151
1152 rc = do_flash_update(cfile, &load);
1153
1154 if (copy_to_user((void __user *)arg, &load, sizeof(load)))
1155 return -EFAULT;
1156
1157 return rc;
1158 }
1159
1160 case GENWQE_SLU_READ: {
1161 struct genwqe_bitstream load;
1162
1163 if (!genwqe_is_privileged(cd))
1164 return -EPERM;
1165
1166 if (genwqe_flash_readback_fails(cd))
1167 return -ENOSPC;
1168
1169 if (copy_from_user(&load, (void __user *)arg, sizeof(load)))
1170 return -EFAULT;
1171
1172 rc = do_flash_read(cfile, &load);
1173
1174 if (copy_to_user((void __user *)arg, &load, sizeof(load)))
1175 return -EFAULT;
1176
1177 return rc;
1178 }
1179
1180
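
		/* memory pinning and unpinning */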
	case GENWQE_PIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_pin_mem(cfile, &m);
	}

	case GENWQE_UNPIN_MEM: {
		struct genwqe_mem m;

		if (copy_from_user(&m, (void __user *)arg, sizeof(m)))
			return -EFAULT;

		return genwqe_unpin_mem(cfile, &m);
	}
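
		/* launch a DDCB and wait for completion */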
	case GENWQE_EXECUTE_DDCB:
		return do_execute_ddcb(cfile, arg, 0);

	case GENWQE_EXECUTE_RAW_DDCB: {

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		return do_execute_ddcb(cfile, arg, 1);
	}

	default:
		return -EINVAL;
	}

	return rc;
}

#if defined(CONFIG_COMPAT)
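/**
 * genwqe_compat_ioctl() - Compatibility ioctl
 *
 * Called whenever a 32-bit process running under a 64-bit kernel
 * performs an ioctl on /dev/genwqe<n>_card.
 *
 * @filp:	file pointer.
 * @cmd:	command.
 * @arg:	user argument.
 * Return:	zero on success or negative number on failure.
 */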
static long genwqe_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	return genwqe_ioctl(filp, cmd, arg);
}
#endif

static const struct file_operations genwqe_fops = {
	.owner		= THIS_MODULE,
	.open		= genwqe_open,
	.fasync		= genwqe_fasync,
	.mmap		= genwqe_mmap,
	.unlocked_ioctl	= genwqe_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl	= genwqe_compat_ioctl,
#endif
	.release	= genwqe_release,
};

static int genwqe_device_initialized(struct genwqe_dev *cd)
{
	return cd->dev != NULL;
}

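/**
 * genwqe_device_create() - Create and configure genwqe char device
 * @cd:	genwqe device descriptor
 *
 * This function must be called before we create any more genwqe
 * character devices, because it is allocating the major and minor
 * number which are supposed to be used by the client drivers.
 */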
int genwqe_device_create(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;
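
	/*
	 * Here starts the individual setup per client. It must
	 * initialize its own cdev data structure with its own fops.
	 * The appropriate devnum needs to be created. The ranges must
	 * not overlap.
	 */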
	rc = alloc_chrdev_region(&cd->devnum_genwqe, 0,
				 GENWQE_MAX_MINOR, GENWQE_DEVNAME);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: alloc_chrdev_region failed\n");
		goto err_dev;
	}

	cdev_init(&cd->cdev_genwqe, &genwqe_fops);
	cd->cdev_genwqe.owner = THIS_MODULE;

	rc = cdev_add(&cd->cdev_genwqe, cd->devnum_genwqe, 1);
	if (rc < 0) {
		dev_err(&pci_dev->dev, "err: cdev_add failed\n");
		goto err_add;
	}
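
	/*
	 * Finally the device in /dev/... must be created. The rule is
	 * that the device major number equals the genwqe card number.
	 */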
	cd->dev = device_create_with_groups(cd->class_genwqe,
					    &cd->pci_dev->dev,
					    cd->devnum_genwqe, cd,
					    genwqe_attribute_groups,
					    GENWQE_DEVNAME "%u_card",
					    cd->card_idx);
	if (IS_ERR(cd->dev)) {
		rc = PTR_ERR(cd->dev);
		goto err_cdev;
	}

	genwqe_init_debugfs(cd);

	return 0;

 err_cdev:
	cdev_del(&cd->cdev_genwqe);
 err_add:
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
 err_dev:
	cd->dev = NULL;
	return rc;
}

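/*
 * Ask all processes which still have the device open to quit via
 * SIGIO; escalate to SIGKILL if the files do not get closed within
 * the timeout.
 */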
static int genwqe_inform_and_stop_processes(struct genwqe_dev *cd)
{
	int rc;
	unsigned int i;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_open_files(cd))
		return 0;

	dev_warn(&pci_dev->dev, "[%s] send SIGIO and wait ...\n", __func__);

	rc = genwqe_kill_fasync(cd, SIGIO);
	if (rc > 0) {
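		/* give kill_timeout seconds to close file descriptors ... */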
		for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
			    genwqe_open_files(cd); i++) {
			dev_info(&pci_dev->dev, "  %d sec ...", i);

			cond_resched();
			msleep(1000);
		}
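
		/* if no open files we can safely continue, else try harder */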
		if (!genwqe_open_files(cd))
			return 0;

		dev_warn(&pci_dev->dev,
			 "[%s] send SIGKILL and wait ...\n", __func__);

		rc = genwqe_terminate(cd);
		if (rc) {
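			/* Give kill_timeout more seconds to end processes */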
			for (i = 0; (i < GENWQE_KILL_TIMEOUT) &&
				    genwqe_open_files(cd); i++) {
				dev_warn(&pci_dev->dev, "  %d sec ...", i);

				cond_resched();
				msleep(1000);
			}
		}
	}
	return 0;
}

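/**
 * genwqe_device_remove() - Remove genwqe's char device
 *
 * This function must be called after the client devices are removed
 * because it will free the major/minor number range for the genwqe
 * drivers.
 *
 * This function must be robust enough to be called twice.
 */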
int genwqe_device_remove(struct genwqe_dev *cd)
{
	int rc;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (!genwqe_device_initialized(cd))
		return 1;

	genwqe_inform_and_stop_processes(cd);
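
	/*
	 * We currently do wait until all filedescriptors are
	 * closed. This leads to a problem when we abort the
	 * application which will decrease this reference from
	 * 1/unused to 0/illegal and not from 2/used 1/empty.
	 */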
	rc = kref_read(&cd->cdev_genwqe.kobj.kref);
	if (rc != 1) {
		dev_err(&pci_dev->dev,
			"[%s] err: cdev_genwqe...refcount=%d\n", __func__, rc);
		panic("Fatal err: cannot free resources with pending references!");
	}

	genqwe_exit_debugfs(cd);
	device_destroy(cd->class_genwqe, cd->devnum_genwqe);
	cdev_del(&cd->cdev_genwqe);
	unregister_chrdev_region(cd->devnum_genwqe, GENWQE_MAX_MINOR);
	cd->dev = NULL;

	return 0;
}