This source file includes the following definitions:
- mraid_mm_open
- mraid_mm_ioctl
- mraid_mm_unlocked_ioctl
- mraid_mm_get_adapter
- handle_drvrcmd
- mimd_to_kioc
- mraid_mm_attach_buf
- mraid_mm_alloc_kioc
- mraid_mm_dealloc_kioc
- lld_ioctl
- ioctl_done
- lld_timedout
- kioc_to_mimd
- hinfo_to_cinfo
- mraid_mm_register_adp
- mraid_mm_adapter_app_handle
- mraid_mm_setup_dma_pools
- mraid_mm_unregister_adp
- mraid_mm_free_adp_resources
- mraid_mm_teardown_dma_pools
- mraid_mm_init
- mraid_mm_compat_ioctl
- mraid_mm_exit
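/*
 * megaraid_mm: common management module for the megaraid mailbox drivers.
 * It exposes the "megadev0" misc character node through which management
 * applications issue old-style mimd ioctls; the requests are converted to
 * uioc_t packets and routed to whichever low-level driver registered the
 * target adapter via mraid_mm_register_adp().
 */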
13 #include <linux/sched.h>
14 #include <linux/slab.h>
15 #include <linux/mutex.h>
16 #include "megaraid_mm.h"
17
18
19
20 static DEFINE_MUTEX(mraid_mm_mutex);
21 static int mraid_mm_open(struct inode *, struct file *);
22 static long mraid_mm_unlocked_ioctl(struct file *, uint, unsigned long);
23
24
25
26 static int mimd_to_kioc(mimd_t __user *, mraid_mmadp_t *, uioc_t *);
27 static int kioc_to_mimd(uioc_t *, mimd_t __user *);
28
29
30
31 static int handle_drvrcmd(void __user *, uint8_t, int *);
32 static int lld_ioctl(mraid_mmadp_t *, uioc_t *);
33 static void ioctl_done(uioc_t *);
34 static void lld_timedout(struct timer_list *);
35 static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *);
36 static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *);
37 static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *);
38 static void mraid_mm_dealloc_kioc(mraid_mmadp_t *, uioc_t *);
39 static int mraid_mm_attach_buf(mraid_mmadp_t *, uioc_t *, int);
40 static int mraid_mm_setup_dma_pools(mraid_mmadp_t *);
41 static void mraid_mm_free_adp_resources(mraid_mmadp_t *);
42 static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *);
43
44 #ifdef CONFIG_COMPAT
45 static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long);
46 #endif
47
48 MODULE_AUTHOR("LSI Logic Corporation");
49 MODULE_DESCRIPTION("LSI Logic Management Module");
50 MODULE_LICENSE("GPL");
51 MODULE_VERSION(LSI_COMMON_MOD_VERSION);
52
53 static int dbglevel = CL_ANN;
54 module_param_named(dlevel, dbglevel, int, 0);
55 MODULE_PARM_DESC(dlevel, "Debug level (default=0)");
56
57 EXPORT_SYMBOL(mraid_mm_register_adp);
58 EXPORT_SYMBOL(mraid_mm_unregister_adp);
59 EXPORT_SYMBOL(mraid_mm_adapter_app_handle);
60
61 static uint32_t drvr_ver = 0x02200207;
62
63 static int adapters_count_g;
64 static struct list_head adapters_list_g;
65
66 static wait_queue_head_t wait_q;
67
68 static const struct file_operations lsi_fops = {
69 .open = mraid_mm_open,
70 .unlocked_ioctl = mraid_mm_unlocked_ioctl,
71 #ifdef CONFIG_COMPAT
72 .compat_ioctl = mraid_mm_compat_ioctl,
73 #endif
74 .owner = THIS_MODULE,
75 .llseek = noop_llseek,
76 };
77
78 static struct miscdevice megaraid_mm_dev = {
79 .minor = MISC_DYNAMIC_MINOR,
80 .name = "megadev0",
81 .fops = &lsi_fops,
82 };
83
84
85
86
87
88
89
90
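/**
 * mraid_mm_open - open routine for the character node
 *
 * Only privileged (CAP_SYS_ADMIN) processes may open the node; everyone
 * else gets -EACCES.
 */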
91 static int
92 mraid_mm_open(struct inode *inode, struct file *filep)
93 {
94
95
96
97 if (!capable(CAP_SYS_ADMIN)) return (-EACCES);
98
99 return 0;
100 }
101
102
103
104
105
106
107
108
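/**
 * mraid_mm_ioctl - module entry point for all ioctls
 *
 * Validates the ioctl magic and the packet signature (only the old mimd
 * format is accepted), services driver-level commands locally via
 * handle_drvrcmd(), and hands everything else to the owning adapter:
 * the mimd packet is converted to a kioc, issued through lld_ioctl(),
 * and the results are copied back with kioc_to_mimd().
 */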
109 static int
110 mraid_mm_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
111 {
112 uioc_t *kioc;
113 char signature[EXT_IOCTL_SIGN_SZ] = {0};
114 int rval;
115 mraid_mmadp_t *adp;
116 uint8_t old_ioctl;
117 int drvrcmd_rval;
118 void __user *argp = (void __user *)arg;
119
120
121
122
123
124
125 if ((_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD)) {
126 return (-EINVAL);
127 }
128
129
130
131
132 if (copy_from_user(signature, argp, EXT_IOCTL_SIGN_SZ)) {
133 con_log(CL_ANN, (KERN_WARNING
134 "megaraid cmm: copy from usr addr failed\n"));
135 return (-EFAULT);
136 }
137
138 if (memcmp(signature, EXT_IOCTL_SIGN, EXT_IOCTL_SIGN_SZ) == 0)
139 old_ioctl = 0;
140 else
141 old_ioctl = 1;
142
143
144
145
146 if (!old_ioctl )
147 return (-EINVAL);
148
149
150
151
152
153 rval = handle_drvrcmd(argp, old_ioctl, &drvrcmd_rval);
154
155 if (rval < 0)
156 return rval;
157 else if (rval == 0)
158 return drvrcmd_rval;
159
160 rval = 0;
161 if ((adp = mraid_mm_get_adapter(argp, &rval)) == NULL) {
162 return rval;
163 }
164
165
166
167
168
169 if (!adp->quiescent) {
170 con_log(CL_ANN, (KERN_WARNING
171 "megaraid cmm: controller cannot accept cmds due to "
172 "earlier errors\n" ));
173 return -EFAULT;
174 }
175
176
177
178
179
180
181 kioc = mraid_mm_alloc_kioc(adp);
182 if (!kioc)
183 return -ENXIO;
184
185
186
187
188 if ((rval = mimd_to_kioc(argp, adp, kioc))) {
189 mraid_mm_dealloc_kioc(adp, kioc);
190 return rval;
191 }
192
193 kioc->done = ioctl_done;
194
195
196
197
198
199
200 if ((rval = lld_ioctl(adp, kioc))) {
201
202 if (!kioc->timedout)
203 mraid_mm_dealloc_kioc(adp, kioc);
204
205 return rval;
206 }
207
208
209
210
211 rval = kioc_to_mimd(kioc, argp);
212
213
214
215
216 mraid_mm_dealloc_kioc(adp, kioc);
217
218 return rval;
219 }
220
221 static long
222 mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd,
223 unsigned long arg)
224 {
225 int err;
226
227
228 mutex_lock(&mraid_mm_mutex);
229 err = mraid_mm_ioctl(filep, cmd, arg);
230 mutex_unlock(&mraid_mm_mutex);
231
232 return err;
233 }
234
235
236
237
238
239
240
241
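/**
 * mraid_mm_get_adapter - find the adapter a mimd packet is addressed to
 *
 * Reads the adapter number from the user packet and walks the global
 * adapter list to that index. On failure, *rval is set to -EFAULT or
 * -ENODEV and NULL is returned.
 */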
242 static mraid_mmadp_t *
243 mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
244 {
245 mraid_mmadp_t *adapter;
246 mimd_t mimd;
247 uint32_t adapno;
248 int iterator;
249
250
251 if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
252 *rval = -EFAULT;
253 return NULL;
254 }
255
256 adapno = GETADAP(mimd.ui.fcs.adapno);
257
258 if (adapno >= adapters_count_g) {
259 *rval = -ENODEV;
260 return NULL;
261 }
262
263 adapter = NULL;
264 iterator = 0;
265
266 list_for_each_entry(adapter, &adapters_list_g, list) {
267 if (iterator++ == adapno) break;
268 }
269
270 if (!adapter) {
271 *rval = -ENODEV;
272 return NULL;
273 }
274
275 return adapter;
276 }
277
278
279
280
281
282
283
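/**
 * handle_drvrcmd - commands the management module services by itself
 *
 * Driver version (MEGAIOC_QDRVRVER) and adapter count (MEGAIOC_QNADAP)
 * queries are answered here. Returns 0 if the command was completed
 * (with the result in *rval), 1 if it must be sent to an adapter, or a
 * negative errno. New-format packets are rejected with -ENOTSUPP.
 */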
284 static int
285 handle_drvrcmd(void __user *arg, uint8_t old_ioctl, int *rval)
286 {
287 mimd_t __user *umimd;
288 mimd_t kmimd;
289 uint8_t opcode;
290 uint8_t subopcode;
291
292 if (old_ioctl)
293 goto old_packet;
294 else
295 goto new_packet;
296
297 new_packet:
298 return (-ENOTSUPP);
299
300 old_packet:
301 *rval = 0;
302 umimd = arg;
303
304 if (copy_from_user(&kmimd, umimd, sizeof(mimd_t)))
305 return (-EFAULT);
306
307 opcode = kmimd.ui.fcs.opcode;
308 subopcode = kmimd.ui.fcs.subopcode;
309
310
311
312
313
314
315 if (opcode != 0x82)
316 return 1;
317
318 switch (subopcode) {
319
320 case MEGAIOC_QDRVRVER:
321
322 if (copy_to_user(kmimd.data, &drvr_ver, sizeof(uint32_t)))
323 return (-EFAULT);
324
325 return 0;
326
327 case MEGAIOC_QNADAP:
328
329 *rval = adapters_count_g;
330
331 if (copy_to_user(kmimd.data, &adapters_count_g,
332 sizeof(uint32_t)))
333 return (-EFAULT);
334
335 return 0;
336
337 default:
338
339 return 1;
340 }
341
342 return 0;
343 }
344
345
346
347
348
349
350
351
352
353
354
355
356
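/**
 * mimd_to_kioc - convert an old mimd packet to the uioc_t format
 *
 * Validates the adapter number, sets up the opcode, transfer length and
 * data direction, attaches a DMA buffer with mraid_mm_attach_buf(), and
 * copies in the mailbox, the passthru structure (for MBOXCMD_PASSTHRU)
 * and any outgoing user data. 64-bit and extended passthru commands are
 * not supported.
 */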
357 static int
358 mimd_to_kioc(mimd_t __user *umimd, mraid_mmadp_t *adp, uioc_t *kioc)
359 {
360 mbox64_t *mbox64;
361 mbox_t *mbox;
362 mraid_passthru_t *pthru32;
363 uint32_t adapno;
364 uint8_t opcode;
365 uint8_t subopcode;
366 mimd_t mimd;
367
368 if (copy_from_user(&mimd, umimd, sizeof(mimd_t)))
369 return (-EFAULT);
370
371
372
373
374 if ((mimd.mbox[0] == MBOXCMD_PASSTHRU64) ||
375 (mimd.mbox[0] == MBOXCMD_EXTPTHRU))
376 return (-EINVAL);
377
378 opcode = mimd.ui.fcs.opcode;
379 subopcode = mimd.ui.fcs.subopcode;
380 adapno = GETADAP(mimd.ui.fcs.adapno);
381
382 if (adapno >= adapters_count_g)
383 return (-ENODEV);
384
385 kioc->adapno = adapno;
386 kioc->mb_type = MBOX_LEGACY;
387 kioc->app_type = APPTYPE_MIMD;
388
389 switch (opcode) {
390
391 case 0x82:
392
393 if (subopcode == MEGAIOC_QADAPINFO) {
394
395 kioc->opcode = GET_ADAP_INFO;
396 kioc->data_dir = UIOC_RD;
397 kioc->xferlen = sizeof(mraid_hba_info_t);
398
399 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
400 return (-ENOMEM);
401 }
402 else {
403 con_log(CL_ANN, (KERN_WARNING
404 "megaraid cmm: Invalid subop\n"));
405 return (-EINVAL);
406 }
407
408 break;
409
410 case 0x81:
411
412 kioc->opcode = MBOX_CMD;
413 kioc->xferlen = mimd.ui.fcs.length;
414 kioc->user_data_len = kioc->xferlen;
415 kioc->user_data = mimd.ui.fcs.buffer;
416
417 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
418 return (-ENOMEM);
419
420 if (mimd.outlen) kioc->data_dir = UIOC_RD;
421 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
422
423 break;
424
425 case 0x80:
426
427 kioc->opcode = MBOX_CMD;
428 kioc->xferlen = (mimd.outlen > mimd.inlen) ?
429 mimd.outlen : mimd.inlen;
430 kioc->user_data_len = kioc->xferlen;
431 kioc->user_data = mimd.data;
432
433 if (mraid_mm_attach_buf(adp, kioc, kioc->xferlen))
434 return (-ENOMEM);
435
436 if (mimd.outlen) kioc->data_dir = UIOC_RD;
437 if (mimd.inlen) kioc->data_dir |= UIOC_WR;
438
439 break;
440
441 default:
442 return (-EINVAL);
443 }
444
445
446
447
448 if (opcode == 0x82)
449 return 0;
450
451
452
453
454 mbox64 = (mbox64_t *)((unsigned long)kioc->cmdbuf);
455 mbox = &mbox64->mbox32;
456 memcpy(mbox, mimd.mbox, 14);
457
458 if (mbox->cmd != MBOXCMD_PASSTHRU) {
459
460 mbox->xferaddr = (uint32_t)kioc->buf_paddr;
461
462 if (kioc->data_dir & UIOC_WR) {
463 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
464 kioc->xferlen)) {
465 return (-EFAULT);
466 }
467 }
468
469 return 0;
470 }
471
472
473
474
475
476
477
478 pthru32 = kioc->pthru32;
479 kioc->user_pthru = &umimd->pthru;
480 mbox->xferaddr = (uint32_t)kioc->pthru32_h;
481
482 if (copy_from_user(pthru32, kioc->user_pthru,
483 sizeof(mraid_passthru_t))) {
484 return (-EFAULT);
485 }
486
487 pthru32->dataxferaddr = kioc->buf_paddr;
488 if (kioc->data_dir & UIOC_WR) {
489 if (pthru32->dataxferlen > kioc->xferlen)
490 return -EINVAL;
491 if (copy_from_user(kioc->buf_vaddr, kioc->user_data,
492 pthru32->dataxferlen)) {
493 return (-EFAULT);
494 }
495 }
496
497 return 0;
498 }
499
500
501
502
503
504
505
506
507
508
509
510
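/**
 * mraid_mm_attach_buf - attach a DMA buffer of at least @xferlen bytes
 *
 * The preallocated per-adapter pools are scanned first-fit for a free
 * buffer that is large enough. If every suitable pool is busy, a fresh
 * buffer is carved out of the smallest fitting dma_pool and
 * kioc->free_buf is set so it is released with dma_pool_free() later.
 */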
511 static int
512 mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)
513 {
514 mm_dmapool_t *pool;
515 int right_pool = -1;
516 unsigned long flags;
517 int i;
518
519 kioc->pool_index = -1;
520 kioc->buf_vaddr = NULL;
521 kioc->buf_paddr = 0;
522 kioc->free_buf = 0;
523
524
525
526
527
528
529 for (i = 0; i < MAX_DMA_POOLS; i++) {
530
531 pool = &adp->dma_pool_list[i];
532
533 if (xferlen > pool->buf_size)
534 continue;
535
536 if (right_pool == -1)
537 right_pool = i;
538
539 spin_lock_irqsave(&pool->lock, flags);
540
541 if (!pool->in_use) {
542
543 pool->in_use = 1;
544 kioc->pool_index = i;
545 kioc->buf_vaddr = pool->vaddr;
546 kioc->buf_paddr = pool->paddr;
547
548 spin_unlock_irqrestore(&pool->lock, flags);
549 return 0;
550 }
551 else {
552 spin_unlock_irqrestore(&pool->lock, flags);
553 continue;
554 }
555 }
556
557
558
559
560 if (right_pool == -1)
561 return -EINVAL;
562
563
564
565
566
567 pool = &adp->dma_pool_list[right_pool];
568
569 spin_lock_irqsave(&pool->lock, flags);
570
571 kioc->pool_index = right_pool;
572 kioc->free_buf = 1;
573 kioc->buf_vaddr = dma_pool_alloc(pool->handle, GFP_ATOMIC,
574 &kioc->buf_paddr);
575 spin_unlock_irqrestore(&pool->lock, flags);
576
577 if (!kioc->buf_vaddr)
578 return -ENOMEM;
579
580 return 0;
581 }
582
583
584
585
586
587
588
589
590
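/**
 * mraid_mm_alloc_kioc - get a free kioc from the adapter's pool
 *
 * Blocks on the per-adapter semaphore until a kioc should be available,
 * then unlinks one from the free list and resets its mailbox, passthru
 * area and per-command fields.
 */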
591 static uioc_t *
592 mraid_mm_alloc_kioc(mraid_mmadp_t *adp)
593 {
594 uioc_t *kioc;
595 struct list_head* head;
596 unsigned long flags;
597
598 down(&adp->kioc_semaphore);
599
600 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
601
602 head = &adp->kioc_pool;
603
604 if (list_empty(head)) {
605 up(&adp->kioc_semaphore);
606 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
607
608 con_log(CL_ANN, ("megaraid cmm: kioc list empty!\n"));
609 return NULL;
610 }
611
612 kioc = list_entry(head->next, uioc_t, list);
613 list_del_init(&kioc->list);
614
615 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
616
617 memset((caddr_t)(unsigned long)kioc->cmdbuf, 0, sizeof(mbox64_t));
618 memset((caddr_t) kioc->pthru32, 0, sizeof(mraid_passthru_t));
619
620 kioc->buf_vaddr = NULL;
621 kioc->buf_paddr = 0;
622 kioc->pool_index =-1;
623 kioc->free_buf = 0;
624 kioc->user_data = NULL;
625 kioc->user_data_len = 0;
626 kioc->user_pthru = NULL;
627 kioc->timedout = 0;
628
629 return kioc;
630 }
631
632
633
634
635
636
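/**
 * mraid_mm_dealloc_kioc - return a kioc to the free pool
 *
 * Releases the attached DMA buffer (freed if it was a dedicated
 * allocation, otherwise the pool is simply marked not in use), puts the
 * kioc back on the free list and ups the semaphore.
 */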
637 static void
638 mraid_mm_dealloc_kioc(mraid_mmadp_t *adp, uioc_t *kioc)
639 {
640 mm_dmapool_t *pool;
641 unsigned long flags;
642
643 if (kioc->pool_index != -1) {
644 pool = &adp->dma_pool_list[kioc->pool_index];
645
646
647 spin_lock_irqsave(&pool->lock, flags);
648
649
650
651
652
653
654
655
656 if (kioc->free_buf == 1)
657 dma_pool_free(pool->handle, kioc->buf_vaddr,
658 kioc->buf_paddr);
659 else
660 pool->in_use = 0;
661
662 spin_unlock_irqrestore(&pool->lock, flags);
663 }
664
665
666 spin_lock_irqsave(&adp->kioc_pool_lock, flags);
667 list_add(&kioc->list, &adp->kioc_pool);
668 spin_unlock_irqrestore(&adp->kioc_pool_lock, flags);
669
670
671 up(&adp->kioc_semaphore);
672
673 return;
674 }
675
676
677
678
679
680
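/**
 * lld_ioctl - issue a kioc to the low-level driver and wait for it
 *
 * The status is preset to -ENODATA; the LLD's completion callback is
 * expected to change it. If the adapter specifies a timeout, an on-stack
 * timer (lld_timedout) is armed for the wait. A timed-out command marks
 * the adapter non-quiescent so that further ioctls are refused.
 */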
681 static int
682 lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc)
683 {
684 int rval;
685 struct uioc_timeout timeout = { };
686
687 kioc->status = -ENODATA;
688 rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE);
689
690 if (rval) return rval;
691
692
693
694
695 if (adp->timeout > 0) {
696 timeout.uioc = kioc;
697 timer_setup_on_stack(&timeout.timer, lld_timedout, 0);
698
699 timeout.timer.expires = jiffies + adp->timeout * HZ;
700
701 add_timer(&timeout.timer);
702 }
703
704
705
706
707
708 wait_event(wait_q, (kioc->status != -ENODATA));
709 if (timeout.timer.function) {
710 del_timer_sync(&timeout.timer);
711 destroy_timer_on_stack(&timeout.timer);
712 }
713
714
715
716
717
718 if (kioc->timedout) {
719 adp->quiescent = 0;
720 }
721
722 return kioc->status;
723 }
724
725
726
727
728
729
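/**
 * ioctl_done - completion callback invoked by the low-level driver
 *
 * If the LLD forgot to set a status, -EINVAL is substituted. A command
 * that had already timed out is cleaned up here (its kioc is returned to
 * the owning adapter's pool); otherwise the sleeping issuer is woken.
 */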
730 static void
731 ioctl_done(uioc_t *kioc)
732 {
733 uint32_t adapno;
734 int iterator;
735 mraid_mmadp_t* adapter;
736
737
738
739
740
741
742 if (kioc->status == -ENODATA) {
743 con_log(CL_ANN, (KERN_WARNING
744 "megaraid cmm: lld didn't change status!\n"));
745
746 kioc->status = -EINVAL;
747 }
748
749
750
751
752
753
754 if (kioc->timedout) {
755 iterator = 0;
756 adapter = NULL;
757 adapno = kioc->adapno;
758
759 con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
760 "ioctl that was timedout before\n"));
761
762 list_for_each_entry(adapter, &adapters_list_g, list) {
763 if (iterator++ == adapno) break;
764 }
765
766 kioc->timedout = 0;
767
768 if (adapter) {
769 mraid_mm_dealloc_kioc( adapter, kioc );
770 }
771 }
772 else {
773 wake_up(&wait_q);
774 }
775 }
776
777
778
779
780
781
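/**
 * lld_timedout - timer callback for an ioctl that took too long
 *
 * Marks the kioc timed out with -ETIME and wakes up the waiter.
 */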
782 static void
783 lld_timedout(struct timer_list *t)
784 {
785 struct uioc_timeout *timeout = from_timer(timeout, t, timer);
786 uioc_t *kioc = timeout->uioc;
787
788 kioc->status = -ETIME;
789 kioc->timedout = 1;
790
791 con_log(CL_ANN, (KERN_WARNING "megaraid cmm: ioctl timed out\n"));
792
793 wake_up(&wait_q);
794 }
795
796
797
798
799
800
801
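/**
 * kioc_to_mimd - copy the results of a completed kioc back to user space
 *
 * Adapter-information queries get a converted mcontroller_t; passthru
 * commands get their SCSI status; mailbox commands get the data buffer
 * and the mailbox status byte copied into the user mimd packet.
 */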
802 static int
803 kioc_to_mimd(uioc_t *kioc, mimd_t __user *mimd)
804 {
805 mimd_t kmimd;
806 uint8_t opcode;
807 uint8_t subopcode;
808
809 mbox64_t *mbox64;
810 mraid_passthru_t __user *upthru32;
811 mraid_passthru_t *kpthru32;
812 mcontroller_t cinfo;
813 mraid_hba_info_t *hinfo;
814
815
816 if (copy_from_user(&kmimd, mimd, sizeof(mimd_t)))
817 return (-EFAULT);
818
819 opcode = kmimd.ui.fcs.opcode;
820 subopcode = kmimd.ui.fcs.subopcode;
821
822 if (opcode == 0x82) {
823 switch (subopcode) {
824
825 case MEGAIOC_QADAPINFO:
826
827 hinfo = (mraid_hba_info_t *)(unsigned long)
828 kioc->buf_vaddr;
829
830 hinfo_to_cinfo(hinfo, &cinfo);
831
832 if (copy_to_user(kmimd.data, &cinfo, sizeof(cinfo)))
833 return (-EFAULT);
834
835 return 0;
836
837 default:
838 return (-EINVAL);
839 }
840
841 return 0;
842 }
843
844 mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
845
846 if (kioc->user_pthru) {
847
848 upthru32 = kioc->user_pthru;
849 kpthru32 = kioc->pthru32;
850
851 if (copy_to_user(&upthru32->scsistatus,
852 &kpthru32->scsistatus,
853 sizeof(uint8_t))) {
854 return (-EFAULT);
855 }
856 }
857
858 if (kioc->user_data) {
859 if (copy_to_user(kioc->user_data, kioc->buf_vaddr,
860 kioc->user_data_len)) {
861 return (-EFAULT);
862 }
863 }
864
865 if (copy_to_user(&mimd->mbox[17],
866 &mbox64->mbox32.status, sizeof(uint8_t))) {
867 return (-EFAULT);
868 }
869
870 return 0;
871 }
872
873
874
875
876
877
878
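/**
 * hinfo_to_cinfo - convert the new HBA info format to the old one
 *
 * Applications still expect the legacy mcontroller_t layout, so the
 * relevant fields of mraid_hba_info_t are copied over one by one.
 */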
879 static void
880 hinfo_to_cinfo(mraid_hba_info_t *hinfo, mcontroller_t *cinfo)
881 {
882 if (!hinfo || !cinfo)
883 return;
884
885 cinfo->base = hinfo->baseport;
886 cinfo->irq = hinfo->irq;
887 cinfo->numldrv = hinfo->num_ldrv;
888 cinfo->pcibus = hinfo->pci_bus;
889 cinfo->pcidev = hinfo->pci_slot;
890 cinfo->pcifun = PCI_FUNC(hinfo->pci_dev_fn);
891 cinfo->pciid = hinfo->pci_device_id;
892 cinfo->pcivendor = hinfo->pci_vendor_id;
893 cinfo->pcislot = hinfo->pci_slot;
894 cinfo->uid = hinfo->unique_id;
895 }
896
897
898
899
900
901
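/**
 * mraid_mm_register_adp - registration routine for low-level drivers
 *
 * Only mailbox-type (DRVRTYPE_MBOX) drivers are accepted. A private
 * adapter structure is allocated and populated from @lld_adp, the kioc
 * and mailbox arrays and the passthru/data DMA pools are set up, and the
 * adapter is appended to the global list.
 *
 * A minimal sketch of how a low-level driver might register (the names
 * and values below are illustrative, not taken from any real driver):
 *
 *	mraid_mmadp_t adp = {
 *		.unique_id  = my_unique_id,	// e.g. a bus/devfn cookie
 *		.drvr_type  = DRVRTYPE_MBOX,
 *		.drvr_data  = my_driver_cookie,
 *		.pdev       = my_pci_dev,
 *		.issue_uioc = my_issue_uioc,	// kioc dispatch callback
 *		.timeout    = 300,		// seconds; 0 disables the timer
 *		.max_kioc   = 32,		// max concurrent ioctls
 *	};
 *	if (mraid_mm_register_adp(&adp))
 *		// no management interface for this adapter
 */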
902 int
903 mraid_mm_register_adp(mraid_mmadp_t *lld_adp)
904 {
905 mraid_mmadp_t *adapter;
906 mbox64_t *mbox_list;
907 uioc_t *kioc;
908 uint32_t rval;
909 int i;
910
911
912 if (lld_adp->drvr_type != DRVRTYPE_MBOX)
913 return (-EINVAL);
914
915 adapter = kzalloc(sizeof(mraid_mmadp_t), GFP_KERNEL);
916
917 if (!adapter)
918 return -ENOMEM;
919
920
921 adapter->unique_id = lld_adp->unique_id;
922 adapter->drvr_type = lld_adp->drvr_type;
923 adapter->drvr_data = lld_adp->drvr_data;
924 adapter->pdev = lld_adp->pdev;
925 adapter->issue_uioc = lld_adp->issue_uioc;
926 adapter->timeout = lld_adp->timeout;
927 adapter->max_kioc = lld_adp->max_kioc;
928 adapter->quiescent = 1;
929
930
931
932
933
934 adapter->kioc_list = kmalloc_array(lld_adp->max_kioc,
935 sizeof(uioc_t),
936 GFP_KERNEL);
937 adapter->mbox_list = kmalloc_array(lld_adp->max_kioc,
938 sizeof(mbox64_t),
939 GFP_KERNEL);
940 adapter->pthru_dma_pool = dma_pool_create("megaraid mm pthru pool",
941 &adapter->pdev->dev,
942 sizeof(mraid_passthru_t),
943 16, 0);
944
945 if (!adapter->kioc_list || !adapter->mbox_list ||
946 !adapter->pthru_dma_pool) {
947
948 con_log(CL_ANN, (KERN_WARNING
949 "megaraid cmm: out of memory, %s %d\n", __func__,
950 __LINE__));
951
952 rval = (-ENOMEM);
953
954 goto memalloc_error;
955 }
956
957
958
959
960 INIT_LIST_HEAD(&adapter->kioc_pool);
961 spin_lock_init(&adapter->kioc_pool_lock);
962 sema_init(&adapter->kioc_semaphore, lld_adp->max_kioc);
963
964 mbox_list = (mbox64_t *)adapter->mbox_list;
965
966 for (i = 0; i < lld_adp->max_kioc; i++) {
967
968 kioc = adapter->kioc_list + i;
969 kioc->cmdbuf = (uint64_t)(unsigned long)(mbox_list + i);
970 kioc->pthru32 = dma_pool_alloc(adapter->pthru_dma_pool,
971 GFP_KERNEL, &kioc->pthru32_h);
972
973 if (!kioc->pthru32) {
974
975 con_log(CL_ANN, (KERN_WARNING
976 "megaraid cmm: out of memory, %s %d\n",
977 __func__, __LINE__));
978
979 rval = (-ENOMEM);
980
981 goto pthru_dma_pool_error;
982 }
983
984 list_add_tail(&kioc->list, &adapter->kioc_pool);
985 }
986
987
988 if ((rval = mraid_mm_setup_dma_pools(adapter)) != 0) {
989 goto dma_pool_error;
990 }
991
992 list_add_tail(&adapter->list, &adapters_list_g);
993
994 adapters_count_g++;
995
996 return 0;
997
998 dma_pool_error:
999
1000
1001 pthru_dma_pool_error:
1002
1003 for (i = 0; i < lld_adp->max_kioc; i++) {
1004 kioc = adapter->kioc_list + i;
1005 if (kioc->pthru32) {
1006 dma_pool_free(adapter->pthru_dma_pool, kioc->pthru32,
1007 kioc->pthru32_h);
1008 }
1009 }
1010
1011 memalloc_error:
1012
1013 kfree(adapter->kioc_list);
1014 kfree(adapter->mbox_list);
1015
1016 dma_pool_destroy(adapter->pthru_dma_pool);
1017
1018 kfree(adapter);
1019
1020 return rval;
1021 }
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
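/**
 * mraid_mm_adapter_app_handle - application-visible handle for an adapter
 *
 * Returns MKADAP(index) for the adapter with the given unique id, where
 * index is its position on the global adapter list, or 0 if the id is
 * not registered.
 */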
1035 uint32_t
1036 mraid_mm_adapter_app_handle(uint32_t unique_id)
1037 {
1038 mraid_mmadp_t *adapter;
1039 mraid_mmadp_t *tmp;
1040 int index = 0;
1041
1042 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1043
1044 if (adapter->unique_id == unique_id) {
1045
1046 return MKADAP(index);
1047 }
1048
1049 index++;
1050 }
1051
1052 return 0;
1053 }
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
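/**
 * mraid_mm_setup_dma_pools - set up the per-adapter data buffer pools
 *
 * Creates MAX_DMA_POOLS dma_pools with buffer sizes doubling from
 * MRAID_MM_INIT_BUFF_SIZE and preallocates one buffer from each for
 * mraid_mm_attach_buf() to hand out. Everything is torn down again if
 * any step fails.
 */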
1066 static int
1067 mraid_mm_setup_dma_pools(mraid_mmadp_t *adp)
1068 {
1069 mm_dmapool_t *pool;
1070 int bufsize;
1071 int i;
1072
1073
1074
1075
1076 bufsize = MRAID_MM_INIT_BUFF_SIZE;
1077
1078 for (i = 0; i < MAX_DMA_POOLS; i++){
1079
1080 pool = &adp->dma_pool_list[i];
1081
1082 pool->buf_size = bufsize;
1083 spin_lock_init(&pool->lock);
1084
1085 pool->handle = dma_pool_create("megaraid mm data buffer",
1086 &adp->pdev->dev, bufsize,
1087 16, 0);
1088
1089 if (!pool->handle) {
1090 goto dma_pool_setup_error;
1091 }
1092
1093 pool->vaddr = dma_pool_alloc(pool->handle, GFP_KERNEL,
1094 &pool->paddr);
1095
1096 if (!pool->vaddr)
1097 goto dma_pool_setup_error;
1098
1099 bufsize = bufsize * 2;
1100 }
1101
1102 return 0;
1103
1104 dma_pool_setup_error:
1105
1106 mraid_mm_teardown_dma_pools(adp);
1107 return (-ENOMEM);
1108 }
1109
1110
1111
1112
1113
1114
1115
1116
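/**
 * mraid_mm_unregister_adp - un-registration routine for low-level drivers
 *
 * Removes the adapter with the matching unique id from the global list,
 * frees all of its resources and drops the adapter count.
 */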
1117 int
1118 mraid_mm_unregister_adp(uint32_t unique_id)
1119 {
1120 mraid_mmadp_t *adapter;
1121 mraid_mmadp_t *tmp;
1122
1123 list_for_each_entry_safe(adapter, tmp, &adapters_list_g, list) {
1124
1125
1126 if (adapter->unique_id == unique_id) {
1127
1128 adapters_count_g--;
1129
1130 list_del_init(&adapter->list);
1131
1132 mraid_mm_free_adp_resources(adapter);
1133
1134 kfree(adapter);
1135
1136 con_log(CL_ANN, (
1137 "megaraid cmm: Unregistered one adapter:%#x\n",
1138 unique_id));
1139
1140 return 0;
1141 }
1142 }
1143
1144 return (-ENODEV);
1145 }
1146
1147
1148
1149
1150
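/**
 * mraid_mm_free_adp_resources - free all memory tied to one adapter
 *
 * Tears down the data DMA pools, releases every kioc's passthru buffer,
 * and frees the kioc list, mailbox list and passthru pool.
 */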
1151 static void
1152 mraid_mm_free_adp_resources(mraid_mmadp_t *adp)
1153 {
1154 uioc_t *kioc;
1155 int i;
1156
1157 mraid_mm_teardown_dma_pools(adp);
1158
1159 for (i = 0; i < adp->max_kioc; i++) {
1160
1161 kioc = adp->kioc_list + i;
1162
1163 dma_pool_free(adp->pthru_dma_pool, kioc->pthru32,
1164 kioc->pthru32_h);
1165 }
1166
1167 kfree(adp->kioc_list);
1168 kfree(adp->mbox_list);
1169
1170 dma_pool_destroy(adp->pthru_dma_pool);
1171
1172
1173 return;
1174 }
1175
1176
1177
1178
1179
1180
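/**
 * mraid_mm_teardown_dma_pools - undo mraid_mm_setup_dma_pools
 *
 * Returns the preallocated buffer of each data pool and destroys the
 * pool itself.
 */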
1181 static void
1182 mraid_mm_teardown_dma_pools(mraid_mmadp_t *adp)
1183 {
1184 int i;
1185 mm_dmapool_t *pool;
1186
1187 for (i = 0; i < MAX_DMA_POOLS; i++) {
1188
1189 pool = &adp->dma_pool_list[i];
1190
1191 if (pool->handle) {
1192
1193 if (pool->vaddr)
1194 dma_pool_free(pool->handle, pool->vaddr,
1195 pool->paddr);
1196
1197 dma_pool_destroy(pool->handle);
1198 pool->handle = NULL;
1199 }
1200 }
1201
1202 return;
1203 }
1204
1205
1206
1207
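/**
 * mraid_mm_init - module load entry point
 *
 * Registers the "megadev0" misc character device and initializes the
 * completion wait queue and the global adapter list.
 */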
1208 static int __init
1209 mraid_mm_init(void)
1210 {
1211 int err;
1212
1213
1214 con_log(CL_ANN, (KERN_INFO "megaraid cmm: %s %s\n",
1215 LSI_COMMON_MOD_VERSION, LSI_COMMON_MOD_EXT_VERSION));
1216
1217 err = misc_register(&megaraid_mm_dev);
1218 if (err < 0) {
1219 con_log(CL_ANN, ("megaraid cmm: cannot register misc device\n"));
1220 return err;
1221 }
1222
1223 init_waitqueue_head(&wait_q);
1224
1225 INIT_LIST_HEAD(&adapters_list_g);
1226
1227 return 0;
1228 }
1229
1230
1231 #ifdef CONFIG_COMPAT
1232
1233
1234
1235
1236
1237
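/**
 * mraid_mm_compat_ioctl - 32-bit ioctl entry point on 64-bit kernels
 *
 * Requests from 32-bit applications are passed straight through to
 * mraid_mm_ioctl().
 */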
1238 static long
1239 mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd,
1240 unsigned long arg)
1241 {
1242 int err;
1243
1244 err = mraid_mm_ioctl(filep, cmd, arg);
1245
1246 return err;
1247 }
1248 #endif
1249
1250
1251
1252
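/**
 * mraid_mm_exit - module unload entry point
 *
 * Deregisters the misc character device.
 */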
1253 static void __exit
1254 mraid_mm_exit(void)
1255 {
1256 con_log(CL_DLEVEL1 , ("exiting common mod\n"));
1257
1258 misc_deregister(&megaraid_mm_dev);
1259 }
1260
1261 module_init(mraid_mm_init);
1262 module_exit(mraid_mm_exit);
1263
1264