This source file includes the following definitions.
- h_copy_rdma
- h_free_crq
- h_request_vmc
- ibmvmc_handle_event
- ibmvmc_release_crq_queue
- ibmvmc_reset_crq_queue
- crq_queue_next_crq
- ibmvmc_send_crq
- alloc_dma_buffer
- free_dma_buffer
- ibmvmc_get_valid_hmc_buffer
- ibmvmc_get_free_hmc_buffer
- ibmvmc_free_hmc_buffer
- ibmvmc_count_hmc_buffers
- ibmvmc_get_free_hmc
- ibmvmc_return_hmc
- ibmvmc_send_open
- ibmvmc_send_close
- ibmvmc_send_capabilities
- ibmvmc_send_add_buffer_resp
- ibmvmc_send_rem_buffer_resp
- ibmvmc_send_msg
- ibmvmc_open
- ibmvmc_close
- ibmvmc_read
- ibmvmc_poll
- ibmvmc_write
- ibmvmc_setup_hmc
- ibmvmc_ioctl_sethmcid
- ibmvmc_ioctl_query
- ibmvmc_ioctl_requestvmc
- ibmvmc_ioctl
- ibmvmc_add_buffer
- ibmvmc_rem_buffer
- ibmvmc_recv_msg
- ibmvmc_process_capabilities
- ibmvmc_validate_hmc_session
- ibmvmc_reset
- ibmvmc_reset_task
- ibmvmc_process_open_resp
- ibmvmc_process_close_resp
- ibmvmc_crq_process
- ibmvmc_handle_crq_init
- ibmvmc_handle_crq
- ibmvmc_task
- ibmvmc_init_crq_queue
- read_dma_window
- ibmvmc_probe
- ibmvmc_remove
- ibmvmc_scrub_module_parms
- ibmvmc_module_init
- ibmvmc_module_exit
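/*
 * IBM Power Systems Virtual Management Channel (VMC) support.
 *
 * This driver exposes a misc character device that relays management
 * traffic between user space (an HMC session) and the POWER hypervisor
 * over a Command/Response Queue (CRQ) pair.
 */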
12 #include <linux/module.h>
13 #include <linux/kernel.h>
14 #include <linux/kthread.h>
15 #include <linux/major.h>
16 #include <linux/string.h>
17 #include <linux/fcntl.h>
18 #include <linux/slab.h>
19 #include <linux/poll.h>
20 #include <linux/init.h>
21 #include <linux/fs.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock.h>
24 #include <linux/percpu.h>
25 #include <linux/delay.h>
26 #include <linux/uaccess.h>
27 #include <linux/io.h>
28 #include <linux/miscdevice.h>
29 #include <linux/sched/signal.h>
30
31 #include <asm/byteorder.h>
32 #include <asm/irq.h>
33 #include <asm/vio.h>
34
35 #include "ibmvmc.h"
36
37 #define IBMVMC_DRIVER_VERSION "1.0"
38
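/* Driver-wide state, shared by every HMC session. */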
42 static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
43
44 static const char ibmvmc_driver_name[] = "ibmvmc";
45
46 static struct ibmvmc_struct ibmvmc;
47 static struct ibmvmc_hmc hmcs[MAX_HMCS];
48 static struct crq_server_adapter ibmvmc_adapter;
49
50 static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
51 static int ibmvmc_max_hmcs = DEFAULT_HMCS;
52 static int ibmvmc_max_mtu = DEFAULT_MTU;
53
54 static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
55 u64 dliobn, u64 dlioba)
56 {
57 long rc = 0;
58
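/* Ensure writes to the source buffer are visible before the copy hcall. */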
60 dma_wmb();
61 pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx)\n",
62 length, sliobn, slioba, dliobn, dlioba);
63 rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
64 dliobn, dlioba);
65 pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
66
67 return rc;
68 }
69
70 static inline void h_free_crq(uint32_t unit_address)
71 {
72 long rc = 0;
73
74 do {
75 if (H_IS_LONG_BUSY(rc))
76 msleep(get_longbusy_msecs(rc));
77
78 rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
79 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
80 }
81
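/**
 * h_request_vmc - Ask the hypervisor to create a VMC device
 * @vmc_index: on success, the DRC index of the new VMC device
 *
 * Retries while the hypervisor reports busy.  Returns the hcall
 * return code.
 */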
94 static inline long h_request_vmc(u32 *vmc_index)
95 {
96 long rc = 0;
97 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
98
99 do {
100 if (H_IS_LONG_BUSY(rc))
101 msleep(get_longbusy_msecs(rc));
102
103
104 rc = plpar_hcall(H_REQUEST_VMC, retbuf);
105 pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
106 *vmc_index = retbuf[0];
107 } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
108
109 return rc;
110 }
111
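/**
 * ibmvmc_handle_event - CRQ interrupt handler
 * @irq: IRQ number
 * @dev_instance: the crq_server_adapter that owns the CRQ
 *
 * Masks VIO interrupts and defers CRQ processing to the tasklet.
 */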
122 static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
123 {
124 struct crq_server_adapter *adapter =
125 (struct crq_server_adapter *)dev_instance;
126
127 vio_disable_interrupts(to_vio_dev(adapter->dev));
128 tasklet_schedule(&adapter->work_task);
129
130 return IRQ_HANDLED;
131 }
132
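/**
 * ibmvmc_release_crq_queue - Tear down the CRQ
 * @adapter: adapter instance
 *
 * Frees the IRQ, stops the tasklet and the reset thread, releases
 * the CRQ with the hypervisor, and unmaps and frees the queue page.
 */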
142 static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
143 {
144 struct vio_dev *vdev = to_vio_dev(adapter->dev);
145 struct crq_queue *queue = &adapter->queue;
146
147 free_irq(vdev->irq, (void *)adapter);
148 tasklet_kill(&adapter->work_task);
149
150 if (adapter->reset_task)
151 kthread_stop(adapter->reset_task);
152
153 h_free_crq(vdev->unit_address);
154 dma_unmap_single(adapter->dev,
155 queue->msg_token,
156 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
157 free_page((unsigned long)queue->msgs);
158 }
159
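/**
 * ibmvmc_reset_crq_queue - Close and re-register the CRQ
 * @adapter: adapter instance
 *
 * Frees the CRQ with the hypervisor, clears the existing queue page,
 * and re-registers it.  Returns the H_REG_CRQ return code.
 */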
172 static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
173 {
174 struct vio_dev *vdev = to_vio_dev(adapter->dev);
175 struct crq_queue *queue = &adapter->queue;
176 int rc = 0;
177
178
179 h_free_crq(vdev->unit_address);
180
181
182 memset(queue->msgs, 0x00, PAGE_SIZE);
183 queue->cur = 0;
184
185
186 rc = plpar_hcall_norets(H_REG_CRQ,
187 vdev->unit_address,
188 queue->msg_token, PAGE_SIZE);
189 if (rc == 2)
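/* Adapter is good, but the other end is not ready yet. */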
191 dev_warn(adapter->dev, "Partner adapter not ready\n");
192 else if (rc != 0)
193 dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
194
195 return rc;
196 }
197
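/**
 * crq_queue_next_crq - Pop the next valid CRQ entry, if any
 * @queue: the CRQ
 *
 * Returns the next entry whose valid bit is set and advances the
 * cursor, or NULL when the queue is empty.
 */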
205 static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
206 {
207 struct ibmvmc_crq_msg *crq;
208 unsigned long flags;
209
210 spin_lock_irqsave(&queue->lock, flags);
211 crq = &queue->msgs[queue->cur];
212 if (crq->valid & 0x80) {
213 if (++queue->cur == queue->size)
214 queue->cur = 0;
215
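/* Order the read of the valid bit before reads of the rest of the entry. */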
219 dma_rmb();
220 } else {
221 crq = NULL;
222 }
223
224 spin_unlock_irqrestore(&queue->lock, flags);
225
226 return crq;
227 }
228
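/**
 * ibmvmc_send_crq - Send a 16-byte CRQ message to the hypervisor
 * @adapter: adapter instance
 * @word1: first 8 bytes of the message
 * @word2: second 8 bytes of the message
 */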
240 static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
241 u64 word1, u64 word2)
242 {
243 struct vio_dev *vdev = to_vio_dev(adapter->dev);
244 long rc = 0;
245
246 dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
247 vdev->unit_address, word1, word2);
248
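/* Flush the message to memory before handing it to the hypervisor,
 * so that it cannot fetch stale data.
 */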
253 dma_wmb();
254 rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
255 dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
256
257 return rc;
258 }
259
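/**
 * alloc_dma_buffer - Allocate and DMA-map a buffer
 * @vdev: VIO device
 * @size: buffer size in bytes
 * @dma_handle: returned DMA address, 0 on failure
 *
 * Returns the kernel virtual address, or NULL on failure.
 */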
272 static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
273 dma_addr_t *dma_handle)
274 {
276 void *buffer = kzalloc(size, GFP_ATOMIC);
277
278 if (!buffer) {
279 *dma_handle = 0;
280 return NULL;
281 }
282
283
284 *dma_handle = dma_map_single(&vdev->dev, buffer, size,
285 DMA_BIDIRECTIONAL);
286
287 if (dma_mapping_error(&vdev->dev, *dma_handle)) {
288 *dma_handle = 0;
289 kzfree(buffer);
290 return NULL;
291 }
292
293 return buffer;
294 }
295
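/**
 * free_dma_buffer - Unmap and free a buffer from alloc_dma_buffer()
 * @vdev: VIO device
 * @size: mapped size in bytes
 * @vaddr: kernel virtual address
 * @dma_handle: DMA address
 */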
306 static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
307 dma_addr_t dma_handle)
308 {
310 dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);
311
312
313 kzfree(vaddr);
314 }
315
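/**
 * ibmvmc_get_valid_hmc_buffer - Claim a valid, free, ALPHA-owned buffer
 * @hmc_index: HMC index
 *
 * Marks the buffer in use and returns it, or NULL if none qualifies.
 */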
324 static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
325 {
326 struct ibmvmc_buffer *buffer;
327 struct ibmvmc_buffer *ret_buf = NULL;
328 unsigned long i;
329
330 if (hmc_index > ibmvmc.max_hmc_index)
331 return NULL;
332
333 buffer = hmcs[hmc_index].buffer;
334
335 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
336 if (buffer[i].valid && buffer[i].free &&
337 buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
338 buffer[i].free = 0;
339 ret_buf = &buffer[i];
340 break;
341 }
342 }
343
344 return ret_buf;
345 }
346
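/**
 * ibmvmc_get_free_hmc_buffer - Claim any free, ALPHA-owned buffer
 * @adapter: adapter instance (for logging)
 * @hmc_index: HMC index
 *
 * Unlike ibmvmc_get_valid_hmc_buffer(), the buffer need not be valid.
 */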
356 static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
357 u8 hmc_index)
358 {
359 struct ibmvmc_buffer *buffer;
360 struct ibmvmc_buffer *ret_buf = NULL;
361 unsigned long i;
362
363 if (hmc_index > ibmvmc.max_hmc_index) {
364 dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
365 hmc_index);
366 return NULL;
367 }
368
369 buffer = hmcs[hmc_index].buffer;
370
371 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
372 if (buffer[i].free &&
373 buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
374 buffer[i].free = 0;
375 ret_buf = &buffer[i];
376 break;
377 }
378 }
379
380 return ret_buf;
381 }
382
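/**
 * ibmvmc_free_hmc_buffer - Mark a buffer free under the HMC lock
 * @hmc: owning HMC
 * @buffer: buffer to release
 */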
390 static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
391 struct ibmvmc_buffer *buffer)
392 {
393 unsigned long flags;
394
395 spin_lock_irqsave(&hmc->lock, flags);
396 buffer->free = 1;
397 spin_unlock_irqrestore(&hmc->lock, flags);
398 }
399
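/**
 * ibmvmc_count_hmc_buffers - Count valid and free buffers for an HMC
 * @hmc_index: HMC index
 * @valid: returned count of valid buffers
 * @free: returned count of valid buffers that are also free
 */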
408 static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
409 unsigned int *free)
410 {
411 struct ibmvmc_buffer *buffer;
412 unsigned long i;
413 unsigned long flags;
414
415 if (hmc_index > ibmvmc.max_hmc_index)
416 return;
417
418 if (!valid || !free)
419 return;
420
421 *valid = 0; *free = 0;
422
423 buffer = hmcs[hmc_index].buffer;
424 spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
425
426 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
427 if (buffer[i].valid) {
428 *valid = *valid + 1;
429 if (buffer[i].free)
430 *free = *free + 1;
431 }
432 }
433
434 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
435 }
436
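/**
 * ibmvmc_get_free_hmc - Claim the first free HMC slot
 *
 * Transitions the slot from free to the initial state and returns
 * it, or NULL if every slot is in use.
 */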
444 static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
445 {
446 unsigned long i;
447 unsigned long flags;
448
452 for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
453 spin_lock_irqsave(&hmcs[i].lock, flags);
454 if (hmcs[i].state == ibmhmc_state_free) {
455 hmcs[i].index = i;
456 hmcs[i].state = ibmhmc_state_initial;
457 spin_unlock_irqrestore(&hmcs[i].lock, flags);
458 return &hmcs[i];
459 }
460 spin_unlock_irqrestore(&hmcs[i].lock, flags);
461 }
462
463 return NULL;
464 }
465
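/**
 * ibmvmc_return_hmc - Release an HMC and all of its buffers
 * @hmc: HMC to release
 * @release_readers: invalidate the file session and wake blocked readers
 *
 * Frees every valid DMA buffer, clears the outbound message queue,
 * and returns the slot to the free state.
 */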
478 static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
479 {
480 struct ibmvmc_buffer *buffer;
481 struct crq_server_adapter *adapter;
482 struct vio_dev *vdev;
483 unsigned long i;
484 unsigned long flags;
485
486 if (!hmc || !hmc->adapter)
487 return -EIO;
488
489 if (release_readers) {
490 if (hmc->file_session) {
491 struct ibmvmc_file_session *session = hmc->file_session;
492
493 session->valid = 0;
494 wake_up_interruptible(&ibmvmc_read_wait);
495 }
496 }
497
498 adapter = hmc->adapter;
499 vdev = to_vio_dev(adapter->dev);
500
501 spin_lock_irqsave(&hmc->lock, flags);
502 hmc->index = 0;
503 hmc->state = ibmhmc_state_free;
504 hmc->queue_head = 0;
505 hmc->queue_tail = 0;
506 buffer = hmc->buffer;
507 for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
508 if (buffer[i].valid) {
509 free_dma_buffer(vdev,
510 ibmvmc.max_mtu,
511 buffer[i].real_addr_local,
512 buffer[i].dma_addr_local);
513 dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
514 }
515 memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
516
517 hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
518 }
519
520 spin_unlock_irqrestore(&hmc->lock, flags);
521
522 return 0;
523 }
524
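/**
 * ibmvmc_send_open - Send an interface-open message to the hypervisor
 * @buffer: buffer holding the HMC ID, already claimed by the caller
 * @hmc: HMC session to open
 *
 * Copies the buffer to the hypervisor with H_COPY_RDMA, moves the HMC
 * to the opening state, and sends a VMC_MSG_OPEN CRQ message.
 */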
543 static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
544 struct ibmvmc_hmc *hmc)
545 {
546 struct ibmvmc_crq_msg crq_msg;
547 struct crq_server_adapter *adapter;
548 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
549 int rc = 0;
550
551 if (!hmc || !hmc->adapter)
552 return -EIO;
553
554 adapter = hmc->adapter;
555
556 dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
557 (unsigned long)buffer->size, (unsigned long)adapter->liobn,
558 (unsigned long)buffer->dma_addr_local,
559 (unsigned long)adapter->riobn,
560 (unsigned long)buffer->dma_addr_remote);
561
562 rc = h_copy_rdma(buffer->size,
563 adapter->liobn,
564 buffer->dma_addr_local,
565 adapter->riobn,
566 buffer->dma_addr_remote);
567 if (rc) {
568 dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
569 rc);
570 return -EIO;
571 }
572
573 hmc->state = ibmhmc_state_opening;
574
575 crq_msg.valid = 0x80;
576 crq_msg.type = VMC_MSG_OPEN;
577 crq_msg.status = 0;
578 crq_msg.var1.rsvd = 0;
579 crq_msg.hmc_session = hmc->session;
580 crq_msg.hmc_index = hmc->index;
581 crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
582 crq_msg.rsvd = 0;
583 crq_msg.var3.rsvd = 0;
584
585 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
586 be64_to_cpu(crq_as_u64[1]));
587
588 return rc;
589 }
590
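/**
 * ibmvmc_send_close - Send an interface-close message for an HMC
 * @hmc: HMC session to close
 */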
605 static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
606 {
607 struct ibmvmc_crq_msg crq_msg;
608 struct crq_server_adapter *adapter;
609 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
610 int rc = 0;
611
612 if (!hmc || !hmc->adapter)
613 return -EIO;
614
615 adapter = hmc->adapter;
616
617 dev_info(adapter->dev, "CRQ send: close\n");
618
619 crq_msg.valid = 0x80;
620 crq_msg.type = VMC_MSG_CLOSE;
621 crq_msg.status = 0;
622 crq_msg.var1.rsvd = 0;
623 crq_msg.hmc_session = hmc->session;
624 crq_msg.hmc_index = hmc->index;
625 crq_msg.var2.rsvd = 0;
626 crq_msg.rsvd = 0;
627 crq_msg.var3.rsvd = 0;
628
629 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
630 be64_to_cpu(crq_as_u64[1]));
631
632 return rc;
633 }
634
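/**
 * ibmvmc_send_capabilities - Send the driver's capabilities message
 * @adapter: adapter instance
 *
 * Advertises the (module-parameter-limited) MTU, buffer pool size,
 * HMC count, CRQ size and protocol version, then waits in the
 * capabilities state for the hypervisor's response.
 */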
651 static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
652 {
653 struct ibmvmc_admin_crq_msg crq_msg;
654 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
655
656 dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
657 crq_msg.valid = 0x80;
658 crq_msg.type = VMC_MSG_CAP;
659 crq_msg.status = 0;
660 crq_msg.rsvd[0] = 0;
661 crq_msg.rsvd[1] = 0;
662 crq_msg.max_hmc = ibmvmc_max_hmcs;
663 crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
664 crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
665 crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
666 crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
667
668 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
669 be64_to_cpu(crq_as_u64[1]));
670
671 ibmvmc.state = ibmvmc_state_capabilities;
672
673 return 0;
674 }
675
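/**
 * ibmvmc_send_add_buffer_resp - Acknowledge an add-buffer request
 * @adapter: adapter instance
 * @status: VMC_MSG_SUCCESS or an error status
 * @hmc_session: session from the request
 * @hmc_index: HMC index from the request
 * @buffer_id: buffer id from the request
 */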
692 static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
693 u8 status, u8 hmc_session,
694 u8 hmc_index, u16 buffer_id)
695 {
696 struct ibmvmc_crq_msg crq_msg;
697 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
698
699 dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
700 crq_msg.valid = 0x80;
701 crq_msg.type = VMC_MSG_ADD_BUF_RESP;
702 crq_msg.status = status;
703 crq_msg.var1.rsvd = 0;
704 crq_msg.hmc_session = hmc_session;
705 crq_msg.hmc_index = hmc_index;
706 crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
707 crq_msg.rsvd = 0;
708 crq_msg.var3.rsvd = 0;
709
710 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
711 be64_to_cpu(crq_as_u64[1]));
712
713 return 0;
714 }
715
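/**
 * ibmvmc_send_rem_buffer_resp - Acknowledge a remove-buffer request
 * @adapter: adapter instance
 * @status: VMC_MSG_SUCCESS or an error status
 * @hmc_session: session from the request
 * @hmc_index: HMC index from the request
 * @buffer_id: id of the buffer that was removed
 */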
733 static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
734 u8 status, u8 hmc_session,
735 u8 hmc_index, u16 buffer_id)
736 {
737 struct ibmvmc_crq_msg crq_msg;
738 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
739
740 dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
741 crq_msg.valid = 0x80;
742 crq_msg.type = VMC_MSG_REM_BUF_RESP;
743 crq_msg.status = status;
744 crq_msg.var1.rsvd = 0;
745 crq_msg.hmc_session = hmc_session;
746 crq_msg.hmc_index = hmc_index;
747 crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
748 crq_msg.rsvd = 0;
749 crq_msg.var3.rsvd = 0;
750
751 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
752 be64_to_cpu(crq_as_u64[1]));
753
754 return 0;
755 }
756
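/**
 * ibmvmc_send_msg - Push an outbound message to the hypervisor
 * @adapter: adapter instance
 * @buffer: buffer containing the message
 * @hmc: HMC session
 * @msg_len: message length in bytes
 *
 * RDMAs the payload to the hypervisor's buffer, transfers buffer
 * ownership to the hypervisor, and signals it with a VMC_MSG_SIGNAL
 * CRQ message.
 */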
777 static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
778 struct ibmvmc_buffer *buffer,
779 struct ibmvmc_hmc *hmc, int msg_len)
780 {
781 struct ibmvmc_crq_msg crq_msg;
782 __be64 *crq_as_u64 = (__be64 *)&crq_msg;
783 int rc = 0;
784
785 dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
786 rc = h_copy_rdma(msg_len,
787 adapter->liobn,
788 buffer->dma_addr_local,
789 adapter->riobn,
790 buffer->dma_addr_remote);
791 if (rc) {
792 dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
793 rc);
794 return rc;
795 }
796
797 crq_msg.valid = 0x80;
798 crq_msg.type = VMC_MSG_SIGNAL;
799 crq_msg.status = 0;
800 crq_msg.var1.rsvd = 0;
801 crq_msg.hmc_session = hmc->session;
802 crq_msg.hmc_index = hmc->index;
803 crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
804 crq_msg.var3.msg_len = cpu_to_be32(msg_len);
805 dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
806 be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
807
808 buffer->owner = VMC_BUF_OWNER_HV;
809 ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
810 be64_to_cpu(crq_as_u64[1]));
811
812 return rc;
813 }
814
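/**
 * ibmvmc_open - open(2) handler
 * @inode: device inode
 * @file: file being opened
 *
 * Allocates a per-file session; an HMC slot is attached later by the
 * VMC_IOCTL_SETHMCID ioctl.
 */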
825 static int ibmvmc_open(struct inode *inode, struct file *file)
826 {
827 struct ibmvmc_file_session *session;
828
829 pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
830 (unsigned long)inode, (unsigned long)file,
831 ibmvmc.state);
832
833 session = kzalloc(sizeof(*session), GFP_KERNEL);
834 if (!session)
835 return -ENOMEM;
836
837 session->file = file;
838 file->private_data = session;
839
840 return 0;
841 }
842
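/**
 * ibmvmc_close - release(2) handler
 * @inode: device inode
 * @file: file being closed
 *
 * Sends a close message for any open HMC session and frees the
 * per-file session.
 */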
853 static int ibmvmc_close(struct inode *inode, struct file *file)
854 {
855 struct ibmvmc_file_session *session;
856 struct ibmvmc_hmc *hmc;
857 int rc = 0;
858 unsigned long flags;
859
860 pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
861 (unsigned long)file, ibmvmc.state);
862
863 session = file->private_data;
864 if (!session)
865 return -EIO;
866
867 hmc = session->hmc;
868 if (hmc) {
869 if (!hmc->adapter)
870 return -EIO;
871
872 if (ibmvmc.state == ibmvmc_state_failed) {
873 dev_warn(hmc->adapter->dev, "close: state_failed\n");
874 return -EIO;
875 }
876
877 spin_lock_irqsave(&hmc->lock, flags);
878 if (hmc->state >= ibmhmc_state_opening) {
879 rc = ibmvmc_send_close(hmc);
880 if (rc)
881 dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
882 }
883 spin_unlock_irqrestore(&hmc->lock, flags);
884 }
885
886 kzfree(session);
887
888 return rc;
889 }
890
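/**
 * ibmvmc_read - read(2) handler
 * @file: file being read
 * @buf: user buffer
 * @nbytes: number of bytes requested, at most the negotiated MTU
 * @ppos: file offset (unused)
 *
 * Blocks (unless O_NONBLOCK) until an inbound buffer is queued, then
 * copies at most one message to user space and frees the buffer.
 */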
903 static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
904 loff_t *ppos)
905 {
906 struct ibmvmc_file_session *session;
907 struct ibmvmc_hmc *hmc;
908 struct crq_server_adapter *adapter;
909 struct ibmvmc_buffer *buffer;
910 ssize_t n;
911 ssize_t retval = 0;
912 unsigned long flags;
913 DEFINE_WAIT(wait);
914
915 pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
916 (unsigned long)file, (unsigned long)buf,
917 (unsigned long)nbytes);
918
919 if (nbytes == 0)
920 return 0;
921
922 if (nbytes > ibmvmc.max_mtu) {
923 pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
924 (unsigned int)nbytes);
925 return -EINVAL;
926 }
927
928 session = file->private_data;
929 if (!session) {
930 pr_warn("ibmvmc: read: no session\n");
931 return -EIO;
932 }
933
934 hmc = session->hmc;
935 if (!hmc) {
936 pr_warn("ibmvmc: read: no hmc\n");
937 return -EIO;
938 }
939
940 adapter = hmc->adapter;
941 if (!adapter) {
942 pr_warn("ibmvmc: read: no adapter\n");
943 return -EIO;
944 }
945
946 do {
947 prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);
948
949 spin_lock_irqsave(&hmc->lock, flags);
950 if (hmc->queue_tail != hmc->queue_head)
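/* Data is available. */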
952 break;
953
954 spin_unlock_irqrestore(&hmc->lock, flags);
955
956 if (!session->valid) {
957 retval = -EBADFD;
958 goto out;
959 }
960 if (file->f_flags & O_NONBLOCK) {
961 retval = -EAGAIN;
962 goto out;
963 }
964
965 schedule();
966
967 if (signal_pending(current)) {
968 retval = -ERESTARTSYS;
969 goto out;
970 }
971 } while (1);
972
973 buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
974 hmc->queue_tail++;
975 if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
976 hmc->queue_tail = 0;
977 spin_unlock_irqrestore(&hmc->lock, flags);
978
979 nbytes = min_t(size_t, nbytes, buffer->msg_len);
980 n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
981 dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
982 ibmvmc_free_hmc_buffer(hmc, buffer);
983 retval = nbytes;
984
985 if (n) {
986 dev_warn(adapter->dev, "read: copy to user failed.\n");
987 retval = -EFAULT;
988 }
989
990 out:
991 finish_wait(&ibmvmc_read_wait, &wait);
992 dev_dbg(adapter->dev, "read: out %ld\n", retval);
993 return retval;
994 }
995
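/**
 * ibmvmc_poll - poll(2) handler
 * @file: file being polled
 * @wait: poll table
 *
 * Reports POLLIN | POLLRDNORM when an inbound message is queued.
 */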
1005 static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
1006 {
1007 struct ibmvmc_file_session *session;
1008 struct ibmvmc_hmc *hmc;
1009 unsigned int mask = 0;
1010
1011 session = file->private_data;
1012 if (!session)
1013 return 0;
1014
1015 hmc = session->hmc;
1016 if (!hmc)
1017 return 0;
1018
1019 poll_wait(file, &ibmvmc_read_wait, wait);
1020
1021 if (hmc->queue_head != hmc->queue_tail)
1022 mask |= POLLIN | POLLRDNORM;
1023
1024 return mask;
1025 }
1026
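/**
 * ibmvmc_write - write(2) handler
 * @file: file being written
 * @buffer: user data
 * @count: number of bytes, at most the negotiated MTU
 * @ppos: file offset (unused)
 *
 * Claims a free buffer for the session's HMC, copies the user data
 * into it, and sends it to the hypervisor.
 */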
1039 static ssize_t ibmvmc_write(struct file *file, const char *buffer,
1040 size_t count, loff_t *ppos)
1041 {
1042 struct ibmvmc_buffer *vmc_buffer;
1043 struct ibmvmc_file_session *session;
1044 struct crq_server_adapter *adapter;
1045 struct ibmvmc_hmc *hmc;
1046 unsigned char *buf;
1047 unsigned long flags;
1048 size_t bytes;
1049 const char *p = buffer;
1050 size_t c = count;
1051 int ret = 0;
1052
1053 session = file->private_data;
1054 if (!session)
1055 return -EIO;
1056
1057 hmc = session->hmc;
1058 if (!hmc)
1059 return -EIO;
1060
1061 spin_lock_irqsave(&hmc->lock, flags);
1062 if (hmc->state == ibmhmc_state_free) {
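/* The HMC connection is not valid (it may have been reset under us). */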
1064 ret = -EIO;
1065 goto out;
1066 }
1067
1068 adapter = hmc->adapter;
1069 if (!adapter) {
1070 ret = -EIO;
1071 goto out;
1072 }
1073
1074 if (count > ibmvmc.max_mtu) {
1075 dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
1076 (unsigned long)count);
1077 ret = -EIO;
1078 goto out;
1079 }
1080
1081
1082 if (hmc->state == ibmhmc_state_opening) {
1083 ret = -EBUSY;
1084 goto out;
1085 }
1086
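/* Only a ready HMC can accept data. */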
1090 if (hmc->state != ibmhmc_state_ready) {
1091 ret = -EIO;
1092 goto out;
1093 }
1094
1095 vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
1096 if (!vmc_buffer) {
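/* No free buffer for this message right now; have the caller retry. */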
1101 ret = -EBUSY;
1102 goto out;
1103 }
1104 if (!vmc_buffer->real_addr_local) {
1105 dev_err(adapter->dev, "no buffer storage assigned\n");
1106 ret = -EIO;
1107 goto out;
1108 }
1109 buf = vmc_buffer->real_addr_local;
1110
1111 while (c > 0) {
1112 bytes = min_t(size_t, c, vmc_buffer->size);
1113
1114 bytes -= copy_from_user(buf, p, bytes);
1115 if (!bytes) {
1116 ret = -EFAULT;
1117 goto out;
1118 }
1119 c -= bytes;
1120 p += bytes;
1121 }
1122 if (p == buffer)
1123 goto out;
1124
1125 file->f_path.dentry->d_inode->i_mtime = current_time(file_inode(file));
1126 mark_inode_dirty(file->f_path.dentry->d_inode);
1127
1128 dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
1129 (unsigned long)file, (unsigned long)count);
1130
1131 ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
1132 ret = p - buffer;
1133 out:
1134 spin_unlock_irqrestore(&hmc->lock, flags);
1135 return (ssize_t)(ret);
1136 }
1137
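/**
 * ibmvmc_setup_hmc - Attach a free HMC slot to a file session
 * @session: per-file session
 *
 * Requires the driver to be in the ready state with buffers posted
 * for every HMC index.  Bumps the per-slot session id (wrapping 0xff
 * back to 1) and links the slot to the file session.
 */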
1147 static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
1148 {
1149 struct ibmvmc_hmc *hmc;
1150 unsigned int valid, free, index;
1151
1152 if (ibmvmc.state == ibmvmc_state_failed) {
1153 pr_warn("ibmvmc: Reserve HMC: state_failed\n");
1154 return -EIO;
1155 }
1156
1157 if (ibmvmc.state < ibmvmc_state_ready) {
1158 pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
1159 return -EAGAIN;
1160 }
1161
1162
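/* Every HMC index must have buffers posted before any session starts. */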
1165 for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
1166 valid = 0;
1167 ibmvmc_count_hmc_buffers(index, &valid, &free);
1168 if (valid == 0) {
1169 pr_warn("ibmvmc: buffers not ready for index %d\n",
1170 index);
1171 return -ENOBUFS;
1172 }
1173 }
1174
1175
1176 hmc = ibmvmc_get_free_hmc();
1177 if (!hmc) {
1178 pr_warn("%s: free hmc not found\n", __func__);
1179 return -EBUSY;
1180 }
1181
1182 hmc->session = hmc->session + 1;
1183 if (hmc->session == 0xff)
1184 hmc->session = 1;
1185
1186 session->hmc = hmc;
1187 hmc->adapter = &ibmvmc_adapter;
1188 hmc->file_session = session;
1189 session->valid = 1;
1190
1191 return 0;
1192 }
1193
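/**
 * ibmvmc_ioctl_sethmcid - VMC_IOCTL_SETHMCID handler
 * @session: per-file session
 * @new_hmc_id: user pointer to an HMC_ID_LEN byte HMC ID
 *
 * Reserves an HMC slot on first use, copies in the HMC ID, places it
 * in a buffer, and sends the interface-open message.
 */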
1206 static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
1207 unsigned char __user *new_hmc_id)
1208 {
1209 struct ibmvmc_hmc *hmc;
1210 struct ibmvmc_buffer *buffer;
1211 size_t bytes;
1212 char print_buffer[HMC_ID_LEN + 1];
1213 unsigned long flags;
1214 long rc = 0;
1215
1216
1217 hmc = session->hmc;
1218 if (!hmc) {
1219 rc = ibmvmc_setup_hmc(session);
1220 if (rc)
1221 return rc;
1222
1223 hmc = session->hmc;
1224 if (!hmc) {
1225 pr_err("ibmvmc: setup_hmc success but no hmc\n");
1226 return -EIO;
1227 }
1228 }
1229
1230 if (hmc->state != ibmhmc_state_initial) {
1231 pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
1232 hmc->state);
1233 return -EIO;
1234 }
1235
1236 bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
1237 if (bytes)
1238 return -EFAULT;
1239
1240
1241 spin_lock_irqsave(&hmc->lock, flags);
1242 buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
1243 spin_unlock_irqrestore(&hmc->lock, flags);
1244
1245 if (!buffer || !buffer->real_addr_local) {
1246 pr_warn("ibmvmc: sethmcid: no buffer available\n");
1247 return -EIO;
1248 }
1249
1250
1251 memset(print_buffer, 0, HMC_ID_LEN + 1);
1252 strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
1253 pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
1254
1255 memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
1256
1257 rc = ibmvmc_send_open(buffer, hmc);
1258
1259 return rc;
1260 }
1261
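/**
 * ibmvmc_ioctl_query - VMC_IOCTL_QUERY handler
 * @session: per-file session
 * @ret_struct: user pointer receiving driver state and the VMC DRC index
 */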
1272 static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
1273 struct ibmvmc_query_struct __user *ret_struct)
1274 {
1275 struct ibmvmc_query_struct query_struct;
1276 size_t bytes;
1277
1278 memset(&query_struct, 0, sizeof(query_struct));
1279 query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
1280 query_struct.state = ibmvmc.state;
1281 query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
1282
1283 bytes = copy_to_user(ret_struct, &query_struct,
1284 sizeof(query_struct));
1285 if (bytes)
1286 return -EFAULT;
1287
1288 return 0;
1289 }
1290
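/**
 * ibmvmc_ioctl_requestvmc - VMC_IOCTL_REQUESTVMC handler
 * @session: per-file session
 * @ret_vmc_index: user pointer receiving the new device's DRC index
 *
 * Issues H_REQUEST_VMC and maps the hcall status to an errno.
 */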
1301 static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
1302 u32 __user *ret_vmc_index)
1303 {
1304
1305 size_t bytes;
1306 long rc;
1307 u32 vmc_drc_index;
1308
1309
1310 rc = h_request_vmc(&vmc_drc_index);
1311 pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
1312
1313 if (rc == H_SUCCESS) {
1314 rc = 0;
1315 } else if (rc == H_FUNCTION) {
1316 pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
1317 return -EPERM;
1318 } else if (rc == H_AUTHORITY) {
1319 pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
1320 return -EPERM;
1321 } else if (rc == H_HARDWARE) {
1322 pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
1323 return -EIO;
1324 } else if (rc == H_RESOURCE) {
1325 pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
1326 return -ENODEV;
1327 } else if (rc == H_NOT_AVAILABLE) {
1328 pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
1329 return -EPERM;
1330 } else if (rc == H_PARAMETER) {
1331 pr_err("ibmvmc: requestvmc: invalid parameter\n");
1332 return -EINVAL;
1333 }
1334
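/* Success: record the DRC index and copy it back to user space. */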
1336 ibmvmc.vmc_drc_index = vmc_drc_index;
1337
1338 bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
1339 sizeof(*ret_vmc_index));
1340 if (bytes) {
1341 pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
1342 return -EFAULT;
1343 }
1344 return rc;
1345 }
1346
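/**
 * ibmvmc_ioctl - unlocked_ioctl dispatcher for the ibmvmc misc device
 * @file: file the ioctl was issued on
 * @cmd: ioctl command
 * @arg: user argument
 */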
1358 static long ibmvmc_ioctl(struct file *file,
1359 unsigned int cmd, unsigned long arg)
1360 {
1361 struct ibmvmc_file_session *session = file->private_data;
1362
1363 pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
1364 (unsigned long)file, cmd, arg,
1365 (unsigned long)session);
1366
1367 if (!session) {
1368 pr_warn("ibmvmc: ioctl: no session\n");
1369 return -EIO;
1370 }
1371
1372 switch (cmd) {
1373 case VMC_IOCTL_SETHMCID:
1374 return ibmvmc_ioctl_sethmcid(session,
1375 (unsigned char __user *)arg);
1376 case VMC_IOCTL_QUERY:
1377 return ibmvmc_ioctl_query(session,
1378 (struct ibmvmc_query_struct __user *)arg);
1379 case VMC_IOCTL_REQUESTVMC:
1380 return ibmvmc_ioctl_requestvmc(session,
1381 (unsigned int __user *)arg);
1382 default:
1383 pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
1384 return -EINVAL;
1385 }
1386 }
1387
1388 static const struct file_operations ibmvmc_fops = {
1389 .owner = THIS_MODULE,
1390 .read = ibmvmc_read,
1391 .write = ibmvmc_write,
1392 .poll = ibmvmc_poll,
1393 .unlocked_ioctl = ibmvmc_ioctl,
1394 .open = ibmvmc_open,
1395 .release = ibmvmc_close,
1396 };
1397
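/*
 * Minimal user-space usage sketch (not from this file; it assumes the
 * ioctl numbers and types from ibmvmc.h and that udev creates a
 * /dev/ibmvmc node for the misc device):
 *
 *	int fd = open("/dev/ibmvmc", O_RDWR);
 *	struct ibmvmc_query_struct q;
 *	unsigned char id[HMC_ID_LEN] = "session-id";
 *
 *	ioctl(fd, VMC_IOCTL_QUERY, &q);      // driver state + DRC index
 *	ioctl(fd, VMC_IOCTL_SETHMCID, id);   // reserve an HMC, send open
 *	write(fd, msg, msg_len);             // send one message (<= MTU)
 *	read(fd, resp, sizeof(resp));        // block for one message
 */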
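/**
 * ibmvmc_add_buffer - Handle a VMC_MSG_ADD_BUF request
 * @adapter: adapter instance
 * @crq: the request message
 *
 * Validates the HMC index and buffer id, allocates and maps a local
 * buffer of max_mtu bytes, records the remote LIOBA, and responds
 * with VMC_MSG_ADD_BUF_RESP.
 */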
1425 static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
1426 struct ibmvmc_crq_msg *crq)
1427 {
1428 struct ibmvmc_buffer *buffer;
1429 u8 hmc_index;
1430 u8 hmc_session;
1431 u16 buffer_id;
1432 unsigned long flags;
1433 int rc = 0;
1434
1435 if (!crq)
1436 return -1;
1437
1438 hmc_session = crq->hmc_session;
1439 hmc_index = crq->hmc_index;
1440 buffer_id = be16_to_cpu(crq->var2.buffer_id);
1441
1442 if (hmc_index > ibmvmc.max_hmc_index) {
1443 dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
1444 hmc_index);
1445 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1446 hmc_session, hmc_index, buffer_id);
1447 return -1;
1448 }
1449
1450 if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1451 dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
1452 buffer_id);
1453 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1454 hmc_session, hmc_index, buffer_id);
1455 return -1;
1456 }
1457
1458 spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
1459 buffer = &hmcs[hmc_index].buffer[buffer_id];
1460
1461 if (buffer->real_addr_local || buffer->dma_addr_local) {
1462 dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
1463 (unsigned long)buffer_id);
1464 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1465 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1466 hmc_session, hmc_index, buffer_id);
1467 return -1;
1468 }
1469
1470 buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
1471 ibmvmc.max_mtu,
1472 &buffer->dma_addr_local);
1473
1474 if (!buffer->real_addr_local) {
1475 dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
1476 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1477 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
1478 hmc_session, hmc_index, buffer_id);
1479 return -1;
1480 }
1481
1482 buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
1483 buffer->size = ibmvmc.max_mtu;
1484 buffer->owner = crq->var1.owner;
1485 buffer->free = 1;
1486
1487 dma_wmb();
1488 buffer->valid = 1;
1489 buffer->id = buffer_id;
1490
1491 dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
1492 dev_dbg(adapter->dev, " index: %d, session: %d, buffer: 0x%x, owner: %d\n",
1493 hmc_index, hmc_session, buffer_id, buffer->owner);
1494 dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n",
1495 (u32)buffer->dma_addr_local,
1496 (u32)buffer->dma_addr_remote);
1497 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1498
1499 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
1500 hmc_index, buffer_id);
1501
1502 return rc;
1503 }
1504
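/**
 * ibmvmc_rem_buffer - Handle a VMC_MSG_REM_BUF request
 * @adapter: adapter instance
 * @crq: the request message
 *
 * Picks any free ALPHA-owned buffer for the HMC, frees its DMA
 * storage, and responds with VMC_MSG_REM_BUF_RESP carrying the id of
 * the buffer that was actually removed.
 */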
1542 static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
1543 struct ibmvmc_crq_msg *crq)
1544 {
1545 struct ibmvmc_buffer *buffer;
1546 u8 hmc_index;
1547 u8 hmc_session;
1548 u16 buffer_id = 0;
1549 unsigned long flags;
1550 int rc = 0;
1551
1552 if (!crq)
1553 return -1;
1554
1555 hmc_session = crq->hmc_session;
1556 hmc_index = crq->hmc_index;
1557
1558 if (hmc_index > ibmvmc.max_hmc_index) {
1559 dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
1560 hmc_index);
1561 ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1562 hmc_session, hmc_index, buffer_id);
1563 return -1;
1564 }
1565
1566 spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
1567 buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
1568 if (!buffer) {
1569 dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
1570 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1571 ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
1572 hmc_session, hmc_index,
1573 VMC_INVALID_BUFFER_ID);
1574 return -1;
1575 }
1576
1577 buffer_id = buffer->id;
1578
1579 if (buffer->valid)
1580 free_dma_buffer(to_vio_dev(adapter->dev),
1581 ibmvmc.max_mtu,
1582 buffer->real_addr_local,
1583 buffer->dma_addr_local);
1584
1585 memset(buffer, 0, sizeof(struct ibmvmc_buffer));
1586 spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
1587
1588 dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
1589 ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
1590 hmc_index, buffer_id);
1591
1592 return rc;
1593 }
1594
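/**
 * ibmvmc_recv_msg - Handle a VMC_MSG_SIGNAL (inbound data) message
 * @adapter: adapter instance
 * @crq: the signal message
 *
 * RDMAs the payload from the hypervisor into the named buffer, takes
 * ownership of it, queues the buffer id for readers, and wakes them.
 */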
1595 static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
1596 struct ibmvmc_crq_msg *crq)
1597 {
1598 struct ibmvmc_buffer *buffer;
1599 struct ibmvmc_hmc *hmc;
1600 unsigned long msg_len;
1601 u8 hmc_index;
1602 u8 hmc_session;
1603 u16 buffer_id;
1604 unsigned long flags;
1605 int rc = 0;
1606
1607 if (!crq)
1608 return -1;
1609
1611 dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
1612 be64_to_cpu(*((unsigned long *)crq)),
1613 be64_to_cpu(*(((unsigned long *)crq) + 1)));
1614
1615 hmc_session = crq->hmc_session;
1616 hmc_index = crq->hmc_index;
1617 buffer_id = be16_to_cpu(crq->var2.buffer_id);
1618 msg_len = be32_to_cpu(crq->var3.msg_len);
1619
1620 if (hmc_index > ibmvmc.max_hmc_index) {
1621 dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
1622 hmc_index);
1623 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
1624 hmc_session, hmc_index, buffer_id);
1625 return -1;
1626 }
1627
1628 if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1629 dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
1630 buffer_id);
1631 ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
1632 hmc_session, hmc_index, buffer_id);
1633 return -1;
1634 }
1635
1636 hmc = &hmcs[hmc_index];
1637 spin_lock_irqsave(&hmc->lock, flags);
1638
1639 if (hmc->state == ibmhmc_state_free) {
1640 dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
1641 hmc->state);
1642
1643 spin_unlock_irqrestore(&hmc->lock, flags);
1644 return -1;
1645 }
1646
1647 buffer = &hmc->buffer[buffer_id];
1648
1649 if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
1650 dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
1651 buffer->valid, buffer->owner);
1652 spin_unlock_irqrestore(&hmc->lock, flags);
1653 return -1;
1654 }
1655
1656
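/* Pull the message payload from the hypervisor into the local buffer. */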
1657 rc = h_copy_rdma(msg_len,
1658 adapter->riobn,
1659 buffer->dma_addr_remote,
1660 adapter->liobn,
1661 buffer->dma_addr_local);
1662
1663 dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
1664 (unsigned int)msg_len, (unsigned int)buffer_id,
1665 (unsigned int)hmc->queue_head, (unsigned int)hmc_index);
1666 buffer->msg_len = msg_len;
1667 buffer->free = 0;
1668 buffer->owner = VMC_BUF_OWNER_ALPHA;
1669
1670 if (rc) {
1671 dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
1672 rc);
1673 spin_unlock_irqrestore(&hmc->lock, flags);
1674 return -1;
1675 }
1676
1677
1678 hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
1679 hmc->queue_head++;
1680 if (hmc->queue_head == ibmvmc_max_buf_pool_size)
1681 hmc->queue_head = 0;
1682
1683 if (hmc->queue_head == hmc->queue_tail)
1684 dev_err(adapter->dev, "outbound buffer queue wrapped.\n");
1685
1686 spin_unlock_irqrestore(&hmc->lock, flags);
1687
1688 wake_up_interruptible(&ibmvmc_read_wait);
1689
1690 return 0;
1691 }
1692
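/**
 * ibmvmc_process_capabilities - Handle the capabilities response
 * @adapter: adapter instance
 * @crqp: the response message
 *
 * Checks major protocol version compatibility, clamps the negotiated
 * MTU, pool size and HMC count, and moves the driver to ready.
 */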
1700 static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
1701 struct ibmvmc_crq_msg *crqp)
1702 {
1703 struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;
1704
1705 if ((be16_to_cpu(crq->version) >> 8) !=
1706 (IBMVMC_PROTOCOL_VERSION >> 8)) {
1707 dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
1708 be16_to_cpu(crq->version),
1709 IBMVMC_PROTOCOL_VERSION);
1710 ibmvmc.state = ibmvmc_state_failed;
1711 return;
1712 }
1713
1714 ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
1715 ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
1716 be16_to_cpu(crq->pool_size));
1717 ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
1718 ibmvmc.state = ibmvmc_state_ready;
1719
1720 dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
1721 ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
1722 ibmvmc.max_hmc_index);
1723 }
1724
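/**
 * ibmvmc_validate_hmc_session - Check a message's HMC session id
 * @adapter: adapter instance
 * @crq: received message
 *
 * Returns 0 if the session matches (or is 0), -1 otherwise.
 */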
1735 static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
1736 struct ibmvmc_crq_msg *crq)
1737 {
1738 unsigned char hmc_index;
1739
1740 hmc_index = crq->hmc_index;
1741
1742 if (crq->hmc_session == 0)
1743 return 0;
1744
1745 if (hmc_index > ibmvmc.max_hmc_index)
1746 return -1;
1747
1748 if (hmcs[hmc_index].session != crq->hmc_session) {
1749 dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
1750 hmcs[hmc_index].session, crq->hmc_session);
1751 return -1;
1752 }
1753
1754 return 0;
1755 }
1756
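/**
 * ibmvmc_reset - Return the driver to its initial state
 * @adapter: adapter instance
 * @xport_event: true when triggered by a transport event
 *
 * Releases every HMC.  For transport events only the state changes;
 * otherwise a full CRQ reset is scheduled on the reset thread.
 */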
1767 static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
1768 {
1769 int i;
1770
1771 if (ibmvmc.state != ibmvmc_state_sched_reset) {
1772 dev_info(adapter->dev, "*** Reset to initial state.\n");
1773 for (i = 0; i < ibmvmc_max_hmcs; i++)
1774 ibmvmc_return_hmc(&hmcs[i], xport_event);
1775
1776 if (xport_event) {
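/* The partner closed the CRQ; just move to crqinit and wait for
 * init messages.
 */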
1781 ibmvmc.state = ibmvmc_state_crqinit;
1782 } else {
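/* A local error: the CRQ must be freed and re-registered, which
 * sleeps, so hand the reset to the dedicated kernel thread.
 */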
1792 ibmvmc.state = ibmvmc_state_sched_reset;
1793 dev_dbg(adapter->dev, "Device reset scheduled");
1794 wake_up_interruptible(&adapter->reset_wait_queue);
1795 }
1796 }
1797 }
1798
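/**
 * ibmvmc_reset_task - Kernel thread that performs scheduled CRQ resets
 * @data: the crq_server_adapter
 *
 * Sleeps until a reset is scheduled, re-registers the CRQ with the
 * tasklet disabled, and re-sends the CRQ init message.
 */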
1807 static int ibmvmc_reset_task(void *data)
1808 {
1809 struct crq_server_adapter *adapter = data;
1810 int rc;
1811
1812 set_user_nice(current, -20);
1813
1814 while (!kthread_should_stop()) {
1815 wait_event_interruptible(adapter->reset_wait_queue,
1816 (ibmvmc.state == ibmvmc_state_sched_reset) ||
1817 kthread_should_stop());
1818
1819 if (kthread_should_stop())
1820 break;
1821
1822 dev_dbg(adapter->dev, "CRQ resetting in process context");
1823 tasklet_disable(&adapter->work_task);
1824
1825 rc = ibmvmc_reset_crq_queue(adapter);
1826
1827 if (rc != H_SUCCESS && rc != H_RESOURCE) {
1828 dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
1829 rc);
1830 ibmvmc.state = ibmvmc_state_failed;
1831 } else {
1832 ibmvmc.state = ibmvmc_state_crqinit;
1833
1834 if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
1835 != 0 && rc != H_RESOURCE)
1836 dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
1837 }
1838
1839 vio_enable_interrupts(to_vio_dev(adapter->dev));
1840 tasklet_enable(&adapter->work_task);
1841 }
1842
1843 return 0;
1844 }
1845
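/**
 * ibmvmc_process_open_resp - Handle an interface-open response
 * @crq: the response message
 * @adapter: adapter instance
 *
 * On success, returns the HMC-ID buffer to the pool and marks the
 * HMC ready; on failure, releases the HMC.
 */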
1856 static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
1857 struct crq_server_adapter *adapter)
1858 {
1859 unsigned char hmc_index;
1860 unsigned short buffer_id;
1861
1862 hmc_index = crq->hmc_index;
1863 if (hmc_index > ibmvmc.max_hmc_index) {
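/* A response for an index beyond our maximum means we are out of
 * sync with the hypervisor: reset.
 */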
1865 ibmvmc_reset(adapter, false);
1866 return;
1867 }
1868
1869 if (crq->status) {
1870 dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
1871 crq->status);
1872 ibmvmc_return_hmc(&hmcs[hmc_index], false);
1873 return;
1874 }
1875
1876 if (hmcs[hmc_index].state == ibmhmc_state_opening) {
1877 buffer_id = be16_to_cpu(crq->var2.buffer_id);
1878 if (buffer_id >= ibmvmc.max_buffer_pool_size) {
1879 dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
1880 buffer_id);
1881 hmcs[hmc_index].state = ibmhmc_state_failed;
1882 } else {
1883 ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
1884 &hmcs[hmc_index].buffer[buffer_id]);
1885 hmcs[hmc_index].state = ibmhmc_state_ready;
1886 dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
1887 }
1888 } else {
1889 dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
1890 hmcs[hmc_index].state);
1891 }
1892 }
1893
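/**
 * ibmvmc_process_close_resp - Handle an interface-close response
 * @crq: the response message
 * @adapter: adapter instance
 *
 * Releases the HMC; a bad index or status triggers a reset.
 */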
1906 static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
1907 struct crq_server_adapter *adapter)
1908 {
1909 unsigned char hmc_index;
1910
1911 hmc_index = crq->hmc_index;
1912 if (hmc_index > ibmvmc.max_hmc_index) {
1913 ibmvmc_reset(adapter, false);
1914 return;
1915 }
1916
1917 if (crq->status) {
1918 dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
1919 crq->status);
1920 ibmvmc_reset(adapter, false);
1921 return;
1922 }
1923
1924 ibmvmc_return_hmc(&hmcs[hmc_index], false);
1925 }
1926
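/**
 * ibmvmc_crq_process - Dispatch a valid (0x80) CRQ message by type
 * @adapter: adapter instance
 * @crq: received message
 */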
1936 static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
1937 struct ibmvmc_crq_msg *crq)
1938 {
1939 switch (crq->type) {
1940 case VMC_MSG_CAP_RESP:
1941 dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
1942 crq->type);
1943 if (ibmvmc.state == ibmvmc_state_capabilities)
1944 ibmvmc_process_capabilities(adapter, crq);
1945 else
1946 dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
1947 ibmvmc.state);
1948 break;
1949 case VMC_MSG_OPEN_RESP:
1950 dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
1951 crq->type);
1952 if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1953 ibmvmc_process_open_resp(crq, adapter);
1954 break;
1955 case VMC_MSG_ADD_BUF:
1956 dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
1957 crq->type);
1958 if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1959 ibmvmc_add_buffer(adapter, crq);
1960 break;
1961 case VMC_MSG_REM_BUF:
1962 dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
1963 crq->type);
1964 if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1965 ibmvmc_rem_buffer(adapter, crq);
1966 break;
1967 case VMC_MSG_SIGNAL:
1968 dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
1969 crq->type);
1970 if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1971 ibmvmc_recv_msg(adapter, crq);
1972 break;
1973 case VMC_MSG_CLOSE_RESP:
1974 dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
1975 crq->type);
1976 if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
1977 ibmvmc_process_close_resp(crq, adapter);
1978 break;
1979 case VMC_MSG_CAP:
1980 case VMC_MSG_OPEN:
1981 case VMC_MSG_CLOSE:
1982 case VMC_MSG_ADD_BUF_RESP:
1983 case VMC_MSG_REM_BUF_RESP:
1984 dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
1985 crq->type);
1986 break;
1987 default:
1988 dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
1989 crq->type);
1990 break;
1991 }
1992 }
1993
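/**
 * ibmvmc_handle_crq_init - Handle CRQ initialization messages (0xC0)
 * @crq: received message
 * @adapter: adapter instance
 *
 * Responds to an init request (0x01) with an init response and, in
 * either direction, kicks off the capabilities exchange.
 */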
2004 static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
2005 struct crq_server_adapter *adapter)
2006 {
2007 switch (crq->type) {
2008 case 0x01:
2009 dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
2010 ibmvmc.state);
2011 if (ibmvmc.state == ibmvmc_state_crqinit) {
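/* Send back an init response. */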
2013 if (ibmvmc_send_crq(adapter, 0xC002000000000000,
2014 0) == 0)
2015 ibmvmc_send_capabilities(adapter);
2016 else
2017 dev_err(adapter->dev, " Unable to send init rsp\n");
2018 } else {
2019 dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
2020 ibmvmc.state, ibmvmc.max_mtu);
2021 }
2022
2023 break;
2024 case 0x02:
2025 dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
2026 ibmvmc.state);
2027 if (ibmvmc.state == ibmvmc_state_crqinit)
2028 ibmvmc_send_capabilities(adapter);
2029 break;
2030 default:
2031 dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
2032 (unsigned long)crq->type);
2033 }
2034 }
2035
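/**
 * ibmvmc_handle_crq - Top-level CRQ message demultiplexer
 * @crq: received message
 * @adapter: adapter instance
 *
 * 0xC0 entries are initialization, 0xFF is a transport event
 * (partner failure), and 0x80 entries are protocol messages.
 */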
2046 static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
2047 struct crq_server_adapter *adapter)
2048 {
2049 switch (crq->valid) {
2050 case 0xC0:
2051 ibmvmc_handle_crq_init(crq, adapter);
2052 break;
2053 case 0xFF:
2054 dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
2055 ibmvmc_reset(adapter, true);
2056 break;
2057 case 0x80:
2058 ibmvmc_crq_process(adapter, crq);
2059 break;
2060 default:
2061 dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
2062 crq->valid);
2063 break;
2064 }
2065 }
2066
2067 static void ibmvmc_task(unsigned long data)
2068 {
2069 struct crq_server_adapter *adapter =
2070 (struct crq_server_adapter *)data;
2071 struct vio_dev *vdev = to_vio_dev(adapter->dev);
2072 struct ibmvmc_crq_msg *crq;
2073 int done = 0;
2074
2075 while (!done) {
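/* Drain all valid entries off the CRQ, then re-check once with
 * interrupts enabled.
 */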
2077 while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
2078 ibmvmc_handle_crq(crq, adapter);
2079 crq->valid = 0x00;
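/* A reset was scheduled: stop processing CRQs; the reset thread
 * re-enables interrupts when it is done.
 */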
2083 if (ibmvmc.state == ibmvmc_state_sched_reset)
2084 return;
2085 }
2086
2087 vio_enable_interrupts(vdev);
2088 crq = crq_queue_next_crq(&adapter->queue);
2089 if (crq) {
2090 vio_disable_interrupts(vdev);
2091 ibmvmc_handle_crq(crq, adapter);
2092 crq->valid = 0x00;
2096 if (ibmvmc.state == ibmvmc_state_sched_reset)
2097 return;
2098 } else {
2099 done = 1;
2100 }
2101 }
2102 }
2103
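/**
 * ibmvmc_init_crq_queue - Allocate, map and register the CRQ
 * @adapter: adapter instance
 *
 * Allocates a zeroed page of CRQ entries, maps it for DMA, registers
 * it with H_REG_CRQ, starts the tasklet, and wires up the IRQ.
 */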
2113 static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
2114 {
2115 struct vio_dev *vdev = to_vio_dev(adapter->dev);
2116 struct crq_queue *queue = &adapter->queue;
2117 int rc = 0;
2118 int retrc = 0;
2119
2120 queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
2121
2122 if (!queue->msgs)
2123 goto malloc_failed;
2124
2125 queue->size = PAGE_SIZE / sizeof(*queue->msgs);
2126
2127 queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
2128 queue->size * sizeof(*queue->msgs),
2129 DMA_BIDIRECTIONAL);
2130
2131 if (dma_mapping_error(adapter->dev, queue->msg_token))
2132 goto map_failed;
2133
2134 retrc = plpar_hcall_norets(H_REG_CRQ,
2135 vdev->unit_address,
2136 queue->msg_token, PAGE_SIZE);
2137 rc = retrc;
2138
2139 if (rc == H_RESOURCE)
2140 rc = ibmvmc_reset_crq_queue(adapter);
2141
2142 if (rc == 2) {
2143 dev_warn(adapter->dev, "Partner adapter not ready\n");
2144 retrc = 0;
2145 } else if (rc != 0) {
2146 dev_err(adapter->dev, "Error %d opening adapter\n", rc);
2147 goto reg_crq_failed;
2148 }
2149
2150 queue->cur = 0;
2151 spin_lock_init(&queue->lock);
2152
2153 tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);
2154
2155 if (request_irq(vdev->irq,
2156 ibmvmc_handle_event,
2157 0, "ibmvmc", (void *)adapter) != 0) {
2158 dev_err(adapter->dev, "couldn't register irq 0x%x\n",
2159 vdev->irq);
2160 goto req_irq_failed;
2161 }
2162
2163 rc = vio_enable_interrupts(vdev);
2164 if (rc != 0) {
2165 dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
2166 goto req_irq_failed;
2167 }
2168
2169 return retrc;
2170
2171 req_irq_failed:
2175 tasklet_kill(&adapter->work_task);
2176 h_free_crq(vdev->unit_address);
2177 reg_crq_failed:
2178 dma_unmap_single(adapter->dev,
2179 queue->msg_token,
2180 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
2181 map_failed:
2182 free_page((unsigned long)queue->msgs);
2183 malloc_failed:
2184 return -ENOMEM;
2185 }
2186
2187
2188 static int read_dma_window(struct vio_dev *vdev,
2189 struct crq_server_adapter *adapter)
2190 {
2191 const __be32 *dma_window;
2192 const __be32 *prop;
2193
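/* Parse ibm,my-dma-window: the first cell is the local LIOBN; skip
 * the address and size cells to reach the second (remote) window.
 */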
2198 dma_window =
2199 (const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
2200 NULL);
2201 if (!dma_window) {
2202 dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
2203 return -1;
2204 }
2205
2206 adapter->liobn = be32_to_cpu(*dma_window);
2207 dma_window++;
2208
2209 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
2210 NULL);
2211 if (!prop) {
2212 dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
2213 dma_window++;
2214 } else {
2215 dma_window += be32_to_cpu(*prop);
2216 }
2217
2218 prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
2219 NULL);
2220 if (!prop) {
2221 dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
2222 dma_window++;
2223 } else {
2224 dma_window += be32_to_cpu(*prop);
2225 }
2226
2227
2228 adapter->riobn = be32_to_cpu(*dma_window);
2229
2230 return 0;
2231 }
2232
2233 static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2234 {
2235 struct crq_server_adapter *adapter = &ibmvmc_adapter;
2236 int rc;
2237
2238 dev_set_drvdata(&vdev->dev, NULL);
2239 memset(adapter, 0, sizeof(*adapter));
2240 adapter->dev = &vdev->dev;
2241
2242 dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);
2243
2244 rc = read_dma_window(vdev, adapter);
2245 if (rc != 0) {
2246 ibmvmc.state = ibmvmc_state_failed;
2247 return -1;
2248 }
2249
2250 dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
2251 adapter->liobn, adapter->riobn);
2252
2253 init_waitqueue_head(&adapter->reset_wait_queue);
2254 adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
2255 if (IS_ERR(adapter->reset_task)) {
2256 dev_err(adapter->dev, "Failed to start reset thread\n");
2257 ibmvmc.state = ibmvmc_state_failed;
2258 rc = PTR_ERR(adapter->reset_task);
2259 adapter->reset_task = NULL;
2260 return rc;
2261 }
2262
2263 rc = ibmvmc_init_crq_queue(adapter);
2264 if (rc != 0 && rc != H_RESOURCE) {
2265 dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
2266 rc);
2267 ibmvmc.state = ibmvmc_state_failed;
2268 goto crq_failed;
2269 }
2270
2271 ibmvmc.state = ibmvmc_state_crqinit;
2272
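/* Send the CRQ init message; the send may legitimately fail when
 * registration returned H_RESOURCE (partner not ready yet).
 */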
2277 if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
2278 rc != H_RESOURCE)
2279 dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
2280
2281 dev_set_drvdata(&vdev->dev, adapter);
2282
2283 return 0;
2284
2285 crq_failed:
2286 kthread_stop(adapter->reset_task);
2287 adapter->reset_task = NULL;
2288 return -EPERM;
2289 }
2290
2291 static int ibmvmc_remove(struct vio_dev *vdev)
2292 {
2293 struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);
2294
2295 dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
2296 vdev->unit_address);
2297 ibmvmc_release_crq_queue(adapter);
2298
2299 return 0;
2300 }
2301
2302 static struct vio_device_id ibmvmc_device_table[] = {
2303 { "ibm,vmc", "IBM,vmc" },
2304 { "", "" }
2305 };
2306 MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);
2307
2308 static struct vio_driver ibmvmc_driver = {
2309 .name = ibmvmc_driver_name,
2310 .id_table = ibmvmc_device_table,
2311 .probe = ibmvmc_probe,
2312 .remove = ibmvmc_remove,
2313 };
2314
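/**
 * ibmvmc_scrub_module_parms - Clamp module parameters to sane bounds
 *
 * Forces max_mtu, buf_pool_size and max_hmcs into their MIN/MAX
 * ranges, warning when a value is adjusted.
 */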
2315 static void __init ibmvmc_scrub_module_parms(void)
2316 {
2317 if (ibmvmc_max_mtu > MAX_MTU) {
2318 pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
2319 ibmvmc_max_mtu = MAX_MTU;
2320 } else if (ibmvmc_max_mtu < MIN_MTU) {
2321 pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
2322 ibmvmc_max_mtu = MIN_MTU;
2323 }
2324
2325 if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
2326 pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
2327 MAX_BUF_POOL_SIZE);
2328 ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
2329 } else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
2330 pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
2331 MIN_BUF_POOL_SIZE);
2332 ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
2333 }
2334
2335 if (ibmvmc_max_hmcs > MAX_HMCS) {
2336 pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
2337 ibmvmc_max_hmcs = MAX_HMCS;
2338 } else if (ibmvmc_max_hmcs < MIN_HMCS) {
2339 pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
2340 ibmvmc_max_hmcs = MIN_HMCS;
2341 }
2342 }
2343
2344 static struct miscdevice ibmvmc_miscdev = {
2345 .name = ibmvmc_driver_name,
2346 .minor = MISC_DYNAMIC_MINOR,
2347 .fops = &ibmvmc_fops,
2348 };
2349
2350 static int __init ibmvmc_module_init(void)
2351 {
2352 int rc, i, j;
2353
2354 ibmvmc.state = ibmvmc_state_initial;
2355 pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);
2356
2357 rc = misc_register(&ibmvmc_miscdev);
2358 if (rc) {
2359 pr_err("ibmvmc: misc registration failed\n");
2360 goto misc_register_failed;
2361 }
2362 pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
2363 ibmvmc_miscdev.minor);
2364
2366 memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
2367 for (i = 0; i < MAX_HMCS; i++) {
2368 spin_lock_init(&hmcs[i].lock);
2369 hmcs[i].state = ibmhmc_state_free;
2370 for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
2371 hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
2372 }
2373
2375 ibmvmc_scrub_module_parms();
2376
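/* Seed the negotiated limits from the scrubbed module parameters. */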
2381 ibmvmc.max_mtu = ibmvmc_max_mtu;
2382 ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
2383 ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;
2384
2385 rc = vio_register_driver(&ibmvmc_driver);
2386
2387 if (rc) {
2388 pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
2389 goto vio_reg_failed;
2390 }
2391
2392 return 0;
2393
2394 vio_reg_failed:
2395 misc_deregister(&ibmvmc_miscdev);
2396 misc_register_failed:
2397 return rc;
2398 }
2399
2400 static void __exit ibmvmc_module_exit(void)
2401 {
2402 pr_info("ibmvmc: module exit\n");
2403 vio_unregister_driver(&ibmvmc_driver);
2404 misc_deregister(&ibmvmc_miscdev);
2405 }
2406
2407 module_init(ibmvmc_module_init);
2408 module_exit(ibmvmc_module_exit);
2409
2410 module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
2411 int, 0644);
2412 MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
2413 module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
2414 MODULE_PARM_DESC(max_hmcs, "Max HMCs");
2415 module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
2416 MODULE_PARM_DESC(max_mtu, "Max MTU");
2417
2418 MODULE_AUTHOR("Steven Royer <seroyer@linux.vnet.ibm.com>");
2419 MODULE_DESCRIPTION("IBM VMC");
2420 MODULE_VERSION(IBMVMC_DRIVER_VERSION);
2421 MODULE_LICENSE("GPL v2");