This source file includes the following definitions.
- ehea_dump
- ehea_schedule_port_reset
- ehea_update_firmware_handles
- ehea_update_bcmc_registrations
- ehea_get_stats64
- ehea_update_stats
- ehea_refill_rq1
- ehea_init_fill_rq1
- ehea_refill_rq_def
- ehea_refill_rq2
- ehea_refill_rq3
- ehea_check_cqe
- ehea_fill_skb
- get_skb_by_index
- get_skb_by_index_ll
- ehea_treat_poll_error
- ehea_proc_rwqes
- reset_sq_restart_flag
- check_sqs
- ehea_proc_cqes
- ehea_poll
- ehea_recv_irq_handler
- ehea_qp_aff_irq_handler
- ehea_get_port
- ehea_sense_port_attr
- ehea_set_portspeed
- ehea_parse_eqe
- ehea_neq_tasklet
- ehea_interrupt_neq
- ehea_fill_port_res
- ehea_reg_interrupts
- ehea_free_interrupts
- ehea_configure_port
- ehea_gen_smrs
- ehea_rem_smrs
- ehea_init_q_skba
- ehea_init_port_res
- ehea_clean_portres
- write_swqe2_immediate
- write_swqe2_data
- ehea_broadcast_reg_helper
- ehea_set_mac_addr
- ehea_promiscuous_error
- ehea_promiscuous
- ehea_multicast_reg_helper
- ehea_drop_multicast_list
- ehea_allmulti
- ehea_add_multicast_entry
- ehea_set_multicast_list
- xmit_common
- ehea_xmit2
- ehea_xmit3
- ehea_start_xmit
- ehea_vlan_rx_add_vid
- ehea_vlan_rx_kill_vid
- ehea_activate_qp
- ehea_port_res_setup
- ehea_clean_all_portres
- ehea_remove_adapter_mr
- ehea_add_adapter_mr
- ehea_up
- port_napi_disable
- port_napi_enable
- ehea_open
- ehea_down
- ehea_stop
- ehea_purge_sq
- ehea_flush_sq
- ehea_stop_qps
- ehea_update_rqs
- ehea_restart_qps
- ehea_reset_port
- ehea_rereg_mrs
- ehea_tx_watchdog
- ehea_sense_adapter_attr
- ehea_get_jumboframe_status
- ehea_show_port_id
- logical_port_release
- ehea_register_port
- ehea_unregister_port
- ehea_setup_single_port
- ehea_shutdown_single_port
- ehea_setup_ports
- ehea_get_eth_dn
- ehea_probe_port
- ehea_remove_port
- ehea_create_device_sysfs
- ehea_remove_device_sysfs
- ehea_reboot_notifier
- ehea_mem_notifier
- ehea_crash_handler
- ehea_register_memory_hooks
- ehea_unregister_memory_hooks
- ehea_probe_adapter
- ehea_remove
- check_module_parm
- capabilities_show
- ehea_module_init
- ehea_module_exit
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * linux/drivers/net/ethernet/ibm/ehea/ehea_main.c
4 *
5 * eHEA ethernet device driver for IBM eServer System p
6 *
7 * (C) Copyright IBM Corp. 2006
8 *
9 * Authors:
10 * Christoph Raisch <raisch@de.ibm.com>
11 * Jan-Bernd Themann <themann@de.ibm.com>
12 * Thomas Klein <tklein@de.ibm.com>
13 */
14
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17 #include <linux/device.h>
18 #include <linux/in.h>
19 #include <linux/ip.h>
20 #include <linux/tcp.h>
21 #include <linux/udp.h>
22 #include <linux/if.h>
23 #include <linux/list.h>
24 #include <linux/slab.h>
25 #include <linux/if_ether.h>
26 #include <linux/notifier.h>
27 #include <linux/reboot.h>
28 #include <linux/memory.h>
29 #include <asm/kexec.h>
30 #include <linux/mutex.h>
31 #include <linux/prefetch.h>
32
33 #include <net/ip.h>
34
35 #include "ehea.h"
36 #include "ehea_qmr.h"
37 #include "ehea_phyp.h"
38
39
40 MODULE_LICENSE("GPL");
41 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
42 MODULE_DESCRIPTION("IBM eServer HEA Driver");
43 MODULE_VERSION(DRV_VERSION);
44
45
46 static int msg_level = -1;
47 static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
48 static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
49 static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
50 static int sq_entries = EHEA_DEF_ENTRIES_SQ;
51 static int use_mcs = 1;
52 static int prop_carrier_state;
53
54 module_param(msg_level, int, 0);
55 module_param(rq1_entries, int, 0);
56 module_param(rq2_entries, int, 0);
57 module_param(rq3_entries, int, 0);
58 module_param(sq_entries, int, 0);
59 module_param(prop_carrier_state, int, 0);
60 module_param(use_mcs, int, 0);
61
62 MODULE_PARM_DESC(msg_level, "msg_level");
63 MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
64 "port to stack. 1:yes, 0:no. Default = 0 ");
65 MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
66 "[2^x - 1], x = [7..14]. Default = "
67 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
68 MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
69 "[2^x - 1], x = [7..14]. Default = "
70 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2) ")");
71 MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
72 "[2^x - 1], x = [7..14]. Default = "
73 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1) ")");
74 MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
75 "[2^x - 1], x = [7..14]. Default = "
76 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
77 MODULE_PARM_DESC(use_mcs, " Multiple receive queues, 1: enable, 0: disable, "
78 "Default = 1");
79
80 static int port_name_cnt;
81 static LIST_HEAD(adapter_list);
82 static unsigned long ehea_driver_flags;
83 static DEFINE_MUTEX(dlpar_mem_lock);
84 static struct ehea_fw_handle_array ehea_fw_handles;
85 static struct ehea_bcmc_reg_array ehea_bcmc_regs;
86
87
88 static int ehea_probe_adapter(struct platform_device *dev);
89
90 static int ehea_remove(struct platform_device *dev);
91
92 static const struct of_device_id ehea_module_device_table[] = {
93 {
94 .name = "lhea",
95 .compatible = "IBM,lhea",
96 },
97 {
98 .type = "network",
99 .compatible = "IBM,lhea-ethernet",
100 },
101 {},
102 };
103 MODULE_DEVICE_TABLE(of, ehea_module_device_table);
104
105 static const struct of_device_id ehea_device_table[] = {
106 {
107 .name = "lhea",
108 .compatible = "IBM,lhea",
109 },
110 {},
111 };
112
113 static struct platform_driver ehea_driver = {
114 .driver = {
115 .name = "ehea",
116 .owner = THIS_MODULE,
117 .of_match_table = ehea_device_table,
118 },
119 .probe = ehea_probe_adapter,
120 .remove = ehea_remove,
121 };
122
123 void ehea_dump(void *adr, int len, char *msg)
124 {
125 int x;
126 unsigned char *deb = adr;
127 for (x = 0; x < len; x += 16) {
128 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
129 msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
130 deb += 16;
131 }
132 }
133
134 static void ehea_schedule_port_reset(struct ehea_port *port)
135 {
136 if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
137 schedule_work(&port->reset_task);
138 }
139
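/*
 * Rebuild ehea_fw_handles.arr: a flat snapshot of every firmware
 * handle currently in use (per port resource the QP, send/recv CQ,
 * EQ and send/recv MR handles; per port the QP event queue; per
 * adapter the NEQ and the adapter MR). Teardown paths such as the
 * crash handler can then release handles without walking live
 * driver structures.
 */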
140 static void ehea_update_firmware_handles(void)
141 {
142 struct ehea_fw_handle_entry *arr = NULL;
143 struct ehea_adapter *adapter;
144 int num_adapters = 0;
145 int num_ports = 0;
146 int num_portres = 0;
147 int i = 0;
148 int num_fw_handles, k, l;
149
150
151 mutex_lock(&ehea_fw_handles.lock);
152
153 list_for_each_entry(adapter, &adapter_list, list) {
154 num_adapters++;
155
156 for (k = 0; k < EHEA_MAX_PORTS; k++) {
157 struct ehea_port *port = adapter->port[k];
158
159 if (!port || (port->state != EHEA_PORT_UP))
160 continue;
161
162 num_ports++;
163 num_portres += port->num_def_qps;
164 }
165 }
166
167 num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
168 num_ports * EHEA_NUM_PORT_FW_HANDLES +
169 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;
170
171 if (num_fw_handles) {
172 arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
173 if (!arr)
174 goto out;
175 } else
176 goto out_update;
177
178 list_for_each_entry(adapter, &adapter_list, list) {
179 if (num_adapters == 0)
180 break;
181
182 for (k = 0; k < EHEA_MAX_PORTS; k++) {
183 struct ehea_port *port = adapter->port[k];
184
185 if (!port || (port->state != EHEA_PORT_UP) ||
186 (num_ports == 0))
187 continue;
188
189 for (l = 0; l < port->num_def_qps; l++) {
190 struct ehea_port_res *pr = &port->port_res[l];
191
192 arr[i].adh = adapter->handle;
193 arr[i++].fwh = pr->qp->fw_handle;
194 arr[i].adh = adapter->handle;
195 arr[i++].fwh = pr->send_cq->fw_handle;
196 arr[i].adh = adapter->handle;
197 arr[i++].fwh = pr->recv_cq->fw_handle;
198 arr[i].adh = adapter->handle;
199 arr[i++].fwh = pr->eq->fw_handle;
200 arr[i].adh = adapter->handle;
201 arr[i++].fwh = pr->send_mr.handle;
202 arr[i].adh = adapter->handle;
203 arr[i++].fwh = pr->recv_mr.handle;
204 }
205 arr[i].adh = adapter->handle;
206 arr[i++].fwh = port->qp_eq->fw_handle;
207 num_ports--;
208 }
209
210 arr[i].adh = adapter->handle;
211 arr[i++].fwh = adapter->neq->fw_handle;
212
213 if (adapter->mr.handle) {
214 arr[i].adh = adapter->handle;
215 arr[i++].fwh = adapter->mr.handle;
216 }
217 num_adapters--;
218 }
219
220 out_update:
221 kfree(ehea_fw_handles.arr);
222 ehea_fw_handles.arr = arr;
223 ehea_fw_handles.num_entries = i;
224 out:
225 mutex_unlock(&ehea_fw_handles.lock);
226 }
227
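/*
 * Rebuild ehea_bcmc_regs.arr the same way: two registrations
 * (untagged + all-VLAN) per active port for its broadcast address,
 * plus two per multicast list entry. Runs under a spinlock, hence
 * GFP_ATOMIC for the snapshot array.
 */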
228 static void ehea_update_bcmc_registrations(void)
229 {
230 unsigned long flags;
231 struct ehea_bcmc_reg_entry *arr = NULL;
232 struct ehea_adapter *adapter;
233 struct ehea_mc_list *mc_entry;
234 int num_registrations = 0;
235 int i = 0;
236 int k;
237
238 spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);
239
240
241 list_for_each_entry(adapter, &adapter_list, list)
242 for (k = 0; k < EHEA_MAX_PORTS; k++) {
243 struct ehea_port *port = adapter->port[k];
244
245 if (!port || (port->state != EHEA_PORT_UP))
246 continue;
247
248 num_registrations += 2;
249
250 list_for_each_entry(mc_entry, &port->mc_list->list, list)
251 num_registrations += 2;
252 }
253
254 if (num_registrations) {
255 arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
256 if (!arr)
257 goto out;
258 } else
259 goto out_update;
260
261 list_for_each_entry(adapter, &adapter_list, list) {
262 for (k = 0; k < EHEA_MAX_PORTS; k++) {
263 struct ehea_port *port = adapter->port[k];
264
265 if (!port || (port->state != EHEA_PORT_UP))
266 continue;
267
268 if (num_registrations == 0)
269 goto out_update;
270
271 arr[i].adh = adapter->handle;
272 arr[i].port_id = port->logical_port_id;
273 arr[i].reg_type = EHEA_BCMC_BROADCAST |
274 EHEA_BCMC_UNTAGGED;
275 arr[i++].macaddr = port->mac_addr;
276
277 arr[i].adh = adapter->handle;
278 arr[i].port_id = port->logical_port_id;
279 arr[i].reg_type = EHEA_BCMC_BROADCAST |
280 EHEA_BCMC_VLANID_ALL;
281 arr[i++].macaddr = port->mac_addr;
282 num_registrations -= 2;
283
284 list_for_each_entry(mc_entry,
285 &port->mc_list->list, list) {
286 if (num_registrations == 0)
287 goto out_update;
288
289 arr[i].adh = adapter->handle;
290 arr[i].port_id = port->logical_port_id;
291 arr[i].reg_type = EHEA_BCMC_MULTICAST |
292 EHEA_BCMC_UNTAGGED;
293 if (mc_entry->macaddr == 0)
294 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
295 arr[i++].macaddr = mc_entry->macaddr;
296
297 arr[i].adh = adapter->handle;
298 arr[i].port_id = port->logical_port_id;
299 arr[i].reg_type = EHEA_BCMC_MULTICAST |
300 EHEA_BCMC_VLANID_ALL;
301 if (mc_entry->macaddr == 0)
302 arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL;
303 arr[i++].macaddr = mc_entry->macaddr;
304 num_registrations -= 2;
305 }
306 }
307 }
308
309 out_update:
310 kfree(ehea_bcmc_regs.arr);
311 ehea_bcmc_regs.arr = arr;
312 ehea_bcmc_regs.num_entries = i;
313 out:
314 spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
315 }
316
317 static void ehea_get_stats64(struct net_device *dev,
318 struct rtnl_link_stats64 *stats)
319 {
320 struct ehea_port *port = netdev_priv(dev);
321 u64 rx_packets = 0, tx_packets = 0, rx_bytes = 0, tx_bytes = 0;
322 int i;
323
324 for (i = 0; i < port->num_def_qps; i++) {
325 rx_packets += port->port_res[i].rx_packets;
326 rx_bytes += port->port_res[i].rx_bytes;
327 }
328
329 for (i = 0; i < port->num_def_qps; i++) {
330 tx_packets += port->port_res[i].tx_packets;
331 tx_bytes += port->port_res[i].tx_bytes;
332 }
333
334 stats->tx_packets = tx_packets;
335 stats->rx_bytes = rx_bytes;
336 stats->tx_bytes = tx_bytes;
337 stats->rx_packets = rx_packets;
338
339 stats->multicast = port->stats.multicast;
340 stats->rx_errors = port->stats.rx_errors;
341 }
342
343 static void ehea_update_stats(struct work_struct *work)
344 {
345 struct ehea_port *port =
346 container_of(work, struct ehea_port, stats_work.work);
347 struct net_device *dev = port->netdev;
348 struct rtnl_link_stats64 *stats = &port->stats;
349 struct hcp_ehea_port_cb2 *cb2;
350 u64 hret;
351
352 cb2 = (void *)get_zeroed_page(GFP_KERNEL);
353 if (!cb2) {
354 netdev_err(dev, "No mem for cb2. Some interface statistics were not updated\n");
355 goto resched;
356 }
357
358 hret = ehea_h_query_ehea_port(port->adapter->handle,
359 port->logical_port_id,
360 H_PORT_CB2, H_PORT_CB2_ALL, cb2);
361 if (hret != H_SUCCESS) {
362 netdev_err(dev, "query_ehea_port failed\n");
363 goto out_herr;
364 }
365
366 if (netif_msg_hw(port))
367 ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
368
369 stats->multicast = cb2->rxmcp;
370 stats->rx_errors = cb2->rxuerr;
371
372 out_herr:
373 free_page((unsigned long)cb2);
374 resched:
375 schedule_delayed_work(&port->stats_work,
376 round_jiffies_relative(msecs_to_jiffies(1000)));
377 }
378
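/*
 * Refill receive queue 1 with EHEA_L_PKT_SIZE skbs, walking backwards
 * from 'index'. Allocation failures (and the __EHEA_STOP_XFER state
 * during memory re-registration) are remembered in os_skbs so the
 * missing entries are retried on the next refill; finally the RQ1
 * adder doorbell is rung for whatever was posted.
 */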
379 static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
380 {
381 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
382 struct net_device *dev = pr->port->netdev;
383 int max_index_mask = pr->rq1_skba.len - 1;
384 int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
385 int adder = 0;
386 int i;
387
388 pr->rq1_skba.os_skbs = 0;
389
390 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
391 if (nr_of_wqes > 0)
392 pr->rq1_skba.index = index;
393 pr->rq1_skba.os_skbs = fill_wqes;
394 return;
395 }
396
397 for (i = 0; i < fill_wqes; i++) {
398 if (!skb_arr_rq1[index]) {
399 skb_arr_rq1[index] = netdev_alloc_skb(dev,
400 EHEA_L_PKT_SIZE);
401 if (!skb_arr_rq1[index]) {
402 pr->rq1_skba.os_skbs = fill_wqes - i;
403 break;
404 }
405 }
406 index--;
407 index &= max_index_mask;
408 adder++;
409 }
410
411 if (adder == 0)
412 return;
413
414 /* Ring doorbell */
415 ehea_update_rq1a(pr->qp, adder);
416 }
417
418 static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
419 {
420 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
421 struct net_device *dev = pr->port->netdev;
422 int i;
423
424 if (nr_rq1a > pr->rq1_skba.len) {
425 netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
426 return;
427 }
428
429 for (i = 0; i < nr_rq1a; i++) {
430 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
431 if (!skb_arr_rq1[i])
432 break;
433 }
434
435 ehea_update_rq1a(pr->qp, i - 1);
436 }
437
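/*
 * Common refill path for RQ2/RQ3: allocate an IP-aligned skb, map its
 * data area, describe it with a single-SG receive WQE and finally
 * ring the matching adder doorbell (after iosync() so the WQEs are
 * visible to the hardware). -ENOMEM is only reported once the queue
 * has run almost completely dry.
 */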
438 static int ehea_refill_rq_def(struct ehea_port_res *pr,
439 struct ehea_q_skb_arr *q_skba, int rq_nr,
440 int num_wqes, int wqe_type, int packet_size)
441 {
442 struct net_device *dev = pr->port->netdev;
443 struct ehea_qp *qp = pr->qp;
444 struct sk_buff **skb_arr = q_skba->arr;
445 struct ehea_rwqe *rwqe;
446 int i, index, max_index_mask, fill_wqes;
447 int adder = 0;
448 int ret = 0;
449
450 fill_wqes = q_skba->os_skbs + num_wqes;
451 q_skba->os_skbs = 0;
452
453 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
454 q_skba->os_skbs = fill_wqes;
455 return ret;
456 }
457
458 index = q_skba->index;
459 max_index_mask = q_skba->len - 1;
460 for (i = 0; i < fill_wqes; i++) {
461 u64 tmp_addr;
462 struct sk_buff *skb;
463
464 skb = netdev_alloc_skb_ip_align(dev, packet_size);
465 if (!skb) {
466 q_skba->os_skbs = fill_wqes - i;
467 if (q_skba->os_skbs == q_skba->len - 2) {
468 netdev_info(pr->port->netdev,
469 "rq%i ran dry - no mem for skb\n",
470 rq_nr);
471 ret = -ENOMEM;
472 }
473 break;
474 }
475
476 skb_arr[index] = skb;
477 tmp_addr = ehea_map_vaddr(skb->data);
478 if (tmp_addr == -1) {
479 dev_consume_skb_any(skb);
480 q_skba->os_skbs = fill_wqes - i;
481 ret = 0;
482 break;
483 }
484
485 rwqe = ehea_get_next_rwqe(qp, rq_nr);
486 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
487 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
488 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
489 rwqe->sg_list[0].vaddr = tmp_addr;
490 rwqe->sg_list[0].len = packet_size;
491 rwqe->data_segments = 1;
492
493 index++;
494 index &= max_index_mask;
495 adder++;
496 }
497
498 q_skba->index = index;
499 if (adder == 0)
500 goto out;
501
502 /* Ring doorbell */
503 iosync();
504 if (rq_nr == 2)
505 ehea_update_rq2a(pr->qp, adder);
506 else
507 ehea_update_rq3a(pr->qp, adder);
508 out:
509 return ret;
510 }
511
512
513 static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
514 {
515 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
516 nr_of_wqes, EHEA_RWQE2_TYPE,
517 EHEA_RQ2_PKT_SIZE);
518 }
519
520
521 static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
522 {
523 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
524 nr_of_wqes, EHEA_RWQE3_TYPE,
525 EHEA_MAX_PACKET_SIZE);
526 }
527
528 static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
529 {
530 *rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
531 if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
532 return 0;
533 if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
534 (cqe->header_length == 0))
535 return 0;
536 return -EINVAL;
537 }
538
539 static inline void ehea_fill_skb(struct net_device *dev,
540 struct sk_buff *skb, struct ehea_cqe *cqe,
541 struct ehea_port_res *pr)
542 {
543 int length = cqe->num_bytes_transfered - 4;
544
545 skb_put(skb, length);
546 skb->protocol = eth_type_trans(skb, dev);
547
548 /* The packet was not an IPV4 packet so a complemented checksum was
549 calculated. The value is found in the Internet Checksum field. */
550 if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
551 skb->ip_summed = CHECKSUM_COMPLETE;
552 skb->csum = csum_unfold(~cqe->inet_checksum_value);
553 } else
554 skb->ip_summed = CHECKSUM_UNNECESSARY;
555
556 skb_record_rx_queue(skb, pr - &pr->port->port_res[0]);
557 }
558
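/*
 * Fetch the skb posted for the completed WQE. The next slot in the
 * ring is prefetched (skb header and first payload cache lines) to
 * warm the caches for the following completion.
 */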
559 static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
560 int arr_len,
561 struct ehea_cqe *cqe)
562 {
563 int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
564 struct sk_buff *skb;
565 void *pref;
566 int x;
567
568 x = skb_index + 1;
569 x &= (arr_len - 1);
570
571 pref = skb_array[x];
572 if (pref) {
573 prefetchw(pref);
574 prefetchw(pref + EHEA_CACHE_LINE);
575
576 pref = (skb_array[x]->data);
577 prefetch(pref);
578 prefetch(pref + EHEA_CACHE_LINE);
579 prefetch(pref + EHEA_CACHE_LINE * 2);
580 prefetch(pref + EHEA_CACHE_LINE * 3);
581 }
582
583 skb = skb_array[skb_index];
584 skb_array[skb_index] = NULL;
585 return skb;
586 }
587
588 static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
589 int arr_len, int wqe_index)
590 {
591 struct sk_buff *skb;
592 void *pref;
593 int x;
594
595 x = wqe_index + 1;
596 x &= (arr_len - 1);
597
598 pref = skb_array[x];
599 if (pref) {
600 prefetchw(pref);
601 prefetchw(pref + EHEA_CACHE_LINE);
602
603 pref = (skb_array[x]->data);
604 prefetchw(pref);
605 prefetchw(pref + EHEA_CACHE_LINE);
606 }
607
608 skb = skb_array[wqe_index];
609 skb_array[wqe_index] = NULL;
610 return skb;
611 }
612
613 static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
614 struct ehea_cqe *cqe, int *processed_rq2,
615 int *processed_rq3)
616 {
617 struct sk_buff *skb;
618
619 if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
620 pr->p_stats.err_tcp_cksum++;
621 if (cqe->status & EHEA_CQE_STAT_ERR_IP)
622 pr->p_stats.err_ip_cksum++;
623 if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
624 pr->p_stats.err_frame_crc++;
625
626 if (rq == 2) {
627 *processed_rq2 += 1;
628 skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
629 dev_kfree_skb(skb);
630 } else if (rq == 3) {
631 *processed_rq3 += 1;
632 skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
633 dev_kfree_skb(skb);
634 }
635
636 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
637 if (netif_msg_rx_err(pr->port)) {
638 pr_err("Critical receive error for QP %d. Resetting port.\n",
639 pr->qp->init_attr.qp_nr);
640 ehea_dump(cqe, sizeof(*cqe), "CQE");
641 }
642 ehea_schedule_port_reset(pr->port);
643 return 1;
644 }
645
646 return 0;
647 }
648
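/*
 * NAPI receive processing: poll up to 'budget' RQ1 completions and
 * dispatch by receive queue. RQ1 is the low-latency path: the frame
 * is embedded behind the CQE header and copied into a fresh skb;
 * RQ2 and RQ3 completions return the preposted skb. Good frames go
 * to GRO, bad completions through ehea_treat_poll_error(), and all
 * three queues are refilled with the number of entries consumed.
 */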
649 static int ehea_proc_rwqes(struct net_device *dev,
650 struct ehea_port_res *pr,
651 int budget)
652 {
653 struct ehea_port *port = pr->port;
654 struct ehea_qp *qp = pr->qp;
655 struct ehea_cqe *cqe;
656 struct sk_buff *skb;
657 struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
658 struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
659 struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
660 int skb_arr_rq1_len = pr->rq1_skba.len;
661 int skb_arr_rq2_len = pr->rq2_skba.len;
662 int skb_arr_rq3_len = pr->rq3_skba.len;
663 int processed, processed_rq1, processed_rq2, processed_rq3;
664 u64 processed_bytes = 0;
665 int wqe_index, last_wqe_index, rq, port_reset;
666
667 processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
668 last_wqe_index = 0;
669
670 cqe = ehea_poll_rq1(qp, &wqe_index);
671 while ((processed < budget) && cqe) {
672 ehea_inc_rq1(qp);
673 processed_rq1++;
674 processed++;
675 if (netif_msg_rx_status(port))
676 ehea_dump(cqe, sizeof(*cqe), "CQE");
677
678 last_wqe_index = wqe_index;
679 rmb();
680 if (!ehea_check_cqe(cqe, &rq)) {
681 if (rq == 1) {
682 /* LL packets */
683 skb = get_skb_by_index_ll(skb_arr_rq1,
684 skb_arr_rq1_len,
685 wqe_index);
686 if (unlikely(!skb)) {
687 netif_info(port, rx_err, dev,
688 "LL rq1: skb=NULL\n");
689
690 skb = netdev_alloc_skb(dev,
691 EHEA_L_PKT_SIZE);
692 if (!skb)
693 break;
694 }
695 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
696 cqe->num_bytes_transfered - 4);
697 ehea_fill_skb(dev, skb, cqe, pr);
698 } else if (rq == 2) {
699 /* RQ2 */
700 skb = get_skb_by_index(skb_arr_rq2,
701 skb_arr_rq2_len, cqe);
702 if (unlikely(!skb)) {
703 netif_err(port, rx_err, dev,
704 "rq2: skb=NULL\n");
705 break;
706 }
707 ehea_fill_skb(dev, skb, cqe, pr);
708 processed_rq2++;
709 } else {
710 /* RQ3 */
711 skb = get_skb_by_index(skb_arr_rq3,
712 skb_arr_rq3_len, cqe);
713 if (unlikely(!skb)) {
714 netif_err(port, rx_err, dev,
715 "rq3: skb=NULL\n");
716 break;
717 }
718 ehea_fill_skb(dev, skb, cqe, pr);
719 processed_rq3++;
720 }
721
722 processed_bytes += skb->len;
723
724 if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
725 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
726 cqe->vlan_tag);
727
728 napi_gro_receive(&pr->napi, skb);
729 } else {
730 pr->p_stats.poll_receive_errors++;
731 port_reset = ehea_treat_poll_error(pr, rq, cqe,
732 &processed_rq2,
733 &processed_rq3);
734 if (port_reset)
735 break;
736 }
737 cqe = ehea_poll_rq1(qp, &wqe_index);
738 }
739
740 pr->rx_packets += processed;
741 pr->rx_bytes += processed_bytes;
742
743 ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
744 ehea_refill_rq2(pr, processed_rq2);
745 ehea_refill_rq3(pr, processed_rq3);
746
747 return processed;
748 }
749
750 #define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull
751
752 static void reset_sq_restart_flag(struct ehea_port *port)
753 {
754 int i;
755
756 for (i = 0; i < port->num_def_qps; i++) {
757 struct ehea_port_res *pr = &port->port_res[i];
758 pr->sq_restart_flag = 0;
759 }
760 wake_up(&port->restart_wq);
761 }
762
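/*
 * Post a marker SWQE (wr_id == SWQE_RESTART_CHECK, purged, with
 * signalled completion) on every send queue. ehea_proc_cqes() sets
 * sq_restart_flag when the marker completes and the restart path
 * wakes restart_wq via reset_sq_restart_flag(); a timeout means the
 * hardware and software queues are out of sync and forces a port
 * reset.
 */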
763 static void check_sqs(struct ehea_port *port)
764 {
765 struct ehea_swqe *swqe;
766 int swqe_index;
767 int i;
768
769 for (i = 0; i < port->num_def_qps; i++) {
770 struct ehea_port_res *pr = &port->port_res[i];
771 int ret;
772 swqe = ehea_get_swqe(pr->qp, &swqe_index);
773 memset(swqe, 0, SWQE_HEADER_SIZE);
774 atomic_dec(&pr->swqe_avail);
775
776 swqe->tx_control |= EHEA_SWQE_PURGE;
777 swqe->wr_id = SWQE_RESTART_CHECK;
778 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
779 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
780 swqe->immediate_data_length = 80;
781
782 ehea_post_swqe(pr->qp, swqe);
783
784 ret = wait_event_timeout(port->restart_wq,
785 pr->sq_restart_flag == 0,
786 msecs_to_jiffies(100));
787
788 if (!ret) {
789 pr_err("HW/SW queues out of sync\n");
790 ehea_schedule_port_reset(pr->port);
791 return;
792 }
793 }
794 }
795
796
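/*
 * Drain up to 'my_quota' send completions: flag the restart marker,
 * complain about bad completion status (and schedule a port reset on
 * fatal errors), release the skbs of SWQE2-type descriptors, credit
 * swqe_avail and wake the TX subqueue once enough send WQEs are free
 * again.
 */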
797 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
798 {
799 struct sk_buff *skb;
800 struct ehea_cq *send_cq = pr->send_cq;
801 struct ehea_cqe *cqe;
802 int quota = my_quota;
803 int cqe_counter = 0;
804 int swqe_av = 0;
805 int index;
806 struct netdev_queue *txq = netdev_get_tx_queue(pr->port->netdev,
807 pr - &pr->port->port_res[0]);
808
809 cqe = ehea_poll_cq(send_cq);
810 while (cqe && (quota > 0)) {
811 ehea_inc_cq(send_cq);
812
813 cqe_counter++;
814 rmb();
815
816 if (cqe->wr_id == SWQE_RESTART_CHECK) {
817 pr->sq_restart_flag = 1;
818 swqe_av++;
819 break;
820 }
821
822 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
823 pr_err("Bad send completion status=0x%04X\n",
824 cqe->status);
825
826 if (netif_msg_tx_err(pr->port))
827 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
828
829 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
830 pr_err("Resetting port\n");
831 ehea_schedule_port_reset(pr->port);
832 break;
833 }
834 }
835
836 if (netif_msg_tx_done(pr->port))
837 ehea_dump(cqe, sizeof(*cqe), "CQE");
838
839 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
840 == EHEA_SWQE2_TYPE)) {
841
842 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
843 skb = pr->sq_skba.arr[index];
844 dev_consume_skb_any(skb);
845 pr->sq_skba.arr[index] = NULL;
846 }
847
848 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
849 quota--;
850
851 cqe = ehea_poll_cq(send_cq);
852 }
853
854 ehea_update_feca(send_cq, cqe_counter);
855 atomic_add(swqe_av, &pr->swqe_avail);
856
857 if (unlikely(netif_tx_queue_stopped(txq) &&
858 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))) {
859 __netif_tx_lock(txq, smp_processor_id());
860 if (netif_tx_queue_stopped(txq) &&
861 (atomic_read(&pr->swqe_avail) >= pr->swqe_refill_th))
862 netif_tx_wake_queue(txq);
863 __netif_tx_unlock(txq);
864 }
865
866 wake_up(&pr->port->swqe_avail_wq);
867
868 return cqe;
869 }
870
871 #define EHEA_POLL_MAX_CQES 65535
872
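/*
 * NAPI poll handler. After processing send completions and receive
 * WQEs, interrupts are re-enabled and both CQs are checked once
 * more; if a completion slipped in during that window the handler
 * reschedules itself instead of returning, closing the classic NAPI
 * race.
 */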
873 static int ehea_poll(struct napi_struct *napi, int budget)
874 {
875 struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
876 napi);
877 struct net_device *dev = pr->port->netdev;
878 struct ehea_cqe *cqe;
879 struct ehea_cqe *cqe_skb = NULL;
880 int wqe_index;
881 int rx = 0;
882
883 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
884 rx += ehea_proc_rwqes(dev, pr, budget - rx);
885
886 while (rx != budget) {
887 napi_complete(napi);
888 ehea_reset_cq_ep(pr->recv_cq);
889 ehea_reset_cq_ep(pr->send_cq);
890 ehea_reset_cq_n1(pr->recv_cq);
891 ehea_reset_cq_n1(pr->send_cq);
892 rmb();
893 cqe = ehea_poll_rq1(pr->qp, &wqe_index);
894 cqe_skb = ehea_poll_cq(pr->send_cq);
895
896 if (!cqe && !cqe_skb)
897 return rx;
898
899 if (!napi_reschedule(napi))
900 return rx;
901
902 cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
903 rx += ehea_proc_rwqes(dev, pr, budget - rx);
904 }
905
906 return rx;
907 }
908
909 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
910 {
911 struct ehea_port_res *pr = param;
912
913 napi_schedule(&pr->napi);
914
915 return IRQ_HANDLED;
916 }
917
918 static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
919 {
920 struct ehea_port *port = param;
921 struct ehea_eqe *eqe;
922 struct ehea_qp *qp;
923 u32 qp_token;
924 u64 resource_type, aer, aerr;
925 int reset_port = 0;
926
927 eqe = ehea_poll_eq(port->qp_eq);
928
929 while (eqe) {
930 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
931 pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
932 eqe->entry, qp_token);
933
934 qp = port->port_res[qp_token].qp;
935
936 resource_type = ehea_error_data(port->adapter, qp->fw_handle,
937 &aer, &aerr);
938
939 if (resource_type == EHEA_AER_RESTYPE_QP) {
940 if ((aer & EHEA_AER_RESET_MASK) ||
941 (aerr & EHEA_AERR_RESET_MASK))
942 reset_port = 1;
943 } else
944 reset_port = 1;
945
946 eqe = ehea_poll_eq(port->qp_eq);
947 }
948
949 if (reset_port) {
950 pr_err("Resetting port\n");
951 ehea_schedule_port_reset(port);
952 }
953
954 return IRQ_HANDLED;
955 }
956
957 static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
958 int logical_port)
959 {
960 int i;
961
962 for (i = 0; i < EHEA_MAX_PORTS; i++)
963 if (adapter->port[i])
964 if (adapter->port[i]->logical_port_id == logical_port)
965 return adapter->port[i];
966 return NULL;
967 }
968
969 int ehea_sense_port_attr(struct ehea_port *port)
970 {
971 int ret;
972 u64 hret;
973 struct hcp_ehea_port_cb0 *cb0;
974
975 /* may be called via ehea_neq_tasklet() */
976 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
977 if (!cb0) {
978 pr_err("no mem for cb0\n");
979 ret = -ENOMEM;
980 goto out;
981 }
982
983 hret = ehea_h_query_ehea_port(port->adapter->handle,
984 port->logical_port_id, H_PORT_CB0,
985 EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
986 cb0);
987 if (hret != H_SUCCESS) {
988 ret = -EIO;
989 goto out_free;
990 }
991
992 /* MAC address */
993 port->mac_addr = cb0->port_mac_addr << 16;
994
995 if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
996 ret = -EADDRNOTAVAIL;
997 goto out_free;
998 }
999
1000 /* Port speed */
1001 switch (cb0->port_speed) {
1002 case H_SPEED_10M_H:
1003 port->port_speed = EHEA_SPEED_10M;
1004 port->full_duplex = 0;
1005 break;
1006 case H_SPEED_10M_F:
1007 port->port_speed = EHEA_SPEED_10M;
1008 port->full_duplex = 1;
1009 break;
1010 case H_SPEED_100M_H:
1011 port->port_speed = EHEA_SPEED_100M;
1012 port->full_duplex = 0;
1013 break;
1014 case H_SPEED_100M_F:
1015 port->port_speed = EHEA_SPEED_100M;
1016 port->full_duplex = 1;
1017 break;
1018 case H_SPEED_1G_F:
1019 port->port_speed = EHEA_SPEED_1G;
1020 port->full_duplex = 1;
1021 break;
1022 case H_SPEED_10G_F:
1023 port->port_speed = EHEA_SPEED_10G;
1024 port->full_duplex = 1;
1025 break;
1026 default:
1027 port->port_speed = 0;
1028 port->full_duplex = 0;
1029 break;
1030 }
1031
1032 port->autoneg = 1;
1033 port->num_mcs = cb0->num_default_qps;
1034
1035 /* Number of default QPs */
1036 if (use_mcs)
1037 port->num_def_qps = cb0->num_default_qps;
1038 else
1039 port->num_def_qps = 1;
1040
1041 if (!port->num_def_qps) {
1042 ret = -EINVAL;
1043 goto out_free;
1044 }
1045
1046 ret = 0;
1047 out_free:
1048 if (ret || netif_msg_probe(port))
1049 ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
1050 free_page((unsigned long)cb0);
1051 out:
1052 return ret;
1053 }
1054
1055 int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1056 {
1057 struct hcp_ehea_port_cb4 *cb4;
1058 u64 hret;
1059 int ret = 0;
1060
1061 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1062 if (!cb4) {
1063 pr_err("no mem for cb4\n");
1064 ret = -ENOMEM;
1065 goto out;
1066 }
1067
1068 cb4->port_speed = port_speed;
1069
1070 netif_carrier_off(port->netdev);
1071
1072 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1073 port->logical_port_id,
1074 H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
1075 if (hret == H_SUCCESS) {
1076 port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;
1077
1078 hret = ehea_h_query_ehea_port(port->adapter->handle,
1079 port->logical_port_id,
1080 H_PORT_CB4, H_PORT_CB4_SPEED,
1081 cb4);
1082 if (hret == H_SUCCESS) {
1083 switch (cb4->port_speed) {
1084 case H_SPEED_10M_H:
1085 port->port_speed = EHEA_SPEED_10M;
1086 port->full_duplex = 0;
1087 break;
1088 case H_SPEED_10M_F:
1089 port->port_speed = EHEA_SPEED_10M;
1090 port->full_duplex = 1;
1091 break;
1092 case H_SPEED_100M_H:
1093 port->port_speed = EHEA_SPEED_100M;
1094 port->full_duplex = 0;
1095 break;
1096 case H_SPEED_100M_F:
1097 port->port_speed = EHEA_SPEED_100M;
1098 port->full_duplex = 1;
1099 break;
1100 case H_SPEED_1G_F:
1101 port->port_speed = EHEA_SPEED_1G;
1102 port->full_duplex = 1;
1103 break;
1104 case H_SPEED_10G_F:
1105 port->port_speed = EHEA_SPEED_10G;
1106 port->full_duplex = 1;
1107 break;
1108 default:
1109 port->port_speed = 0;
1110 port->full_duplex = 0;
1111 break;
1112 }
1113 } else {
1114 pr_err("Failed sensing port speed\n");
1115 ret = -EIO;
1116 }
1117 } else {
1118 if (hret == H_AUTHORITY) {
1119 pr_info("Hypervisor denied setting port speed\n");
1120 ret = -EPERM;
1121 } else {
1122 ret = -EIO;
1123 pr_err("Failed setting port speed\n");
1124 }
1125 }
1126 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
1127 netif_carrier_on(port->netdev);
1128
1129 free_page((unsigned long)cb4);
1130 out:
1131 return ret;
1132 }
1133
1134 static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1135 {
1136 int ret;
1137 u8 ec;
1138 u8 portnum;
1139 struct ehea_port *port;
1140 struct net_device *dev;
1141
1142 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1143 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1144 port = ehea_get_port(adapter, portnum);
1145 if (!port) {
1146 netdev_err(NULL, "unknown portnum %x\n", portnum);
1147 return;
1148 }
1149 dev = port->netdev;
1150
1151 switch (ec) {
1152 case EHEA_EC_PORTSTATE_CHG:
1153 /* port state change */
1154 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1155 if (!netif_carrier_ok(dev)) {
1156 ret = ehea_sense_port_attr(port);
1157 if (ret) {
1158 netdev_err(dev, "failed resensing port attributes\n");
1159 break;
1160 }
1161
1162 netif_info(port, link, dev,
1163 "Logical port up: %dMbps %s Duplex\n",
1164 port->port_speed,
1165 port->full_duplex == 1 ?
1166 "Full" : "Half");
1167
1168 netif_carrier_on(dev);
1169 netif_wake_queue(dev);
1170 }
1171 } else
1172 if (netif_carrier_ok(dev)) {
1173 netif_info(port, link, dev,
1174 "Logical port down\n");
1175 netif_carrier_off(dev);
1176 netif_tx_disable(dev);
1177 }
1178
1179 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
1180 port->phy_link = EHEA_PHY_LINK_UP;
1181 netif_info(port, link, dev,
1182 "Physical port up\n");
1183 if (prop_carrier_state)
1184 netif_carrier_on(dev);
1185 } else {
1186 port->phy_link = EHEA_PHY_LINK_DOWN;
1187 netif_info(port, link, dev,
1188 "Physical port down\n");
1189 if (prop_carrier_state)
1190 netif_carrier_off(dev);
1191 }
1192
1193 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1194 netdev_info(dev,
1195 "External switch port is primary port\n");
1196 else
1197 netdev_info(dev,
1198 "External switch port is backup port\n");
1199
1200 break;
1201 case EHEA_EC_ADAPTER_MALFUNC:
1202 netdev_err(dev, "Adapter malfunction\n");
1203 break;
1204 case EHEA_EC_PORT_MALFUNC:
1205 netdev_info(dev, "Port malfunction\n");
1206 netif_carrier_off(dev);
1207 netif_tx_disable(dev);
1208 break;
1209 default:
1210 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
1211 break;
1212 }
1213 }
1214
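/*
 * Tasklet behind the adapter's notification event queue interrupt:
 * drain all pending EQEs through ehea_parse_eqe(), then re-arm the
 * port-state-change and malfunction events.
 */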
1215 static void ehea_neq_tasklet(unsigned long data)
1216 {
1217 struct ehea_adapter *adapter = (struct ehea_adapter *)data;
1218 struct ehea_eqe *eqe;
1219 u64 event_mask;
1220
1221 eqe = ehea_poll_eq(adapter->neq);
1222 pr_debug("eqe=%p\n", eqe);
1223
1224 while (eqe) {
1225 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
1226 ehea_parse_eqe(adapter, eqe->entry);
1227 eqe = ehea_poll_eq(adapter->neq);
1228 pr_debug("next eqe=%p\n", eqe);
1229 }
1230
1231 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
1232 | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
1233 | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);
1234
1235 ehea_h_reset_events(adapter->handle,
1236 adapter->neq->fw_handle, event_mask);
1237 }
1238
1239 static irqreturn_t ehea_interrupt_neq(int irq, void *param)
1240 {
1241 struct ehea_adapter *adapter = param;
1242 tasklet_hi_schedule(&adapter->neq_tasklet);
1243 return IRQ_HANDLED;
1244 }
1245
1246
1247 static int ehea_fill_port_res(struct ehea_port_res *pr)
1248 {
1249 int ret;
1250 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1251
1252 ehea_init_fill_rq1(pr, pr->rq1_skba.len);
1253
1254 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1255
1256 ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
1257
1258 return ret;
1259 }
1260
1261 static int ehea_reg_interrupts(struct net_device *dev)
1262 {
1263 struct ehea_port *port = netdev_priv(dev);
1264 struct ehea_port_res *pr;
1265 int i, ret;
1266
1267
1268 snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
1269 dev->name);
1270
1271 ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
1272 ehea_qp_aff_irq_handler,
1273 0, port->int_aff_name, port);
1274 if (ret) {
1275 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
1276 port->qp_eq->attr.ist1);
1277 goto out_free_qpeq;
1278 }
1279
1280 netif_info(port, ifup, dev,
1281 "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
1282 port->qp_eq->attr.ist1);
1283
1284
1285 for (i = 0; i < port->num_def_qps; i++) {
1286 pr = &port->port_res[i];
1287 snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
1288 "%s-queue%d", dev->name, i);
1289 ret = ibmebus_request_irq(pr->eq->attr.ist1,
1290 ehea_recv_irq_handler,
1291 0, pr->int_send_name, pr);
1292 if (ret) {
1293 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
1294 i, pr->eq->attr.ist1);
1295 goto out_free_req;
1296 }
1297 netif_info(port, ifup, dev,
1298 "irq_handle 0x%X for function ehea_queue_int %d registered\n",
1299 pr->eq->attr.ist1, i);
1300 }
1301 out:
1302 return ret;
1303
1304
1305 out_free_req:
1306 while (--i >= 0) {
1307 u32 ist = port->port_res[i].eq->attr.ist1;
1308 ibmebus_free_irq(ist, &port->port_res[i]);
1309 }
1310
1311 out_free_qpeq:
1312 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1313 i = port->num_def_qps;
1314
1315 goto out;
1316
1317 }
1318
1319 static void ehea_free_interrupts(struct net_device *dev)
1320 {
1321 struct ehea_port *port = netdev_priv(dev);
1322 struct ehea_port_res *pr;
1323 int i;
1324
1325
1326 /* free per-queue event interrupts */
1327 for (i = 0; i < port->num_def_qps; i++) {
1328 pr = &port->port_res[i];
1329 ibmebus_free_irq(pr->eq->attr.ist1, pr);
1330 netif_info(port, intr, dev,
1331 "free send irq for res %d with handle 0x%X\n",
1332 i, pr->eq->attr.ist1);
1333 }
1334
1335 /* associated events */
1336 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1337 netif_info(port, intr, dev,
1338 "associated event interrupt for handle 0x%X freed\n",
1339 port->qp_eq->attr.ist1);
1340 }
1341
1342 static int ehea_configure_port(struct ehea_port *port)
1343 {
1344 int ret, i;
1345 u64 hret, mask;
1346 struct hcp_ehea_port_cb0 *cb0;
1347
1348 ret = -ENOMEM;
1349 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1350 if (!cb0)
1351 goto out;
1352
1353 cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
1354 | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
1355 | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
1356 | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
1357 | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
1358 PXLY_RC_VLAN_FILTER)
1359 | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);
1360
1361 for (i = 0; i < port->num_mcs; i++)
1362 if (use_mcs)
1363 cb0->default_qpn_arr[i] =
1364 port->port_res[i].qp->init_attr.qp_nr;
1365 else
1366 cb0->default_qpn_arr[i] =
1367 port->port_res[0].qp->init_attr.qp_nr;
1368
1369 if (netif_msg_ifup(port))
1370 ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");
1371
1372 mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
1373 | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);
1374
1375 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1376 port->logical_port_id,
1377 H_PORT_CB0, mask, cb0);
1378 ret = -EIO;
1379 if (hret != H_SUCCESS)
1380 goto out_free;
1381
1382 ret = 0;
1383
1384 out_free:
1385 free_page((unsigned long)cb0);
1386 out:
1387 return ret;
1388 }
1389
1390 static int ehea_gen_smrs(struct ehea_port_res *pr)
1391 {
1392 int ret;
1393 struct ehea_adapter *adapter = pr->port->adapter;
1394
1395 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
1396 if (ret)
1397 goto out;
1398
1399 ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
1400 if (ret)
1401 goto out_free;
1402
1403 return 0;
1404
1405 out_free:
1406 ehea_rem_mr(&pr->send_mr);
1407 out:
1408 pr_err("Generating SMRS failed\n");
1409 return -EIO;
1410 }
1411
1412 static int ehea_rem_smrs(struct ehea_port_res *pr)
1413 {
1414 if ((ehea_rem_mr(&pr->send_mr)) ||
1415 (ehea_rem_mr(&pr->recv_mr)))
1416 return -EIO;
1417 else
1418 return 0;
1419 }
1420
1421 static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1422 {
1423 int arr_size = sizeof(void *) * max_q_entries;
1424
1425 q_skba->arr = vzalloc(arr_size);
1426 if (!q_skba->arr)
1427 return -ENOMEM;
1428
1429 q_skba->len = max_q_entries;
1430 q_skba->index = 0;
1431 q_skba->os_skbs = 0;
1432
1433 return 0;
1434 }
1435
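/*
 * Set up one port resource (one TX/RX queue pair): create its event
 * queue, receive and send CQs and the QP, size the four skb tracking
 * arrays after the actual queue sizes returned for the QP, generate
 * the send/receive shared memory regions and register the NAPI
 * instance. The traffic counters survive the memset so a port reset
 * does not zero the statistics.
 */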
1436 static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1437 struct port_res_cfg *pr_cfg, int queue_token)
1438 {
1439 struct ehea_adapter *adapter = port->adapter;
1440 enum ehea_eq_type eq_type = EHEA_EQ;
1441 struct ehea_qp_init_attr *init_attr = NULL;
1442 int ret = -EIO;
1443 u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
1444
1445 tx_bytes = pr->tx_bytes;
1446 tx_packets = pr->tx_packets;
1447 rx_bytes = pr->rx_bytes;
1448 rx_packets = pr->rx_packets;
1449
1450 memset(pr, 0, sizeof(struct ehea_port_res));
1451
1452 pr->tx_bytes = tx_bytes;
1453 pr->tx_packets = tx_packets;
1454 pr->rx_bytes = rx_bytes;
1455 pr->rx_packets = rx_packets;
1456
1457 pr->port = port;
1458
1459 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1460 if (!pr->eq) {
1461 pr_err("create_eq failed (eq)\n");
1462 goto out_free;
1463 }
1464
1465 pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
1466 pr->eq->fw_handle,
1467 port->logical_port_id);
1468 if (!pr->recv_cq) {
1469 pr_err("create_cq failed (cq_recv)\n");
1470 goto out_free;
1471 }
1472
1473 pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
1474 pr->eq->fw_handle,
1475 port->logical_port_id);
1476 if (!pr->send_cq) {
1477 pr_err("create_cq failed (cq_send)\n");
1478 goto out_free;
1479 }
1480
1481 if (netif_msg_ifup(port))
1482 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1483 pr->send_cq->attr.act_nr_of_cqes,
1484 pr->recv_cq->attr.act_nr_of_cqes);
1485
1486 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1487 if (!init_attr) {
1488 ret = -ENOMEM;
1489 pr_err("no mem for ehea_qp_init_attr\n");
1490 goto out_free;
1491 }
1492
1493 init_attr->low_lat_rq1 = 1;
1494 init_attr->signalingtype = 1;
1495 init_attr->rq_count = 3;
1496 init_attr->qp_token = queue_token;
1497 init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
1498 init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
1499 init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
1500 init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
1501 init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
1502 init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
1503 init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
1504 init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
1505 init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
1506 init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
1507 init_attr->port_nr = port->logical_port_id;
1508 init_attr->send_cq_handle = pr->send_cq->fw_handle;
1509 init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
1510 init_attr->aff_eq_handle = port->qp_eq->fw_handle;
1511
1512 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1513 if (!pr->qp) {
1514 pr_err("create_qp failed\n");
1515 ret = -EIO;
1516 goto out_free;
1517 }
1518
1519 if (netif_msg_ifup(port))
1520 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1521 init_attr->qp_nr,
1522 init_attr->act_nr_send_wqes,
1523 init_attr->act_nr_rwqes_rq1,
1524 init_attr->act_nr_rwqes_rq2,
1525 init_attr->act_nr_rwqes_rq3);
1526
1527 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1528
1529 ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
1530 ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
1531 ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
1532 ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
1533 if (ret)
1534 goto out_free;
1535
1536 pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
1537 if (ehea_gen_smrs(pr) != 0) {
1538 ret = -EIO;
1539 goto out_free;
1540 }
1541
1542 atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);
1543
1544 kfree(init_attr);
1545
1546 netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
1547
1548 ret = 0;
1549 goto out;
1550
1551 out_free:
1552 kfree(init_attr);
1553 vfree(pr->sq_skba.arr);
1554 vfree(pr->rq1_skba.arr);
1555 vfree(pr->rq2_skba.arr);
1556 vfree(pr->rq3_skba.arr);
1557 ehea_destroy_qp(pr->qp);
1558 ehea_destroy_cq(pr->send_cq);
1559 ehea_destroy_cq(pr->recv_cq);
1560 ehea_destroy_eq(pr->eq);
1561 out:
1562 return ret;
1563 }
1564
1565 static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
1566 {
1567 int ret, i;
1568
1569 if (pr->qp)
1570 netif_napi_del(&pr->napi);
1571
1572 ret = ehea_destroy_qp(pr->qp);
1573
1574 if (!ret) {
1575 ehea_destroy_cq(pr->send_cq);
1576 ehea_destroy_cq(pr->recv_cq);
1577 ehea_destroy_eq(pr->eq);
1578
1579 for (i = 0; i < pr->rq1_skba.len; i++)
1580 dev_kfree_skb(pr->rq1_skba.arr[i]);
1581
1582 for (i = 0; i < pr->rq2_skba.len; i++)
1583 dev_kfree_skb(pr->rq2_skba.arr[i]);
1584
1585 for (i = 0; i < pr->rq3_skba.len; i++)
1586 dev_kfree_skb(pr->rq3_skba.arr[i]);
1587
1588 for (i = 0; i < pr->sq_skba.len; i++)
1589 dev_kfree_skb(pr->sq_skba.arr[i]);
1590
1591 vfree(pr->rq1_skba.arr);
1592 vfree(pr->rq2_skba.arr);
1593 vfree(pr->rq3_skba.arr);
1594 vfree(pr->sq_skba.arr);
1595 ret = ehea_rem_smrs(pr);
1596 }
1597 return ret;
1598 }
1599
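/*
 * Fill the SWQE2 immediate-data area: for TSO frames only the
 * Ethernet/IP/TCP headers are copied (and the MSS is set); otherwise
 * up to SWQE2_MAX_IMM bytes of linear data. Any linear remainder is
 * described by sg1entry.
 */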
1600 static void write_swqe2_immediate(struct sk_buff *skb, struct ehea_swqe *swqe,
1601 u32 lkey)
1602 {
1603 int skb_data_size = skb_headlen(skb);
1604 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1605 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1606 unsigned int immediate_len = SWQE2_MAX_IMM;
1607
1608 swqe->descriptors = 0;
1609
1610 if (skb_is_gso(skb)) {
1611 swqe->tx_control |= EHEA_SWQE_TSO;
1612 swqe->mss = skb_shinfo(skb)->gso_size;
1613
1614 /*
1615 * For TSO packets we only copy the headers into the immediate area.
1616 */
1617 immediate_len = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1618 }
1619
1620 if (skb_is_gso(skb) || skb_data_size >= SWQE2_MAX_IMM) {
1621 skb_copy_from_linear_data(skb, imm_data, immediate_len);
1622 swqe->immediate_data_length = immediate_len;
1623
1624 if (skb_data_size > immediate_len) {
1625 sg1entry->l_key = lkey;
1626 sg1entry->len = skb_data_size - immediate_len;
1627 sg1entry->vaddr =
1628 ehea_map_vaddr(skb->data + immediate_len);
1629 swqe->descriptors++;
1630 }
1631 } else {
1632 skb_copy_from_linear_data(skb, imm_data, skb_data_size);
1633 swqe->immediate_data_length = skb_data_size;
1634 }
1635 }
1636
1637 static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
1638 struct ehea_swqe *swqe, u32 lkey)
1639 {
1640 struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
1641 skb_frag_t *frag;
1642 int nfrags, sg1entry_contains_frag_data, i;
1643
1644 nfrags = skb_shinfo(skb)->nr_frags;
1645 sg1entry = &swqe->u.immdata_desc.sg_entry;
1646 sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
1647 sg1entry_contains_frag_data = 0;
1648
1649 write_swqe2_immediate(skb, swqe, lkey);
1650
1651 /* write descriptors */
1652 if (nfrags > 0) {
1653 if (swqe->descriptors == 0) {
1654 /* sg1entry contains first frag */
1655 frag = &skb_shinfo(skb)->frags[0];
1656
1657
1658 sg1entry->l_key = lkey;
1659 sg1entry->len = skb_frag_size(frag);
1660 sg1entry->vaddr =
1661 ehea_map_vaddr(skb_frag_address(frag));
1662 swqe->descriptors++;
1663 sg1entry_contains_frag_data = 1;
1664 }
1665
1666 for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
1667
1668 frag = &skb_shinfo(skb)->frags[i];
1669 sgentry = &sg_list[i - sg1entry_contains_frag_data];
1670
1671 sgentry->l_key = lkey;
1672 sgentry->len = skb_frag_size(frag);
1673 sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag));
1674 swqe->descriptors++;
1675 }
1676 }
1677 }
1678
1679 static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1680 {
1681 int ret = 0;
1682 u64 hret;
1683 u8 reg_type;
1684
1685 /* De/Register untagged packets */
1686 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
1687 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1688 port->logical_port_id,
1689 reg_type, port->mac_addr, 0, hcallid);
1690 if (hret != H_SUCCESS) {
1691 pr_err("%sregistering bc address failed (tagged)\n",
1692 hcallid == H_REG_BCMC ? "" : "de");
1693 ret = -EIO;
1694 goto out_herr;
1695 }
1696
1697 /* De/Register VLAN packets */
1698 reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
1699 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1700 port->logical_port_id,
1701 reg_type, port->mac_addr, 0, hcallid);
1702 if (hret != H_SUCCESS) {
1703 pr_err("%sregistering bc address failed (vlan)\n",
1704 hcallid == H_REG_BCMC ? "" : "de");
1705 ret = -EIO;
1706 }
1707 out_herr:
1708 return ret;
1709 }
1710
1711 static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1712 {
1713 struct ehea_port *port = netdev_priv(dev);
1714 struct sockaddr *mac_addr = sa;
1715 struct hcp_ehea_port_cb0 *cb0;
1716 int ret;
1717 u64 hret;
1718
1719 if (!is_valid_ether_addr(mac_addr->sa_data)) {
1720 ret = -EADDRNOTAVAIL;
1721 goto out;
1722 }
1723
1724 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1725 if (!cb0) {
1726 pr_err("no mem for cb0\n");
1727 ret = -ENOMEM;
1728 goto out;
1729 }
1730
1731 memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);
1732
1733 cb0->port_mac_addr = cb0->port_mac_addr >> 16;
1734
1735 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1736 port->logical_port_id, H_PORT_CB0,
1737 EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
1738 if (hret != H_SUCCESS) {
1739 ret = -EIO;
1740 goto out_free;
1741 }
1742
1743 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1744
1745 /* Deregister old MAC in pHYP */
1746 if (port->state == EHEA_PORT_UP) {
1747 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
1748 if (ret)
1749 goto out_upregs;
1750 }
1751
1752 port->mac_addr = cb0->port_mac_addr << 16;
1753
1754 /* Register new MAC in pHYP */
1755 if (port->state == EHEA_PORT_UP) {
1756 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
1757 if (ret)
1758 goto out_upregs;
1759 }
1760
1761 ret = 0;
1762
1763 out_upregs:
1764 ehea_update_bcmc_registrations();
1765 out_free:
1766 free_page((unsigned long)cb0);
1767 out:
1768 return ret;
1769 }
1770
1771 static void ehea_promiscuous_error(u64 hret, int enable)
1772 {
1773 if (hret == H_AUTHORITY)
1774 pr_info("Hypervisor denied %sabling promiscuous mode\n",
1775 enable == 1 ? "en" : "dis");
1776 else
1777 pr_err("failed %sabling promiscuous mode\n",
1778 enable == 1 ? "en" : "dis");
1779 }
1780
1781 static void ehea_promiscuous(struct net_device *dev, int enable)
1782 {
1783 struct ehea_port *port = netdev_priv(dev);
1784 struct hcp_ehea_port_cb7 *cb7;
1785 u64 hret;
1786
1787 if (enable == port->promisc)
1788 return;
1789
1790 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1791 if (!cb7) {
1792 pr_err("no mem for cb7\n");
1793 goto out;
1794 }
1795
1796 /* Modify Pxs_DUCQPN in CB7 */
1797 cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;
1798
1799 hret = ehea_h_modify_ehea_port(port->adapter->handle,
1800 port->logical_port_id,
1801 H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
1802 if (hret) {
1803 ehea_promiscuous_error(hret, enable);
1804 goto out;
1805 }
1806
1807 port->promisc = enable;
1808 out:
1809 free_page((unsigned long)cb7);
1810 }
1811
1812 static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
1813 u32 hcallid)
1814 {
1815 u64 hret;
1816 u8 reg_type;
1817
1818 reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED;
1819 if (mc_mac_addr == 0)
1820 reg_type |= EHEA_BCMC_SCOPE_ALL;
1821
1822 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1823 port->logical_port_id,
1824 reg_type, mc_mac_addr, 0, hcallid);
1825 if (hret)
1826 goto out;
1827
1828 reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL;
1829 if (mc_mac_addr == 0)
1830 reg_type |= EHEA_BCMC_SCOPE_ALL;
1831
1832 hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
1833 port->logical_port_id,
1834 reg_type, mc_mac_addr, 0, hcallid);
1835 out:
1836 return hret;
1837 }
1838
1839 static int ehea_drop_multicast_list(struct net_device *dev)
1840 {
1841 struct ehea_port *port = netdev_priv(dev);
1842 struct ehea_mc_list *mc_entry = port->mc_list;
1843 struct list_head *pos;
1844 struct list_head *temp;
1845 int ret = 0;
1846 u64 hret;
1847
1848 list_for_each_safe(pos, temp, &(port->mc_list->list)) {
1849 mc_entry = list_entry(pos, struct ehea_mc_list, list);
1850
1851 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
1852 H_DEREG_BCMC);
1853 if (hret) {
1854 pr_err("failed deregistering mcast MAC\n");
1855 ret = -EIO;
1856 }
1857
1858 list_del(pos);
1859 kfree(mc_entry);
1860 }
1861 return ret;
1862 }
1863
1864 static void ehea_allmulti(struct net_device *dev, int enable)
1865 {
1866 struct ehea_port *port = netdev_priv(dev);
1867 u64 hret;
1868
1869 if (!port->allmulti) {
1870 if (enable) {
1871 /* Enable ALLMULTI */
1872 ehea_drop_multicast_list(dev);
1873 hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
1874 if (!hret)
1875 port->allmulti = 1;
1876 else
1877 netdev_err(dev,
1878 "failed enabling IFF_ALLMULTI\n");
1879 }
1880 } else {
1881 if (!enable) {
1882 /* Disable ALLMULTI */
1883 hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
1884 if (!hret)
1885 port->allmulti = 0;
1886 else
1887 netdev_err(dev,
1888 "failed disabling IFF_ALLMULTI\n");
1889 }
1890 }
1891 }
1892
1893 static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1894 {
1895 struct ehea_mc_list *ehea_mcl_entry;
1896 u64 hret;
1897
1898 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
1899 if (!ehea_mcl_entry)
1900 return;
1901
1902 INIT_LIST_HEAD(&ehea_mcl_entry->list);
1903
1904 memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);
1905
1906 hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
1907 H_REG_BCMC);
1908 if (!hret)
1909 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
1910 else {
1911 pr_err("failed registering mcast MAC\n");
1912 kfree(ehea_mcl_entry);
1913 }
1914 }
1915
1916 static void ehea_set_multicast_list(struct net_device *dev)
1917 {
1918 struct ehea_port *port = netdev_priv(dev);
1919 struct netdev_hw_addr *ha;
1920 int ret;
1921
1922 ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC));
1923
1924 if (dev->flags & IFF_ALLMULTI) {
1925 ehea_allmulti(dev, 1);
1926 goto out;
1927 }
1928 ehea_allmulti(dev, 0);
1929
1930 if (!netdev_mc_empty(dev)) {
1931 ret = ehea_drop_multicast_list(dev);
1932 if (ret) {
1933 /* Dropping the current multicast list failed.
1934 * Enabling ALL_MULTI allows all multicast frames
1935 */
1936 ehea_allmulti(dev, 1);
1937 }
1938
1939 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
1940 pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
1941 port->adapter->max_mc_mac);
1942 goto out;
1943 }
1944
1945 netdev_for_each_mc_addr(ha, dev)
1946 ehea_add_multicast_entry(port, ha->addr);
1947
1948 }
1949 out:
1950 ehea_update_bcmc_registrations();
1951 }
1952
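/*
 * Common TX offload setup: always request immediate data and CRC
 * generation; for IPv4 frames with CHECKSUM_PARTIAL additionally
 * enable IP/TCP checksum insertion and record the IP header span and
 * the offset of the transport checksum field (the TCP checksum
 * control is reused for UDP).
 */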
1953 static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe)
1954 {
1955 swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC;
1956
1957 if (vlan_get_protocol(skb) != htons(ETH_P_IP))
1958 return;
1959
1960 if (skb->ip_summed == CHECKSUM_PARTIAL)
1961 swqe->tx_control |= EHEA_SWQE_IP_CHECKSUM;
1962
1963 swqe->ip_start = skb_network_offset(skb);
1964 swqe->ip_end = swqe->ip_start + ip_hdrlen(skb) - 1;
1965
1966 switch (ip_hdr(skb)->protocol) {
1967 case IPPROTO_UDP:
1968 if (skb->ip_summed == CHECKSUM_PARTIAL)
1969 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
1970
1971 swqe->tcp_offset = swqe->ip_end + 1 +
1972 offsetof(struct udphdr, check);
1973 break;
1974
1975 case IPPROTO_TCP:
1976 if (skb->ip_summed == CHECKSUM_PARTIAL)
1977 swqe->tx_control |= EHEA_SWQE_TCP_CHECKSUM;
1978
1979 swqe->tcp_offset = swqe->ip_end + 1 +
1980 offsetof(struct tcphdr, check);
1981 break;
1982 }
1983 }
1984
1985 static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1986 struct ehea_swqe *swqe, u32 lkey)
1987 {
1988 swqe->tx_control |= EHEA_SWQE_DESCRIPTORS_PRESENT;
1989
1990 xmit_common(skb, swqe);
1991
1992 write_swqe2_data(skb, dev, swqe, lkey);
1993 }
1994
1995 static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1996 struct ehea_swqe *swqe)
1997 {
1998 u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
1999
2000 xmit_common(skb, swqe);
2001
2002 if (!skb->data_len)
2003 skb_copy_from_linear_data(skb, imm_data, skb->len);
2004 else
2005 skb_copy_bits(skb, 0, imm_data, skb->len);
2006
2007 swqe->immediate_data_length = skb->len;
2008 dev_consume_skb_any(skb);
2009 }
2010
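/*
 * Main transmit routine. Frames up to SWQE3_MAX_IMM bytes are sent
 * entirely as immediate data via ehea_xmit3() and only request a
 * signalled completion every sig_comp_iv descriptors; larger frames
 * use ehea_xmit2() with SG descriptors and park the skb in sq_skba
 * until its completion arrives. The subqueue is stopped when the
 * send queue is nearly exhausted or a memory re-registration is in
 * flight (__EHEA_STOP_XFER, WQE flagged EHEA_SWQE_PURGE).
 */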
2011 static netdev_tx_t ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2012 {
2013 struct ehea_port *port = netdev_priv(dev);
2014 struct ehea_swqe *swqe;
2015 u32 lkey;
2016 int swqe_index;
2017 struct ehea_port_res *pr;
2018 struct netdev_queue *txq;
2019
2020 pr = &port->port_res[skb_get_queue_mapping(skb)];
2021 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
2022
2023 swqe = ehea_get_swqe(pr->qp, &swqe_index);
2024 memset(swqe, 0, SWQE_HEADER_SIZE);
2025 atomic_dec(&pr->swqe_avail);
2026
2027 if (skb_vlan_tag_present(skb)) {
2028 swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
2029 swqe->vlan_tag = skb_vlan_tag_get(skb);
2030 }
2031
2032 pr->tx_packets++;
2033 pr->tx_bytes += skb->len;
2034
2035 if (skb->len <= SWQE3_MAX_IMM) {
2036 u32 sig_iv = port->sig_comp_iv;
2037 u32 swqe_num = pr->swqe_id_counter;
2038 ehea_xmit3(skb, dev, swqe);
2039 swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
2040 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
2041 if (pr->swqe_ll_count >= (sig_iv - 1)) {
2042 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
2043 sig_iv);
2044 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2045 pr->swqe_ll_count = 0;
2046 } else
2047 pr->swqe_ll_count += 1;
2048 } else {
2049 swqe->wr_id =
2050 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
2051 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
2052 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
2053 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
2054 pr->sq_skba.arr[pr->sq_skba.index] = skb;
2055
2056 pr->sq_skba.index++;
2057 pr->sq_skba.index &= (pr->sq_skba.len - 1);
2058
2059 lkey = pr->send_mr.lkey;
2060 ehea_xmit2(skb, dev, swqe, lkey);
2061 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
2062 }
2063 pr->swqe_id_counter += 1;
2064
2065 netif_info(port, tx_queued, dev,
2066 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2067 if (netif_msg_tx_queued(port))
2068 ehea_dump(swqe, 512, "swqe");
2069
2070 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2071 netif_tx_stop_queue(txq);
2072 swqe->tx_control |= EHEA_SWQE_PURGE;
2073 }
2074
2075 ehea_post_swqe(pr->qp, swqe);
2076
2077 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
2078 pr->p_stats.queue_stopped++;
2079 netif_tx_stop_queue(txq);
2080 }
2081
2082 return NETDEV_TX_OK;
2083 }
2084
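/*
 * The port's VLAN filter is a bitmap of 4096 bits spread over u64
 * words, with bit 0 of a word being the most significant bit. For
 * example, vid 100 lands in word 100 / 64 = 1, at mask
 * 0x8000000000000000 >> (100 & 0x3F), i.e. bit 36 counted from the
 * MSB. Both helpers below do a query/modify round trip on port
 * control block 1.
 */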
2085 static int ehea_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
2086 {
2087 struct ehea_port *port = netdev_priv(dev);
2088 struct ehea_adapter *adapter = port->adapter;
2089 struct hcp_ehea_port_cb1 *cb1;
2090 int index;
2091 u64 hret;
2092 int err = 0;
2093
2094 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2095 if (!cb1) {
2096 pr_err("no mem for cb1\n");
2097 err = -ENOMEM;
2098 goto out;
2099 }
2100
2101 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2102 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2103 if (hret != H_SUCCESS) {
2104 pr_err("query_ehea_port failed\n");
2105 err = -EINVAL;
2106 goto out;
2107 }
2108
2109 index = (vid / 64);
2110 cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));
2111
2112 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2113 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2114 if (hret != H_SUCCESS) {
2115 pr_err("modify_ehea_port failed\n");
2116 err = -EINVAL;
2117 }
2118 out:
2119 free_page((unsigned long)cb1);
2120 return err;
2121 }
2122
2123 static int ehea_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
2124 {
2125 struct ehea_port *port = netdev_priv(dev);
2126 struct ehea_adapter *adapter = port->adapter;
2127 struct hcp_ehea_port_cb1 *cb1;
2128 int index;
2129 u64 hret;
2130 int err = 0;
2131
2132 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2133 if (!cb1) {
2134 pr_err("no mem for cb1\n");
2135 err = -ENOMEM;
2136 goto out;
2137 }
2138
2139 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2140 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2141 if (hret != H_SUCCESS) {
2142 pr_err("query_ehea_port failed\n");
2143 err = -EINVAL;
2144 goto out;
2145 }
2146
2147 index = (vid / 64);
2148 cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));
2149
2150 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2151 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2152 if (hret != H_SUCCESS) {
2153 pr_err("modify_ehea_port failed\n");
2154 err = -EINVAL;
2155 }
2156 out:
2157 free_page((unsigned long)cb1);
2158 return err;
2159 }
2160
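/*
 * Walk the QP through its activation sequence with query/modify pairs
 * on control block 0: INITIALIZED, then ENABLED + INITIALIZED, then
 * ENABLED + RDY2SND. A final query confirms the resulting state.
 */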
2161 static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2162 {
2163 int ret = -EIO;
2164 u64 hret;
2165 u16 dummy16 = 0;
2166 u64 dummy64 = 0;
2167 struct hcp_modify_qp_cb0 *cb0;
2168
2169 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2170 if (!cb0) {
2171 ret = -ENOMEM;
2172 goto out;
2173 }
2174
2175 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2176 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2177 if (hret != H_SUCCESS) {
2178 pr_err("query_ehea_qp failed (1)\n");
2179 goto out;
2180 }
2181
2182 cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
2183 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2184 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2185 &dummy64, &dummy64, &dummy16, &dummy16);
2186 if (hret != H_SUCCESS) {
2187 pr_err("modify_ehea_qp failed (1)\n");
2188 goto out;
2189 }
2190
2191 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2192 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2193 if (hret != H_SUCCESS) {
2194 pr_err("query_ehea_qp failed (2)\n");
2195 goto out;
2196 }
2197
2198 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
2199 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2200 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2201 &dummy64, &dummy64, &dummy16, &dummy16);
2202 if (hret != H_SUCCESS) {
2203 pr_err("modify_ehea_qp failed (2)\n");
2204 goto out;
2205 }
2206
2207 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2208 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2209 if (hret != H_SUCCESS) {
2210 pr_err("query_ehea_qp failed (3)\n");
2211 goto out;
2212 }
2213
2214 cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
2215 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2216 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2217 &dummy64, &dummy64, &dummy16, &dummy16);
2218 if (hret != H_SUCCESS) {
2219 pr_err("modify_ehea_qp failed (3)\n");
2220 goto out;
2221 }
2222
2223 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2224 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2225 if (hret != H_SUCCESS) {
2226 pr_err("query_ehea_qp failed (4)\n");
2227 goto out;
2228 }
2229
2230 ret = 0;
2231 out:
2232 free_page((unsigned long)cb0);
2233 return ret;
2234 }
2235
2236 static int ehea_port_res_setup(struct ehea_port *port, int def_qps)
2237 {
2238 int ret, i;
2239 struct port_res_cfg pr_cfg, pr_cfg_small_rx;
2240 enum ehea_eq_type eq_type = EHEA_EQ;
2241
2242 port->qp_eq = ehea_create_eq(port->adapter, eq_type,
2243 EHEA_MAX_ENTRIES_EQ, 1);
2244 if (!port->qp_eq) {
2245 ret = -EINVAL;
2246 pr_err("ehea_create_eq failed (qp_eq)\n");
2247 goto out_kill_eq;
2248 }
2249
2250 pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
2251 pr_cfg.max_entries_scq = sq_entries * 2;
2252 pr_cfg.max_entries_sq = sq_entries;
2253 pr_cfg.max_entries_rq1 = rq1_entries;
2254 pr_cfg.max_entries_rq2 = rq2_entries;
2255 pr_cfg.max_entries_rq3 = rq3_entries;
2256
2257 pr_cfg_small_rx.max_entries_rcq = 1;
2258 pr_cfg_small_rx.max_entries_scq = sq_entries;
2259 pr_cfg_small_rx.max_entries_sq = sq_entries;
2260 pr_cfg_small_rx.max_entries_rq1 = 1;
2261 pr_cfg_small_rx.max_entries_rq2 = 1;
2262 pr_cfg_small_rx.max_entries_rq3 = 1;
2263
2264 for (i = 0; i < def_qps; i++) {
2265 ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
2266 if (ret)
2267 goto out_clean_pr;
2268 }
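/*
 * Note: the loop below never executes (it starts and ends at def_qps).
 * It appears to be a leftover from an earlier scheme that added extra
 * TX-only queues using the small-RX configuration above.
 */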
2269 for (i = def_qps; i < def_qps; i++) {
2270 ret = ehea_init_port_res(port, &port->port_res[i],
2271 &pr_cfg_small_rx, i);
2272 if (ret)
2273 goto out_clean_pr;
2274 }
2275
2276 return 0;
2277
2278 out_clean_pr:
2279 while (--i >= 0)
2280 ehea_clean_portres(port, &port->port_res[i]);
2281
2282 out_kill_eq:
2283 ehea_destroy_eq(port->qp_eq);
2284 return ret;
2285 }
2286
2287 static int ehea_clean_all_portres(struct ehea_port *port)
2288 {
2289 int ret = 0;
2290 int i;
2291
2292 for (i = 0; i < port->num_def_qps; i++)
2293 ret |= ehea_clean_portres(port, &port->port_res[i]);
2294
2295 ret |= ehea_destroy_eq(port->qp_eq);
2296
2297 return ret;
2298 }
2299
2300 static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
2301 {
2302 if (adapter->active_ports)
2303 return;
2304
2305 ehea_rem_mr(&adapter->mr);
2306 }
2307
2308 static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
2309 {
2310 if (adapter->active_ports)
2311 return 0;
2312
2313 return ehea_reg_kernel_mr(adapter, &adapter->mr);
2314 }
2315
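/*
 * Bring-up order: create the port resources (EQ, QPs, CQs), configure
 * the port, register interrupts, activate and fill each QP, and
 * finally register the broadcast/multicast (BCMC) entry before
 * declaring the port up.
 */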
2316 static int ehea_up(struct net_device *dev)
2317 {
2318 int ret, i;
2319 struct ehea_port *port = netdev_priv(dev);
2320
2321 if (port->state == EHEA_PORT_UP)
2322 return 0;
2323
2324 ret = ehea_port_res_setup(port, port->num_def_qps);
2325 if (ret) {
2326 netdev_err(dev, "ehea_port_res_setup failed. ret:%d\n", ret);
2327 goto out;
2328 }
2329
2330 /* Set default QP for this port */
2331 ret = ehea_configure_port(port);
2332 if (ret) {
2333 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2334 goto out_clean_pr;
2335 }
2336
2337 ret = ehea_reg_interrupts(dev);
2338 if (ret) {
2339 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2340 goto out_clean_pr;
2341 }
2342
2343 for (i = 0; i < port->num_def_qps; i++) {
2344 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2345 if (ret) {
2346 netdev_err(dev, "activate_qp failed\n");
2347 goto out_free_irqs;
2348 }
2349 }
2350
2351 for (i = 0; i < port->num_def_qps; i++) {
2352 ret = ehea_fill_port_res(&port->port_res[i]);
2353 if (ret) {
2354 netdev_err(dev, "ehea_fill_port_res failed\n");
2355 goto out_free_irqs;
2356 }
2357 }
2358
2359 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2360 if (ret) {
2361 ret = -EIO;
2362 goto out_free_irqs;
2363 }
2364
2365 port->state = EHEA_PORT_UP;
2366
2367 ret = 0;
2368 goto out;
2369
2370 out_free_irqs:
2371 ehea_free_interrupts(dev);
2372
2373 out_clean_pr:
2374 ehea_clean_all_portres(port);
2375 out:
2376 if (ret)
2377 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2378
2379 ehea_update_bcmc_registrations();
2380 ehea_update_firmware_handles();
2381
2382 return ret;
2383 }
2384
2385 static void port_napi_disable(struct ehea_port *port)
2386 {
2387 int i;
2388
2389 for (i = 0; i < port->num_def_qps; i++)
2390 napi_disable(&port->port_res[i].napi);
2391 }
2392
2393 static void port_napi_enable(struct ehea_port *port)
2394 {
2395 int i;
2396
2397 for (i = 0; i < port->num_def_qps; i++)
2398 napi_enable(&port->port_res[i].napi);
2399 }
2400
2401 static int ehea_open(struct net_device *dev)
2402 {
2403 int ret;
2404 struct ehea_port *port = netdev_priv(dev);
2405
2406 mutex_lock(&port->port_lock);
2407
2408 netif_info(port, ifup, dev, "enabling port\n");
2409
2410 netif_carrier_off(dev);
2411
2412 ret = ehea_up(dev);
2413 if (!ret) {
2414 port_napi_enable(port);
2415 netif_tx_start_all_queues(dev);
2416 }
2417
2418 mutex_unlock(&port->port_lock);
2419 schedule_delayed_work(&port->stats_work,
2420 round_jiffies_relative(msecs_to_jiffies(1000)));
2421
2422 return ret;
2423 }
2424
2425 static int ehea_down(struct net_device *dev)
2426 {
2427 int ret;
2428 struct ehea_port *port = netdev_priv(dev);
2429
2430 if (port->state == EHEA_PORT_DOWN)
2431 return 0;
2432
2433 ehea_drop_multicast_list(dev);
2434 ehea_allmulti(dev, 0);
2435 ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
2436
2437 ehea_free_interrupts(dev);
2438
2439 port->state = EHEA_PORT_DOWN;
2440
2441 ehea_update_bcmc_registrations();
2442
2443 ret = ehea_clean_all_portres(port);
2444 if (ret)
2445 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2446
2447 ehea_update_firmware_handles();
2448
2449 return ret;
2450 }
2451
2452 static int ehea_stop(struct net_device *dev)
2453 {
2454 int ret;
2455 struct ehea_port *port = netdev_priv(dev);
2456
2457 netif_info(port, ifdown, dev, "disabling port\n");
2458
2459 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2460 cancel_work_sync(&port->reset_task);
2461 cancel_delayed_work_sync(&port->stats_work);
2462 mutex_lock(&port->port_lock);
2463 netif_tx_stop_all_queues(dev);
2464 port_napi_disable(port);
2465 ret = ehea_down(dev);
2466 mutex_unlock(&port->port_lock);
2467 clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2468 return ret;
2469 }
2470
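/*
 * Quiesce helpers used during DLPAR memory changes: ehea_purge_sq()
 * flags every send WQE with EHEA_SWQE_PURGE so outstanding work
 * completes without touching memory, and ehea_flush_sq() waits up to
 * 100 ms per queue for swqe_avail to climb back to (almost) full.
 */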
2471 static void ehea_purge_sq(struct ehea_qp *orig_qp)
2472 {
2473 struct ehea_qp qp = *orig_qp;
2474 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2475 struct ehea_swqe *swqe;
2476 int wqe_index;
2477 int i;
2478
2479 for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
2480 swqe = ehea_get_swqe(&qp, &wqe_index);
2481 swqe->tx_control |= EHEA_SWQE_PURGE;
2482 }
2483 }
2484
2485 static void ehea_flush_sq(struct ehea_port *port)
2486 {
2487 int i;
2488
2489 for (i = 0; i < port->num_def_qps; i++) {
2490 struct ehea_port_res *pr = &port->port_res[i];
2491 int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
2492 int ret;
2493
2494 ret = wait_event_timeout(port->swqe_avail_wq,
2495 atomic_read(&pr->swqe_avail) >= swqe_max,
2496 msecs_to_jiffies(100));
2497
2498 if (!ret) {
2499 pr_err("WARNING: sq not flushed completely\n");
2500 break;
2501 }
2502 }
2503 }
2504
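/*
 * Disable all of the port's QPs. The shifted H_QP_CR_RES_STATE term
 * carries the QP's current state into the requested-state field of
 * the control register (an assumption based on the register layout
 * used here) while the enable bit is cleared, after which the shared
 * memory regions are deregistered.
 */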
2505 static int ehea_stop_qps(struct net_device *dev)
2506 {
2507 struct ehea_port *port = netdev_priv(dev);
2508 struct ehea_adapter *adapter = port->adapter;
2509 struct hcp_modify_qp_cb0 *cb0;
2510 int ret = -EIO;
2511 int dret;
2512 int i;
2513 u64 hret;
2514 u64 dummy64 = 0;
2515 u16 dummy16 = 0;
2516
2517 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2518 if (!cb0) {
2519 ret = -ENOMEM;
2520 goto out;
2521 }
2522
2523 for (i = 0; i < (port->num_def_qps); i++) {
2524 struct ehea_port_res *pr = &port->port_res[i];
2525 struct ehea_qp *qp = pr->qp;
2526
2527 /* Purge send queue */
2528 ehea_purge_sq(qp);
2529
2530 /* Disable queue pair */
2531 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2532 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2533 cb0);
2534 if (hret != H_SUCCESS) {
2535 pr_err("query_ehea_qp failed (1)\n");
2536 goto out;
2537 }
2538
2539 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2540 cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;
2541
2542 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2543 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2544 1), cb0, &dummy64,
2545 &dummy64, &dummy16, &dummy16);
2546 if (hret != H_SUCCESS) {
2547 pr_err("modify_ehea_qp failed (1)\n");
2548 goto out;
2549 }
2550
2551 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2552 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2553 cb0);
2554 if (hret != H_SUCCESS) {
2555 pr_err("query_ehea_qp failed (2)\n");
2556 goto out;
2557 }
2558
2559 /* Deregister shared memory regions */
2560 dret = ehea_rem_smrs(pr);
2561 if (dret) {
2562 pr_err("unreg shared memory region failed\n");
2563 goto out;
2564 }
2565 }
2566
2567 ret = 0;
2568 out:
2569 free_page((unsigned long)cb0);
2570
2571 return ret;
2572 }
2573
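/*
 * After the kernel memory region has been re-registered, every
 * pending RWQE still carries the old lkey and mapped address, so
 * rewrite both for RQ2 and RQ3 from the saved skb arrays.
 */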
2574 static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
2575 {
2576 struct ehea_qp qp = *orig_qp;
2577 struct ehea_qp_init_attr *init_attr = &qp.init_attr;
2578 struct ehea_rwqe *rwqe;
2579 struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
2580 struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
2581 struct sk_buff *skb;
2582 u32 lkey = pr->recv_mr.lkey;
2583
2584
2585 int i;
2586 int index;
2587
2588 for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
2589 rwqe = ehea_get_next_rwqe(&qp, 2);
2590 rwqe->sg_list[0].l_key = lkey;
2591 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2592 skb = skba_rq2[index];
2593 if (skb)
2594 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2595 }
2596
2597 for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
2598 rwqe = ehea_get_next_rwqe(&qp, 3);
2599 rwqe->sg_list[0].l_key = lkey;
2600 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
2601 skb = skba_rq3[index];
2602 if (skb)
2603 rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
2604 }
2605 }
2606
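/*
 * Counterpart to ehea_stop_qps(): regenerate the shared memory
 * regions, patch the queued RWQEs via ehea_update_rqs(), re-enable
 * each QP through a query/modify/query sequence, and refill all
 * three receive queues.
 */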
2607 static int ehea_restart_qps(struct net_device *dev)
2608 {
2609 struct ehea_port *port = netdev_priv(dev);
2610 struct ehea_adapter *adapter = port->adapter;
2611 int ret = 0;
2612 int i;
2613
2614 struct hcp_modify_qp_cb0 *cb0;
2615 u64 hret;
2616 u64 dummy64 = 0;
2617 u16 dummy16 = 0;
2618
2619 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
2620 if (!cb0) {
2621 ret = -ENOMEM;
2622 goto out;
2623 }
2624
2625 for (i = 0; i < (port->num_def_qps); i++) {
2626 struct ehea_port_res *pr = &port->port_res[i];
2627 struct ehea_qp *qp = pr->qp;
2628
2629 ret = ehea_gen_smrs(pr);
2630 if (ret) {
2631 netdev_err(dev, "creation of shared memory regions failed\n");
2632 goto out;
2633 }
2634
2635 ehea_update_rqs(qp, pr);
2636
2637 /* Enable queue pair */
2638 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2639 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2640 cb0);
2641 if (hret != H_SUCCESS) {
2642 netdev_err(dev, "query_ehea_qp failed (1)\n");
2643 goto out;
2644 }
2645
2646 cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
2647 cb0->qp_ctl_reg |= H_QP_CR_ENABLED;
2648
2649 hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
2650 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
2651 1), cb0, &dummy64,
2652 &dummy64, &dummy16, &dummy16);
2653 if (hret != H_SUCCESS) {
2654 netdev_err(dev, "modify_ehea_qp failed (1)\n");
2655 goto out;
2656 }
2657
2658 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2659 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2660 cb0);
2661 if (hret != H_SUCCESS) {
2662 netdev_err(dev, "query_ehea_qp failed (2)\n");
2663 goto out;
2664 }
2665
2666 /* Refill entire queue */
2667 ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
2668 ehea_refill_rq2(pr, 0);
2669 ehea_refill_rq3(pr, 0);
2670 }
2671 out:
2672 free_page((unsigned long)cb0);
2673
2674 return ret;
2675 }
2676
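/*
 * Port reset worker. It serializes against DLPAR memory operations
 * via dlpar_mem_lock and performs a full down/up cycle under the
 * port lock, restoring the multicast list afterwards.
 */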
2677 static void ehea_reset_port(struct work_struct *work)
2678 {
2679 int ret;
2680 struct ehea_port *port =
2681 container_of(work, struct ehea_port, reset_task);
2682 struct net_device *dev = port->netdev;
2683
2684 mutex_lock(&dlpar_mem_lock);
2685 port->resets++;
2686 mutex_lock(&port->port_lock);
2687 netif_tx_disable(dev);
2688
2689 port_napi_disable(port);
2690
2691 ehea_down(dev);
2692
2693 ret = ehea_up(dev);
2694 if (ret)
2695 goto out;
2696
2697 ehea_set_multicast_list(dev);
2698
2699 netif_info(port, timer, dev, "reset successful\n");
2700
2701 port_napi_enable(port);
2702
2703 netif_tx_wake_all_queues(dev);
2704 out:
2705 mutex_unlock(&port->port_lock);
2706 mutex_unlock(&dlpar_mem_lock);
2707 }
2708
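/*
 * Two-phase response to an LPAR memory change: first quiesce every
 * active port (flush and stop its QPs) and drop each adapter's
 * memory region, then register a fresh MR and restart the QPs once
 * __EHEA_STOP_XFER has been cleared.
 */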
2709 static void ehea_rereg_mrs(void)
2710 {
2711 int ret, i;
2712 struct ehea_adapter *adapter;
2713
2714 pr_info("LPAR memory changed - re-initializing driver\n");
2715
2716 list_for_each_entry(adapter, &adapter_list, list)
2717 if (adapter->active_ports) {
2718 /* Shutdown all ports */
2719 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2720 struct ehea_port *port = adapter->port[i];
2721 struct net_device *dev;
2722
2723 if (!port)
2724 continue;
2725
2726 dev = port->netdev;
2727
2728 if (dev->flags & IFF_UP) {
2729 mutex_lock(&port->port_lock);
2730 netif_tx_disable(dev);
2731 ehea_flush_sq(port);
2732 ret = ehea_stop_qps(dev);
2733 if (ret) {
2734 mutex_unlock(&port->port_lock);
2735 goto out;
2736 }
2737 port_napi_disable(port);
2738 mutex_unlock(&port->port_lock);
2739 }
2740 reset_sq_restart_flag(port);
2741 }
2742
2743 /* Unregister old memory region */
2744 ret = ehea_rem_mr(&adapter->mr);
2745 if (ret) {
2746 pr_err("unregister MR failed - driver inoperable!\n");
2747 goto out;
2748 }
2749 }
2750
2751 clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
2752
2753 list_for_each_entry(adapter, &adapter_list, list)
2754 if (adapter->active_ports) {
2755 /* Register new memory region */
2756 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
2757 if (ret) {
2758 pr_err("register MR failed - driver inoperable!\n");
2759 goto out;
2760 }
2761
2762 /* Restart all ports */
2763 for (i = 0; i < EHEA_MAX_PORTS; i++) {
2764 struct ehea_port *port = adapter->port[i];
2765
2766 if (port) {
2767 struct net_device *dev = port->netdev;
2768
2769 if (dev->flags & IFF_UP) {
2770 mutex_lock(&port->port_lock);
2771 ret = ehea_restart_qps(dev);
2772 if (!ret) {
2773 check_sqs(port);
2774 port_napi_enable(port);
2775 netif_tx_wake_all_queues(dev);
2776 } else {
2777 netdev_err(dev, "Unable to restart QPS\n");
2778 }
2779 mutex_unlock(&port->port_lock);
2780 }
2781 }
2782 }
2783 }
2784 pr_info("re-initializing driver complete\n");
2785 out:
2786 return;
2787 }
2788
2789 static void ehea_tx_watchdog(struct net_device *dev)
2790 {
2791 struct ehea_port *port = netdev_priv(dev);
2792
2793 if (netif_carrier_ok(dev) &&
2794 !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
2795 ehea_schedule_port_reset(port);
2796 }
2797
2798 static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
2799 {
2800 struct hcp_query_ehea *cb;
2801 u64 hret;
2802 int ret;
2803
2804 cb = (void *)get_zeroed_page(GFP_KERNEL);
2805 if (!cb) {
2806 ret = -ENOMEM;
2807 goto out;
2808 }
2809
2810 hret = ehea_h_query_ehea(adapter->handle, cb);
2811
2812 if (hret != H_SUCCESS) {
2813 ret = -EIO;
2814 goto out_herr;
2815 }
2816
2817 adapter->max_mc_mac = cb->max_mc_mac - 1;
2818 ret = 0;
2819
2820 out_herr:
2821 free_page((unsigned long)cb);
2822 out:
2823 return ret;
2824 }
2825
2826 static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
2827 {
2828 struct hcp_ehea_port_cb4 *cb4;
2829 u64 hret;
2830 int ret = 0;
2831
2832 *jumbo = 0;
2833
2834 /* (Try to) enable jumbo frames */
2835 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
2836 if (!cb4) {
2837 pr_err("no mem for cb4\n");
2838 ret = -ENOMEM;
2839 goto out;
2840 } else {
2841 hret = ehea_h_query_ehea_port(port->adapter->handle,
2842 port->logical_port_id,
2843 H_PORT_CB4,
2844 H_PORT_CB4_JUMBO, cb4);
2845 if (hret == H_SUCCESS) {
2846 if (cb4->jumbo_frame)
2847 *jumbo = 1;
2848 else {
2849 cb4->jumbo_frame = 1;
2850 hret = ehea_h_modify_ehea_port(port->adapter->
2851 handle,
2852 port->
2853 logical_port_id,
2854 H_PORT_CB4,
2855 H_PORT_CB4_JUMBO,
2856 cb4);
2857 if (hret == H_SUCCESS)
2858 *jumbo = 1;
2859 }
2860 } else
2861 ret = -EINVAL;
2862
2863 free_page((unsigned long)cb4);
2864 }
2865 out:
2866 return ret;
2867 }
2868
2869 static ssize_t ehea_show_port_id(struct device *dev,
2870 struct device_attribute *attr, char *buf)
2871 {
2872 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2873 return sprintf(buf, "%d", port->logical_port_id);
2874 }
2875
2876 static DEVICE_ATTR(log_port_id, 0444, ehea_show_port_id, NULL);
2877
2878 static void logical_port_release(struct device *dev)
2879 {
2880 struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
2881 of_node_put(port->ofdev.dev.of_node);
2882 }
2883
2884 static struct device *ehea_register_port(struct ehea_port *port,
2885 struct device_node *dn)
2886 {
2887 int ret;
2888
2889 port->ofdev.dev.of_node = of_node_get(dn);
2890 port->ofdev.dev.parent = &port->adapter->ofdev->dev;
2891 port->ofdev.dev.bus = &ibmebus_bus_type;
2892
2893 dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
2894 port->ofdev.dev.release = logical_port_release;
2895
2896 ret = of_device_register(&port->ofdev);
2897 if (ret) {
2898 pr_err("failed to register device. ret=%d\n", ret);
2899 goto out;
2900 }
2901
2902 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2903 if (ret) {
2904 pr_err("failed to register attributes, ret=%d\n", ret);
2905 goto out_unreg_of_dev;
2906 }
2907
2908 return &port->ofdev.dev;
2909
2910 out_unreg_of_dev:
2911 of_device_unregister(&port->ofdev);
2912 out:
2913 return NULL;
2914 }
2915
2916 static void ehea_unregister_port(struct ehea_port *port)
2917 {
2918 device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
2919 of_device_unregister(&port->ofdev);
2920 }
2921
2922 static const struct net_device_ops ehea_netdev_ops = {
2923 .ndo_open = ehea_open,
2924 .ndo_stop = ehea_stop,
2925 .ndo_start_xmit = ehea_start_xmit,
2926 .ndo_get_stats64 = ehea_get_stats64,
2927 .ndo_set_mac_address = ehea_set_mac_addr,
2928 .ndo_validate_addr = eth_validate_addr,
2929 .ndo_set_rx_mode = ehea_set_multicast_list,
2930 .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
2931 .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
2932 .ndo_tx_timeout = ehea_tx_watchdog,
2933 };
2934
2935 static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
2936 u32 logical_port_id,
2937 struct device_node *dn)
2938 {
2939 int ret;
2940 struct net_device *dev;
2941 struct ehea_port *port;
2942 struct device *port_dev;
2943 int jumbo;
2944
2945 /* Allocate memory for the port structures */
2946 dev = alloc_etherdev_mq(sizeof(struct ehea_port), EHEA_MAX_PORT_RES);
2947
2948 if (!dev) {
2949 ret = -ENOMEM;
2950 goto out_err;
2951 }
2952
2953 port = netdev_priv(dev);
2954
2955 mutex_init(&port->port_lock);
2956 port->state = EHEA_PORT_DOWN;
2957 port->sig_comp_iv = sq_entries / 10;
2958
2959 port->adapter = adapter;
2960 port->netdev = dev;
2961 port->logical_port_id = logical_port_id;
2962
2963 port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);
2964
2965 port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
2966 if (!port->mc_list) {
2967 ret = -ENOMEM;
2968 goto out_free_ethdev;
2969 }
2970
2971 INIT_LIST_HEAD(&port->mc_list->list);
2972
2973 ret = ehea_sense_port_attr(port);
2974 if (ret)
2975 goto out_free_mc_list;
2976
2977 netif_set_real_num_rx_queues(dev, port->num_def_qps);
2978 netif_set_real_num_tx_queues(dev, port->num_def_qps);
2979
2980 port_dev = ehea_register_port(port, dn);
2981 if (!port_dev)
2982 goto out_free_mc_list;
2983
2984 SET_NETDEV_DEV(dev, port_dev);
2985
2986 /* Initialize the net_device structure */
2987 memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
2988
2989 dev->netdev_ops = &ehea_netdev_ops;
2990 ehea_set_ethtool_ops(dev);
2991
2992 dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
2993 NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
2994 dev->features = NETIF_F_SG | NETIF_F_TSO |
2995 NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
2996 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
2997 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
2998 dev->vlan_features = NETIF_F_SG | NETIF_F_TSO | NETIF_F_HIGHDMA |
2999 NETIF_F_IP_CSUM;
3000 dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
3001
3002 /* MTU range: 68 - 9022 */
3003 dev->min_mtu = ETH_MIN_MTU;
3004 dev->max_mtu = EHEA_MAX_PACKET_SIZE;
3005
3006 INIT_WORK(&port->reset_task, ehea_reset_port);
3007 INIT_DELAYED_WORK(&port->stats_work, ehea_update_stats);
3008
3009 init_waitqueue_head(&port->swqe_avail_wq);
3010 init_waitqueue_head(&port->restart_wq);
3011
3012 ret = register_netdev(dev);
3013 if (ret) {
3014 pr_err("register_netdev failed. ret=%d\n", ret);
3015 goto out_unreg_port;
3016 }
3017
3018 ret = ehea_get_jumboframe_status(port, &jumbo);
3019 if (ret)
3020 netdev_err(dev, "failed determining jumbo frame status\n");
3021
3022 netdev_info(dev, "Jumbo frames are %sabled\n",
3023 jumbo == 1 ? "en" : "dis");
3024
3025 adapter->active_ports++;
3026
3027 return port;
3028
3029 out_unreg_port:
3030 ehea_unregister_port(port);
3031
3032 out_free_mc_list:
3033 kfree(port->mc_list);
3034
3035 out_free_ethdev:
3036 free_netdev(dev);
3037
3038 out_err:
3039 pr_err("setting up logical port with id=%d failed, ret=%d\n",
3040 logical_port_id, ret);
3041 return NULL;
3042 }
3043
3044 static void ehea_shutdown_single_port(struct ehea_port *port)
3045 {
3046 struct ehea_adapter *adapter = port->adapter;
3047
3048 cancel_work_sync(&port->reset_task);
3049 cancel_delayed_work_sync(&port->stats_work);
3050 unregister_netdev(port->netdev);
3051 ehea_unregister_port(port);
3052 kfree(port->mc_list);
3053 free_netdev(port->netdev);
3054 adapter->active_ports--;
3055 }
3056
3057 static int ehea_setup_ports(struct ehea_adapter *adapter)
3058 {
3059 struct device_node *lhea_dn;
3060 struct device_node *eth_dn = NULL;
3061
3062 const u32 *dn_log_port_id;
3063 int i = 0;
3064
3065 lhea_dn = adapter->ofdev->dev.of_node;
3066 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3067
3068 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3069 NULL);
3070 if (!dn_log_port_id) {
3071 pr_err("bad device node: eth_dn name=%pOF\n", eth_dn);
3072 continue;
3073 }
3074
3075 if (ehea_add_adapter_mr(adapter)) {
3076 pr_err("creating MR failed\n");
3077 of_node_put(eth_dn);
3078 return -EIO;
3079 }
3080
3081 adapter->port[i] = ehea_setup_single_port(adapter,
3082 *dn_log_port_id,
3083 eth_dn);
3084 if (adapter->port[i])
3085 netdev_info(adapter->port[i]->netdev,
3086 "logical port id #%d\n", *dn_log_port_id);
3087 else
3088 ehea_remove_adapter_mr(adapter);
3089
3090 i++;
3091 }
3092 return 0;
3093 }
3094
3095 static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3096 u32 logical_port_id)
3097 {
3098 struct device_node *lhea_dn;
3099 struct device_node *eth_dn = NULL;
3100 const u32 *dn_log_port_id;
3101
3102 lhea_dn = adapter->ofdev->dev.of_node;
3103 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
3104
3105 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3106 NULL);
3107 if (dn_log_port_id)
3108 if (*dn_log_port_id == logical_port_id)
3109 return eth_dn;
3110 }
3111
3112 return NULL;
3113 }
3114
3115 static ssize_t ehea_probe_port(struct device *dev,
3116 struct device_attribute *attr,
3117 const char *buf, size_t count)
3118 {
3119 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3120 struct ehea_port *port;
3121 struct device_node *eth_dn = NULL;
3122 int i;
3123
3124 u32 logical_port_id;
3125
3126 sscanf(buf, "%d", &logical_port_id);
3127
3128 port = ehea_get_port(adapter, logical_port_id);
3129
3130 if (port) {
3131 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3132 logical_port_id);
3133 return -EINVAL;
3134 }
3135
3136 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3137
3138 if (!eth_dn) {
3139 pr_info("no logical port with id %d found\n", logical_port_id);
3140 return -EINVAL;
3141 }
3142
3143 if (ehea_add_adapter_mr(adapter)) {
3144 pr_err("creating MR failed\n");
3145 of_node_put(eth_dn);
3146 return -EIO;
3147 }
3148
3149 port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);
3150
3151 of_node_put(eth_dn);
3152
3153 if (port) {
3154 for (i = 0; i < EHEA_MAX_PORTS; i++)
3155 if (!adapter->port[i]) {
3156 adapter->port[i] = port;
3157 break;
3158 }
3159
3160 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3161 logical_port_id);
3162 } else {
3163 ehea_remove_adapter_mr(adapter);
3164 return -EIO;
3165 }
3166
3167 return (ssize_t) count;
3168 }
3169
3170 static ssize_t ehea_remove_port(struct device *dev,
3171 struct device_attribute *attr,
3172 const char *buf, size_t count)
3173 {
3174 struct ehea_adapter *adapter = dev_get_drvdata(dev);
3175 struct ehea_port *port;
3176 int i;
3177 u32 logical_port_id;
3178
3179 sscanf(buf, "%d", &logical_port_id);
3180
3181 port = ehea_get_port(adapter, logical_port_id);
3182
3183 if (port) {
3184 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3185 logical_port_id);
3186
3187 ehea_shutdown_single_port(port);
3188
3189 for (i = 0; i < EHEA_MAX_PORTS; i++)
3190 if (adapter->port[i] == port) {
3191 adapter->port[i] = NULL;
3192 break;
3193 }
3194 } else {
3195 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3196 logical_port_id);
3197 return -EINVAL;
3198 }
3199
3200 ehea_remove_adapter_mr(adapter);
3201
3202 return (ssize_t) count;
3203 }
3204
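/*
 * Write-only sysfs hooks for adding and removing logical ports at
 * runtime. Usage sketch (the device path below is illustrative and
 * depends on the ibmebus device name):
 *
 *   echo 1 > /sys/bus/ibmebus/devices/<lhea-device>/probe_port
 *   echo 1 > /sys/bus/ibmebus/devices/<lhea-device>/remove_port
 */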
3205 static DEVICE_ATTR(probe_port, 0200, NULL, ehea_probe_port);
3206 static DEVICE_ATTR(remove_port, 0200, NULL, ehea_remove_port);
3207
3208 static int ehea_create_device_sysfs(struct platform_device *dev)
3209 {
3210 int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
3211 if (ret)
3212 goto out;
3213
3214 ret = device_create_file(&dev->dev, &dev_attr_remove_port);
3215 out:
3216 return ret;
3217 }
3218
3219 static void ehea_remove_device_sysfs(struct platform_device *dev)
3220 {
3221 device_remove_file(&dev->dev, &dev_attr_probe_port);
3222 device_remove_file(&dev->dev, &dev_attr_remove_port);
3223 }
3224
3225 static int ehea_reboot_notifier(struct notifier_block *nb,
3226 unsigned long action, void *unused)
3227 {
3228 if (action == SYS_RESTART) {
3229 pr_info("Reboot: freeing all eHEA resources\n");
3230 ibmebus_unregister_driver(&ehea_driver);
3231 }
3232 return NOTIFY_DONE;
3233 }
3234
3235 static struct notifier_block ehea_reboot_nb = {
3236 .notifier_call = ehea_reboot_notifier,
3237 };
3238
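/*
 * Memory hotplug notifier: on online and (canceled-)offline events
 * the affected section is added to or removed from the busmap,
 * transfers are stopped via __EHEA_STOP_XFER, and all memory regions
 * are re-registered through ehea_rereg_mrs().
 */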
3239 static int ehea_mem_notifier(struct notifier_block *nb,
3240 unsigned long action, void *data)
3241 {
3242 int ret = NOTIFY_BAD;
3243 struct memory_notify *arg = data;
3244
3245 mutex_lock(&dlpar_mem_lock);
3246
3247 switch (action) {
3248 case MEM_CANCEL_OFFLINE:
3249 pr_info("memory offlining canceled");
3250 /* Fall through: re-add the canceled memory block */
3251
3252 case MEM_ONLINE:
3253 pr_info("memory is going online");
3254 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3255 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3256 goto out_unlock;
3257 ehea_rereg_mrs();
3258 break;
3259
3260 case MEM_GOING_OFFLINE:
3261 pr_info("memory is going offline");
3262 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3263 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3264 goto out_unlock;
3265 ehea_rereg_mrs();
3266 break;
3267
3268 default:
3269 break;
3270 }
3271
3272 ehea_update_firmware_handles();
3273 ret = NOTIFY_OK;
3274
3275 out_unlock:
3276 mutex_unlock(&dlpar_mem_lock);
3277 return ret;
3278 }
3279
3280 static struct notifier_block ehea_mem_nb = {
3281 .notifier_call = ehea_mem_notifier,
3282 };
3283
3284 static void ehea_crash_handler(void)
3285 {
3286 int i;
3287
3288 if (ehea_fw_handles.arr)
3289 for (i = 0; i < ehea_fw_handles.num_entries; i++)
3290 ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
3291 ehea_fw_handles.arr[i].fwh,
3292 FORCE_FREE);
3293
3294 if (ehea_bcmc_regs.arr)
3295 for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
3296 ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
3297 ehea_bcmc_regs.arr[i].port_id,
3298 ehea_bcmc_regs.arr[i].reg_type,
3299 ehea_bcmc_regs.arr[i].macaddr,
3300 0, H_DEREG_BCMC);
3301 }
3302
3303 static atomic_t ehea_memory_hooks_registered;
3304
3305 /* Register memory hooks on probe of the first adapter */
3306 static int ehea_register_memory_hooks(void)
3307 {
3308 int ret = 0;
3309
3310 if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
3311 return 0;
3312
3313 ret = ehea_create_busmap();
3314 if (ret) {
3315 pr_info("ehea_create_busmap failed\n");
3316 goto out;
3317 }
3318
3319 ret = register_reboot_notifier(&ehea_reboot_nb);
3320 if (ret) {
3321 pr_info("register_reboot_notifier failed\n");
3322 goto out;
3323 }
3324
3325 ret = register_memory_notifier(&ehea_mem_nb);
3326 if (ret) {
3327 pr_info("register_memory_notifier failed\n");
3328 goto out2;
3329 }
3330
3331 ret = crash_shutdown_register(ehea_crash_handler);
3332 if (ret) {
3333 pr_info("crash_shutdown_register failed\n");
3334 goto out3;
3335 }
3336
3337 return 0;
3338
3339 out3:
3340 unregister_memory_notifier(&ehea_mem_nb);
3341 out2:
3342 unregister_reboot_notifier(&ehea_reboot_nb);
3343 out:
3344 atomic_dec(&ehea_memory_hooks_registered);
3345 return ret;
3346 }
3347
3348 static void ehea_unregister_memory_hooks(void)
3349 {
3350 /* Only remove the hooks if we've registered them */
3351 if (atomic_read(&ehea_memory_hooks_registered) == 0)
3352 return;
3353
3354 unregister_reboot_notifier(&ehea_reboot_nb);
3355 if (crash_shutdown_unregister(ehea_crash_handler))
3356 pr_info("failed unregistering crash handler\n");
3357 unregister_memory_notifier(&ehea_mem_nb);
3358 }
3359
3360 static int ehea_probe_adapter(struct platform_device *dev)
3361 {
3362 struct ehea_adapter *adapter;
3363 const u64 *adapter_handle;
3364 int ret;
3365 int i;
3366
3367 ret = ehea_register_memory_hooks();
3368 if (ret)
3369 return ret;
3370
3371 if (!dev || !dev->dev.of_node) {
3372 pr_err("Invalid ibmebus device probed\n");
3373 return -EINVAL;
3374 }
3375
3376 adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
3377 if (!adapter) {
3378 ret = -ENOMEM;
3379 dev_err(&dev->dev, "no mem for ehea_adapter\n");
3380 goto out;
3381 }
3382
3383 list_add(&adapter->list, &adapter_list);
3384
3385 adapter->ofdev = dev;
3386
3387 adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
3388 NULL);
3389 if (adapter_handle)
3390 adapter->handle = *adapter_handle;
3391
3392 if (!adapter->handle) {
3393 dev_err(&dev->dev, "failed getting handle for adapter"
3394 " '%pOF'\n", dev->dev.of_node);
3395 ret = -ENODEV;
3396 goto out_free_ad;
3397 }
3398
3399 adapter->pd = EHEA_PD_ID;
3400
3401 platform_set_drvdata(dev, adapter);
3402
3403
3404 /* Initialize adapter and ports */
3405 /* Get adapter properties */
3406 ret = ehea_sense_adapter_attr(adapter);
3407 if (ret) {
3408 dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
3409 goto out_free_ad;
3410 }
3411
3412 adapter->neq = ehea_create_eq(adapter,
3413 EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
3414 if (!adapter->neq) {
3415 ret = -EIO;
3416 dev_err(&dev->dev, "NEQ creation failed\n");
3417 goto out_free_ad;
3418 }
3419
3420 tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
3421 (unsigned long)adapter);
3422
3423 ret = ehea_create_device_sysfs(dev);
3424 if (ret)
3425 goto out_kill_eq;
3426
3427 ret = ehea_setup_ports(adapter);
3428 if (ret) {
3429 dev_err(&dev->dev, "setup_ports failed\n");
3430 goto out_rem_dev_sysfs;
3431 }
3432
3433 ret = ibmebus_request_irq(adapter->neq->attr.ist1,
3434 ehea_interrupt_neq, 0,
3435 "ehea_neq", adapter);
3436 if (ret) {
3437 dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
3438 goto out_shutdown_ports;
3439 }
3440
3441 /* Handle any events that might be pending */
3442 tasklet_hi_schedule(&adapter->neq_tasklet);
3443
3444 ret = 0;
3445 goto out;
3446
3447 out_shutdown_ports:
3448 for (i = 0; i < EHEA_MAX_PORTS; i++)
3449 if (adapter->port[i]) {
3450 ehea_shutdown_single_port(adapter->port[i]);
3451 adapter->port[i] = NULL;
3452 }
3453
3454 out_rem_dev_sysfs:
3455 ehea_remove_device_sysfs(dev);
3456
3457 out_kill_eq:
3458 ehea_destroy_eq(adapter->neq);
3459
3460 out_free_ad:
3461 list_del(&adapter->list);
3462
3463 out:
3464 ehea_update_firmware_handles();
3465
3466 return ret;
3467 }
3468
3469 static int ehea_remove(struct platform_device *dev)
3470 {
3471 struct ehea_adapter *adapter = platform_get_drvdata(dev);
3472 int i;
3473
3474 for (i = 0; i < EHEA_MAX_PORTS; i++)
3475 if (adapter->port[i]) {
3476 ehea_shutdown_single_port(adapter->port[i]);
3477 adapter->port[i] = NULL;
3478 }
3479
3480 ehea_remove_device_sysfs(dev);
3481
3482 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3483 tasklet_kill(&adapter->neq_tasklet);
3484
3485 ehea_destroy_eq(adapter->neq);
3486 ehea_remove_adapter_mr(adapter);
3487 list_del(&adapter->list);
3488
3489 ehea_update_firmware_handles();
3490
3491 return 0;
3492 }
3493
3494 static int check_module_parm(void)
3495 {
3496 int ret = 0;
3497
3498 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3499 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3500 pr_info("Bad parameter: rq1_entries\n");
3501 ret = -EINVAL;
3502 }
3503 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3504 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3505 pr_info("Bad parameter: rq2_entries\n");
3506 ret = -EINVAL;
3507 }
3508 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3509 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3510 pr_info("Bad parameter: rq3_entries\n");
3511 ret = -EINVAL;
3512 }
3513 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3514 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3515 pr_info("Bad parameter: sq_entries\n");
3516 ret = -EINVAL;
3517 }
3518
3519 return ret;
3520 }
3521
3522 static ssize_t capabilities_show(struct device_driver *drv, char *buf)
3523 {
3524 return sprintf(buf, "%d", EHEA_CAPABILITIES);
3525 }
3526
3527 static DRIVER_ATTR_RO(capabilities);
3528
3529 static int __init ehea_module_init(void)
3530 {
3531 int ret;
3532
3533 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3534
3535 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3536 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3537
3538 mutex_init(&ehea_fw_handles.lock);
3539 spin_lock_init(&ehea_bcmc_regs.lock);
3540
3541 ret = check_module_parm();
3542 if (ret)
3543 goto out;
3544
3545 ret = ibmebus_register_driver(&ehea_driver);
3546 if (ret) {
3547 pr_err("failed registering eHEA device driver on ebus\n");
3548 goto out;
3549 }
3550
3551 ret = driver_create_file(&ehea_driver.driver,
3552 &driver_attr_capabilities);
3553 if (ret) {
3554 pr_err("failed to register capabilities attribute, ret=%d\n",
3555 ret);
3556 goto out2;
3557 }
3558
3559 return ret;
3560
3561 out2:
3562 ibmebus_unregister_driver(&ehea_driver);
3563 out:
3564 return ret;
3565 }
3566
3567 static void __exit ehea_module_exit(void)
3568 {
3569 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3570 ibmebus_unregister_driver(&ehea_driver);
3571 ehea_unregister_memory_hooks();
3572 kfree(ehea_fw_handles.arr);
3573 kfree(ehea_bcmc_regs.arr);
3574 ehea_destroy_busmap();
3575 }
3576
3577 module_init(ehea_module_init);
3578 module_exit(ehea_module_exit);