This source file includes the following definitions.
- h_reg_sub_crq
- alloc_long_term_buff
- free_long_term_buff
- reset_long_term_buff
- deactivate_rx_pools
- replenish_rx_pool
- replenish_pools
- release_stats_buffers
- init_stats_buffers
- release_stats_token
- init_stats_token
- reset_rx_pools
- release_rx_pools
- init_rx_pools
- reset_one_tx_pool
- reset_tx_pools
- release_vpd_data
- release_one_tx_pool
- release_tx_pools
- init_one_tx_pool
- init_tx_pools
- ibmvnic_napi_enable
- ibmvnic_napi_disable
- init_napi
- release_napi
- ibmvnic_login
- release_login_buffer
- release_login_rsp_buffer
- release_resources
- set_link_state
- set_real_num_queues
- ibmvnic_get_vpd
- init_resources
- __ibmvnic_open
- ibmvnic_open
- clean_rx_pools
- clean_one_tx_pool
- clean_tx_pools
- ibmvnic_disable_irqs
- ibmvnic_cleanup
- __ibmvnic_close
- ibmvnic_close
- build_hdr_data
- create_hdr_descs
- build_hdr_descs_arr
- ibmvnic_xmit_workarounds
- ibmvnic_xmit
- ibmvnic_set_multi
- __ibmvnic_set_mac
- ibmvnic_set_mac
- do_change_param_reset
- do_reset
- do_hard_reset
- get_next_rwi
- free_all_rwi
- __ibmvnic_reset
- __ibmvnic_delayed_reset
- ibmvnic_reset
- ibmvnic_tx_timeout
- remove_buff_from_pool
- ibmvnic_poll
- wait_for_reset
- ibmvnic_change_mtu
- ibmvnic_features_check
- ibmvnic_get_link_ksettings
- ibmvnic_get_drvinfo
- ibmvnic_get_msglevel
- ibmvnic_set_msglevel
- ibmvnic_get_link
- ibmvnic_get_ringparam
- ibmvnic_set_ringparam
- ibmvnic_get_channels
- ibmvnic_set_channels
- ibmvnic_get_strings
- ibmvnic_get_sset_count
- ibmvnic_get_ethtool_stats
- ibmvnic_get_priv_flags
- ibmvnic_set_priv_flags
- reset_one_sub_crq_queue
- reset_sub_crq_queues
- release_sub_crq_queue
- init_sub_crq_queue
- release_sub_crqs
- disable_scrq_irq
- enable_scrq_irq
- ibmvnic_complete_tx
- ibmvnic_interrupt_tx
- ibmvnic_interrupt_rx
- init_sub_crq_irqs
- init_sub_crqs
- ibmvnic_send_req_caps
- pending_scrq
- ibmvnic_next_scrq
- ibmvnic_next_crq
- print_subcrq_error
- send_subcrq
- send_subcrq_indirect
- ibmvnic_send_crq
- ibmvnic_send_crq_init
- send_version_xchg
- vnic_client_data_len
- vnic_add_client_data
- send_login
- send_request_map
- send_request_unmap
- send_map_query
- send_cap_queries
- handle_vpd_size_rsp
- handle_vpd_rsp
- handle_query_ip_offload_rsp
- ibmvnic_fw_err_cause
- handle_error_indication
- handle_change_mac_rsp
- handle_request_cap_rsp
- handle_login_rsp
- handle_request_unmap_rsp
- handle_query_map_rsp
- handle_query_cap_rsp
- send_query_phys_parms
- handle_query_phys_parms_rsp
- ibmvnic_handle_crq
- ibmvnic_interrupt
- ibmvnic_tasklet
- ibmvnic_reenable_crq_queue
- ibmvnic_reset_crq
- release_crq_queue
- init_crq_queue
- ibmvnic_reset_init
- ibmvnic_init
- ibmvnic_probe
- ibmvnic_remove
- failover_store
- ibmvnic_get_desired_dma
- ibmvnic_resume
- ibmvnic_module_init
- ibmvnic_module_exit
35 #include <linux/module.h>
36 #include <linux/moduleparam.h>
37 #include <linux/types.h>
38 #include <linux/errno.h>
39 #include <linux/completion.h>
40 #include <linux/ioport.h>
41 #include <linux/dma-mapping.h>
42 #include <linux/kernel.h>
43 #include <linux/netdevice.h>
44 #include <linux/etherdevice.h>
45 #include <linux/skbuff.h>
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/mm.h>
49 #include <linux/ethtool.h>
50 #include <linux/proc_fs.h>
51 #include <linux/if_arp.h>
52 #include <linux/in.h>
53 #include <linux/ip.h>
54 #include <linux/ipv6.h>
55 #include <linux/irq.h>
56 #include <linux/kthread.h>
57 #include <linux/seq_file.h>
58 #include <linux/interrupt.h>
59 #include <net/net_namespace.h>
60 #include <asm/hvcall.h>
61 #include <linux/atomic.h>
62 #include <asm/vio.h>
63 #include <asm/iommu.h>
64 #include <linux/uaccess.h>
65 #include <asm/firmware.h>
66 #include <linux/workqueue.h>
67 #include <linux/if_vlan.h>
68 #include <linux/utsname.h>
69
70 #include "ibmvnic.h"
71
72 static const char ibmvnic_driver_name[] = "ibmvnic";
73 static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";
74
75 MODULE_AUTHOR("Santiago Leon");
76 MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
77 MODULE_LICENSE("GPL");
78 MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
79
80 static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
81 static int ibmvnic_remove(struct vio_dev *);
82 static void release_sub_crqs(struct ibmvnic_adapter *, bool);
83 static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
84 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
85 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
86 static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
87 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
88 union sub_crq *sub_crq);
89 static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
90 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
91 static int enable_scrq_irq(struct ibmvnic_adapter *,
92 struct ibmvnic_sub_crq_queue *);
93 static int disable_scrq_irq(struct ibmvnic_adapter *,
94 struct ibmvnic_sub_crq_queue *);
95 static int pending_scrq(struct ibmvnic_adapter *,
96 struct ibmvnic_sub_crq_queue *);
97 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
98 struct ibmvnic_sub_crq_queue *);
99 static int ibmvnic_poll(struct napi_struct *napi, int data);
100 static void send_map_query(struct ibmvnic_adapter *adapter);
101 static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
102 static int send_request_unmap(struct ibmvnic_adapter *, u8);
103 static int send_login(struct ibmvnic_adapter *adapter);
104 static void send_cap_queries(struct ibmvnic_adapter *adapter);
105 static int init_sub_crqs(struct ibmvnic_adapter *);
106 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
107 static int ibmvnic_init(struct ibmvnic_adapter *);
108 static int ibmvnic_reset_init(struct ibmvnic_adapter *);
109 static void release_crq_queue(struct ibmvnic_adapter *);
110 static int __ibmvnic_set_mac(struct net_device *, u8 *);
111 static int init_crq_queue(struct ibmvnic_adapter *adapter);
112 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
113
114 struct ibmvnic_stat {
115 char name[ETH_GSTRING_LEN];
116 int offset;
117 };
118
119 #define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
120 offsetof(struct ibmvnic_statistics, stat))
121 #define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
122
123 static const struct ibmvnic_stat ibmvnic_stats[] = {
124 {"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
125 {"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
126 {"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
127 {"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
128 {"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
129 {"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
130 {"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
131 {"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
132 {"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
133 {"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
134 {"align_errors", IBMVNIC_STAT_OFF(align_errors)},
135 {"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
136 {"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
137 {"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
138 {"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
139 {"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
140 {"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
141 {"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
142 {"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
143 {"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
144 {"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
145 {"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
146 };
147
148 static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
149 unsigned long length, unsigned long *number,
150 unsigned long *irq)
151 {
152 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
153 long rc;
154
155 rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
156 *number = retbuf[0];
157 *irq = retbuf[1];
158
159 return rc;
160 }
161
162 static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
163 struct ibmvnic_long_term_buff *ltb, int size)
164 {
165 struct device *dev = &adapter->vdev->dev;
166 int rc;
167
168 ltb->size = size;
169 ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
170 GFP_KERNEL);
171
172 if (!ltb->buff) {
173 dev_err(dev, "Couldn't alloc long term buffer\n");
174 return -ENOMEM;
175 }
176 ltb->map_id = adapter->map_id;
177 adapter->map_id++;
178
179 reinit_completion(&adapter->fw_done);
180 rc = send_request_map(adapter, ltb->addr,
181 ltb->size, ltb->map_id);
182 if (rc) {
183 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
184 return rc;
185 }
186 wait_for_completion(&adapter->fw_done);
187
188 if (adapter->fw_done_rc) {
189 dev_err(dev, "Couldn't map long term buffer, rc = %d\n",
190 adapter->fw_done_rc);
191 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
192 return -1;
193 }
194 return 0;
195 }
196
197 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
198 struct ibmvnic_long_term_buff *ltb)
199 {
200 struct device *dev = &adapter->vdev->dev;
201
202 if (!ltb->buff)
203 return;
204
205 if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
206 adapter->reset_reason != VNIC_RESET_MOBILITY)
207 send_request_unmap(adapter, ltb->map_id);
208 dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
209 }
210
211 static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
212 struct ibmvnic_long_term_buff *ltb)
213 {
214 int rc;
215
216 memset(ltb->buff, 0, ltb->size);
217
218 reinit_completion(&adapter->fw_done);
219 rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
220 if (rc)
221 return rc;
222 wait_for_completion(&adapter->fw_done);
223
224 if (adapter->fw_done_rc) {
225 dev_info(&adapter->vdev->dev,
226 "Reset failed, attempting to free and reallocate buffer\n");
227 free_long_term_buff(adapter, ltb);
228 return alloc_long_term_buff(adapter, ltb, ltb->size);
229 }
230 return 0;
231 }
232
233 static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
234 {
235 int i;
236
237 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
238 i++)
239 adapter->rx_pool[i].active = 0;
240 }
241
242 static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
243 struct ibmvnic_rx_pool *pool)
244 {
245 int count = pool->size - atomic_read(&pool->available);
246 struct device *dev = &adapter->vdev->dev;
247 int buffers_added = 0;
248 unsigned long lpar_rc;
249 union sub_crq sub_crq;
250 struct sk_buff *skb;
251 unsigned int offset;
252 dma_addr_t dma_addr;
253 unsigned char *dst;
254 u64 *handle_array;
255 int shift = 0;
256 int index;
257 int i;
258
259 if (!pool->active)
260 return;
261
262 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
263 be32_to_cpu(adapter->login_rsp_buf->
264 off_rxadd_subcrqs));
265
266 for (i = 0; i < count; ++i) {
267 skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
268 if (!skb) {
269 dev_err(dev, "Couldn't replenish rx buff\n");
270 adapter->replenish_no_mem++;
271 break;
272 }
273
274 index = pool->free_map[pool->next_free];
275
276 if (pool->rx_buff[index].skb)
277 dev_err(dev, "Inconsistent free_map!\n");
278
279
280 offset = index * pool->buff_size;
281 dst = pool->long_term_buff.buff + offset;
282 memset(dst, 0, pool->buff_size);
283 dma_addr = pool->long_term_buff.addr + offset;
284 pool->rx_buff[index].data = dst;
285
286 pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
287 pool->rx_buff[index].dma = dma_addr;
288 pool->rx_buff[index].skb = skb;
289 pool->rx_buff[index].pool_index = pool->index;
290 pool->rx_buff[index].size = pool->buff_size;
291
292 memset(&sub_crq, 0, sizeof(sub_crq));
293 sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
294 sub_crq.rx_add.correlator =
295 cpu_to_be64((u64)&pool->rx_buff[index]);
296 sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
297 sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
298
299 /* The length field of the sCRQ is defined to be 24 bits so the
300  * buffer size needs to be left shifted by a byte before it is
301  * converted to big endian to prevent the last byte from being
302  * truncated.
303  */
304 #ifdef __LITTLE_ENDIAN__
305 shift = 8;
306 #endif
307 sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
308
309 lpar_rc = send_subcrq(adapter, handle_array[pool->index],
310 &sub_crq);
311 if (lpar_rc != H_SUCCESS)
312 goto failure;
313
314 buffers_added++;
315 adapter->replenish_add_buff_success++;
316 pool->next_free = (pool->next_free + 1) % pool->size;
317 }
318 atomic_add(buffers_added, &pool->available);
319 return;
320
321 failure:
322 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
323 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
324 pool->free_map[pool->next_free] = index;
325 pool->rx_buff[index].skb = NULL;
326
327 dev_kfree_skb_any(skb);
328 adapter->replenish_add_buff_failure++;
329 atomic_add(buffers_added, &pool->available);
330
331 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
332 /* Disable buffer pool replenishment and report carrier off if
333  * queue is closed or pending failover.
334  * Firmware guarantees that a signal will be sent to the
335  * driver, triggering a reset.
336  */
337 deactivate_rx_pools(adapter);
338 netif_carrier_off(adapter->netdev);
339 }
340 }
341
342 static void replenish_pools(struct ibmvnic_adapter *adapter)
343 {
344 int i;
345
346 adapter->replenish_task_cycles++;
347 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
348 i++) {
349 if (adapter->rx_pool[i].active)
350 replenish_rx_pool(adapter, &adapter->rx_pool[i]);
351 }
352 }
353
354 static void release_stats_buffers(struct ibmvnic_adapter *adapter)
355 {
356 kfree(adapter->tx_stats_buffers);
357 kfree(adapter->rx_stats_buffers);
358 adapter->tx_stats_buffers = NULL;
359 adapter->rx_stats_buffers = NULL;
360 }
361
362 static int init_stats_buffers(struct ibmvnic_adapter *adapter)
363 {
364 adapter->tx_stats_buffers =
365 kcalloc(IBMVNIC_MAX_QUEUES,
366 sizeof(struct ibmvnic_tx_queue_stats),
367 GFP_KERNEL);
368 if (!adapter->tx_stats_buffers)
369 return -ENOMEM;
370
371 adapter->rx_stats_buffers =
372 kcalloc(IBMVNIC_MAX_QUEUES,
373 sizeof(struct ibmvnic_rx_queue_stats),
374 GFP_KERNEL);
375 if (!adapter->rx_stats_buffers)
376 return -ENOMEM;
377
378 return 0;
379 }
380
381 static void release_stats_token(struct ibmvnic_adapter *adapter)
382 {
383 struct device *dev = &adapter->vdev->dev;
384
385 if (!adapter->stats_token)
386 return;
387
388 dma_unmap_single(dev, adapter->stats_token,
389 sizeof(struct ibmvnic_statistics),
390 DMA_FROM_DEVICE);
391 adapter->stats_token = 0;
392 }
393
394 static int init_stats_token(struct ibmvnic_adapter *adapter)
395 {
396 struct device *dev = &adapter->vdev->dev;
397 dma_addr_t stok;
398
399 stok = dma_map_single(dev, &adapter->stats,
400 sizeof(struct ibmvnic_statistics),
401 DMA_FROM_DEVICE);
402 if (dma_mapping_error(dev, stok)) {
403 dev_err(dev, "Couldn't map stats buffer\n");
404 return -1;
405 }
406
407 adapter->stats_token = stok;
408 netdev_dbg(adapter->netdev, "Stats token initialized (%llx)\n", stok);
409 return 0;
410 }
411
412 static int reset_rx_pools(struct ibmvnic_adapter *adapter)
413 {
414 struct ibmvnic_rx_pool *rx_pool;
415 int rx_scrqs;
416 int i, j, rc;
417 u64 *size_array;
418
419 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
420 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
421
422 rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
423 for (i = 0; i < rx_scrqs; i++) {
424 rx_pool = &adapter->rx_pool[i];
425
426 netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
427
428 if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
429 free_long_term_buff(adapter, &rx_pool->long_term_buff);
430 rx_pool->buff_size = be64_to_cpu(size_array[i]);
431 rc = alloc_long_term_buff(adapter,
432 &rx_pool->long_term_buff,
433 rx_pool->size *
434 rx_pool->buff_size);
435 } else {
436 rc = reset_long_term_buff(adapter,
437 &rx_pool->long_term_buff);
438 }
439
440 if (rc)
441 return rc;
442
443 for (j = 0; j < rx_pool->size; j++)
444 rx_pool->free_map[j] = j;
445
446 memset(rx_pool->rx_buff, 0,
447 rx_pool->size * sizeof(struct ibmvnic_rx_buff));
448
449 atomic_set(&rx_pool->available, 0);
450 rx_pool->next_alloc = 0;
451 rx_pool->next_free = 0;
452 rx_pool->active = 1;
453 }
454
455 return 0;
456 }
457
458 static void release_rx_pools(struct ibmvnic_adapter *adapter)
459 {
460 struct ibmvnic_rx_pool *rx_pool;
461 int i, j;
462
463 if (!adapter->rx_pool)
464 return;
465
466 for (i = 0; i < adapter->num_active_rx_pools; i++) {
467 rx_pool = &adapter->rx_pool[i];
468
469 netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
470
471 kfree(rx_pool->free_map);
472 free_long_term_buff(adapter, &rx_pool->long_term_buff);
473
474 if (!rx_pool->rx_buff)
475 continue;
476
477 for (j = 0; j < rx_pool->size; j++) {
478 if (rx_pool->rx_buff[j].skb) {
479 dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
480 rx_pool->rx_buff[j].skb = NULL;
481 }
482 }
483
484 kfree(rx_pool->rx_buff);
485 }
486
487 kfree(adapter->rx_pool);
488 adapter->rx_pool = NULL;
489 adapter->num_active_rx_pools = 0;
490 }
491
492 static int init_rx_pools(struct net_device *netdev)
493 {
494 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
495 struct device *dev = &adapter->vdev->dev;
496 struct ibmvnic_rx_pool *rx_pool;
497 int rxadd_subcrqs;
498 u64 *size_array;
499 int i, j;
500
501 rxadd_subcrqs =
502 be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
503 size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
504 be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
505
506 adapter->rx_pool = kcalloc(rxadd_subcrqs,
507 sizeof(struct ibmvnic_rx_pool),
508 GFP_KERNEL);
509 if (!adapter->rx_pool) {
510 dev_err(dev, "Failed to allocate rx pools\n");
511 return -1;
512 }
513
514 adapter->num_active_rx_pools = rxadd_subcrqs;
515
516 for (i = 0; i < rxadd_subcrqs; i++) {
517 rx_pool = &adapter->rx_pool[i];
518
519 netdev_dbg(adapter->netdev,
520 "Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
521 i, adapter->req_rx_add_entries_per_subcrq,
522 be64_to_cpu(size_array[i]));
523
524 rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
525 rx_pool->index = i;
526 rx_pool->buff_size = be64_to_cpu(size_array[i]);
527 rx_pool->active = 1;
528
529 rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
530 GFP_KERNEL);
531 if (!rx_pool->free_map) {
532 release_rx_pools(adapter);
533 return -1;
534 }
535
536 rx_pool->rx_buff = kcalloc(rx_pool->size,
537 sizeof(struct ibmvnic_rx_buff),
538 GFP_KERNEL);
539 if (!rx_pool->rx_buff) {
540 dev_err(dev, "Couldn't alloc rx buffers\n");
541 release_rx_pools(adapter);
542 return -1;
543 }
544
545 if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
546 rx_pool->size * rx_pool->buff_size)) {
547 release_rx_pools(adapter);
548 return -1;
549 }
550
551 for (j = 0; j < rx_pool->size; ++j)
552 rx_pool->free_map[j] = j;
553
554 atomic_set(&rx_pool->available, 0);
555 rx_pool->next_alloc = 0;
556 rx_pool->next_free = 0;
557 }
558
559 return 0;
560 }
561
562 static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
563 struct ibmvnic_tx_pool *tx_pool)
564 {
565 int rc, i;
566
567 rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
568 if (rc)
569 return rc;
570
571 memset(tx_pool->tx_buff, 0,
572 tx_pool->num_buffers *
573 sizeof(struct ibmvnic_tx_buff));
574
575 for (i = 0; i < tx_pool->num_buffers; i++)
576 tx_pool->free_map[i] = i;
577
578 tx_pool->consumer_index = 0;
579 tx_pool->producer_index = 0;
580
581 return 0;
582 }
583
584 static int reset_tx_pools(struct ibmvnic_adapter *adapter)
585 {
586 int tx_scrqs;
587 int i, rc;
588
589 tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
590 for (i = 0; i < tx_scrqs; i++) {
591 rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
592 if (rc)
593 return rc;
594 rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
595 if (rc)
596 return rc;
597 }
598
599 return 0;
600 }
601
602 static void release_vpd_data(struct ibmvnic_adapter *adapter)
603 {
604 if (!adapter->vpd)
605 return;
606
607 kfree(adapter->vpd->buff);
608 kfree(adapter->vpd);
609
610 adapter->vpd = NULL;
611 }
612
613 static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
614 struct ibmvnic_tx_pool *tx_pool)
615 {
616 kfree(tx_pool->tx_buff);
617 kfree(tx_pool->free_map);
618 free_long_term_buff(adapter, &tx_pool->long_term_buff);
619 }
620
621 static void release_tx_pools(struct ibmvnic_adapter *adapter)
622 {
623 int i;
624
625 if (!adapter->tx_pool)
626 return;
627
628 for (i = 0; i < adapter->num_active_tx_pools; i++) {
629 release_one_tx_pool(adapter, &adapter->tx_pool[i]);
630 release_one_tx_pool(adapter, &adapter->tso_pool[i]);
631 }
632
633 kfree(adapter->tx_pool);
634 adapter->tx_pool = NULL;
635 kfree(adapter->tso_pool);
636 adapter->tso_pool = NULL;
637 adapter->num_active_tx_pools = 0;
638 }
639
640 static int init_one_tx_pool(struct net_device *netdev,
641 struct ibmvnic_tx_pool *tx_pool,
642 int num_entries, int buf_size)
643 {
644 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
645 int i;
646
647 tx_pool->tx_buff = kcalloc(num_entries,
648 sizeof(struct ibmvnic_tx_buff),
649 GFP_KERNEL);
650 if (!tx_pool->tx_buff)
651 return -1;
652
653 if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
654 num_entries * buf_size))
655 return -1;
656
657 tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
658 if (!tx_pool->free_map)
659 return -1;
660
661 for (i = 0; i < num_entries; i++)
662 tx_pool->free_map[i] = i;
663
664 tx_pool->consumer_index = 0;
665 tx_pool->producer_index = 0;
666 tx_pool->num_buffers = num_entries;
667 tx_pool->buf_size = buf_size;
668
669 return 0;
670 }
671
672 static int init_tx_pools(struct net_device *netdev)
673 {
674 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
675 int tx_subcrqs;
676 int i, rc;
677
678 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
679 adapter->tx_pool = kcalloc(tx_subcrqs,
680 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
681 if (!adapter->tx_pool)
682 return -1;
683
684 adapter->tso_pool = kcalloc(tx_subcrqs,
685 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
686 if (!adapter->tso_pool)
687 return -1;
688
689 adapter->num_active_tx_pools = tx_subcrqs;
690
691 for (i = 0; i < tx_subcrqs; i++) {
692 rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
693 adapter->req_tx_entries_per_subcrq,
694 adapter->req_mtu + VLAN_HLEN);
695 if (rc) {
696 release_tx_pools(adapter);
697 return rc;
698 }
699
700 rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
701 IBMVNIC_TSO_BUFS,
702 IBMVNIC_TSO_BUF_SZ);
703 if (rc) {
704 release_tx_pools(adapter);
705 return rc;
706 }
707 }
708
709 return 0;
710 }
711
712 static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
713 {
714 int i;
715
716 if (adapter->napi_enabled)
717 return;
718
719 for (i = 0; i < adapter->req_rx_queues; i++)
720 napi_enable(&adapter->napi[i]);
721
722 adapter->napi_enabled = true;
723 }
724
725 static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
726 {
727 int i;
728
729 if (!adapter->napi_enabled)
730 return;
731
732 for (i = 0; i < adapter->req_rx_queues; i++) {
733 netdev_dbg(adapter->netdev, "Disabling napi[%d]\n", i);
734 napi_disable(&adapter->napi[i]);
735 }
736
737 adapter->napi_enabled = false;
738 }
739
740 static int init_napi(struct ibmvnic_adapter *adapter)
741 {
742 int i;
743
744 adapter->napi = kcalloc(adapter->req_rx_queues,
745 sizeof(struct napi_struct), GFP_KERNEL);
746 if (!adapter->napi)
747 return -ENOMEM;
748
749 for (i = 0; i < adapter->req_rx_queues; i++) {
750 netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
751 netif_napi_add(adapter->netdev, &adapter->napi[i],
752 ibmvnic_poll, NAPI_POLL_WEIGHT);
753 }
754
755 adapter->num_active_rx_napi = adapter->req_rx_queues;
756 return 0;
757 }
758
759 static void release_napi(struct ibmvnic_adapter *adapter)
760 {
761 int i;
762
763 if (!adapter->napi)
764 return;
765
766 for (i = 0; i < adapter->num_active_rx_napi; i++) {
767 netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
768 netif_napi_del(&adapter->napi[i]);
769 }
770
771 kfree(adapter->napi);
772 adapter->napi = NULL;
773 adapter->num_active_rx_napi = 0;
774 adapter->napi_enabled = false;
775 }
776
777 static int ibmvnic_login(struct net_device *netdev)
778 {
779 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
780 unsigned long timeout = msecs_to_jiffies(30000);
781 int retry_count = 0;
782 bool retry;
783 int rc;
784
785 do {
786 retry = false;
787 if (retry_count > IBMVNIC_MAX_QUEUES) {
788 netdev_warn(netdev, "Login attempts exceeded\n");
789 return -1;
790 }
791
792 adapter->init_done_rc = 0;
793 reinit_completion(&adapter->init_done);
794 rc = send_login(adapter);
795 if (rc) {
796 netdev_warn(netdev, "Unable to login\n");
797 return rc;
798 }
799
800 if (!wait_for_completion_timeout(&adapter->init_done,
801 timeout)) {
802 netdev_warn(netdev, "Login timed out\n");
803 return -1;
804 }
805
806 if (adapter->init_done_rc == PARTIALSUCCESS) {
807 retry_count++;
808 release_sub_crqs(adapter, 1);
809
810 retry = true;
811 netdev_dbg(netdev,
812 "Received partial success, retrying...\n");
813 adapter->init_done_rc = 0;
814 reinit_completion(&adapter->init_done);
815 send_cap_queries(adapter);
816 if (!wait_for_completion_timeout(&adapter->init_done,
817 timeout)) {
818 netdev_warn(netdev,
819 "Capabilities query timed out\n");
820 return -1;
821 }
822
823 rc = init_sub_crqs(adapter);
824 if (rc) {
825 netdev_warn(netdev,
826 "SCRQ initialization failed\n");
827 return -1;
828 }
829
830 rc = init_sub_crq_irqs(adapter);
831 if (rc) {
832 netdev_warn(netdev,
833 "SCRQ irq initialization failed\n");
834 return -1;
835 }
836 } else if (adapter->init_done_rc) {
837 netdev_warn(netdev, "Adapter login failed\n");
838 return -1;
839 }
840 } while (retry);
841
842 __ibmvnic_set_mac(netdev, adapter->mac_addr);
843
844 return 0;
845 }
846
847 static void release_login_buffer(struct ibmvnic_adapter *adapter)
848 {
849 kfree(adapter->login_buf);
850 adapter->login_buf = NULL;
851 }
852
853 static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
854 {
855 kfree(adapter->login_rsp_buf);
856 adapter->login_rsp_buf = NULL;
857 }
858
859 static void release_resources(struct ibmvnic_adapter *adapter)
860 {
861 release_vpd_data(adapter);
862
863 release_tx_pools(adapter);
864 release_rx_pools(adapter);
865
866 release_napi(adapter);
867 release_login_rsp_buffer(adapter);
868 }
869
870 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
871 {
872 struct net_device *netdev = adapter->netdev;
873 unsigned long timeout = msecs_to_jiffies(30000);
874 union ibmvnic_crq crq;
875 bool resend;
876 int rc;
877
878 netdev_dbg(netdev, "setting link state %d\n", link_state);
879
880 memset(&crq, 0, sizeof(crq));
881 crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
882 crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
883 crq.logical_link_state.link_state = link_state;
884
885 do {
886 resend = false;
887
888 reinit_completion(&adapter->init_done);
889 rc = ibmvnic_send_crq(adapter, &crq);
890 if (rc) {
891 netdev_err(netdev, "Failed to set link state\n");
892 return rc;
893 }
894
895 if (!wait_for_completion_timeout(&adapter->init_done,
896 timeout)) {
897 netdev_err(netdev, "timeout setting link state\n");
898 return -1;
899 }
900
901 if (adapter->init_done_rc == 1) {
902 /* Partial success, delay and re-send */
903 mdelay(1000);
904 resend = true;
905 } else if (adapter->init_done_rc) {
906 netdev_warn(netdev, "Unable to set link state, rc=%d\n",
907 adapter->init_done_rc);
908 return adapter->init_done_rc;
909 }
910 } while (resend);
911
912 return 0;
913 }
914
915 static int set_real_num_queues(struct net_device *netdev)
916 {
917 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
918 int rc;
919
920 netdev_dbg(netdev, "Setting real tx/rx queues (%llx/%llx)\n",
921 adapter->req_tx_queues, adapter->req_rx_queues);
922
923 rc = netif_set_real_num_tx_queues(netdev, adapter->req_tx_queues);
924 if (rc) {
925 netdev_err(netdev, "failed to set the number of tx queues\n");
926 return rc;
927 }
928
929 rc = netif_set_real_num_rx_queues(netdev, adapter->req_rx_queues);
930 if (rc)
931 netdev_err(netdev, "failed to set the number of rx queues\n");
932
933 return rc;
934 }
935
936 static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
937 {
938 struct device *dev = &adapter->vdev->dev;
939 union ibmvnic_crq crq;
940 int len = 0;
941 int rc;
942
943 if (adapter->vpd->buff)
944 len = adapter->vpd->len;
945
946 reinit_completion(&adapter->fw_done);
947 crq.get_vpd_size.first = IBMVNIC_CRQ_CMD;
948 crq.get_vpd_size.cmd = GET_VPD_SIZE;
949 rc = ibmvnic_send_crq(adapter, &crq);
950 if (rc)
951 return rc;
952 wait_for_completion(&adapter->fw_done);
953
954 if (!adapter->vpd->len)
955 return -ENODATA;
956
957 if (!adapter->vpd->buff)
958 adapter->vpd->buff = kzalloc(adapter->vpd->len, GFP_KERNEL);
959 else if (adapter->vpd->len != len)
960 adapter->vpd->buff =
961 krealloc(adapter->vpd->buff,
962 adapter->vpd->len, GFP_KERNEL);
963
964 if (!adapter->vpd->buff) {
965 dev_err(dev, "Could not allocate VPD buffer\n");
966 return -ENOMEM;
967 }
968
969 adapter->vpd->dma_addr =
970 dma_map_single(dev, adapter->vpd->buff, adapter->vpd->len,
971 DMA_FROM_DEVICE);
972 if (dma_mapping_error(dev, adapter->vpd->dma_addr)) {
973 dev_err(dev, "Could not map VPD buffer\n");
974 kfree(adapter->vpd->buff);
975 adapter->vpd->buff = NULL;
976 return -ENOMEM;
977 }
978
979 reinit_completion(&adapter->fw_done);
980 crq.get_vpd.first = IBMVNIC_CRQ_CMD;
981 crq.get_vpd.cmd = GET_VPD;
982 crq.get_vpd.ioba = cpu_to_be32(adapter->vpd->dma_addr);
983 crq.get_vpd.len = cpu_to_be32((u32)adapter->vpd->len);
984 rc = ibmvnic_send_crq(adapter, &crq);
985 if (rc) {
986 kfree(adapter->vpd->buff);
987 adapter->vpd->buff = NULL;
988 return rc;
989 }
990 wait_for_completion(&adapter->fw_done);
991
992 return 0;
993 }
994
995 static int init_resources(struct ibmvnic_adapter *adapter)
996 {
997 struct net_device *netdev = adapter->netdev;
998 int rc;
999
1000 rc = set_real_num_queues(netdev);
1001 if (rc)
1002 return rc;
1003
1004 adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
1005 if (!adapter->vpd)
1006 return -ENOMEM;
1007
1008 /* Vital Product Data (VPD) */
1009 rc = ibmvnic_get_vpd(adapter);
1010 if (rc) {
1011 netdev_err(netdev, "failed to initialize Vital Product Data (VPD)\n");
1012 return rc;
1013 }
1014
1015 adapter->map_id = 1;
1016
1017 rc = init_napi(adapter);
1018 if (rc)
1019 return rc;
1020
1021 send_map_query(adapter);
1022
1023 rc = init_rx_pools(netdev);
1024 if (rc)
1025 return rc;
1026
1027 rc = init_tx_pools(netdev);
1028 return rc;
1029 }
1030
1031 static int __ibmvnic_open(struct net_device *netdev)
1032 {
1033 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1034 enum vnic_state prev_state = adapter->state;
1035 int i, rc;
1036
1037 adapter->state = VNIC_OPENING;
1038 replenish_pools(adapter);
1039 ibmvnic_napi_enable(adapter);
1040
1041 /* We're ready to receive frames, enable the sub-crq interrupts and
1042  * set the logical link state to up
1043  */
1044 for (i = 0; i < adapter->req_rx_queues; i++) {
1045 netdev_dbg(netdev, "Enabling rx_scrq[%d] irq\n", i);
1046 if (prev_state == VNIC_CLOSED)
1047 enable_irq(adapter->rx_scrq[i]->irq);
1048 enable_scrq_irq(adapter, adapter->rx_scrq[i]);
1049 }
1050
1051 for (i = 0; i < adapter->req_tx_queues; i++) {
1052 netdev_dbg(netdev, "Enabling tx_scrq[%d] irq\n", i);
1053 if (prev_state == VNIC_CLOSED)
1054 enable_irq(adapter->tx_scrq[i]->irq);
1055 enable_scrq_irq(adapter, adapter->tx_scrq[i]);
1056 }
1057
1058 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
1059 if (rc) {
1060 for (i = 0; i < adapter->req_rx_queues; i++)
1061 napi_disable(&adapter->napi[i]);
1062 release_resources(adapter);
1063 return rc;
1064 }
1065
1066 netif_tx_start_all_queues(netdev);
1067
1068 if (prev_state == VNIC_CLOSED) {
1069 for (i = 0; i < adapter->req_rx_queues; i++)
1070 napi_schedule(&adapter->napi[i]);
1071 }
1072
1073 adapter->state = VNIC_OPEN;
1074 return rc;
1075 }
1076
1077 static int ibmvnic_open(struct net_device *netdev)
1078 {
1079 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1080 int rc;
1081
1082 /* If device failover is pending, just set device state and return.
1083  * Device operation will be handled by reset routine.
1084  */
1085 if (adapter->failover_pending) {
1086 adapter->state = VNIC_OPEN;
1087 return 0;
1088 }
1089
1090 if (adapter->state != VNIC_CLOSED) {
1091 rc = ibmvnic_login(netdev);
1092 if (rc)
1093 return rc;
1094
1095 rc = init_resources(adapter);
1096 if (rc) {
1097 netdev_err(netdev, "failed to initialize resources\n");
1098 release_resources(adapter);
1099 return rc;
1100 }
1101 }
1102
1103 rc = __ibmvnic_open(netdev);
1104
1105 return rc;
1106 }
1107
1108 static void clean_rx_pools(struct ibmvnic_adapter *adapter)
1109 {
1110 struct ibmvnic_rx_pool *rx_pool;
1111 struct ibmvnic_rx_buff *rx_buff;
1112 u64 rx_entries;
1113 int rx_scrqs;
1114 int i, j;
1115
1116 if (!adapter->rx_pool)
1117 return;
1118
1119 rx_scrqs = adapter->num_active_rx_pools;
1120 rx_entries = adapter->req_rx_add_entries_per_subcrq;
1121
1122 /* Free any remaining skbs in the rx buffer pools */
1123 for (i = 0; i < rx_scrqs; i++) {
1124 rx_pool = &adapter->rx_pool[i];
1125 if (!rx_pool || !rx_pool->rx_buff)
1126 continue;
1127
1128 netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
1129 for (j = 0; j < rx_entries; j++) {
1130 rx_buff = &rx_pool->rx_buff[j];
1131 if (rx_buff && rx_buff->skb) {
1132 dev_kfree_skb_any(rx_buff->skb);
1133 rx_buff->skb = NULL;
1134 }
1135 }
1136 }
1137 }
1138
1139 static void clean_one_tx_pool(struct ibmvnic_adapter *adapter,
1140 struct ibmvnic_tx_pool *tx_pool)
1141 {
1142 struct ibmvnic_tx_buff *tx_buff;
1143 u64 tx_entries;
1144 int i;
1145
1146 if (!tx_pool || !tx_pool->tx_buff)
1147 return;
1148
1149 tx_entries = tx_pool->num_buffers;
1150
1151 for (i = 0; i < tx_entries; i++) {
1152 tx_buff = &tx_pool->tx_buff[i];
1153 if (tx_buff && tx_buff->skb) {
1154 dev_kfree_skb_any(tx_buff->skb);
1155 tx_buff->skb = NULL;
1156 }
1157 }
1158 }
1159
1160 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
1161 {
1162 int tx_scrqs;
1163 int i;
1164
1165 if (!adapter->tx_pool || !adapter->tso_pool)
1166 return;
1167
1168 tx_scrqs = adapter->num_active_tx_pools;
1169
1170 /* Free any remaining skbs in the tx buffer pools */
1171 for (i = 0; i < tx_scrqs; i++) {
1172 netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
1173 clean_one_tx_pool(adapter, &adapter->tx_pool[i]);
1174 clean_one_tx_pool(adapter, &adapter->tso_pool[i]);
1175 }
1176 }
1177
1178 static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter)
1179 {
1180 struct net_device *netdev = adapter->netdev;
1181 int i;
1182
1183 if (adapter->tx_scrq) {
1184 for (i = 0; i < adapter->req_tx_queues; i++)
1185 if (adapter->tx_scrq[i]->irq) {
1186 netdev_dbg(netdev,
1187 "Disabling tx_scrq[%d] irq\n", i);
1188 disable_scrq_irq(adapter, adapter->tx_scrq[i]);
1189 disable_irq(adapter->tx_scrq[i]->irq);
1190 }
1191 }
1192
1193 if (adapter->rx_scrq) {
1194 for (i = 0; i < adapter->req_rx_queues; i++) {
1195 if (adapter->rx_scrq[i]->irq) {
1196 netdev_dbg(netdev,
1197 "Disabling rx_scrq[%d] irq\n", i);
1198 disable_scrq_irq(adapter, adapter->rx_scrq[i]);
1199 disable_irq(adapter->rx_scrq[i]->irq);
1200 }
1201 }
1202 }
1203 }
1204
1205 static void ibmvnic_cleanup(struct net_device *netdev)
1206 {
1207 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1208
1209 /* ensure that transmissions are stopped if called by do_reset */
1210 if (test_bit(0, &adapter->resetting))
1211 netif_tx_disable(netdev);
1212 else
1213 netif_tx_stop_all_queues(netdev);
1214
1215 ibmvnic_napi_disable(adapter);
1216 ibmvnic_disable_irqs(adapter);
1217
1218 clean_rx_pools(adapter);
1219 clean_tx_pools(adapter);
1220 }
1221
1222 static int __ibmvnic_close(struct net_device *netdev)
1223 {
1224 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1225 int rc = 0;
1226
1227 adapter->state = VNIC_CLOSING;
1228 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1229 if (rc)
1230 return rc;
1231 adapter->state = VNIC_CLOSED;
1232 return 0;
1233 }
1234
1235 static int ibmvnic_close(struct net_device *netdev)
1236 {
1237 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1238 int rc;
1239
1240 /* If device failover is pending, just set device state and return.
1241  * Device operation will be handled by reset routine.
1242  */
1243 if (adapter->failover_pending) {
1244 adapter->state = VNIC_CLOSED;
1245 return 0;
1246 }
1247
1248 rc = __ibmvnic_close(netdev);
1249 ibmvnic_cleanup(netdev);
1250
1251 return rc;
1252 }
1253
1254 /**
1255  * build_hdr_data - creates L2/L3/L4 header data buffer
1256  * @hdr_field: bitfield determining needed headers
1257  * @skb: socket buffer
1258  * @hdr_len: array of header lengths
1259  * @hdr_data: buffer to write the headers into
1260  *
1261  * Reads hdr_field to determine which headers are needed by firmware.
1262  * Builds a buffer containing these headers.  Saves individual header
1263  * lengths and total buffer length to be used to build descriptors.
1264  */
1265 static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
1266 int *hdr_len, u8 *hdr_data)
1267 {
1268 int len = 0;
1269 u8 *hdr;
1270
1271 if (skb_vlan_tagged(skb) && !skb_vlan_tag_present(skb))
1272 hdr_len[0] = sizeof(struct vlan_ethhdr);
1273 else
1274 hdr_len[0] = sizeof(struct ethhdr);
1275
1276 if (skb->protocol == htons(ETH_P_IP)) {
1277 hdr_len[1] = ip_hdr(skb)->ihl * 4;
1278 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1279 hdr_len[2] = tcp_hdrlen(skb);
1280 else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1281 hdr_len[2] = sizeof(struct udphdr);
1282 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1283 hdr_len[1] = sizeof(struct ipv6hdr);
1284 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1285 hdr_len[2] = tcp_hdrlen(skb);
1286 else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
1287 hdr_len[2] = sizeof(struct udphdr);
1288 } else if (skb->protocol == htons(ETH_P_ARP)) {
1289 hdr_len[1] = arp_hdr_len(skb->dev);
1290 hdr_len[2] = 0;
1291 }
1292
1293 memset(hdr_data, 0, 120);
1294 if ((hdr_field >> 6) & 1) {
1295 hdr = skb_mac_header(skb);
1296 memcpy(hdr_data, hdr, hdr_len[0]);
1297 len += hdr_len[0];
1298 }
1299
1300 if ((hdr_field >> 5) & 1) {
1301 hdr = skb_network_header(skb);
1302 memcpy(hdr_data + len, hdr, hdr_len[1]);
1303 len += hdr_len[1];
1304 }
1305
1306 if ((hdr_field >> 4) & 1) {
1307 hdr = skb_transport_header(skb);
1308 memcpy(hdr_data + len, hdr, hdr_len[2]);
1309 len += hdr_len[2];
1310 }
1311 return len;
1312 }
1313
1314 /**
1315  * create_hdr_descs - create header and header extension descriptors
1316  * @hdr_field: bitfield determining needed headers
1317  * @hdr_data: buffer containing header data
1318  * @len: length of the header data buffer
1319  * @hdr_len: array of individual header lengths
1320  * @scrq_arr: descriptor array
1321  *
1322  * Creates header and, if needed, header extension descriptors and
1323  * places them in a descriptor array, scrq_arr.  Returns the number
1324  * of descriptors created.
1325  */
1326 static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
1327 union sub_crq *scrq_arr)
1328 {
1329 union sub_crq hdr_desc;
1330 int tmp_len = len;
1331 int num_descs = 0;
1332 u8 *data, *cur;
1333 int tmp;
1334
1335 while (tmp_len > 0) {
1336 cur = hdr_data + len - tmp_len;
1337
1338 memset(&hdr_desc, 0, sizeof(hdr_desc));
1339 if (cur != hdr_data) {
1340 data = hdr_desc.hdr_ext.data;
1341 tmp = tmp_len > 29 ? 29 : tmp_len;
1342 hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
1343 hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
1344 hdr_desc.hdr_ext.len = tmp;
1345 } else {
1346 data = hdr_desc.hdr.data;
1347 tmp = tmp_len > 24 ? 24 : tmp_len;
1348 hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
1349 hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
1350 hdr_desc.hdr.len = tmp;
1351 hdr_desc.hdr.l2_len = (u8)hdr_len[0];
1352 hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
1353 hdr_desc.hdr.l4_len = (u8)hdr_len[2];
1354 hdr_desc.hdr.flag = hdr_field << 1;
1355 }
1356 memcpy(data, cur, tmp);
1357 tmp_len -= tmp;
1358 *scrq_arr = hdr_desc;
1359 scrq_arr++;
1360 num_descs++;
1361 }
1362
1363 return num_descs;
1364 }
1365
1366 /**
1367  * build_hdr_descs_arr - build a header descriptor array
1368  * @txbuff: tx buffer containing the socket buffer and header data
1369  * @num_entries: number of descriptors to be sent
1370  * @hdr_field: bit field determining which headers will be sent
1371  *
1372  * This function will build a TX descriptor array with applicable
1373  * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
1374  * The first slot of txbuff->indir_arr is left for the main TX
1375  * descriptor, so header descriptors are placed starting at index 1.
1376  */
1377 static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
1378 int *num_entries, u8 hdr_field)
1379 {
1380 int hdr_len[3] = {0, 0, 0};
1381 int tot_len;
1382 u8 *hdr_data = txbuff->hdr_data;
1383
1384 tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
1385 txbuff->hdr_data);
1386 *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
1387 txbuff->indir_arr + 1);
1388 }
1389
1390 static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
1391 struct net_device *netdev)
1392 {
1393 /* For some backing devices, mishandling of small packets
1394  * can result in a loss of connection or TX stall. Device
1395  * architects recommend that we avoid transmitting small
1396  * packets to avoid these issues.
1397  */
1398
1399 if (skb->len < netdev->min_mtu)
1400 return skb_put_padto(skb, netdev->min_mtu);
1401
1402 return 0;
1403 }
1404
1405 static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1406 {
1407 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1408 int queue_num = skb_get_queue_mapping(skb);
1409 u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
1410 struct device *dev = &adapter->vdev->dev;
1411 struct ibmvnic_tx_buff *tx_buff = NULL;
1412 struct ibmvnic_sub_crq_queue *tx_scrq;
1413 struct ibmvnic_tx_pool *tx_pool;
1414 unsigned int tx_send_failed = 0;
1415 unsigned int tx_map_failed = 0;
1416 unsigned int tx_dropped = 0;
1417 unsigned int tx_packets = 0;
1418 unsigned int tx_bytes = 0;
1419 dma_addr_t data_dma_addr;
1420 struct netdev_queue *txq;
1421 unsigned long lpar_rc;
1422 union sub_crq tx_crq;
1423 unsigned int offset;
1424 int num_entries = 1;
1425 unsigned char *dst;
1426 u64 *handle_array;
1427 int index = 0;
1428 u8 proto = 0;
1429 netdev_tx_t ret = NETDEV_TX_OK;
1430
1431 if (test_bit(0, &adapter->resetting)) {
1432 if (!netif_subqueue_stopped(netdev, skb))
1433 netif_stop_subqueue(netdev, queue_num);
1434 dev_kfree_skb_any(skb);
1435
1436 tx_send_failed++;
1437 tx_dropped++;
1438 ret = NETDEV_TX_OK;
1439 goto out;
1440 }
1441
1442 if (ibmvnic_xmit_workarounds(skb, netdev)) {
1443 tx_dropped++;
1444 tx_send_failed++;
1445 ret = NETDEV_TX_OK;
1446 goto out;
1447 }
1448 if (skb_is_gso(skb))
1449 tx_pool = &adapter->tso_pool[queue_num];
1450 else
1451 tx_pool = &adapter->tx_pool[queue_num];
1452
1453 tx_scrq = adapter->tx_scrq[queue_num];
1454 txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
1455 handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
1456 be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
1457
1458 index = tx_pool->free_map[tx_pool->consumer_index];
1459
1460 if (index == IBMVNIC_INVALID_MAP) {
1461 dev_kfree_skb_any(skb);
1462 tx_send_failed++;
1463 tx_dropped++;
1464 ret = NETDEV_TX_OK;
1465 goto out;
1466 }
1467
1468 tx_pool->free_map[tx_pool->consumer_index] = IBMVNIC_INVALID_MAP;
1469
1470 offset = index * tx_pool->buf_size;
1471 dst = tx_pool->long_term_buff.buff + offset;
1472 memset(dst, 0, tx_pool->buf_size);
1473 data_dma_addr = tx_pool->long_term_buff.addr + offset;
1474
1475 if (skb_shinfo(skb)->nr_frags) {
1476 int cur, i;
1477
1478 /* Copy the head */
1479 skb_copy_from_linear_data(skb, dst, skb_headlen(skb));
1480 cur = skb_headlen(skb);
1481
1482 /* Copy the frags */
1483 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1484 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1485
1486 memcpy(dst + cur,
1487 page_address(skb_frag_page(frag)) +
1488 skb_frag_off(frag), skb_frag_size(frag));
1489 cur += skb_frag_size(frag);
1490 }
1491 } else {
1492 skb_copy_from_linear_data(skb, dst, skb->len);
1493 }
1494
1495 tx_pool->consumer_index =
1496 (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
1497
1498 tx_buff = &tx_pool->tx_buff[index];
1499 tx_buff->skb = skb;
1500 tx_buff->data_dma[0] = data_dma_addr;
1501 tx_buff->data_len[0] = skb->len;
1502 tx_buff->index = index;
1503 tx_buff->pool_index = queue_num;
1504 tx_buff->last_frag = true;
1505
1506 memset(&tx_crq, 0, sizeof(tx_crq));
1507 tx_crq.v1.first = IBMVNIC_CRQ_CMD;
1508 tx_crq.v1.type = IBMVNIC_TX_DESC;
1509 tx_crq.v1.n_crq_elem = 1;
1510 tx_crq.v1.n_sge = 1;
1511 tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
1512
1513 if (skb_is_gso(skb))
1514 tx_crq.v1.correlator =
1515 cpu_to_be32(index | IBMVNIC_TSO_POOL_MASK);
1516 else
1517 tx_crq.v1.correlator = cpu_to_be32(index);
1518 tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
1519 tx_crq.v1.sge_len = cpu_to_be32(skb->len);
1520 tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);
1521
1522 if (adapter->vlan_header_insertion && skb_vlan_tag_present(skb)) {
1523 tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
1524 tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
1525 }
1526
1527 if (skb->protocol == htons(ETH_P_IP)) {
1528 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
1529 proto = ip_hdr(skb)->protocol;
1530 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1531 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;
1532 proto = ipv6_hdr(skb)->nexthdr;
1533 }
1534
1535 if (proto == IPPROTO_TCP)
1536 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
1537 else if (proto == IPPROTO_UDP)
1538 tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
1539
1540 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1541 tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
1542 hdrs += 2;
1543 }
1544 if (skb_is_gso(skb)) {
1545 tx_crq.v1.flags1 |= IBMVNIC_TX_LSO;
1546 tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
1547 hdrs += 2;
1548 }
1549
1550 if ((*hdrs >> 7) & 1) {
1551 build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
1552 tx_crq.v1.n_crq_elem = num_entries;
1553 tx_buff->num_entries = num_entries;
1554 tx_buff->indir_arr[0] = tx_crq;
1555 tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
1556 sizeof(tx_buff->indir_arr),
1557 DMA_TO_DEVICE);
1558 if (dma_mapping_error(dev, tx_buff->indir_dma)) {
1559 dev_kfree_skb_any(skb);
1560 tx_buff->skb = NULL;
1561 if (!firmware_has_feature(FW_FEATURE_CMO))
1562 dev_err(dev, "tx: unable to map descriptor array\n");
1563 tx_map_failed++;
1564 tx_dropped++;
1565 ret = NETDEV_TX_OK;
1566 goto tx_err_out;
1567 }
1568 lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
1569 (u64)tx_buff->indir_dma,
1570 (u64)num_entries);
1571 dma_unmap_single(dev, tx_buff->indir_dma,
1572 sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
1573 } else {
1574 tx_buff->num_entries = num_entries;
1575 lpar_rc = send_subcrq(adapter, handle_array[queue_num],
1576 &tx_crq);
1577 }
1578 if (lpar_rc != H_SUCCESS) {
1579 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1580 dev_err_ratelimited(dev, "tx: send failed\n");
1581 dev_kfree_skb_any(skb);
1582 tx_buff->skb = NULL;
1583
1584 if (lpar_rc == H_CLOSED || adapter->failover_pending) {
1585 /* Disable TX and report carrier off if queue is closed
1586  * or pending failover.
1587  * Firmware guarantees that a signal will be sent to the
1588  * driver, triggering a reset or some other action.
1589  */
1590 netif_tx_stop_all_queues(netdev);
1591 netif_carrier_off(netdev);
1592 }
1593
1594 tx_send_failed++;
1595 tx_dropped++;
1596 ret = NETDEV_TX_OK;
1597 goto tx_err_out;
1598 }
1599
1600 if (atomic_add_return(num_entries, &tx_scrq->used)
1601 >= adapter->req_tx_entries_per_subcrq) {
1602 netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
1603 netif_stop_subqueue(netdev, queue_num);
1604 }
1605
1606 tx_packets++;
1607 tx_bytes += skb->len;
1608 txq->trans_start = jiffies;
1609 ret = NETDEV_TX_OK;
1610 goto out;
1611
1612 tx_err_out:
1613 /* roll back consumer index and map array */
1614 if (tx_pool->consumer_index == 0)
1615 tx_pool->consumer_index =
1616 tx_pool->num_buffers - 1;
1617 else
1618 tx_pool->consumer_index--;
1619 tx_pool->free_map[tx_pool->consumer_index] = index;
1620 out:
1621 netdev->stats.tx_dropped += tx_dropped;
1622 netdev->stats.tx_bytes += tx_bytes;
1623 netdev->stats.tx_packets += tx_packets;
1624 adapter->tx_send_failed += tx_send_failed;
1625 adapter->tx_map_failed += tx_map_failed;
1626 adapter->tx_stats_buffers[queue_num].packets += tx_packets;
1627 adapter->tx_stats_buffers[queue_num].bytes += tx_bytes;
1628 adapter->tx_stats_buffers[queue_num].dropped_packets += tx_dropped;
1629
1630 return ret;
1631 }
1632
1633 static void ibmvnic_set_multi(struct net_device *netdev)
1634 {
1635 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1636 struct netdev_hw_addr *ha;
1637 union ibmvnic_crq crq;
1638
1639 memset(&crq, 0, sizeof(crq));
1640 crq.request_capability.first = IBMVNIC_CRQ_CMD;
1641 crq.request_capability.cmd = REQUEST_CAPABILITY;
1642
1643 if (netdev->flags & IFF_PROMISC) {
1644 if (!adapter->promisc_supported)
1645 return;
1646 } else {
1647 if (netdev->flags & IFF_ALLMULTI) {
1648 /* Accept all multicast */
1649 memset(&crq, 0, sizeof(crq));
1650 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1651 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1652 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
1653 ibmvnic_send_crq(adapter, &crq);
1654 } else if (netdev_mc_empty(netdev)) {
1655 /* Reject all multicast */
1656 memset(&crq, 0, sizeof(crq));
1657 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1658 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1659 crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
1660 ibmvnic_send_crq(adapter, &crq);
1661 } else {
1662 /* Accept one or more multicast(s) */
1663 netdev_for_each_mc_addr(ha, netdev) {
1664 memset(&crq, 0, sizeof(crq));
1665 crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
1666 crq.multicast_ctrl.cmd = MULTICAST_CTRL;
1667 crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
1668 ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
1669 ha->addr);
1670 ibmvnic_send_crq(adapter, &crq);
1671 }
1672 }
1673 }
1674 }
1675
1676 static int __ibmvnic_set_mac(struct net_device *netdev, u8 *dev_addr)
1677 {
1678 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1679 union ibmvnic_crq crq;
1680 int rc;
1681
1682 if (!is_valid_ether_addr(dev_addr)) {
1683 rc = -EADDRNOTAVAIL;
1684 goto err;
1685 }
1686
1687 memset(&crq, 0, sizeof(crq));
1688 crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
1689 crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
1690 ether_addr_copy(&crq.change_mac_addr.mac_addr[0], dev_addr);
1691
1692 reinit_completion(&adapter->fw_done);
1693 rc = ibmvnic_send_crq(adapter, &crq);
1694 if (rc) {
1695 rc = -EIO;
1696 goto err;
1697 }
1698
1699 wait_for_completion(&adapter->fw_done);
1700
1701 if (adapter->fw_done_rc) {
1702 rc = -EIO;
1703 goto err;
1704 }
1705
1706 return 0;
1707 err:
1708 ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
1709 return rc;
1710 }
1711
1712 static int ibmvnic_set_mac(struct net_device *netdev, void *p)
1713 {
1714 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
1715 struct sockaddr *addr = p;
1716 int rc;
1717
1718 rc = 0;
1719 ether_addr_copy(adapter->mac_addr, addr->sa_data);
1720 if (adapter->state != VNIC_PROBED)
1721 rc = __ibmvnic_set_mac(netdev, addr->sa_data);
1722
1723 return rc;
1724 }
1725
1726 /*
1727  * do_change_param_reset returns zero if we are able to keep processing
1728  * reset events, or non-zero if we hit a fatal error and must halt.
1729  */
1730 static int do_change_param_reset(struct ibmvnic_adapter *adapter,
1731 struct ibmvnic_rwi *rwi,
1732 u32 reset_state)
1733 {
1734 struct net_device *netdev = adapter->netdev;
1735 int i, rc;
1736
1737 netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
1738 rwi->reset_reason);
1739
1740 netif_carrier_off(netdev);
1741 adapter->reset_reason = rwi->reset_reason;
1742
1743 ibmvnic_cleanup(netdev);
1744
1745 if (reset_state == VNIC_OPEN) {
1746 rc = __ibmvnic_close(netdev);
1747 if (rc)
1748 return rc;
1749 }
1750
1751 release_resources(adapter);
1752 release_sub_crqs(adapter, 1);
1753 release_crq_queue(adapter);
1754
1755 adapter->state = VNIC_PROBED;
1756
1757 rc = init_crq_queue(adapter);
1758
1759 if (rc) {
1760 netdev_err(adapter->netdev,
1761 "Couldn't initialize crq. rc=%d\n", rc);
1762 return rc;
1763 }
1764
1765 rc = ibmvnic_reset_init(adapter);
1766 if (rc)
1767 return IBMVNIC_INIT_FAILED;
1768
1769 /* If the adapter was in PROBE state prior to the reset,
1770  * exit here.
1771  */
1772 if (reset_state == VNIC_PROBED)
1773 return 0;
1774
1775 rc = ibmvnic_login(netdev);
1776 if (rc) {
1777 adapter->state = reset_state;
1778 return rc;
1779 }
1780
1781 rc = init_resources(adapter);
1782 if (rc)
1783 return rc;
1784
1785 ibmvnic_disable_irqs(adapter);
1786
1787 adapter->state = VNIC_CLOSED;
1788
1789 if (reset_state == VNIC_CLOSED)
1790 return 0;
1791
1792 rc = __ibmvnic_open(netdev);
1793 if (rc)
1794 return IBMVNIC_OPEN_FAILED;
1795
1796 /* refresh device's multicast list */
1797 ibmvnic_set_multi(netdev);
1798
1799 /* kick napi */
1800 for (i = 0; i < adapter->req_rx_queues; i++)
1801 napi_schedule(&adapter->napi[i]);
1802
1803 return 0;
1804 }
1805
1806 /*
1807  * do_reset returns zero if we are able to keep processing reset events, or
1808  * non-zero if we hit a fatal error and must halt.
1809  */
1810 static int do_reset(struct ibmvnic_adapter *adapter,
1811 struct ibmvnic_rwi *rwi, u32 reset_state)
1812 {
1813 u64 old_num_rx_queues, old_num_tx_queues;
1814 u64 old_num_rx_slots, old_num_tx_slots;
1815 struct net_device *netdev = adapter->netdev;
1816 int i, rc;
1817
1818 netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
1819 rwi->reset_reason);
1820
1821 rtnl_lock();
1822
1823 netif_carrier_off(netdev);
1824 adapter->reset_reason = rwi->reset_reason;
1825
1826 old_num_rx_queues = adapter->req_rx_queues;
1827 old_num_tx_queues = adapter->req_tx_queues;
1828 old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq;
1829 old_num_tx_slots = adapter->req_tx_entries_per_subcrq;
1830
1831 ibmvnic_cleanup(netdev);
1832
1833 if (reset_state == VNIC_OPEN &&
1834 adapter->reset_reason != VNIC_RESET_MOBILITY &&
1835 adapter->reset_reason != VNIC_RESET_FAILOVER) {
1836 adapter->state = VNIC_CLOSING;
1837
1838 /* Release the RTNL lock before link state change and
1839  * re-acquire after the link state change to allow
1840  * linkwatch_event to grab the RTNL lock and run during
1841  * a reset.
1842  */
1843 rtnl_unlock();
1844 rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
1845 rtnl_lock();
1846 if (rc)
1847 goto out;
1848
1849 if (adapter->state != VNIC_CLOSING) {
1850 rc = -1;
1851 goto out;
1852 }
1853
1854 adapter->state = VNIC_CLOSED;
1855 }
1856
1857 if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
1858 /* remove the closed state so when we call open it appears
1859  * we are coming from the probed state.
1860  */
1861 adapter->state = VNIC_PROBED;
1862
1863 if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
1864 rc = ibmvnic_reenable_crq_queue(adapter);
1865 release_sub_crqs(adapter, 1);
1866 } else {
1867 rc = ibmvnic_reset_crq(adapter);
1868 if (!rc)
1869 rc = vio_enable_interrupts(adapter->vdev);
1870 }
1871
1872 if (rc) {
1873 netdev_err(adapter->netdev,
1874 "Couldn't initialize crq. rc=%d\n", rc);
1875 goto out;
1876 }
1877
1878 rc = ibmvnic_reset_init(adapter);
1879 if (rc) {
1880 rc = IBMVNIC_INIT_FAILED;
1881 goto out;
1882 }
1883
1884 /* If the adapter was in PROBE state prior to the reset,
1885  * exit here.
1886  */
1887 if (reset_state == VNIC_PROBED) {
1888 rc = 0;
1889 goto out;
1890 }
1891
1892 rc = ibmvnic_login(netdev);
1893 if (rc) {
1894 adapter->state = reset_state;
1895 goto out;
1896 }
1897
1898 if (adapter->req_rx_queues != old_num_rx_queues ||
1899 adapter->req_tx_queues != old_num_tx_queues ||
1900 adapter->req_rx_add_entries_per_subcrq !=
1901 old_num_rx_slots ||
1902 adapter->req_tx_entries_per_subcrq !=
1903 old_num_tx_slots) {
1904 release_rx_pools(adapter);
1905 release_tx_pools(adapter);
1906 release_napi(adapter);
1907 release_vpd_data(adapter);
1908
1909 rc = init_resources(adapter);
1910 if (rc)
1911 goto out;
1912
1913 } else {
1914 rc = reset_tx_pools(adapter);
1915 if (rc)
1916 goto out;
1917
1918 rc = reset_rx_pools(adapter);
1919 if (rc)
1920 goto out;
1921 }
1922 ibmvnic_disable_irqs(adapter);
1923 }
1924 adapter->state = VNIC_CLOSED;
1925
1926 if (reset_state == VNIC_CLOSED) {
1927 rc = 0;
1928 goto out;
1929 }
1930
1931 rc = __ibmvnic_open(netdev);
1932 if (rc) {
1933 rc = IBMVNIC_OPEN_FAILED;
1934 goto out;
1935 }
1936
1937 /* refresh device's multicast list */
1938 ibmvnic_set_multi(netdev);
1939
1940 /* kick napi */
1941 for (i = 0; i < adapter->req_rx_queues; i++)
1942 napi_schedule(&adapter->napi[i]);
1943
1944 if (adapter->reset_reason != VNIC_RESET_FAILOVER)
1945 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
1946
1947 rc = 0;
1948
1949 out:
1950 rtnl_unlock();
1951
1952 return rc;
1953 }
1954
1955 static int do_hard_reset(struct ibmvnic_adapter *adapter,
1956 struct ibmvnic_rwi *rwi, u32 reset_state)
1957 {
1958 struct net_device *netdev = adapter->netdev;
1959 int rc;
1960
1961 netdev_dbg(adapter->netdev, "Hard resetting driver (%d)\n",
1962 rwi->reset_reason);
1963
1964 netif_carrier_off(netdev);
1965 adapter->reset_reason = rwi->reset_reason;
1966
1967 ibmvnic_cleanup(netdev);
1968 release_resources(adapter);
1969 release_sub_crqs(adapter, 0);
1970 release_crq_queue(adapter);
1971
1972 /* remove the closed state so when we call open it appears
1973  * we are coming from the probed state.
1974  */
1975 adapter->state = VNIC_PROBED;
1976
1977 reinit_completion(&adapter->init_done);
1978 rc = init_crq_queue(adapter);
1979 if (rc) {
1980 netdev_err(adapter->netdev,
1981 "Couldn't initialize crq. rc=%d\n", rc);
1982 return rc;
1983 }
1984
1985 rc = ibmvnic_init(adapter);
1986 if (rc)
1987 return rc;
1988
1989 /* If the adapter was in PROBE state prior to the reset,
1990  * exit here.
1991  */
1992 if (reset_state == VNIC_PROBED)
1993 return 0;
1994
1995 rc = ibmvnic_login(netdev);
1996 if (rc) {
1997 adapter->state = VNIC_PROBED;
1998 return 0;
1999 }
2000
2001 rc = init_resources(adapter);
2002 if (rc)
2003 return rc;
2004
2005 ibmvnic_disable_irqs(adapter);
2006 adapter->state = VNIC_CLOSED;
2007
2008 if (reset_state == VNIC_CLOSED)
2009 return 0;
2010
2011 rc = __ibmvnic_open(netdev);
2012 if (rc)
2013 return IBMVNIC_OPEN_FAILED;
2014
2015 return 0;
2016 }
2017
2018 static struct ibmvnic_rwi *get_next_rwi(struct ibmvnic_adapter *adapter)
2019 {
2020 struct ibmvnic_rwi *rwi;
2021 unsigned long flags;
2022
2023 spin_lock_irqsave(&adapter->rwi_lock, flags);
2024
2025 if (!list_empty(&adapter->rwi_list)) {
2026 rwi = list_first_entry(&adapter->rwi_list, struct ibmvnic_rwi,
2027 list);
2028 list_del(&rwi->list);
2029 } else {
2030 rwi = NULL;
2031 }
2032
2033 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2034 return rwi;
2035 }
2036
2037 static void free_all_rwi(struct ibmvnic_adapter *adapter)
2038 {
2039 struct ibmvnic_rwi *rwi;
2040
2041 rwi = get_next_rwi(adapter);
2042 while (rwi) {
2043 kfree(rwi);
2044 rwi = get_next_rwi(adapter);
2045 }
2046 }
2047
2048 static void __ibmvnic_reset(struct work_struct *work)
2049 {
2050 struct ibmvnic_rwi *rwi;
2051 struct ibmvnic_adapter *adapter;
2052 u32 reset_state;
2053 int rc = 0;
2054
2055 adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
2056
2057 if (test_and_set_bit_lock(0, &adapter->resetting)) {
2058 schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
2059 IBMVNIC_RESET_DELAY);
2060 return;
2061 }
2062
2063 reset_state = adapter->state;
2064
2065 rwi = get_next_rwi(adapter);
2066 while (rwi) {
2067 if (adapter->state == VNIC_REMOVING ||
2068 adapter->state == VNIC_REMOVED) {
2069 kfree(rwi);
2070 rc = EBUSY;
2071 break;
2072 }
2073
2074 if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
2075 /* CHANGE_PARAM requestor holds rtnl_lock */
2076 rc = do_change_param_reset(adapter, rwi, reset_state);
2077 } else if (adapter->force_reset_recovery) {
2078 /* Transport event occurred during previous reset */
2079 if (adapter->wait_for_reset) {
2080 /* Previous was CHANGE_PARAM; requestor already holds rtnl_lock */
2081 adapter->force_reset_recovery = false;
2082 rc = do_hard_reset(adapter, rwi, reset_state);
2083 } else {
2084 rtnl_lock();
2085 adapter->force_reset_recovery = false;
2086 rc = do_hard_reset(adapter, rwi, reset_state);
2087 rtnl_unlock();
2088 }
2089 } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
2090 adapter->from_passive_init)) {
2091 rc = do_reset(adapter, rwi, reset_state);
2092 }
2093 kfree(rwi);
2094 if (rc == IBMVNIC_OPEN_FAILED) {
2095 if (list_empty(&adapter->rwi_list))
2096 adapter->state = VNIC_CLOSED;
2097 else
2098 adapter->state = reset_state;
2099 rc = 0;
2100 } else if (rc && rc != IBMVNIC_INIT_FAILED &&
2101 !adapter->force_reset_recovery)
2102 break;
2103
2104 rwi = get_next_rwi(adapter);
2105
2106 if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
2107 rwi->reset_reason == VNIC_RESET_MOBILITY))
2108 adapter->force_reset_recovery = true;
2109 }
2110
2111 if (adapter->wait_for_reset) {
2112 adapter->reset_done_rc = rc;
2113 complete(&adapter->reset_done);
2114 }
2115
2116 if (rc) {
2117 netdev_dbg(adapter->netdev, "Reset failed\n");
2118 free_all_rwi(adapter);
2119 }
2120
2121 clear_bit_unlock(0, &adapter->resetting);
2122 }
2123
2124 static void __ibmvnic_delayed_reset(struct work_struct *work)
2125 {
2126 struct ibmvnic_adapter *adapter;
2127
2128 adapter = container_of(work, struct ibmvnic_adapter,
2129 ibmvnic_delayed_reset.work);
2130 __ibmvnic_reset(&adapter->ibmvnic_reset);
2131 }
2132
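/* Queue a reset request for the given reason and schedule the reset worker.
 * Requests are rejected while the adapter is being removed or a failover is
 * pending, and duplicates of an already-queued reason are skipped. Returns 0
 * on success or a negative errno.
 */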
2133 static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
2134 enum ibmvnic_reset_reason reason)
2135 {
2136 struct list_head *entry, *tmp_entry;
2137 struct ibmvnic_rwi *rwi, *tmp;
2138 struct net_device *netdev = adapter->netdev;
2139 unsigned long flags;
2140 int ret;
2141
2142 if (adapter->state == VNIC_REMOVING ||
2143 adapter->state == VNIC_REMOVED ||
2144 adapter->failover_pending) {
2145 ret = EBUSY;
2146 netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
2147 goto err;
2148 }
2149
2150 if (adapter->state == VNIC_PROBING) {
2151 netdev_warn(netdev, "Adapter reset during probe\n");
2152 ret = adapter->init_done_rc = EAGAIN;
2153 goto err;
2154 }
2155
2156 spin_lock_irqsave(&adapter->rwi_lock, flags);
2157
2158 list_for_each(entry, &adapter->rwi_list) {
2159 tmp = list_entry(entry, struct ibmvnic_rwi, list);
2160 if (tmp->reset_reason == reason) {
2161 netdev_dbg(netdev, "Skipping matching reset\n");
2162 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2163 ret = EBUSY;
2164 goto err;
2165 }
2166 }
2167
2168 rwi = kzalloc(sizeof(*rwi), GFP_ATOMIC);
2169 if (!rwi) {
2170 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2171 ibmvnic_close(netdev);
2172 ret = ENOMEM;
2173 goto err;
2174 }
2175
2176 /* A pending hard reset supersedes any queued resets; flush the queue */
2177
2178 if (adapter->force_reset_recovery && !list_empty(&adapter->rwi_list)) {
2179 list_for_each_safe(entry, tmp_entry, &adapter->rwi_list)
2180 list_del(entry);
2181 }
2182 rwi->reset_reason = reason;
2183 list_add_tail(&rwi->list, &adapter->rwi_list);
2184 spin_unlock_irqrestore(&adapter->rwi_lock, flags);
2185 netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
2186 schedule_work(&adapter->ibmvnic_reset);
2187
2188 return 0;
2189 err:
2190 return -ret;
2191 }
2192
2193 static void ibmvnic_tx_timeout(struct net_device *dev)
2194 {
2195 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2196
2197 ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
2198 }
2199
2200 static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
2201 struct ibmvnic_rx_buff *rx_buff)
2202 {
2203 struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];
2204
2205 rx_buff->skb = NULL;
2206
2207 pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
2208 pool->next_alloc = (pool->next_alloc + 1) % pool->size;
2209
2210 atomic_dec(&pool->available);
2211 }
2212
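/* NAPI poll routine: drain up to @budget RX completions from this queue's
 * sub-CRQ, hand the frames to the stack via napi_gro_receive(), replenish the
 * RX pool and re-enable the queue interrupt once the budget is not exhausted.
 */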
2213 static int ibmvnic_poll(struct napi_struct *napi, int budget)
2214 {
2215 struct net_device *netdev = napi->dev;
2216 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2217 int scrq_num = (int)(napi - adapter->napi);
2218 int frames_processed = 0;
2219
2220 restart_poll:
2221 while (frames_processed < budget) {
2222 struct sk_buff *skb;
2223 struct ibmvnic_rx_buff *rx_buff;
2224 union sub_crq *next;
2225 u32 length;
2226 u16 offset;
2227 u8 flags = 0;
2228
2229 if (unlikely(test_bit(0, &adapter->resetting) &&
2230 adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
2231 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2232 napi_complete_done(napi, frames_processed);
2233 return frames_processed;
2234 }
2235
2236 if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
2237 break;
2238 next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
2239 rx_buff =
2240 (struct ibmvnic_rx_buff *)be64_to_cpu(next->
2241 rx_comp.correlator);
2242
2243 if (next->rx_comp.rc) {
2244 netdev_dbg(netdev, "rx buffer returned with rc %x\n",
2245 be16_to_cpu(next->rx_comp.rc));
2246
2247 next->rx_comp.first = 0;
2248 dev_kfree_skb_any(rx_buff->skb);
2249 remove_buff_from_pool(adapter, rx_buff);
2250 continue;
2251 } else if (!rx_buff->skb) {
2252 /* free the entry */
2253 next->rx_comp.first = 0;
2254 remove_buff_from_pool(adapter, rx_buff);
2255 continue;
2256 }
2257
2258 length = be32_to_cpu(next->rx_comp.len);
2259 offset = be16_to_cpu(next->rx_comp.off_frame_data);
2260 flags = next->rx_comp.flags;
2261 skb = rx_buff->skb;
2262 skb_copy_to_linear_data(skb, rx_buff->data + offset,
2263 length);
2264
2265 /* VLAN header has been stripped by the system firmware and
2266  * needs to be inserted back by the driver
2267  */
2268 if (adapter->rx_vlan_header_insertion &&
2269 (flags & IBMVNIC_VLAN_STRIPPED))
2270 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2271 ntohs(next->rx_comp.vlan_tci));
2272
2273 /* retire the completion entry and return the buffer slot to the pool */
2274 next->rx_comp.first = 0;
2275 remove_buff_from_pool(adapter, rx_buff);
2276
2277 skb_put(skb, length);
2278 skb->protocol = eth_type_trans(skb, netdev);
2279 skb_record_rx_queue(skb, scrq_num);
2280
2281 if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
2282 flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
2283 skb->ip_summed = CHECKSUM_UNNECESSARY;
2284 }
2285
2286 length = skb->len;
2287 napi_gro_receive(napi, skb);
2288 netdev->stats.rx_packets++;
2289 netdev->stats.rx_bytes += length;
2290 adapter->rx_stats_buffers[scrq_num].packets++;
2291 adapter->rx_stats_buffers[scrq_num].bytes += length;
2292 frames_processed++;
2293 }
2294
2295 if (adapter->state != VNIC_CLOSING)
2296 replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
2297
2298 if (frames_processed < budget) {
2299 enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2300 napi_complete_done(napi, frames_processed);
2301 if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
2302 napi_reschedule(napi)) {
2303 disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
2304 goto restart_poll;
2305 }
2306 }
2307 return frames_processed;
2308 }
2309
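/* Request a CHANGE_PARAM reset and block until it completes. If the reset
 * fails, restore the previous MTU, queue and ring values from the fallback
 * copy and retry once with those.
 */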
2310 static int wait_for_reset(struct ibmvnic_adapter *adapter)
2311 {
2312 int rc, ret;
2313
2314 adapter->fallback.mtu = adapter->req_mtu;
2315 adapter->fallback.rx_queues = adapter->req_rx_queues;
2316 adapter->fallback.tx_queues = adapter->req_tx_queues;
2317 adapter->fallback.rx_entries = adapter->req_rx_add_entries_per_subcrq;
2318 adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
2319
2320 reinit_completion(&adapter->reset_done);
2321 adapter->wait_for_reset = true;
2322 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2323 if (rc)
2324 return rc;
2325 wait_for_completion(&adapter->reset_done);
2326
2327 ret = 0;
2328 if (adapter->reset_done_rc) {
2329 ret = -EIO;
2330 adapter->desired.mtu = adapter->fallback.mtu;
2331 adapter->desired.rx_queues = adapter->fallback.rx_queues;
2332 adapter->desired.tx_queues = adapter->fallback.tx_queues;
2333 adapter->desired.rx_entries = adapter->fallback.rx_entries;
2334 adapter->desired.tx_entries = adapter->fallback.tx_entries;
2335
2336 reinit_completion(&adapter->reset_done);
2337 adapter->wait_for_reset = true;
2338 rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
2339 if (rc)
2340 return ret;
2341 wait_for_completion(&adapter->reset_done);
2342 }
2343 adapter->wait_for_reset = false;
2344
2345 return ret;
2346 }
2347
2348 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
2349 {
2350 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2351
2352 adapter->desired.mtu = new_mtu + ETH_HLEN;
2353
2354 return wait_for_reset(adapter);
2355 }
2356
2357 static netdev_features_t ibmvnic_features_check(struct sk_buff *skb,
2358 struct net_device *dev,
2359 netdev_features_t features)
2360 {
2361 /* Some backing hardware adapters can not
2362  * handle packets with a MSS less than 224
2363  * or with only one segment.
2364  */
2365 if (skb_is_gso(skb)) {
2366 if (skb_shinfo(skb)->gso_size < 224 ||
2367 skb_shinfo(skb)->gso_segs == 1)
2368 features &= ~NETIF_F_GSO_MASK;
2369 }
2370
2371 return features;
2372 }
2373
2374 static const struct net_device_ops ibmvnic_netdev_ops = {
2375 .ndo_open = ibmvnic_open,
2376 .ndo_stop = ibmvnic_close,
2377 .ndo_start_xmit = ibmvnic_xmit,
2378 .ndo_set_rx_mode = ibmvnic_set_multi,
2379 .ndo_set_mac_address = ibmvnic_set_mac,
2380 .ndo_validate_addr = eth_validate_addr,
2381 .ndo_tx_timeout = ibmvnic_tx_timeout,
2382 .ndo_change_mtu = ibmvnic_change_mtu,
2383 .ndo_features_check = ibmvnic_features_check,
2384 };
2385
2386
2387 /* ethtool functions */
2388 static int ibmvnic_get_link_ksettings(struct net_device *netdev,
2389 struct ethtool_link_ksettings *cmd)
2390 {
2391 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2392 int rc;
2393
2394 rc = send_query_phys_parms(adapter);
2395 if (rc) {
2396 adapter->speed = SPEED_UNKNOWN;
2397 adapter->duplex = DUPLEX_UNKNOWN;
2398 }
2399 cmd->base.speed = adapter->speed;
2400 cmd->base.duplex = adapter->duplex;
2401 cmd->base.port = PORT_FIBRE;
2402 cmd->base.phy_address = 0;
2403 cmd->base.autoneg = AUTONEG_ENABLE;
2404
2405 return 0;
2406 }
2407
2408 static void ibmvnic_get_drvinfo(struct net_device *netdev,
2409 struct ethtool_drvinfo *info)
2410 {
2411 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2412
2413 strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
2414 strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
2415 strlcpy(info->fw_version, adapter->fw_version,
2416 sizeof(info->fw_version));
2417 }
2418
2419 static u32 ibmvnic_get_msglevel(struct net_device *netdev)
2420 {
2421 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2422
2423 return adapter->msg_enable;
2424 }
2425
2426 static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
2427 {
2428 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2429
2430 adapter->msg_enable = data;
2431 }
2432
2433 static u32 ibmvnic_get_link(struct net_device *netdev)
2434 {
2435 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2436
2437 /* No query is needed here: a logical link up is requested at init
2438  * time and link state indications are delivered asynchronously.
2439  */
2440 return adapter->logical_link_state;
2441 }
2442
2443 static void ibmvnic_get_ringparam(struct net_device *netdev,
2444 struct ethtool_ringparam *ring)
2445 {
2446 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2447
2448 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2449 ring->rx_max_pending = adapter->max_rx_add_entries_per_subcrq;
2450 ring->tx_max_pending = adapter->max_tx_entries_per_subcrq;
2451 } else {
2452 ring->rx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2453 ring->tx_max_pending = IBMVNIC_MAX_QUEUE_SZ;
2454 }
2455 ring->rx_mini_max_pending = 0;
2456 ring->rx_jumbo_max_pending = 0;
2457 ring->rx_pending = adapter->req_rx_add_entries_per_subcrq;
2458 ring->tx_pending = adapter->req_tx_entries_per_subcrq;
2459 ring->rx_mini_pending = 0;
2460 ring->rx_jumbo_pending = 0;
2461 }
2462
2463 static int ibmvnic_set_ringparam(struct net_device *netdev,
2464 struct ethtool_ringparam *ring)
2465 {
2466 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2467 int ret;
2468
2469 ret = 0;
2470 adapter->desired.rx_entries = ring->rx_pending;
2471 adapter->desired.tx_entries = ring->tx_pending;
2472
2473 ret = wait_for_reset(adapter);
2474
2475 if (!ret &&
2476 (adapter->req_rx_add_entries_per_subcrq != ring->rx_pending ||
2477 adapter->req_tx_entries_per_subcrq != ring->tx_pending))
2478 netdev_info(netdev,
2479 "Could not match full ringsize request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2480 ring->rx_pending, ring->tx_pending,
2481 adapter->req_rx_add_entries_per_subcrq,
2482 adapter->req_tx_entries_per_subcrq);
2483 return ret;
2484 }
2485
2486 static void ibmvnic_get_channels(struct net_device *netdev,
2487 struct ethtool_channels *channels)
2488 {
2489 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2490
2491 if (adapter->priv_flags & IBMVNIC_USE_SERVER_MAXES) {
2492 channels->max_rx = adapter->max_rx_queues;
2493 channels->max_tx = adapter->max_tx_queues;
2494 } else {
2495 channels->max_rx = IBMVNIC_MAX_QUEUES;
2496 channels->max_tx = IBMVNIC_MAX_QUEUES;
2497 }
2498
2499 channels->max_other = 0;
2500 channels->max_combined = 0;
2501 channels->rx_count = adapter->req_rx_queues;
2502 channels->tx_count = adapter->req_tx_queues;
2503 channels->other_count = 0;
2504 channels->combined_count = 0;
2505 }
2506
2507 static int ibmvnic_set_channels(struct net_device *netdev,
2508 struct ethtool_channels *channels)
2509 {
2510 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2511 int ret;
2512
2513 ret = 0;
2514 adapter->desired.rx_queues = channels->rx_count;
2515 adapter->desired.tx_queues = channels->tx_count;
2516
2517 ret = wait_for_reset(adapter);
2518
2519 if (!ret &&
2520 (adapter->req_rx_queues != channels->rx_count ||
2521 adapter->req_tx_queues != channels->tx_count))
2522 netdev_info(netdev,
2523 "Could not match full channels request. Requested: RX %d, TX %d; Allowed: RX %llu, TX %llu\n",
2524 channels->rx_count, channels->tx_count,
2525 adapter->req_rx_queues, adapter->req_tx_queues);
2526 return ret;
2528 }
2529
2530 static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2531 {
2532 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2533 int i;
2534
2535 switch (stringset) {
2536 case ETH_SS_STATS:
2537 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats);
2538 i++, data += ETH_GSTRING_LEN)
2539 memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
2540
2541 for (i = 0; i < adapter->req_tx_queues; i++) {
2542 snprintf(data, ETH_GSTRING_LEN, "tx%d_packets", i);
2543 data += ETH_GSTRING_LEN;
2544
2545 snprintf(data, ETH_GSTRING_LEN, "tx%d_bytes", i);
2546 data += ETH_GSTRING_LEN;
2547
2548 snprintf(data, ETH_GSTRING_LEN,
2549 "tx%d_dropped_packets", i);
2550 data += ETH_GSTRING_LEN;
2551 }
2552
2553 for (i = 0; i < adapter->req_rx_queues; i++) {
2554 snprintf(data, ETH_GSTRING_LEN, "rx%d_packets", i);
2555 data += ETH_GSTRING_LEN;
2556
2557 snprintf(data, ETH_GSTRING_LEN, "rx%d_bytes", i);
2558 data += ETH_GSTRING_LEN;
2559
2560 snprintf(data, ETH_GSTRING_LEN, "rx%d_interrupts", i);
2561 data += ETH_GSTRING_LEN;
2562 }
2563 break;
2564
2565 case ETH_SS_PRIV_FLAGS:
2566 for (i = 0; i < ARRAY_SIZE(ibmvnic_priv_flags); i++)
2567 strcpy(data + i * ETH_GSTRING_LEN,
2568 ibmvnic_priv_flags[i]);
2569 break;
2570 default:
2571 return;
2572 }
2573 }
2574
2575 static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
2576 {
2577 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2578
2579 switch (sset) {
2580 case ETH_SS_STATS:
2581 return ARRAY_SIZE(ibmvnic_stats) +
2582 adapter->req_tx_queues * NUM_TX_STATS +
2583 adapter->req_rx_queues * NUM_RX_STATS;
2584 case ETH_SS_PRIV_FLAGS:
2585 return ARRAY_SIZE(ibmvnic_priv_flags);
2586 default:
2587 return -EOPNOTSUPP;
2588 }
2589 }
2590
2591 static void ibmvnic_get_ethtool_stats(struct net_device *dev,
2592 struct ethtool_stats *stats, u64 *data)
2593 {
2594 struct ibmvnic_adapter *adapter = netdev_priv(dev);
2595 union ibmvnic_crq crq;
2596 int i, j;
2597 int rc;
2598
2599 memset(&crq, 0, sizeof(crq));
2600 crq.request_statistics.first = IBMVNIC_CRQ_CMD;
2601 crq.request_statistics.cmd = REQUEST_STATISTICS;
2602 crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
2603 crq.request_statistics.len =
2604 cpu_to_be32(sizeof(struct ibmvnic_statistics));
2605
2606 /* Request the statistics from the device and wait for the response */
2607 reinit_completion(&adapter->stats_done);
2608 rc = ibmvnic_send_crq(adapter, &crq);
2609 if (rc)
2610 return;
2611 wait_for_completion(&adapter->stats_done);
2612
2613 for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
2614 data[i] = be64_to_cpu(IBMVNIC_GET_STAT(adapter,
2615 ibmvnic_stats[i].offset));
2616
2617 for (j = 0; j < adapter->req_tx_queues; j++) {
2618 data[i] = adapter->tx_stats_buffers[j].packets;
2619 i++;
2620 data[i] = adapter->tx_stats_buffers[j].bytes;
2621 i++;
2622 data[i] = adapter->tx_stats_buffers[j].dropped_packets;
2623 i++;
2624 }
2625
2626 for (j = 0; j < adapter->req_rx_queues; j++) {
2627 data[i] = adapter->rx_stats_buffers[j].packets;
2628 i++;
2629 data[i] = adapter->rx_stats_buffers[j].bytes;
2630 i++;
2631 data[i] = adapter->rx_stats_buffers[j].interrupts;
2632 i++;
2633 }
2634 }
2635
2636 static u32 ibmvnic_get_priv_flags(struct net_device *netdev)
2637 {
2638 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2639
2640 return adapter->priv_flags;
2641 }
2642
2643 static int ibmvnic_set_priv_flags(struct net_device *netdev, u32 flags)
2644 {
2645 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
2646 bool which_maxes = !!(flags & IBMVNIC_USE_SERVER_MAXES);
2647
2648 if (which_maxes)
2649 adapter->priv_flags |= IBMVNIC_USE_SERVER_MAXES;
2650 else
2651 adapter->priv_flags &= ~IBMVNIC_USE_SERVER_MAXES;
2652
2653 return 0;
2654 }

2655 static const struct ethtool_ops ibmvnic_ethtool_ops = {
2656 .get_drvinfo = ibmvnic_get_drvinfo,
2657 .get_msglevel = ibmvnic_get_msglevel,
2658 .set_msglevel = ibmvnic_set_msglevel,
2659 .get_link = ibmvnic_get_link,
2660 .get_ringparam = ibmvnic_get_ringparam,
2661 .set_ringparam = ibmvnic_set_ringparam,
2662 .get_channels = ibmvnic_get_channels,
2663 .set_channels = ibmvnic_set_channels,
2664 .get_strings = ibmvnic_get_strings,
2665 .get_sset_count = ibmvnic_get_sset_count,
2666 .get_ethtool_stats = ibmvnic_get_ethtool_stats,
2667 .get_link_ksettings = ibmvnic_get_link_ksettings,
2668 .get_priv_flags = ibmvnic_get_priv_flags,
2669 .set_priv_flags = ibmvnic_set_priv_flags,
2670 };
2671
2672
2673 /* Routines for managing CRQs/sCRQs */
2674 static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
2675 struct ibmvnic_sub_crq_queue *scrq)
2676 {
2677 int rc;
2678
2679 if (scrq->irq) {
2680 free_irq(scrq->irq, scrq);
2681 irq_dispose_mapping(scrq->irq);
2682 scrq->irq = 0;
2683 }
2684
2685 memset(scrq->msgs, 0, 4 * PAGE_SIZE);
2686 atomic_set(&scrq->used, 0);
2687 scrq->cur = 0;
2688
2689 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2690 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2691 return rc;
2692 }
2693
2694 static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
2695 {
2696 int i, rc;
2697
2698 for (i = 0; i < adapter->req_tx_queues; i++) {
2699 netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
2700 rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
2701 if (rc)
2702 return rc;
2703 }
2704
2705 for (i = 0; i < adapter->req_rx_queues; i++) {
2706 netdev_dbg(adapter->netdev, "Re-setting rx_scrq[%d]\n", i);
2707 rc = reset_one_sub_crq_queue(adapter, adapter->rx_scrq[i]);
2708 if (rc)
2709 return rc;
2710 }
2711
2712 return 0;
2713 }
2714
2715 static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
2716 struct ibmvnic_sub_crq_queue *scrq,
2717 bool do_h_free)
2718 {
2719 struct device *dev = &adapter->vdev->dev;
2720 long rc;
2721
2722 netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
2723
2724 if (do_h_free) {
2725 /* Release the sub-CRQ from the hypervisor, retrying while it is busy */
2726 do {
2727 rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
2728 adapter->vdev->unit_address,
2729 scrq->crq_num);
2730 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
2731
2732 if (rc) {
2733 netdev_err(adapter->netdev,
2734 "Failed to release sub-CRQ %16lx, rc = %ld\n",
2735 scrq->crq_num, rc);
2736 }
2737 }
2738
2739 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2740 DMA_BIDIRECTIONAL);
2741 free_pages((unsigned long)scrq->msgs, 2);
2742 kfree(scrq);
2743 }
2744
2745 static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2746 *adapter)
2747 {
2748 struct device *dev = &adapter->vdev->dev;
2749 struct ibmvnic_sub_crq_queue *scrq;
2750 int rc;
2751
2752 scrq = kzalloc(sizeof(*scrq), GFP_KERNEL);
2753 if (!scrq)
2754 return NULL;
2755
2756 scrq->msgs =
2757 (union sub_crq *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
2758 if (!scrq->msgs) {
2759 dev_warn(dev, "Couldn't allocate crq queue messages page\n");
2760 goto zero_page_failed;
2761 }
2762
2763 scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
2764 DMA_BIDIRECTIONAL);
2765 if (dma_mapping_error(dev, scrq->msg_token)) {
2766 dev_warn(dev, "Couldn't map crq queue messages page\n");
2767 goto map_failed;
2768 }
2769
2770 rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
2771 4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
2772
2773 if (rc == H_RESOURCE)
2774 rc = ibmvnic_reset_crq(adapter);
2775
2776 if (rc == H_CLOSED) {
2777 dev_warn(dev, "Partner adapter not ready, waiting.\n");
2778 } else if (rc) {
2779 dev_warn(dev, "Error %d registering sub-crq\n", rc);
2780 goto reg_failed;
2781 }
2782
2783 scrq->adapter = adapter;
2784 scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2785 spin_lock_init(&scrq->lock);
2786
2787 netdev_dbg(adapter->netdev,
2788 "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
2789 scrq->crq_num, scrq->hw_irq, scrq->irq);
2790
2791 return scrq;
2792
2793 reg_failed:
2794 dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
2795 DMA_BIDIRECTIONAL);
2796 map_failed:
2797 free_pages((unsigned long)scrq->msgs, 2);
2798 zero_page_failed:
2799 kfree(scrq);
2800
2801 return NULL;
2802 }
2803
2804 static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
2805 {
2806 int i;
2807
2808 if (adapter->tx_scrq) {
2809 for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
2810 if (!adapter->tx_scrq[i])
2811 continue;
2812
2813 netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
2814 i);
2815 if (adapter->tx_scrq[i]->irq) {
2816 free_irq(adapter->tx_scrq[i]->irq,
2817 adapter->tx_scrq[i]);
2818 irq_dispose_mapping(adapter->tx_scrq[i]->irq);
2819 adapter->tx_scrq[i]->irq = 0;
2820 }
2821
2822 release_sub_crq_queue(adapter, adapter->tx_scrq[i],
2823 do_h_free);
2824 }
2825
2826 kfree(adapter->tx_scrq);
2827 adapter->tx_scrq = NULL;
2828 adapter->num_active_tx_scrqs = 0;
2829 }
2830
2831 if (adapter->rx_scrq) {
2832 for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
2833 if (!adapter->rx_scrq[i])
2834 continue;
2835
2836 netdev_dbg(adapter->netdev, "Releasing rx_scrq[%d]\n",
2837 i);
2838 if (adapter->rx_scrq[i]->irq) {
2839 free_irq(adapter->rx_scrq[i]->irq,
2840 adapter->rx_scrq[i]);
2841 irq_dispose_mapping(adapter->rx_scrq[i]->irq);
2842 adapter->rx_scrq[i]->irq = 0;
2843 }
2844
2845 release_sub_crq_queue(adapter, adapter->rx_scrq[i],
2846 do_h_free);
2847 }
2848
2849 kfree(adapter->rx_scrq);
2850 adapter->rx_scrq = NULL;
2851 adapter->num_active_rx_scrqs = 0;
2852 }
2853 }
2854
2855 static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
2856 struct ibmvnic_sub_crq_queue *scrq)
2857 {
2858 struct device *dev = &adapter->vdev->dev;
2859 unsigned long rc;
2860
2861 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2862 H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2863 if (rc)
2864 dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
2865 scrq->hw_irq, rc);
2866 return rc;
2867 }
2868
2869 static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
2870 struct ibmvnic_sub_crq_queue *scrq)
2871 {
2872 struct device *dev = &adapter->vdev->dev;
2873 unsigned long rc;
2874
2875 if (scrq->hw_irq > 0x100000000ULL) {
2876 dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
2877 return 1;
2878 }
2879
2880 if (test_bit(0, &adapter->resetting) &&
2881 adapter->reset_reason == VNIC_RESET_MOBILITY) {
2882 u64 val = (0xff000000) | scrq->hw_irq;
2883
2884 rc = plpar_hcall_norets(H_EOI, val);
2885 /* H_EOI returns H_FUNCTION when the EOI hcall is not required
2886  * (for example in XIVE interrupt mode); that is expected, not an error.
2887  */
2888 if (rc && (rc != H_FUNCTION))
2889 dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
2890 val, rc);
2891 }
2892
2893 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
2894 H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
2895 if (rc)
2896 dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
2897 scrq->hw_irq, rc);
2898 return rc;
2899 }
2900
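/* Process TX completions on a sub-CRQ: free completed skbs, return buffer
 * slots to the TX pool, and wake the corresponding transmit queue once usage
 * drops to half of the per-queue entry limit.
 */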
2901 static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2902 struct ibmvnic_sub_crq_queue *scrq)
2903 {
2904 struct device *dev = &adapter->vdev->dev;
2905 struct ibmvnic_tx_pool *tx_pool;
2906 struct ibmvnic_tx_buff *txbuff;
2907 union sub_crq *next;
2908 int index;
2909 int i, j;
2910
2911 restart_loop:
2912 while (pending_scrq(adapter, scrq)) {
2913 unsigned int pool = scrq->pool_index;
2914 int num_entries = 0;
2915
2916 next = ibmvnic_next_scrq(adapter, scrq);
2917 for (i = 0; i < next->tx_comp.num_comps; i++) {
2918 if (next->tx_comp.rcs[i]) {
2919 dev_err(dev, "tx error %x\n",
2920 next->tx_comp.rcs[i]);
2921 continue;
2922 }
2923 index = be32_to_cpu(next->tx_comp.correlators[i]);
2924 if (index & IBMVNIC_TSO_POOL_MASK) {
2925 tx_pool = &adapter->tso_pool[pool];
2926 index &= ~IBMVNIC_TSO_POOL_MASK;
2927 } else {
2928 tx_pool = &adapter->tx_pool[pool];
2929 }
2930
2931 txbuff = &tx_pool->tx_buff[index];
2932
2933 for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
2934 if (!txbuff->data_dma[j])
2935 continue;
2936
2937 txbuff->data_dma[j] = 0;
2938 }
2939
2940 if (txbuff->last_frag) {
2941 dev_kfree_skb_any(txbuff->skb);
2942 txbuff->skb = NULL;
2943 }
2944
2945 num_entries += txbuff->num_entries;
2946
2947 tx_pool->free_map[tx_pool->producer_index] = index;
2948 tx_pool->producer_index =
2949 (tx_pool->producer_index + 1) %
2950 tx_pool->num_buffers;
2951 }
2952
2953 next->tx_comp.first = 0;
2954
2955 if (atomic_sub_return(num_entries, &scrq->used) <=
2956 (adapter->req_tx_entries_per_subcrq / 2) &&
2957 __netif_subqueue_stopped(adapter->netdev,
2958 scrq->pool_index)) {
2959 netif_wake_subqueue(adapter->netdev, scrq->pool_index);
2960 netdev_dbg(adapter->netdev, "Started queue %d\n",
2961 scrq->pool_index);
2962 }
2963 }
2964
2965 enable_scrq_irq(adapter, scrq);
2966
2967 if (pending_scrq(adapter, scrq)) {
2968 disable_scrq_irq(adapter, scrq);
2969 goto restart_loop;
2970 }
2971
2972 return 0;
2973 }
2974
2975 static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
2976 {
2977 struct ibmvnic_sub_crq_queue *scrq = instance;
2978 struct ibmvnic_adapter *adapter = scrq->adapter;
2979
2980 disable_scrq_irq(adapter, scrq);
2981 ibmvnic_complete_tx(adapter, scrq);
2982
2983 return IRQ_HANDLED;
2984 }
2985
2986 static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
2987 {
2988 struct ibmvnic_sub_crq_queue *scrq = instance;
2989 struct ibmvnic_adapter *adapter = scrq->adapter;
2990
2991
2992
2993
2994 if (unlikely(adapter->state != VNIC_OPEN))
2995 return IRQ_NONE;
2996
2997 adapter->rx_stats_buffers[scrq->scrq_num].interrupts++;
2998
2999 if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
3000 disable_scrq_irq(adapter, scrq);
3001 __napi_schedule(&adapter->napi[scrq->scrq_num]);
3002 }
3003
3004 return IRQ_HANDLED;
3005 }
3006
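/* Create IRQ mappings and register interrupt handlers for every TX and RX
 * sub-CRQ. On failure, any mappings registered so far are torn down and the
 * sub-CRQs are released.
 */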
3007 static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
3008 {
3009 struct device *dev = &adapter->vdev->dev;
3010 struct ibmvnic_sub_crq_queue *scrq;
3011 int i = 0, j = 0;
3012 int rc = 0;
3013
3014 for (i = 0; i < adapter->req_tx_queues; i++) {
3015 netdev_dbg(adapter->netdev, "Initializing tx_scrq[%d] irq\n",
3016 i);
3017 scrq = adapter->tx_scrq[i];
3018 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3019
3020 if (!scrq->irq) {
3021 rc = -EINVAL;
3022 dev_err(dev, "Error mapping irq\n");
3023 goto req_tx_irq_failed;
3024 }
3025
3026 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-tx%d",
3027 adapter->vdev->unit_address, i);
3028 rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
3029 0, scrq->name, scrq);
3030
3031 if (rc) {
3032 dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
3033 scrq->irq, rc);
3034 irq_dispose_mapping(scrq->irq);
3035 goto req_tx_irq_failed;
3036 }
3037 }
3038
3039 for (i = 0; i < adapter->req_rx_queues; i++) {
3040 netdev_dbg(adapter->netdev, "Initializing rx_scrq[%d] irq\n",
3041 i);
3042 scrq = adapter->rx_scrq[i];
3043 scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
3044 if (!scrq->irq) {
3045 rc = -EINVAL;
3046 dev_err(dev, "Error mapping irq\n");
3047 goto req_rx_irq_failed;
3048 }
3049 snprintf(scrq->name, sizeof(scrq->name), "ibmvnic-%x-rx%d",
3050 adapter->vdev->unit_address, i);
3051 rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
3052 0, scrq->name, scrq);
3053 if (rc) {
3054 dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
3055 scrq->irq, rc);
3056 irq_dispose_mapping(scrq->irq);
3057 goto req_rx_irq_failed;
3058 }
3059 }
3060 return rc;
3061
3062 req_rx_irq_failed:
3063 for (j = 0; j < i; j++) {
3064 free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
3065 irq_dispose_mapping(adapter->rx_scrq[j]->irq);
3066 }
3067 i = adapter->req_tx_queues;
3068 req_tx_irq_failed:
3069 for (j = 0; j < i; j++) {
3070 free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
3071 irq_dispose_mapping(adapter->tx_scrq[j]->irq);
3072 }
3073 release_sub_crqs(adapter, 1);
3074 return rc;
3075 }
3076
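/* Allocate and register the requested number of TX and RX sub-CRQs. If fewer
 * queues could be registered than requested, scale the requested counts down
 * (never below the advertised minimums) before assigning queues to TX and RX.
 */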
3077 static int init_sub_crqs(struct ibmvnic_adapter *adapter)
3078 {
3079 struct device *dev = &adapter->vdev->dev;
3080 struct ibmvnic_sub_crq_queue **allqueues;
3081 int registered_queues = 0;
3082 int total_queues;
3083 int more = 0;
3084 int i;
3085
3086 total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
3087
3088 allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_KERNEL);
3089 if (!allqueues)
3090 return -1;
3091
3092 for (i = 0; i < total_queues; i++) {
3093 allqueues[i] = init_sub_crq_queue(adapter);
3094 if (!allqueues[i]) {
3095 dev_warn(dev, "Couldn't allocate all sub-crqs\n");
3096 break;
3097 }
3098 registered_queues++;
3099 }
3100
3101 /* Make sure we registered at least the minimum number of queues */
3102 if (registered_queues <
3103 adapter->min_tx_queues + adapter->min_rx_queues) {
3104 dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
3105 goto tx_failed;
3106 }
3107
3108 /* Reduce the requested queue counts to match what was registered */
3109 for (i = 0; i < total_queues - registered_queues + more ; i++) {
3110 netdev_dbg(adapter->netdev, "Reducing number of queues\n");
3111 switch (i % 3) {
3112 case 0:
3113 if (adapter->req_rx_queues > adapter->min_rx_queues)
3114 adapter->req_rx_queues--;
3115 else
3116 more++;
3117 break;
3118 case 1:
3119 if (adapter->req_tx_queues > adapter->min_tx_queues)
3120 adapter->req_tx_queues--;
3121 else
3122 more++;
3123 break;
3124 }
3125 }
3126
3127 adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
3128 sizeof(*adapter->tx_scrq), GFP_KERNEL);
3129 if (!adapter->tx_scrq)
3130 goto tx_failed;
3131
3132 for (i = 0; i < adapter->req_tx_queues; i++) {
3133 adapter->tx_scrq[i] = allqueues[i];
3134 adapter->tx_scrq[i]->pool_index = i;
3135 adapter->num_active_tx_scrqs++;
3136 }
3137
3138 adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
3139 sizeof(*adapter->rx_scrq), GFP_KERNEL);
3140 if (!adapter->rx_scrq)
3141 goto rx_failed;
3142
3143 for (i = 0; i < adapter->req_rx_queues; i++) {
3144 adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
3145 adapter->rx_scrq[i]->scrq_num = i;
3146 adapter->num_active_rx_scrqs++;
3147 }
3148
3149 kfree(allqueues);
3150 return 0;
3151
3152 rx_failed:
3153 kfree(adapter->tx_scrq);
3154 adapter->tx_scrq = NULL;
3155 tx_failed:
3156 for (i = 0; i < registered_queues; i++)
3157 release_sub_crq_queue(adapter, allqueues[i], 1);
3158 kfree(allqueues);
3159 return -1;
3160 }
3161
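/* Send REQUEST_CAPABILITY CRQs for the negotiated queue counts, ring sizes,
 * MTU and promiscuous setting. On the first (non-retry) pass the desired ring
 * sizes are clamped so that a long term buffer never exceeds
 * IBMVNIC_MAX_LTB_SIZE.
 */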
3162 static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
3163 {
3164 struct device *dev = &adapter->vdev->dev;
3165 union ibmvnic_crq crq;
3166 int max_entries;
3167
3168 if (!retry) {
3169 /* Sub-CRQ entries are 32 bytes long */
3170 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
3171
3172 if (adapter->min_tx_entries_per_subcrq > entries_page ||
3173 adapter->min_rx_add_entries_per_subcrq > entries_page) {
3174 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
3175 return;
3176 }
3177
3178 if (adapter->desired.mtu)
3179 adapter->req_mtu = adapter->desired.mtu;
3180 else
3181 adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
3182
3183 if (!adapter->desired.tx_entries)
3184 adapter->desired.tx_entries =
3185 adapter->max_tx_entries_per_subcrq;
3186 if (!adapter->desired.rx_entries)
3187 adapter->desired.rx_entries =
3188 adapter->max_rx_add_entries_per_subcrq;
3189
3190 max_entries = IBMVNIC_MAX_LTB_SIZE /
3191 (adapter->req_mtu + IBMVNIC_BUFFER_HLEN);
3192
3193 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3194 adapter->desired.tx_entries > IBMVNIC_MAX_LTB_SIZE) {
3195 adapter->desired.tx_entries = max_entries;
3196 }
3197
3198 if ((adapter->req_mtu + IBMVNIC_BUFFER_HLEN) *
3199 adapter->desired.rx_entries > IBMVNIC_MAX_LTB_SIZE) {
3200 adapter->desired.rx_entries = max_entries;
3201 }
3202
3203 if (adapter->desired.tx_entries)
3204 adapter->req_tx_entries_per_subcrq =
3205 adapter->desired.tx_entries;
3206 else
3207 adapter->req_tx_entries_per_subcrq =
3208 adapter->max_tx_entries_per_subcrq;
3209
3210 if (adapter->desired.rx_entries)
3211 adapter->req_rx_add_entries_per_subcrq =
3212 adapter->desired.rx_entries;
3213 else
3214 adapter->req_rx_add_entries_per_subcrq =
3215 adapter->max_rx_add_entries_per_subcrq;
3216
3217 if (adapter->desired.tx_queues)
3218 adapter->req_tx_queues =
3219 adapter->desired.tx_queues;
3220 else
3221 adapter->req_tx_queues =
3222 adapter->opt_tx_comp_sub_queues;
3223
3224 if (adapter->desired.rx_queues)
3225 adapter->req_rx_queues =
3226 adapter->desired.rx_queues;
3227 else
3228 adapter->req_rx_queues =
3229 adapter->opt_rx_comp_queues;
3230
3231 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
3232 }
3233
3234 memset(&crq, 0, sizeof(crq));
3235 crq.request_capability.first = IBMVNIC_CRQ_CMD;
3236 crq.request_capability.cmd = REQUEST_CAPABILITY;
3237
3238 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
3239 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
3240 atomic_inc(&adapter->running_cap_crqs);
3241 ibmvnic_send_crq(adapter, &crq);
3242
3243 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
3244 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
3245 atomic_inc(&adapter->running_cap_crqs);
3246 ibmvnic_send_crq(adapter, &crq);
3247
3248 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
3249 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
3250 atomic_inc(&adapter->running_cap_crqs);
3251 ibmvnic_send_crq(adapter, &crq);
3252
3253 crq.request_capability.capability =
3254 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
3255 crq.request_capability.number =
3256 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
3257 atomic_inc(&adapter->running_cap_crqs);
3258 ibmvnic_send_crq(adapter, &crq);
3259
3260 crq.request_capability.capability =
3261 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
3262 crq.request_capability.number =
3263 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
3264 atomic_inc(&adapter->running_cap_crqs);
3265 ibmvnic_send_crq(adapter, &crq);
3266
3267 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
3268 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
3269 atomic_inc(&adapter->running_cap_crqs);
3270 ibmvnic_send_crq(adapter, &crq);
3271
3272 if (adapter->netdev->flags & IFF_PROMISC) {
3273 if (adapter->promisc_supported) {
3274 crq.request_capability.capability =
3275 cpu_to_be16(PROMISC_REQUESTED);
3276 crq.request_capability.number = cpu_to_be64(1);
3277 atomic_inc(&adapter->running_cap_crqs);
3278 ibmvnic_send_crq(adapter, &crq);
3279 }
3280 } else {
3281 crq.request_capability.capability =
3282 cpu_to_be16(PROMISC_REQUESTED);
3283 crq.request_capability.number = cpu_to_be64(0);
3284 atomic_inc(&adapter->running_cap_crqs);
3285 ibmvnic_send_crq(adapter, &crq);
3286 }
3287 }
3288
3289 static int pending_scrq(struct ibmvnic_adapter *adapter,
3290 struct ibmvnic_sub_crq_queue *scrq)
3291 {
3292 union sub_crq *entry = &scrq->msgs[scrq->cur];
3293
3294 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP)
3295 return 1;
3296 else
3297 return 0;
3298 }
3299
3300 static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
3301 struct ibmvnic_sub_crq_queue *scrq)
3302 {
3303 union sub_crq *entry;
3304 unsigned long flags;
3305
3306 spin_lock_irqsave(&scrq->lock, flags);
3307 entry = &scrq->msgs[scrq->cur];
3308 if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3309 if (++scrq->cur == scrq->size)
3310 scrq->cur = 0;
3311 } else {
3312 entry = NULL;
3313 }
3314 spin_unlock_irqrestore(&scrq->lock, flags);
3315
3316 return entry;
3317 }
3318
3319 static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3320 {
3321 struct ibmvnic_crq_queue *queue = &adapter->crq;
3322 union ibmvnic_crq *crq;
3323
3324 crq = &queue->msgs[queue->cur];
3325 if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
3326 if (++queue->cur == queue->size)
3327 queue->cur = 0;
3328 } else {
3329 crq = NULL;
3330 }
3331
3332 return crq;
3333 }
3334
3335 static void print_subcrq_error(struct device *dev, int rc, const char *func)
3336 {
3337 switch (rc) {
3338 case H_PARAMETER:
3339 dev_warn_ratelimited(dev,
3340 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3341 func, rc);
3342 break;
3343 case H_CLOSED:
3344 dev_warn_ratelimited(dev,
3345 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3346 func, rc);
3347 break;
3348 default:
3349 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3350 break;
3351 }
3352 }
3353
3354 static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3355 union sub_crq *sub_crq)
3356 {
3357 unsigned int ua = adapter->vdev->unit_address;
3358 struct device *dev = &adapter->vdev->dev;
3359 u64 *u64_crq = (u64 *)sub_crq;
3360 int rc;
3361
3362 netdev_dbg(adapter->netdev,
3363 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
3364 (unsigned long int)cpu_to_be64(remote_handle),
3365 (unsigned long int)cpu_to_be64(u64_crq[0]),
3366 (unsigned long int)cpu_to_be64(u64_crq[1]),
3367 (unsigned long int)cpu_to_be64(u64_crq[2]),
3368 (unsigned long int)cpu_to_be64(u64_crq[3]));
3369
3370 /* Make sure the hypervisor sees the complete request */
3371 mb();
3372
3373 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
3374 cpu_to_be64(remote_handle),
3375 cpu_to_be64(u64_crq[0]),
3376 cpu_to_be64(u64_crq[1]),
3377 cpu_to_be64(u64_crq[2]),
3378 cpu_to_be64(u64_crq[3]));
3379
3380 if (rc)
3381 print_subcrq_error(dev, rc, __func__);
3382
3383 return rc;
3384 }
3385
3386 static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3387 u64 remote_handle, u64 ioba, u64 num_entries)
3388 {
3389 unsigned int ua = adapter->vdev->unit_address;
3390 struct device *dev = &adapter->vdev->dev;
3391 int rc;
3392
3393 /* Make sure the hypervisor sees the complete request */
3394 mb();
3395 rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
3396 cpu_to_be64(remote_handle),
3397 ioba, num_entries);
3398
3399 if (rc)
3400 print_subcrq_error(dev, rc, __func__);
3401
3402 return rc;
3403 }
3404
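/* Post a single CRQ message to the hypervisor via H_SEND_CRQ. Non-init
 * commands are rejected while the CRQ is inactive, and an H_CLOSED return
 * during a reset escalates to a fatal reset request.
 */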
3405 static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
3406 union ibmvnic_crq *crq)
3407 {
3408 unsigned int ua = adapter->vdev->unit_address;
3409 struct device *dev = &adapter->vdev->dev;
3410 u64 *u64_crq = (u64 *)crq;
3411 int rc;
3412
3413 netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
3414 (unsigned long int)cpu_to_be64(u64_crq[0]),
3415 (unsigned long int)cpu_to_be64(u64_crq[1]));
3416
3417 if (!adapter->crq.active &&
3418 crq->generic.first != IBMVNIC_CRQ_INIT_CMD) {
3419 dev_warn(dev, "Invalid request detected while CRQ is inactive, possible device state change during reset\n");
3420 return -EINVAL;
3421 }
3422
3423 /* Make sure the hypervisor sees the complete request */
3424 mb();
3425
3426 rc = plpar_hcall_norets(H_SEND_CRQ, ua,
3427 cpu_to_be64(u64_crq[0]),
3428 cpu_to_be64(u64_crq[1]));
3429
3430 if (rc) {
3431 if (rc == H_CLOSED) {
3432 dev_warn(dev, "CRQ Queue closed\n");
3433 if (test_bit(0, &adapter->resetting))
3434 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
3435 }
3436
3437 dev_warn(dev, "Send error (rc=%d)\n", rc);
3438 }
3439
3440 return rc;
3441 }
3442
3443 static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
3444 {
3445 union ibmvnic_crq crq;
3446
3447 memset(&crq, 0, sizeof(crq));
3448 crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
3449 crq.generic.cmd = IBMVNIC_CRQ_INIT;
3450 netdev_dbg(adapter->netdev, "Sending CRQ init\n");
3451
3452 return ibmvnic_send_crq(adapter, &crq);
3453 }
3454
3455 static int send_version_xchg(struct ibmvnic_adapter *adapter)
3456 {
3457 union ibmvnic_crq crq;
3458
3459 memset(&crq, 0, sizeof(crq));
3460 crq.version_exchange.first = IBMVNIC_CRQ_CMD;
3461 crq.version_exchange.cmd = VERSION_EXCHANGE;
3462 crq.version_exchange.version = cpu_to_be16(ibmvnic_version);
3463
3464 return ibmvnic_send_crq(adapter, &crq);
3465 }
3466
3467 struct vnic_login_client_data {
3468 u8 type;
3469 __be16 len;
3470 char name[];
3471 } __packed;
3472
3473 static int vnic_client_data_len(struct ibmvnic_adapter *adapter)
3474 {
3475 int len;
3476
3477 /* Calculate the buffer space needed for the vNIC client data in the
3478  * login buffer: four type/length descriptor headers plus the OS name
3479  * ("Linux" + NUL = 6 bytes), the LPAR node name and the netdev name.
3480  */
3481 len = 4 * sizeof(struct vnic_login_client_data);
3482 len += 6;
3483 len += strlen(utsname()->nodename) + 1;
3484 len += strlen(adapter->netdev->name) + 1;
3485
3486 return len;
3487 }
3488
3489 static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
3490 struct vnic_login_client_data *vlcd)
3491 {
3492 const char *os_name = "Linux";
3493 int len;
3494
3495 /* Type 1 - LPAR OS */
3496 vlcd->type = 1;
3497 len = strlen(os_name) + 1;
3498 vlcd->len = cpu_to_be16(len);
3499 strncpy(vlcd->name, os_name, len);
3500 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3501
3502 /* Type 2 - LPAR name */
3503 vlcd->type = 2;
3504 len = strlen(utsname()->nodename) + 1;
3505 vlcd->len = cpu_to_be16(len);
3506 strncpy(vlcd->name, utsname()->nodename, len);
3507 vlcd = (struct vnic_login_client_data *)(vlcd->name + len);
3508
3509 /* Type 3 - device name */
3510 vlcd->type = 3;
3511 len = strlen(adapter->netdev->name) + 1;
3512 vlcd->len = cpu_to_be16(len);
3513 strncpy(vlcd->name, adapter->netdev->name, len);
3514 }
3515
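/* Build and send the LOGIN request. The login buffer carries the sub-CRQ
 * handles for all TX/RX queues plus client identification data; the response
 * buffer is DMA-mapped so firmware can fill it in and is processed later in
 * handle_login_rsp().
 */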
3516 static int send_login(struct ibmvnic_adapter *adapter)
3517 {
3518 struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
3519 struct ibmvnic_login_buffer *login_buffer;
3520 struct device *dev = &adapter->vdev->dev;
3521 dma_addr_t rsp_buffer_token;
3522 dma_addr_t buffer_token;
3523 size_t rsp_buffer_size;
3524 union ibmvnic_crq crq;
3525 size_t buffer_size;
3526 __be64 *tx_list_p;
3527 __be64 *rx_list_p;
3528 int client_data_len;
3529 struct vnic_login_client_data *vlcd;
3530 int i;
3531
3532 if (!adapter->tx_scrq || !adapter->rx_scrq) {
3533 netdev_err(adapter->netdev,
3534 "RX or TX queues are not allocated, device login failed\n");
3535 return -1;
3536 }
3537
3538 release_login_rsp_buffer(adapter);
3539 client_data_len = vnic_client_data_len(adapter);
3540
3541 buffer_size =
3542 sizeof(struct ibmvnic_login_buffer) +
3543 sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues) +
3544 client_data_len;
3545
3546 login_buffer = kzalloc(buffer_size, GFP_ATOMIC);
3547 if (!login_buffer)
3548 goto buf_alloc_failed;
3549
3550 buffer_token = dma_map_single(dev, login_buffer, buffer_size,
3551 DMA_TO_DEVICE);
3552 if (dma_mapping_error(dev, buffer_token)) {
3553 dev_err(dev, "Couldn't map login buffer\n");
3554 goto buf_map_failed;
3555 }
3556
3557 rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
3558 sizeof(u64) * adapter->req_tx_queues +
3559 sizeof(u64) * adapter->req_rx_queues +
3560 sizeof(u64) * adapter->req_rx_queues +
3561 sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
3562
3563 login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
3564 if (!login_rsp_buffer)
3565 goto buf_rsp_alloc_failed;
3566
3567 rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
3568 rsp_buffer_size, DMA_FROM_DEVICE);
3569 if (dma_mapping_error(dev, rsp_buffer_token)) {
3570 dev_err(dev, "Couldn't map login rsp buffer\n");
3571 goto buf_rsp_map_failed;
3572 }
3573
3574 adapter->login_buf = login_buffer;
3575 adapter->login_buf_token = buffer_token;
3576 adapter->login_buf_sz = buffer_size;
3577 adapter->login_rsp_buf = login_rsp_buffer;
3578 adapter->login_rsp_buf_token = rsp_buffer_token;
3579 adapter->login_rsp_buf_sz = rsp_buffer_size;
3580
3581 login_buffer->len = cpu_to_be32(buffer_size);
3582 login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
3583 login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
3584 login_buffer->off_txcomp_subcrqs =
3585 cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
3586 login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
3587 login_buffer->off_rxcomp_subcrqs =
3588 cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
3589 sizeof(u64) * adapter->req_tx_queues);
3590 login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
3591 login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);
3592
3593 tx_list_p = (__be64 *)((char *)login_buffer +
3594 sizeof(struct ibmvnic_login_buffer));
3595 rx_list_p = (__be64 *)((char *)login_buffer +
3596 sizeof(struct ibmvnic_login_buffer) +
3597 sizeof(u64) * adapter->req_tx_queues);
3598
3599 for (i = 0; i < adapter->req_tx_queues; i++) {
3600 if (adapter->tx_scrq[i]) {
3601 tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
3602 crq_num);
3603 }
3604 }
3605
3606 for (i = 0; i < adapter->req_rx_queues; i++) {
3607 if (adapter->rx_scrq[i]) {
3608 rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
3609 crq_num);
3610 }
3611 }
3612
3613 /* Client data immediately follows the RX queue list in the login buffer */
3614 vlcd = (struct vnic_login_client_data *)
3615 ((char *)rx_list_p + (sizeof(u64) * adapter->req_rx_queues));
3616 login_buffer->client_data_offset =
3617 cpu_to_be32((char *)vlcd - (char *)login_buffer);
3618 login_buffer->client_data_len = cpu_to_be32(client_data_len);
3619
3620 vnic_add_client_data(adapter, vlcd);
3621
3622 netdev_dbg(adapter->netdev, "Login Buffer:\n");
3623 for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
3624 netdev_dbg(adapter->netdev, "%016lx\n",
3625 ((unsigned long int *)(adapter->login_buf))[i]);
3626 }
3627
3628 memset(&crq, 0, sizeof(crq));
3629 crq.login.first = IBMVNIC_CRQ_CMD;
3630 crq.login.cmd = LOGIN;
3631 crq.login.ioba = cpu_to_be32(buffer_token);
3632 crq.login.len = cpu_to_be32(buffer_size);
3633 ibmvnic_send_crq(adapter, &crq);
3634
3635 return 0;
3636
3637 buf_rsp_map_failed:
3638 kfree(login_rsp_buffer);
3639 buf_rsp_alloc_failed:
3640 dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
3641 buf_map_failed:
3642 kfree(login_buffer);
3643 buf_alloc_failed:
3644 return -1;
3645 }
3646
3647 static int send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
3648 u32 len, u8 map_id)
3649 {
3650 union ibmvnic_crq crq;
3651
3652 memset(&crq, 0, sizeof(crq));
3653 crq.request_map.first = IBMVNIC_CRQ_CMD;
3654 crq.request_map.cmd = REQUEST_MAP;
3655 crq.request_map.map_id = map_id;
3656 crq.request_map.ioba = cpu_to_be32(addr);
3657 crq.request_map.len = cpu_to_be32(len);
3658 return ibmvnic_send_crq(adapter, &crq);
3659 }
3660
3661 static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
3662 {
3663 union ibmvnic_crq crq;
3664
3665 memset(&crq, 0, sizeof(crq));
3666 crq.request_unmap.first = IBMVNIC_CRQ_CMD;
3667 crq.request_unmap.cmd = REQUEST_UNMAP;
3668 crq.request_unmap.map_id = map_id;
3669 return ibmvnic_send_crq(adapter, &crq);
3670 }
3671
3672 static void send_map_query(struct ibmvnic_adapter *adapter)
3673 {
3674 union ibmvnic_crq crq;
3675
3676 memset(&crq, 0, sizeof(crq));
3677 crq.query_map.first = IBMVNIC_CRQ_CMD;
3678 crq.query_map.cmd = QUERY_MAP;
3679 ibmvnic_send_crq(adapter, &crq);
3680 }
3681
3682
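/* Query each device capability the driver cares about. Every outstanding
 * query increments running_cap_crqs so the response handler can tell when
 * all replies have arrived.
 */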
3683 static void send_cap_queries(struct ibmvnic_adapter *adapter)
3684 {
3685 union ibmvnic_crq crq;
3686
3687 atomic_set(&adapter->running_cap_crqs, 0);
3688 memset(&crq, 0, sizeof(crq));
3689 crq.query_capability.first = IBMVNIC_CRQ_CMD;
3690 crq.query_capability.cmd = QUERY_CAPABILITY;
3691
3692 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
3693 atomic_inc(&adapter->running_cap_crqs);
3694 ibmvnic_send_crq(adapter, &crq);
3695
3696 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
3697 atomic_inc(&adapter->running_cap_crqs);
3698 ibmvnic_send_crq(adapter, &crq);
3699
3700 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
3701 atomic_inc(&adapter->running_cap_crqs);
3702 ibmvnic_send_crq(adapter, &crq);
3703
3704 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
3705 atomic_inc(&adapter->running_cap_crqs);
3706 ibmvnic_send_crq(adapter, &crq);
3707
3708 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
3709 atomic_inc(&adapter->running_cap_crqs);
3710 ibmvnic_send_crq(adapter, &crq);
3711
3712 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
3713 atomic_inc(&adapter->running_cap_crqs);
3714 ibmvnic_send_crq(adapter, &crq);
3715
3716 crq.query_capability.capability =
3717 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
3718 atomic_inc(&adapter->running_cap_crqs);
3719 ibmvnic_send_crq(adapter, &crq);
3720
3721 crq.query_capability.capability =
3722 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
3723 atomic_inc(&adapter->running_cap_crqs);
3724 ibmvnic_send_crq(adapter, &crq);
3725
3726 crq.query_capability.capability =
3727 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
3728 atomic_inc(&adapter->running_cap_crqs);
3729 ibmvnic_send_crq(adapter, &crq);
3730
3731 crq.query_capability.capability =
3732 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
3733 atomic_inc(&adapter->running_cap_crqs);
3734 ibmvnic_send_crq(adapter, &crq);
3735
3736 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
3737 atomic_inc(&adapter->running_cap_crqs);
3738 ibmvnic_send_crq(adapter, &crq);
3739
3740 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
3741 atomic_inc(&adapter->running_cap_crqs);
3742 ibmvnic_send_crq(adapter, &crq);
3743
3744 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
3745 atomic_inc(&adapter->running_cap_crqs);
3746 ibmvnic_send_crq(adapter, &crq);
3747
3748 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
3749 atomic_inc(&adapter->running_cap_crqs);
3750 ibmvnic_send_crq(adapter, &crq);
3751
3752 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
3753 atomic_inc(&adapter->running_cap_crqs);
3754 ibmvnic_send_crq(adapter, &crq);
3755
3756 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
3757 atomic_inc(&adapter->running_cap_crqs);
3758 ibmvnic_send_crq(adapter, &crq);
3759
3760 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
3761 atomic_inc(&adapter->running_cap_crqs);
3762 ibmvnic_send_crq(adapter, &crq);
3763
3764 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
3765 atomic_inc(&adapter->running_cap_crqs);
3766 ibmvnic_send_crq(adapter, &crq);
3767
3768 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
3769 atomic_inc(&adapter->running_cap_crqs);
3770 ibmvnic_send_crq(adapter, &crq);
3771
3772 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
3773 atomic_inc(&adapter->running_cap_crqs);
3774 ibmvnic_send_crq(adapter, &crq);
3775
3776 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
3777 atomic_inc(&adapter->running_cap_crqs);
3778 ibmvnic_send_crq(adapter, &crq);
3779
3780 crq.query_capability.capability =
3781 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
3782 atomic_inc(&adapter->running_cap_crqs);
3783 ibmvnic_send_crq(adapter, &crq);
3784
3785 crq.query_capability.capability =
3786 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
3787 atomic_inc(&adapter->running_cap_crqs);
3788 ibmvnic_send_crq(adapter, &crq);
3789
3790 crq.query_capability.capability =
3791 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
3792 atomic_inc(&adapter->running_cap_crqs);
3793 ibmvnic_send_crq(adapter, &crq);
3794
3795 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
3796 atomic_inc(&adapter->running_cap_crqs);
3797 ibmvnic_send_crq(adapter, &crq);
3798 }
3799
3800 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
3801 struct ibmvnic_adapter *adapter)
3802 {
3803 struct device *dev = &adapter->vdev->dev;
3804
3805 if (crq->get_vpd_size_rsp.rc.code) {
3806 dev_err(dev, "Error retrieving VPD size, rc=%x\n",
3807 crq->get_vpd_size_rsp.rc.code);
3808 complete(&adapter->fw_done);
3809 return;
3810 }
3811
3812 adapter->vpd->len = be64_to_cpu(crq->get_vpd_size_rsp.len);
3813 complete(&adapter->fw_done);
3814 }
3815
3816 static void handle_vpd_rsp(union ibmvnic_crq *crq,
3817 struct ibmvnic_adapter *adapter)
3818 {
3819 struct device *dev = &adapter->vdev->dev;
3820 unsigned char *substr = NULL;
3821 u8 fw_level_len = 0;
3822
3823 memset(adapter->fw_version, 0, 32);
3824
3825 dma_unmap_single(dev, adapter->vpd->dma_addr, adapter->vpd->len,
3826 DMA_FROM_DEVICE);
3827
3828 if (crq->get_vpd_rsp.rc.code) {
3829 dev_err(dev, "Error retrieving VPD from device, rc=%x\n",
3830 crq->get_vpd_rsp.rc.code);
3831 goto complete;
3832 }
3833
3834 /* Locate the firmware version information, which follows the
3835  * ASCII "RM" substring in the VPD buffer.
3836  */
3837 substr = strnstr(adapter->vpd->buff, "RM", adapter->vpd->len);
3838 if (!substr) {
3839 dev_info(dev, "Warning - No FW level has been provided in the VPD buffer by the VIOS Server\n");
3840 goto complete;
3841 }
3842
3843 /* get length of firmware level ASCII substring */
3844 if ((substr + 2) < (adapter->vpd->buff + adapter->vpd->len)) {
3845 fw_level_len = *(substr + 2);
3846 } else {
3847 dev_info(dev, "Length of FW substr extrapolated VDP buff\n");
3848 goto complete;
3849 }
3850
3851 /* copy firmware version string from vpd into adapter */
3852 if ((substr + 3 + fw_level_len) <
3853 (adapter->vpd->buff + adapter->vpd->len)) {
3854 strncpy((char *)adapter->fw_version, substr + 3, fw_level_len);
3855 } else {
3856 dev_info(dev, "FW substr extrapolated VPD buff\n");
3857 }
3858
3859 complete:
3860 if (adapter->fw_version[0] == '\0')
3861 strncpy((char *)adapter->fw_version, "N/A", 3 * sizeof(char));
3862 complete(&adapter->fw_done);
3863 }
3864
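/* Handle the IP offload query response: log the advertised offloads, derive
 * the netdev hw_features/features from them and send a CONTROL_IP_OFFLOAD
 * request enabling the checksum and TSO offloads the device supports.
 */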
3865 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
3866 {
3867 struct device *dev = &adapter->vdev->dev;
3868 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
3869 netdev_features_t old_hw_features = 0;
3870 union ibmvnic_crq crq;
3871 int i;
3872
3873 dma_unmap_single(dev, adapter->ip_offload_tok,
3874 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
3875
3876 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
3877 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
3878 netdev_dbg(adapter->netdev, "%016lx\n",
3879 ((unsigned long int *)(buf))[i]);
3880
3881 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
3882 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
3883 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
3884 buf->tcp_ipv4_chksum);
3885 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
3886 buf->tcp_ipv6_chksum);
3887 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
3888 buf->udp_ipv4_chksum);
3889 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
3890 buf->udp_ipv6_chksum);
3891 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
3892 buf->large_tx_ipv4);
3893 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
3894 buf->large_tx_ipv6);
3895 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
3896 buf->large_rx_ipv4);
3897 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
3898 buf->large_rx_ipv6);
3899 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
3900 buf->max_ipv4_header_size);
3901 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
3902 buf->max_ipv6_header_size);
3903 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
3904 buf->max_tcp_header_size);
3905 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
3906 buf->max_udp_header_size);
3907 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
3908 buf->max_large_tx_size);
3909 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
3910 buf->max_large_rx_size);
3911 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
3912 buf->ipv6_extension_header);
3913 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
3914 buf->tcp_pseudosum_req);
3915 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
3916 buf->num_ipv6_ext_headers);
3917 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
3918 buf->off_ipv6_ext_headers);
3919
3920 adapter->ip_offload_ctrl_tok =
3921 dma_map_single(dev, &adapter->ip_offload_ctrl,
3922 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
3923
3924 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
3925 dev_err(dev, "Couldn't map ip offload control buffer\n");
3926 return;
3927 }
3928
3929 adapter->ip_offload_ctrl.len =
3930 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3931 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
3932 adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
3933 adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
3934 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
3935 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
3936 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
3937 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
3938 adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
3939 adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
3940
3941 /* large_rx disabled for now, additional features needed */
3942 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
3943 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
3944
3945 if (adapter->state != VNIC_PROBING) {
3946 old_hw_features = adapter->netdev->hw_features;
3947 adapter->netdev->hw_features = 0;
3948 }
3949
3950 adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
3951
3952 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
3953 adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
3954
3955 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
3956 adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
3957
3958 if ((adapter->netdev->features &
3959 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
3960 adapter->netdev->hw_features |= NETIF_F_RXCSUM;
3961
3962 if (buf->large_tx_ipv4)
3963 adapter->netdev->hw_features |= NETIF_F_TSO;
3964 if (buf->large_tx_ipv6)
3965 adapter->netdev->hw_features |= NETIF_F_TSO6;
3966
3967 if (adapter->state == VNIC_PROBING) {
3968 adapter->netdev->features |= adapter->netdev->hw_features;
3969 } else if (old_hw_features != adapter->netdev->hw_features) {
3970 netdev_features_t tmp = 0;
3971
3972 /* disable features that are no longer supported */
3973 adapter->netdev->features &= adapter->netdev->hw_features;
3974
3975 tmp = (old_hw_features ^ adapter->netdev->hw_features) &
3976 adapter->netdev->hw_features;
3977 adapter->netdev->features |=
3978 tmp & adapter->netdev->wanted_features;
3979 }
3980
3981 memset(&crq, 0, sizeof(crq));
3982 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
3983 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
3984 crq.control_ip_offload.len =
3985 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
3986 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
3987 ibmvnic_send_crq(adapter, &crq);
3988 }
3989
3990 static const char *ibmvnic_fw_err_cause(u16 cause)
3991 {
3992 switch (cause) {
3993 case ADAPTER_PROBLEM:
3994 return "adapter problem";
3995 case BUS_PROBLEM:
3996 return "bus problem";
3997 case FW_PROBLEM:
3998 return "firmware problem";
3999 case DD_PROBLEM:
4000 return "device driver problem";
4001 case EEH_RECOVERY:
4002 return "EEH recovery";
4003 case FW_UPDATED:
4004 return "firmware updated";
4005 case LOW_MEMORY:
4006 return "low memory";
4007 default:
4008 return "unknown";
4009 }
4010 }
4011
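/* Log a firmware ERROR_INDICATION CRQ and start recovery: a fatal error
 * triggers a fatal reset, anything else a non-fatal reset.
 */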
4012 static void handle_error_indication(union ibmvnic_crq *crq,
4013 struct ibmvnic_adapter *adapter)
4014 {
4015 struct device *dev = &adapter->vdev->dev;
4016 u16 cause;
4017
4018 cause = be16_to_cpu(crq->error_indication.error_cause);
4019
4020 dev_warn_ratelimited(dev,
4021 "Firmware reports %serror, cause: %s. Starting recovery...\n",
4022 crq->error_indication.flags
4023 & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
4024 ibmvnic_fw_err_cause(cause));
4025
4026 if (crq->error_indication.flags & IBMVNIC_FATAL_ERROR)
4027 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4028 else
4029 ibmvnic_reset(adapter, VNIC_RESET_NON_FATAL);
4030 }
4031
4032 static int handle_change_mac_rsp(union ibmvnic_crq *crq,
4033 struct ibmvnic_adapter *adapter)
4034 {
4035 struct net_device *netdev = adapter->netdev;
4036 struct device *dev = &adapter->vdev->dev;
4037 long rc;
4038
4039 rc = crq->change_mac_addr_rsp.rc.code;
4040 if (rc) {
4041 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
4042 goto out;
4043 }
4044 ether_addr_copy(netdev->dev_addr,
4045 &crq->change_mac_addr_rsp.mac_addr[0]);
4046 out:
4047 complete(&adapter->fw_done);
4048 return rc;
4049 }
4050
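/* Handle a REQUEST_CAPABILITY response.  On PARTIALSUCCESS the request is
 * retried with the value the server can support; once every outstanding
 * capability response has arrived, query IP offload support.
 */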
4051 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
4052 struct ibmvnic_adapter *adapter)
4053 {
4054 struct device *dev = &adapter->vdev->dev;
4055 u64 *req_value;
4056 char *name;
4057
4058 atomic_dec(&adapter->running_cap_crqs);
4059 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
4060 case REQ_TX_QUEUES:
4061 req_value = &adapter->req_tx_queues;
4062 name = "tx";
4063 break;
4064 case REQ_RX_QUEUES:
4065 req_value = &adapter->req_rx_queues;
4066 name = "rx";
4067 break;
4068 case REQ_RX_ADD_QUEUES:
4069 req_value = &adapter->req_rx_add_queues;
4070 name = "rx_add";
4071 break;
4072 case REQ_TX_ENTRIES_PER_SUBCRQ:
4073 req_value = &adapter->req_tx_entries_per_subcrq;
4074 name = "tx_entries_per_subcrq";
4075 break;
4076 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
4077 req_value = &adapter->req_rx_add_entries_per_subcrq;
4078 name = "rx_add_entries_per_subcrq";
4079 break;
4080 case REQ_MTU:
4081 req_value = &adapter->req_mtu;
4082 name = "mtu";
4083 break;
4084 case PROMISC_REQUESTED:
4085 req_value = &adapter->promisc;
4086 name = "promisc";
4087 break;
4088 default:
4089 dev_err(dev, "Got invalid cap request rsp %d\n",
4090 crq->request_capability.capability);
4091 return;
4092 }
4093
4094 switch (crq->request_capability_rsp.rc.code) {
4095 case SUCCESS:
4096 break;
4097 case PARTIALSUCCESS:
4098 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
4099 *req_value,
4100 (long int)be64_to_cpu(crq->request_capability_rsp.
4101 number), name);
4102
4103 if (be16_to_cpu(crq->request_capability_rsp.capability) ==
4104 REQ_MTU) {
4105 pr_err("mtu of %llu is not supported. Reverting.\n",
4106 *req_value);
4107 *req_value = adapter->fallback.mtu;
4108 } else {
4109 *req_value =
4110 be64_to_cpu(crq->request_capability_rsp.number);
4111 }
4112
4113 ibmvnic_send_req_caps(adapter, 1);
4114 return;
4115 default:
4116 dev_err(dev, "Error %d in request cap rsp\n",
4117 crq->request_capability_rsp.rc.code);
4118 return;
4119 }
4120
4121 /* Done receiving requested capabilities, query IP offload support */
4122 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4123 union ibmvnic_crq newcrq;
4124 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
4125 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
4126 &adapter->ip_offload_buf;
4127
4128 adapter->wait_capability = false;
4129 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
4130 buf_sz,
4131 DMA_FROM_DEVICE);
4132
4133 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
4134 if (!firmware_has_feature(FW_FEATURE_CMO))
4135 dev_err(dev, "Couldn't map offload buffer\n");
4136 return;
4137 }
4138
4139 memset(&newcrq, 0, sizeof(newcrq));
4140 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
4141 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
4142 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
4143 newcrq.query_ip_offload.ioba =
4144 cpu_to_be32(adapter->ip_offload_tok);
4145
4146 ibmvnic_send_crq(adapter, &newcrq);
4147 }
4148 }
4149
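/* Handle the LOGIN response: unmap the login buffers, sanity-check the
 * queue counts the server granted, and complete init_done so the waiting
 * login path can continue.
 */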
4150 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
4151 struct ibmvnic_adapter *adapter)
4152 {
4153 struct device *dev = &adapter->vdev->dev;
4154 struct net_device *netdev = adapter->netdev;
4155 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
4156 struct ibmvnic_login_buffer *login = adapter->login_buf;
4157 int i;
4158
4159 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
4160 DMA_TO_DEVICE);
4161 dma_unmap_single(dev, adapter->login_rsp_buf_token,
4162 adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
4163
4164 /* If the number of queues requested can't be allocated by the
4165  * server, the login response will return with code 1. We will need
4166  * to resend the login buffer with fewer queues requested.
4167  */
4168 if (login_rsp_crq->generic.rc.code) {
4169 adapter->init_done_rc = login_rsp_crq->generic.rc.code;
4170 complete(&adapter->init_done);
4171 return 0;
4172 }
4173
4174 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4175
4176 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
4177 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
4178 netdev_dbg(adapter->netdev, "%016lx\n",
4179 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
4180 }
4181
4182 /* sanity-check the response against the queue counts we requested */
4183 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
4184 (be32_to_cpu(login->num_rxcomp_subcrqs) *
4185 adapter->req_rx_add_queues !=
4186 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
4187 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
4188 ibmvnic_remove(adapter->vdev);
4189 return -EIO;
4190 }
4191 release_login_buffer(adapter);
4192 complete(&adapter->init_done);
4193
4194 return 0;
4195 }
4196
4197 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
4198 struct ibmvnic_adapter *adapter)
4199 {
4200 struct device *dev = &adapter->vdev->dev;
4201 long rc;
4202
4203 rc = crq->request_unmap_rsp.rc.code;
4204 if (rc)
4205 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
4206 }
4207
4208 static void handle_query_map_rsp(union ibmvnic_crq *crq,
4209 struct ibmvnic_adapter *adapter)
4210 {
4211 struct net_device *netdev = adapter->netdev;
4212 struct device *dev = &adapter->vdev->dev;
4213 long rc;
4214
4215 rc = crq->query_map_rsp.rc.code;
4216 if (rc) {
4217 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
4218 return;
4219 }
4220 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
4221 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
4222 crq->query_map_rsp.free_pages);
4223 }
4224
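/* Record one queried capability (queue counts, MTU limits, offload and
 * SG support, etc.); when the last response arrives, send the capability
 * requests.
 */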
4225 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
4226 struct ibmvnic_adapter *adapter)
4227 {
4228 struct net_device *netdev = adapter->netdev;
4229 struct device *dev = &adapter->vdev->dev;
4230 long rc;
4231
4232 atomic_dec(&adapter->running_cap_crqs);
4233 netdev_dbg(netdev, "Outstanding queries: %d\n",
4234 atomic_read(&adapter->running_cap_crqs));
4235 rc = crq->query_capability.rc.code;
4236 if (rc) {
4237 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
4238 goto out;
4239 }
4240
4241 switch (be16_to_cpu(crq->query_capability.capability)) {
4242 case MIN_TX_QUEUES:
4243 adapter->min_tx_queues =
4244 be64_to_cpu(crq->query_capability.number);
4245 netdev_dbg(netdev, "min_tx_queues = %lld\n",
4246 adapter->min_tx_queues);
4247 break;
4248 case MIN_RX_QUEUES:
4249 adapter->min_rx_queues =
4250 be64_to_cpu(crq->query_capability.number);
4251 netdev_dbg(netdev, "min_rx_queues = %lld\n",
4252 adapter->min_rx_queues);
4253 break;
4254 case MIN_RX_ADD_QUEUES:
4255 adapter->min_rx_add_queues =
4256 be64_to_cpu(crq->query_capability.number);
4257 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
4258 adapter->min_rx_add_queues);
4259 break;
4260 case MAX_TX_QUEUES:
4261 adapter->max_tx_queues =
4262 be64_to_cpu(crq->query_capability.number);
4263 netdev_dbg(netdev, "max_tx_queues = %lld\n",
4264 adapter->max_tx_queues);
4265 break;
4266 case MAX_RX_QUEUES:
4267 adapter->max_rx_queues =
4268 be64_to_cpu(crq->query_capability.number);
4269 netdev_dbg(netdev, "max_rx_queues = %lld\n",
4270 adapter->max_rx_queues);
4271 break;
4272 case MAX_RX_ADD_QUEUES:
4273 adapter->max_rx_add_queues =
4274 be64_to_cpu(crq->query_capability.number);
4275 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
4276 adapter->max_rx_add_queues);
4277 break;
4278 case MIN_TX_ENTRIES_PER_SUBCRQ:
4279 adapter->min_tx_entries_per_subcrq =
4280 be64_to_cpu(crq->query_capability.number);
4281 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
4282 adapter->min_tx_entries_per_subcrq);
4283 break;
4284 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
4285 adapter->min_rx_add_entries_per_subcrq =
4286 be64_to_cpu(crq->query_capability.number);
4287 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
4288 adapter->min_rx_add_entries_per_subcrq);
4289 break;
4290 case MAX_TX_ENTRIES_PER_SUBCRQ:
4291 adapter->max_tx_entries_per_subcrq =
4292 be64_to_cpu(crq->query_capability.number);
4293 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
4294 adapter->max_tx_entries_per_subcrq);
4295 break;
4296 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
4297 adapter->max_rx_add_entries_per_subcrq =
4298 be64_to_cpu(crq->query_capability.number);
4299 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
4300 adapter->max_rx_add_entries_per_subcrq);
4301 break;
4302 case TCP_IP_OFFLOAD:
4303 adapter->tcp_ip_offload =
4304 be64_to_cpu(crq->query_capability.number);
4305 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
4306 adapter->tcp_ip_offload);
4307 break;
4308 case PROMISC_SUPPORTED:
4309 adapter->promisc_supported =
4310 be64_to_cpu(crq->query_capability.number);
4311 netdev_dbg(netdev, "promisc_supported = %lld\n",
4312 adapter->promisc_supported);
4313 break;
4314 case MIN_MTU:
4315 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
4316 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4317 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
4318 break;
4319 case MAX_MTU:
4320 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
4321 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4322 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
4323 break;
4324 case MAX_MULTICAST_FILTERS:
4325 adapter->max_multicast_filters =
4326 be64_to_cpu(crq->query_capability.number);
4327 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
4328 adapter->max_multicast_filters);
4329 break;
4330 case VLAN_HEADER_INSERTION:
4331 adapter->vlan_header_insertion =
4332 be64_to_cpu(crq->query_capability.number);
4333 if (adapter->vlan_header_insertion)
4334 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
4335 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
4336 adapter->vlan_header_insertion);
4337 break;
4338 case RX_VLAN_HEADER_INSERTION:
4339 adapter->rx_vlan_header_insertion =
4340 be64_to_cpu(crq->query_capability.number);
4341 netdev_dbg(netdev, "rx_vlan_header_insertion = %lld\n",
4342 adapter->rx_vlan_header_insertion);
4343 break;
4344 case MAX_TX_SG_ENTRIES:
4345 adapter->max_tx_sg_entries =
4346 be64_to_cpu(crq->query_capability.number);
4347 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
4348 adapter->max_tx_sg_entries);
4349 break;
4350 case RX_SG_SUPPORTED:
4351 adapter->rx_sg_supported =
4352 be64_to_cpu(crq->query_capability.number);
4353 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
4354 adapter->rx_sg_supported);
4355 break;
4356 case OPT_TX_COMP_SUB_QUEUES:
4357 adapter->opt_tx_comp_sub_queues =
4358 be64_to_cpu(crq->query_capability.number);
4359 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
4360 adapter->opt_tx_comp_sub_queues);
4361 break;
4362 case OPT_RX_COMP_QUEUES:
4363 adapter->opt_rx_comp_queues =
4364 be64_to_cpu(crq->query_capability.number);
4365 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
4366 adapter->opt_rx_comp_queues);
4367 break;
4368 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
4369 adapter->opt_rx_bufadd_q_per_rx_comp_q =
4370 be64_to_cpu(crq->query_capability.number);
4371 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
4372 adapter->opt_rx_bufadd_q_per_rx_comp_q);
4373 break;
4374 case OPT_TX_ENTRIES_PER_SUBCRQ:
4375 adapter->opt_tx_entries_per_subcrq =
4376 be64_to_cpu(crq->query_capability.number);
4377 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
4378 adapter->opt_tx_entries_per_subcrq);
4379 break;
4380 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
4381 adapter->opt_rxba_entries_per_subcrq =
4382 be64_to_cpu(crq->query_capability.number);
4383 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
4384 adapter->opt_rxba_entries_per_subcrq);
4385 break;
4386 case TX_RX_DESC_REQ:
4387 adapter->tx_rx_desc_req = crq->query_capability.number;
4388 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
4389 adapter->tx_rx_desc_req);
4390 break;
4391
4392 default:
4393 netdev_err(netdev, "Got invalid cap rsp %d\n",
4394 crq->query_capability.capability);
4395 }
4396
4397 out:
4398 if (atomic_read(&adapter->running_cap_crqs) == 0) {
4399 adapter->wait_capability = false;
4400 ibmvnic_send_req_caps(adapter, 0);
4401 }
4402 }
4403
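/* Send a QUERY_PHYS_PARMS CRQ and wait for the response, which is posted
 * to fw_done by ibmvnic_handle_crq().
 */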
4404 static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
4405 {
4406 union ibmvnic_crq crq;
4407 int rc;
4408
4409 memset(&crq, 0, sizeof(crq));
4410 crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
4411 crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
4412 reinit_completion(&adapter->fw_done);
4413 rc = ibmvnic_send_crq(adapter, &crq);
4414 if (rc)
4415 return rc;
4416 wait_for_completion(&adapter->fw_done);
4417 return adapter->fw_done_rc ? -EIO : 0;
4418 }
4419
4420 static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
4421 struct ibmvnic_adapter *adapter)
4422 {
4423 struct net_device *netdev = adapter->netdev;
4424 int rc;
4425 __be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
4426
4427 rc = crq->query_phys_parms_rsp.rc.code;
4428 if (rc) {
4429 netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
4430 return rc;
4431 }
4432 switch (rspeed) {
4433 case IBMVNIC_10MBPS:
4434 adapter->speed = SPEED_10;
4435 break;
4436 case IBMVNIC_100MBPS:
4437 adapter->speed = SPEED_100;
4438 break;
4439 case IBMVNIC_1GBPS:
4440 adapter->speed = SPEED_1000;
4441 break;
4442 case IBMVNIC_10GBP:
4443 adapter->speed = SPEED_10000;
4444 break;
4445 case IBMVNIC_25GBPS:
4446 adapter->speed = SPEED_25000;
4447 break;
4448 case IBMVNIC_40GBPS:
4449 adapter->speed = SPEED_40000;
4450 break;
4451 case IBMVNIC_50GBPS:
4452 adapter->speed = SPEED_50000;
4453 break;
4454 case IBMVNIC_100GBPS:
4455 adapter->speed = SPEED_100000;
4456 break;
4457 default:
4458 if (netif_carrier_ok(netdev))
4459 netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
4460 adapter->speed = SPEED_UNKNOWN;
4461 }
4462 if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
4463 adapter->duplex = DUPLEX_FULL;
4464 else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
4465 adapter->duplex = DUPLEX_HALF;
4466 else
4467 adapter->duplex = DUPLEX_UNKNOWN;
4468
4469 return rc;
4470 }
4471
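/* Dispatch a single CRQ message: initialization and transport events are
 * handled inline, command responses are routed to their handlers.
 */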
4472 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
4473 struct ibmvnic_adapter *adapter)
4474 {
4475 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
4476 struct net_device *netdev = adapter->netdev;
4477 struct device *dev = &adapter->vdev->dev;
4478 u64 *u64_crq = (u64 *)crq;
4479 long rc;
4480
4481 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
4482 (unsigned long int)cpu_to_be64(u64_crq[0]),
4483 (unsigned long int)cpu_to_be64(u64_crq[1]));
4484 switch (gen_crq->first) {
4485 case IBMVNIC_CRQ_INIT_RSP:
4486 switch (gen_crq->cmd) {
4487 case IBMVNIC_CRQ_INIT:
4488 dev_info(dev, "Partner initialized\n");
4489 adapter->from_passive_init = true;
4490 adapter->failover_pending = false;
4491 if (!completion_done(&adapter->init_done)) {
4492 complete(&adapter->init_done);
4493 adapter->init_done_rc = -EIO;
4494 }
4495 ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
4496 break;
4497 case IBMVNIC_CRQ_INIT_COMPLETE:
4498 dev_info(dev, "Partner initialization complete\n");
4499 adapter->crq.active = true;
4500 send_version_xchg(adapter);
4501 break;
4502 default:
4503 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
4504 }
4505 return;
4506 case IBMVNIC_CRQ_XPORT_EVENT:
4507 netif_carrier_off(netdev);
4508 adapter->crq.active = false;
4509 if (test_bit(0, &adapter->resetting))
4510 adapter->force_reset_recovery = true;
4511 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
4512 dev_info(dev, "Migrated, re-enabling adapter\n");
4513 ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
4514 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
4515 dev_info(dev, "Backing device failover detected\n");
4516 adapter->failover_pending = true;
4517 } else {
4518 /* The adapter lost the connection */
4519 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
4520 gen_crq->cmd);
4521 ibmvnic_reset(adapter, VNIC_RESET_FATAL);
4522 }
4523 return;
4524 case IBMVNIC_CRQ_CMD_RSP:
4525 break;
4526 default:
4527 dev_err(dev, "Got an invalid msg type 0x%02x\n",
4528 gen_crq->first);
4529 return;
4530 }
4531
4532 switch (gen_crq->cmd) {
4533 case VERSION_EXCHANGE_RSP:
4534 rc = crq->version_exchange_rsp.rc.code;
4535 if (rc) {
4536 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
4537 break;
4538 }
4539 ibmvnic_version =
4540 be16_to_cpu(crq->version_exchange_rsp.version);
4541 dev_info(dev, "Partner protocol version is %d\n",
4542 ibmvnic_version);
4543 send_cap_queries(adapter);
4544 break;
4545 case QUERY_CAPABILITY_RSP:
4546 handle_query_cap_rsp(crq, adapter);
4547 break;
4548 case QUERY_MAP_RSP:
4549 handle_query_map_rsp(crq, adapter);
4550 break;
4551 case REQUEST_MAP_RSP:
4552 adapter->fw_done_rc = crq->request_map_rsp.rc.code;
4553 complete(&adapter->fw_done);
4554 break;
4555 case REQUEST_UNMAP_RSP:
4556 handle_request_unmap_rsp(crq, adapter);
4557 break;
4558 case REQUEST_CAPABILITY_RSP:
4559 handle_request_cap_rsp(crq, adapter);
4560 break;
4561 case LOGIN_RSP:
4562 netdev_dbg(netdev, "Got Login Response\n");
4563 handle_login_rsp(crq, adapter);
4564 break;
4565 case LOGICAL_LINK_STATE_RSP:
4566 netdev_dbg(netdev,
4567 "Got Logical Link State Response, state: %d rc: %d\n",
4568 crq->logical_link_state_rsp.link_state,
4569 crq->logical_link_state_rsp.rc.code);
4570 adapter->logical_link_state =
4571 crq->logical_link_state_rsp.link_state;
4572 adapter->init_done_rc = crq->logical_link_state_rsp.rc.code;
4573 complete(&adapter->init_done);
4574 break;
4575 case LINK_STATE_INDICATION:
4576 netdev_dbg(netdev, "Got Logical Link State Indication\n");
4577 adapter->phys_link_state =
4578 crq->link_state_indication.phys_link_state;
4579 adapter->logical_link_state =
4580 crq->link_state_indication.logical_link_state;
4581 if (adapter->phys_link_state && adapter->logical_link_state)
4582 netif_carrier_on(netdev);
4583 else
4584 netif_carrier_off(netdev);
4585 break;
4586 case CHANGE_MAC_ADDR_RSP:
4587 netdev_dbg(netdev, "Got MAC address change Response\n");
4588 adapter->fw_done_rc = handle_change_mac_rsp(crq, adapter);
4589 break;
4590 case ERROR_INDICATION:
4591 netdev_dbg(netdev, "Got Error Indication\n");
4592 handle_error_indication(crq, adapter);
4593 break;
4594 case REQUEST_STATISTICS_RSP:
4595 netdev_dbg(netdev, "Got Statistics Response\n");
4596 complete(&adapter->stats_done);
4597 break;
4598 case QUERY_IP_OFFLOAD_RSP:
4599 netdev_dbg(netdev, "Got Query IP offload Response\n");
4600 handle_query_ip_offload_rsp(adapter);
4601 break;
4602 case MULTICAST_CTRL_RSP:
4603 netdev_dbg(netdev, "Got multicast control Response\n");
4604 break;
4605 case CONTROL_IP_OFFLOAD_RSP:
4606 netdev_dbg(netdev, "Got Control IP offload Response\n");
4607 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
4608 sizeof(adapter->ip_offload_ctrl),
4609 DMA_TO_DEVICE);
4610 complete(&adapter->init_done);
4611 break;
4612 case COLLECT_FW_TRACE_RSP:
4613 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
4614 complete(&adapter->fw_done);
4615 break;
4616 case GET_VPD_SIZE_RSP:
4617 handle_vpd_size_rsp(crq, adapter);
4618 break;
4619 case GET_VPD_RSP:
4620 handle_vpd_rsp(crq, adapter);
4621 break;
4622 case QUERY_PHYS_PARMS_RSP:
4623 adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
4624 complete(&adapter->fw_done);
4625 break;
4626 default:
4627 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
4628 gen_crq->cmd);
4629 }
4630 }
4631
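/* CRQ interrupt handler: defer all processing to the tasklet. */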
4632 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
4633 {
4634 struct ibmvnic_adapter *adapter = instance;
4635
4636 tasklet_schedule(&adapter->tasklet);
4637 return IRQ_HANDLED;
4638 }
4639
4640 static void ibmvnic_tasklet(void *data)
4641 {
4642 struct ibmvnic_adapter *adapter = data;
4643 struct ibmvnic_crq_queue *queue = &adapter->crq;
4644 union ibmvnic_crq *crq;
4645 unsigned long flags;
4646 bool done = false;
4647
4648 spin_lock_irqsave(&queue->lock, flags);
4649 while (!done) {
4650 /* Pull all the valid messages off the CRQ */
4651 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
4652 ibmvnic_handle_crq(crq, adapter);
4653 crq->generic.first = 0;
4654 }
4655
4656 /* remain in the tasklet until all
4657  * capabilities responses are received
4658  */
4659 if (!adapter->wait_capability)
4660 done = true;
4661 }
4662
4663 /* if capability CRQs were sent in this tasklet, the next run must wait
4664  * until all of their responses have been received */
4665 if (atomic_read(&adapter->running_cap_crqs) != 0)
4666 adapter->wait_capability = true;
4667 spin_unlock_irqrestore(&queue->lock, flags);
4668 }
4669
4670 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
4671 {
4672 struct vio_dev *vdev = adapter->vdev;
4673 int rc;
4674
4675 do {
4676 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
4677 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
4678
4679 if (rc)
4680 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
4681
4682 return rc;
4683 }
4684
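/* Reset the main CRQ with the hypervisor: close it, clear the message
 * page, and register it again.
 */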
4685 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
4686 {
4687 struct ibmvnic_crq_queue *crq = &adapter->crq;
4688 struct device *dev = &adapter->vdev->dev;
4689 struct vio_dev *vdev = adapter->vdev;
4690 int rc;
4691
4692 /* Close the CRQ */
4693 do {
4694 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4695 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4696
4697 /* Clean out the queue */
4698 memset(crq->msgs, 0, PAGE_SIZE);
4699 crq->cur = 0;
4700 crq->active = false;
4701
4702 /* And re-open it again */
4703 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4704 crq->msg_token, PAGE_SIZE);
4705
4706 if (rc == H_CLOSED)
4707 /* Adapter is good, but other end is not ready */
4708 dev_warn(dev, "Partner adapter not ready\n");
4709 else if (rc != 0)
4710 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
4711
4712 return rc;
4713 }
4714
4715 static void release_crq_queue(struct ibmvnic_adapter *adapter)
4716 {
4717 struct ibmvnic_crq_queue *crq = &adapter->crq;
4718 struct vio_dev *vdev = adapter->vdev;
4719 long rc;
4720
4721 if (!crq->msgs)
4722 return;
4723
4724 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
4725 free_irq(vdev->irq, adapter);
4726 tasklet_kill(&adapter->tasklet);
4727 do {
4728 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4729 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4730
4731 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
4732 DMA_BIDIRECTIONAL);
4733 free_page((unsigned long)crq->msgs);
4734 crq->msgs = NULL;
4735 crq->active = false;
4736 }
4737
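/* Allocate and DMA-map one page of CRQ messages, register the queue with
 * the hypervisor, and hook up the CRQ interrupt and tasklet.
 */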
4738 static int init_crq_queue(struct ibmvnic_adapter *adapter)
4739 {
4740 struct ibmvnic_crq_queue *crq = &adapter->crq;
4741 struct device *dev = &adapter->vdev->dev;
4742 struct vio_dev *vdev = adapter->vdev;
4743 int rc, retrc = -ENOMEM;
4744
4745 if (crq->msgs)
4746 return 0;
4747
4748 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
4749 /* a single zeroed page holds all of the CRQ message slots */
4750
4751 if (!crq->msgs)
4752 return -ENOMEM;
4753
4754 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
4755 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
4756 DMA_BIDIRECTIONAL);
4757 if (dma_mapping_error(dev, crq->msg_token))
4758 goto map_failed;
4759
4760 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
4761 crq->msg_token, PAGE_SIZE);
4762
4763 if (rc == H_RESOURCE)
4764 /* maybe kexecing and resource is busy. try a reset */
4765 rc = ibmvnic_reset_crq(adapter);
4766 retrc = rc;
4767
4768 if (rc == H_CLOSED) {
4769 dev_warn(dev, "Partner adapter not ready\n");
4770 } else if (rc) {
4771 dev_warn(dev, "Error %d opening adapter\n", rc);
4772 goto reg_crq_failed;
4773 }
4774
4775 retrc = 0;
4776
4777 tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
4778 (unsigned long)adapter);
4779
4780 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
4781 snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
4782 adapter->vdev->unit_address);
4783 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, crq->name, adapter);
4784 if (rc) {
4785 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
4786 vdev->irq, rc);
4787 goto req_irq_failed;
4788 }
4789
4790 rc = vio_enable_interrupts(vdev);
4791 if (rc) {
4792 dev_err(dev, "Error %d enabling interrupts\n", rc);
4793 goto req_irq_failed;
4794 }
4795
4796 crq->cur = 0;
4797 spin_lock_init(&crq->lock);
4798
4799 return retrc;
4800
4801 req_irq_failed:
4802 tasklet_kill(&adapter->tasklet);
4803 do {
4804 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
4805 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
4806 reg_crq_failed:
4807 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
4808 map_failed:
4809 free_page((unsigned long)crq->msgs);
4810 crq->msgs = NULL;
4811 return retrc;
4812 }
4813
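/* Re-drive the CRQ initialization handshake during a reset, then rebuild
 * or reset the sub-CRQs depending on whether the requested queue counts
 * changed.
 */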
4814 static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4815 {
4816 struct device *dev = &adapter->vdev->dev;
4817 unsigned long timeout = msecs_to_jiffies(30000);
4818 u64 old_num_rx_queues, old_num_tx_queues;
4819 int rc;
4820
4821 adapter->from_passive_init = false;
4822
4823 old_num_rx_queues = adapter->req_rx_queues;
4824 old_num_tx_queues = adapter->req_tx_queues;
4825
4826 reinit_completion(&adapter->init_done);
4827 adapter->init_done_rc = 0;
4828 ibmvnic_send_crq_init(adapter);
4829 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4830 dev_err(dev, "Initialization sequence timed out\n");
4831 return -1;
4832 }
4833
4834 if (adapter->init_done_rc) {
4835 release_crq_queue(adapter);
4836 return adapter->init_done_rc;
4837 }
4838
4839 if (adapter->from_passive_init) {
4840 adapter->state = VNIC_OPEN;
4841 adapter->from_passive_init = false;
4842 return -1;
4843 }
4844
4845 if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
4846 adapter->reset_reason != VNIC_RESET_MOBILITY) {
4847 if (adapter->req_rx_queues != old_num_rx_queues ||
4848 adapter->req_tx_queues != old_num_tx_queues) {
4849 release_sub_crqs(adapter, 0);
4850 rc = init_sub_crqs(adapter);
4851 } else {
4852 rc = reset_sub_crq_queues(adapter);
4853 }
4854 } else {
4855 rc = init_sub_crqs(adapter);
4856 }
4857
4858 if (rc) {
4859 dev_err(dev, "Initialization of sub crqs failed\n");
4860 release_crq_queue(adapter);
4861 return rc;
4862 }
4863
4864 rc = init_sub_crq_irqs(adapter);
4865 if (rc) {
4866 dev_err(dev, "Failed to initialize sub crq irqs\n");
4867 release_crq_queue(adapter);
4868 }
4869
4870 return rc;
4871 }
4872
4873 static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4874 {
4875 struct device *dev = &adapter->vdev->dev;
4876 unsigned long timeout = msecs_to_jiffies(30000);
4877 int rc;
4878
4879 adapter->from_passive_init = false;
4880
4881 adapter->init_done_rc = 0;
4882 ibmvnic_send_crq_init(adapter);
4883 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
4884 dev_err(dev, "Initialization sequence timed out\n");
4885 return -1;
4886 }
4887
4888 if (adapter->init_done_rc) {
4889 release_crq_queue(adapter);
4890 return adapter->init_done_rc;
4891 }
4892
4893 if (adapter->from_passive_init) {
4894 adapter->state = VNIC_OPEN;
4895 adapter->from_passive_init = false;
4896 return -1;
4897 }
4898
4899 rc = init_sub_crqs(adapter);
4900 if (rc) {
4901 dev_err(dev, "Initialization of sub crqs failed\n");
4902 release_crq_queue(adapter);
4903 return rc;
4904 }
4905
4906 rc = init_sub_crq_irqs(adapter);
4907 if (rc) {
4908 dev_err(dev, "Failed to initialize sub crq irqs\n");
4909 release_crq_queue(adapter);
4910 }
4911
4912 return rc;
4913 }
4914
4915 static struct device_attribute dev_attr_failover;
4916
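/* Probe: allocate the net_device, bring up the CRQ and run the
 * initialization handshake, set up statistics buffers and the failover
 * sysfs attribute, then register the netdev.
 */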
4917 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4918 {
4919 struct ibmvnic_adapter *adapter;
4920 struct net_device *netdev;
4921 unsigned char *mac_addr_p;
4922 int rc;
4923
4924 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
4925 dev->unit_address);
4926
4927 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
4928 VETH_MAC_ADDR, NULL);
4929 if (!mac_addr_p) {
4930 dev_err(&dev->dev,
4931 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
4932 __FILE__, __LINE__);
4933 return 0;
4934 }
4935
4936 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
4937 IBMVNIC_MAX_QUEUES);
4938 if (!netdev)
4939 return -ENOMEM;
4940
4941 adapter = netdev_priv(netdev);
4942 adapter->state = VNIC_PROBING;
4943 dev_set_drvdata(&dev->dev, netdev);
4944 adapter->vdev = dev;
4945 adapter->netdev = netdev;
4946
4947 ether_addr_copy(adapter->mac_addr, mac_addr_p);
4948 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
4949 netdev->irq = dev->irq;
4950 netdev->netdev_ops = &ibmvnic_netdev_ops;
4951 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
4952 SET_NETDEV_DEV(netdev, &dev->dev);
4953
4954 spin_lock_init(&adapter->stats_lock);
4955
4956 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4957 INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
4958 __ibmvnic_delayed_reset);
4959 INIT_LIST_HEAD(&adapter->rwi_list);
4960 spin_lock_init(&adapter->rwi_lock);
4961 init_completion(&adapter->init_done);
4962 init_completion(&adapter->fw_done);
4963 init_completion(&adapter->reset_done);
4964 init_completion(&adapter->stats_done);
4965 clear_bit(0, &adapter->resetting);
4966
4967 do {
4968 rc = init_crq_queue(adapter);
4969 if (rc) {
4970 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
4971 rc);
4972 goto ibmvnic_init_fail;
4973 }
4974
4975 rc = ibmvnic_init(adapter);
4976 if (rc && rc != EAGAIN)
4977 goto ibmvnic_init_fail;
4978 } while (rc == EAGAIN);
4979
4980 rc = init_stats_buffers(adapter);
4981 if (rc)
4982 goto ibmvnic_init_fail;
4983
4984 rc = init_stats_token(adapter);
4985 if (rc)
4986 goto ibmvnic_stats_fail;
4987
4988 netdev->mtu = adapter->req_mtu - ETH_HLEN;
4989 netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
4990 netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
4991
4992 rc = device_create_file(&dev->dev, &dev_attr_failover);
4993 if (rc)
4994 goto ibmvnic_dev_file_err;
4995
4996 netif_carrier_off(netdev);
4997 rc = register_netdev(netdev);
4998 if (rc) {
4999 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
5000 goto ibmvnic_register_fail;
5001 }
5002 dev_info(&dev->dev, "ibmvnic registered\n");
5003
5004 adapter->state = VNIC_PROBED;
5005
5006 adapter->wait_for_reset = false;
5007
5008 return 0;
5009
5010 ibmvnic_register_fail:
5011 device_remove_file(&dev->dev, &dev_attr_failover);
5012
5013 ibmvnic_dev_file_err:
5014 release_stats_token(adapter);
5015
5016 ibmvnic_stats_fail:
5017 release_stats_buffers(adapter);
5018
5019 ibmvnic_init_fail:
5020 release_sub_crqs(adapter, 1);
5021 release_crq_queue(adapter);
5022 free_netdev(netdev);
5023
5024 return rc;
5025 }
5026
5027 static int ibmvnic_remove(struct vio_dev *dev)
5028 {
5029 struct net_device *netdev = dev_get_drvdata(&dev->dev);
5030 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5031
5032 adapter->state = VNIC_REMOVING;
5033 rtnl_lock();
5034 unregister_netdevice(netdev);
5035
5036 release_resources(adapter);
5037 release_sub_crqs(adapter, 1);
5038 release_crq_queue(adapter);
5039
5040 release_stats_token(adapter);
5041 release_stats_buffers(adapter);
5042
5043 adapter->state = VNIC_REMOVED;
5044
5045 rtnl_unlock();
5046 device_remove_file(&dev->dev, &dev_attr_failover);
5047 free_netdev(netdev);
5048 dev_set_drvdata(&dev->dev, NULL);
5049
5050 return 0;
5051 }
5052
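/* Writing "1" to the failover sysfs attribute asks the hypervisor, via
 * H_VIOCTL, to initiate a client failover for this adapter.
 */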
5053 static ssize_t failover_store(struct device *dev, struct device_attribute *attr,
5054 const char *buf, size_t count)
5055 {
5056 struct net_device *netdev = dev_get_drvdata(dev);
5057 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5058 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
5059 __be64 session_token;
5060 long rc;
5061
5062 if (!sysfs_streq(buf, "1"))
5063 return -EINVAL;
5064
5065 rc = plpar_hcall(H_VIOCTL, retbuf, adapter->vdev->unit_address,
5066 H_GET_SESSION_TOKEN, 0, 0, 0);
5067 if (rc) {
5068 netdev_err(netdev, "Couldn't retrieve session token, rc %ld\n",
5069 rc);
5070 return -EINVAL;
5071 }
5072
5073 session_token = (__be64)retbuf[0];
5074 netdev_dbg(netdev, "Initiating client failover, session id %llx\n",
5075 be64_to_cpu(session_token));
5076 rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
5077 H_SESSION_ERR_DETECTED, session_token, 0, 0);
5078 if (rc) {
5079 netdev_err(netdev, "Client initiated failover failed, rc %ld\n",
5080 rc);
5081 return -EINVAL;
5082 }
5083
5084 return count;
5085 }
5086
5087 static DEVICE_ATTR_WO(failover);
5088
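/* Estimate the IO entitlement this adapter needs: the CRQ page, the
 * statistics buffer, per-queue sub-CRQ pages, and the RX buffer pools.
 */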
5089 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
5090 {
5091 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
5092 struct ibmvnic_adapter *adapter;
5093 struct iommu_table *tbl;
5094 unsigned long ret = 0;
5095 int i;
5096
5097 tbl = get_iommu_table_base(&vdev->dev);
5098
5099 /* netdev inits at probe time along with the structures we need below */
5100 if (!netdev)
5101 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
5102
5103 adapter = netdev_priv(netdev);
5104
5105 ret += PAGE_SIZE;
5106 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
5107
5108 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
5109 ret += 4 * PAGE_SIZE;
5110
5111 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
5112 i++)
5113 ret += adapter->rx_pool[i].size *
5114 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
5115
5116 return ret;
5117 }
5118
5119 static int ibmvnic_resume(struct device *dev)
5120 {
5121 struct net_device *netdev = dev_get_drvdata(dev);
5122 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
5123
5124 if (adapter->state != VNIC_OPEN)
5125 return 0;
5126
5127 tasklet_schedule(&adapter->tasklet);
5128
5129 return 0;
5130 }
5131
5132 static const struct vio_device_id ibmvnic_device_table[] = {
5133 {"network", "IBM,vnic"},
5134 {"", "" }
5135 };
5136 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
5137
5138 static const struct dev_pm_ops ibmvnic_pm_ops = {
5139 .resume = ibmvnic_resume
5140 };
5141
5142 static struct vio_driver ibmvnic_driver = {
5143 .id_table = ibmvnic_device_table,
5144 .probe = ibmvnic_probe,
5145 .remove = ibmvnic_remove,
5146 .get_desired_dma = ibmvnic_get_desired_dma,
5147 .name = ibmvnic_driver_name,
5148 .pm = &ibmvnic_pm_ops,
5149 };
5150
5151
5152 static int __init ibmvnic_module_init(void)
5153 {
5154 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
5155 IBMVNIC_DRIVER_VERSION);
5156
5157 return vio_register_driver(&ibmvnic_driver);
5158 }
5159
5160 static void __exit ibmvnic_module_exit(void)
5161 {
5162 vio_unregister_driver(&ibmvnic_driver);
5163 }
5164
5165 module_init(ibmvnic_module_init);
5166 module_exit(ibmvnic_module_exit);