This source file includes the following definitions:
- vxge_hw_vpath_intr_enable
- vxge_hw_vpath_intr_disable
- vxge_hw_vpath_tti_ci_set
- vxge_hw_vpath_dynamic_rti_ci_set
- vxge_hw_vpath_dynamic_tti_rtimer_set
- vxge_hw_vpath_dynamic_rti_rtimer_set
- vxge_hw_channel_msix_mask
- vxge_hw_channel_msix_unmask
- vxge_hw_channel_msix_clear
- vxge_hw_device_set_intr_type
- vxge_hw_device_intr_enable
- vxge_hw_device_intr_disable
- vxge_hw_device_mask_all
- vxge_hw_device_unmask_all
- vxge_hw_device_flush_io
- __vxge_hw_device_handle_error
- __vxge_hw_device_handle_link_down_ind
- __vxge_hw_device_handle_link_up_ind
- __vxge_hw_vpath_alarm_process
- vxge_hw_device_begin_irq
- vxge_hw_device_clear_tx_rx
- vxge_hw_channel_dtr_alloc
- vxge_hw_channel_dtr_post
- vxge_hw_channel_dtr_try_complete
- vxge_hw_channel_dtr_complete
- vxge_hw_channel_dtr_free
- vxge_hw_channel_dtr_count
- vxge_hw_ring_rxd_reserve
- vxge_hw_ring_rxd_free
- vxge_hw_ring_rxd_pre_post
- vxge_hw_ring_rxd_post_post
- vxge_hw_ring_rxd_post
- vxge_hw_ring_rxd_post_post_wmb
- vxge_hw_ring_rxd_next_completed
- vxge_hw_ring_handle_tcode
- __vxge_hw_non_offload_db_post
- vxge_hw_fifo_free_txdl_count_get
- vxge_hw_fifo_txdl_reserve
- vxge_hw_fifo_txdl_buffer_set
- vxge_hw_fifo_txdl_post
- vxge_hw_fifo_txdl_next_completed
- vxge_hw_fifo_handle_tcode
- vxge_hw_fifo_txdl_free
- vxge_hw_vpath_mac_addr_add
- vxge_hw_vpath_mac_addr_get
- vxge_hw_vpath_mac_addr_get_next
- vxge_hw_vpath_mac_addr_delete
- vxge_hw_vpath_vid_add
- vxge_hw_vpath_vid_delete
- vxge_hw_vpath_promisc_enable
- vxge_hw_vpath_promisc_disable
- vxge_hw_vpath_bcast_enable
- vxge_hw_vpath_mcast_enable
- vxge_hw_vpath_mcast_disable
- vxge_hw_vpath_alarm_process
- vxge_hw_vpath_msix_set
- vxge_hw_vpath_msix_mask
- vxge_hw_vpath_msix_clear
- vxge_hw_vpath_msix_unmask
- vxge_hw_vpath_inta_mask_tx_rx
- vxge_hw_vpath_inta_unmask_tx_rx
- vxge_hw_vpath_poll_rx
- vxge_hw_vpath_poll_tx
/*
 * vxge-traffic.c: Transmit and receive traffic handling for the vxge driver
 * (Neterion/Exar X3100 series 10GbE PCIe I/O Virtualized Server Adapter).
 */
14 #include <linux/etherdevice.h>
15 #include <linux/io-64-nonatomic-lo-hi.h>
16 #include <linux/prefetch.h>
17
18 #include "vxge-traffic.h"
19 #include "vxge-config.h"
20 #include "vxge-main.h"
21
/*
 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Clears the pending alarm registers and unmasks the interrupt sources of
 * the vpath. Returns VXGE_HW_OK on success, or an error status if the
 * handle is invalid or the vpath is not open.
 */
31 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
32 {
33 u64 val64;
34
35 struct __vxge_hw_virtualpath *vpath;
36 struct vxge_hw_vpath_reg __iomem *vp_reg;
37 enum vxge_hw_status status = VXGE_HW_OK;
38 if (vp == NULL) {
39 status = VXGE_HW_ERR_INVALID_HANDLE;
40 goto exit;
41 }
42
43 vpath = vp->vpath;
44
45 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
46 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
47 goto exit;
48 }
49
50 vp_reg = vpath->vp_reg;
51
52 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
53
54 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
55 &vp_reg->general_errors_reg);
56
57 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
58 &vp_reg->pci_config_errors_reg);
59
60 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
61 &vp_reg->mrpcim_to_vpath_alarm_reg);
62
63 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
64 &vp_reg->srpcim_to_vpath_alarm_reg);
65
66 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
67 &vp_reg->vpath_ppif_int_status);
68
69 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
70 &vp_reg->srpcim_msg_to_vpath_reg);
71
72 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
73 &vp_reg->vpath_pcipif_int_status);
74
75 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
76 &vp_reg->prc_alarm_reg);
77
78 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
79 &vp_reg->wrdma_alarm_status);
80
81 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
82 &vp_reg->asic_ntwk_vp_err_reg);
83
84 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
85 &vp_reg->xgmac_vp_int_status);
86
87 val64 = readq(&vp_reg->vpath_general_int_status);
88
/* Mask the interrupt sources that are not handled individually */
91 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
92 &vp_reg->vpath_pcipif_int_mask);
93
94 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
95 &vp_reg->srpcim_msg_to_vpath_mask);
96
97 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
98 &vp_reg->srpcim_to_vpath_alarm_mask);
99
100 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
101 &vp_reg->mrpcim_to_vpath_alarm_mask);
102
103 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
104 &vp_reg->pci_config_errors_mask);
105
106
107
108 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
109 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
110 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
111 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
112 &vp_reg->general_errors_mask);
113
114 __vxge_hw_pio_mem_write32_upper(
115 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
119 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
120 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
121 &vp_reg->kdfcctl_errors_mask);
122
123 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
124
125 __vxge_hw_pio_mem_write32_upper(
126 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
127 &vp_reg->prc_alarm_mask);
128
129 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
130 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
131
132 if (vpath->hldev->first_vp_id != vpath->vp_id)
133 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
134 &vp_reg->asic_ntwk_vp_err_mask);
135 else
136 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
137 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
138 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
139 &vp_reg->asic_ntwk_vp_err_mask);
140
141 __vxge_hw_pio_mem_write32_upper(0,
142 &vp_reg->vpath_general_int_mask);
143 exit:
144 return status;
145
146 }
147
/*
 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
 * @vp: Virtual Path handle.
 *
 * Masks all interrupt sources of the vpath. Returns VXGE_HW_OK on success,
 * or an error status if the handle is invalid or the vpath is not open.
 */
157 enum vxge_hw_status vxge_hw_vpath_intr_disable(
158 struct __vxge_hw_vpath_handle *vp)
159 {
160 u64 val64;
161
162 struct __vxge_hw_virtualpath *vpath;
163 enum vxge_hw_status status = VXGE_HW_OK;
164 struct vxge_hw_vpath_reg __iomem *vp_reg;
165 if (vp == NULL) {
166 status = VXGE_HW_ERR_INVALID_HANDLE;
167 goto exit;
168 }
169
170 vpath = vp->vpath;
171
172 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
173 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
174 goto exit;
175 }
176 vp_reg = vpath->vp_reg;
177
178 __vxge_hw_pio_mem_write32_upper(
179 (u32)VXGE_HW_INTR_MASK_ALL,
180 &vp_reg->vpath_general_int_mask);
181
182 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
183
184 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
185
186 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
187 &vp_reg->general_errors_mask);
188
189 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
190 &vp_reg->pci_config_errors_mask);
191
192 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
193 &vp_reg->mrpcim_to_vpath_alarm_mask);
194
195 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
196 &vp_reg->srpcim_to_vpath_alarm_mask);
197
198 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
199 &vp_reg->vpath_ppif_int_mask);
200
201 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
202 &vp_reg->srpcim_msg_to_vpath_mask);
203
204 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
205 &vp_reg->vpath_pcipif_int_mask);
206
207 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
208 &vp_reg->wrdma_alarm_mask);
209
210 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
211 &vp_reg->prc_alarm_mask);
212
213 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
214 &vp_reg->xgmac_vp_int_mask);
215
216 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
217 &vp_reg->asic_ntwk_vp_err_mask);
218
219 exit:
220 return status;
221 }
222
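/*
 * vxge_hw_vpath_tti_ci_set - Enable the TIM continuous-interrupt (CI) bit
 * for the fifo's Tx interrupt, if it is not already enabled, and remember
 * the programmed value in tim_tti_cfg1_saved.
 * @fifo: Fifo handle.
 */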
223 void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
224 {
225 struct vxge_hw_vpath_reg __iomem *vp_reg;
226 struct vxge_hw_vp_config *config;
227 u64 val64;
228
229 if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
230 return;
231
232 vp_reg = fifo->vp_reg;
233 config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);
234
235 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
236 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
237 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
238 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
239 fifo->tim_tti_cfg1_saved = val64;
240 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
241 }
242 }
243
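/*
 * vxge_hw_vpath_dynamic_rti_ci_set - Set the TIM continuous-interrupt (CI)
 * bit for the ring's Rx interrupt and remember it in tim_rti_cfg1_saved.
 * @ring: Ring handle.
 */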
244 void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
245 {
246 u64 val64 = ring->tim_rti_cfg1_saved;
247
248 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
249 ring->tim_rti_cfg1_saved = val64;
250 writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
251 }
252
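/*
 * vxge_hw_vpath_dynamic_tti_rtimer_set - Reprogram the Tx interrupt
 * restriction timer from fifo->rtimer, converted from microseconds to the
 * adapter's 272 ns timer units.
 * @fifo: Fifo handle.
 */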
253 void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
254 {
255 u64 val64 = fifo->tim_tti_cfg3_saved;
256 u64 timer = (fifo->rtimer * 1000) / 272;
257
258 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
259 if (timer)
260 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
261 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);
262
263 writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
264
/* Only the register is rewritten here; fifo->tim_tti_cfg3_saved itself
 * is not updated.
 */
267 }
268
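/*
 * vxge_hw_vpath_dynamic_rti_rtimer_set - Reprogram the Rx interrupt
 * restriction timer from ring->rtimer, converted from microseconds to the
 * adapter's 272 ns timer units.
 * @ring: Ring handle.
 */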
269 void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
270 {
271 u64 val64 = ring->tim_rti_cfg3_saved;
272 u64 timer = (ring->rtimer * 1000) / 272;
273
274 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
275 if (timer)
276 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
277 VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);
278
279 writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
280
/* Only the register is rewritten here; ring->tim_rti_cfg3_saved itself
 * is not updated.
 */
283 }
284
/*
 * vxge_hw_channel_msix_mask - Mask the MSI-X vector for the channel.
 * @channel: Channel for rx or tx handle.
 * @msix_id: MSI-X ID.
 */
294 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
295 {
296
297 __vxge_hw_pio_mem_write32_upper(
298 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
299 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
300 }
301
/*
 * vxge_hw_channel_msix_unmask - Unmask the MSI-X vector for the channel.
 * @channel: Channel for rx or tx handle.
 * @msix_id: MSI-X ID.
 */
311 void
312 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
313 {
314
315 __vxge_hw_pio_mem_write32_upper(
316 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
317 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
318 }
319
/*
 * vxge_hw_channel_msix_clear - Clear (re-arm) the one-shot MSI-X vector
 * for the channel.
 * @channel: Channel for rx or tx handle.
 * @msix_id: MSI-X ID.
 */
330 void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
331 {
332 __vxge_hw_pio_mem_write32_upper(
333 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
334 &channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
335 }
336
/*
 * vxge_hw_device_set_intr_type - Record the interrupt type in the device
 * configuration, falling back to INTA (IRQ line) for unsupported modes.
 * @hldev: HW device handle.
 * @intr_mode: Requested interrupt mode.
 */
343 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
344 {
345
346 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
347 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
348 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
349 (intr_mode != VXGE_HW_INTR_MODE_DEF))
350 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
351
352 hldev->config.intr_mode = intr_mode;
353 return intr_mode;
354 }
355
/*
 * vxge_hw_device_intr_enable - Enable device interrupts.
 * @hldev: HW device handle.
 *
 * Enables the interrupts of each deployed vpath and, in INTA mode, unmasks
 * the TIM traffic interrupts before unmasking the device.
 */
367 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
368 {
369 u32 i;
370 u64 val64;
371 u32 val32;
372
373 vxge_hw_device_mask_all(hldev);
374
375 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
376
377 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
378 continue;
379
380 vxge_hw_vpath_intr_enable(
381 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
382 }
383
384 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
385 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
386 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
387
388 if (val64 != 0) {
389 writeq(val64, &hldev->common_reg->tim_int_status0);
390
391 writeq(~val64, &hldev->common_reg->tim_int_mask0);
392 }
393
394 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
395 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
396
397 if (val32 != 0) {
398 __vxge_hw_pio_mem_write32_upper(val32,
399 &hldev->common_reg->tim_int_status1);
400
401 __vxge_hw_pio_mem_write32_upper(~val32,
402 &hldev->common_reg->tim_int_mask1);
403 }
404 }
405
406 val64 = readq(&hldev->common_reg->titan_general_int_status);
407
408 vxge_hw_device_unmask_all(hldev);
409 }
410
/*
 * vxge_hw_device_intr_disable - Disable device interrupts.
 * @hldev: HW device handle.
 *
 * Masks all device interrupts and disables the interrupts of each deployed
 * vpath.
 */
421 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
422 {
423 u32 i;
424
425 vxge_hw_device_mask_all(hldev);
426
427
428 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
429 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
430 &hldev->common_reg->tim_int_mask1);
431
432 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
433
434 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
435 continue;
436
437 vxge_hw_vpath_intr_disable(
438 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
439 }
440 }
441
/*
 * vxge_hw_device_mask_all - Mask all device interrupts (alarms and traffic).
 * @hldev: HW device handle.
 */
450 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
451 {
452 u64 val64;
453
454 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
455 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
456
457 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
458 &hldev->common_reg->titan_mask_all_int);
459 }
460
/*
 * vxge_hw_device_unmask_all - Unmask device interrupts.
 * @hldev: HW device handle.
 *
 * In INTA mode only the traffic interrupts are unmasked; alarms stay masked.
 */
469 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
470 {
471 u64 val64 = 0;
472
473 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
474 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
475
476 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
477 &hldev->common_reg->titan_mask_all_int);
478 }
479
/*
 * vxge_hw_device_flush_io - Flush posted MMIO writes by reading back a
 * status register.
 * @hldev: HW device handle.
 */
488 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
489 {
490 u32 val32;
491
492 val32 = readl(&hldev->common_reg->titan_general_int_status);
493 }
494
/*
 * __vxge_hw_device_handle_error - Handle an error event.
 * @hldev: HW device handle.
 * @vp_id: Vpath id.
 * @type: Error event type.
 *
 * Notifies the upper-layer driver of critical error events via crit_err().
 */
503 static enum vxge_hw_status
504 __vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
505 enum vxge_hw_event type)
506 {
507 switch (type) {
508 case VXGE_HW_EVENT_UNKNOWN:
509 break;
510 case VXGE_HW_EVENT_RESET_START:
511 case VXGE_HW_EVENT_RESET_COMPLETE:
512 case VXGE_HW_EVENT_LINK_DOWN:
513 case VXGE_HW_EVENT_LINK_UP:
514 goto out;
515 case VXGE_HW_EVENT_ALARM_CLEARED:
516 goto out;
517 case VXGE_HW_EVENT_ECCERR:
518 case VXGE_HW_EVENT_MRPCIM_ECCERR:
519 goto out;
520 case VXGE_HW_EVENT_FIFO_ERR:
521 case VXGE_HW_EVENT_VPATH_ERR:
522 case VXGE_HW_EVENT_CRITICAL_ERR:
523 case VXGE_HW_EVENT_SERR:
524 break;
525 case VXGE_HW_EVENT_SRPCIM_SERR:
526 case VXGE_HW_EVENT_MRPCIM_SERR:
527 goto out;
528 case VXGE_HW_EVENT_SLOT_FREEZE:
529 break;
530 default:
531 vxge_assert(0);
532 goto out;
533 }
534
535
536 if (hldev->uld_callbacks->crit_err)
537 hldev->uld_callbacks->crit_err(hldev,
538 type, vp_id);
539 out:
540
541 return VXGE_HW_OK;
542 }
543
/*
 * __vxge_hw_device_handle_link_down_ind - Handle a link-down indication.
 * @hldev: HW device handle.
 *
 * Updates the cached link state and invokes the driver's link_down callback.
 */
551 static enum vxge_hw_status
552 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
553 {
554
555
556
557 if (hldev->link_state == VXGE_HW_LINK_DOWN)
558 goto exit;
559
560 hldev->link_state = VXGE_HW_LINK_DOWN;
561
562
563 if (hldev->uld_callbacks->link_down)
564 hldev->uld_callbacks->link_down(hldev);
565 exit:
566 return VXGE_HW_OK;
567 }
568
/*
 * __vxge_hw_device_handle_link_up_ind - Handle a link-up indication.
 * @hldev: HW device handle.
 *
 * Updates the cached link state and invokes the driver's link_up callback.
 */
576 static enum vxge_hw_status
577 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
578 {
579
580
581
582 if (hldev->link_state == VXGE_HW_LINK_UP)
583 goto exit;
584
585 hldev->link_state = VXGE_HW_LINK_UP;
586
587
588 if (hldev->uld_callbacks->link_up)
589 hldev->uld_callbacks->link_up(hldev);
590 exit:
591 return VXGE_HW_OK;
592 }
593
/*
 * __vxge_hw_vpath_alarm_process - Process vpath alarms.
 * @vpath: Virtual path.
 * @skip_alarms: Do not clear the alarm registers.
 *
 * Decodes the per-vpath alarm registers, updates the software error
 * statistics and reports the highest-severity event to the driver.
 */
602 static enum vxge_hw_status
603 __vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
604 u32 skip_alarms)
605 {
606 u64 val64;
607 u64 alarm_status;
608 u64 pic_status;
609 struct __vxge_hw_device *hldev = NULL;
610 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
611 u64 mask64;
612 struct vxge_hw_vpath_stats_sw_info *sw_stats;
613 struct vxge_hw_vpath_reg __iomem *vp_reg;
614
615 if (vpath == NULL) {
616 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
617 alarm_event);
618 goto out2;
619 }
620
621 hldev = vpath->hldev;
622 vp_reg = vpath->vp_reg;
623 alarm_status = readq(&vp_reg->vpath_general_int_status);
624
625 if (alarm_status == VXGE_HW_ALL_FOXES) {
626 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
627 alarm_event);
628 goto out;
629 }
630
631 sw_stats = vpath->sw_stats;
632
633 if (alarm_status & ~(
634 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
635 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
636 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
637 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
638 sw_stats->error_stats.unknown_alarms++;
639
640 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
641 alarm_event);
642 goto out;
643 }
644
645 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
646
647 val64 = readq(&vp_reg->xgmac_vp_int_status);
648
649 if (val64 &
650 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
651
652 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
653
654 if (((val64 &
655 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
656 (!(val64 &
657 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
658 ((val64 &
659 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
660 (!(val64 &
661 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
662 ))) {
663 sw_stats->error_stats.network_sustained_fault++;
664
665 writeq(
666 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
667 &vp_reg->asic_ntwk_vp_err_mask);
668
669 __vxge_hw_device_handle_link_down_ind(hldev);
670 alarm_event = VXGE_HW_SET_LEVEL(
671 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
672 }
673
674 if (((val64 &
675 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
676 (!(val64 &
677 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
678 ((val64 &
679 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
680 (!(val64 &
681 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
682 ))) {
683
684 sw_stats->error_stats.network_sustained_ok++;
685
686 writeq(
687 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
688 &vp_reg->asic_ntwk_vp_err_mask);
689
690 __vxge_hw_device_handle_link_up_ind(hldev);
691 alarm_event = VXGE_HW_SET_LEVEL(
692 VXGE_HW_EVENT_LINK_UP, alarm_event);
693 }
694
695 writeq(VXGE_HW_INTR_MASK_ALL,
696 &vp_reg->asic_ntwk_vp_err_reg);
697
698 alarm_event = VXGE_HW_SET_LEVEL(
699 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
700
701 if (skip_alarms)
702 return VXGE_HW_OK;
703 }
704 }
705
706 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
707
708 pic_status = readq(&vp_reg->vpath_ppif_int_status);
709
710 if (pic_status &
711 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
712
713 val64 = readq(&vp_reg->general_errors_reg);
714 mask64 = readq(&vp_reg->general_errors_mask);
715
716 if ((val64 &
717 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
718 ~mask64) {
719 sw_stats->error_stats.ini_serr_det++;
720
721 alarm_event = VXGE_HW_SET_LEVEL(
722 VXGE_HW_EVENT_SERR, alarm_event);
723 }
724
725 if ((val64 &
726 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
727 ~mask64) {
728 sw_stats->error_stats.dblgen_fifo0_overflow++;
729
730 alarm_event = VXGE_HW_SET_LEVEL(
731 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
732 }
733
734 if ((val64 &
735 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
736 ~mask64)
737 sw_stats->error_stats.statsb_pif_chain_error++;
738
739 if ((val64 &
740 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
741 ~mask64)
742 sw_stats->error_stats.statsb_drop_timeout++;
743
744 if ((val64 &
745 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
746 ~mask64)
747 sw_stats->error_stats.target_illegal_access++;
748
749 if (!skip_alarms) {
750 writeq(VXGE_HW_INTR_MASK_ALL,
751 &vp_reg->general_errors_reg);
752 alarm_event = VXGE_HW_SET_LEVEL(
753 VXGE_HW_EVENT_ALARM_CLEARED,
754 alarm_event);
755 }
756 }
757
758 if (pic_status &
759 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
760
761 val64 = readq(&vp_reg->kdfcctl_errors_reg);
762 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
763
764 if ((val64 &
765 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
766 ~mask64) {
767 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
768
769 alarm_event = VXGE_HW_SET_LEVEL(
770 VXGE_HW_EVENT_FIFO_ERR,
771 alarm_event);
772 }
773
774 if ((val64 &
775 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
776 ~mask64) {
777 sw_stats->error_stats.kdfcctl_fifo0_poison++;
778
779 alarm_event = VXGE_HW_SET_LEVEL(
780 VXGE_HW_EVENT_FIFO_ERR,
781 alarm_event);
782 }
783
784 if ((val64 &
785 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
786 ~mask64) {
787 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
788
789 alarm_event = VXGE_HW_SET_LEVEL(
790 VXGE_HW_EVENT_FIFO_ERR,
791 alarm_event);
792 }
793
794 if (!skip_alarms) {
795 writeq(VXGE_HW_INTR_MASK_ALL,
796 &vp_reg->kdfcctl_errors_reg);
797 alarm_event = VXGE_HW_SET_LEVEL(
798 VXGE_HW_EVENT_ALARM_CLEARED,
799 alarm_event);
800 }
801 }
802
803 }
804
805 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
806
807 val64 = readq(&vp_reg->wrdma_alarm_status);
808
809 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
810
811 val64 = readq(&vp_reg->prc_alarm_reg);
812 mask64 = readq(&vp_reg->prc_alarm_mask);
813
814 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
815 ~mask64)
816 sw_stats->error_stats.prc_ring_bumps++;
817
818 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
819 ~mask64) {
820 sw_stats->error_stats.prc_rxdcm_sc_err++;
821
822 alarm_event = VXGE_HW_SET_LEVEL(
823 VXGE_HW_EVENT_VPATH_ERR,
824 alarm_event);
825 }
826
827 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
828 & ~mask64) {
829 sw_stats->error_stats.prc_rxdcm_sc_abort++;
830
831 alarm_event = VXGE_HW_SET_LEVEL(
832 VXGE_HW_EVENT_VPATH_ERR,
833 alarm_event);
834 }
835
836 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
837 & ~mask64) {
838 sw_stats->error_stats.prc_quanta_size_err++;
839
840 alarm_event = VXGE_HW_SET_LEVEL(
841 VXGE_HW_EVENT_VPATH_ERR,
842 alarm_event);
843 }
844
845 if (!skip_alarms) {
846 writeq(VXGE_HW_INTR_MASK_ALL,
847 &vp_reg->prc_alarm_reg);
848 alarm_event = VXGE_HW_SET_LEVEL(
849 VXGE_HW_EVENT_ALARM_CLEARED,
850 alarm_event);
851 }
852 }
853 }
854 out:
855 hldev->stats.sw_dev_err_stats.vpath_alarms++;
856 out2:
857 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
858 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
859 return VXGE_HW_OK;
860
861 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
862
863 if (alarm_event == VXGE_HW_EVENT_SERR)
864 return VXGE_HW_ERR_CRITICAL;
865
866 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
867 VXGE_HW_ERR_SLOT_FREEZE :
868 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
869 VXGE_HW_ERR_VPATH;
870 }
871
/*
 * vxge_hw_device_begin_irq - Begin IRQ processing.
 * @hldev: HW device handle.
 * @skip_alarms: Do not clear the alarm registers.
 * @reason: Set to the value of Titan's general_int_status register.
 *
 * Checks whether the interrupt was raised by this device (returning
 * VXGE_HW_ERR_WRONG_IRQ if not), detects a slot-freeze condition, counts
 * traffic interrupts and processes per-vpath alarms.
 */
891 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
892 u32 skip_alarms, u64 *reason)
893 {
894 u32 i;
895 u64 val64;
896 u64 adapter_status;
897 u64 vpath_mask;
898 enum vxge_hw_status ret = VXGE_HW_OK;
899
900 val64 = readq(&hldev->common_reg->titan_general_int_status);
901
902 if (unlikely(!val64)) {
903
904 *reason = 0;
905 ret = VXGE_HW_ERR_WRONG_IRQ;
906 goto exit;
907 }
908
909 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
910
911 adapter_status = readq(&hldev->common_reg->adapter_status);
912
913 if (adapter_status == VXGE_HW_ALL_FOXES) {
914
915 __vxge_hw_device_handle_error(hldev,
916 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
917 *reason = 0;
918 ret = VXGE_HW_ERR_SLOT_FREEZE;
919 goto exit;
920 }
921 }
922
923 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
924
925 *reason = val64;
926
927 vpath_mask = hldev->vpaths_deployed >>
928 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
929
930 if (val64 &
931 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
932 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
933
934 return VXGE_HW_OK;
935 }
936
937 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
938
939 if (unlikely(val64 &
940 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
941
942 enum vxge_hw_status error_level = VXGE_HW_OK;
943
944 hldev->stats.sw_dev_err_stats.vpath_alarms++;
945
946 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
947
948 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
949 continue;
950
951 ret = __vxge_hw_vpath_alarm_process(
952 &hldev->virtual_paths[i], skip_alarms);
953
954 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
955
956 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
957 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
958 break;
959 }
960
961 ret = error_level;
962 }
963 exit:
964 return ret;
965 }
966
/*
 * vxge_hw_device_clear_tx_rx - Acknowledge (clear) the condition that caused
 * the Tx and Rx interrupts.
 * @hldev: HW device handle.
 */
977 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
978 {
979
980 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
981 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
982 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
983 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
984 &hldev->common_reg->tim_int_status0);
985 }
986
987 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
988 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
989 __vxge_hw_pio_mem_write32_upper(
990 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
991 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
992 &hldev->common_reg->tim_int_status1);
993 }
994 }
995
/*
 * vxge_hw_channel_dtr_alloc - Allocate a descriptor from the reserve array.
 * @channel: Channel.
 * @dtrh: Buffer to return the descriptor handle.
 *
 * Returns VXGE_HW_OK on success or VXGE_HW_INF_OUT_OF_DESCRIPTORS when both
 * the reserve and free arrays are exhausted.
 */
1005 static enum vxge_hw_status
1006 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
1007 {
1008 if (channel->reserve_ptr - channel->reserve_top > 0) {
1009 _alloc_after_swap:
1010 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
1011
1012 return VXGE_HW_OK;
1013 }
1014

/*
 * The reserve array is empty. If the free array has descriptors,
 * swap the two arrays and retry the allocation.
 */
1021 if (channel->length - channel->free_ptr > 0) {
1022 swap(channel->reserve_arr, channel->free_arr);
1023 channel->reserve_ptr = channel->length;
1024 channel->reserve_top = channel->free_ptr;
1025 channel->free_ptr = channel->length;
1026
1027 channel->stats->reserve_free_swaps_cnt++;
1028
1029 goto _alloc_after_swap;
1030 }
1031
1032 channel->stats->full_cnt++;
1033
1034 *dtrh = NULL;
1035 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
1036 }
1037
/*
 * vxge_hw_channel_dtr_post - Post a descriptor to the channel work array.
 * @channel: Channel.
 * @dtrh: Descriptor handle.
 */
1046 static void
1047 vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
1048 {
1049 vxge_assert(channel->work_arr[channel->post_index] == NULL);
1050
1051 channel->work_arr[channel->post_index++] = dtrh;
1052
1053
1054 if (channel->post_index == channel->length)
1055 channel->post_index = 0;
1056 }
1057
/*
 * vxge_hw_channel_dtr_try_complete - Return the descriptor at the completion
 * index without advancing the index.
 * @channel: Channel.
 * @dtrh: Buffer to return the descriptor handle.
 */
1066 void
1067 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
1068 {
1069 vxge_assert(channel->compl_index < channel->length);
1070
1071 *dtrh = channel->work_arr[channel->compl_index];
1072 prefetch(*dtrh);
1073 }
1074
/*
 * vxge_hw_channel_dtr_complete - Retire the descriptor at the completion
 * index and advance the index.
 * @channel: Channel.
 */
1082 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
1083 {
1084 channel->work_arr[channel->compl_index] = NULL;
1085
1086
1087 if (++channel->compl_index == channel->length)
1088 channel->compl_index = 0;
1089
1090 channel->stats->total_compl_cnt++;
1091 }
1092
/*
 * vxge_hw_channel_dtr_free - Return a descriptor to the free array.
 * @channel: Channel.
 * @dtrh: Descriptor handle.
 */
1101 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
1102 {
1103 channel->free_arr[--channel->free_ptr] = dtrh;
1104 }
1105
/*
 * vxge_hw_channel_dtr_count - Number of descriptors available for
 * reservation on the channel.
 * @channel: Channel.
 */
1113 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
1114 {
1115 return (channel->reserve_ptr - channel->reserve_top) +
1116 (channel->length - channel->free_ptr);
1117 }
1118
/*
 * vxge_hw_ring_rxd_reserve - Reserve a ring descriptor.
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Buffer to return the reserved descriptor.
 *
 * Returns VXGE_HW_OK on success or VXGE_HW_INF_OUT_OF_DESCRIPTORS if the
 * ring has no free descriptors.
 */
1133 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
1134 void **rxdh)
1135 {
1136 enum vxge_hw_status status;
1137 struct __vxge_hw_channel *channel;
1138
1139 channel = &ring->channel;
1140
1141 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
1142
1143 if (status == VXGE_HW_OK) {
1144 struct vxge_hw_ring_rxd_1 *rxdp =
1145 (struct vxge_hw_ring_rxd_1 *)*rxdh;
1146
1147 rxdp->control_0 = rxdp->control_1 = 0;
1148 }
1149
1150 return status;
1151 }
1152
/*
 * vxge_hw_ring_rxd_free - Free a descriptor that was reserved with
 * vxge_hw_ring_rxd_reserve() but never posted.
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Descriptor handle.
 */
1177 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
1178 {
1179 struct __vxge_hw_channel *channel;
1180
1181 channel = &ring->channel;
1182
1183 vxge_hw_channel_dtr_free(channel, rxdh);
1184
1185 }
1186
/*
 * vxge_hw_ring_rxd_pre_post - Prepare a descriptor for posting by placing it
 * on the channel work array.
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Descriptor handle.
 */
1194 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
1195 {
1196 struct __vxge_hw_channel *channel;
1197
1198 channel = &ring->channel;
1199
1200 vxge_hw_channel_dtr_post(channel, rxdh);
1201 }
1202
/*
 * vxge_hw_ring_rxd_post_post - Transfer descriptor ownership to the adapter
 * after a prior vxge_hw_ring_rxd_pre_post().
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Descriptor handle.
 */
1210 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
1211 {
1212 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1213
1214 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1215
1216 if (ring->stats->common_stats.usage_cnt > 0)
1217 ring->stats->common_stats.usage_cnt--;
1218 }
1219
/*
 * vxge_hw_ring_rxd_post - Post a descriptor on the ring and hand it over to
 * the adapter.
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
 */
1230 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
1231 {
1232 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
1233 struct __vxge_hw_channel *channel;
1234
1235 channel = &ring->channel;
1236
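/* Make prior descriptor writes visible before transferring ownership */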
1237 wmb();
1238 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1239
1240 vxge_hw_channel_dtr_post(channel, rxdh);
1241
1242 if (ring->stats->common_stats.usage_cnt > 0)
1243 ring->stats->common_stats.usage_cnt--;
1244 }
1245
/*
 * vxge_hw_ring_rxd_post_post_wmb - Same as vxge_hw_ring_rxd_post_post(), but
 * with a write memory barrier before the ownership transfer.
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Descriptor handle.
 */
1253 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
1254 {
1255 wmb();
1256 vxge_hw_ring_rxd_post_post(ring, rxdh);
1257 }
1258
/*
 * vxge_hw_ring_rxd_next_completed - Get the next completed descriptor.
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Buffer to return the descriptor handle.
 * @t_code: Transfer code, as per the Titan User Guide.
 *
 * Returns VXGE_HW_OK if a completed descriptor was found, or
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS if the adapter still owns the
 * descriptor at the completion index.
 */
1292 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
1293 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
1294 {
1295 struct __vxge_hw_channel *channel;
1296 struct vxge_hw_ring_rxd_1 *rxdp;
1297 enum vxge_hw_status status = VXGE_HW_OK;
1298 u64 control_0, own;
1299
1300 channel = &ring->channel;
1301
1302 vxge_hw_channel_dtr_try_complete(channel, rxdh);
1303
1304 rxdp = *rxdh;
1305 if (rxdp == NULL) {
1306 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1307 goto exit;
1308 }
1309
1310 control_0 = rxdp->control_0;
1311 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
1312 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
1313
1314
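/* The RxD is complete once the adapter has released ownership, or
 * immediately if the frame was dropped. */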
1315 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
1316
1317 vxge_assert((rxdp)->host_control !=
1318 0);
1319
1320 ++ring->cmpl_cnt;
1321 vxge_hw_channel_dtr_complete(channel);
1322
1323 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
1324
1325 ring->stats->common_stats.usage_cnt++;
1326 if (ring->stats->common_stats.usage_max <
1327 ring->stats->common_stats.usage_cnt)
1328 ring->stats->common_stats.usage_max =
1329 ring->stats->common_stats.usage_cnt;
1330
1331 status = VXGE_HW_OK;
1332 goto exit;
1333 }
1334
1335
1336
1337 *rxdh = NULL;
1338 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1339 exit:
1340 return status;
1341 }
1342
/*
 * vxge_hw_ring_handle_tcode - Handle the transfer code of a completed RxD.
 * @ring: Handle to the ring object used for receive.
 * @rxdh: Descriptor handle.
 * @t_code: Transfer code.
 *
 * Updates the t_code error statistics and returns VXGE_HW_ERR_INVALID_TCODE
 * for out-of-range codes.
 */
1357 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1358 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1359 {
1360 enum vxge_hw_status status = VXGE_HW_OK;
1361
1362
1363
1364
1365
1366
1367 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1368 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1369 status = VXGE_HW_OK;
1370 goto exit;
1371 }
1372
1373 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1374 status = VXGE_HW_ERR_INVALID_TCODE;
1375 goto exit;
1376 }
1377
1378 ring->stats->rxd_t_code_err_cnt[t_code]++;
1379 exit:
1380 return status;
1381 }
1382
/*
 * __vxge_hw_non_offload_db_post - Ring the non-offload doorbell.
 * @fifo: Fifo handle.
 * @txdl_ptr: DMA address of the TxDL list.
 * @num_txds: Index of the last TxD in the list (count - 1).
 * @no_snoop: No-snoop bits.
 */
1394 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1395 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1396 {
1397 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1398 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1399 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1400 &fifo->nofl_db->control_0);
1401
1402 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1403 }
1404
/*
 * vxge_hw_fifo_free_txdl_count_get - Number of free TxDLs in the fifo.
 * @fifoh: Fifo handle.
 */
1410 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1411 {
1412 return vxge_hw_channel_dtr_count(&fifoh->channel);
1413 }
1414
/*
 * vxge_hw_fifo_txdl_reserve - Reserve a fifo descriptor.
 * @fifo: Handle to the fifo object used for non offload send.
 * @txdlh: Buffer to return the reserved descriptor.
 * @txdl_priv: Buffer to return the per-descriptor private data.
 *
 * Initializes the reserved TxDL and its private area. Returns VXGE_HW_OK on
 * success or VXGE_HW_INF_OUT_OF_DESCRIPTORS if no descriptors are available.
 */
1435 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1436 struct __vxge_hw_fifo *fifo,
1437 void **txdlh, void **txdl_priv)
1438 {
1439 struct __vxge_hw_channel *channel;
1440 enum vxge_hw_status status;
1441 int i;
1442
1443 channel = &fifo->channel;
1444
1445 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1446
1447 if (status == VXGE_HW_OK) {
1448 struct vxge_hw_fifo_txd *txdp =
1449 (struct vxge_hw_fifo_txd *)*txdlh;
1450 struct __vxge_hw_fifo_txdl_priv *priv;
1451
1452 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1453
1454
1455 priv->align_dma_offset = 0;
1456 priv->align_vaddr_start = priv->align_vaddr;
1457 priv->align_used_frags = 0;
1458 priv->frags = 0;
1459 priv->alloc_frags = fifo->config->max_frags;
1460 priv->next_txdl_priv = NULL;
1461
1462 *txdl_priv = (void *)(size_t)txdp->host_control;
1463
1464 for (i = 0; i < fifo->config->max_frags; i++) {
1465 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1466 txdp->control_0 = txdp->control_1 = 0;
1467 }
1468 }
1469
1470 return status;
1471 }
1472
/*
 * vxge_hw_fifo_txdl_buffer_set - Set a transmit buffer in the TxDL.
 * @fifo: Handle to the fifo object used for non offload send.
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the fragment within the descriptor.
 * @dma_pointer: DMA address of the data buffer.
 * @size: Size of the data buffer, in bytes.
 *
 * For the first fragment this also sets the FIRST gather code and the
 * interrupt type and number.
 */
1490 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1491 void *txdlh, u32 frag_idx,
1492 dma_addr_t dma_pointer, u32 size)
1493 {
1494 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1495 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1496
1497 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1498 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1499
1500 if (frag_idx != 0)
1501 txdp->control_0 = txdp->control_1 = 0;
1502 else {
1503 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1504 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1505 txdp->control_1 |= fifo->interrupt_type;
1506 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1507 fifo->tx_intr_num);
1508 if (txdl_priv->frags) {
1509 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1510 (txdl_priv->frags - 1);
1511 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1512 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1513 }
1514 }
1515
1516 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1517
1518 txdp->buffer_pointer = (u64)dma_pointer;
1519 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1520 fifo->stats->total_buffers++;
1521 txdl_priv->frags++;
1522 }
1523
/*
 * vxge_hw_fifo_txdl_post - Post a descriptor on the fifo channel.
 * @fifo: Handle to the fifo object used for non offload send.
 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve().
 *
 * Marks the TxDL adapter-owned and rings the non-offload doorbell.
 */
1536 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1537 {
1538 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1539 struct vxge_hw_fifo_txd *txdp_last;
1540 struct vxge_hw_fifo_txd *txdp_first;
1541
1542 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1543 txdp_first = txdlh;
1544
1545 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1546 txdp_last->control_0 |=
1547 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1548 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1549
1550 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1551
1552 __vxge_hw_non_offload_db_post(fifo,
1553 (u64)txdl_priv->dma_addr,
1554 txdl_priv->frags - 1,
1555 fifo->no_snoop_bits);
1556
1557 fifo->stats->total_posts++;
1558 fifo->stats->common_stats.usage_cnt++;
1559 if (fifo->stats->common_stats.usage_max <
1560 fifo->stats->common_stats.usage_cnt)
1561 fifo->stats->common_stats.usage_max =
1562 fifo->stats->common_stats.usage_cnt;
1563 }
1564
/*
 * vxge_hw_fifo_txdl_next_completed - Get the next completed TxDL.
 * @fifo: Handle to the fifo object used for non offload send.
 * @txdlh: Buffer to return the descriptor handle.
 * @t_code: Transfer code, as per the Titan User Guide.
 *
 * Returns VXGE_HW_OK if a completed descriptor was found, or
 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS otherwise.
 */
1597 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1598 struct __vxge_hw_fifo *fifo, void **txdlh,
1599 enum vxge_hw_fifo_tcode *t_code)
1600 {
1601 struct __vxge_hw_channel *channel;
1602 struct vxge_hw_fifo_txd *txdp;
1603 enum vxge_hw_status status = VXGE_HW_OK;
1604
1605 channel = &fifo->channel;
1606
1607 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1608
1609 txdp = *txdlh;
1610 if (txdp == NULL) {
1611 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1612 goto exit;
1613 }
1614
1615
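/* The TxDL is complete once the adapter has cleared the OWN bit */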
1616 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1617
1618 vxge_assert(txdp->host_control != 0);
1619
1620 vxge_hw_channel_dtr_complete(channel);
1621
1622 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1623
1624 if (fifo->stats->common_stats.usage_cnt > 0)
1625 fifo->stats->common_stats.usage_cnt--;
1626
1627 status = VXGE_HW_OK;
1628 goto exit;
1629 }
1630
1631
1632 *txdlh = NULL;
1633 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1634 exit:
1635 return status;
1636 }
1637
/*
 * vxge_hw_fifo_handle_tcode - Handle the transfer code of a completed TxDL.
 * @fifo: Handle to the fifo object used for non offload send.
 * @txdlh: Descriptor handle.
 * @t_code: Transfer code.
 *
 * Updates the t_code statistics and returns VXGE_HW_ERR_INVALID_TCODE for
 * out-of-range codes.
 */
1652 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1653 void *txdlh,
1654 enum vxge_hw_fifo_tcode t_code)
1655 {
1656 enum vxge_hw_status status = VXGE_HW_OK;
1657
1658 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1659 status = VXGE_HW_ERR_INVALID_TCODE;
1660 goto exit;
1661 }
1662
1663 fifo->stats->txd_t_code_err_cnt[t_code]++;
1664 exit:
1665 return status;
1666 }
1667
/*
 * vxge_hw_fifo_txdl_free - Free a descriptor reserved with
 * vxge_hw_fifo_txdl_reserve() or retrieved via
 * vxge_hw_fifo_txdl_next_completed().
 * @fifo: Handle to the fifo object used for non offload send.
 * @txdlh: Descriptor handle.
 */
1692 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1693 {
1694 struct __vxge_hw_channel *channel;
1695
1696 channel = &fifo->channel;
1697
1698 vxge_hw_channel_dtr_free(channel, txdlh);
1699 }
1700
/*
 * vxge_hw_vpath_mac_addr_add - Add a MAC address entry to the vpath DA table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be added.
 * @macaddr_mask: MAC address mask.
 * @duplicate_mode: Behaviour on duplicate entries.
 */
1716 enum vxge_hw_status
1717 vxge_hw_vpath_mac_addr_add(
1718 struct __vxge_hw_vpath_handle *vp,
1719 u8 (macaddr)[ETH_ALEN],
1720 u8 (macaddr_mask)[ETH_ALEN],
1721 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1722 {
1723 u32 i;
1724 u64 data1 = 0ULL;
1725 u64 data2 = 0ULL;
1726 enum vxge_hw_status status = VXGE_HW_OK;
1727
1728 if (vp == NULL) {
1729 status = VXGE_HW_ERR_INVALID_HANDLE;
1730 goto exit;
1731 }
1732
1733 for (i = 0; i < ETH_ALEN; i++) {
1734 data1 <<= 8;
1735 data1 |= (u8)macaddr[i];
1736
1737 data2 <<= 8;
1738 data2 |= (u8)macaddr_mask[i];
1739 }
1740
1741 switch (duplicate_mode) {
1742 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1743 i = 0;
1744 break;
1745 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1746 i = 1;
1747 break;
1748 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1749 i = 2;
1750 break;
1751 default:
1752 i = 0;
1753 break;
1754 }
1755
1756 status = __vxge_hw_vpath_rts_table_set(vp,
1757 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1758 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1759 0,
1760 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1761 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1762 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
1763 exit:
1764 return status;
1765 }
1766
/*
 * vxge_hw_vpath_mac_addr_get - Get the first MAC address entry from the
 * vpath DA table.
 * @vp: Vpath handle.
 * @macaddr: Buffer for the MAC address.
 * @macaddr_mask: Buffer for the MAC address mask.
 */
1779 enum vxge_hw_status
1780 vxge_hw_vpath_mac_addr_get(
1781 struct __vxge_hw_vpath_handle *vp,
1782 u8 (macaddr)[ETH_ALEN],
1783 u8 (macaddr_mask)[ETH_ALEN])
1784 {
1785 u32 i;
1786 u64 data1 = 0ULL;
1787 u64 data2 = 0ULL;
1788 enum vxge_hw_status status = VXGE_HW_OK;
1789
1790 if (vp == NULL) {
1791 status = VXGE_HW_ERR_INVALID_HANDLE;
1792 goto exit;
1793 }
1794
1795 status = __vxge_hw_vpath_rts_table_get(vp,
1796 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1797 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1798 0, &data1, &data2);
1799
1800 if (status != VXGE_HW_OK)
1801 goto exit;
1802
1803 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1804
1805 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1806
1807 for (i = ETH_ALEN; i > 0; i--) {
1808 macaddr[i-1] = (u8)(data1 & 0xFF);
1809 data1 >>= 8;
1810
1811 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1812 data2 >>= 8;
1813 }
1814 exit:
1815 return status;
1816 }
1817
/*
 * vxge_hw_vpath_mac_addr_get_next - Get the next MAC address entry from the
 * vpath DA table.
 * @vp: Vpath handle.
 * @macaddr: Buffer for the MAC address.
 * @macaddr_mask: Buffer for the MAC address mask.
 */
1831 enum vxge_hw_status
1832 vxge_hw_vpath_mac_addr_get_next(
1833 struct __vxge_hw_vpath_handle *vp,
1834 u8 (macaddr)[ETH_ALEN],
1835 u8 (macaddr_mask)[ETH_ALEN])
1836 {
1837 u32 i;
1838 u64 data1 = 0ULL;
1839 u64 data2 = 0ULL;
1840 enum vxge_hw_status status = VXGE_HW_OK;
1841
1842 if (vp == NULL) {
1843 status = VXGE_HW_ERR_INVALID_HANDLE;
1844 goto exit;
1845 }
1846
1847 status = __vxge_hw_vpath_rts_table_get(vp,
1848 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1849 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1850 0, &data1, &data2);
1851
1852 if (status != VXGE_HW_OK)
1853 goto exit;
1854
1855 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1856
1857 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1858
1859 for (i = ETH_ALEN; i > 0; i--) {
1860 macaddr[i-1] = (u8)(data1 & 0xFF);
1861 data1 >>= 8;
1862
1863 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1864 data2 >>= 8;
1865 }
1866
1867 exit:
1868 return status;
1869 }
1870
/*
 * vxge_hw_vpath_mac_addr_delete - Delete a MAC address entry from the vpath
 * DA table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted.
 * @macaddr_mask: MAC address mask.
 */
1884 enum vxge_hw_status
1885 vxge_hw_vpath_mac_addr_delete(
1886 struct __vxge_hw_vpath_handle *vp,
1887 u8 (macaddr)[ETH_ALEN],
1888 u8 (macaddr_mask)[ETH_ALEN])
1889 {
1890 u32 i;
1891 u64 data1 = 0ULL;
1892 u64 data2 = 0ULL;
1893 enum vxge_hw_status status = VXGE_HW_OK;
1894
1895 if (vp == NULL) {
1896 status = VXGE_HW_ERR_INVALID_HANDLE;
1897 goto exit;
1898 }
1899
1900 for (i = 0; i < ETH_ALEN; i++) {
1901 data1 <<= 8;
1902 data1 |= (u8)macaddr[i];
1903
1904 data2 <<= 8;
1905 data2 |= (u8)macaddr_mask[i];
1906 }
1907
1908 status = __vxge_hw_vpath_rts_table_set(vp,
1909 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1910 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1911 0,
1912 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1913 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1914 exit:
1915 return status;
1916 }
1917
/*
 * vxge_hw_vpath_vid_add - Add a VLAN id to the vpath VID table.
 * @vp: Vpath handle.
 * @vid: VLAN id.
 */
1928 enum vxge_hw_status
1929 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1930 {
1931 enum vxge_hw_status status = VXGE_HW_OK;
1932
1933 if (vp == NULL) {
1934 status = VXGE_HW_ERR_INVALID_HANDLE;
1935 goto exit;
1936 }
1937
1938 status = __vxge_hw_vpath_rts_table_set(vp,
1939 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1940 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1941 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1942 exit:
1943 return status;
1944 }
1945
/*
 * vxge_hw_vpath_vid_delete - Delete a VLAN id from the vpath VID table.
 * @vp: Vpath handle.
 * @vid: VLAN id.
 */
1956 enum vxge_hw_status
1957 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1958 {
1959 enum vxge_hw_status status = VXGE_HW_OK;
1960
1961 if (vp == NULL) {
1962 status = VXGE_HW_ERR_INVALID_HANDLE;
1963 goto exit;
1964 }
1965
1966 status = __vxge_hw_vpath_rts_table_set(vp,
1967 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1968 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1969 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1970 exit:
1971 return status;
1972 }
1973
/*
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode on the vpath.
 * @vp: Vpath handle.
 *
 * Only a function with MRPCIM access rights programs the register; other
 * functions return VXGE_HW_OK without touching the hardware.
 */
1982 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1983 struct __vxge_hw_vpath_handle *vp)
1984 {
1985 u64 val64;
1986 struct __vxge_hw_virtualpath *vpath;
1987 enum vxge_hw_status status = VXGE_HW_OK;
1988
1989 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1990 status = VXGE_HW_ERR_INVALID_HANDLE;
1991 goto exit;
1992 }
1993
1994 vpath = vp->vpath;
1995
1996
1997 if (!(vpath->hldev->access_rights &
1998 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1999 return VXGE_HW_OK;
2000
2001 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2002
2003 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
2004
2005 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2006 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2007 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
2008 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
2009
2010 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2011 }
2012 exit:
2013 return status;
2014 }
2015
/*
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode on the vpath.
 * @vp: Vpath handle.
 */
2024 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
2025 struct __vxge_hw_vpath_handle *vp)
2026 {
2027 u64 val64;
2028 struct __vxge_hw_virtualpath *vpath;
2029 enum vxge_hw_status status = VXGE_HW_OK;
2030
2031 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2032 status = VXGE_HW_ERR_INVALID_HANDLE;
2033 goto exit;
2034 }
2035
2036 vpath = vp->vpath;
2037
2038 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2039
2040 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
2041
2042 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
2043 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
2044 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
2045
2046 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2047 }
2048 exit:
2049 return status;
2050 }
2051
/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast reception on the vpath.
 * @vp: Vpath handle.
 */
2058 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
2059 struct __vxge_hw_vpath_handle *vp)
2060 {
2061 u64 val64;
2062 struct __vxge_hw_virtualpath *vpath;
2063 enum vxge_hw_status status = VXGE_HW_OK;
2064
2065 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2066 status = VXGE_HW_ERR_INVALID_HANDLE;
2067 goto exit;
2068 }
2069
2070 vpath = vp->vpath;
2071
2072 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2073
2074 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
2075 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
2076 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2077 }
2078 exit:
2079 return status;
2080 }
2081
/*
 * vxge_hw_vpath_mcast_enable - Enable all-multicast reception on the vpath.
 * @vp: Vpath handle.
 */
2090 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
2091 struct __vxge_hw_vpath_handle *vp)
2092 {
2093 u64 val64;
2094 struct __vxge_hw_virtualpath *vpath;
2095 enum vxge_hw_status status = VXGE_HW_OK;
2096
2097 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2098 status = VXGE_HW_ERR_INVALID_HANDLE;
2099 goto exit;
2100 }
2101
2102 vpath = vp->vpath;
2103
2104 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2105
2106 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
2107 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2108 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2109 }
2110 exit:
2111 return status;
2112 }
2113
/*
 * vxge_hw_vpath_mcast_disable - Disable all-multicast reception on the vpath.
 * @vp: Vpath handle.
 */
2123 enum vxge_hw_status
2124 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
2125 {
2126 u64 val64;
2127 struct __vxge_hw_virtualpath *vpath;
2128 enum vxge_hw_status status = VXGE_HW_OK;
2129
2130 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
2131 status = VXGE_HW_ERR_INVALID_HANDLE;
2132 goto exit;
2133 }
2134
2135 vpath = vp->vpath;
2136
2137 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
2138
2139 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
2140 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
2141 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
2142 }
2143 exit:
2144 return status;
2145 }
2146
/*
 * vxge_hw_vpath_alarm_process - Process vpath alarms.
 * @vp: Vpath handle.
 * @skip_alarms: Do not clear the alarm registers.
 */
2155 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2156 struct __vxge_hw_vpath_handle *vp,
2157 u32 skip_alarms)
2158 {
2159 enum vxge_hw_status status = VXGE_HW_OK;
2160
2161 if (vp == NULL) {
2162 status = VXGE_HW_ERR_INVALID_HANDLE;
2163 goto exit;
2164 }
2165
2166 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2167 exit:
2168 return status;
2169 }
2170
/*
 * vxge_hw_vpath_msix_set - Associate MSI-X vectors with the vpath TIM Tx/Rx
 * and alarm interrupts, enabling one-shot vectors when that mode is
 * configured.
 * @vp: Vpath handle.
 * @tim_msix_id: MSI-X vectors for the TIM Tx and Rx interrupts.
 * @alarm_msix_id: MSI-X vector for the alarm interrupt.
 */
2183 void
2184 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2185 int alarm_msix_id)
2186 {
2187 u64 val64;
2188 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2189 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2190 u32 vp_id = vp->vpath->vp_id;
2191
2192 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2193 (vp_id * 4) + tim_msix_id[0]) |
2194 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2195 (vp_id * 4) + tim_msix_id[1]);
2196
2197 writeq(val64, &vp_reg->interrupt_cfg0);
2198
2199 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2200 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2201 &vp_reg->interrupt_cfg2);
2202
2203 if (vpath->hldev->config.intr_mode ==
2204 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2205 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2206 VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
2207 0, 32), &vp_reg->one_shot_vect0_en);
2208 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2209 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2210 0, 32), &vp_reg->one_shot_vect1_en);
2211 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2212 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2213 0, 32), &vp_reg->one_shot_vect2_en);
2214 }
2215 }
2216
/*
 * vxge_hw_vpath_msix_mask - Mask an MSI-X vector of the vpath.
 * @vp: Vpath handle.
 * @msix_id: MSI-X ID.
 */
2229 void
2230 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2231 {
2232 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2233 __vxge_hw_pio_mem_write32_upper(
2234 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2235 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2236 }
2237
/*
 * vxge_hw_vpath_msix_clear - Clear an MSI-X vector of the vpath, using the
 * one-shot clear register when one-shot mode is configured.
 * @vp: Vpath handle.
 * @msix_id: MSI-X ID.
 */
2250 void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2251 {
2252 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2253
2254 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
2255 __vxge_hw_pio_mem_write32_upper(
2256 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2257 &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
2258 else
2259 __vxge_hw_pio_mem_write32_upper(
2260 (u32) vxge_bVALn(vxge_mBIT((msix_id >> 2)), 0, 32),
2261 &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
2262 }
2263
/*
 * vxge_hw_vpath_msix_unmask - Unmask an MSI-X vector of the vpath.
 * @vp: Vpath handle.
 * @msix_id: MSI-X ID.
 */
2276 void
2277 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2278 {
2279 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2280 __vxge_hw_pio_mem_write32_upper(
2281 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2282 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2283 }
2284
/*
 * vxge_hw_vpath_inta_mask_tx_rx - Mask the vpath's Tx and Rx interrupts in
 * the common TIM mask registers (INTA mode).
 * @vp: Vpath handle.
 */
2293 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2294 {
2295 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2296 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2297 u64 val64;
2298 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2299
2300 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2301 tim_int_mask1, vp->vpath->vp_id);
2302
2303 val64 = readq(&hldev->common_reg->tim_int_mask0);
2304
2305 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2306 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2307 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2308 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2309 &hldev->common_reg->tim_int_mask0);
2310 }
2311
2312 val64 = readl(&hldev->common_reg->tim_int_mask1);
2313
2314 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2315 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2316 __vxge_hw_pio_mem_write32_upper(
2317 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2318 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2319 &hldev->common_reg->tim_int_mask1);
2320 }
2321 }
2322
/*
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask the vpath's Tx and Rx interrupts
 * in the common TIM mask registers (INTA mode).
 * @vp: Vpath handle.
 */
2331 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2332 {
2333 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2334 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2335 u64 val64;
2336 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2337
2338 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2339 tim_int_mask1, vp->vpath->vp_id);
2340
2341 val64 = readq(&hldev->common_reg->tim_int_mask0);
2342
2343 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2344 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2345 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2346 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2347 &hldev->common_reg->tim_int_mask0);
2348 }
2349
2350 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2351 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2352 __vxge_hw_pio_mem_write32_upper(
2353 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2354 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2355 &hldev->common_reg->tim_int_mask1);
2356 }
2357 }
2358
/*
 * vxge_hw_vpath_poll_rx - Poll the vpath ring for completed receive
 * descriptors, invoke the ring callback and replenish the PRC doorbell.
 * @ring: Handle to the ring object used for receive.
 */
2373 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2374 {
2375 u8 t_code;
2376 enum vxge_hw_status status = VXGE_HW_OK;
2377 void *first_rxdh;
2378 u64 val64 = 0;
2379 int new_count = 0;
2380
2381 ring->cmpl_cnt = 0;
2382
2383 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2384 if (status == VXGE_HW_OK)
2385 ring->callback(ring, first_rxdh,
2386 t_code, ring->channel.userdata);
2387
2388 if (ring->cmpl_cnt != 0) {
2389 ring->doorbell_cnt += ring->cmpl_cnt;
2390 if (ring->doorbell_cnt >= ring->rxds_limit) {

/*
 * Each RxD is four qwords; convert the number of RxDs replenished
 * into the qword count expected by the PRC doorbell.
 */
2395 new_count = (ring->doorbell_cnt * 4);

/* Add 4 extra qwords for each completed block of RxDs */
2398 ring->total_db_cnt += ring->doorbell_cnt;
2399 if (ring->total_db_cnt >= ring->rxds_per_block) {
2400 new_count += 4;
2401
2402 ring->total_db_cnt %= ring->rxds_per_block;
2403 }
2404 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2405 &ring->vp_reg->prc_rxd_doorbell);
2406 val64 =
2407 readl(&ring->common_reg->titan_general_int_status);
2408 ring->doorbell_cnt = 0;
2409 }
2410 }
2411
2412 return status;
2413 }
2414
/*
 * vxge_hw_vpath_poll_tx - Poll the vpath fifo for completed transmit
 * descriptors and invoke the fifo callback.
 * @fifo: Handle to the fifo object used for non offload send.
 * @skb_ptr: Completed skbs are returned here for freeing.
 * @nr_skb: Maximum number of skbs to return.
 * @more: Set when more completions remain.
 */
2427 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2428 struct sk_buff ***skb_ptr, int nr_skb,
2429 int *more)
2430 {
2431 enum vxge_hw_fifo_tcode t_code;
2432 void *first_txdlh;
2433 enum vxge_hw_status status = VXGE_HW_OK;
2434 struct __vxge_hw_channel *channel;
2435
2436 channel = &fifo->channel;
2437
2438 status = vxge_hw_fifo_txdl_next_completed(fifo,
2439 &first_txdlh, &t_code);
2440 if (status == VXGE_HW_OK)
2441 if (fifo->callback(fifo, first_txdlh, t_code,
2442 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2443 status = VXGE_HW_COMPLETIONS_REMAIN;
2444
2445 return status;
2446 }