1/****************************************************************************** 2 * This software may be used and distributed according to the terms of 3 * the GNU General Public License (GPL), incorporated herein by reference. 4 * Drivers based on or derived from this code fall under the GPL and must 5 * retain the authorship, copyright and license notice. This file is not 6 * a complete program and may only be used when the entire operating 7 * system is licensed under the GPL. 8 * See the file COPYING in this distribution for more information. 9 * 10 * vxge-traffic.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O 11 * Virtualized Server Adapter. 12 * Copyright(c) 2002-2010 Exar Corp. 13 ******************************************************************************/ 14#include <linux/etherdevice.h> 15#include <linux/prefetch.h> 16 17#include "vxge-traffic.h" 18#include "vxge-config.h" 19#include "vxge-main.h" 20 21/* 22 * vxge_hw_vpath_intr_enable - Enable vpath interrupts. 23 * @vp: Virtual Path handle. 24 * 25 * Enable vpath interrupts. The function is to be executed the last in 26 * vpath initialization sequence. 27 * 28 * See also: vxge_hw_vpath_intr_disable() 29 */ 30enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp) 31{ 32 u64 val64; 33 34 struct __vxge_hw_virtualpath *vpath; 35 struct vxge_hw_vpath_reg __iomem *vp_reg; 36 enum vxge_hw_status status = VXGE_HW_OK; 37 if (vp == NULL) { 38 status = VXGE_HW_ERR_INVALID_HANDLE; 39 goto exit; 40 } 41 42 vpath = vp->vpath; 43 44 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { 45 status = VXGE_HW_ERR_VPATH_NOT_OPEN; 46 goto exit; 47 } 48 49 vp_reg = vpath->vp_reg; 50 51 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg); 52 53 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 54 &vp_reg->general_errors_reg); 55 56 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 57 &vp_reg->pci_config_errors_reg); 58 59 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 60 &vp_reg->mrpcim_to_vpath_alarm_reg); 61 62 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 63 &vp_reg->srpcim_to_vpath_alarm_reg); 64 65 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 66 &vp_reg->vpath_ppif_int_status); 67 68 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 69 &vp_reg->srpcim_msg_to_vpath_reg); 70 71 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 72 &vp_reg->vpath_pcipif_int_status); 73 74 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 75 &vp_reg->prc_alarm_reg); 76 77 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 78 &vp_reg->wrdma_alarm_status); 79 80 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 81 &vp_reg->asic_ntwk_vp_err_reg); 82 83 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 84 &vp_reg->xgmac_vp_int_status); 85 86 val64 = readq(&vp_reg->vpath_general_int_status); 87 88 /* Mask unwanted interrupts */ 89 90 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 91 &vp_reg->vpath_pcipif_int_mask); 92 93 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 94 &vp_reg->srpcim_msg_to_vpath_mask); 95 96 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 97 &vp_reg->srpcim_to_vpath_alarm_mask); 98 99 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 100 &vp_reg->mrpcim_to_vpath_alarm_mask); 101 102 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 103 &vp_reg->pci_config_errors_mask); 104 105 /* Unmask the individual interrupts */ 106 107 
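	/*
	 * Note (descriptive only): a set bit in one of these *_mask registers
	 * suppresses the corresponding alarm, so the values written below
	 * leave unmasked only the conditions this driver actually handles;
	 * everything else stays masked.
	 */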
writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW| 108 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW| 109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ| 110 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32), 111 &vp_reg->general_errors_mask); 112 113 __vxge_hw_pio_mem_write32_upper( 114 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR| 115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR| 116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON| 117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON| 118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR| 119 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32), 120 &vp_reg->kdfcctl_errors_mask); 121 122 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask); 123 124 __vxge_hw_pio_mem_write32_upper( 125 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32), 126 &vp_reg->prc_alarm_mask); 127 128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask); 129 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask); 130 131 if (vpath->hldev->first_vp_id != vpath->vp_id) 132 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 133 &vp_reg->asic_ntwk_vp_err_mask); 134 else 135 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(( 136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT | 137 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32), 138 &vp_reg->asic_ntwk_vp_err_mask); 139 140 __vxge_hw_pio_mem_write32_upper(0, 141 &vp_reg->vpath_general_int_mask); 142exit: 143 return status; 144 145} 146 147/* 148 * vxge_hw_vpath_intr_disable - Disable vpath interrupts. 149 * @vp: Virtual Path handle. 150 * 151 * Disable vpath interrupts. The function is to be executed the last in 152 * vpath initialization sequence. 153 * 154 * See also: vxge_hw_vpath_intr_enable() 155 */ 156enum vxge_hw_status vxge_hw_vpath_intr_disable( 157 struct __vxge_hw_vpath_handle *vp) 158{ 159 u64 val64; 160 161 struct __vxge_hw_virtualpath *vpath; 162 enum vxge_hw_status status = VXGE_HW_OK; 163 struct vxge_hw_vpath_reg __iomem *vp_reg; 164 if (vp == NULL) { 165 status = VXGE_HW_ERR_INVALID_HANDLE; 166 goto exit; 167 } 168 169 vpath = vp->vpath; 170 171 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) { 172 status = VXGE_HW_ERR_VPATH_NOT_OPEN; 173 goto exit; 174 } 175 vp_reg = vpath->vp_reg; 176 177 __vxge_hw_pio_mem_write32_upper( 178 (u32)VXGE_HW_INTR_MASK_ALL, 179 &vp_reg->vpath_general_int_mask); 180 181 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id)); 182 183 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask); 184 185 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 186 &vp_reg->general_errors_mask); 187 188 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 189 &vp_reg->pci_config_errors_mask); 190 191 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 192 &vp_reg->mrpcim_to_vpath_alarm_mask); 193 194 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 195 &vp_reg->srpcim_to_vpath_alarm_mask); 196 197 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 198 &vp_reg->vpath_ppif_int_mask); 199 200 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 201 &vp_reg->srpcim_msg_to_vpath_mask); 202 203 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 204 &vp_reg->vpath_pcipif_int_mask); 205 206 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 207 &vp_reg->wrdma_alarm_mask); 208 209 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL, 210 &vp_reg->prc_alarm_mask); 211 212 
	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->xgmac_vp_int_mask);

	__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
		&vp_reg->asic_ntwk_vp_err_mask);

exit:
	return status;
}

void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_fifo *fifo)
{
	struct vxge_hw_vpath_reg __iomem *vp_reg;
	struct vxge_hw_vp_config *config;
	u64 val64;

	if (fifo->config->enable != VXGE_HW_FIFO_ENABLE)
		return;

	vp_reg = fifo->vp_reg;
	config = container_of(fifo->config, struct vxge_hw_vp_config, fifo);

	if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
		fifo->tim_tti_cfg1_saved = val64;
		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
	}
}

void vxge_hw_vpath_dynamic_rti_ci_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg1_saved;

	val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
	ring->tim_rti_cfg1_saved = val64;
	writeq(val64, &ring->vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
}

void vxge_hw_vpath_dynamic_tti_rtimer_set(struct __vxge_hw_fifo *fifo)
{
	u64 val64 = fifo->tim_tti_cfg3_saved;
	u64 timer = (fifo->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(5);

	writeq(val64, &fifo->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
	/* tti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}

void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
{
	u64 val64 = ring->tim_rti_cfg3_saved;
	u64 timer = (ring->rtimer * 1000) / 272;

	val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(0x3ffffff);
	if (timer)
		val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(timer) |
			VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_EVENT_SF(4);

	writeq(val64, &ring->vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
	/* rti_cfg3_saved is not updated again because it is
	 * initialized at one place only - init time.
	 */
}

/**
 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id
 */
void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
{

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->set_msix_mask_vect[msix_id%4]);
}

/**
 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id
 */
void
vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
{

	__vxge_hw_pio_mem_write32_upper(
		(u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clear_msix_mask_vect[msix_id%4]);
}

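/*
 * Usage sketch for the MSI-X helpers above (illustrative only; "ring",
 * "msix_id" and "one_shot" are assumed driver-side variables, not part of
 * this API). A per-vector interrupt handler would typically mask its vector
 * on entry and re-enable it once servicing is done:
 *
 *	vxge_hw_channel_msix_mask(&ring->channel, msix_id);
 *	... service the completions for this vector ...
 *	if (one_shot)
 *		vxge_hw_channel_msix_clear(&ring->channel, msix_id);
 *	else
 *		vxge_hw_channel_msix_unmask(&ring->channel, msix_id);
 */
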
/**
 * vxge_hw_channel_msix_clear - Clear MSIX Vector.
 * @channel: Channel for rx or tx handle
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id when the
 * vector is configured in MSIX oneshot mode
 */
void vxge_hw_channel_msix_clear(struct __vxge_hw_channel *channel, int msix_id)
{
	__vxge_hw_pio_mem_write32_upper(
		(u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
		&channel->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
}

/**
 * vxge_hw_device_set_intr_type - Updates the configuration
 *		with new interrupt type.
 * @hldev: HW device handle.
 * @intr_mode: New interrupt type
 */
u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
{

	if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
	   (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
	   (intr_mode != VXGE_HW_INTR_MODE_DEF))
		intr_mode = VXGE_HW_INTR_MODE_IRQLINE;

	hldev->config.intr_mode = intr_mode;
	return intr_mode;
}

/**
 * vxge_hw_device_intr_enable - Enable interrupts.
 * @hldev: HW device handle.
 *
 * Enable Titan interrupts. The function is to be executed the last in
 * Titan initialization sequence.
 *
 * See also: vxge_hw_device_intr_disable()
 */
void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
{
	u32 i;
	u64 val64;
	u32 val32;

	vxge_hw_device_mask_all(hldev);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_enable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
		val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];

		if (val64 != 0) {
			writeq(val64, &hldev->common_reg->tim_int_status0);

			writeq(~val64, &hldev->common_reg->tim_int_mask0);
		}

		val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
			hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];

		if (val32 != 0) {
			__vxge_hw_pio_mem_write32_upper(val32,
				&hldev->common_reg->tim_int_status1);

			__vxge_hw_pio_mem_write32_upper(~val32,
				&hldev->common_reg->tim_int_mask1);
		}
	}

	val64 = readq(&hldev->common_reg->titan_general_int_status);

	vxge_hw_device_unmask_all(hldev);
}

/**
 * vxge_hw_device_intr_disable - Disable Titan interrupts.
 * @hldev: HW device handle.
 *
 * Disable Titan interrupts.
 *
 * See also: vxge_hw_device_intr_enable()
 */
void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
{
	u32 i;

	vxge_hw_device_mask_all(hldev);

	/* mask all the tim interrupts */
	writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
	__vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
		&hldev->common_reg->tim_int_mask1);

	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
			continue;

		vxge_hw_vpath_intr_disable(
			VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}
}

/**
 * vxge_hw_device_mask_all - Mask all device interrupts.
443 * @hldev: HW device handle. 444 * 445 * Mask all device interrupts. 446 * 447 * See also: vxge_hw_device_unmask_all() 448 */ 449void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev) 450{ 451 u64 val64; 452 453 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM | 454 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC; 455 456 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 457 &hldev->common_reg->titan_mask_all_int); 458} 459 460/** 461 * vxge_hw_device_unmask_all - Unmask all device interrupts. 462 * @hldev: HW device handle. 463 * 464 * Unmask all device interrupts. 465 * 466 * See also: vxge_hw_device_mask_all() 467 */ 468void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev) 469{ 470 u64 val64 = 0; 471 472 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) 473 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC; 474 475 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 476 &hldev->common_reg->titan_mask_all_int); 477} 478 479/** 480 * vxge_hw_device_flush_io - Flush io writes. 481 * @hldev: HW device handle. 482 * 483 * The function performs a read operation to flush io writes. 484 * 485 * Returns: void 486 */ 487void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev) 488{ 489 u32 val32; 490 491 val32 = readl(&hldev->common_reg->titan_general_int_status); 492} 493 494/** 495 * __vxge_hw_device_handle_error - Handle error 496 * @hldev: HW device 497 * @vp_id: Vpath Id 498 * @type: Error type. Please see enum vxge_hw_event{} 499 * 500 * Handle error. 501 */ 502static enum vxge_hw_status 503__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id, 504 enum vxge_hw_event type) 505{ 506 switch (type) { 507 case VXGE_HW_EVENT_UNKNOWN: 508 break; 509 case VXGE_HW_EVENT_RESET_START: 510 case VXGE_HW_EVENT_RESET_COMPLETE: 511 case VXGE_HW_EVENT_LINK_DOWN: 512 case VXGE_HW_EVENT_LINK_UP: 513 goto out; 514 case VXGE_HW_EVENT_ALARM_CLEARED: 515 goto out; 516 case VXGE_HW_EVENT_ECCERR: 517 case VXGE_HW_EVENT_MRPCIM_ECCERR: 518 goto out; 519 case VXGE_HW_EVENT_FIFO_ERR: 520 case VXGE_HW_EVENT_VPATH_ERR: 521 case VXGE_HW_EVENT_CRITICAL_ERR: 522 case VXGE_HW_EVENT_SERR: 523 break; 524 case VXGE_HW_EVENT_SRPCIM_SERR: 525 case VXGE_HW_EVENT_MRPCIM_SERR: 526 goto out; 527 case VXGE_HW_EVENT_SLOT_FREEZE: 528 break; 529 default: 530 vxge_assert(0); 531 goto out; 532 } 533 534 /* notify driver */ 535 if (hldev->uld_callbacks->crit_err) 536 hldev->uld_callbacks->crit_err(hldev, 537 type, vp_id); 538out: 539 540 return VXGE_HW_OK; 541} 542 543/* 544 * __vxge_hw_device_handle_link_down_ind 545 * @hldev: HW device handle. 546 * 547 * Link down indication handler. The function is invoked by HW when 548 * Titan indicates that the link is down. 549 */ 550static enum vxge_hw_status 551__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev) 552{ 553 /* 554 * If the previous link state is not down, return. 555 */ 556 if (hldev->link_state == VXGE_HW_LINK_DOWN) 557 goto exit; 558 559 hldev->link_state = VXGE_HW_LINK_DOWN; 560 561 /* notify driver */ 562 if (hldev->uld_callbacks->link_down) 563 hldev->uld_callbacks->link_down(hldev); 564exit: 565 return VXGE_HW_OK; 566} 567 568/* 569 * __vxge_hw_device_handle_link_up_ind 570 * @hldev: HW device handle. 571 * 572 * Link up indication handler. The function is invoked by HW when 573 * Titan indicates that the link is up for programmable amount of time. 
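 *
 * The cached hldev->link_state is used to suppress duplicate notifications:
 * the driver's link_up callback is invoked only when the state actually
 * changes to VXGE_HW_LINK_UP.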
574 */ 575static enum vxge_hw_status 576__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev) 577{ 578 /* 579 * If the previous link state is not down, return. 580 */ 581 if (hldev->link_state == VXGE_HW_LINK_UP) 582 goto exit; 583 584 hldev->link_state = VXGE_HW_LINK_UP; 585 586 /* notify driver */ 587 if (hldev->uld_callbacks->link_up) 588 hldev->uld_callbacks->link_up(hldev); 589exit: 590 return VXGE_HW_OK; 591} 592 593/* 594 * __vxge_hw_vpath_alarm_process - Process Alarms. 595 * @vpath: Virtual Path. 596 * @skip_alarms: Do not clear the alarms 597 * 598 * Process vpath alarms. 599 * 600 */ 601static enum vxge_hw_status 602__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath, 603 u32 skip_alarms) 604{ 605 u64 val64; 606 u64 alarm_status; 607 u64 pic_status; 608 struct __vxge_hw_device *hldev = NULL; 609 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN; 610 u64 mask64; 611 struct vxge_hw_vpath_stats_sw_info *sw_stats; 612 struct vxge_hw_vpath_reg __iomem *vp_reg; 613 614 if (vpath == NULL) { 615 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, 616 alarm_event); 617 goto out2; 618 } 619 620 hldev = vpath->hldev; 621 vp_reg = vpath->vp_reg; 622 alarm_status = readq(&vp_reg->vpath_general_int_status); 623 624 if (alarm_status == VXGE_HW_ALL_FOXES) { 625 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE, 626 alarm_event); 627 goto out; 628 } 629 630 sw_stats = vpath->sw_stats; 631 632 if (alarm_status & ~( 633 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT | 634 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT | 635 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT | 636 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) { 637 sw_stats->error_stats.unknown_alarms++; 638 639 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN, 640 alarm_event); 641 goto out; 642 } 643 644 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) { 645 646 val64 = readq(&vp_reg->xgmac_vp_int_status); 647 648 if (val64 & 649 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) { 650 651 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg); 652 653 if (((val64 & 654 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) && 655 (!(val64 & 656 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) || 657 ((val64 & 658 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) && 659 (!(val64 & 660 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) 661 ))) { 662 sw_stats->error_stats.network_sustained_fault++; 663 664 writeq( 665 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT, 666 &vp_reg->asic_ntwk_vp_err_mask); 667 668 __vxge_hw_device_handle_link_down_ind(hldev); 669 alarm_event = VXGE_HW_SET_LEVEL( 670 VXGE_HW_EVENT_LINK_DOWN, alarm_event); 671 } 672 673 if (((val64 & 674 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) && 675 (!(val64 & 676 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) || 677 ((val64 & 678 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) && 679 (!(val64 & 680 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) 681 ))) { 682 683 sw_stats->error_stats.network_sustained_ok++; 684 685 writeq( 686 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK, 687 &vp_reg->asic_ntwk_vp_err_mask); 688 689 __vxge_hw_device_handle_link_up_ind(hldev); 690 alarm_event = VXGE_HW_SET_LEVEL( 691 VXGE_HW_EVENT_LINK_UP, alarm_event); 692 } 693 694 writeq(VXGE_HW_INTR_MASK_ALL, 695 &vp_reg->asic_ntwk_vp_err_reg); 696 697 alarm_event = VXGE_HW_SET_LEVEL( 698 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event); 699 700 if (skip_alarms) 701 return VXGE_HW_OK; 702 } 703 } 704 705 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) { 706 707 pic_status = 
readq(&vp_reg->vpath_ppif_int_status); 708 709 if (pic_status & 710 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) { 711 712 val64 = readq(&vp_reg->general_errors_reg); 713 mask64 = readq(&vp_reg->general_errors_mask); 714 715 if ((val64 & 716 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) & 717 ~mask64) { 718 sw_stats->error_stats.ini_serr_det++; 719 720 alarm_event = VXGE_HW_SET_LEVEL( 721 VXGE_HW_EVENT_SERR, alarm_event); 722 } 723 724 if ((val64 & 725 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) & 726 ~mask64) { 727 sw_stats->error_stats.dblgen_fifo0_overflow++; 728 729 alarm_event = VXGE_HW_SET_LEVEL( 730 VXGE_HW_EVENT_FIFO_ERR, alarm_event); 731 } 732 733 if ((val64 & 734 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) & 735 ~mask64) 736 sw_stats->error_stats.statsb_pif_chain_error++; 737 738 if ((val64 & 739 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) & 740 ~mask64) 741 sw_stats->error_stats.statsb_drop_timeout++; 742 743 if ((val64 & 744 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) & 745 ~mask64) 746 sw_stats->error_stats.target_illegal_access++; 747 748 if (!skip_alarms) { 749 writeq(VXGE_HW_INTR_MASK_ALL, 750 &vp_reg->general_errors_reg); 751 alarm_event = VXGE_HW_SET_LEVEL( 752 VXGE_HW_EVENT_ALARM_CLEARED, 753 alarm_event); 754 } 755 } 756 757 if (pic_status & 758 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) { 759 760 val64 = readq(&vp_reg->kdfcctl_errors_reg); 761 mask64 = readq(&vp_reg->kdfcctl_errors_mask); 762 763 if ((val64 & 764 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) & 765 ~mask64) { 766 sw_stats->error_stats.kdfcctl_fifo0_overwrite++; 767 768 alarm_event = VXGE_HW_SET_LEVEL( 769 VXGE_HW_EVENT_FIFO_ERR, 770 alarm_event); 771 } 772 773 if ((val64 & 774 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) & 775 ~mask64) { 776 sw_stats->error_stats.kdfcctl_fifo0_poison++; 777 778 alarm_event = VXGE_HW_SET_LEVEL( 779 VXGE_HW_EVENT_FIFO_ERR, 780 alarm_event); 781 } 782 783 if ((val64 & 784 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) & 785 ~mask64) { 786 sw_stats->error_stats.kdfcctl_fifo0_dma_error++; 787 788 alarm_event = VXGE_HW_SET_LEVEL( 789 VXGE_HW_EVENT_FIFO_ERR, 790 alarm_event); 791 } 792 793 if (!skip_alarms) { 794 writeq(VXGE_HW_INTR_MASK_ALL, 795 &vp_reg->kdfcctl_errors_reg); 796 alarm_event = VXGE_HW_SET_LEVEL( 797 VXGE_HW_EVENT_ALARM_CLEARED, 798 alarm_event); 799 } 800 } 801 802 } 803 804 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) { 805 806 val64 = readq(&vp_reg->wrdma_alarm_status); 807 808 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) { 809 810 val64 = readq(&vp_reg->prc_alarm_reg); 811 mask64 = readq(&vp_reg->prc_alarm_mask); 812 813 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)& 814 ~mask64) 815 sw_stats->error_stats.prc_ring_bumps++; 816 817 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) & 818 ~mask64) { 819 sw_stats->error_stats.prc_rxdcm_sc_err++; 820 821 alarm_event = VXGE_HW_SET_LEVEL( 822 VXGE_HW_EVENT_VPATH_ERR, 823 alarm_event); 824 } 825 826 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT) 827 & ~mask64) { 828 sw_stats->error_stats.prc_rxdcm_sc_abort++; 829 830 alarm_event = VXGE_HW_SET_LEVEL( 831 VXGE_HW_EVENT_VPATH_ERR, 832 alarm_event); 833 } 834 835 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR) 836 & ~mask64) { 837 sw_stats->error_stats.prc_quanta_size_err++; 838 839 alarm_event = VXGE_HW_SET_LEVEL( 840 VXGE_HW_EVENT_VPATH_ERR, 841 alarm_event); 842 } 843 844 if (!skip_alarms) { 845 writeq(VXGE_HW_INTR_MASK_ALL, 846 &vp_reg->prc_alarm_reg); 847 
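				/* the write above acks the latched PRC alarm
				 * bits; record that the alarm was serviced */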
alarm_event = VXGE_HW_SET_LEVEL( 848 VXGE_HW_EVENT_ALARM_CLEARED, 849 alarm_event); 850 } 851 } 852 } 853out: 854 hldev->stats.sw_dev_err_stats.vpath_alarms++; 855out2: 856 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) || 857 (alarm_event == VXGE_HW_EVENT_UNKNOWN)) 858 return VXGE_HW_OK; 859 860 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event); 861 862 if (alarm_event == VXGE_HW_EVENT_SERR) 863 return VXGE_HW_ERR_CRITICAL; 864 865 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ? 866 VXGE_HW_ERR_SLOT_FREEZE : 867 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO : 868 VXGE_HW_ERR_VPATH; 869} 870 871/** 872 * vxge_hw_device_begin_irq - Begin IRQ processing. 873 * @hldev: HW device handle. 874 * @skip_alarms: Do not clear the alarms 875 * @reason: "Reason" for the interrupt, the value of Titan's 876 * general_int_status register. 877 * 878 * The function performs two actions, It first checks whether (shared IRQ) the 879 * interrupt was raised by the device. Next, it masks the device interrupts. 880 * 881 * Note: 882 * vxge_hw_device_begin_irq() does not flush MMIO writes through the 883 * bridge. Therefore, two back-to-back interrupts are potentially possible. 884 * 885 * Returns: 0, if the interrupt is not "ours" (note that in this case the 886 * device remain enabled). 887 * Otherwise, vxge_hw_device_begin_irq() returns 64bit general adapter 888 * status. 889 */ 890enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev, 891 u32 skip_alarms, u64 *reason) 892{ 893 u32 i; 894 u64 val64; 895 u64 adapter_status; 896 u64 vpath_mask; 897 enum vxge_hw_status ret = VXGE_HW_OK; 898 899 val64 = readq(&hldev->common_reg->titan_general_int_status); 900 901 if (unlikely(!val64)) { 902 /* not Titan interrupt */ 903 *reason = 0; 904 ret = VXGE_HW_ERR_WRONG_IRQ; 905 goto exit; 906 } 907 908 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) { 909 910 adapter_status = readq(&hldev->common_reg->adapter_status); 911 912 if (adapter_status == VXGE_HW_ALL_FOXES) { 913 914 __vxge_hw_device_handle_error(hldev, 915 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE); 916 *reason = 0; 917 ret = VXGE_HW_ERR_SLOT_FREEZE; 918 goto exit; 919 } 920 } 921 922 hldev->stats.sw_dev_info_stats.total_intr_cnt++; 923 924 *reason = val64; 925 926 vpath_mask = hldev->vpaths_deployed >> 927 (64 - VXGE_HW_MAX_VIRTUAL_PATHS); 928 929 if (val64 & 930 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) { 931 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++; 932 933 return VXGE_HW_OK; 934 } 935 936 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++; 937 938 if (unlikely(val64 & 939 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) { 940 941 enum vxge_hw_status error_level = VXGE_HW_OK; 942 943 hldev->stats.sw_dev_err_stats.vpath_alarms++; 944 945 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 946 947 if (!(hldev->vpaths_deployed & vxge_mBIT(i))) 948 continue; 949 950 ret = __vxge_hw_vpath_alarm_process( 951 &hldev->virtual_paths[i], skip_alarms); 952 953 error_level = VXGE_HW_SET_LEVEL(ret, error_level); 954 955 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) || 956 (ret == VXGE_HW_ERR_SLOT_FREEZE))) 957 break; 958 } 959 960 ret = error_level; 961 } 962exit: 963 return ret; 964} 965 966/** 967 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the 968 * condition that has caused the Tx and RX interrupt. 969 * @hldev: HW device. 970 * 971 * Acknowledge (that is, clear) the condition that has caused 972 * the Tx and Rx interrupt. 
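 * The acknowledgement is performed by writing the saved tim_int_mask0 and
 * tim_int_mask1 values back to tim_int_status0/tim_int_status1, which clears
 * the latched Tx and Rx conditions for every vpath covered by those masks.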
973 * See also: vxge_hw_device_begin_irq(), 974 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx(). 975 */ 976void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev) 977{ 978 979 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) || 980 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) { 981 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] | 982 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]), 983 &hldev->common_reg->tim_int_status0); 984 } 985 986 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) || 987 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) { 988 __vxge_hw_pio_mem_write32_upper( 989 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] | 990 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]), 991 &hldev->common_reg->tim_int_status1); 992 } 993} 994 995/* 996 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel 997 * @channel: Channel 998 * @dtrh: Buffer to return the DTR pointer 999 * 1000 * Allocates a dtr from the reserve array. If the reserve array is empty, 1001 * it swaps the reserve and free arrays. 1002 * 1003 */ 1004static enum vxge_hw_status 1005vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh) 1006{ 1007 void **tmp_arr; 1008 1009 if (channel->reserve_ptr - channel->reserve_top > 0) { 1010_alloc_after_swap: 1011 *dtrh = channel->reserve_arr[--channel->reserve_ptr]; 1012 1013 return VXGE_HW_OK; 1014 } 1015 1016 /* switch between empty and full arrays */ 1017 1018 /* the idea behind such a design is that by having free and reserved 1019 * arrays separated we basically separated irq and non-irq parts. 1020 * i.e. no additional lock need to be done when we free a resource */ 1021 1022 if (channel->length - channel->free_ptr > 0) { 1023 1024 tmp_arr = channel->reserve_arr; 1025 channel->reserve_arr = channel->free_arr; 1026 channel->free_arr = tmp_arr; 1027 channel->reserve_ptr = channel->length; 1028 channel->reserve_top = channel->free_ptr; 1029 channel->free_ptr = channel->length; 1030 1031 channel->stats->reserve_free_swaps_cnt++; 1032 1033 goto _alloc_after_swap; 1034 } 1035 1036 channel->stats->full_cnt++; 1037 1038 *dtrh = NULL; 1039 return VXGE_HW_INF_OUT_OF_DESCRIPTORS; 1040} 1041 1042/* 1043 * vxge_hw_channel_dtr_post - Post a dtr to the channel 1044 * @channelh: Channel 1045 * @dtrh: DTR pointer 1046 * 1047 * Posts a dtr to work array. 
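 * No locking is performed here; posts on a given channel are expected to be
 * serialized by the caller, since the post index is advanced without any
 * synchronization.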
1048 * 1049 */ 1050static void 1051vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh) 1052{ 1053 vxge_assert(channel->work_arr[channel->post_index] == NULL); 1054 1055 channel->work_arr[channel->post_index++] = dtrh; 1056 1057 /* wrap-around */ 1058 if (channel->post_index == channel->length) 1059 channel->post_index = 0; 1060} 1061 1062/* 1063 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr 1064 * @channel: Channel 1065 * @dtr: Buffer to return the next completed DTR pointer 1066 * 1067 * Returns the next completed dtr with out removing it from work array 1068 * 1069 */ 1070void 1071vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh) 1072{ 1073 vxge_assert(channel->compl_index < channel->length); 1074 1075 *dtrh = channel->work_arr[channel->compl_index]; 1076 prefetch(*dtrh); 1077} 1078 1079/* 1080 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array 1081 * @channel: Channel handle 1082 * 1083 * Removes the next completed dtr from work array 1084 * 1085 */ 1086void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel) 1087{ 1088 channel->work_arr[channel->compl_index] = NULL; 1089 1090 /* wrap-around */ 1091 if (++channel->compl_index == channel->length) 1092 channel->compl_index = 0; 1093 1094 channel->stats->total_compl_cnt++; 1095} 1096 1097/* 1098 * vxge_hw_channel_dtr_free - Frees a dtr 1099 * @channel: Channel handle 1100 * @dtr: DTR pointer 1101 * 1102 * Returns the dtr to free array 1103 * 1104 */ 1105void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh) 1106{ 1107 channel->free_arr[--channel->free_ptr] = dtrh; 1108} 1109 1110/* 1111 * vxge_hw_channel_dtr_count 1112 * @channel: Channel handle. Obtained via vxge_hw_channel_open(). 1113 * 1114 * Retrieve number of DTRs available. This function can not be called 1115 * from data path. ring_initial_replenishi() is the only user. 1116 */ 1117int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel) 1118{ 1119 return (channel->reserve_ptr - channel->reserve_top) + 1120 (channel->length - channel->free_ptr); 1121} 1122 1123/** 1124 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor. 1125 * @ring: Handle to the ring object used for receive 1126 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter 1127 * with a valid handle. 1128 * 1129 * Reserve Rx descriptor for the subsequent filling-in driver 1130 * and posting on the corresponding channel (@channelh) 1131 * via vxge_hw_ring_rxd_post(). 1132 * 1133 * Returns: VXGE_HW_OK - success. 1134 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available. 1135 * 1136 */ 1137enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring, 1138 void **rxdh) 1139{ 1140 enum vxge_hw_status status; 1141 struct __vxge_hw_channel *channel; 1142 1143 channel = &ring->channel; 1144 1145 status = vxge_hw_channel_dtr_alloc(channel, rxdh); 1146 1147 if (status == VXGE_HW_OK) { 1148 struct vxge_hw_ring_rxd_1 *rxdp = 1149 (struct vxge_hw_ring_rxd_1 *)*rxdh; 1150 1151 rxdp->control_0 = rxdp->control_1 = 0; 1152 } 1153 1154 return status; 1155} 1156 1157/** 1158 * vxge_hw_ring_rxd_free - Free descriptor. 1159 * @ring: Handle to the ring object used for receive 1160 * @rxdh: Descriptor handle. 1161 * 1162 * Free the reserved descriptor. This operation is "symmetrical" to 1163 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's 1164 * lifecycle. 
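 *
 * A minimal refill sketch (illustrative; DMA mapping, error handling and the
 * "vxge_rx_buffer_attach" helper are assumptions, not part of this API):
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		vxge_rx_buffer_attach(ring, rxdh);
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}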
1165 * 1166 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can 1167 * be: 1168 * 1169 * - reserved (vxge_hw_ring_rxd_reserve); 1170 * 1171 * - posted (vxge_hw_ring_rxd_post); 1172 * 1173 * - completed (vxge_hw_ring_rxd_next_completed); 1174 * 1175 * - and recycled again (vxge_hw_ring_rxd_free). 1176 * 1177 * For alternative state transitions and more details please refer to 1178 * the design doc. 1179 * 1180 */ 1181void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh) 1182{ 1183 struct __vxge_hw_channel *channel; 1184 1185 channel = &ring->channel; 1186 1187 vxge_hw_channel_dtr_free(channel, rxdh); 1188 1189} 1190 1191/** 1192 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post 1193 * @ring: Handle to the ring object used for receive 1194 * @rxdh: Descriptor handle. 1195 * 1196 * This routine prepares a rxd and posts 1197 */ 1198void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh) 1199{ 1200 struct __vxge_hw_channel *channel; 1201 1202 channel = &ring->channel; 1203 1204 vxge_hw_channel_dtr_post(channel, rxdh); 1205} 1206 1207/** 1208 * vxge_hw_ring_rxd_post_post - Process rxd after post. 1209 * @ring: Handle to the ring object used for receive 1210 * @rxdh: Descriptor handle. 1211 * 1212 * Processes rxd after post 1213 */ 1214void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh) 1215{ 1216 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; 1217 struct __vxge_hw_channel *channel; 1218 1219 channel = &ring->channel; 1220 1221 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; 1222 1223 if (ring->stats->common_stats.usage_cnt > 0) 1224 ring->stats->common_stats.usage_cnt--; 1225} 1226 1227/** 1228 * vxge_hw_ring_rxd_post - Post descriptor on the ring. 1229 * @ring: Handle to the ring object used for receive 1230 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve(). 1231 * 1232 * Post descriptor on the ring. 1233 * Prior to posting the descriptor should be filled in accordance with 1234 * Host/Titan interface specification for a given service (LL, etc.). 1235 * 1236 */ 1237void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh) 1238{ 1239 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh; 1240 struct __vxge_hw_channel *channel; 1241 1242 channel = &ring->channel; 1243 1244 wmb(); 1245 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; 1246 1247 vxge_hw_channel_dtr_post(channel, rxdh); 1248 1249 if (ring->stats->common_stats.usage_cnt > 0) 1250 ring->stats->common_stats.usage_cnt--; 1251} 1252 1253/** 1254 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier. 1255 * @ring: Handle to the ring object used for receive 1256 * @rxdh: Descriptor handle. 1257 * 1258 * Processes rxd after post with memory barrier. 1259 */ 1260void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) 1261{ 1262 wmb(); 1263 vxge_hw_ring_rxd_post_post(ring, rxdh); 1264} 1265 1266/** 1267 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor. 1268 * @ring: Handle to the ring object used for receive 1269 * @rxdh: Descriptor handle. Returned by HW. 1270 * @t_code: Transfer code, as per Titan User Guide, 1271 * Receive Descriptor Format. Returned by HW. 1272 * 1273 * Retrieve the _next_ completed descriptor. 1274 * HW uses ring callback (*vxge_hw_ring_callback_f) to notifiy 1275 * driver of new completed descriptors. 
After that 1276 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest 1277 * completions (the very first completion is passed by HW via 1278 * vxge_hw_ring_callback_f). 1279 * 1280 * Implementation-wise, the driver is free to call 1281 * vxge_hw_ring_rxd_next_completed either immediately from inside the 1282 * ring callback, or in a deferred fashion and separate (from HW) 1283 * context. 1284 * 1285 * Non-zero @t_code means failure to fill-in receive buffer(s) 1286 * of the descriptor. 1287 * For instance, parity error detected during the data transfer. 1288 * In this case Titan will complete the descriptor and indicate 1289 * for the host that the received data is not to be used. 1290 * For details please refer to Titan User Guide. 1291 * 1292 * Returns: VXGE_HW_OK - success. 1293 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors 1294 * are currently available for processing. 1295 * 1296 * See also: vxge_hw_ring_callback_f{}, 1297 * vxge_hw_fifo_rxd_next_completed(), enum vxge_hw_status{}. 1298 */ 1299enum vxge_hw_status vxge_hw_ring_rxd_next_completed( 1300 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code) 1301{ 1302 struct __vxge_hw_channel *channel; 1303 struct vxge_hw_ring_rxd_1 *rxdp; 1304 enum vxge_hw_status status = VXGE_HW_OK; 1305 u64 control_0, own; 1306 1307 channel = &ring->channel; 1308 1309 vxge_hw_channel_dtr_try_complete(channel, rxdh); 1310 1311 rxdp = *rxdh; 1312 if (rxdp == NULL) { 1313 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; 1314 goto exit; 1315 } 1316 1317 control_0 = rxdp->control_0; 1318 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; 1319 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0); 1320 1321 /* check whether it is not the end */ 1322 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) { 1323 1324 vxge_assert((rxdp)->host_control != 1325 0); 1326 1327 ++ring->cmpl_cnt; 1328 vxge_hw_channel_dtr_complete(channel); 1329 1330 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED); 1331 1332 ring->stats->common_stats.usage_cnt++; 1333 if (ring->stats->common_stats.usage_max < 1334 ring->stats->common_stats.usage_cnt) 1335 ring->stats->common_stats.usage_max = 1336 ring->stats->common_stats.usage_cnt; 1337 1338 status = VXGE_HW_OK; 1339 goto exit; 1340 } 1341 1342 /* reset it. since we don't want to return 1343 * garbage to the driver */ 1344 *rxdh = NULL; 1345 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; 1346exit: 1347 return status; 1348} 1349 1350/** 1351 * vxge_hw_ring_handle_tcode - Handle transfer code. 1352 * @ring: Handle to the ring object used for receive 1353 * @rxdh: Descriptor handle. 1354 * @t_code: One of the enumerated (and documented in the Titan user guide) 1355 * "transfer codes". 1356 * 1357 * Handle descriptor's transfer code. The latter comes with each completed 1358 * descriptor. 1359 * 1360 * Returns: one of the enum vxge_hw_status{} enumerated types. 1361 * VXGE_HW_OK - for success. 1362 * VXGE_HW_ERR_CRITICAL - when encounters critical error. 1363 */ 1364enum vxge_hw_status vxge_hw_ring_handle_tcode( 1365 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code) 1366{ 1367 struct __vxge_hw_channel *channel; 1368 enum vxge_hw_status status = VXGE_HW_OK; 1369 1370 channel = &ring->channel; 1371 1372 /* If the t_code is not supported and if the 1373 * t_code is other than 0x5 (unparseable packet 1374 * such as unknown UPV6 header), Drop it !!! 
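	 * In other words: VXGE_HW_RING_T_CODE_L3_PKT_ERR is treated as
	 * success so the frame is still delivered, tcodes above
	 * VXGE_HW_RING_T_CODE_MULTI_ERR are rejected as invalid, and the
	 * remaining tcodes are only counted in the per-tcode statistics.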
1375 */ 1376 1377 if (t_code == VXGE_HW_RING_T_CODE_OK || 1378 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) { 1379 status = VXGE_HW_OK; 1380 goto exit; 1381 } 1382 1383 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) { 1384 status = VXGE_HW_ERR_INVALID_TCODE; 1385 goto exit; 1386 } 1387 1388 ring->stats->rxd_t_code_err_cnt[t_code]++; 1389exit: 1390 return status; 1391} 1392 1393/** 1394 * __vxge_hw_non_offload_db_post - Post non offload doorbell 1395 * 1396 * @fifo: fifohandle 1397 * @txdl_ptr: The starting location of the TxDL in host memory 1398 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256) 1399 * @no_snoop: No snoop flags 1400 * 1401 * This function posts a non-offload doorbell to doorbell FIFO 1402 * 1403 */ 1404static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo, 1405 u64 txdl_ptr, u32 num_txds, u32 no_snoop) 1406{ 1407 struct __vxge_hw_channel *channel; 1408 1409 channel = &fifo->channel; 1410 1411 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) | 1412 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) | 1413 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop), 1414 &fifo->nofl_db->control_0); 1415 1416 mmiowb(); 1417 1418 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr); 1419 1420 mmiowb(); 1421} 1422 1423/** 1424 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in 1425 * the fifo 1426 * @fifoh: Handle to the fifo object used for non offload send 1427 */ 1428u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh) 1429{ 1430 return vxge_hw_channel_dtr_count(&fifoh->channel); 1431} 1432 1433/** 1434 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor. 1435 * @fifoh: Handle to the fifo object used for non offload send 1436 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter 1437 * with a valid handle. 1438 * @txdl_priv: Buffer to return the pointer to per txdl space 1439 * 1440 * Reserve a single TxDL (that is, fifo descriptor) 1441 * for the subsequent filling-in by driver) 1442 * and posting on the corresponding channel (@channelh) 1443 * via vxge_hw_fifo_txdl_post(). 1444 * 1445 * Note: it is the responsibility of driver to reserve multiple descriptors 1446 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor 1447 * carries up to configured number (fifo.max_frags) of contiguous buffers. 
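 *
 * A minimal single-fragment transmit sketch (illustrative; "dma_addr" and
 * "len" are assumed to come from a DMA mapping already set up by the driver):
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) == VXGE_HW_OK) {
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}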
1448 * 1449 * Returns: VXGE_HW_OK - success; 1450 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available 1451 * 1452 */ 1453enum vxge_hw_status vxge_hw_fifo_txdl_reserve( 1454 struct __vxge_hw_fifo *fifo, 1455 void **txdlh, void **txdl_priv) 1456{ 1457 struct __vxge_hw_channel *channel; 1458 enum vxge_hw_status status; 1459 int i; 1460 1461 channel = &fifo->channel; 1462 1463 status = vxge_hw_channel_dtr_alloc(channel, txdlh); 1464 1465 if (status == VXGE_HW_OK) { 1466 struct vxge_hw_fifo_txd *txdp = 1467 (struct vxge_hw_fifo_txd *)*txdlh; 1468 struct __vxge_hw_fifo_txdl_priv *priv; 1469 1470 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp); 1471 1472 /* reset the TxDL's private */ 1473 priv->align_dma_offset = 0; 1474 priv->align_vaddr_start = priv->align_vaddr; 1475 priv->align_used_frags = 0; 1476 priv->frags = 0; 1477 priv->alloc_frags = fifo->config->max_frags; 1478 priv->next_txdl_priv = NULL; 1479 1480 *txdl_priv = (void *)(size_t)txdp->host_control; 1481 1482 for (i = 0; i < fifo->config->max_frags; i++) { 1483 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i; 1484 txdp->control_0 = txdp->control_1 = 0; 1485 } 1486 } 1487 1488 return status; 1489} 1490 1491/** 1492 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the 1493 * descriptor. 1494 * @fifo: Handle to the fifo object used for non offload send 1495 * @txdlh: Descriptor handle. 1496 * @frag_idx: Index of the data buffer in the caller's scatter-gather list 1497 * (of buffers). 1498 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx. 1499 * @size: Size of the data buffer (in bytes). 1500 * 1501 * This API is part of the preparation of the transmit descriptor for posting 1502 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include 1503 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits(). 1504 * All three APIs fill in the fields of the fifo descriptor, 1505 * in accordance with the Titan specification. 1506 * 1507 */ 1508void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo, 1509 void *txdlh, u32 frag_idx, 1510 dma_addr_t dma_pointer, u32 size) 1511{ 1512 struct __vxge_hw_fifo_txdl_priv *txdl_priv; 1513 struct vxge_hw_fifo_txd *txdp, *txdp_last; 1514 struct __vxge_hw_channel *channel; 1515 1516 channel = &fifo->channel; 1517 1518 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); 1519 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags; 1520 1521 if (frag_idx != 0) 1522 txdp->control_0 = txdp->control_1 = 0; 1523 else { 1524 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE( 1525 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST); 1526 txdp->control_1 |= fifo->interrupt_type; 1527 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER( 1528 fifo->tx_intr_num); 1529 if (txdl_priv->frags) { 1530 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + 1531 (txdl_priv->frags - 1); 1532 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE( 1533 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST); 1534 } 1535 } 1536 1537 vxge_assert(frag_idx < txdl_priv->alloc_frags); 1538 1539 txdp->buffer_pointer = (u64)dma_pointer; 1540 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size); 1541 fifo->stats->total_buffers++; 1542 txdl_priv->frags++; 1543} 1544 1545/** 1546 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel. 1547 * @fifo: Handle to the fifo object used for non offload send 1548 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve() 1549 * @frags: Number of contiguous buffers that are part of a single 1550 * transmit operation. 
1551 * 1552 * Post descriptor on the 'fifo' type channel for transmission. 1553 * Prior to posting the descriptor should be filled in accordance with 1554 * Host/Titan interface specification for a given service (LL, etc.). 1555 * 1556 */ 1557void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh) 1558{ 1559 struct __vxge_hw_fifo_txdl_priv *txdl_priv; 1560 struct vxge_hw_fifo_txd *txdp_last; 1561 struct vxge_hw_fifo_txd *txdp_first; 1562 struct __vxge_hw_channel *channel; 1563 1564 channel = &fifo->channel; 1565 1566 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh); 1567 txdp_first = txdlh; 1568 1569 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1); 1570 txdp_last->control_0 |= 1571 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST); 1572 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER; 1573 1574 vxge_hw_channel_dtr_post(&fifo->channel, txdlh); 1575 1576 __vxge_hw_non_offload_db_post(fifo, 1577 (u64)txdl_priv->dma_addr, 1578 txdl_priv->frags - 1, 1579 fifo->no_snoop_bits); 1580 1581 fifo->stats->total_posts++; 1582 fifo->stats->common_stats.usage_cnt++; 1583 if (fifo->stats->common_stats.usage_max < 1584 fifo->stats->common_stats.usage_cnt) 1585 fifo->stats->common_stats.usage_max = 1586 fifo->stats->common_stats.usage_cnt; 1587} 1588 1589/** 1590 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor. 1591 * @fifo: Handle to the fifo object used for non offload send 1592 * @txdlh: Descriptor handle. Returned by HW. 1593 * @t_code: Transfer code, as per Titan User Guide, 1594 * Transmit Descriptor Format. 1595 * Returned by HW. 1596 * 1597 * Retrieve the _next_ completed descriptor. 1598 * HW uses channel callback (*vxge_hw_channel_callback_f) to notifiy 1599 * driver of new completed descriptors. After that 1600 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest 1601 * completions (the very first completion is passed by HW via 1602 * vxge_hw_channel_callback_f). 1603 * 1604 * Implementation-wise, the driver is free to call 1605 * vxge_hw_fifo_txdl_next_completed either immediately from inside the 1606 * channel callback, or in a deferred fashion and separate (from HW) 1607 * context. 1608 * 1609 * Non-zero @t_code means failure to process the descriptor. 1610 * The failure could happen, for instance, when the link is 1611 * down, in which case Titan completes the descriptor because it 1612 * is not able to send the data out. 1613 * 1614 * For details please refer to Titan User Guide. 1615 * 1616 * Returns: VXGE_HW_OK - success. 1617 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors 1618 * are currently available for processing. 
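 *
 * Typical completion loop (illustrative; "vxge_free_tx_buffers" is an
 * assumed driver helper, not part of this API):
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		vxge_free_tx_buffers(fifo, txdlh);
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}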
1619 * 1620 */ 1621enum vxge_hw_status vxge_hw_fifo_txdl_next_completed( 1622 struct __vxge_hw_fifo *fifo, void **txdlh, 1623 enum vxge_hw_fifo_tcode *t_code) 1624{ 1625 struct __vxge_hw_channel *channel; 1626 struct vxge_hw_fifo_txd *txdp; 1627 enum vxge_hw_status status = VXGE_HW_OK; 1628 1629 channel = &fifo->channel; 1630 1631 vxge_hw_channel_dtr_try_complete(channel, txdlh); 1632 1633 txdp = *txdlh; 1634 if (txdp == NULL) { 1635 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; 1636 goto exit; 1637 } 1638 1639 /* check whether host owns it */ 1640 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) { 1641 1642 vxge_assert(txdp->host_control != 0); 1643 1644 vxge_hw_channel_dtr_complete(channel); 1645 1646 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0); 1647 1648 if (fifo->stats->common_stats.usage_cnt > 0) 1649 fifo->stats->common_stats.usage_cnt--; 1650 1651 status = VXGE_HW_OK; 1652 goto exit; 1653 } 1654 1655 /* no more completions */ 1656 *txdlh = NULL; 1657 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS; 1658exit: 1659 return status; 1660} 1661 1662/** 1663 * vxge_hw_fifo_handle_tcode - Handle transfer code. 1664 * @fifo: Handle to the fifo object used for non offload send 1665 * @txdlh: Descriptor handle. 1666 * @t_code: One of the enumerated (and documented in the Titan user guide) 1667 * "transfer codes". 1668 * 1669 * Handle descriptor's transfer code. The latter comes with each completed 1670 * descriptor. 1671 * 1672 * Returns: one of the enum vxge_hw_status{} enumerated types. 1673 * VXGE_HW_OK - for success. 1674 * VXGE_HW_ERR_CRITICAL - when encounters critical error. 1675 */ 1676enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo, 1677 void *txdlh, 1678 enum vxge_hw_fifo_tcode t_code) 1679{ 1680 struct __vxge_hw_channel *channel; 1681 1682 enum vxge_hw_status status = VXGE_HW_OK; 1683 channel = &fifo->channel; 1684 1685 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) { 1686 status = VXGE_HW_ERR_INVALID_TCODE; 1687 goto exit; 1688 } 1689 1690 fifo->stats->txd_t_code_err_cnt[t_code]++; 1691exit: 1692 return status; 1693} 1694 1695/** 1696 * vxge_hw_fifo_txdl_free - Free descriptor. 1697 * @fifo: Handle to the fifo object used for non offload send 1698 * @txdlh: Descriptor handle. 1699 * 1700 * Free the reserved descriptor. This operation is "symmetrical" to 1701 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's 1702 * lifecycle. 1703 * 1704 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can 1705 * be: 1706 * 1707 * - reserved (vxge_hw_fifo_txdl_reserve); 1708 * 1709 * - posted (vxge_hw_fifo_txdl_post); 1710 * 1711 * - completed (vxge_hw_fifo_txdl_next_completed); 1712 * 1713 * - and recycled again (vxge_hw_fifo_txdl_free). 1714 * 1715 * For alternative state transitions and more details please refer to 1716 * the design doc. 1717 * 1718 */ 1719void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh) 1720{ 1721 struct __vxge_hw_fifo_txdl_priv *txdl_priv; 1722 u32 max_frags; 1723 struct __vxge_hw_channel *channel; 1724 1725 channel = &fifo->channel; 1726 1727 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, 1728 (struct vxge_hw_fifo_txd *)txdlh); 1729 1730 max_frags = fifo->config->max_frags; 1731 1732 vxge_hw_channel_dtr_free(channel, txdlh); 1733} 1734 1735/** 1736 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath 1737 * to MAC address table. 1738 * @vp: Vpath handle. 
1739 * @macaddr: MAC address to be added for this vpath into the list 1740 * @macaddr_mask: MAC address mask for macaddr 1741 * @duplicate_mode: Duplicate MAC address add mode. Please see 1742 * enum vxge_hw_vpath_mac_addr_add_mode{} 1743 * 1744 * Adds the given mac address and mac address mask into the list for this 1745 * vpath. 1746 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and 1747 * vxge_hw_vpath_mac_addr_get_next 1748 * 1749 */ 1750enum vxge_hw_status 1751vxge_hw_vpath_mac_addr_add( 1752 struct __vxge_hw_vpath_handle *vp, 1753 u8 (macaddr)[ETH_ALEN], 1754 u8 (macaddr_mask)[ETH_ALEN], 1755 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode) 1756{ 1757 u32 i; 1758 u64 data1 = 0ULL; 1759 u64 data2 = 0ULL; 1760 enum vxge_hw_status status = VXGE_HW_OK; 1761 1762 if (vp == NULL) { 1763 status = VXGE_HW_ERR_INVALID_HANDLE; 1764 goto exit; 1765 } 1766 1767 for (i = 0; i < ETH_ALEN; i++) { 1768 data1 <<= 8; 1769 data1 |= (u8)macaddr[i]; 1770 1771 data2 <<= 8; 1772 data2 |= (u8)macaddr_mask[i]; 1773 } 1774 1775 switch (duplicate_mode) { 1776 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE: 1777 i = 0; 1778 break; 1779 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE: 1780 i = 1; 1781 break; 1782 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE: 1783 i = 2; 1784 break; 1785 default: 1786 i = 0; 1787 break; 1788 } 1789 1790 status = __vxge_hw_vpath_rts_table_set(vp, 1791 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY, 1792 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, 1793 0, 1794 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1), 1795 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)| 1796 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i)); 1797exit: 1798 return status; 1799} 1800 1801/** 1802 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath 1803 * from MAC address table. 1804 * @vp: Vpath handle. 1805 * @macaddr: First MAC address entry for this vpath in the list 1806 * @macaddr_mask: MAC address mask for macaddr 1807 * 1808 * Returns the first mac address and mac address mask in the list for this 1809 * vpath. 1810 * see also: vxge_hw_vpath_mac_addr_get_next 1811 * 1812 */ 1813enum vxge_hw_status 1814vxge_hw_vpath_mac_addr_get( 1815 struct __vxge_hw_vpath_handle *vp, 1816 u8 (macaddr)[ETH_ALEN], 1817 u8 (macaddr_mask)[ETH_ALEN]) 1818{ 1819 u32 i; 1820 u64 data1 = 0ULL; 1821 u64 data2 = 0ULL; 1822 enum vxge_hw_status status = VXGE_HW_OK; 1823 1824 if (vp == NULL) { 1825 status = VXGE_HW_ERR_INVALID_HANDLE; 1826 goto exit; 1827 } 1828 1829 status = __vxge_hw_vpath_rts_table_get(vp, 1830 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY, 1831 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA, 1832 0, &data1, &data2); 1833 1834 if (status != VXGE_HW_OK) 1835 goto exit; 1836 1837 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1); 1838 1839 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2); 1840 1841 for (i = ETH_ALEN; i > 0; i--) { 1842 macaddr[i-1] = (u8)(data1 & 0xFF); 1843 data1 >>= 8; 1844 1845 macaddr_mask[i-1] = (u8)(data2 & 0xFF); 1846 data2 >>= 8; 1847 } 1848exit: 1849 return status; 1850} 1851 1852/** 1853 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this 1854 * vpath 1855 * from MAC address table. 1856 * @vp: Vpath handle. 1857 * @macaddr: Next MAC address entry for this vpath in the list 1858 * @macaddr_mask: MAC address mask for macaddr 1859 * 1860 * Returns the next mac address and mac address mask in the list for this 1861 * vpath. 
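 *
 * The whole table is typically walked by calling vxge_hw_vpath_mac_addr_get()
 * once and then this function repeatedly until it returns something other
 * than VXGE_HW_OK, for example:
 *
 *	status = vxge_hw_vpath_mac_addr_get(vp, macaddr, macaddr_mask);
 *	while (status == VXGE_HW_OK) {
 *		... consume macaddr/macaddr_mask ...
 *		status = vxge_hw_vpath_mac_addr_get_next(vp, macaddr,
 *							 macaddr_mask);
 *	}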
 * see also: vxge_hw_vpath_mac_addr_get
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_get(vp,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
		0, &data1, &data2);

	if (status != VXGE_HW_OK)
		goto exit;

	data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);

	data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);

	for (i = ETH_ALEN; i > 0; i--) {
		macaddr[i-1] = (u8)(data1 & 0xFF);
		data1 >>= 8;

		macaddr_mask[i-1] = (u8)(data2 & 0xFF);
		data2 >>= 8;
	}

exit:
	return status;
}

/**
 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
 *               from the MAC address table.
 * @vp: Vpath handle.
 * @macaddr: MAC address to be deleted for this vpath from the list
 * @macaddr_mask: MAC address mask for macaddr
 *
 * Deletes the given mac address and mac address mask from the list for this
 * vpath.
 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
 * vxge_hw_vpath_mac_addr_get_next
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
	struct __vxge_hw_vpath_handle *vp,
	u8 (macaddr)[ETH_ALEN],
	u8 (macaddr_mask)[ETH_ALEN])
{
	u32 i;
	u64 data1 = 0ULL;
	u64 data2 = 0ULL;
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		data1 <<= 8;
		data1 |= (u8)macaddr[i];

		data2 <<= 8;
		data2 |= (u8)macaddr_mask[i];
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0,
			VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
			VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
 *               to vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be added for this vpath into the list
 *
 * Adds the given vlan id into the list for this vpath.
 * see also: vxge_hw_vpath_vid_delete
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
	enum vxge_hw_status status = VXGE_HW_OK;

	if (vp == NULL) {
		status = VXGE_HW_ERR_INVALID_HANDLE;
		goto exit;
	}

	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
			0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
	return status;
}

/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 *               from the vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
/**
 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
 * from vlan id table.
 * @vp: Vpath handle.
 * @vid: vlan id to be deleted for this vpath from the list
 *
 * Deletes the given vlan id from the list for this vpath.
 * see also: vxge_hw_vpath_vid_add
 *
 */
enum vxge_hw_status
vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_rts_table_set(vp,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
                        VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
                        0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
exit:
        return status;
}

/**
 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Enable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_disable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_enable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        /* Enable promiscuous mode for function 0 only */
        if (!(vpath->hldev->access_rights &
                VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
                return VXGE_HW_OK;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {

                val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
                         VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
                         VXGE_HW_RXMAC_VCFG0_BCAST_EN |
                         VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;

                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
 * @vp: Vpath handle.
 *
 * Disable promiscuous mode of Titan-e operation.
 *
 * See also: vxge_hw_vpath_promisc_enable().
 */
enum vxge_hw_status vxge_hw_vpath_promisc_disable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {

                val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
                           VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
                           VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);

                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

/*
 * vxge_hw_vpath_bcast_enable - Enable broadcast
 * @vp: Vpath handle.
 *
 * Enable receiving broadcasts.
 */
enum vxge_hw_status vxge_hw_vpath_bcast_enable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
                val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

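/*
 * Illustrative sketch only (not part of the driver): rxmac_vcfg0 is updated
 * read-modify-write, so the helpers above only touch the bits they own and
 * can be combined freely.  A caller reacting to IFF_PROMISC might use them as
 * below; the helper and its flags argument are assumptions for this example.
 */
#if 0
static void vxge_example_sync_rx_mode(struct __vxge_hw_vpath_handle *vp,
                                      unsigned int netdev_flags)
{
        if (netdev_flags & IFF_PROMISC)
                vxge_hw_vpath_promisc_enable(vp);
        else
                vxge_hw_vpath_promisc_disable(vp);

        /* broadcast reception is normally wanted in either mode */
        vxge_hw_vpath_bcast_enable(vp);
}
#endif
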
/**
 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
 * @vp: Vpath handle.
 *
 * Enable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK on success.
 *
 */
enum vxge_hw_status vxge_hw_vpath_mcast_enable(
                        struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
                val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

/**
 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
 * @vp: Vpath handle.
 *
 * Disable Titan-e multicast addresses.
 * Returns: VXGE_HW_OK - success.
 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
 *
 */
enum vxge_hw_status
vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath;
        enum vxge_hw_status status = VXGE_HW_OK;

        if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        vpath = vp->vpath;

        val64 = readq(&vpath->vp_reg->rxmac_vcfg0);

        if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
                val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
                writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
        }
exit:
        return status;
}

/*
 * vxge_hw_vpath_alarm_process - Process Alarms.
 * @vp: Virtual Path handle.
 * @skip_alarms: Do not clear the alarms
 *
 * Process vpath alarms.
 *
 */
enum vxge_hw_status vxge_hw_vpath_alarm_process(
                        struct __vxge_hw_vpath_handle *vp,
                        u32 skip_alarms)
{
        enum vxge_hw_status status = VXGE_HW_OK;

        if (vp == NULL) {
                status = VXGE_HW_ERR_INVALID_HANDLE;
                goto exit;
        }

        status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
exit:
        return status;
}

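/*
 * Illustrative sketch only (not part of the driver): an alarm interrupt
 * handler usually hands the event to vxge_hw_vpath_alarm_process() with
 * skip_alarms == 0 so the alarm registers are cleared as part of the
 * processing.  The wrapper below is an assumption made for this example.
 */
#if 0
static irqreturn_t vxge_example_alarm_isr(int irq, void *dev_id)
{
        struct __vxge_hw_vpath_handle *vp = dev_id;

        /* 0 => clear the alarms once they have been processed */
        vxge_hw_vpath_alarm_process(vp, 0);

        return IRQ_HANDLED;
}
#endif
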
/**
 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
 * alarms
 * @vp: Virtual Path handle.
 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
 *             interrupts (can be repeated). If fifo or ring are not enabled,
 *             the MSIX vector for that should be set to 0.
 * @alarm_msix_id: MSIX vector for alarm.
 *
 * This API associates the given MSIX vector numbers with the four TIM
 * interrupts and the alarm interrupt.
 */
void
vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
                       int alarm_msix_id)
{
        u64 val64;
        struct __vxge_hw_virtualpath *vpath = vp->vpath;
        struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
        u32 vp_id = vp->vpath->vp_id;

        val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
                  (vp_id * 4) + tim_msix_id[0]) |
                VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
                  (vp_id * 4) + tim_msix_id[1]);

        writeq(val64, &vp_reg->interrupt_cfg0);

        writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
                        (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
                        &vp_reg->interrupt_cfg2);

        if (vpath->hldev->config.intr_mode ==
                                        VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
                                VXGE_HW_ONE_SHOT_VECT0_EN_ONE_SHOT_VECT0_EN,
                                0, 32), &vp_reg->one_shot_vect0_en);
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
                                VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
                                0, 32), &vp_reg->one_shot_vect1_en);
                __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
                                VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
                                0, 32), &vp_reg->one_shot_vect2_en);
        }
}

/**
 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function masks the msix interrupt for the given msix_id.
 *
 * Returns: None.
 *
 * See also: vxge_hw_vpath_msix_unmask()
 */
void
vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
        struct __vxge_hw_device *hldev = vp->vpath->hldev;
        __vxge_hw_pio_mem_write32_upper(
                (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function clears the msix interrupt for the given msix_id.
 *
 * Returns: None.
 *
 * See also: vxge_hw_vpath_msix_mask(), vxge_hw_vpath_msix_unmask()
 */
void vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
        struct __vxge_hw_device *hldev = vp->vpath->hldev;

        if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_MSIX_ONE_SHOT)
                __vxge_hw_pio_mem_write32_upper(
                        (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                        &hldev->common_reg->clr_msix_one_shot_vec[msix_id % 4]);
        else
                __vxge_hw_pio_mem_write32_upper(
                        (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                        &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

/**
 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
 * @vp: Virtual Path handle.
 * @msix_id: MSIX ID
 *
 * The function unmasks the msix interrupt for the given msix_id.
 *
 * Returns: None.
 *
 * See also: vxge_hw_vpath_msix_mask()
 */
void
vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
{
        struct __vxge_hw_device *hldev = vp->vpath->hldev;
        __vxge_hw_pio_mem_write32_upper(
                (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
                &hldev->common_reg->clear_msix_mask_vect[msix_id % 4]);
}

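/*
 * Illustrative sketch only (not part of the driver):
 * vxge_hw_vpath_msix_set() takes per-vpath vector offsets, while the
 * mask/unmask/clear helpers take the absolute MSI-X vector number and derive
 * the bit (msix_id / 4) and register index (msix_id % 4) from it.  The vector
 * layout used below is an assumption made for this example.
 */
#if 0
static void vxge_example_setup_msix(struct __vxge_hw_vpath_handle *vp,
                                    u32 vp_id)
{
        int tim_msix_id[4] = {0, 1, 0, 0};      /* unused entries stay 0 */
        int alarm_msix_id = 2;

        vxge_hw_vpath_msix_set(vp, tim_msix_id, alarm_msix_id);

        /* unmask the traffic vectors of this vpath */
        vxge_hw_vpath_msix_unmask(vp, vp_id * 4 + tim_msix_id[0]);
        vxge_hw_vpath_msix_unmask(vp, vp_id * 4 + tim_msix_id[1]);
}
#endif
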
/**
 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Mask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
 */
void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
        u64 tim_int_mask0[4] = {[0 ...3] = 0};
        u32 tim_int_mask1[4] = {[0 ...3] = 0};
        u64 val64;
        struct __vxge_hw_device *hldev = vp->vpath->hldev;

        VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
                tim_int_mask1, vp->vpath->vp_id);

        val64 = readq(&hldev->common_reg->tim_int_mask0);

        if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                        tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
                        &hldev->common_reg->tim_int_mask0);
        }

        val64 = readl(&hldev->common_reg->tim_int_mask1);

        if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                        (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                        tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
                        &hldev->common_reg->tim_int_mask1);
        }
}

/**
 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
 * @vp: Virtual Path handle.
 *
 * Unmask Tx and Rx vpath interrupts.
 *
 * See also: vxge_hw_vpath_inta_mask_tx_rx()
 */
void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
{
        u64 tim_int_mask0[4] = {[0 ...3] = 0};
        u32 tim_int_mask1[4] = {[0 ...3] = 0};
        u64 val64;
        struct __vxge_hw_device *hldev = vp->vpath->hldev;

        VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
                tim_int_mask1, vp->vpath->vp_id);

        val64 = readq(&hldev->common_reg->tim_int_mask0);

        if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
                writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
                          tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
                        &hldev->common_reg->tim_int_mask0);
        }

        if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
           (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
                __vxge_hw_pio_mem_write32_upper(
                        (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
                           tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
                        &hldev->common_reg->tim_int_mask1);
        }
}

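/*
 * Illustrative sketch only (not part of the driver): with INTA the usual
 * pattern is to mask the vpath's Tx/Rx TIM interrupts in the hard interrupt
 * handler, defer the work, and unmask again once the deferred processing has
 * drained the completions.  Both halves are squashed into one hypothetical
 * helper here purely for illustration.
 */
#if 0
static void vxge_example_inta_cycle(struct __vxge_hw_vpath_handle *vp,
                                    struct napi_struct *napi)
{
        vxge_hw_vpath_inta_mask_tx_rx(vp);      /* quiesce this vpath */

        napi_schedule(napi);                    /* process outside irq context */

        /* ...once the poll routine has completed its work... */
        vxge_hw_vpath_inta_unmask_tx_rx(vp);
}
#endif
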
/**
 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
 * descriptors and process the same.
 * @ring: Handle to the ring object used for receive
 *
 * The function polls the Rx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 *
 * See also: vxge_hw_vpath_poll_tx()
 */
enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
{
        u8 t_code;
        enum vxge_hw_status status = VXGE_HW_OK;
        void *first_rxdh;
        u64 val64 = 0;
        int new_count = 0;

        ring->cmpl_cnt = 0;

        status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
        if (status == VXGE_HW_OK)
                ring->callback(ring, first_rxdh,
                        t_code, ring->channel.userdata);

        if (ring->cmpl_cnt != 0) {
                ring->doorbell_cnt += ring->cmpl_cnt;
                if (ring->doorbell_cnt >= ring->rxds_limit) {
                        /*
                         * Each RxD is of 4 qwords, update the number of
                         * qwords replenished
                         */
                        new_count = (ring->doorbell_cnt * 4);

                        /* For each block add 4 more qwords */
                        ring->total_db_cnt += ring->doorbell_cnt;
                        if (ring->total_db_cnt >= ring->rxds_per_block) {
                                new_count += 4;
                                /* Reset total count */
                                ring->total_db_cnt %= ring->rxds_per_block;
                        }
                        writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
                                &ring->vp_reg->prc_rxd_doorbell);
                        val64 =
                          readl(&ring->common_reg->titan_general_int_status);
                        ring->doorbell_cnt = 0;
                }
        }

        return status;
}

/**
 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
 * the same.
 * @fifo: Handle to the fifo object used for non offload send
 * @skb_ptr: pointer to the array that receives the completed skbs
 * @nr_skb: number of entries available at @skb_ptr
 * @more: out flag used by the completion callback
 *
 * The function polls the Tx for the completed descriptors and calls
 * the driver via supplied completion callback.
 *
 * Returns: VXGE_HW_OK, if the polling is completed successfully.
 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
 * descriptors available which are yet to be processed.
 */
enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
                                        struct sk_buff ***skb_ptr, int nr_skb,
                                        int *more)
{
        enum vxge_hw_fifo_tcode t_code;
        void *first_txdlh;
        enum vxge_hw_status status = VXGE_HW_OK;
        struct __vxge_hw_channel *channel;

        channel = &fifo->channel;

        status = vxge_hw_fifo_txdl_next_completed(fifo,
                                &first_txdlh, &t_code);
        if (status == VXGE_HW_OK)
                if (fifo->callback(fifo, first_txdlh, t_code,
                        channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
                        status = VXGE_HW_COMPLETIONS_REMAIN;

        return status;
}
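
/*
 * Illustrative sketch only (not part of the driver): a poll routine would
 * drain Rx completions with vxge_hw_vpath_poll_rx() and Tx completions with
 * vxge_hw_vpath_poll_tx().  The Tx completion callback returns the finished
 * skbs through @skb_ptr so they can be freed outside the completion path.
 * The helper name, the array size and the freeing loop are assumptions made
 * for this example.
 */
#if 0
static void vxge_example_poll_vpath(struct __vxge_hw_ring *ring,
                                    struct __vxge_hw_fifo *fifo)
{
        struct sk_buff *completed[16];
        struct sk_buff **skb_ptr = completed;
        int more = 0;

        vxge_hw_vpath_poll_rx(ring);
        vxge_hw_vpath_poll_tx(fifo, &skb_ptr, ARRAY_SIZE(completed), &more);

        /* assuming the callback advanced skb_ptr past the returned skbs */
        while (skb_ptr > completed)
                dev_kfree_skb(*--skb_ptr);
}
#endif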