/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "sriov.h"
#include "siena_sriov.h"
#include "io.h"
#include "workarounds.h"

/* Falcon-architecture (SFC4000 and SFC9000-family) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */

/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3

/* If EFX_MAX_INT_ERRORS internal errors occur within
 * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EFX_INT_ERROR_EXPIRE 3600
#define EFX_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EFX_RX_FLUSH_COUNT 4

/* Driver generated events */
#define _EFX_CHANNEL_MAGIC_TEST		0x000101
#define _EFX_CHANNEL_MAGIC_FILL		0x000102
#define _EFX_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EFX_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EFX_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EFX_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EFX_CHANNEL_MAGIC_TEST(_channel)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EFX_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN,			\
			   efx_rx_queue_index(_rx_queue))
#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
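
/* For example, a test event on channel 3 carries the magic number
 * _EFX_CHANNEL_MAGIC(0x000101, 3) == (0x000101 << 8) | 3 == 0x010103,
 * from which _EFX_CHANNEL_MAGIC_CODE() recovers the code 0x000101.
 */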

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/

static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
				     unsigned int index)
{
	efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
				     const efx_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0, i, j;
	efx_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EFX_INVERT_OWORD(imask);

		efx_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EFX_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EFX_AND_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 1);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EFX_OR_OWORD(reg, original, mask);
			EFX_SET_OWORD32(reg, j, j, 0);

			efx_writeo(efx, &reg, address);
			efx_reado(efx, &buf, address);

			if (efx_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		efx_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
		  " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
		  EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
	return -EIO;
}

/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/

/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * efx_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EFX_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EFX_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		efx_write_buf_tbl(efx, &buf_desc, index);
	}
}

/* Unmaps a buffer and clears the buffer table entries */
static void
efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	efx_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EFX_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}

/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int efx_alloc_special_buffer(struct efx_nic *efx,
				    struct efx_special_buffer *buffer,
				    unsigned int len)
{
#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif
	len = ALIGN(len, EFX_BUF_SIZE);

	if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EFX_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;
#ifdef CONFIG_SFC_SRIOV
	BUG_ON(efx_siena_sriov_enabled(efx) &&
	       nic_data->vf_buftbl_base < efx->next_buffer_table);
#endif

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
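
/* Example: with the 4KB buffer size noted above, a ring of 4096 8-byte
 * (qword) descriptors occupies 32KB, i.e. 32768 / EFX_BUF_SIZE = 8
 * buffer table entries, so buffer->entries is 8 and the next allocation
 * starts 8 IDs later.
 */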

static void
efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	efx_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}

/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/

/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
	unsigned write_ptr;
	efx_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}

/* Write pointer and first descriptor for TX descriptor ring */
static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
					  const efx_qword_t *txd)
{
	unsigned write_ptr;
	efx_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	efx_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
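
/* Pushing writes the first descriptor to the NIC together with the
 * doorbell, saving it a descriptor fetch from host memory.
 * efx_farch_tx_write() below only pushes when
 * efx_nic_may_push_tx_desc() allows it (broadly, when the queue was
 * previously empty); otherwise it just rings the doorbell via
 * efx_farch_notify_tx_desc().
 */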

/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
	struct efx_tx_buffer *buffer;
	efx_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = efx_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
		EFX_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EFX_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = efx_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		efx_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		efx_farch_notify_tx_desc(tx_queue);
	}
}

/* Allocate hardware resources for a TX queue */
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t reg;

	/* Pin TX descriptor ring */
	efx_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EFX_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);

		efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
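
/* Note: FRF_AZ_TX_DESCQ_SIZE (and FRF_AZ_RX_DESCQ_SIZE below) encode
 * the ring size as the log2 of the number of 4KB buffer-table pages
 * backing it, which __ffs() yields directly because the special-buffer
 * entry count is always a power of two.
 */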

static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EFX_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}

void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	efx_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EFX_ZERO_OWORD(tx_desc_ptr);
	efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	efx_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}

/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/

/* This creates an entry in the RX descriptor queue */
static inline void
efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_rx_buffer *rx_buf;
	efx_qword_t *rxd;

	rxd = efx_rx_desc(rx_queue, index);
	rx_buf = efx_rx_buffer(rx_queue, index);
	EFX_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}

/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		efx_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			efx_rx_queue_index(rx_queue));
}

int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return efx_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(efx_qword_t));
}

void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;
	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	efx_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EFX_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      efx_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      efx_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));
}

static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	efx_oword_t rx_flush_descq;

	EFX_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     efx_rx_queue_index(rx_queue));
	efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}

void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
{
	efx_oword_t rx_desc_ptr;
	struct efx_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EFX_ZERO_OWORD(rx_desc_ptr);
	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 efx_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	efx_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}

/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/

/* efx_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool efx_farch_flush_wake(struct efx_nic *efx)
{
	/* Ensure that all updates are visible to efx_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}

static bool efx_check_tx_flush_complete(struct efx_nic *efx)
{
	bool i = true;
	efx_oword_t txd_ptr_tbl;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EFX_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				efx_farch_magic_event(channel,
						      EFX_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}

/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int efx_farch_do_flush(struct efx_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int rc = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_farch_flush_tx_queue(tx_queue);
		}
		efx_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* If SRIOV is enabled, then offload receive queue flushing to
		 * the firmware (though we will still have to poll for
		 * completion). If that fails, fall back to the old scheme.
		 */
		if (efx_siena_sriov_enabled(efx)) {
			rc = efx_mcdi_flush_rxqs(efx);
			if (!rc)
				goto wait;
		}

		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EFX_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					efx_farch_flush_rx_queue(rx_queue);
				}
			}
		}

	wait:
		timeout = wait_event_timeout(efx->flush_wq,
					     efx_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !efx_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
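
/* Note the shape of the loop above: all TX flushes are requested up
 * front, while RX flushes are throttled so that at most
 * EFX_RX_FLUSH_COUNT requests are ever outstanding; each
 * wait_event_timeout() round either drains queues or frees a slot for
 * another RX flush request.
 */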

int efx_farch_fini_dmaq(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = efx_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_farch_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}

/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events. This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through efx_check_tx_flush_complete())
 * If we don't fix this up, on the next call to efx_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously. Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void efx_farch_finish_flr(struct efx_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}


/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/

/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void efx_farch_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t reg;
	struct efx_nic *efx = channel->efx;

	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	efx_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}

/* Use HW to insert a SW defined event */
void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event)
{
	efx_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}

static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
{
	efx_qword_t event;

	EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	efx_farch_generate_event(channel->efx, channel->channel, &event);
}

/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct efx_tx_queue *tx_queue;
	struct efx_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);
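		/* Completions are batched ("complete everything up to
		 * this index"), so the number of packets completed is
		 * the ring distance from read_count to the reported
		 * descriptor pointer.
		 */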
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		efx_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = efx_channel_get_tx_queue(
			channel, tx_ev_q_label % EFX_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		efx_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EFX_QWORD_FMT"\n", channel->channel,
			  EFX_QWORD_VAL(*event));
	}

	return tx_packets;
}

/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
				      const efx_qword_t *event)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_other_err, rx_ev_pause_frm;
	bool rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned rx_ev_pkt_type;

	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
	rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
			  0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Every error apart from tobe_disc and pause_frm */
	rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
			   rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
			   rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EFX_RX_PKT_DISCARD : 0;
}

/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}

/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct efx_rx_queue *rx_queue;
	struct efx_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = efx_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			efx_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EFX_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}
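
	/* Each fragment of a scattered packet has its own event: count it
	 * here, and complete the packet only on the final event, which has
	 * JUMBO_CONT clear.
	 */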
	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EFX_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EFX_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = efx_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EFX_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	efx_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}

/* If this flush done event corresponds to a &struct efx_tx_queue, then
 * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_tx_queue *tx_queue;
	int qid;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
					    qid % EFX_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			efx_farch_magic_event(tx_queue->channel,
					      EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}

/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
 * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = efx_get_channel(efx, qid);
	if (!efx_channel_has_rx_queue(channel))
		return;
	rx_queue = efx_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
				      EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void
efx_farch_handle_drain_event(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (efx_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}

static void efx_farch_handle_generated_event(struct efx_channel *channel,
					     efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_queue *rx_queue =
		efx_channel_has_rx_queue(channel) ?
		efx_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EFX_CHANNEL_MAGIC_CODE(magic);

	if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so efx_process_channel() won't refill the
		 * queue.  Refill it here */
		efx_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		efx_farch_handle_drain_event(channel);
	} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
		efx_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EFX_QWORD_FMT"\n",
			  channel->channel, EFX_QWORD_VAL(*event));
	}
}

static void
efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_tx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_tx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		efx_farch_handle_rx_flush_done(efx, event);
#ifdef CONFIG_SFC_SRIOV
		efx_siena_sriov_rx_flush_done(efx, event);
#endif
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		efx_schedule_reset(efx,
				   EFX_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, rx_err, efx->net_dev,
				  "RX DMA Q %d reports descriptor fetch error."
				  " RX Q %d is disabled.\n", ev_sub_data,
				  ev_sub_data);
			efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		}
#ifdef CONFIG_SFC_SRIOV
		else
			efx_siena_sriov_desc_fetch_err(efx, ev_sub_data);
#endif
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		if (ev_sub_data < EFX_VI_BASE) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX DMA Q %d reports descriptor fetch error."
1263 " TX Q %d is disabled.\n", ev_sub_data, 1264 ev_sub_data); 1265 efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR); 1266 } 1267#ifdef CONFIG_SFC_SRIOV 1268 else 1269 efx_siena_sriov_desc_fetch_err(efx, ev_sub_data); 1270#endif 1271 break; 1272 default: 1273 netif_vdbg(efx, hw, efx->net_dev, 1274 "channel %d unknown driver event code %d " 1275 "data %04x\n", channel->channel, ev_sub_code, 1276 ev_sub_data); 1277 break; 1278 } 1279} 1280 1281int efx_farch_ev_process(struct efx_channel *channel, int budget) 1282{ 1283 struct efx_nic *efx = channel->efx; 1284 unsigned int read_ptr; 1285 efx_qword_t event, *p_event; 1286 int ev_code; 1287 int tx_packets = 0; 1288 int spent = 0; 1289 1290 if (budget <= 0) 1291 return spent; 1292 1293 read_ptr = channel->eventq_read_ptr; 1294 1295 for (;;) { 1296 p_event = efx_event(channel, read_ptr); 1297 event = *p_event; 1298 1299 if (!efx_event_present(&event)) 1300 /* End of events */ 1301 break; 1302 1303 netif_vdbg(channel->efx, intr, channel->efx->net_dev, 1304 "channel %d event is "EFX_QWORD_FMT"\n", 1305 channel->channel, EFX_QWORD_VAL(event)); 1306 1307 /* Clear this event by marking it all ones */ 1308 EFX_SET_QWORD(*p_event); 1309 1310 ++read_ptr; 1311 1312 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 1313 1314 switch (ev_code) { 1315 case FSE_AZ_EV_CODE_RX_EV: 1316 efx_farch_handle_rx_event(channel, &event); 1317 if (++spent == budget) 1318 goto out; 1319 break; 1320 case FSE_AZ_EV_CODE_TX_EV: 1321 tx_packets += efx_farch_handle_tx_event(channel, 1322 &event); 1323 if (tx_packets > efx->txq_entries) { 1324 spent = budget; 1325 goto out; 1326 } 1327 break; 1328 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1329 efx_farch_handle_generated_event(channel, &event); 1330 break; 1331 case FSE_AZ_EV_CODE_DRIVER_EV: 1332 efx_farch_handle_driver_event(channel, &event); 1333 break; 1334#ifdef CONFIG_SFC_SRIOV 1335 case FSE_CZ_EV_CODE_USER_EV: 1336 efx_siena_sriov_event(channel, &event); 1337 break; 1338#endif 1339 case FSE_CZ_EV_CODE_MCDI_EV: 1340 efx_mcdi_process_event(channel, &event); 1341 break; 1342 case FSE_AZ_EV_CODE_GLOBAL_EV: 1343 if (efx->type->handle_global_event && 1344 efx->type->handle_global_event(channel, &event)) 1345 break; 1346 /* else fall through */ 1347 default: 1348 netif_err(channel->efx, hw, channel->efx->net_dev, 1349 "channel %d unknown event type %d (data " 1350 EFX_QWORD_FMT ")\n", channel->channel, 1351 ev_code, EFX_QWORD_VAL(event)); 1352 } 1353 } 1354 1355out: 1356 channel->eventq_read_ptr = read_ptr; 1357 return spent; 1358} 1359 1360/* Allocate buffer table entries for event queue */ 1361int efx_farch_ev_probe(struct efx_channel *channel) 1362{ 1363 struct efx_nic *efx = channel->efx; 1364 unsigned entries; 1365 1366 entries = channel->eventq_mask + 1; 1367 return efx_alloc_special_buffer(efx, &channel->eventq, 1368 entries * sizeof(efx_qword_t)); 1369} 1370 1371int efx_farch_ev_init(struct efx_channel *channel) 1372{ 1373 efx_oword_t reg; 1374 struct efx_nic *efx = channel->efx; 1375 1376 netif_dbg(efx, hw, efx->net_dev, 1377 "channel %d event queue in special buffers %d-%d\n", 1378 channel->channel, channel->eventq.index, 1379 channel->eventq.index + channel->eventq.entries - 1); 1380 1381 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 1382 EFX_POPULATE_OWORD_3(reg, 1383 FRF_CZ_TIMER_Q_EN, 1, 1384 FRF_CZ_HOST_NOTIFY_MODE, 0, 1385 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1386 efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); 1387 } 1388 1389 /* Pin event queue buffer */ 1390 efx_init_special_buffer(efx, &channel->eventq); 
int efx_farch_ev_process(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	unsigned int read_ptr;
	efx_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = efx_event(channel, read_ptr);
		event = *p_event;

		if (!efx_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EFX_QWORD_FMT"\n",
			   channel->channel, EFX_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EFX_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			efx_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += efx_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			efx_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			efx_farch_handle_driver_event(channel, &event);
			break;
#ifdef CONFIG_SFC_SRIOV
		case FSE_CZ_EV_CODE_USER_EV:
			efx_siena_sriov_event(channel, &event);
			break;
#endif
		case FSE_CZ_EV_CODE_MCDI_EV:
			efx_mcdi_process_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EFX_QWORD_FMT ")\n", channel->channel,
				  ev_code, EFX_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}

/* Allocate buffer table entries for event queue */
int efx_farch_ev_probe(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return efx_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(efx_qword_t));
}

int efx_farch_ev_init(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		EFX_POPULATE_OWORD_3(reg,
				     FRF_CZ_TIMER_Q_EN, 1,
				     FRF_CZ_HOST_NOTIFY_MODE, 0,
				     FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
	}

	/* Pin event queue buffer */
	efx_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EFX_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}

void efx_farch_ev_fini(struct efx_channel *channel)
{
	efx_oword_t reg;
	struct efx_nic *efx = channel->efx;

	/* Remove event queue from card */
	EFX_ZERO_OWORD(reg);
	efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);

	/* Unpin event queue */
	efx_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void efx_farch_ev_remove(struct efx_channel *channel)
{
	efx_free_special_buffer(channel->efx, &channel->eventq);
}


void efx_farch_ev_test_generate(struct efx_channel *channel)
{
	efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
}

void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
{
	efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
			      EFX_CHANNEL_MAGIC_FILL(rx_queue));
}

/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/

/* Enable/disable/generate interrupts */
static inline void efx_farch_interrupts(struct efx_nic *efx,
					bool enabled, bool force)
{
	efx_oword_t int_en_reg_ker;

	EFX_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}

void efx_farch_irq_enable_master(struct efx_nic *efx)
{
	EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	efx_farch_interrupts(efx, true, false);
}

void efx_farch_irq_disable_master(struct efx_nic *efx)
{
	/* Disable interrupts */
	efx_farch_interrupts(efx, false, false);
}

/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
void efx_farch_irq_test_generate(struct efx_nic *efx)
{
	efx_farch_interrupts(efx, true, true);
}

/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	efx_oword_t *int_ker = efx->irq_status.addr;
	efx_oword_t fatal_intr;
	int error, mem_perr;

	efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
		  EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
		  EFX_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");
"disabling bus mastering" : "no recognised error"); 1501 1502 /* If this is a memory parity error dump which blocks are offending */ 1503 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || 1504 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); 1505 if (mem_perr) { 1506 efx_oword_t reg; 1507 efx_reado(efx, ®, FR_AZ_MEM_STAT); 1508 netif_err(efx, hw, efx->net_dev, 1509 "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", 1510 EFX_OWORD_VAL(reg)); 1511 } 1512 1513 /* Disable both devices */ 1514 pci_clear_master(efx->pci_dev); 1515 if (efx_nic_is_dual_func(efx)) 1516 pci_clear_master(nic_data->pci_dev2); 1517 efx_farch_irq_disable_master(efx); 1518 1519 /* Count errors and reset or disable the NIC accordingly */ 1520 if (efx->int_error_count == 0 || 1521 time_after(jiffies, efx->int_error_expire)) { 1522 efx->int_error_count = 0; 1523 efx->int_error_expire = 1524 jiffies + EFX_INT_ERROR_EXPIRE * HZ; 1525 } 1526 if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { 1527 netif_err(efx, hw, efx->net_dev, 1528 "SYSTEM ERROR - reset scheduled\n"); 1529 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1530 } else { 1531 netif_err(efx, hw, efx->net_dev, 1532 "SYSTEM ERROR - max number of errors seen." 1533 "NIC will be disabled\n"); 1534 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1535 } 1536 1537 return IRQ_HANDLED; 1538} 1539 1540/* Handle a legacy interrupt 1541 * Acknowledges the interrupt and schedule event queue processing. 1542 */ 1543irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) 1544{ 1545 struct efx_nic *efx = dev_id; 1546 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); 1547 efx_oword_t *int_ker = efx->irq_status.addr; 1548 irqreturn_t result = IRQ_NONE; 1549 struct efx_channel *channel; 1550 efx_dword_t reg; 1551 u32 queues; 1552 int syserr; 1553 1554 /* Read the ISR which also ACKs the interrupts */ 1555 efx_readd(efx, ®, FR_BZ_INT_ISR0); 1556 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1557 1558 /* Legacy interrupts are disabled too late by the EEH kernel 1559 * code. Disable them earlier. 1560 * If an EEH error occurred, the read will have returned all ones. 1561 */ 1562 if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) && 1563 !efx->eeh_disabled_legacy_irq) { 1564 disable_irq_nosync(efx->legacy_irq); 1565 efx->eeh_disabled_legacy_irq = true; 1566 } 1567 1568 /* Handle non-event-queue sources */ 1569 if (queues & (1U << efx->irq_level) && soft_enabled) { 1570 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1571 if (unlikely(syserr)) 1572 return efx_farch_fatal_interrupt(efx); 1573 efx->last_irq_cpu = raw_smp_processor_id(); 1574 } 1575 1576 if (queues != 0) { 1577 efx->irq_zero_count = 0; 1578 1579 /* Schedule processing of any interrupting queues */ 1580 if (likely(soft_enabled)) { 1581 efx_for_each_channel(channel, efx) { 1582 if (queues & 1) 1583 efx_schedule_channel_irq(channel); 1584 queues >>= 1; 1585 } 1586 } 1587 result = IRQ_HANDLED; 1588 1589 } else { 1590 efx_qword_t *event; 1591 1592 /* Legacy ISR read can return zero once (SF bug 15783) */ 1593 1594 /* We can't return IRQ_HANDLED more than once on seeing ISR=0 1595 * because this might be a shared interrupt. 

/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count, buftbl_min;

#ifdef CONFIG_SFC_SRIOV
	struct siena_nic_data *nic_data = efx->nic_data;
#endif

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
		       efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
		       efx->n_channels * EFX_MAX_EVQ_SIZE)
		      * sizeof(efx_qword_t) / EFX_BUF_SIZE);
	vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);

#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx)) {
			unsigned vi_dc_entries, buftbl_free;
			unsigned entries_per_vf, vf_limit;

			nic_data->vf_buftbl_base = buftbl_min;

			vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
			vi_count = max(vi_count, EFX_VI_BASE);
			buftbl_free = (sram_lim_qw - buftbl_min -
				       vi_count * vi_dc_entries);

			entries_per_vf = ((vi_dc_entries +
					   EFX_VF_BUFTBL_PER_VI) *
					  efx_vf_size(efx));
			vf_limit = min(buftbl_free / entries_per_vf,
				       (1024U - EFX_VI_BASE) >> efx->vi_scale);

			if (efx->vf_count > vf_limit) {
				netif_err(efx, probe, efx->net_dev,
					  "Reducing VF count from %d to %d\n",
					  efx->vf_count, vf_limit);
				efx->vf_count = vf_limit;
			}
			vi_count += efx->vf_count * efx_vf_size(efx);
		}
	}
#endif

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}

u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

u32 efx_farch_fpga_ver(struct efx_nic *efx)
{
	efx_oword_t altera_build;
	efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}

void efx_farch_init_common(struct efx_nic *efx)
{
	efx_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EFX_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EFX_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
		/* Use an interrupt level unused by event queues */
		efx->irq_level = 0x1f;
	else
		/* Use a valid MSI-X vector */
		efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EFX_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
	EFX_INVERT_OWORD(temp);
	efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level.  Set arbitration to one pkt/Q.
	 */
	efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
		EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		EFX_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		efx_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}

/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */

/* "Fudge factors" - difference between programmed value and actual depth.
 * Due to pipelined implementation we need to program H/W with a value that
 * is larger than the hop limit we want.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1

/* Hard maximum search limit.  Hardware will time-out beyond 200-something.
 * We also need to avoid infinite loops in efx_farch_filter_search() when the
 * table is full.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
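
/* Worked example of the fudge factors above: if the deepest TCP
 * wildcard filter inserted so far sits at search depth 5, the hardware
 * limit is programmed as 5 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD = 8
 * (see efx_farch_filter_push_rx_config() below).
 */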

/* Don't try very hard to find space for performance hints, as this is
 * counter-productive.
 */
#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5

enum efx_farch_filter_type {
	EFX_FARCH_FILTER_TCP_FULL = 0,
	EFX_FARCH_FILTER_TCP_WILD,
	EFX_FARCH_FILTER_UDP_FULL,
	EFX_FARCH_FILTER_UDP_WILD,
	EFX_FARCH_FILTER_MAC_FULL = 4,
	EFX_FARCH_FILTER_MAC_WILD,
	EFX_FARCH_FILTER_UC_DEF = 8,
	EFX_FARCH_FILTER_MC_DEF,
	EFX_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};

enum efx_farch_filter_table_id {
	EFX_FARCH_FILTER_TABLE_RX_IP = 0,
	EFX_FARCH_FILTER_TABLE_RX_MAC,
	EFX_FARCH_FILTER_TABLE_RX_DEF,
	EFX_FARCH_FILTER_TABLE_TX_MAC,
	EFX_FARCH_FILTER_TABLE_COUNT,
};

enum efx_farch_filter_index {
	EFX_FARCH_FILTER_INDEX_UC_DEF,
	EFX_FARCH_FILTER_INDEX_MC_DEF,
	EFX_FARCH_FILTER_SIZE_RX_DEF,
};

struct efx_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};

struct efx_farch_filter_table {
	enum efx_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct efx_farch_filter_spec *spec;
	unsigned	search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
};

struct efx_farch_filter_state {
	struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
};

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx);

/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff.
 */
static u16 efx_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash.
 */
static u16 efx_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}
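
/* Illustrative sketch, not driver code: how the hash and increment
 * above combine into the open-addressing probe sequence used by
 * efx_farch_filter_insert() below.  The increment is always odd and
 * therefore co-prime with the power-of-two table size, so every entry
 * is eventually visited.  The helper name is hypothetical.
 */
static inline unsigned int
efx_farch_example_probe(u32 key, unsigned int depth, unsigned int table_size)
{
	/* depth 1 probes the hash bucket itself; each further step
	 * advances by the increment, modulo the table size
	 */
	return (efx_farch_filter_hash(key) +
		(depth - 1) * efx_farch_filter_increment(key)) &
	       (table_size - 1);
}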

static enum efx_farch_filter_table_id
efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
		     (EFX_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
		     (EFX_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
		     EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ?
				    2 : 0);
}

static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t filter_ctl;

	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
			    EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_RSS));

		/* There is a single bit to enable RX scatter for all
		 * unmatched packets.  Only set it if scatter is
		 * enabled in both filter specs.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EFX_FILTER_FLAG_RX_SCATTER));
	} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * bit here.
		 */
		EFX_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}

static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	efx_oword_t tx_cfg;

	efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EFX_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
			EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}

static int
efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
			       const struct efx_filter_spec *gen_spec)
{
	bool is_full = false;

	if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
	    gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
		return -EINVAL;

	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;

	switch (gen_spec->match_flags) {
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
	      EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
		is_full = true;
		/* fall through */
	case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
	      EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
		__be32 rhost, host1, host2;
		__be16 rport, port1, port2;

		EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));

		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
				      EFX_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
				      EFX_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		/* Filter is constructed in terms of source and destination,
		 * with the odd wrinkle that the ports are swapped in a UDP
		 * wildcard filter.  We need to convert from local and remote
		 * (= zero for wildcard) addresses.
		 */
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);

		break;
	}

	case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
		is_full = true;
		/* fall through */
	case EFX_FILTER_MATCH_LOC_MAC:
		spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
			      EFX_FARCH_FILTER_MAC_WILD);
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;

	case EFX_FILTER_MATCH_LOC_MAC_IG:
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
			      EFX_FARCH_FILTER_MC_DEF :
			      EFX_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}
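
/* Illustrative usage, not driver code: building the farch spec for a
 * TCP wildcard match on local address 192.0.2.1 port 80.  The helper
 * name and the address/port values are hypothetical;
 * efx_filter_init_rx() and efx_filter_set_ipv4_local() are the generic
 * spec constructors used elsewhere in the driver.
 */
static inline int efx_farch_example_tcp_wild_spec(void)
{
	struct efx_filter_spec gen_spec;
	struct efx_farch_filter_spec spec;

	efx_filter_init_rx(&gen_spec, EFX_FILTER_PRI_MANUAL, 0, 0);
	efx_filter_set_ipv4_local(&gen_spec, IPPROTO_TCP,
				  htonl(0xc0000201), htons(80));

	/* Only the local host/port are matched, so this selects
	 * EFX_FARCH_FILTER_TCP_WILD above.
	 */
	return efx_farch_filter_from_gen_spec(&spec, &gen_spec);
}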

static void
efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
			     const struct efx_farch_filter_spec *spec)
{
	bool is_full = false;

	/* *gen_spec should be completely initialised, to be consistent
	 * with efx_filter_init_{rx,tx}() and in case we want to copy
	 * it back to userland.
	 */
	memset(gen_spec, 0, sizeof(*gen_spec));

	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;

	switch (spec->type) {
	case EFX_FARCH_FILTER_TCP_FULL:
	case EFX_FARCH_FILTER_UDP_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_TCP_WILD:
	case EFX_FARCH_FILTER_UDP_WILD: {
		__be32 host1, host2;
		__be16 port1, port2;

		gen_spec->match_flags =
			EFX_FILTER_MATCH_ETHER_TYPE |
			EFX_FILTER_MATCH_IP_PROTO |
			EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
						  EFX_FILTER_MATCH_REM_PORT);
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EFX_FARCH_FILTER_TCP_FULL ||
			 spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
			IPPROTO_TCP : IPPROTO_UDP;

		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EFX_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
		} else {
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		}
		if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
		} else {
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		}

		break;
	}

	case EFX_FARCH_FILTER_MAC_FULL:
		is_full = true;
		/* fall through */
	case EFX_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		break;

	case EFX_FARCH_FILTER_UC_DEF:
	case EFX_FARCH_FILTER_MC_DEF:
		gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
		break;

	default:
		WARN_ON(1);
		break;
	}
}

static void
efx_farch_filter_init_rx_auto(struct efx_nic *efx,
			      struct efx_farch_filter_spec *spec)
{
	/* If there's only one channel then disable RSS for non VF
	 * traffic, thereby allowing VFs to use RSS when the PF can't.
	 */
	spec->priority = EFX_FILTER_PRI_AUTO;
	spec->flags = (EFX_FILTER_FLAG_RX |
		       (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;
}

/* Build a filter entry and return its n-tuple key. */
static u32 efx_farch_filter_build(efx_oword_t *filter,
				  struct efx_farch_filter_spec *spec)
{
	u32 data3;

	switch (efx_farch_filter_spec_table_id(spec)) {
	case EFX_FARCH_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
			       spec->type == EFX_FARCH_FILTER_UDP_WILD);
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EFX_DWORD_2, spec->data[2],
			EFX_DWORD_1, spec->data[1],
			EFX_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EFX_FARCH_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
		EFX_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}

static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
				   const struct efx_farch_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EFX_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}

/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
 * accept user-provided IDs.
2332 */ 2333 2334#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5 2335 2336static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = { 2337 [EFX_FARCH_FILTER_TCP_FULL] = 0, 2338 [EFX_FARCH_FILTER_UDP_FULL] = 0, 2339 [EFX_FARCH_FILTER_TCP_WILD] = 1, 2340 [EFX_FARCH_FILTER_UDP_WILD] = 1, 2341 [EFX_FARCH_FILTER_MAC_FULL] = 2, 2342 [EFX_FARCH_FILTER_MAC_WILD] = 3, 2343 [EFX_FARCH_FILTER_UC_DEF] = 4, 2344 [EFX_FARCH_FILTER_MC_DEF] = 4, 2345}; 2346 2347static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = { 2348 EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */ 2349 EFX_FARCH_FILTER_TABLE_RX_IP, 2350 EFX_FARCH_FILTER_TABLE_RX_MAC, 2351 EFX_FARCH_FILTER_TABLE_RX_MAC, 2352 EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */ 2353 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */ 2354 EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */ 2355}; 2356 2357#define EFX_FARCH_FILTER_INDEX_WIDTH 13 2358#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1) 2359 2360static inline u32 2361efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec, 2362 unsigned int index) 2363{ 2364 unsigned int range; 2365 2366 range = efx_farch_filter_type_match_pri[spec->type]; 2367 if (!(spec->flags & EFX_FILTER_FLAG_RX)) 2368 range += EFX_FARCH_FILTER_MATCH_PRI_COUNT; 2369 2370 return range << EFX_FARCH_FILTER_INDEX_WIDTH | index; 2371} 2372 2373static inline enum efx_farch_filter_table_id 2374efx_farch_filter_id_table_id(u32 id) 2375{ 2376 unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH; 2377 2378 if (range < ARRAY_SIZE(efx_farch_filter_range_table)) 2379 return efx_farch_filter_range_table[range]; 2380 else 2381 return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */ 2382} 2383 2384static inline unsigned int efx_farch_filter_id_index(u32 id) 2385{ 2386 return id & EFX_FARCH_FILTER_INDEX_MASK; 2387} 2388 2389u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx) 2390{ 2391 struct efx_farch_filter_state *state = efx->filter_state; 2392 unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1; 2393 enum efx_farch_filter_table_id table_id; 2394 2395 do { 2396 table_id = efx_farch_filter_range_table[range]; 2397 if (state->table[table_id].size != 0) 2398 return range << EFX_FARCH_FILTER_INDEX_WIDTH | 2399 state->table[table_id].size; 2400 } while (range--); 2401 2402 return 0; 2403} 2404 2405s32 efx_farch_filter_insert(struct efx_nic *efx, 2406 struct efx_filter_spec *gen_spec, 2407 bool replace_equal) 2408{ 2409 struct efx_farch_filter_state *state = efx->filter_state; 2410 struct efx_farch_filter_table *table; 2411 struct efx_farch_filter_spec spec; 2412 efx_oword_t filter; 2413 int rep_index, ins_index; 2414 unsigned int depth = 0; 2415 int rc; 2416 2417 rc = efx_farch_filter_from_gen_spec(&spec, gen_spec); 2418 if (rc) 2419 return rc; 2420 2421 table = &state->table[efx_farch_filter_spec_table_id(&spec)]; 2422 if (table->size == 0) 2423 return -EINVAL; 2424 2425 netif_vdbg(efx, hw, efx->net_dev, 2426 "%s: type %d search_limit=%d", __func__, spec.type, 2427 table->search_limit[spec.type]); 2428 2429 if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) { 2430 /* One filter spec per type */ 2431 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0); 2432 BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF != 2433 EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF); 2434 rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF; 2435 ins_index = rep_index; 2436 2437 spin_lock_bh(&efx->filter_lock); 2438 } else { 2439 /* Search concurrently for 2440 * (1) a filter to be 

u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum efx_farch_filter_table_id table_id;

	do {
		table_id = efx_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EFX_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}

s32 efx_farch_filter_insert(struct efx_nic *efx,
			    struct efx_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec spec;
	efx_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	table = &state->table[efx_farch_filter_spec_table_id(&spec)];
	if (table->size == 0)
		return -EINVAL;

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
			     EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;

		spin_lock_bh(&efx->filter_lock);
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *	with the same match values, up to the current
		 *	search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *	free slot before it or up to the maximum search
		 *	depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *	found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *	either found (2) or searched exhaustively for it
		 */
		u32 key = efx_farch_filter_build(&filter, &spec);
		unsigned int hash = efx_farch_filter_hash(key);
		unsigned int incr = efx_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EFX_FILTER_PRI_HINT ?
			EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EFX_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (efx_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct efx_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out;
		}
		if (spec.priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
		if (saved_spec->priority == EFX_FILTER_PRI_AUTO ||
		    saved_spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO)
			spec.flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
		efx_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EFX_FILTER_FLAG_TX)
				efx_farch_filter_push_tx_limits(efx);
			else
				efx_farch_filter_push_rx_config(efx);
		}

		efx_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			efx_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = efx_farch_filter_make_id(&spec, ins_index);

out:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}
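
/* Illustrative usage, not driver code: the return-value semantics of
 * efx_farch_filter_insert() above.
 *
 *	s32 id = efx_farch_filter_insert(efx, &gen_spec, false);
 *
 * A non-negative id is the external filter ID described earlier;
 * -EEXIST means an equal filter of the same priority already exists
 * (and replace_equal was false); -EPERM means a higher-priority filter
 * occupies the slot; -EBUSY means no free slot was found within the
 * search limits.
 */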

static void
efx_farch_filter_table_clear_entry(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx)
{
	static efx_oword_t filter;

	EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	efx_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
			efx_farch_filter_push_tx_limits(efx);
		else
			efx_farch_filter_push_rx_config(efx);
	}
}

static int efx_farch_filter_remove(struct efx_nic *efx,
				   struct efx_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority != priority)
		return -ENOENT;

	if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
		efx_farch_filter_init_rx_auto(efx, spec);
		efx_farch_filter_push_rx_config(efx);
	} else {
		efx_farch_filter_table_clear_entry(efx, table, filter_idx);
	}

	return 0;
}

int efx_farch_filter_remove_safe(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 filter_id)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	struct efx_farch_filter_spec *spec;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);
	rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);

	return rc;
}

int efx_farch_filter_get_safe(struct efx_nic *efx,
			      enum efx_filter_priority priority,
			      u32 filter_id, struct efx_filter_spec *spec_buf)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	struct efx_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	table_id = efx_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = efx_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		efx_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&efx->filter_lock);

	return rc;
}

static void
efx_farch_filter_table_clear(struct efx_nic *efx,
			     enum efx_farch_filter_table_id table_id,
			     enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
		if (table->spec[filter_idx].priority != EFX_FILTER_PRI_AUTO)
			efx_farch_filter_remove(efx, table,
						filter_idx, priority);
	}
	spin_unlock_bh(&efx->filter_lock);
}

int efx_farch_filter_clear_rx(struct efx_nic *efx,
			      enum efx_filter_priority priority)
{
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
	return 0;
}

u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
				   enum efx_filter_priority priority)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	return count;
}

s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
				enum efx_filter_priority priority,
				u32 *buf, u32 size)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = efx_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out:
	spin_unlock_bh(&efx->filter_lock);

	return count;
}

/* Restore filter state after reset */
void efx_farch_filter_table_restore(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);
	efx_farch_filter_push_tx_limits(efx);

	spin_unlock_bh(&efx->filter_lock);
}

void efx_farch_filter_table_remove(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}

int efx_farch_filter_table_probe(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state;
	struct efx_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
		table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;

		table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
		table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
		table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;

		table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
		table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
		table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
		table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
		table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
	}

	for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct efx_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EFX_FARCH_FILTER_UC_DEF + i;
			efx_farch_filter_init_rx_auto(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	return 0;

fail:
	efx_farch_filter_table_remove(efx);
	return -ENOMEM;
}

/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	enum efx_farch_filter_table_id table_id;
	struct efx_farch_filter_table *table;
	efx_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EFX_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EFX_FILTER_FLAG_RX_SCATTER;

			if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by efx_farch_filter_push_rx_config() */
				continue;

			efx_farch_filter_build(&filter, &table->spec[filter_idx]);
			efx_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	efx_farch_filter_push_rx_config(efx);

	spin_unlock_bh(&efx->filter_lock);
}

#ifdef CONFIG_RFS_ACCEL

s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
				struct efx_filter_spec *gen_spec)
{
	return efx_farch_filter_insert(efx, gen_spec, true);
}

bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct efx_farch_filter_state *state = efx->filter_state;
	struct efx_farch_filter_table *table =
		&state->table[EFX_FARCH_FILTER_TABLE_RX_IP];

	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EFX_FILTER_PRI_HINT &&
	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
				flow_id, index)) {
		efx_farch_filter_table_clear_entry(efx, table, index);
		return true;
	}

	return false;
}

#endif /* CONFIG_RFS_ACCEL */

void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *ha;
	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	if (!efx_dev_registered(efx))
		return;

	netif_addr_lock_bh(net_dev);

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	netif_addr_unlock_bh(net_dev);
}
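
/* Sanity-check sketch, not driver code, for the claim above that the
 * broadcast address always hashes to bit 0xff: the low bits of
 * ether_crc_le(ETH_ALEN, broadcast) = 0xbe2612ff select the bucket in
 * the 256-entry hash.  The helper name is hypothetical.
 */
static inline bool efx_farch_example_bcast_hash_bit_is_ff(void)
{
	static const u8 bcast[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	return (ether_crc_le(ETH_ALEN, bcast) &
		(EFX_MCAST_HASH_ENTRIES - 1)) == 0xff;
}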