/* cnic.c: QLogic CNIC core network driver.
 *
 * Copyright (c) 2006-2014 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
 * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
 * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/uio_driver.h>
#include <linux/in.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <scsi/iscsi_if.h>

#define BCM_CNIC 1
#include "cnic_if.h"
#include "bnx2.h"
#include "bnx2x/bnx2x.h"
#include "bnx2x/bnx2x_reg.h"
#include "bnx2x/bnx2x_fw_defs.h"
#include "bnx2x/bnx2x_hsi.h"
#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
#include "../../../scsi/bnx2fc/bnx2fc_constants.h"
#include "cnic.h"
#include "cnic_defs.h"

#define CNIC_MODULE_NAME	"cnic"

static char version[] =
	"QLogic " CNIC_MODULE_NAME " Driver v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
	      "Chen (zongxi@broadcom.com)");
MODULE_DESCRIPTION("QLogic cnic Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(CNIC_MODULE_VERSION);

/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
static LIST_HEAD(cnic_dev_list);
static LIST_HEAD(cnic_udev_list);
static DEFINE_RWLOCK(cnic_dev_lock);
static DEFINE_MUTEX(cnic_lock);

static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];

/* helper function, assuming cnic_lock is held */
static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
{
	return rcu_dereference_protected(cnic_ulp_tbl[type],
					 lockdep_is_held(&cnic_lock));
}

static int cnic_service_bnx2(void *, void *);
static int cnic_service_bnx2x(void *, void *);
static int cnic_ctl(void *, struct cnic_ctl_info *);

static struct cnic_ops cnic_bnx2_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2,
	.cnic_ctl	= cnic_ctl,
};

static struct cnic_ops cnic_bnx2x_ops = {
	.cnic_owner	= THIS_MODULE,
	.cnic_handler	= cnic_service_bnx2x,
	.cnic_ctl	= cnic_ctl,
};

static struct workqueue_struct *cnic_wq;

static void cnic_shutdown_rings(struct cnic_dev *);
static void cnic_init_rings(struct cnic_dev *);
static int cnic_cm_set_pg(struct cnic_sock *);

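/*
 * UIO hooks: cnic exports its L2 rings and status blocks through a UIO
 * device (typically consumed by a userspace helper such as iscsiuio).
 * Only CAP_NET_ADMIN may open it, only one opener is allowed at a time
 * (tracked via udev->uio_dev), and the rings are re-initialized on open.
 */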
static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;
	struct cnic_dev *dev;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (udev->uio_dev != -1)
		return -EBUSY;

	rtnl_lock();
	dev = udev->dev;

	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
		rtnl_unlock();
		return -ENODEV;
	}

	udev->uio_dev = iminor(inode);

	cnic_shutdown_rings(dev);
	cnic_init_rings(dev);
	rtnl_unlock();

	return 0;
}

static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
{
	struct cnic_uio_dev *udev = uinfo->priv;

	udev->uio_dev = -1;
	return 0;
}

static inline void cnic_hold(struct cnic_dev *dev)
{
	atomic_inc(&dev->ref_count);
}

static inline void cnic_put(struct cnic_dev *dev)
{
	atomic_dec(&dev->ref_count);
}

static inline void csk_hold(struct cnic_sock *csk)
{
	atomic_inc(&csk->ref_count);
}

static inline void csk_put(struct cnic_sock *csk)
{
	atomic_dec(&csk->ref_count);
}

static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
{
	struct cnic_dev *cdev;

	read_lock(&cnic_dev_lock);
	list_for_each_entry(cdev, &cnic_dev_list, list) {
		if (netdev == cdev->netdev) {
			cnic_hold(cdev);
			read_unlock(&cnic_dev_lock);
			return cdev;
		}
	}
	read_unlock(&cnic_dev_lock);
	return NULL;
}

static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
{
	atomic_inc(&ulp_ops->ref_count);
}

static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
{
	atomic_dec(&ulp_ops->ref_count);
}

static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTX_WR_CMD;
	io->cid_addr = cid_addr;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
	io->offset = off;
	io->dma_addr = addr;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_l2_ring *ring = &info.data.ring;

	if (start)
		info.cmd = DRV_CTL_START_L2_CMD;
	else
		info.cmd = DRV_CTL_STOP_L2_CMD;

	ring->cid = cid;
	ring->client_id = cl_id;
	ethdev->drv_ctl(dev->netdev, &info);
}

static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_WR_CMD;
	io->offset = off;
	io->data = val;
	ethdev->drv_ctl(dev->netdev, &info);
}

static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct drv_ctl_io *io = &info.data.io;

	info.cmd = DRV_CTL_IO_RD_CMD;
	io->offset = off;
	ethdev->drv_ctl(dev->netdev, &info);
	return io->data;
}

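/*
 * All of the register/context accessors above go through ethdev->drv_ctl(),
 * the callback provided by the underlying bnx2/bnx2x ethernet driver; cnic
 * itself never touches the device registers directly on these paths.
 */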
static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;
	struct fcoe_capabilities *fcoe_cap =
		&info.data.register_data.fcoe_features;

	if (reg) {
		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
		if (ulp_type == CNIC_ULP_FCOE && dev->fcoe_cap)
			memcpy(fcoe_cap, dev->fcoe_cap, sizeof(*fcoe_cap));
	} else {
		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
	}

	info.data.ulp_type = ulp_type;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_in_use(struct cnic_sock *csk)
{
	return test_bit(SK_F_INUSE, &csk->flags);
}

static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	struct drv_ctl_info info;

	info.cmd = cmd;
	info.data.credit.credit_count = count;
	ethdev->drv_ctl(dev->netdev, &info);
}

static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
{
	u32 i;

	if (!cp->ctx_tbl)
		return -EINVAL;

	for (i = 0; i < cp->max_cid_space; i++) {
		if (cp->ctx_tbl[i].cid == cid) {
			*l5_cid = i;
			return 0;
		}
	}
	return -EINVAL;
}

static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
			   struct cnic_sock *csk)
{
	struct iscsi_path path_req;
	char *buf = NULL;
	u16 len = 0;
	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
	struct cnic_ulp_ops *ulp_ops;
	struct cnic_uio_dev *udev = cp->udev;
	int rc = 0, retry = 0;

	if (!udev || udev->uio_dev == -1)
		return -ENODEV;

	if (csk) {
		len = sizeof(path_req);
		buf = (char *) &path_req;
		memset(&path_req, 0, len);

		msg_type = ISCSI_KEVENT_PATH_REQ;
		path_req.handle = (u64) csk->l5_cid;
		if (test_bit(SK_F_IPV6, &csk->flags)) {
			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
			       sizeof(struct in6_addr));
			path_req.ip_addr_len = 16;
		} else {
			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
			       sizeof(struct in_addr));
			path_req.ip_addr_len = 4;
		}
		path_req.vlan_id = csk->vlan_id;
		path_req.pmtu = csk->mtu;
	}

	while (retry < 3) {
		rc = 0;
		rcu_read_lock();
		ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
		if (ulp_ops)
			rc = ulp_ops->iscsi_nl_send_msg(
				cp->ulp_handle[CNIC_ULP_ISCSI],
				msg_type, buf, len);
		rcu_read_unlock();
		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
			break;

		msleep(100);
		retry++;
	}
	return rc;
}

static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);

static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
				  char *buf, u16 len)
{
	int rc = -EINVAL;

	switch (msg_type) {
	case ISCSI_UEVENT_PATH_UPDATE: {
		struct cnic_local *cp;
		u32 l5_cid;
		struct cnic_sock *csk;
		struct iscsi_path *path_resp;

		if (len < sizeof(*path_resp))
			break;

		path_resp = (struct iscsi_path *) buf;
		cp = dev->cnic_priv;
		l5_cid = (u32) path_resp->handle;
		if (l5_cid >= MAX_CM_SK_TBL_SZ)
			break;

		if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
			rc = -ENODEV;
			break;
		}
		csk = &cp->csk_tbl[l5_cid];
		csk_hold(csk);
		if (cnic_in_use(csk) &&
		    test_bit(SK_F_CONNECT_START, &csk->flags)) {

			csk->vlan_id = path_resp->vlan_id;

			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
			if (test_bit(SK_F_IPV6, &csk->flags))
				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
				       sizeof(struct in6_addr));
			else
				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
				       sizeof(struct in_addr));

			if (is_valid_ether_addr(csk->ha)) {
				cnic_cm_set_pg(csk);
			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {

				cnic_cm_upcall(cp, csk,
					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
				clear_bit(SK_F_CONNECT_START, &csk->flags);
			}
		}
		csk_put(csk);
		rc = 0;
	}
	}

	return rc;
}

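/*
 * Offload scheduling helpers.  SK_F_OFFLD_SCHED acts as a simple busy bit:
 * the prep functions take it with test_and_set_bit() and msleep() while a
 * previous offload or teardown is still in flight, and SK_F_CONNECT_START
 * gates whether an offload should proceed at all.
 */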
static int cnic_offld_prep(struct cnic_sock *csk)
{
	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		return 0;

	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
		return 0;
	}

	return 1;
}

static int cnic_close_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
			msleep(1);

		return 1;
	}
	return 0;
}

static int cnic_abort_prep(struct cnic_sock *csk)
{
	clear_bit(SK_F_CONNECT_START, &csk->flags);
	smp_mb__after_atomic();

	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
		msleep(1);

	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
		return 1;
	}

	return 0;
}

int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
{
	struct cnic_dev *dev;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type)) {
		pr_err("%s: Type %d has already been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
	}
	read_unlock(&cnic_dev_lock);

	atomic_set(&ulp_ops->ref_count, 0);
	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
	mutex_unlock(&cnic_lock);

	/* Prevent race conditions with netdev_event */
	rtnl_lock();
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_init(dev);
	}
	rtnl_unlock();

	return 0;
}

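/*
 * A ULP driver registers once globally and is then bound to each cnic
 * device separately.  A minimal usage sketch (names are illustrative,
 * not taken from bnx2i/bnx2fc):
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init	= my_init,
 *		.cnic_exit	= my_exit,
 *		.cnic_start	= my_start,
 *		.cnic_stop	= my_stop,
 *	};
 *
 *	err = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 */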
int cnic_unregister_driver(int ulp_type)
{
	struct cnic_dev *dev;
	struct cnic_ulp_ops *ulp_ops;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	if (!ulp_ops) {
		pr_err("%s: Type %d has not been registered\n",
		       __func__, ulp_type);
		goto out_unlock;
	}
	read_lock(&cnic_dev_lock);
	list_for_each_entry(dev, &cnic_dev_list, list) {
		struct cnic_local *cp = dev->cnic_priv;

		if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
			pr_err("%s: Type %d still has devices registered\n",
			       __func__, ulp_type);
			read_unlock(&cnic_dev_lock);
			goto out_unlock;
		}
	}
	read_unlock(&cnic_dev_lock);

	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);

	mutex_unlock(&cnic_lock);
	synchronize_rcu();
	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
		msleep(100);
		i++;
	}

	if (atomic_read(&ulp_ops->ref_count) != 0)
		pr_warn("%s: Failed waiting for ref count to go to zero\n",
			__func__);
	return 0;

out_unlock:
	mutex_unlock(&cnic_lock);
	return -EINVAL;
}

static int cnic_start_hw(struct cnic_dev *);
static void cnic_stop_hw(struct cnic_dev *);

static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
				void *ulp_ctx)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}
	mutex_lock(&cnic_lock);
	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
		pr_err("%s: Driver with type %d has not been registered\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EAGAIN;
	}
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		pr_err("%s: Type %d has already been registered to this device\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EBUSY;
	}

	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
	cp->ulp_handle[ulp_type] = ulp_ctx;
	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
	cnic_hold(dev);

	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);

	mutex_unlock(&cnic_lock);

	cnic_ulp_ctl(dev, ulp_type, true);

	return 0;

}
EXPORT_SYMBOL(cnic_register_driver);

static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i = 0;

	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
		pr_err("%s: Bad type %d\n", __func__, ulp_type);
		return -EINVAL;
	}

	if (ulp_type == CNIC_ULP_ISCSI)
		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);

	mutex_lock(&cnic_lock);
	if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
		cnic_put(dev);
	} else {
		pr_err("%s: device not registered to this ulp type %d\n",
		       __func__, ulp_type);
		mutex_unlock(&cnic_lock);
		return -EINVAL;
	}
	mutex_unlock(&cnic_lock);

	if (ulp_type == CNIC_ULP_FCOE)
		dev->fcoe_cap = NULL;

	synchronize_rcu();

	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
	       i < 20) {
		msleep(100);
		i++;
	}
	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");

	cnic_ulp_ctl(dev, ulp_type, false);

	return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);

static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
			    u32 next)
{
	id_tbl->start = start_id;
	id_tbl->max = size;
	id_tbl->next = next;
	spin_lock_init(&id_tbl->lock);
	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
	if (!id_tbl->table)
		return -ENOMEM;

	return 0;
}

static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
{
	kfree(id_tbl->table);
	id_tbl->table = NULL;
}

static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	int ret = -1;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return ret;

	spin_lock(&id_tbl->lock);
	if (!test_bit(id, id_tbl->table)) {
		set_bit(id, id_tbl->table);
		ret = 0;
	}
	spin_unlock(&id_tbl->lock);
	return ret;
}

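/*
 * The id tables are plain bitmap allocators.  cnic_alloc_new_id() scans
 * round-robin starting at id_tbl->next and advances the next pointer with
 * "& (max - 1)", i.e. it assumes a power-of-two table size for the wrap
 * to land back at zero.
 */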
/* Returns -1 if not successful */
static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
{
	u32 id;

	spin_lock(&id_tbl->lock);
	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
	if (id >= id_tbl->max) {
		id = -1;
		if (id_tbl->next != 0) {
			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
			if (id >= id_tbl->next)
				id = -1;
		}
	}

	if (id < id_tbl->max) {
		set_bit(id, id_tbl->table);
		id_tbl->next = (id + 1) & (id_tbl->max - 1);
		id += id_tbl->start;
	}

	spin_unlock(&id_tbl->lock);

	return id;
}

static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
{
	if (id == -1)
		return;

	id -= id_tbl->start;
	if (id >= id_tbl->max)
		return;

	clear_bit(id, id_tbl->table);
}

static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;

	if (!dma->pg_arr)
		return;

	for (i = 0; i < dma->num_pages; i++) {
		if (dma->pg_arr[i]) {
			dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
					  dma->pg_arr[i], dma->pg_map_arr[i]);
			dma->pg_arr[i] = NULL;
		}
	}
	if (dma->pgtbl) {
		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
				  dma->pgtbl, dma->pgtbl_map);
		dma->pgtbl = NULL;
	}
	kfree(dma->pg_arr);
	dma->pg_arr = NULL;
	dma->num_pages = 0;
}

static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in big endian format. */
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
	}
}

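/*
 * The two page-table builders differ only in word order: the variant above
 * stores each 64-bit DMA address high word first (the chip treats the
 * entries as big endian), while the _le variant below stores the low word
 * first.
 */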
static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
{
	int i;
	__le32 *page_table = (__le32 *) dma->pgtbl;

	for (i = 0; i < dma->num_pages; i++) {
		/* Each entry needs to be in little endian format. */
		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
		page_table++;
		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
		page_table++;
	}
}

static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
			  int pages, int use_pg_tbl)
{
	int i, size;
	struct cnic_local *cp = dev->cnic_priv;

	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
	if (dma->pg_arr == NULL)
		return -ENOMEM;

	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
	dma->num_pages = pages;

	for (i = 0; i < pages; i++) {
		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
						    CNIC_PAGE_SIZE,
						    &dma->pg_map_arr[i],
						    GFP_ATOMIC);
		if (dma->pg_arr[i] == NULL)
			goto error;
	}
	if (!use_pg_tbl)
		return 0;

	dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
			  ~(CNIC_PAGE_SIZE - 1);
	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
					&dma->pgtbl_map, GFP_ATOMIC);
	if (dma->pgtbl == NULL)
		goto error;

	cp->setup_pgtbl(dev, dma);

	return 0;

error:
	cnic_free_dma(dev, dma);
	return -ENOMEM;
}

static void cnic_free_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int i;

	for (i = 0; i < cp->ctx_blks; i++) {
		if (cp->ctx_arr[i].ctx) {
			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					  cp->ctx_arr[i].ctx,
					  cp->ctx_arr[i].mapping);
			cp->ctx_arr[i].ctx = NULL;
		}
	}
}

static void __cnic_free_uio_rings(struct cnic_uio_dev *udev)
{
	if (udev->l2_buf) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
				  udev->l2_buf, udev->l2_buf_map);
		udev->l2_buf = NULL;
	}

	if (udev->l2_ring) {
		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
				  udev->l2_ring, udev->l2_ring_map);
		udev->l2_ring = NULL;
	}

}

static void __cnic_free_uio(struct cnic_uio_dev *udev)
{
	uio_unregister_device(&udev->cnic_uinfo);

	__cnic_free_uio_rings(udev);

	pci_dev_put(udev->pdev);
	kfree(udev);
}

static void cnic_free_uio(struct cnic_uio_dev *udev)
{
	if (!udev)
		return;

	write_lock(&cnic_dev_lock);
	list_del_init(&udev->list);
	write_unlock(&cnic_dev_lock);
	__cnic_free_uio(udev);
}

static void cnic_free_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;

	if (udev) {
		udev->dev = NULL;
		cp->udev = NULL;
		if (udev->uio_dev == -1)
			__cnic_free_uio_rings(udev);
	}

	cnic_free_context(dev);
	kfree(cp->ctx_arr);
	cp->ctx_arr = NULL;
	cp->ctx_blks = 0;

	cnic_free_dma(dev, &cp->gbl_buf_info);
	cnic_free_dma(dev, &cp->kwq_info);
	cnic_free_dma(dev, &cp->kwq_16_data_info);
	cnic_free_dma(dev, &cp->kcq2.dma);
	cnic_free_dma(dev, &cp->kcq1.dma);
	kfree(cp->iscsi_tbl);
	cp->iscsi_tbl = NULL;
	kfree(cp->ctx_tbl);
	cp->ctx_tbl = NULL;

	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
	cnic_free_id_tbl(&cp->cid_tbl);
}

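/*
 * On the 5709 (the only BNX2 chip handled below with host-resident context
 * memory) the connection contexts live in host DMA pages: the CID ranges
 * are discovered through the PG/iSCSI context map registers and one page
 * is allocated per block of cids_per_blk contexts.
 */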
static int cnic_alloc_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;

	if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
		int i, k, arr_size;

		cp->ctx_blk_size = CNIC_PAGE_SIZE;
		cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
			   sizeof(struct cnic_ctx);
		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
		if (cp->ctx_arr == NULL)
			return -ENOMEM;

		k = 0;
		for (i = 0; i < 2; i++) {
			u32 j, reg, off, lo, hi;

			if (i == 0)
				off = BNX2_PG_CTX_MAP;
			else
				off = BNX2_ISCSI_CTX_MAP;

			reg = cnic_reg_rd_ind(dev, off);
			lo = reg >> 16;
			hi = reg & 0xffff;
			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
				cp->ctx_arr[k].cid = j;
		}

		cp->ctx_blks = k;
		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
			cp->ctx_blks = 0;
			return -ENOMEM;
		}

		for (i = 0; i < cp->ctx_blks; i++) {
			cp->ctx_arr[i].ctx =
				dma_alloc_coherent(&dev->pcidev->dev,
						   CNIC_PAGE_SIZE,
						   &cp->ctx_arr[i].mapping,
						   GFP_KERNEL);
			if (cp->ctx_arr[i].ctx == NULL)
				return -ENOMEM;
		}
	}
	return 0;
}

static u16 cnic_bnx2_next_idx(u16 idx)
{
	return idx + 1;
}

static u16 cnic_bnx2_hw_idx(u16 idx)
{
	return idx;
}

static u16 cnic_bnx2x_next_idx(u16 idx)
{
	idx++;
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;

	return idx;
}

static u16 cnic_bnx2x_hw_idx(u16 idx)
{
	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
		idx++;
	return idx;
}

static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
			  bool use_pg_tbl)
{
	int err, i, use_page_tbl = 0;
	struct kcqe **kcq;

	if (use_pg_tbl)
		use_page_tbl = 1;

	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
	if (err)
		return err;

	kcq = (struct kcqe **) info->dma.pg_arr;
	info->kcq = kcq;

	info->next_idx = cnic_bnx2_next_idx;
	info->hw_idx = cnic_bnx2_hw_idx;
	if (use_pg_tbl)
		return 0;

	info->next_idx = cnic_bnx2x_next_idx;
	info->hw_idx = cnic_bnx2x_hw_idx;

	for (i = 0; i < KCQ_PAGE_CNT; i++) {
		struct bnx2x_bd_chain_next *next =
			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
		int j = i + 1;

		if (j >= KCQ_PAGE_CNT)
			j = 0;
		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
	}
	return 0;
}

static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
{
	struct cnic_local *cp = udev->dev->cnic_priv;

	if (udev->l2_ring)
		return 0;

	udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
					   &udev->l2_ring_map,
					   GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_ring)
		return -ENOMEM;

	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
	udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
					  &udev->l2_buf_map,
					  GFP_KERNEL | __GFP_COMP);
	if (!udev->l2_buf) {
		__cnic_free_uio_rings(udev);
		return -ENOMEM;
	}

	return 0;

}

static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev;

	list_for_each_entry(udev, &cnic_udev_list, list) {
		if (udev->pdev == dev->pcidev) {
			udev->dev = dev;
			if (__cnic_alloc_uio_rings(udev, pages)) {
				udev->dev = NULL;
				return -ENOMEM;
			}
			cp->udev = udev;
			return 0;
		}
	}

	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
	if (!udev)
		return -ENOMEM;

	udev->uio_dev = -1;

	udev->dev = dev;
	udev->pdev = dev->pcidev;

	if (__cnic_alloc_uio_rings(udev, pages))
		goto err_udev;

	list_add(&udev->list, &cnic_udev_list);

	pci_dev_get(udev->pdev);

	cp->udev = udev;

	return 0;

 err_udev:
	kfree(udev);
	return -ENOMEM;
}

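/*
 * cnic_init_uio() publishes four memory regions to userspace: BAR 0 of the
 * device, the status block, the L2 ring and the L2 buffer pool.  Everything
 * except BAR 0 is exported as UIO_MEM_LOGICAL, so the userspace helper
 * mmap()s kernel allocations rather than raw PCI space.
 */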
static int cnic_init_uio(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_uio_dev *udev = cp->udev;
	struct uio_info *uinfo;
	int ret = 0;

	if (!udev)
		return -ENOMEM;

	uinfo = &udev->cnic_uinfo;

	uinfo->mem[0].addr = pci_resource_start(dev->pcidev, 0);
	uinfo->mem[0].internal_addr = dev->regview;
	uinfo->mem[0].memtype = UIO_MEM_PHYS;

	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
						     TX_MAX_TSS_RINGS + 1);
		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
					CNIC_PAGE_MASK;
		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
		else
			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;

		uinfo->name = "bnx2_cnic";
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);

		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
			CNIC_PAGE_MASK;
		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);

		uinfo->name = "bnx2x_cnic";
	}

	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
	uinfo->mem[2].size = udev->l2_ring_size;
	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;

	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
	uinfo->mem[3].size = udev->l2_buf_size;
	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;

	uinfo->version = CNIC_MODULE_VERSION;
	uinfo->irq = UIO_IRQ_CUSTOM;

	uinfo->open = cnic_uio_open;
	uinfo->release = cnic_uio_close;

	if (udev->uio_dev == -1) {
		if (!uinfo->priv) {
			uinfo->priv = udev;

			ret = uio_register_device(&udev->pdev->dev, uinfo);
		}
	} else {
		cnic_init_rings(dev);
	}

	return ret;
}

static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int ret;

	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
	if (ret)
		goto error;
	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;

	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
	if (ret)
		goto error;

	ret = cnic_alloc_context(dev);
	if (ret)
		goto error;

	ret = cnic_alloc_uio_rings(dev, 2);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return ret;
}

static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i;

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	if (!CHIP_IS_E1(bp))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
					   &cp->ctx_arr[i].mapping,
					   GFP_KERNEL);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;
				continue;
			}
		}
	}
	return 0;
}

static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_eth_dev *ethdev = cp->ethdev;
	u32 start_cid = ethdev->starting_cid;
	int i, j, n, ret, pages;
	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;

	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
	cp->iscsi_start_cid = start_cid;
	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;

	if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
		cp->max_cid_space += dev->max_fcoe_conn;
		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
		if (!cp->fcoe_init_cid)
			cp->fcoe_init_cid = 0x10;
	}

	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
				GFP_KERNEL);
	if (!cp->iscsi_tbl)
		goto error;

	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
			      cp->max_cid_space, GFP_KERNEL);
	if (!cp->ctx_tbl)
		goto error;

	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
	}

	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;

	pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
		CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
	if (ret)
		return -ENOMEM;

	n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
		long off = CNIC_KWQ16_DATA_SIZE * (i % n);

		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
						   off;

		if ((i % n) == (n - 1))
			j++;
	}

	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
	if (ret)
		goto error;

	if (CNIC_SUPPORTS_FCOE(bp)) {
		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
		if (ret)
			goto error;
	}

	pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
	if (ret)
		goto error;

	ret = cnic_alloc_bnx2x_context(dev);
	if (ret)
		goto error;

	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
		return 0;

	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;

	cp->l2_rx_ring_size = 15;

	ret = cnic_alloc_uio_rings(dev, 4);
	if (ret)
		goto error;

	ret = cnic_init_uio(dev);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_resc(dev);
	return -ENOMEM;
}

static inline u32 cnic_kwq_avail(struct cnic_local *cp)
{
	return cp->max_kwq_idx -
		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
}

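/*
 * bnx2 kernel work queue (KWQ) submission: WQEs are copied into the shared
 * ring under cnic_ulp_lock and the producer index is then written to the
 * chip through kwq_io_addr.  cnic_kwq_avail() above relies on max_kwq_idx
 * being an all-ones (power-of-two minus one) mask.
 */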
static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num_wqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct kwqe *prod_qe;
	u16 prod, sw_prod, i;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	spin_lock_bh(&cp->cnic_ulp_lock);
	if (num_wqes > cnic_kwq_avail(cp) &&
	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
		spin_unlock_bh(&cp->cnic_ulp_lock);
		return -EAGAIN;
	}

	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);

	prod = cp->kwq_prod_idx;
	sw_prod = prod & MAX_KWQ_IDX;
	for (i = 0; i < num_wqes; i++) {
		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
		prod++;
		sw_prod = prod & MAX_KWQ_IDX;
	}
	cp->kwq_prod_idx = prod;

	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);

	spin_unlock_bh(&cp->cnic_ulp_lock);
	return 0;
}

static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
				   union l5cm_specific_data *l5_data)
{
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	dma_addr_t map;

	map = ctx->kwqe_data_mapping;
	l5_data->phy_address.lo = (u64) map & 0xffffffff;
	l5_data->phy_address.hi = (u64) map >> 32;
	return ctx->kwqe_data;
}

static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
			       u32 type, union l5cm_specific_data *l5_data)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct l5cm_spe kwqe;
	struct kwqe_16 *kwq[1];
	u16 type_16;
	int ret;

	kwqe.hdr.conn_and_cmd_data =
		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
			     BNX2X_HW_CID(bp, cid)));

	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
	type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
		   SPE_HDR_FUNCTION_ID;

	kwqe.hdr.type = cpu_to_le16(type_16);
	kwqe.hdr.reserved1 = 0;
	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);

	kwq[0] = (struct kwqe_16 *) &kwqe;

	spin_lock_bh(&cp->cnic_ulp_lock);
	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
	spin_unlock_bh(&cp->cnic_ulp_lock);

	if (ret == 1)
		return 0;

	return ret;
}

static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
				   struct kcqe *cqes[], u32 num_cqes)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_ulp_ops *ulp_ops;

	rcu_read_lock();
	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
	if (likely(ulp_ops)) {
		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
					cqes, num_cqes);
	}
	rcu_read_unlock();
}

static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
				       int en_tcp_dack)
{
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
	u16 tstorm_flags = 0;

	if (time_stamps) {
		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
	}
	if (en_tcp_dack)
		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);

	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
}

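/*
 * Global iSCSI initialization for bnx2x: INIT1 sizes the task array, R2T
 * queue and HQ from the ULP's parameters and programs the per-PF limits
 * into the Tstorm/Ustorm/Xstorm/Cstorm RAM areas; INIT2 follows with the
 * error bitmaps and CQ sequence-number sizes.
 */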
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int hq_bds, pages;
	u32 pfid = bp->pfid;

	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
		  CNIC_PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
		  hq_bds);

	cnic_bnx2x_set_tcp_options(dev,
			req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
			req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);

	return 0;
}

static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	u32 pfid = bp->pfid;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];

	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
		struct cnic_iscsi *iscsi = ctx->proto.iscsi;

		cnic_free_dma(dev, &iscsi->hq_info);
		cnic_free_dma(dev, &iscsi->r2tq_info);
		cnic_free_dma(dev, &iscsi->task_array_info);
		cnic_free_id(&cp->cid_tbl, ctx->cid);
	} else {
		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
	}

	ctx->cid = 0;
}

static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
{
	u32 cid;
	int ret, pages;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;

	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
		if (cid == -1) {
			ret = -ENOMEM;
			goto error;
		}
		ctx->cid = cid;
		return 0;
	}

	cid = cnic_alloc_new_id(&cp->cid_tbl);
	if (cid == -1) {
		ret = -ENOMEM;
		goto error;
	}

	ctx->cid = cid;
	pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;

	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
	if (ret)
		goto error;

	pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
	if (ret)
		goto error;

	return 0;

error:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);
	return ret;
}

static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				    (cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		  (off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}

static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(bp, cid);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	int i, j, n = 2, n_max;
	u8 port = BP_PORT(bp);

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
		ETH_P_8021Q;
	if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
	    bp->common.chip_port_mode == CHIP_2_PORT_MODE) {

		port = 0;
	}
	ictx->xstorm_st_context.common.flags =
		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
	ictx->xstorm_st_context.common.flags =
		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
	ictx->tstorm_st_context.tcp.ooo_support_mode =
		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;

	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		req2->rq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}

static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
				  u32 num, int *work)
{
	struct iscsi_kwqe_conn_offload1 *req1;
	struct iscsi_kwqe_conn_offload2 *req2;
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];
	u32 l5_cid;
	int ret = 0;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
	if ((num - 2) < req2->num_additional_wqes) {
		*work = num;
		return -EINVAL;
	}
	*work = 2 + req2->num_additional_wqes;

	l5_cid = req1->iscsi_conn_id;
	if (l5_cid >= MAX_ISCSI_TBL_SZ)
		return -EINVAL;

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;

	ctx = &cp->ctx_tbl[l5_cid];
	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
		goto done;
	}

	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}
	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
	if (ret) {
		atomic_dec(&cp->iscsi_conn);
		ret = 0;
		goto done;
	}
	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
	if (ret < 0) {
		cnic_free_bnx2x_conn_resc(dev, l5_cid);
		atomic_dec(&cp->iscsi_conn);
		goto done;
	}

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);

done:
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
	return 0;
}

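/*
 * Note that OFFLOAD_CONN never fails the KWQE itself: resource or
 * context-setup failures are reported back to the ULP through the
 * synthesized KCQE completion status instead, and the iscsi_conn counter
 * is unwound on every error path above.
 */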
static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_update *req =
		(struct iscsi_kwqe_conn_update *) kwqe;
	void *data;
	union l5cm_specific_data l5_data;
	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
	int ret;

	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
		return -EINVAL;

	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!data)
		return -ENOMEM;

	memcpy(data, kwqe, sizeof(struct kwqe));

	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
	return ret;
}

static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct bnx2x *bp = netdev_priv(dev->netdev);
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	union l5cm_specific_data l5_data;
	int ret;
	u32 hw_cid;

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	hw_cid = BNX2X_HW_CID(bp, ctx->cid);

	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);

	if (ret == 0) {
		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
			return -EBUSY;
	}

	return 0;
}

static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
		goto skip_cfc_delete;

	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;

		if (delta > (2 * HZ))
			delta = 0;

		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
		goto destroy_reply;
	}

	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	if (!ret) {
		atomic_dec(&cp->iscsi_conn);
		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
	}

destroy_reply:
	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}

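/*
 * Per-connection buffers handed to the firmware at TCP connect time.  The
 * IP addresses arrive as host-order words and are byte-swapped into
 * in6_addr form so that csum_ipv6_magic() can precompute the TCP
 * pseudo-header checksum for the offloaded connection.
 */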
i++, addrp++) 2046 src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 2047 2048 addrp = (u32 *) &conn_addr->remote_ip_addr; 2049 for (i = 0; i < 4; i++, addrp++) 2050 dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp); 2051 2052 cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr); 2053 2054 xstorm_buf->context_addr.hi = context_addr.hi; 2055 xstorm_buf->context_addr.lo = context_addr.lo; 2056 xstorm_buf->mss = 0xffff; 2057 xstorm_buf->rcv_buf = kwqe3->rcv_buf; 2058 if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE) 2059 xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE; 2060 xstorm_buf->pseudo_header_checksum = 2061 swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0)); 2062 2063 if (kwqe3->ka_timeout) { 2064 tstorm_buf->ka_enable = 1; 2065 tstorm_buf->ka_timeout = kwqe3->ka_timeout; 2066 tstorm_buf->ka_interval = kwqe3->ka_interval; 2067 tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count; 2068 } 2069 tstorm_buf->max_rt_time = 0xffffffff; 2070} 2071 2072static void cnic_init_bnx2x_mac(struct cnic_dev *dev) 2073{ 2074 struct bnx2x *bp = netdev_priv(dev->netdev); 2075 u32 pfid = bp->pfid; 2076 u8 *mac = dev->mac_addr; 2077 2078 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2079 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]); 2080 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2081 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]); 2082 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2083 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]); 2084 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2085 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]); 2086 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2087 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]); 2088 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 2089 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]); 2090 2091 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2092 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]); 2093 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2094 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2095 mac[4]); 2096 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2097 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]); 2098 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2099 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2100 mac[2]); 2101 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2102 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]); 2103 CNIC_WR8(dev, BAR_TSTRORM_INTMEM + 2104 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1, 2105 mac[0]); 2106} 2107 2108static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[], 2109 u32 num, int *work) 2110{ 2111 struct cnic_local *cp = dev->cnic_priv; 2112 struct bnx2x *bp = netdev_priv(dev->netdev); 2113 struct l4_kwq_connect_req1 *kwqe1 = 2114 (struct l4_kwq_connect_req1 *) wqes[0]; 2115 struct l4_kwq_connect_req3 *kwqe3; 2116 struct l5cm_active_conn_buffer *conn_buf; 2117 struct l5cm_conn_addr_params *conn_addr; 2118 union l5cm_specific_data l5_data; 2119 u32 l5_cid = kwqe1->pg_cid; 2120 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 2121 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 2122 int ret; 2123 2124 if (num < 2) { 2125 *work = num; 2126 return -EINVAL; 2127 } 2128 2129 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) 2130 *work = 3; 2131 else 2132 *work = 2; 2133 2134 if (num < *work) { 2135 *work = num; 2136 return -EINVAL; 2137 } 2138 2139 if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) { 2140 netdev_err(dev->netdev, "conn_buf size too big\n"); 2141 return -ENOMEM; 2142 } 2143 conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2144 if (!conn_buf) 2145 return -ENOMEM; 2146 
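	/* Zero the ramrod data buffer, then fill in the next-hop MAC
	 * address, the local and remote IP addresses and ports, and the
	 * TCP options before the TCP_CONNECT ramrod is submitted.
	 */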
2147 memset(conn_buf, 0, sizeof(*conn_buf)); 2148 2149 conn_addr = &conn_buf->conn_addr_buf; 2150 conn_addr->remote_addr_0 = csk->ha[0]; 2151 conn_addr->remote_addr_1 = csk->ha[1]; 2152 conn_addr->remote_addr_2 = csk->ha[2]; 2153 conn_addr->remote_addr_3 = csk->ha[3]; 2154 conn_addr->remote_addr_4 = csk->ha[4]; 2155 conn_addr->remote_addr_5 = csk->ha[5]; 2156 2157 if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) { 2158 struct l4_kwq_connect_req2 *kwqe2 = 2159 (struct l4_kwq_connect_req2 *) wqes[1]; 2160 2161 conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4; 2162 conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3; 2163 conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2; 2164 2165 conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4; 2166 conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3; 2167 conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2; 2168 conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION; 2169 } 2170 kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1]; 2171 2172 conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip; 2173 conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip; 2174 conn_addr->local_tcp_port = kwqe1->src_port; 2175 conn_addr->remote_tcp_port = kwqe1->dst_port; 2176 2177 conn_addr->pmtu = kwqe3->pmtu; 2178 cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf); 2179 2180 CNIC_WR16(dev, BAR_XSTRORM_INTMEM + 2181 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id); 2182 2183 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT, 2184 kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2185 if (!ret) 2186 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2187 2188 return ret; 2189} 2190 2191static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe) 2192{ 2193 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe; 2194 union l5cm_specific_data l5_data; 2195 int ret; 2196 2197 memset(&l5_data, 0, sizeof(l5_data)); 2198 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE, 2199 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2200 return ret; 2201} 2202 2203static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe) 2204{ 2205 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe; 2206 union l5cm_specific_data l5_data; 2207 int ret; 2208 2209 memset(&l5_data, 0, sizeof(l5_data)); 2210 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT, 2211 req->cid, ISCSI_CONNECTION_TYPE, &l5_data); 2212 return ret; 2213} 2214static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2215{ 2216 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe; 2217 struct l4_kcq kcqe; 2218 struct kcqe *cqes[1]; 2219 2220 memset(&kcqe, 0, sizeof(kcqe)); 2221 kcqe.pg_host_opaque = req->host_opaque; 2222 kcqe.pg_cid = req->host_opaque; 2223 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG; 2224 cqes[0] = (struct kcqe *) &kcqe; 2225 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2226 return 0; 2227} 2228 2229static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe) 2230{ 2231 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe; 2232 struct l4_kcq kcqe; 2233 struct kcqe *cqes[1]; 2234 2235 memset(&kcqe, 0, sizeof(kcqe)); 2236 kcqe.pg_host_opaque = req->pg_host_opaque; 2237 kcqe.pg_cid = req->pg_cid; 2238 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG; 2239 cqes[0] = (struct kcqe *) &kcqe; 2240 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1); 2241 return 0; 2242} 2243 2244static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe 
*kwqe) 2245{ 2246 struct fcoe_kwqe_stat *req; 2247 struct fcoe_stat_ramrod_params *fcoe_stat; 2248 union l5cm_specific_data l5_data; 2249 struct cnic_local *cp = dev->cnic_priv; 2250 struct bnx2x *bp = netdev_priv(dev->netdev); 2251 int ret; 2252 u32 cid; 2253 2254 req = (struct fcoe_kwqe_stat *) kwqe; 2255 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); 2256 2257 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2258 if (!fcoe_stat) 2259 return -ENOMEM; 2260 2261 memset(fcoe_stat, 0, sizeof(*fcoe_stat)); 2262 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req)); 2263 2264 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid, 2265 FCOE_CONNECTION_TYPE, &l5_data); 2266 return ret; 2267} 2268 2269static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[], 2270 u32 num, int *work) 2271{ 2272 int ret; 2273 struct cnic_local *cp = dev->cnic_priv; 2274 struct bnx2x *bp = netdev_priv(dev->netdev); 2275 u32 cid; 2276 struct fcoe_init_ramrod_params *fcoe_init; 2277 struct fcoe_kwqe_init1 *req1; 2278 struct fcoe_kwqe_init2 *req2; 2279 struct fcoe_kwqe_init3 *req3; 2280 union l5cm_specific_data l5_data; 2281 2282 if (num < 3) { 2283 *work = num; 2284 return -EINVAL; 2285 } 2286 req1 = (struct fcoe_kwqe_init1 *) wqes[0]; 2287 req2 = (struct fcoe_kwqe_init2 *) wqes[1]; 2288 req3 = (struct fcoe_kwqe_init3 *) wqes[2]; 2289 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) { 2290 *work = 1; 2291 return -EINVAL; 2292 } 2293 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) { 2294 *work = 2; 2295 return -EINVAL; 2296 } 2297 2298 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) { 2299 netdev_err(dev->netdev, "fcoe_init size too big\n"); 2300 return -ENOMEM; 2301 } 2302 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data); 2303 if (!fcoe_init) 2304 return -ENOMEM; 2305 2306 memset(fcoe_init, 0, sizeof(*fcoe_init)); 2307 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1)); 2308 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2)); 2309 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3)); 2310 fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff; 2311 fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32; 2312 fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages; 2313 2314 fcoe_init->sb_num = cp->status_blk_num; 2315 fcoe_init->eq_prod = MAX_KCQ_IDX; 2316 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS; 2317 cp->kcq2.sw_prod_idx = 0; 2318 2319 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); 2320 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid, 2321 FCOE_CONNECTION_TYPE, &l5_data); 2322 *work = 3; 2323 return ret; 2324} 2325 2326static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[], 2327 u32 num, int *work) 2328{ 2329 int ret = 0; 2330 u32 cid = -1, l5_cid; 2331 struct cnic_local *cp = dev->cnic_priv; 2332 struct bnx2x *bp = netdev_priv(dev->netdev); 2333 struct fcoe_kwqe_conn_offload1 *req1; 2334 struct fcoe_kwqe_conn_offload2 *req2; 2335 struct fcoe_kwqe_conn_offload3 *req3; 2336 struct fcoe_kwqe_conn_offload4 *req4; 2337 struct fcoe_conn_offload_ramrod_params *fcoe_offload; 2338 struct cnic_context *ctx; 2339 struct fcoe_context *fctx; 2340 struct regpair ctx_addr; 2341 union l5cm_specific_data l5_data; 2342 struct fcoe_kcqe kcqe; 2343 struct kcqe *cqes[1]; 2344 2345 if (num < 4) { 2346 *work = num; 2347 return -EINVAL; 2348 } 2349 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0]; 2350 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1]; 2351 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2]; 2352 req4 = (struct 
fcoe_kwqe_conn_offload4 *) wqes[3]; 2353 2354 *work = 4; 2355 2356 l5_cid = req1->fcoe_conn_id; 2357 if (l5_cid >= dev->max_fcoe_conn) 2358 goto err_reply; 2359 2360 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2361 2362 ctx = &cp->ctx_tbl[l5_cid]; 2363 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2364 goto err_reply; 2365 2366 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid); 2367 if (ret) { 2368 ret = 0; 2369 goto err_reply; 2370 } 2371 cid = ctx->cid; 2372 2373 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr); 2374 if (fctx) { 2375 u32 hw_cid = BNX2X_HW_CID(bp, cid); 2376 u32 val; 2377 2378 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG, 2379 FCOE_CONNECTION_TYPE); 2380 fctx->xstorm_ag_context.cdu_reserved = val; 2381 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG, 2382 FCOE_CONNECTION_TYPE); 2383 fctx->ustorm_ag_context.cdu_usage = val; 2384 } 2385 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) { 2386 netdev_err(dev->netdev, "fcoe_offload size too big\n"); 2387 goto err_reply; 2388 } 2389 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2390 if (!fcoe_offload) 2391 goto err_reply; 2392 2393 memset(fcoe_offload, 0, sizeof(*fcoe_offload)); 2394 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1)); 2395 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2)); 2396 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3)); 2397 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4)); 2398 2399 cid = BNX2X_HW_CID(bp, cid); 2400 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid, 2401 FCOE_CONNECTION_TYPE, &l5_data); 2402 if (!ret) 2403 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 2404 2405 return ret; 2406 2407err_reply: 2408 if (cid != -1) 2409 cnic_free_bnx2x_conn_resc(dev, l5_cid); 2410 2411 memset(&kcqe, 0, sizeof(kcqe)); 2412 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN; 2413 kcqe.fcoe_conn_id = req1->fcoe_conn_id; 2414 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE; 2415 2416 cqes[0] = (struct kcqe *) &kcqe; 2417 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2418 return ret; 2419} 2420 2421static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe) 2422{ 2423 struct fcoe_kwqe_conn_enable_disable *req; 2424 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable; 2425 union l5cm_specific_data l5_data; 2426 int ret; 2427 u32 cid, l5_cid; 2428 struct cnic_local *cp = dev->cnic_priv; 2429 2430 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2431 cid = req->context_id; 2432 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE; 2433 2434 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) { 2435 netdev_err(dev->netdev, "fcoe_enable size too big\n"); 2436 return -ENOMEM; 2437 } 2438 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2439 if (!fcoe_enable) 2440 return -ENOMEM; 2441 2442 memset(fcoe_enable, 0, sizeof(*fcoe_enable)); 2443 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req)); 2444 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid, 2445 FCOE_CONNECTION_TYPE, &l5_data); 2446 return ret; 2447} 2448 2449static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe) 2450{ 2451 struct fcoe_kwqe_conn_enable_disable *req; 2452 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable; 2453 union l5cm_specific_data l5_data; 2454 int ret; 2455 u32 cid, l5_cid; 2456 struct cnic_local *cp = dev->cnic_priv; 2457 2458 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2459 cid = req->context_id; 2460 l5_cid = req->conn_id; 2461 if 
(l5_cid >= dev->max_fcoe_conn) 2462 return -EINVAL; 2463 2464 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2465 2466 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) { 2467 netdev_err(dev->netdev, "fcoe_disable size too big\n"); 2468 return -ENOMEM; 2469 } 2470 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data); 2471 if (!fcoe_disable) 2472 return -ENOMEM; 2473 2474 memset(fcoe_disable, 0, sizeof(*fcoe_disable)); 2475 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req)); 2476 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid, 2477 FCOE_CONNECTION_TYPE, &l5_data); 2478 return ret; 2479} 2480 2481static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2482{ 2483 struct fcoe_kwqe_conn_destroy *req; 2484 union l5cm_specific_data l5_data; 2485 int ret; 2486 u32 cid, l5_cid; 2487 struct cnic_local *cp = dev->cnic_priv; 2488 struct cnic_context *ctx; 2489 struct fcoe_kcqe kcqe; 2490 struct kcqe *cqes[1]; 2491 2492 req = (struct fcoe_kwqe_conn_destroy *) kwqe; 2493 cid = req->context_id; 2494 l5_cid = req->conn_id; 2495 if (l5_cid >= dev->max_fcoe_conn) 2496 return -EINVAL; 2497 2498 l5_cid += BNX2X_FCOE_L5_CID_BASE; 2499 2500 ctx = &cp->ctx_tbl[l5_cid]; 2501 2502 init_waitqueue_head(&ctx->waitq); 2503 ctx->wait_cond = 0; 2504 2505 memset(&kcqe, 0, sizeof(kcqe)); 2506 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR; 2507 memset(&l5_data, 0, sizeof(l5_data)); 2508 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid, 2509 FCOE_CONNECTION_TYPE, &l5_data); 2510 if (ret == 0) { 2511 wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO); 2512 if (ctx->wait_cond) 2513 kcqe.completion_status = 0; 2514 } 2515 2516 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags); 2517 queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000)); 2518 2519 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN; 2520 kcqe.fcoe_conn_id = req->conn_id; 2521 kcqe.fcoe_conn_context_id = cid; 2522 2523 cqes[0] = (struct kcqe *) &kcqe; 2524 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1); 2525 return ret; 2526} 2527 2528static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid) 2529{ 2530 struct cnic_local *cp = dev->cnic_priv; 2531 u32 i; 2532 2533 for (i = start_cid; i < cp->max_cid_space; i++) { 2534 struct cnic_context *ctx = &cp->ctx_tbl[i]; 2535 int j; 2536 2537 while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 2538 msleep(10); 2539 2540 for (j = 0; j < 5; j++) { 2541 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2542 break; 2543 msleep(20); 2544 } 2545 2546 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 2547 netdev_warn(dev->netdev, "CID %x not deleted\n", 2548 ctx->cid); 2549 } 2550} 2551 2552static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe) 2553{ 2554 struct fcoe_kwqe_destroy *req; 2555 union l5cm_specific_data l5_data; 2556 struct cnic_local *cp = dev->cnic_priv; 2557 struct bnx2x *bp = netdev_priv(dev->netdev); 2558 int ret; 2559 u32 cid; 2560 2561 cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ); 2562 2563 req = (struct fcoe_kwqe_destroy *) kwqe; 2564 cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid); 2565 2566 memset(&l5_data, 0, sizeof(l5_data)); 2567 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid, 2568 FCOE_CONNECTION_TYPE, &l5_data); 2569 return ret; 2570} 2571 2572static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe) 2573{ 2574 struct cnic_local *cp = dev->cnic_priv; 2575 struct kcqe kcqe; 2576 struct kcqe *cqes[1]; 2577 u32 cid; 2578 u32 opcode = 
KWQE_OPCODE(kwqe->kwqe_op_flag); 2579 u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK; 2580 u32 kcqe_op; 2581 int ulp_type; 2582 2583 cid = kwqe->kwqe_info0; 2584 memset(&kcqe, 0, sizeof(kcqe)); 2585 2586 if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) { 2587 u32 l5_cid = 0; 2588 2589 ulp_type = CNIC_ULP_FCOE; 2590 if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) { 2591 struct fcoe_kwqe_conn_enable_disable *req; 2592 2593 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe; 2594 kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN; 2595 cid = req->context_id; 2596 l5_cid = req->conn_id; 2597 } else if (opcode == FCOE_KWQE_OPCODE_DESTROY) { 2598 kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC; 2599 } else { 2600 return; 2601 } 2602 kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT; 2603 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE; 2604 kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_PARITY_ERROR; 2605 kcqe.kcqe_info2 = cid; 2606 kcqe.kcqe_info0 = l5_cid; 2607 2608 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) { 2609 ulp_type = CNIC_ULP_ISCSI; 2610 if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN) 2611 cid = kwqe->kwqe_info1; 2612 2613 kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT; 2614 kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI; 2615 kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_PARITY_ERR; 2616 kcqe.kcqe_info2 = cid; 2617 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0); 2618 2619 } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) { 2620 struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe; 2621 2622 ulp_type = CNIC_ULP_L4; 2623 if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1) 2624 kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE; 2625 else if (opcode == L4_KWQE_OPCODE_VALUE_RESET) 2626 kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP; 2627 else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE) 2628 kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 2629 else 2630 return; 2631 2632 kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) | 2633 KCQE_FLAGS_LAYER_MASK_L4; 2634 l4kcqe->status = L4_KCQE_COMPLETION_STATUS_PARITY_ERROR; 2635 l4kcqe->cid = cid; 2636 cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id); 2637 } else { 2638 return; 2639 } 2640 2641 cqes[0] = &kcqe; 2642 cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1); 2643} 2644 2645static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev, 2646 struct kwqe *wqes[], u32 num_wqes) 2647{ 2648 int i, work, ret; 2649 u32 opcode; 2650 struct kwqe *kwqe; 2651 2652 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2653 return -EAGAIN; /* bnx2 is down */ 2654 2655 for (i = 0; i < num_wqes; ) { 2656 kwqe = wqes[i]; 2657 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2658 work = 1; 2659 2660 switch (opcode) { 2661 case ISCSI_KWQE_OPCODE_INIT1: 2662 ret = cnic_bnx2x_iscsi_init1(dev, kwqe); 2663 break; 2664 case ISCSI_KWQE_OPCODE_INIT2: 2665 ret = cnic_bnx2x_iscsi_init2(dev, kwqe); 2666 break; 2667 case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1: 2668 ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i], 2669 num_wqes - i, &work); 2670 break; 2671 case ISCSI_KWQE_OPCODE_UPDATE_CONN: 2672 ret = cnic_bnx2x_iscsi_update(dev, kwqe); 2673 break; 2674 case ISCSI_KWQE_OPCODE_DESTROY_CONN: 2675 ret = cnic_bnx2x_iscsi_destroy(dev, kwqe); 2676 break; 2677 case L4_KWQE_OPCODE_VALUE_CONNECT1: 2678 ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i, 2679 &work); 2680 break; 2681 case L4_KWQE_OPCODE_VALUE_CLOSE: 2682 ret = cnic_bnx2x_close(dev, kwqe); 2683 break; 2684 case L4_KWQE_OPCODE_VALUE_RESET: 2685 ret = cnic_bnx2x_reset(dev, kwqe); 2686 break; 2687 case 
L4_KWQE_OPCODE_VALUE_OFFLOAD_PG: 2688 ret = cnic_bnx2x_offload_pg(dev, kwqe); 2689 break; 2690 case L4_KWQE_OPCODE_VALUE_UPDATE_PG: 2691 ret = cnic_bnx2x_update_pg(dev, kwqe); 2692 break; 2693 case L4_KWQE_OPCODE_VALUE_UPLOAD_PG: 2694 ret = 0; 2695 break; 2696 default: 2697 ret = 0; 2698 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2699 opcode); 2700 break; 2701 } 2702 if (ret < 0) { 2703 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2704 opcode); 2705 2706 /* Possibly bnx2x parity error, send completion 2707 * to ulp drivers with error code to speed up 2708 * cleanup and reset recovery. 2709 */ 2710 if (ret == -EIO || ret == -EAGAIN) 2711 cnic_bnx2x_kwqe_err(dev, kwqe); 2712 } 2713 i += work; 2714 } 2715 return 0; 2716} 2717 2718static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev, 2719 struct kwqe *wqes[], u32 num_wqes) 2720{ 2721 struct bnx2x *bp = netdev_priv(dev->netdev); 2722 int i, work, ret; 2723 u32 opcode; 2724 struct kwqe *kwqe; 2725 2726 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2727 return -EAGAIN; /* bnx2 is down */ 2728 2729 if (!BNX2X_CHIP_IS_E2_PLUS(bp)) 2730 return -EINVAL; 2731 2732 for (i = 0; i < num_wqes; ) { 2733 kwqe = wqes[i]; 2734 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag); 2735 work = 1; 2736 2737 switch (opcode) { 2738 case FCOE_KWQE_OPCODE_INIT1: 2739 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i], 2740 num_wqes - i, &work); 2741 break; 2742 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1: 2743 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i], 2744 num_wqes - i, &work); 2745 break; 2746 case FCOE_KWQE_OPCODE_ENABLE_CONN: 2747 ret = cnic_bnx2x_fcoe_enable(dev, kwqe); 2748 break; 2749 case FCOE_KWQE_OPCODE_DISABLE_CONN: 2750 ret = cnic_bnx2x_fcoe_disable(dev, kwqe); 2751 break; 2752 case FCOE_KWQE_OPCODE_DESTROY_CONN: 2753 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe); 2754 break; 2755 case FCOE_KWQE_OPCODE_DESTROY: 2756 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe); 2757 break; 2758 case FCOE_KWQE_OPCODE_STAT: 2759 ret = cnic_bnx2x_fcoe_stat(dev, kwqe); 2760 break; 2761 default: 2762 ret = 0; 2763 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n", 2764 opcode); 2765 break; 2766 } 2767 if (ret < 0) { 2768 netdev_err(dev->netdev, "KWQE(0x%x) failed\n", 2769 opcode); 2770 2771 /* Possibly bnx2x parity error, send completion 2772 * to ulp drivers with error code to speed up 2773 * cleanup and reset recovery. 
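			 * cnic_bnx2x_kwqe_err() reports the failure back to the
			 * ULP as a completion with PARITY_ERROR status.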
2774 */ 2775 if (ret == -EIO || ret == -EAGAIN) 2776 cnic_bnx2x_kwqe_err(dev, kwqe); 2777 } 2778 i += work; 2779 } 2780 return 0; 2781} 2782 2783static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 2784 u32 num_wqes) 2785{ 2786 int ret = -EINVAL; 2787 u32 layer_code; 2788 2789 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 2790 return -EAGAIN; /* bnx2x is down */ 2791 2792 if (!num_wqes) 2793 return 0; 2794 2795 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK; 2796 switch (layer_code) { 2797 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI: 2798 case KWQE_FLAGS_LAYER_MASK_L4: 2799 case KWQE_FLAGS_LAYER_MASK_L2: 2800 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes); 2801 break; 2802 2803 case KWQE_FLAGS_LAYER_MASK_L5_FCOE: 2804 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes); 2805 break; 2806 } 2807 return ret; 2808} 2809 2810static inline u32 cnic_get_kcqe_layer_mask(u32 opflag) 2811{ 2812 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN)) 2813 return KCQE_FLAGS_LAYER_MASK_L4; 2814 2815 return opflag & KCQE_FLAGS_LAYER_MASK; 2816} 2817 2818static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2819{ 2820 struct cnic_local *cp = dev->cnic_priv; 2821 int i, j, comp = 0; 2822 2823 i = 0; 2824 j = 1; 2825 while (num_cqes) { 2826 struct cnic_ulp_ops *ulp_ops; 2827 int ulp_type; 2828 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2829 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag); 2830 2831 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2832 comp++; 2833 2834 while (j < num_cqes) { 2835 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2836 2837 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer) 2838 break; 2839 2840 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2841 comp++; 2842 j++; 2843 } 2844 2845 if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA) 2846 ulp_type = CNIC_ULP_RDMA; 2847 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2848 ulp_type = CNIC_ULP_ISCSI; 2849 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE) 2850 ulp_type = CNIC_ULP_FCOE; 2851 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2852 ulp_type = CNIC_ULP_L4; 2853 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2854 goto end; 2855 else { 2856 netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n", 2857 kcqe_op_flag); 2858 goto end; 2859 } 2860 2861 rcu_read_lock(); 2862 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 2863 if (likely(ulp_ops)) { 2864 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type], 2865 cp->completed_kcq + i, j); 2866 } 2867 rcu_read_unlock(); 2868end: 2869 num_cqes -= j; 2870 i += j; 2871 j = 1; 2872 } 2873 if (unlikely(comp)) 2874 cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp); 2875} 2876 2877static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info) 2878{ 2879 struct cnic_local *cp = dev->cnic_priv; 2880 u16 i, ri, hw_prod, last; 2881 struct kcqe *kcqe; 2882 int kcqe_cnt = 0, last_cnt = 0; 2883 2884 i = ri = last = info->sw_prod_idx; 2885 ri &= MAX_KCQ_IDX; 2886 hw_prod = *info->hw_prod_idx_ptr; 2887 hw_prod = info->hw_idx(hw_prod); 2888 2889 while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) { 2890 kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)]; 2891 cp->completed_kcq[kcqe_cnt++] = kcqe; 2892 i = info->next_idx(i); 2893 ri = i & MAX_KCQ_IDX; 2894 if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) { 2895 last_cnt = kcqe_cnt; 2896 last = i; 2897 } 2898 } 2899 2900 info->sw_prod_idx = last; 2901 return last_cnt; 2902} 2903 2904static int cnic_l2_completion(struct cnic_local 
*cp) 2905{ 2906 u16 hw_cons, sw_cons; 2907 struct cnic_uio_dev *udev = cp->udev; 2908 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *) 2909 (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); 2910 u32 cmd; 2911 int comp = 0; 2912 2913 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags)) 2914 return 0; 2915 2916 hw_cons = *cp->rx_cons_ptr; 2917 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT) 2918 hw_cons++; 2919 2920 sw_cons = cp->rx_cons; 2921 while (sw_cons != hw_cons) { 2922 u8 cqe_fp_flags; 2923 2924 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT]; 2925 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 2926 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) { 2927 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data); 2928 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT; 2929 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP || 2930 cmd == RAMROD_CMD_ID_ETH_HALT) 2931 comp++; 2932 } 2933 sw_cons = BNX2X_NEXT_RCQE(sw_cons); 2934 } 2935 return comp; 2936} 2937 2938static void cnic_chk_pkt_rings(struct cnic_local *cp) 2939{ 2940 u16 rx_cons, tx_cons; 2941 int comp = 0; 2942 2943 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 2944 return; 2945 2946 rx_cons = *cp->rx_cons_ptr; 2947 tx_cons = *cp->tx_cons_ptr; 2948 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2949 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 2950 comp = cnic_l2_completion(cp); 2951 2952 cp->tx_cons = tx_cons; 2953 cp->rx_cons = rx_cons; 2954 2955 if (cp->udev) 2956 uio_event_notify(&cp->udev->cnic_uinfo); 2957 } 2958 if (comp) 2959 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 2960} 2961 2962static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) 2963{ 2964 struct cnic_local *cp = dev->cnic_priv; 2965 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2966 int kcqe_cnt; 2967 2968 /* status block index must be read before reading other fields */ 2969 rmb(); 2970 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2971 2972 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2973 2974 service_kcqes(dev, kcqe_cnt); 2975 2976 /* Tell compiler that status_blk fields can change. 
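		 * barrier() forces status_idx and kwq_con_idx to be re-read
		 * below instead of reusing values cached by the compiler.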
*/ 2977 barrier(); 2978 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2979 /* status block index must be read first */ 2980 rmb(); 2981 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2982 } 2983 2984 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx); 2985 2986 cnic_chk_pkt_rings(cp); 2987 2988 return status_idx; 2989} 2990 2991static int cnic_service_bnx2(void *data, void *status_blk) 2992{ 2993 struct cnic_dev *dev = data; 2994 2995 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2996 struct status_block *sblk = status_blk; 2997 2998 return sblk->status_idx; 2999 } 3000 3001 return cnic_service_bnx2_queues(dev); 3002} 3003 3004static void cnic_service_bnx2_msix(unsigned long data) 3005{ 3006 struct cnic_dev *dev = (struct cnic_dev *) data; 3007 struct cnic_local *cp = dev->cnic_priv; 3008 3009 cp->last_status_idx = cnic_service_bnx2_queues(dev); 3010 3011 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 3012 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 3013} 3014 3015static void cnic_doirq(struct cnic_dev *dev) 3016{ 3017 struct cnic_local *cp = dev->cnic_priv; 3018 3019 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 3020 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX; 3021 3022 prefetch(cp->status_blk.gen); 3023 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 3024 3025 tasklet_schedule(&cp->cnic_irq_task); 3026 } 3027} 3028 3029static irqreturn_t cnic_irq(int irq, void *dev_instance) 3030{ 3031 struct cnic_dev *dev = dev_instance; 3032 struct cnic_local *cp = dev->cnic_priv; 3033 3034 if (cp->ack_int) 3035 cp->ack_int(dev); 3036 3037 cnic_doirq(dev); 3038 3039 return IRQ_HANDLED; 3040} 3041 3042static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm, 3043 u16 index, u8 op, u8 update) 3044{ 3045 struct bnx2x *bp = netdev_priv(dev->netdev); 3046 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 + 3047 COMMAND_REG_INT_ACK); 3048 struct igu_ack_register igu_ack; 3049 3050 igu_ack.status_block_index = index; 3051 igu_ack.sb_id_and_flags = 3052 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) | 3053 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) | 3054 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) | 3055 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT)); 3056 3057 CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack)); 3058} 3059 3060static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment, 3061 u16 index, u8 op, u8 update) 3062{ 3063 struct igu_regular cmd_data; 3064 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8; 3065 3066 cmd_data.sb_id_and_flags = 3067 (index << IGU_REGULAR_SB_INDEX_SHIFT) | 3068 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) | 3069 (update << IGU_REGULAR_BUPDATE_SHIFT) | 3070 (op << IGU_REGULAR_ENABLE_INT_SHIFT); 3071 3072 3073 CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags); 3074} 3075 3076static void cnic_ack_bnx2x_msix(struct cnic_dev *dev) 3077{ 3078 struct cnic_local *cp = dev->cnic_priv; 3079 3080 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0, 3081 IGU_INT_DISABLE, 0); 3082} 3083 3084static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev) 3085{ 3086 struct cnic_local *cp = dev->cnic_priv; 3087 3088 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0, 3089 IGU_INT_DISABLE, 0); 3090} 3091 3092static void cnic_arm_bnx2x_msix(struct cnic_dev *dev, u32 idx) 3093{ 3094 struct cnic_local *cp = dev->cnic_priv; 3095 3096 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, idx, 3097 IGU_INT_ENABLE, 1); 3098} 3099 3100static void cnic_arm_bnx2x_e2_msix(struct cnic_dev 
*dev, u32 idx) 3101{ 3102 struct cnic_local *cp = dev->cnic_priv; 3103 3104 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, idx, 3105 IGU_INT_ENABLE, 1); 3106} 3107 3108static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) 3109{ 3110 u32 last_status = *info->status_idx_ptr; 3111 int kcqe_cnt; 3112 3113 /* status block index must be read before reading the KCQ */ 3114 rmb(); 3115 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 3116 3117 service_kcqes(dev, kcqe_cnt); 3118 3119 /* Tell compiler that sblk fields can change. */ 3120 barrier(); 3121 3122 last_status = *info->status_idx_ptr; 3123 /* status block index must be read before reading the KCQ */ 3124 rmb(); 3125 } 3126 return last_status; 3127} 3128 3129static void cnic_service_bnx2x_bh(unsigned long data) 3130{ 3131 struct cnic_dev *dev = (struct cnic_dev *) data; 3132 struct cnic_local *cp = dev->cnic_priv; 3133 struct bnx2x *bp = netdev_priv(dev->netdev); 3134 u32 status_idx, new_status_idx; 3135 3136 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 3137 return; 3138 3139 while (1) { 3140 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 3141 3142 CNIC_WR16(dev, cp->kcq1.io_addr, 3143 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 3144 3145 if (!CNIC_SUPPORTS_FCOE(bp)) { 3146 cp->arm_int(dev, status_idx); 3147 break; 3148 } 3149 3150 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 3151 3152 if (new_status_idx != status_idx) 3153 continue; 3154 3155 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 3156 MAX_KCQ_IDX); 3157 3158 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 3159 status_idx, IGU_INT_ENABLE, 1); 3160 3161 break; 3162 } 3163} 3164 3165static int cnic_service_bnx2x(void *data, void *status_blk) 3166{ 3167 struct cnic_dev *dev = data; 3168 struct cnic_local *cp = dev->cnic_priv; 3169 3170 if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 3171 cnic_doirq(dev); 3172 3173 cnic_chk_pkt_rings(cp); 3174 3175 return 0; 3176} 3177 3178static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type) 3179{ 3180 struct cnic_ulp_ops *ulp_ops; 3181 3182 if (if_type == CNIC_ULP_ISCSI) 3183 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 3184 3185 mutex_lock(&cnic_lock); 3186 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3187 lockdep_is_held(&cnic_lock)); 3188 if (!ulp_ops) { 3189 mutex_unlock(&cnic_lock); 3190 return; 3191 } 3192 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3193 mutex_unlock(&cnic_lock); 3194 3195 if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type])) 3196 ulp_ops->cnic_stop(cp->ulp_handle[if_type]); 3197 3198 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3199} 3200 3201static void cnic_ulp_stop(struct cnic_dev *dev) 3202{ 3203 struct cnic_local *cp = dev->cnic_priv; 3204 int if_type; 3205 3206 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) 3207 cnic_ulp_stop_one(cp, if_type); 3208} 3209 3210static void cnic_ulp_start(struct cnic_dev *dev) 3211{ 3212 struct cnic_local *cp = dev->cnic_priv; 3213 int if_type; 3214 3215 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 3216 struct cnic_ulp_ops *ulp_ops; 3217 3218 mutex_lock(&cnic_lock); 3219 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 3220 lockdep_is_held(&cnic_lock)); 3221 if (!ulp_ops || !ulp_ops->cnic_start) { 3222 mutex_unlock(&cnic_lock); 3223 continue; 3224 } 3225 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3226 mutex_unlock(&cnic_lock); 3227 3228 if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type])) 
3229 ulp_ops->cnic_start(cp->ulp_handle[if_type]); 3230 3231 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 3232 } 3233} 3234 3235static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type) 3236{ 3237 struct cnic_local *cp = dev->cnic_priv; 3238 struct cnic_ulp_ops *ulp_ops; 3239 int rc; 3240 3241 mutex_lock(&cnic_lock); 3242 ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type], 3243 lockdep_is_held(&cnic_lock)); 3244 if (ulp_ops && ulp_ops->cnic_get_stats) 3245 rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]); 3246 else 3247 rc = -ENODEV; 3248 mutex_unlock(&cnic_lock); 3249 return rc; 3250} 3251 3252static int cnic_ctl(void *data, struct cnic_ctl_info *info) 3253{ 3254 struct cnic_dev *dev = data; 3255 int ulp_type = CNIC_ULP_ISCSI; 3256 3257 switch (info->cmd) { 3258 case CNIC_CTL_STOP_CMD: 3259 cnic_hold(dev); 3260 3261 cnic_ulp_stop(dev); 3262 cnic_stop_hw(dev); 3263 3264 cnic_put(dev); 3265 break; 3266 case CNIC_CTL_START_CMD: 3267 cnic_hold(dev); 3268 3269 if (!cnic_start_hw(dev)) 3270 cnic_ulp_start(dev); 3271 3272 cnic_put(dev); 3273 break; 3274 case CNIC_CTL_STOP_ISCSI_CMD: { 3275 struct cnic_local *cp = dev->cnic_priv; 3276 set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags); 3277 queue_delayed_work(cnic_wq, &cp->delete_task, 0); 3278 break; 3279 } 3280 case CNIC_CTL_COMPLETION_CMD: { 3281 struct cnic_ctl_completion *comp = &info->data.comp; 3282 u32 cid = BNX2X_SW_CID(comp->cid); 3283 u32 l5_cid; 3284 struct cnic_local *cp = dev->cnic_priv; 3285 3286 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) 3287 break; 3288 3289 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) { 3290 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3291 3292 if (unlikely(comp->error)) { 3293 set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags); 3294 netdev_err(dev->netdev, 3295 "CID %x CFC delete comp error %x\n", 3296 cid, comp->error); 3297 } 3298 3299 ctx->wait_cond = 1; 3300 wake_up(&ctx->waitq); 3301 } 3302 break; 3303 } 3304 case CNIC_CTL_FCOE_STATS_GET_CMD: 3305 ulp_type = CNIC_ULP_FCOE; 3306 /* fall through */ 3307 case CNIC_CTL_ISCSI_STATS_GET_CMD: 3308 cnic_hold(dev); 3309 cnic_copy_ulp_stats(dev, ulp_type); 3310 cnic_put(dev); 3311 break; 3312 3313 default: 3314 return -EINVAL; 3315 } 3316 return 0; 3317} 3318 3319static void cnic_ulp_init(struct cnic_dev *dev) 3320{ 3321 int i; 3322 struct cnic_local *cp = dev->cnic_priv; 3323 3324 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3325 struct cnic_ulp_ops *ulp_ops; 3326 3327 mutex_lock(&cnic_lock); 3328 ulp_ops = cnic_ulp_tbl_prot(i); 3329 if (!ulp_ops || !ulp_ops->cnic_init) { 3330 mutex_unlock(&cnic_lock); 3331 continue; 3332 } 3333 ulp_get(ulp_ops); 3334 mutex_unlock(&cnic_lock); 3335 3336 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3337 ulp_ops->cnic_init(dev); 3338 3339 ulp_put(ulp_ops); 3340 } 3341} 3342 3343static void cnic_ulp_exit(struct cnic_dev *dev) 3344{ 3345 int i; 3346 struct cnic_local *cp = dev->cnic_priv; 3347 3348 for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) { 3349 struct cnic_ulp_ops *ulp_ops; 3350 3351 mutex_lock(&cnic_lock); 3352 ulp_ops = cnic_ulp_tbl_prot(i); 3353 if (!ulp_ops || !ulp_ops->cnic_exit) { 3354 mutex_unlock(&cnic_lock); 3355 continue; 3356 } 3357 ulp_get(ulp_ops); 3358 mutex_unlock(&cnic_lock); 3359 3360 if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i])) 3361 ulp_ops->cnic_exit(dev); 3362 3363 ulp_put(ulp_ops); 3364 } 3365} 3366 3367static int cnic_cm_offload_pg(struct cnic_sock *csk) 3368{ 3369 struct cnic_dev *dev = csk->dev; 3370 struct l4_kwq_offload_pg *l4kwqe; 3371 struct kwqe 
*wqes[1]; 3372 3373 l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1; 3374 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3375 wqes[0] = (struct kwqe *) l4kwqe; 3376 3377 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG; 3378 l4kwqe->flags = 3379 L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT; 3380 l4kwqe->l2hdr_nbytes = ETH_HLEN; 3381 3382 l4kwqe->da0 = csk->ha[0]; 3383 l4kwqe->da1 = csk->ha[1]; 3384 l4kwqe->da2 = csk->ha[2]; 3385 l4kwqe->da3 = csk->ha[3]; 3386 l4kwqe->da4 = csk->ha[4]; 3387 l4kwqe->da5 = csk->ha[5]; 3388 3389 l4kwqe->sa0 = dev->mac_addr[0]; 3390 l4kwqe->sa1 = dev->mac_addr[1]; 3391 l4kwqe->sa2 = dev->mac_addr[2]; 3392 l4kwqe->sa3 = dev->mac_addr[3]; 3393 l4kwqe->sa4 = dev->mac_addr[4]; 3394 l4kwqe->sa5 = dev->mac_addr[5]; 3395 3396 l4kwqe->etype = ETH_P_IP; 3397 l4kwqe->ipid_start = DEF_IPID_START; 3398 l4kwqe->host_opaque = csk->l5_cid; 3399 3400 if (csk->vlan_id) { 3401 l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING; 3402 l4kwqe->vlan_tag = csk->vlan_id; 3403 l4kwqe->l2hdr_nbytes += 4; 3404 } 3405 3406 return dev->submit_kwqes(dev, wqes, 1); 3407} 3408 3409static int cnic_cm_update_pg(struct cnic_sock *csk) 3410{ 3411 struct cnic_dev *dev = csk->dev; 3412 struct l4_kwq_update_pg *l4kwqe; 3413 struct kwqe *wqes[1]; 3414 3415 l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1; 3416 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3417 wqes[0] = (struct kwqe *) l4kwqe; 3418 3419 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG; 3420 l4kwqe->flags = 3421 L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT; 3422 l4kwqe->pg_cid = csk->pg_cid; 3423 3424 l4kwqe->da0 = csk->ha[0]; 3425 l4kwqe->da1 = csk->ha[1]; 3426 l4kwqe->da2 = csk->ha[2]; 3427 l4kwqe->da3 = csk->ha[3]; 3428 l4kwqe->da4 = csk->ha[4]; 3429 l4kwqe->da5 = csk->ha[5]; 3430 3431 l4kwqe->pg_host_opaque = csk->l5_cid; 3432 l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA; 3433 3434 return dev->submit_kwqes(dev, wqes, 1); 3435} 3436 3437static int cnic_cm_upload_pg(struct cnic_sock *csk) 3438{ 3439 struct cnic_dev *dev = csk->dev; 3440 struct l4_kwq_upload *l4kwqe; 3441 struct kwqe *wqes[1]; 3442 3443 l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1; 3444 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3445 wqes[0] = (struct kwqe *) l4kwqe; 3446 3447 l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG; 3448 l4kwqe->flags = 3449 L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT; 3450 l4kwqe->cid = csk->pg_cid; 3451 3452 return dev->submit_kwqes(dev, wqes, 1); 3453} 3454 3455static int cnic_cm_conn_req(struct cnic_sock *csk) 3456{ 3457 struct cnic_dev *dev = csk->dev; 3458 struct l4_kwq_connect_req1 *l4kwqe1; 3459 struct l4_kwq_connect_req2 *l4kwqe2; 3460 struct l4_kwq_connect_req3 *l4kwqe3; 3461 struct kwqe *wqes[3]; 3462 u8 tcp_flags = 0; 3463 int num_wqes = 2; 3464 3465 l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1; 3466 l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2; 3467 l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3; 3468 memset(l4kwqe1, 0, sizeof(*l4kwqe1)); 3469 memset(l4kwqe2, 0, sizeof(*l4kwqe2)); 3470 memset(l4kwqe3, 0, sizeof(*l4kwqe3)); 3471 3472 l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3; 3473 l4kwqe3->flags = 3474 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT; 3475 l4kwqe3->ka_timeout = csk->ka_timeout; 3476 l4kwqe3->ka_interval = csk->ka_interval; 3477 l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count; 3478 l4kwqe3->tos = csk->tos; 3479 l4kwqe3->ttl = csk->ttl; 3480 l4kwqe3->snd_seq_scale = csk->snd_seq_scale; 3481 l4kwqe3->pmtu = csk->mtu; 3482 l4kwqe3->rcv_buf = csk->rcv_buf; 3483 l4kwqe3->snd_buf = 
csk->snd_buf; 3484 l4kwqe3->seed = csk->seed; 3485 3486 wqes[0] = (struct kwqe *) l4kwqe1; 3487 if (test_bit(SK_F_IPV6, &csk->flags)) { 3488 wqes[1] = (struct kwqe *) l4kwqe2; 3489 wqes[2] = (struct kwqe *) l4kwqe3; 3490 num_wqes = 3; 3491 3492 l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6; 3493 l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2; 3494 l4kwqe2->flags = 3495 L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT | 3496 L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT; 3497 l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]); 3498 l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]); 3499 l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]); 3500 l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]); 3501 l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]); 3502 l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]); 3503 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) - 3504 sizeof(struct tcphdr); 3505 } else { 3506 wqes[1] = (struct kwqe *) l4kwqe3; 3507 l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) - 3508 sizeof(struct tcphdr); 3509 } 3510 3511 l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1; 3512 l4kwqe1->flags = 3513 (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) | 3514 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT; 3515 l4kwqe1->cid = csk->cid; 3516 l4kwqe1->pg_cid = csk->pg_cid; 3517 l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]); 3518 l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]); 3519 l4kwqe1->src_port = be16_to_cpu(csk->src_port); 3520 l4kwqe1->dst_port = be16_to_cpu(csk->dst_port); 3521 if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK) 3522 tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK; 3523 if (csk->tcp_flags & SK_TCP_KEEP_ALIVE) 3524 tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE; 3525 if (csk->tcp_flags & SK_TCP_NAGLE) 3526 tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE; 3527 if (csk->tcp_flags & SK_TCP_TIMESTAMP) 3528 tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP; 3529 if (csk->tcp_flags & SK_TCP_SACK) 3530 tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK; 3531 if (csk->tcp_flags & SK_TCP_SEG_SCALING) 3532 tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING; 3533 3534 l4kwqe1->tcp_flags = tcp_flags; 3535 3536 return dev->submit_kwqes(dev, wqes, num_wqes); 3537} 3538 3539static int cnic_cm_close_req(struct cnic_sock *csk) 3540{ 3541 struct cnic_dev *dev = csk->dev; 3542 struct l4_kwq_close_req *l4kwqe; 3543 struct kwqe *wqes[1]; 3544 3545 l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2; 3546 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3547 wqes[0] = (struct kwqe *) l4kwqe; 3548 3549 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE; 3550 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT; 3551 l4kwqe->cid = csk->cid; 3552 3553 return dev->submit_kwqes(dev, wqes, 1); 3554} 3555 3556static int cnic_cm_abort_req(struct cnic_sock *csk) 3557{ 3558 struct cnic_dev *dev = csk->dev; 3559 struct l4_kwq_reset_req *l4kwqe; 3560 struct kwqe *wqes[1]; 3561 3562 l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2; 3563 memset(l4kwqe, 0, sizeof(*l4kwqe)); 3564 wqes[0] = (struct kwqe *) l4kwqe; 3565 3566 l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET; 3567 l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT; 3568 l4kwqe->cid = csk->cid; 3569 3570 return dev->submit_kwqes(dev, wqes, 1); 3571} 3572 3573static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid, 3574 u32 l5_cid, struct cnic_sock **csk, void *context) 3575{ 3576 struct cnic_local *cp = dev->cnic_priv; 3577 struct cnic_sock *csk1; 3578 3579 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3580 return -EINVAL; 3581 3582 if (cp->ctx_tbl) { 3583 struct 
cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3584 3585 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) 3586 return -EAGAIN; 3587 } 3588 3589 csk1 = &cp->csk_tbl[l5_cid]; 3590 if (atomic_read(&csk1->ref_count)) 3591 return -EAGAIN; 3592 3593 if (test_and_set_bit(SK_F_INUSE, &csk1->flags)) 3594 return -EBUSY; 3595 3596 csk1->dev = dev; 3597 csk1->cid = cid; 3598 csk1->l5_cid = l5_cid; 3599 csk1->ulp_type = ulp_type; 3600 csk1->context = context; 3601 3602 csk1->ka_timeout = DEF_KA_TIMEOUT; 3603 csk1->ka_interval = DEF_KA_INTERVAL; 3604 csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT; 3605 csk1->tos = DEF_TOS; 3606 csk1->ttl = DEF_TTL; 3607 csk1->snd_seq_scale = DEF_SND_SEQ_SCALE; 3608 csk1->rcv_buf = DEF_RCV_BUF; 3609 csk1->snd_buf = DEF_SND_BUF; 3610 csk1->seed = DEF_SEED; 3611 csk1->tcp_flags = 0; 3612 3613 *csk = csk1; 3614 return 0; 3615} 3616 3617static void cnic_cm_cleanup(struct cnic_sock *csk) 3618{ 3619 if (csk->src_port) { 3620 struct cnic_dev *dev = csk->dev; 3621 struct cnic_local *cp = dev->cnic_priv; 3622 3623 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port)); 3624 csk->src_port = 0; 3625 } 3626} 3627 3628static void cnic_close_conn(struct cnic_sock *csk) 3629{ 3630 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) { 3631 cnic_cm_upload_pg(csk); 3632 clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3633 } 3634 cnic_cm_cleanup(csk); 3635} 3636 3637static int cnic_cm_destroy(struct cnic_sock *csk) 3638{ 3639 if (!cnic_in_use(csk)) 3640 return -EINVAL; 3641 3642 csk_hold(csk); 3643 clear_bit(SK_F_INUSE, &csk->flags); 3644 smp_mb__after_atomic(); 3645 while (atomic_read(&csk->ref_count) != 1) 3646 msleep(1); 3647 cnic_cm_cleanup(csk); 3648 3649 csk->flags = 0; 3650 csk_put(csk); 3651 return 0; 3652} 3653 3654static inline u16 cnic_get_vlan(struct net_device *dev, 3655 struct net_device **vlan_dev) 3656{ 3657 if (dev->priv_flags & IFF_802_1Q_VLAN) { 3658 *vlan_dev = vlan_dev_real_dev(dev); 3659 return vlan_dev_vlan_id(dev); 3660 } 3661 *vlan_dev = dev; 3662 return 0; 3663} 3664 3665static int cnic_get_v4_route(struct sockaddr_in *dst_addr, 3666 struct dst_entry **dst) 3667{ 3668#if defined(CONFIG_INET) 3669 struct rtable *rt; 3670 3671 rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0); 3672 if (!IS_ERR(rt)) { 3673 *dst = &rt->dst; 3674 return 0; 3675 } 3676 return PTR_ERR(rt); 3677#else 3678 return -ENETUNREACH; 3679#endif 3680} 3681 3682static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, 3683 struct dst_entry **dst) 3684{ 3685#if IS_ENABLED(CONFIG_IPV6) 3686 struct flowi6 fl6; 3687 3688 memset(&fl6, 0, sizeof(fl6)); 3689 fl6.daddr = dst_addr->sin6_addr; 3690 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 3691 fl6.flowi6_oif = dst_addr->sin6_scope_id; 3692 3693 *dst = ip6_route_output(&init_net, NULL, &fl6); 3694 if ((*dst)->error) { 3695 dst_release(*dst); 3696 *dst = NULL; 3697 return -ENETUNREACH; 3698 } else 3699 return 0; 3700#endif 3701 3702 return -ENETUNREACH; 3703} 3704 3705static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr, 3706 int ulp_type) 3707{ 3708 struct cnic_dev *dev = NULL; 3709 struct dst_entry *dst; 3710 struct net_device *netdev = NULL; 3711 int err = -ENETUNREACH; 3712 3713 if (dst_addr->sin_family == AF_INET) 3714 err = cnic_get_v4_route(dst_addr, &dst); 3715 else if (dst_addr->sin_family == AF_INET6) { 3716 struct sockaddr_in6 *dst_addr6 = 3717 (struct sockaddr_in6 *) dst_addr; 3718 3719 err = cnic_get_v6_route(dst_addr6, &dst); 3720 } else 3721 return NULL; 3722 3723 if (err) 3724 return NULL; 
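	/* If the route has an egress device, map it to its real (non-VLAN)
	 * device and look up the cnic device registered on it, if any.
	 */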
3725 3726 if (!dst->dev) 3727 goto done; 3728 3729 cnic_get_vlan(dst->dev, &netdev); 3730 3731 dev = cnic_from_netdev(netdev); 3732 3733done: 3734 dst_release(dst); 3735 if (dev) 3736 cnic_put(dev); 3737 return dev; 3738} 3739 3740static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3741{ 3742 struct cnic_dev *dev = csk->dev; 3743 struct cnic_local *cp = dev->cnic_priv; 3744 3745 return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk); 3746} 3747 3748static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3749{ 3750 struct cnic_dev *dev = csk->dev; 3751 struct cnic_local *cp = dev->cnic_priv; 3752 int is_v6, rc = 0; 3753 struct dst_entry *dst = NULL; 3754 struct net_device *realdev; 3755 __be16 local_port; 3756 u32 port_id; 3757 3758 if (saddr->local.v6.sin6_family == AF_INET6 && 3759 saddr->remote.v6.sin6_family == AF_INET6) 3760 is_v6 = 1; 3761 else if (saddr->local.v4.sin_family == AF_INET && 3762 saddr->remote.v4.sin_family == AF_INET) 3763 is_v6 = 0; 3764 else 3765 return -EINVAL; 3766 3767 clear_bit(SK_F_IPV6, &csk->flags); 3768 3769 if (is_v6) { 3770 set_bit(SK_F_IPV6, &csk->flags); 3771 cnic_get_v6_route(&saddr->remote.v6, &dst); 3772 3773 memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr, 3774 sizeof(struct in6_addr)); 3775 csk->dst_port = saddr->remote.v6.sin6_port; 3776 local_port = saddr->local.v6.sin6_port; 3777 3778 } else { 3779 cnic_get_v4_route(&saddr->remote.v4, &dst); 3780 3781 csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr; 3782 csk->dst_port = saddr->remote.v4.sin_port; 3783 local_port = saddr->local.v4.sin_port; 3784 } 3785 3786 csk->vlan_id = 0; 3787 csk->mtu = dev->netdev->mtu; 3788 if (dst && dst->dev) { 3789 u16 vlan = cnic_get_vlan(dst->dev, &realdev); 3790 if (realdev == dev->netdev) { 3791 csk->vlan_id = vlan; 3792 csk->mtu = dst_mtu(dst); 3793 } 3794 } 3795 3796 port_id = be16_to_cpu(local_port); 3797 if (port_id >= CNIC_LOCAL_PORT_MIN && 3798 port_id < CNIC_LOCAL_PORT_MAX) { 3799 if (cnic_alloc_id(&cp->csk_port_tbl, port_id)) 3800 port_id = 0; 3801 } else 3802 port_id = 0; 3803 3804 if (!port_id) { 3805 port_id = cnic_alloc_new_id(&cp->csk_port_tbl); 3806 if (port_id == -1) { 3807 rc = -ENOMEM; 3808 goto err_out; 3809 } 3810 local_port = cpu_to_be16(port_id); 3811 } 3812 csk->src_port = local_port; 3813 3814err_out: 3815 dst_release(dst); 3816 return rc; 3817} 3818 3819static void cnic_init_csk_state(struct cnic_sock *csk) 3820{ 3821 csk->state = 0; 3822 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3823 clear_bit(SK_F_CLOSING, &csk->flags); 3824} 3825 3826static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr) 3827{ 3828 struct cnic_local *cp = csk->dev->cnic_priv; 3829 int err = 0; 3830 3831 if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) 3832 return -EOPNOTSUPP; 3833 3834 if (!cnic_in_use(csk)) 3835 return -EINVAL; 3836 3837 if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags)) 3838 return -EINVAL; 3839 3840 cnic_init_csk_state(csk); 3841 3842 err = cnic_get_route(csk, saddr); 3843 if (err) 3844 goto err_out; 3845 3846 err = cnic_resolve_addr(csk, saddr); 3847 if (!err) 3848 return 0; 3849 3850err_out: 3851 clear_bit(SK_F_CONNECT_START, &csk->flags); 3852 return err; 3853} 3854 3855static int cnic_cm_abort(struct cnic_sock *csk) 3856{ 3857 struct cnic_local *cp = csk->dev->cnic_priv; 3858 u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP; 3859 3860 if (!cnic_in_use(csk)) 3861 return -EINVAL; 3862 3863 if (cnic_abort_prep(csk)) 3864 return cnic_cm_abort_req(csk); 3865 3866 /* Getting 
here means that we haven't started connect, or 3867 * connect was not successful, or it has been reset by the target. 3868 */ 3869 3870 cp->close_conn(csk, opcode); 3871 if (csk->state != opcode) { 3872 /* Wait for remote reset sequence to complete */ 3873 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3874 msleep(1); 3875 3876 return -EALREADY; 3877 } 3878 3879 return 0; 3880} 3881 3882static int cnic_cm_close(struct cnic_sock *csk) 3883{ 3884 if (!cnic_in_use(csk)) 3885 return -EINVAL; 3886 3887 if (cnic_close_prep(csk)) { 3888 csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 3889 return cnic_cm_close_req(csk); 3890 } else { 3891 /* Wait for remote reset sequence to complete */ 3892 while (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3893 msleep(1); 3894 3895 return -EALREADY; 3896 } 3897 return 0; 3898} 3899 3900static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk, 3901 u8 opcode) 3902{ 3903 struct cnic_ulp_ops *ulp_ops; 3904 int ulp_type = csk->ulp_type; 3905 3906 rcu_read_lock(); 3907 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]); 3908 if (ulp_ops) { 3909 if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE) 3910 ulp_ops->cm_connect_complete(csk); 3911 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 3912 ulp_ops->cm_close_complete(csk); 3913 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) 3914 ulp_ops->cm_remote_abort(csk); 3915 else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) 3916 ulp_ops->cm_abort_complete(csk); 3917 else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED) 3918 ulp_ops->cm_remote_close(csk); 3919 } 3920 rcu_read_unlock(); 3921} 3922 3923static int cnic_cm_set_pg(struct cnic_sock *csk) 3924{ 3925 if (cnic_offld_prep(csk)) { 3926 if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) 3927 cnic_cm_update_pg(csk); 3928 else 3929 cnic_cm_offload_pg(csk); 3930 } 3931 return 0; 3932} 3933 3934static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe) 3935{ 3936 struct cnic_local *cp = dev->cnic_priv; 3937 u32 l5_cid = kcqe->pg_host_opaque; 3938 u8 opcode = kcqe->op_code; 3939 struct cnic_sock *csk = &cp->csk_tbl[l5_cid]; 3940 3941 csk_hold(csk); 3942 if (!cnic_in_use(csk)) 3943 goto done; 3944 3945 if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3946 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3947 goto done; 3948 } 3949 /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */ 3950 if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) { 3951 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 3952 cnic_cm_upcall(cp, csk, 3953 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 3954 goto done; 3955 } 3956 3957 csk->pg_cid = kcqe->pg_cid; 3958 set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags); 3959 cnic_cm_conn_req(csk); 3960 3961done: 3962 csk_put(csk); 3963} 3964 3965static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe) 3966{ 3967 struct cnic_local *cp = dev->cnic_priv; 3968 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe; 3969 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE; 3970 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 3971 3972 ctx->timestamp = jiffies; 3973 ctx->wait_cond = 1; 3974 wake_up(&ctx->waitq); 3975} 3976 3977static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3978{ 3979 struct cnic_local *cp = dev->cnic_priv; 3980 struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe; 3981 u8 opcode = l4kcqe->op_code; 3982 u32 l5_cid; 3983 struct cnic_sock *csk; 3984 3985 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) { 3986 
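		/* FCoE terminate completions carry no L4 socket; just wake
		 * up the waiter in cnic_bnx2x_fcoe_destroy().
		 */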
cnic_process_fcoe_term_conn(dev, kcqe); 3987 return; 3988 } 3989 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3990 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3991 cnic_cm_process_offld_pg(dev, l4kcqe); 3992 return; 3993 } 3994 3995 l5_cid = l4kcqe->conn_id; 3996 if (opcode & 0x80) 3997 l5_cid = l4kcqe->cid; 3998 if (l5_cid >= MAX_CM_SK_TBL_SZ) 3999 return; 4000 4001 csk = &cp->csk_tbl[l5_cid]; 4002 csk_hold(csk); 4003 4004 if (!cnic_in_use(csk)) { 4005 csk_put(csk); 4006 return; 4007 } 4008 4009 switch (opcode) { 4010 case L5CM_RAMROD_CMD_ID_TCP_CONNECT: 4011 if (l4kcqe->status != 0) { 4012 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 4013 cnic_cm_upcall(cp, csk, 4014 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE); 4015 } 4016 break; 4017 case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE: 4018 if (l4kcqe->status == 0) 4019 set_bit(SK_F_OFFLD_COMPLETE, &csk->flags); 4020 else if (l4kcqe->status == 4021 L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) 4022 set_bit(SK_F_HW_ERR, &csk->flags); 4023 4024 smp_mb__before_atomic(); 4025 clear_bit(SK_F_OFFLD_SCHED, &csk->flags); 4026 cnic_cm_upcall(cp, csk, opcode); 4027 break; 4028 4029 case L5CM_RAMROD_CMD_ID_CLOSE: { 4030 struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe; 4031 4032 if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) { 4033 netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n", 4034 l4kcqe->status, l5kcqe->completion_status); 4035 opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP; 4036 /* Fall through */ 4037 } else { 4038 break; 4039 } 4040 } 4041 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED: 4042 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 4043 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 4044 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE: 4045 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD: 4046 if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_PARITY_ERROR) 4047 set_bit(SK_F_HW_ERR, &csk->flags); 4048 4049 cp->close_conn(csk, opcode); 4050 break; 4051 4052 case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED: 4053 /* after we already sent CLOSE_REQ */ 4054 if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) && 4055 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) && 4056 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP) 4057 cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP); 4058 else 4059 cnic_cm_upcall(cp, csk, opcode); 4060 break; 4061 } 4062 csk_put(csk); 4063} 4064 4065static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num) 4066{ 4067 struct cnic_dev *dev = data; 4068 int i; 4069 4070 for (i = 0; i < num; i++) 4071 cnic_cm_process_kcqe(dev, kcqe[i]); 4072} 4073 4074static struct cnic_ulp_ops cm_ulp_ops = { 4075 .indicate_kcqes = cnic_cm_indicate_kcqe, 4076}; 4077 4078static void cnic_cm_free_mem(struct cnic_dev *dev) 4079{ 4080 struct cnic_local *cp = dev->cnic_priv; 4081 4082 kfree(cp->csk_tbl); 4083 cp->csk_tbl = NULL; 4084 cnic_free_id_tbl(&cp->csk_port_tbl); 4085} 4086 4087static int cnic_cm_alloc_mem(struct cnic_dev *dev) 4088{ 4089 struct cnic_local *cp = dev->cnic_priv; 4090 u32 port_id; 4091 4092 cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ, 4093 GFP_KERNEL); 4094 if (!cp->csk_tbl) 4095 return -ENOMEM; 4096 4097 port_id = prandom_u32(); 4098 port_id %= CNIC_LOCAL_PORT_RANGE; 4099 if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE, 4100 CNIC_LOCAL_PORT_MIN, port_id)) { 4101 cnic_cm_free_mem(dev); 4102 return -ENOMEM; 4103 } 4104 return 0; 4105} 4106 4107static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode) 4108{ 4109 if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) { 4110 /* 
Unsolicited RESET_COMP or RESET_RECEIVED */
4111 opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4112 csk->state = opcode;
4113 }
4114
4115 /* We will proceed to close if:
4116 * 1. The event opcode matches the expected event in csk->state, or
4117 * 2. the expected event is CLOSE_COMP or RESET_COMP (we accept any
4118 * event), or
4119 * 3. the expected event is 0, meaning the connection was never
4120 * established (we accept the opcode from cm_abort). */
4121 if (opcode == csk->state || csk->state == 0 ||
4122 csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4123 csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4124 if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4125 if (csk->state == 0)
4126 csk->state = opcode;
4127 return 1;
4128 }
4129 }
4130 return 0;
4131}
4132
4133static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4134{
4135 struct cnic_dev *dev = csk->dev;
4136 struct cnic_local *cp = dev->cnic_priv;
4137
4138 if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4139 cnic_cm_upcall(cp, csk, opcode);
4140 return;
4141 }
4142
4143 clear_bit(SK_F_CONNECT_START, &csk->flags);
4144 cnic_close_conn(csk);
4145 csk->state = opcode;
4146 cnic_cm_upcall(cp, csk, opcode);
4147}
4148
4149static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4150{
4151}
4152
4153static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4154{
4155 u32 seed;
4156
4157 seed = prandom_u32();
4158 cnic_ctx_wr(dev, 45, 0, seed);
4159 return 0;
4160}
4161
4162static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4163{
4164 struct cnic_dev *dev = csk->dev;
4165 struct cnic_local *cp = dev->cnic_priv;
4166 struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4167 union l5cm_specific_data l5_data;
4168 u32 cmd = 0;
4169 int close_complete = 0;
4170
4171 switch (opcode) {
4172 case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4173 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4174 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4175 if (cnic_ready_to_close(csk, opcode)) {
4176 if (test_bit(SK_F_HW_ERR, &csk->flags))
4177 close_complete = 1;
4178 else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4179 cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4180 else
4181 close_complete = 1;
4182 }
4183 break;
4184 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4185 cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4186 break;
4187 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4188 close_complete = 1;
4189 break;
4190 }
4191 if (cmd) {
4192 memset(&l5_data, 0, sizeof(l5_data));
4193
4194 cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4195 &l5_data);
4196 } else if (close_complete) {
4197 ctx->timestamp = jiffies;
4198 cnic_close_conn(csk);
4199 cnic_cm_upcall(cp, csk, csk->state);
4200 }
4201}
4202
4203static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4204{
4205 struct cnic_local *cp = dev->cnic_priv;
4206
4207 if (!cp->ctx_tbl)
4208 return;
4209
4210 if (!netif_running(dev->netdev))
4211 return;
4212
4213 cnic_bnx2x_delete_wait(dev, 0);
4214
4215 cancel_delayed_work(&cp->delete_task);
4216 flush_workqueue(cnic_wq);
4217
4218 if (atomic_read(&cp->iscsi_conn) != 0)
4219 netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4220 atomic_read(&cp->iscsi_conn));
4221}
4222
4223static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4224{
4225 struct bnx2x *bp = netdev_priv(dev->netdev);
4226 u32 pfid = bp->pfid;
4227 u32 port = BP_PORT(bp);
4228
4229 cnic_init_bnx2x_mac(dev);
4230 cnic_bnx2x_set_tcp_options(dev, 0, 1);
4231
4232 CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4233 XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4234
4235 CNIC_WR(dev,
BAR_XSTRORM_INTMEM + 4236 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1); 4237 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 4238 XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port), 4239 DEF_MAX_DA_COUNT); 4240 4241 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4242 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL); 4243 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4244 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS); 4245 CNIC_WR8(dev, BAR_XSTRORM_INTMEM + 4246 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2); 4247 CNIC_WR(dev, BAR_XSTRORM_INTMEM + 4248 XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER); 4249 4250 CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid), 4251 DEF_MAX_CWND); 4252 return 0; 4253} 4254 4255static void cnic_delete_task(struct work_struct *work) 4256{ 4257 struct cnic_local *cp; 4258 struct cnic_dev *dev; 4259 u32 i; 4260 int need_resched = 0; 4261 4262 cp = container_of(work, struct cnic_local, delete_task.work); 4263 dev = cp->dev; 4264 4265 if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) { 4266 struct drv_ctl_info info; 4267 4268 cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI); 4269 4270 info.cmd = DRV_CTL_ISCSI_STOPPED_CMD; 4271 cp->ethdev->drv_ctl(dev->netdev, &info); 4272 } 4273 4274 for (i = 0; i < cp->max_cid_space; i++) { 4275 struct cnic_context *ctx = &cp->ctx_tbl[i]; 4276 int err; 4277 4278 if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) || 4279 !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4280 continue; 4281 4282 if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) { 4283 need_resched = 1; 4284 continue; 4285 } 4286 4287 if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags)) 4288 continue; 4289 4290 err = cnic_bnx2x_destroy_ramrod(dev, i); 4291 4292 cnic_free_bnx2x_conn_resc(dev, i); 4293 if (!err) { 4294 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) 4295 atomic_dec(&cp->iscsi_conn); 4296 4297 clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags); 4298 } 4299 } 4300 4301 if (need_resched) 4302 queue_delayed_work(cnic_wq, &cp->delete_task, 4303 msecs_to_jiffies(10)); 4304 4305} 4306 4307static int cnic_cm_open(struct cnic_dev *dev) 4308{ 4309 struct cnic_local *cp = dev->cnic_priv; 4310 int err; 4311 4312 err = cnic_cm_alloc_mem(dev); 4313 if (err) 4314 return err; 4315 4316 err = cp->start_cm(dev); 4317 4318 if (err) 4319 goto err_out; 4320 4321 INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task); 4322 4323 dev->cm_create = cnic_cm_create; 4324 dev->cm_destroy = cnic_cm_destroy; 4325 dev->cm_connect = cnic_cm_connect; 4326 dev->cm_abort = cnic_cm_abort; 4327 dev->cm_close = cnic_cm_close; 4328 dev->cm_select_dev = cnic_cm_select_dev; 4329 4330 cp->ulp_handle[CNIC_ULP_L4] = dev; 4331 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops); 4332 return 0; 4333 4334err_out: 4335 cnic_cm_free_mem(dev); 4336 return err; 4337} 4338 4339static int cnic_cm_shutdown(struct cnic_dev *dev) 4340{ 4341 struct cnic_local *cp = dev->cnic_priv; 4342 int i; 4343 4344 if (!cp->csk_tbl) 4345 return 0; 4346 4347 for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) { 4348 struct cnic_sock *csk = &cp->csk_tbl[i]; 4349 4350 clear_bit(SK_F_INUSE, &csk->flags); 4351 cnic_cm_cleanup(csk); 4352 } 4353 cnic_cm_free_mem(dev); 4354 4355 return 0; 4356} 4357 4358static void cnic_init_context(struct cnic_dev *dev, u32 cid) 4359{ 4360 u32 cid_addr; 4361 int i; 4362 4363 cid_addr = GET_CID_ADDR(cid); 4364 4365 for (i = 0; i < CTX_SIZE; i += 4) 4366 cnic_ctx_wr(dev, cid_addr, i, 0); 4367} 4368 4369static int cnic_setup_5709_context(struct cnic_dev *dev, int valid) 4370{ 4371 
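	/* On 5709 chips the context memory used by cnic lives in host
	 * pages.  Write each page's DMA address into the chip's host page
	 * table (entries are marked valid during setup, cleared when
	 * "valid" is 0 during shutdown) and poll briefly for the write
	 * request to be acknowledged; return -EBUSY if the chip never acks.
	 */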
struct cnic_local *cp = dev->cnic_priv; 4372 int ret = 0, i; 4373 u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0; 4374 4375 if (BNX2_CHIP(cp) != BNX2_CHIP_5709) 4376 return 0; 4377 4378 for (i = 0; i < cp->ctx_blks; i++) { 4379 int j; 4380 u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk; 4381 u32 val; 4382 4383 memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE); 4384 4385 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0, 4386 (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit); 4387 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1, 4388 (u64) cp->ctx_arr[i].mapping >> 32); 4389 CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx | 4390 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ); 4391 for (j = 0; j < 10; j++) { 4392 4393 val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL); 4394 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ)) 4395 break; 4396 udelay(5); 4397 } 4398 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) { 4399 ret = -EBUSY; 4400 break; 4401 } 4402 } 4403 return ret; 4404} 4405 4406static void cnic_free_irq(struct cnic_dev *dev) 4407{ 4408 struct cnic_local *cp = dev->cnic_priv; 4409 struct cnic_eth_dev *ethdev = cp->ethdev; 4410 4411 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4412 cp->disable_int_sync(dev); 4413 tasklet_kill(&cp->cnic_irq_task); 4414 free_irq(ethdev->irq_arr[0].vector, dev); 4415 } 4416} 4417 4418static int cnic_request_irq(struct cnic_dev *dev) 4419{ 4420 struct cnic_local *cp = dev->cnic_priv; 4421 struct cnic_eth_dev *ethdev = cp->ethdev; 4422 int err; 4423 4424 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev); 4425 if (err) 4426 tasklet_disable(&cp->cnic_irq_task); 4427 4428 return err; 4429} 4430 4431static int cnic_init_bnx2_irq(struct cnic_dev *dev) 4432{ 4433 struct cnic_local *cp = dev->cnic_priv; 4434 struct cnic_eth_dev *ethdev = cp->ethdev; 4435 4436 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4437 int err, i = 0; 4438 int sblk_num = cp->status_blk_num; 4439 u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) + 4440 BNX2_HC_SB_CONFIG_1; 4441 4442 CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT); 4443 4444 CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8); 4445 CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220); 4446 CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220); 4447 4448 cp->last_status_idx = cp->status_blk.bnx2->status_idx; 4449 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix, 4450 (unsigned long) dev); 4451 err = cnic_request_irq(dev); 4452 if (err) 4453 return err; 4454 4455 while (cp->status_blk.bnx2->status_completion_producer_index && 4456 i < 10) { 4457 CNIC_WR(dev, BNX2_HC_COALESCE_NOW, 4458 1 << (11 + sblk_num)); 4459 udelay(10); 4460 i++; 4461 barrier(); 4462 } 4463 if (cp->status_blk.bnx2->status_completion_producer_index) { 4464 cnic_free_irq(dev); 4465 goto failed; 4466 } 4467 4468 } else { 4469 struct status_block *sblk = cp->status_blk.gen; 4470 u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND); 4471 int i = 0; 4472 4473 while (sblk->status_completion_producer_index && i < 10) { 4474 CNIC_WR(dev, BNX2_HC_COMMAND, 4475 hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT); 4476 udelay(10); 4477 i++; 4478 barrier(); 4479 } 4480 if (sblk->status_completion_producer_index) 4481 goto failed; 4482 4483 } 4484 return 0; 4485 4486failed: 4487 netdev_err(dev->netdev, "KCQ index not resetting to 0\n"); 4488 return -EBUSY; 4489} 4490 4491static void cnic_enable_bnx2_int(struct cnic_dev *dev) 4492{ 4493 struct cnic_local *cp = dev->cnic_priv; 4494 struct cnic_eth_dev *ethdev = cp->ethdev; 4495 
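	/* When cnic has its own MSI-X vector, enable and ack the interrupt
	 * by writing the last seen status index (with INDEX_VALID) to the
	 * INT_ACK command register; in shared-vector mode there is nothing
	 * to do here.
	 */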
4496 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4497 return; 4498 4499 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4500 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx); 4501} 4502 4503static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev) 4504{ 4505 struct cnic_local *cp = dev->cnic_priv; 4506 struct cnic_eth_dev *ethdev = cp->ethdev; 4507 4508 if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)) 4509 return; 4510 4511 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 4512 BNX2_PCICFG_INT_ACK_CMD_MASK_INT); 4513 CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD); 4514 synchronize_irq(ethdev->irq_arr[0].vector); 4515} 4516 4517static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev) 4518{ 4519 struct cnic_local *cp = dev->cnic_priv; 4520 struct cnic_eth_dev *ethdev = cp->ethdev; 4521 struct cnic_uio_dev *udev = cp->udev; 4522 u32 cid_addr, tx_cid, sb_id; 4523 u32 val, offset0, offset1, offset2, offset3; 4524 int i; 4525 struct bnx2_tx_bd *txbd; 4526 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4527 struct status_block *s_blk = cp->status_blk.gen; 4528 4529 sb_id = cp->status_blk_num; 4530 tx_cid = 20; 4531 cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2; 4532 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) { 4533 struct status_block_msix *sblk = cp->status_blk.bnx2; 4534 4535 tx_cid = TX_TSS_CID + sb_id - 1; 4536 CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) | 4537 (TX_TSS_CID << 7)); 4538 cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index; 4539 } 4540 cp->tx_cons = *cp->tx_cons_ptr; 4541 4542 cid_addr = GET_CID_ADDR(tx_cid); 4543 if (BNX2_CHIP(cp) == BNX2_CHIP_5709) { 4544 u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40; 4545 4546 for (i = 0; i < PHY_CTX_SIZE; i += 4) 4547 cnic_ctx_wr(dev, cid_addr2, i, 0); 4548 4549 offset0 = BNX2_L2CTX_TYPE_XI; 4550 offset1 = BNX2_L2CTX_CMD_TYPE_XI; 4551 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI; 4552 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI; 4553 } else { 4554 cnic_init_context(dev, tx_cid); 4555 cnic_init_context(dev, tx_cid + 1); 4556 4557 offset0 = BNX2_L2CTX_TYPE; 4558 offset1 = BNX2_L2CTX_CMD_TYPE; 4559 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI; 4560 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO; 4561 } 4562 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2; 4563 cnic_ctx_wr(dev, cid_addr, offset0, val); 4564 4565 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16); 4566 cnic_ctx_wr(dev, cid_addr, offset1, val); 4567 4568 txbd = udev->l2_ring; 4569 4570 buf_map = udev->l2_buf_map; 4571 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i++, txbd++) { 4572 txbd->tx_bd_haddr_hi = (u64) buf_map >> 32; 4573 txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4574 } 4575 val = (u64) ring_map >> 32; 4576 cnic_ctx_wr(dev, cid_addr, offset2, val); 4577 txbd->tx_bd_haddr_hi = val; 4578 4579 val = (u64) ring_map & 0xffffffff; 4580 cnic_ctx_wr(dev, cid_addr, offset3, val); 4581 txbd->tx_bd_haddr_lo = val; 4582} 4583 4584static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev) 4585{ 4586 struct cnic_local *cp = dev->cnic_priv; 4587 struct cnic_eth_dev *ethdev = cp->ethdev; 4588 struct cnic_uio_dev *udev = cp->udev; 4589 u32 cid_addr, sb_id, val, coal_reg, coal_val; 4590 int i; 4591 struct bnx2_rx_bd *rxbd; 4592 struct status_block *s_blk = cp->status_blk.gen; 4593 dma_addr_t ring_map = udev->l2_ring_map; 4594 4595 sb_id = cp->status_blk_num; 4596 cnic_init_context(dev, 2); 4597 cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2; 4598 coal_reg = BNX2_HC_COMMAND; 4599 coal_val = CNIC_RD(dev, coal_reg); 4600 if (ethdev->drv_state & 
CNIC_DRV_STATE_USING_MSIX) { 4601 struct status_block_msix *sblk = cp->status_blk.bnx2; 4602 4603 cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index; 4604 coal_reg = BNX2_HC_COALESCE_NOW; 4605 coal_val = 1 << (11 + sb_id); 4606 } 4607 i = 0; 4608 while (!(*cp->rx_cons_ptr != 0) && i < 10) { 4609 CNIC_WR(dev, coal_reg, coal_val); 4610 udelay(10); 4611 i++; 4612 barrier(); 4613 } 4614 cp->rx_cons = *cp->rx_cons_ptr; 4615 4616 cid_addr = GET_CID_ADDR(2); 4617 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE | 4618 BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8); 4619 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val); 4620 4621 if (sb_id == 0) 4622 val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT; 4623 else 4624 val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id); 4625 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val); 4626 4627 rxbd = udev->l2_ring + CNIC_PAGE_SIZE; 4628 for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) { 4629 dma_addr_t buf_map; 4630 int n = (i % cp->l2_rx_ring_size) + 1; 4631 4632 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4633 rxbd->rx_bd_len = cp->l2_single_buf_size; 4634 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; 4635 rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32; 4636 rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff; 4637 } 4638 val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32; 4639 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val); 4640 rxbd->rx_bd_haddr_hi = val; 4641 4642 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; 4643 cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val); 4644 rxbd->rx_bd_haddr_lo = val; 4645 4646 val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD); 4647 cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2)); 4648} 4649 4650static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev) 4651{ 4652 struct kwqe *wqes[1], l2kwqe; 4653 4654 memset(&l2kwqe, 0, sizeof(l2kwqe)); 4655 wqes[0] = &l2kwqe; 4656 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) | 4657 (L2_KWQE_OPCODE_VALUE_FLUSH << 4658 KWQE_OPCODE_SHIFT) | 2; 4659 dev->submit_kwqes(dev, wqes, 1); 4660} 4661 4662static void cnic_set_bnx2_mac(struct cnic_dev *dev) 4663{ 4664 struct cnic_local *cp = dev->cnic_priv; 4665 u32 val; 4666 4667 val = cp->func << 2; 4668 4669 cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val); 4670 4671 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4672 BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER); 4673 dev->mac_addr[0] = (u8) (val >> 8); 4674 dev->mac_addr[1] = (u8) val; 4675 4676 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val); 4677 4678 val = cnic_reg_rd_ind(dev, cp->shmem_base + 4679 BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER); 4680 dev->mac_addr[2] = (u8) (val >> 24); 4681 dev->mac_addr[3] = (u8) (val >> 16); 4682 dev->mac_addr[4] = (u8) (val >> 8); 4683 dev->mac_addr[5] = (u8) val; 4684 4685 CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val); 4686 4687 val = 4 | BNX2_RPM_SORT_USER2_BC_EN; 4688 if (BNX2_CHIP(cp) != BNX2_CHIP_5709) 4689 val |= BNX2_RPM_SORT_USER2_PROM_VLAN; 4690 4691 CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0); 4692 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val); 4693 CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA); 4694} 4695 4696static int cnic_start_bnx2_hw(struct cnic_dev *dev) 4697{ 4698 struct cnic_local *cp = dev->cnic_priv; 4699 struct cnic_eth_dev *ethdev = cp->ethdev; 4700 struct status_block *sblk = cp->status_blk.gen; 4701 u32 val, kcq_cid_addr, kwq_cid_addr; 4702 int err; 4703 4704 cnic_set_bnx2_mac(dev); 4705 4706 val = CNIC_RD(dev, BNX2_MQ_CONFIG); 4707 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE; 4708 if 
(CNIC_PAGE_BITS > 12)
4709 val |= (12 - 8) << 4;
4710 else
4711 val |= (CNIC_PAGE_BITS - 8) << 4;
4712
4713 CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4714
4715 CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4716 CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4717 CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4718
4719 err = cnic_setup_5709_context(dev, 1);
4720 if (err)
4721 return err;
4722
4723 cnic_init_context(dev, KWQ_CID);
4724 cnic_init_context(dev, KCQ_CID);
4725
4726 kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4727 cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4728
4729 cp->max_kwq_idx = MAX_KWQ_IDX;
4730 cp->kwq_prod_idx = 0;
4731 cp->kwq_con_idx = 0;
4732 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4733
4734 if (BNX2_CHIP(cp) == BNX2_CHIP_5706 || BNX2_CHIP(cp) == BNX2_CHIP_5708)
4735 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4736 else
4737 cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4738
4739 /* Initialize the kernel work queue context. */
4740 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4741 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4742 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4743
4744 val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4745 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4746
4747 val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4748 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4749
4750 val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4751 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4752
4753 val = (u32) cp->kwq_info.pgtbl_map;
4754 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4755
4756 kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4757 cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4758
4759 cp->kcq1.sw_prod_idx = 0;
4760 cp->kcq1.hw_prod_idx_ptr =
4761 &sblk->status_completion_producer_index;
4762
4763 cp->kcq1.status_idx_ptr = &sblk->status_idx;
4764
4765 /* Initialize the kernel complete queue context. */
4766 val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4767 (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4768 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4769
4770 val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4771 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4772
4773 val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4774 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4775
4776 val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4777 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4778
4779 val = (u32) cp->kcq1.dma.pgtbl_map;
4780 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4781
4782 cp->int_num = 0;
4783 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4784 struct status_block_msix *msblk = cp->status_blk.bnx2;
4785 u32 sb_id = cp->status_blk_num;
4786 u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4787
4788 cp->kcq1.hw_prod_idx_ptr =
4789 &msblk->status_completion_producer_index;
4790 cp->kcq1.status_idx_ptr = &msblk->status_idx;
4791 cp->kwq_con_idx_ptr = &msblk->status_cmd_consumer_index;
4792 cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4793 cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4794 cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4795 }
4796
4797 /* Enable Command Scheduler notification when we write to the
4798 * host producer index of the kernel contexts.
*/
4799 CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4800
4801 /* Enable Command Scheduler notification when we write to either
4802 * the Send Queue or Receive Queue producer indexes of the kernel
4803 * bypass contexts. */
4804 CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4805 CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4806
4807 /* Notify COM when the driver posts an application buffer. */
4808 CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4809
4810 /* Set the CP and COM doorbells. These two processors poll the
4811 * doorbell for a non-zero value before running. This must be done
4812 * after setting up the kernel queue contexts. */
4813 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4814 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4815
4816 cnic_init_bnx2_tx_ring(dev);
4817 cnic_init_bnx2_rx_ring(dev);
4818
4819 err = cnic_init_bnx2_irq(dev);
4820 if (err) {
4821 netdev_err(dev->netdev, "cnic_init_irq failed\n");
4822 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4823 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4824 return err;
4825 }
4826
4827 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ;
4828
4829 return 0;
4830}
4831
4832static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4833{
4834 struct cnic_local *cp = dev->cnic_priv;
4835 struct cnic_eth_dev *ethdev = cp->ethdev;
4836 u32 start_offset = ethdev->ctx_tbl_offset;
4837 int i;
4838
4839 for (i = 0; i < cp->ctx_blks; i++) {
4840 struct cnic_ctx *ctx = &cp->ctx_arr[i];
4841 dma_addr_t map = ctx->mapping;
4842
4843 if (cp->ctx_align) {
4844 unsigned long mask = cp->ctx_align - 1;
4845
4846 map = (map + mask) & ~mask;
4847 }
4848
4849 cnic_ctx_tbl_wr(dev, start_offset + i, map);
4850 }
4851}
4852
4853static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4854{
4855 struct cnic_local *cp = dev->cnic_priv;
4856 struct cnic_eth_dev *ethdev = cp->ethdev;
4857 int err = 0;
4858
4859 tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4860 (unsigned long) dev);
4861 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4862 err = cnic_request_irq(dev);
4863
4864 return err;
4865}
4866
4867static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4868 u16 sb_id, u8 sb_index,
4869 u8 disable)
4870{
4871 struct bnx2x *bp = netdev_priv(dev->netdev);
4872
4873 u32 addr = BAR_CSTRORM_INTMEM +
4874 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4875 offsetof(struct hc_status_block_data_e1x, index_data) +
4876 sizeof(struct hc_index_data)*sb_index +
4877 offsetof(struct hc_index_data, flags);
4878 u16 flags = CNIC_RD16(dev, addr);
4879 /* clear and set */
4880 flags &= ~HC_INDEX_DATA_HC_ENABLED;
4881 flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4882 HC_INDEX_DATA_HC_ENABLED);
4883 CNIC_WR16(dev, addr, flags);
4884}
4885
4886static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4887{
4888 struct cnic_local *cp = dev->cnic_priv;
4889 struct bnx2x *bp = netdev_priv(dev->netdev);
4890 u8 sb_id = cp->status_blk_num;
4891
4892 CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4893 CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4894 offsetof(struct hc_status_block_data_e1x, index_data) +
4895 sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4896 offsetof(struct hc_index_data, timeout), 64 / 4);
4897 cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4898}
4899
4900static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4901{
4902}
4903
4904static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4905 struct client_init_ramrod_data *data)
4906{
4907 struct cnic_local *cp = dev->cnic_priv;
4908 struct
bnx2x *bp = netdev_priv(dev->netdev); 4909 struct cnic_uio_dev *udev = cp->udev; 4910 union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring; 4911 dma_addr_t buf_map, ring_map = udev->l2_ring_map; 4912 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4913 int i; 4914 u32 cli = cp->ethdev->iscsi_l2_client_id; 4915 u32 val; 4916 4917 memset(txbd, 0, CNIC_PAGE_SIZE); 4918 4919 buf_map = udev->l2_buf_map; 4920 for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) { 4921 struct eth_tx_start_bd *start_bd = &txbd->start_bd; 4922 struct eth_tx_parse_bd_e1x *pbd_e1x = 4923 &((txbd + 1)->parse_bd_e1x); 4924 struct eth_tx_parse_bd_e2 *pbd_e2 = &((txbd + 1)->parse_bd_e2); 4925 struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd); 4926 4927 start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 4928 start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 4929 reg_bd->addr_hi = start_bd->addr_hi; 4930 reg_bd->addr_lo = start_bd->addr_lo + 0x10; 4931 start_bd->nbytes = cpu_to_le16(0x10); 4932 start_bd->nbd = cpu_to_le16(3); 4933 start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 4934 start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS; 4935 start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 4936 4937 if (BNX2X_CHIP_IS_E2_PLUS(bp)) 4938 pbd_e2->parsing_data = (UNICAST_ADDRESS << 4939 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); 4940 else 4941 pbd_e1x->global_data = (UNICAST_ADDRESS << 4942 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE_SHIFT); 4943 } 4944 4945 val = (u64) ring_map >> 32; 4946 txbd->next_bd.addr_hi = cpu_to_le32(val); 4947 4948 data->tx.tx_bd_page_base.hi = cpu_to_le32(val); 4949 4950 val = (u64) ring_map & 0xffffffff; 4951 txbd->next_bd.addr_lo = cpu_to_le32(val); 4952 4953 data->tx.tx_bd_page_base.lo = cpu_to_le32(val); 4954 4955 /* Other ramrod params */ 4956 data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS; 4957 data->tx.tx_status_block_id = BNX2X_DEF_SB_ID; 4958 4959 /* reset xstorm per client statistics */ 4960 if (cli < MAX_STAT_COUNTER_ID) { 4961 data->general.statistics_zero_flg = 1; 4962 data->general.statistics_en_flg = 1; 4963 data->general.statistics_counter_id = cli; 4964 } 4965 4966 cp->tx_cons_ptr = 4967 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS]; 4968} 4969 4970static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev, 4971 struct client_init_ramrod_data *data) 4972{ 4973 struct cnic_local *cp = dev->cnic_priv; 4974 struct bnx2x *bp = netdev_priv(dev->netdev); 4975 struct cnic_uio_dev *udev = cp->udev; 4976 struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring + 4977 CNIC_PAGE_SIZE); 4978 struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *) 4979 (udev->l2_ring + (2 * CNIC_PAGE_SIZE)); 4980 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4981 int i; 4982 u32 cli = cp->ethdev->iscsi_l2_client_id; 4983 int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); 4984 u32 val; 4985 dma_addr_t ring_map = udev->l2_ring_map; 4986 4987 /* General data */ 4988 data->general.client_id = cli; 4989 data->general.activate_flg = 1; 4990 data->general.sp_client_id = cli; 4991 data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14); 4992 data->general.func_id = bp->pfid; 4993 4994 for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) { 4995 dma_addr_t buf_map; 4996 int n = (i % cp->l2_rx_ring_size) + 1; 4997 4998 buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size); 4999 rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32); 5000 rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff); 5001 } 5002 5003 val = (u64) 
(ring_map + CNIC_PAGE_SIZE) >> 32; 5004 rxbd->addr_hi = cpu_to_le32(val); 5005 data->rx.bd_page_base.hi = cpu_to_le32(val); 5006 5007 val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff; 5008 rxbd->addr_lo = cpu_to_le32(val); 5009 data->rx.bd_page_base.lo = cpu_to_le32(val); 5010 5011 rxcqe += BNX2X_MAX_RCQ_DESC_CNT; 5012 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32; 5013 rxcqe->addr_hi = cpu_to_le32(val); 5014 data->rx.cqe_page_base.hi = cpu_to_le32(val); 5015 5016 val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff; 5017 rxcqe->addr_lo = cpu_to_le32(val); 5018 data->rx.cqe_page_base.lo = cpu_to_le32(val); 5019 5020 /* Other ramrod params */ 5021 data->rx.client_qzone_id = cl_qzone_id; 5022 data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS; 5023 data->rx.status_block_id = BNX2X_DEF_SB_ID; 5024 5025 data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT; 5026 5027 data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size); 5028 data->rx.outer_vlan_removal_enable_flg = 1; 5029 data->rx.silent_vlan_removal_flg = 1; 5030 data->rx.silent_vlan_value = 0; 5031 data->rx.silent_vlan_mask = 0xffff; 5032 5033 cp->rx_cons_ptr = 5034 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; 5035 cp->rx_cons = *cp->rx_cons_ptr; 5036} 5037 5038static void cnic_init_bnx2x_kcq(struct cnic_dev *dev) 5039{ 5040 struct cnic_local *cp = dev->cnic_priv; 5041 struct bnx2x *bp = netdev_priv(dev->netdev); 5042 u32 pfid = bp->pfid; 5043 5044 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM + 5045 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0); 5046 cp->kcq1.sw_prod_idx = 0; 5047 5048 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { 5049 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 5050 5051 cp->kcq1.hw_prod_idx_ptr = 5052 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 5053 cp->kcq1.status_idx_ptr = 5054 &sb->sb.running_index[SM_RX_ID]; 5055 } else { 5056 struct host_hc_status_block_e1x *sb = cp->status_blk.gen; 5057 5058 cp->kcq1.hw_prod_idx_ptr = 5059 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 5060 cp->kcq1.status_idx_ptr = 5061 &sb->sb.running_index[SM_RX_ID]; 5062 } 5063 5064 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { 5065 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 5066 5067 cp->kcq2.io_addr = BAR_USTRORM_INTMEM + 5068 USTORM_FCOE_EQ_PROD_OFFSET(pfid); 5069 cp->kcq2.sw_prod_idx = 0; 5070 cp->kcq2.hw_prod_idx_ptr = 5071 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS]; 5072 cp->kcq2.status_idx_ptr = 5073 &sb->sb.running_index[SM_RX_ID]; 5074 } 5075} 5076 5077static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 5078{ 5079 struct cnic_local *cp = dev->cnic_priv; 5080 struct bnx2x *bp = netdev_priv(dev->netdev); 5081 struct cnic_eth_dev *ethdev = cp->ethdev; 5082 int func, ret; 5083 u32 pfid; 5084 5085 dev->stats_addr = ethdev->addr_drv_info_to_mcp; 5086 cp->func = bp->pf_num; 5087 5088 func = CNIC_FUNC(cp); 5089 pfid = bp->pfid; 5090 5091 ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ, 5092 cp->iscsi_start_cid, 0); 5093 5094 if (ret) 5095 return -ENOMEM; 5096 5097 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { 5098 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn, 5099 cp->fcoe_start_cid, 0); 5100 5101 if (ret) 5102 return -ENOMEM; 5103 } 5104 5105 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2; 5106 5107 cnic_init_bnx2x_kcq(dev); 5108 5109 /* Only 1 EQ */ 5110 CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX); 5111 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5112 CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0); 5113 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5114 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0), 5115 
cp->kcq1.dma.pg_map_arr[1] & 0xffffffff); 5116 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5117 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4, 5118 (u64) cp->kcq1.dma.pg_map_arr[1] >> 32); 5119 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5120 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0), 5121 cp->kcq1.dma.pg_map_arr[0] & 0xffffffff); 5122 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5123 CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4, 5124 (u64) cp->kcq1.dma.pg_map_arr[0] >> 32); 5125 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 5126 CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1); 5127 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + 5128 CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num); 5129 CNIC_WR8(dev, BAR_CSTRORM_INTMEM + 5130 CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0), 5131 HC_INDEX_ISCSI_EQ_CONS); 5132 5133 CNIC_WR(dev, BAR_USTRORM_INTMEM + 5134 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid), 5135 cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff); 5136 CNIC_WR(dev, BAR_USTRORM_INTMEM + 5137 USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4, 5138 (u64) cp->gbl_buf_info.pg_map_arr[0] >> 32); 5139 5140 CNIC_WR(dev, BAR_TSTRORM_INTMEM + 5141 TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF); 5142 5143 cnic_setup_bnx2x_context(dev); 5144 5145 ret = cnic_init_bnx2x_irq(dev); 5146 if (ret) 5147 return ret; 5148 5149 ethdev->drv_state |= CNIC_DRV_STATE_HANDLES_IRQ; 5150 return 0; 5151} 5152 5153static void cnic_init_rings(struct cnic_dev *dev) 5154{ 5155 struct cnic_local *cp = dev->cnic_priv; 5156 struct bnx2x *bp = netdev_priv(dev->netdev); 5157 struct cnic_uio_dev *udev = cp->udev; 5158 5159 if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 5160 return; 5161 5162 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 5163 cnic_init_bnx2_tx_ring(dev); 5164 cnic_init_bnx2_rx_ring(dev); 5165 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5166 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 5167 u32 cli = cp->ethdev->iscsi_l2_client_id; 5168 u32 cid = cp->ethdev->iscsi_l2_cid; 5169 u32 cl_qzone_id; 5170 struct client_init_ramrod_data *data; 5171 union l5cm_specific_data l5_data; 5172 struct ustorm_eth_rx_producers rx_prods = {0}; 5173 u32 off, i, *cid_ptr; 5174 5175 rx_prods.bd_prod = 0; 5176 rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT; 5177 barrier(); 5178 5179 cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli); 5180 5181 off = BAR_USTRORM_INTMEM + 5182 (BNX2X_CHIP_IS_E2_PLUS(bp) ? 
5183 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) : 5184 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli)); 5185 5186 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 5187 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 5188 5189 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 5190 5191 data = udev->l2_buf; 5192 cid_ptr = udev->l2_buf + 12; 5193 5194 memset(data, 0, sizeof(*data)); 5195 5196 cnic_init_bnx2x_tx_ring(dev, data); 5197 cnic_init_bnx2x_rx_ring(dev, data); 5198 5199 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; 5200 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; 5201 5202 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5203 5204 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 5205 cid, ETH_CONNECTION_TYPE, &l5_data); 5206 5207 i = 0; 5208 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5209 ++i < 10) 5210 msleep(1); 5211 5212 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 5213 netdev_err(dev->netdev, 5214 "iSCSI CLIENT_SETUP did not complete\n"); 5215 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5216 cnic_ring_ctl(dev, cid, cli, 1); 5217 *cid_ptr = cid >> 4; 5218 *(cid_ptr + 1) = cid * bp->db_size; 5219 *(cid_ptr + 2) = UIO_USE_TX_DOORBELL; 5220 } 5221} 5222 5223static void cnic_shutdown_rings(struct cnic_dev *dev) 5224{ 5225 struct cnic_local *cp = dev->cnic_priv; 5226 struct cnic_uio_dev *udev = cp->udev; 5227 void *rx_ring; 5228 5229 if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags)) 5230 return; 5231 5232 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 5233 cnic_shutdown_bnx2_rx_ring(dev); 5234 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 5235 u32 cli = cp->ethdev->iscsi_l2_client_id; 5236 u32 cid = cp->ethdev->iscsi_l2_cid; 5237 union l5cm_specific_data l5_data; 5238 int i; 5239 5240 cnic_ring_ctl(dev, cid, cli, 0); 5241 5242 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 5243 5244 l5_data.phy_address.lo = cli; 5245 l5_data.phy_address.hi = 0; 5246 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 5247 cid, ETH_CONNECTION_TYPE, &l5_data); 5248 i = 0; 5249 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5250 ++i < 10) 5251 msleep(1); 5252 5253 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags)) 5254 netdev_err(dev->netdev, 5255 "iSCSI CLIENT_HALT did not complete\n"); 5256 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5257 5258 memset(&l5_data, 0, sizeof(l5_data)); 5259 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 5260 cid, NONE_CONNECTION_TYPE, &l5_data); 5261 msleep(10); 5262 } 5263 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5264 rx_ring = udev->l2_ring + CNIC_PAGE_SIZE; 5265 memset(rx_ring, 0, CNIC_PAGE_SIZE); 5266} 5267 5268static int cnic_register_netdev(struct cnic_dev *dev) 5269{ 5270 struct cnic_local *cp = dev->cnic_priv; 5271 struct cnic_eth_dev *ethdev = cp->ethdev; 5272 int err; 5273 5274 if (!ethdev) 5275 return -ENODEV; 5276 5277 if (ethdev->drv_state & CNIC_DRV_STATE_REGD) 5278 return 0; 5279 5280 err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev); 5281 if (err) 5282 netdev_err(dev->netdev, "register_cnic failed\n"); 5283 5284 /* Read iSCSI config again. On some bnx2x device, iSCSI config 5285 * can change after firmware is downloaded. 
5286 */ 5287 dev->max_iscsi_conn = ethdev->max_iscsi_conn; 5288 if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI) 5289 dev->max_iscsi_conn = 0; 5290 5291 return err; 5292} 5293 5294static void cnic_unregister_netdev(struct cnic_dev *dev) 5295{ 5296 struct cnic_local *cp = dev->cnic_priv; 5297 struct cnic_eth_dev *ethdev = cp->ethdev; 5298 5299 if (!ethdev) 5300 return; 5301 5302 ethdev->drv_unregister_cnic(dev->netdev); 5303} 5304 5305static int cnic_start_hw(struct cnic_dev *dev) 5306{ 5307 struct cnic_local *cp = dev->cnic_priv; 5308 struct cnic_eth_dev *ethdev = cp->ethdev; 5309 int err; 5310 5311 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) 5312 return -EALREADY; 5313 5314 dev->regview = ethdev->io_base; 5315 pci_dev_get(dev->pcidev); 5316 cp->func = PCI_FUNC(dev->pcidev->devfn); 5317 cp->status_blk.gen = ethdev->irq_arr[0].status_blk; 5318 cp->status_blk_num = ethdev->irq_arr[0].status_blk_num; 5319 5320 err = cp->alloc_resc(dev); 5321 if (err) { 5322 netdev_err(dev->netdev, "allocate resource failure\n"); 5323 goto err1; 5324 } 5325 5326 err = cp->start_hw(dev); 5327 if (err) 5328 goto err1; 5329 5330 err = cnic_cm_open(dev); 5331 if (err) 5332 goto err1; 5333 5334 set_bit(CNIC_F_CNIC_UP, &dev->flags); 5335 5336 cp->enable_int(dev); 5337 5338 return 0; 5339 5340err1: 5341 cp->free_resc(dev); 5342 pci_dev_put(dev->pcidev); 5343 return err; 5344} 5345 5346static void cnic_stop_bnx2_hw(struct cnic_dev *dev) 5347{ 5348 cnic_disable_bnx2_int_sync(dev); 5349 5350 cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0); 5351 cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0); 5352 5353 cnic_init_context(dev, KWQ_CID); 5354 cnic_init_context(dev, KCQ_CID); 5355 5356 cnic_setup_5709_context(dev, 0); 5357 cnic_free_irq(dev); 5358 5359 cnic_free_resc(dev); 5360} 5361 5362 5363static void cnic_stop_bnx2x_hw(struct cnic_dev *dev) 5364{ 5365 struct cnic_local *cp = dev->cnic_priv; 5366 struct bnx2x *bp = netdev_priv(dev->netdev); 5367 u32 hc_index = HC_INDEX_ISCSI_EQ_CONS; 5368 u32 sb_id = cp->status_blk_num; 5369 u32 idx_off, syn_off; 5370 5371 cnic_free_irq(dev); 5372 5373 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { 5374 idx_off = offsetof(struct hc_status_block_e2, index_values) + 5375 (hc_index * sizeof(u16)); 5376 5377 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E2_OFFSET(hc_index, sb_id); 5378 } else { 5379 idx_off = offsetof(struct hc_status_block_e1x, index_values) + 5380 (hc_index * sizeof(u16)); 5381 5382 syn_off = CSTORM_HC_SYNC_LINE_INDEX_E1X_OFFSET(hc_index, sb_id); 5383 } 5384 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + syn_off, 0); 5385 CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_STATUS_BLOCK_OFFSET(sb_id) + 5386 idx_off, 0); 5387 5388 *cp->kcq1.hw_prod_idx_ptr = 0; 5389 CNIC_WR(dev, BAR_CSTRORM_INTMEM + 5390 CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0); 5391 CNIC_WR16(dev, cp->kcq1.io_addr, 0); 5392 cnic_free_resc(dev); 5393} 5394 5395static void cnic_stop_hw(struct cnic_dev *dev) 5396{ 5397 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 5398 struct cnic_local *cp = dev->cnic_priv; 5399 int i = 0; 5400 5401 /* Need to wait for the ring shutdown event to complete 5402 * before clearing the CNIC_UP flag. 
5403 */ 5404 while (cp->udev && cp->udev->uio_dev != -1 && i < 15) { 5405 msleep(100); 5406 i++; 5407 } 5408 cnic_shutdown_rings(dev); 5409 cp->stop_cm(dev); 5410 cp->ethdev->drv_state &= ~CNIC_DRV_STATE_HANDLES_IRQ; 5411 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 5412 RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL); 5413 synchronize_rcu(); 5414 cnic_cm_shutdown(dev); 5415 cp->stop_hw(dev); 5416 pci_dev_put(dev->pcidev); 5417 } 5418} 5419 5420static void cnic_free_dev(struct cnic_dev *dev) 5421{ 5422 int i = 0; 5423 5424 while ((atomic_read(&dev->ref_count) != 0) && i < 10) { 5425 msleep(100); 5426 i++; 5427 } 5428 if (atomic_read(&dev->ref_count) != 0) 5429 netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n"); 5430 5431 netdev_info(dev->netdev, "Removed CNIC device\n"); 5432 dev_put(dev->netdev); 5433 kfree(dev); 5434} 5435 5436static struct cnic_dev *cnic_alloc_dev(struct net_device *dev, 5437 struct pci_dev *pdev) 5438{ 5439 struct cnic_dev *cdev; 5440 struct cnic_local *cp; 5441 int alloc_size; 5442 5443 alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local); 5444 5445 cdev = kzalloc(alloc_size, GFP_KERNEL); 5446 if (cdev == NULL) 5447 return NULL; 5448 5449 cdev->netdev = dev; 5450 cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev); 5451 cdev->register_device = cnic_register_device; 5452 cdev->unregister_device = cnic_unregister_device; 5453 cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv; 5454 5455 cp = cdev->cnic_priv; 5456 cp->dev = cdev; 5457 cp->l2_single_buf_size = 0x400; 5458 cp->l2_rx_ring_size = 3; 5459 5460 spin_lock_init(&cp->cnic_ulp_lock); 5461 5462 netdev_info(dev, "Added CNIC device\n"); 5463 5464 return cdev; 5465} 5466 5467static struct cnic_dev *init_bnx2_cnic(struct net_device *dev) 5468{ 5469 struct pci_dev *pdev; 5470 struct cnic_dev *cdev; 5471 struct cnic_local *cp; 5472 struct bnx2 *bp = netdev_priv(dev); 5473 struct cnic_eth_dev *ethdev = NULL; 5474 5475 if (bp->cnic_probe) 5476 ethdev = (bp->cnic_probe)(dev); 5477 5478 if (!ethdev) 5479 return NULL; 5480 5481 pdev = ethdev->pdev; 5482 if (!pdev) 5483 return NULL; 5484 5485 dev_hold(dev); 5486 pci_dev_get(pdev); 5487 if ((pdev->device == PCI_DEVICE_ID_NX2_5709 || 5488 pdev->device == PCI_DEVICE_ID_NX2_5709S) && 5489 (pdev->revision < 0x10)) { 5490 pci_dev_put(pdev); 5491 goto cnic_err; 5492 } 5493 pci_dev_put(pdev); 5494 5495 cdev = cnic_alloc_dev(dev, pdev); 5496 if (cdev == NULL) 5497 goto cnic_err; 5498 5499 set_bit(CNIC_F_BNX2_CLASS, &cdev->flags); 5500 cdev->submit_kwqes = cnic_submit_bnx2_kwqes; 5501 5502 cp = cdev->cnic_priv; 5503 cp->ethdev = ethdev; 5504 cdev->pcidev = pdev; 5505 cp->chip_id = ethdev->chip_id; 5506 5507 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5508 5509 cp->cnic_ops = &cnic_bnx2_ops; 5510 cp->start_hw = cnic_start_bnx2_hw; 5511 cp->stop_hw = cnic_stop_bnx2_hw; 5512 cp->setup_pgtbl = cnic_setup_page_tbl; 5513 cp->alloc_resc = cnic_alloc_bnx2_resc; 5514 cp->free_resc = cnic_free_resc; 5515 cp->start_cm = cnic_cm_init_bnx2_hw; 5516 cp->stop_cm = cnic_cm_stop_bnx2_hw; 5517 cp->enable_int = cnic_enable_bnx2_int; 5518 cp->disable_int_sync = cnic_disable_bnx2_int_sync; 5519 cp->close_conn = cnic_close_bnx2_conn; 5520 return cdev; 5521 5522cnic_err: 5523 dev_put(dev); 5524 return NULL; 5525} 5526 5527static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev) 5528{ 5529 struct pci_dev *pdev; 5530 struct cnic_dev *cdev; 5531 struct cnic_local *cp; 5532 struct bnx2x *bp = netdev_priv(dev); 5533 struct cnic_eth_dev *ethdev = NULL; 5534 5535 if 
(bp->cnic_probe) 5536 ethdev = bp->cnic_probe(dev); 5537 5538 if (!ethdev) 5539 return NULL; 5540 5541 pdev = ethdev->pdev; 5542 if (!pdev) 5543 return NULL; 5544 5545 dev_hold(dev); 5546 cdev = cnic_alloc_dev(dev, pdev); 5547 if (cdev == NULL) { 5548 dev_put(dev); 5549 return NULL; 5550 } 5551 5552 set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags); 5553 cdev->submit_kwqes = cnic_submit_bnx2x_kwqes; 5554 5555 cp = cdev->cnic_priv; 5556 cp->ethdev = ethdev; 5557 cdev->pcidev = pdev; 5558 cp->chip_id = ethdev->chip_id; 5559 5560 cdev->stats_addr = ethdev->addr_drv_info_to_mcp; 5561 5562 if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)) 5563 cdev->max_iscsi_conn = ethdev->max_iscsi_conn; 5564 if (CNIC_SUPPORTS_FCOE(bp)) { 5565 cdev->max_fcoe_conn = ethdev->max_fcoe_conn; 5566 cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges; 5567 } 5568 5569 if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS) 5570 cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS; 5571 5572 memcpy(cdev->mac_addr, ethdev->iscsi_mac, ETH_ALEN); 5573 5574 cp->cnic_ops = &cnic_bnx2x_ops; 5575 cp->start_hw = cnic_start_bnx2x_hw; 5576 cp->stop_hw = cnic_stop_bnx2x_hw; 5577 cp->setup_pgtbl = cnic_setup_page_tbl_le; 5578 cp->alloc_resc = cnic_alloc_bnx2x_resc; 5579 cp->free_resc = cnic_free_resc; 5580 cp->start_cm = cnic_cm_init_bnx2x_hw; 5581 cp->stop_cm = cnic_cm_stop_bnx2x_hw; 5582 cp->enable_int = cnic_enable_bnx2x_int; 5583 cp->disable_int_sync = cnic_disable_bnx2x_int_sync; 5584 if (BNX2X_CHIP_IS_E2_PLUS(bp)) { 5585 cp->ack_int = cnic_ack_bnx2x_e2_msix; 5586 cp->arm_int = cnic_arm_bnx2x_e2_msix; 5587 } else { 5588 cp->ack_int = cnic_ack_bnx2x_msix; 5589 cp->arm_int = cnic_arm_bnx2x_msix; 5590 } 5591 cp->close_conn = cnic_close_bnx2x_conn; 5592 return cdev; 5593} 5594 5595static struct cnic_dev *is_cnic_dev(struct net_device *dev) 5596{ 5597 struct ethtool_drvinfo drvinfo; 5598 struct cnic_dev *cdev = NULL; 5599 5600 if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) { 5601 memset(&drvinfo, 0, sizeof(drvinfo)); 5602 dev->ethtool_ops->get_drvinfo(dev, &drvinfo); 5603 5604 if (!strcmp(drvinfo.driver, "bnx2")) 5605 cdev = init_bnx2_cnic(dev); 5606 if (!strcmp(drvinfo.driver, "bnx2x")) 5607 cdev = init_bnx2x_cnic(dev); 5608 if (cdev) { 5609 write_lock(&cnic_dev_lock); 5610 list_add(&cdev->list, &cnic_dev_list); 5611 write_unlock(&cnic_dev_lock); 5612 } 5613 } 5614 return cdev; 5615} 5616 5617static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event, 5618 u16 vlan_id) 5619{ 5620 int if_type; 5621 5622 for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) { 5623 struct cnic_ulp_ops *ulp_ops; 5624 void *ctx; 5625 5626 mutex_lock(&cnic_lock); 5627 ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type], 5628 lockdep_is_held(&cnic_lock)); 5629 if (!ulp_ops || !ulp_ops->indicate_netevent) { 5630 mutex_unlock(&cnic_lock); 5631 continue; 5632 } 5633 5634 ctx = cp->ulp_handle[if_type]; 5635 5636 set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 5637 mutex_unlock(&cnic_lock); 5638 5639 ulp_ops->indicate_netevent(ctx, event, vlan_id); 5640 5641 clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]); 5642 } 5643} 5644 5645/* netdev event handler */ 5646static int cnic_netdev_event(struct notifier_block *this, unsigned long event, 5647 void *ptr) 5648{ 5649 struct net_device *netdev = netdev_notifier_info_to_dev(ptr); 5650 struct cnic_dev *dev; 5651 int new_dev = 0; 5652 5653 dev = cnic_from_netdev(netdev); 5654 5655 if (!dev && event == NETDEV_REGISTER) { 5656 /* Check for the hot-plug device */ 5657 dev = 
is_cnic_dev(netdev); 5658 if (dev) { 5659 new_dev = 1; 5660 cnic_hold(dev); 5661 } 5662 } 5663 if (dev) { 5664 struct cnic_local *cp = dev->cnic_priv; 5665 5666 if (new_dev) 5667 cnic_ulp_init(dev); 5668 else if (event == NETDEV_UNREGISTER) 5669 cnic_ulp_exit(dev); 5670 5671 if (event == NETDEV_UP) { 5672 if (cnic_register_netdev(dev) != 0) { 5673 cnic_put(dev); 5674 goto done; 5675 } 5676 if (!cnic_start_hw(dev)) 5677 cnic_ulp_start(dev); 5678 } 5679 5680 cnic_rcv_netevent(cp, event, 0); 5681 5682 if (event == NETDEV_GOING_DOWN) { 5683 cnic_ulp_stop(dev); 5684 cnic_stop_hw(dev); 5685 cnic_unregister_netdev(dev); 5686 } else if (event == NETDEV_UNREGISTER) { 5687 write_lock(&cnic_dev_lock); 5688 list_del_init(&dev->list); 5689 write_unlock(&cnic_dev_lock); 5690 5691 cnic_put(dev); 5692 cnic_free_dev(dev); 5693 goto done; 5694 } 5695 cnic_put(dev); 5696 } else { 5697 struct net_device *realdev; 5698 u16 vid; 5699 5700 vid = cnic_get_vlan(netdev, &realdev); 5701 if (realdev) { 5702 dev = cnic_from_netdev(realdev); 5703 if (dev) { 5704 vid |= VLAN_TAG_PRESENT; 5705 cnic_rcv_netevent(dev->cnic_priv, event, vid); 5706 cnic_put(dev); 5707 } 5708 } 5709 } 5710done: 5711 return NOTIFY_DONE; 5712} 5713 5714static struct notifier_block cnic_netdev_notifier = { 5715 .notifier_call = cnic_netdev_event 5716}; 5717 5718static void cnic_release(void) 5719{ 5720 struct cnic_uio_dev *udev; 5721 5722 while (!list_empty(&cnic_udev_list)) { 5723 udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev, 5724 list); 5725 cnic_free_uio(udev); 5726 } 5727} 5728 5729static int __init cnic_init(void) 5730{ 5731 int rc = 0; 5732 5733 pr_info("%s", version); 5734 5735 rc = register_netdevice_notifier(&cnic_netdev_notifier); 5736 if (rc) { 5737 cnic_release(); 5738 return rc; 5739 } 5740 5741 cnic_wq = create_singlethread_workqueue("cnic_wq"); 5742 if (!cnic_wq) { 5743 cnic_release(); 5744 unregister_netdevice_notifier(&cnic_netdev_notifier); 5745 return -ENOMEM; 5746 } 5747 5748 return 0; 5749} 5750 5751static void __exit cnic_exit(void) 5752{ 5753 unregister_netdevice_notifier(&cnic_netdev_notifier); 5754 cnic_release(); 5755 destroy_workqueue(cnic_wq); 5756} 5757 5758module_init(cnic_init); 5759module_exit(cnic_exit); 5760