drivers/infiniband/hw/ocrdma/ocrdma.h

DEFINITIONS

This source file includes the following definitions:
  1. get_ocrdma_dev
  2. get_ocrdma_ucontext
  3. get_ocrdma_pd
  4. get_ocrdma_cq
  5. get_ocrdma_qp
  6. get_ocrdma_mr
  7. get_ocrdma_ah
  8. get_ocrdma_srq
  9. is_cqe_valid
  10. is_cqe_for_sq
  11. is_cqe_invalidated
  12. is_cqe_imm
  13. is_cqe_wr_imm
  14. ocrdma_resolve_dmac
  15. hca_name
  16. ocrdma_get_eq_table_index
  17. ocrdma_get_asic_type
  18. ocrdma_get_pfc_prio
  19. ocrdma_get_app_prio
  20. ocrdma_is_enabled_and_synced
  21. ocrdma_get_ae_link_state
  22. ocrdma_is_udp_encap_supported

/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DRV_VERSION "11.0.0.0"

#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

#define OC_NAME_SH      OCRDMA_NODE_DESC "(Skyhawk)"
#define OC_NAME_UNKNOWN OCRDMA_NODE_DESC "(Unknown)"

#define OC_SKH_DEVICE_PF 0x720
#define OC_SKH_DEVICE_VF 0x728
#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
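/* Builds one bit of a uverbs command bitmask from an IB_USER_VERBS_CMD_*
 * suffix, e.g. OCRDMA_UVERBS(CREATE_CQ) expands to
 * (1ull << IB_USER_VERBS_CMD_CREATE_CQ); presumably OR-ed together when
 * the driver advertises which user-verbs commands it supports.
 */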

#define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
#define EQ_INTR_PER_SEC_THRSH_HI 150000
#define EQ_INTR_PER_SEC_THRSH_LOW 100000
#define EQ_AIC_MAX_EQD 20
#define EQ_AIC_MIN_EQD 0
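/* Adaptive interrupt coalescing (AIC) tuning points. A plausible reading,
 * assuming the policy is implemented by ocrdma_eqd_set_task() declared
 * below: when an EQ exceeds ~150k interrupts/sec its delay is stepped up
 * toward EQ_AIC_MAX_EQD, and when it falls below ~100k/sec the delay is
 * stepped back down toward EQ_AIC_MIN_EQD.
 */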

void ocrdma_eqd_set_task(struct work_struct *work);

struct ocrdma_dev_attr {
        u8 fw_ver[32];
        u32 vendor_id;
        u32 device_id;
        u16 max_pd;
        u16 max_dpp_pds;
        u16 max_cq;
        u16 max_cqe;
        u16 max_qp;
        u16 max_wqe;
        u16 max_rqe;
        u16 max_srq;
        u32 max_inline_data;
        int max_send_sge;
        int max_recv_sge;
        int max_srq_sge;
        int max_rdma_sge;
        int max_mr;
        u64 max_mr_size;
        u32 max_num_mr_pbl;
        int max_mw;
        int max_fmr;
        int max_map_per_fmr;
        int max_pages_per_frmr;
        u16 max_ord_per_qp;
        u16 max_ird_per_qp;

        int device_cap_flags;
        u8 cq_overflow_detect;
        u8 srq_supported;

        u32 wqe_size;
        u32 rqe_size;
        u32 ird_page_size;
        u8 local_ca_ack_delay;
        u8 ird;
        u8 num_ird_pages;
        u8 udp_encap;
};

struct ocrdma_dma_mem {
        void *va;
        dma_addr_t pa;
        u32 size;
};

struct ocrdma_pbl {
        void *va;
        dma_addr_t pa;
};

struct ocrdma_queue_info {
        void *va;
        dma_addr_t dma;
        u32 size;
        u16 len;
        u16 entry_size;         /* Size of an element in the queue */
        u16 id;                 /* qid, where to ring the doorbell. */
        u16 head, tail;
        bool created;
};
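/* A minimal sketch of how such a ring is typically walked (illustrative
 * only, not code from this driver): head/tail index into a DMA-coherent
 * array of `len` entries of `entry_size` bytes, advancing modulo the
 * queue length:
 *
 *     void *entry = q->va + q->head * q->entry_size;
 *     q->head = (q->head + 1) % q->len;
 */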

struct ocrdma_aic_obj {         /* Adaptive interrupt coalescing (AIC) info */
        u32 prev_eqd;
        u64 eq_intr_cnt;
        u64 prev_eq_intr_cnt;
};

struct ocrdma_eq {
        struct ocrdma_queue_info q;
        u32 vector;
        int cq_cnt;
        struct ocrdma_dev *dev;
        char irq_name[32];
        struct ocrdma_aic_obj aic_obj;
};

struct ocrdma_mq {
        struct ocrdma_queue_info sq;
        struct ocrdma_queue_info cq;
        bool rearm_cq;
};

struct mqe_ctx {
        struct mutex lock; /* for serializing mailbox commands on MQ */
        wait_queue_head_t cmd_wait;
        u32 tag;
        u16 cqe_status;
        u16 ext_status;
        bool cmd_done;
        bool fw_error_state;
};
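/* These fields suggest the usual synchronous mailbox pattern (an
 * assumption; the actual flow lives in ocrdma_hw.c): take `lock`, post
 * one MQE stamped with `tag`, then sleep on `cmd_wait` until the
 * completion handler matches the tag, fills cqe_status/ext_status and
 * sets cmd_done; fw_error_state short-circuits waiters once the
 * firmware is known to be dead.
 */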

struct ocrdma_hw_mr {
        u32 lkey;
        u8 fr_mr;
        u8 remote_atomic;
        u8 remote_rd;
        u8 remote_wr;
        u8 local_rd;
        u8 local_wr;
        u8 mw_bind;
        u8 rsvd;
        u64 len;
        struct ocrdma_pbl *pbl_table;
        u32 num_pbls;
        u32 num_pbes;
        u32 pbl_size;
        u32 pbe_size;
        u64 fbo;
        u64 va;
};

struct ocrdma_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct ocrdma_hw_mr hwmr;
        u64 *pages;
        u32 npages;
};

struct ocrdma_stats {
        u8 type;
        struct ocrdma_dev *dev;
};

struct ocrdma_pd_resource_mgr {
        u32 pd_norm_start;
        u16 pd_norm_count;
        u16 pd_norm_thrsh;
        u16 max_normal_pd;
        u32 pd_dpp_start;
        u16 pd_dpp_count;
        u16 pd_dpp_thrsh;
        u16 max_dpp_pd;
        u16 dpp_page_index;
        unsigned long *pd_norm_bitmap;
        unsigned long *pd_dpp_bitmap;
        bool pd_prealloc_valid;
};

struct stats_mem {
        struct ocrdma_mqe mqe;
        void *va;
        dma_addr_t pa;
        u32 size;
        char *debugfs_mem;
};

struct phy_info {
        u16 auto_speeds_supported;
        u16 fixed_speeds_supported;
        u16 phy_type;
        u16 interface_type;
};

enum ocrdma_flags {
        OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
};

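/* Per-adapter driver context; presumably one instance per RoCE-capable
 * NIC function registered through the be_roce interface (see the
 * be_dev_info member below).
 */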
struct ocrdma_dev {
        struct ib_device ibdev;
        struct ocrdma_dev_attr attr;

        struct mutex dev_lock; /* provides synchronized access to device data */
        spinlock_t flush_q_lock ____cacheline_aligned;

        struct ocrdma_cq **cq_tbl;
        struct ocrdma_qp **qp_tbl;

        struct ocrdma_eq *eq_tbl;
        int eq_cnt;
        struct delayed_work eqd_work;
        u16 base_eqid;
        u16 max_eq;

        /* provides synchronization to the sgid table for
         * updating gid entries triggered by notifier.
         */
        spinlock_t sgid_lock;

        int gsi_qp_created;
        struct ocrdma_cq *gsi_sqcq;
        struct ocrdma_cq *gsi_rqcq;

        struct {
                struct ocrdma_av *va;
                dma_addr_t pa;
                u32 size;
                u32 num_ah;
                /* provides synchronization for av
                 * entry allocations.
                 */
                spinlock_t lock;
                u32 ahid;
                struct ocrdma_pbl pbl;
        } av_tbl;

        void *mbx_cmd;
        struct ocrdma_mq mq;
        struct mqe_ctx mqe_ctx;

        struct be_dev_info nic_info;
        struct phy_info phy;
        char model_number[32];
        u32 hba_port_num;

        struct list_head entry;
        int id;
        u64 *stag_arr;
        u8 sl; /* service level */
        bool pfc_state;
        atomic_t update_sl;
        u16 pvid;
        u32 asic_id;
        u32 flags;

        ulong last_stats_time;
        struct mutex stats_lock; /* provides sync for debugfs operations */
        struct stats_mem stats_mem;
        struct ocrdma_stats rsrc_stats;
        struct ocrdma_stats rx_stats;
        struct ocrdma_stats wqe_stats;
        struct ocrdma_stats tx_stats;
        struct ocrdma_stats db_err_stats;
        struct ocrdma_stats tx_qp_err_stats;
        struct ocrdma_stats rx_qp_err_stats;
        struct ocrdma_stats tx_dbg_stats;
        struct ocrdma_stats rx_dbg_stats;
        struct ocrdma_stats driver_stats;
        struct ocrdma_stats reset_stats;
        struct dentry *dir;
        atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
        atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
        struct ocrdma_pd_resource_mgr *pd_mgr;
};

struct ocrdma_cq {
        struct ib_cq ibcq;
        struct ocrdma_cqe *va;
        u32 phase;
        u32 getp;       /* index of the next pending wr to
                         * return to the stack; wraps around
                         * at max_hw_cqe
                         */
        u32 max_hw_cqe;
        bool phase_change;
        spinlock_t cq_lock ____cacheline_aligned; /* provides synchronization
                                                   * for cq polling
                                                   */
        /* synchronizes the cq completion handler invoked from multiple contexts */
        spinlock_t comp_handler_lock ____cacheline_aligned;
        u16 id;
        u16 eqn;

        struct ocrdma_ucontext *ucontext;
        dma_addr_t pa;
        u32 len;
        u32 cqe_cnt;

        /* heads of the lists of all QP SQs and RQs whose cqes need to be
         * flushed by software.
         */
        struct list_head sq_head, rq_head;
};

struct ocrdma_pd {
        struct ib_pd ibpd;
        struct ocrdma_ucontext *uctx;
        u32 id;
        int num_dpp_qp;
        u32 dpp_page;
        bool dpp_enabled;
};

struct ocrdma_ah {
        struct ib_ah ibah;
        struct ocrdma_av *av;
        u16 sgid_index;
        u32 id;
        u8 hdr_type;
};

struct ocrdma_qp_hwq_info {
        u8 *va;                 /* virtual address */
        u32 max_sges;
        u32 head, tail;
        u32 entry_size;
        u32 max_cnt;
        u32 max_wqe_idx;
        u16 dbid;               /* qid, where to ring the doorbell. */
        u32 len;
        dma_addr_t pa;
};

struct ocrdma_srq {
        struct ib_srq ibsrq;
        u8 __iomem *db;
        struct ocrdma_qp_hwq_info rq;
        u64 *rqe_wr_id_tbl;
        u32 *idx_bit_fields;
        u32 bit_fields_len;

        /* provides synchronization for multiple contexts posting rqes */
        spinlock_t q_lock ____cacheline_aligned;

        struct ocrdma_pd *pd;
        u32 id;
};

struct ocrdma_qp {
        struct ib_qp ibqp;

        u8 __iomem *sq_db;
        struct ocrdma_qp_hwq_info sq;
        struct {
                uint64_t wrid;
                uint16_t dpp_wqe_idx;
                uint16_t dpp_wqe;
                uint8_t  signaled;
                uint8_t  rsvd[3];
        } *wqe_wr_id_tbl;
        u32 max_inline_data;

        /* provides synchronization for multiple contexts posting wqes and rqes */
        spinlock_t q_lock ____cacheline_aligned;
        struct ocrdma_cq *sq_cq;
        /* list maintained per CQ to flush SQ errors */
        struct list_head sq_entry;

        u8 __iomem *rq_db;
        struct ocrdma_qp_hwq_info rq;
        u64 *rqe_wr_id_tbl;
        struct ocrdma_cq *rq_cq;
        struct ocrdma_srq *srq;
        /* list maintained per CQ to flush RQ errors */
        struct list_head rq_entry;

        enum ocrdma_qp_state state;     /* QP state */
        int cap_flags;
        u32 max_ord, max_ird;

        u32 id;
        struct ocrdma_pd *pd;

        enum ib_qp_type qp_type;

        int sgid_idx;
        u32 qkey;
        bool dpp_enabled;
        u8 *ird_q_va;
        bool signaled;
};

struct ocrdma_ucontext {
        struct ib_ucontext ibucontext;

        struct list_head mm_head;
        struct mutex mm_list_lock; /* protects list entries of mm type */
        struct ocrdma_pd *cntxt_pd;
        int pd_in_use;

        struct {
                u32 *va;
                dma_addr_t pa;
                u32 len;
        } ah_tbl;
};

struct ocrdma_mm {
        struct {
                u64 phy_addr;
                unsigned long len;
        } key;
        struct list_head entry;
};

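/* Each driver object embeds its ib_* core counterpart, so the accessors
 * below recover the outer struct with container_of(). A verbs callback
 * would typically open like this (illustrative sketch; example_poll_cq
 * is a hypothetical name, not a function from this driver):
 *
 *     static int example_poll_cq(struct ib_cq *ibcq, int num_entries,
 *                                struct ib_wc *wc)
 *     {
 *             struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
 *             ...
 *     }
 */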
static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct ocrdma_dev, ibdev);
}

static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext
                                                          *ibucontext)
{
        return container_of(ibucontext, struct ocrdma_ucontext, ibucontext);
}

static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct ocrdma_pd, ibpd);
}

static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct ocrdma_cq, ibcq);
}

static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct ocrdma_qp, ibqp);
}

static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct ocrdma_mr, ibmr);
}

static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah)
{
        return container_of(ibah, struct ocrdma_ah, ibah);
}

static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct ocrdma_srq, ibsrq);
}

static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
        int cqe_valid;
        cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
        return (cqe_valid == cq->phase);
}
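/* The classic phase-bit scheme: hardware toggles the valid bit it writes
 * into CQEs on every pass over the ring, so a CQE is new only while that
 * bit matches cq->phase; software presumably flips cq->phase whenever
 * getp wraps past max_hw_cqe, so no doorbell read is needed to locate
 * the producer index.
 */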

static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_QTYPE) ? 0 : 1;
}

static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_INVALIDATE) ? 1 : 0;
}

static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_IMM) ? 1 : 0;
}

static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
{
        return (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_WRITE_IMM) ? 1 : 0;
}

static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
                struct rdma_ah_attr *ah_attr, u8 *mac_addr)
{
        struct in6_addr in6;

        memcpy(&in6, rdma_ah_read_grh(ah_attr)->dgid.raw, sizeof(in6));
        if (rdma_is_multicast_addr(&in6))
                rdma_get_mcast_mac(&in6, mac_addr);
        else if (rdma_link_local_addr(&in6))
                rdma_get_ll_mac(&in6, mac_addr);
        else
                memcpy(mac_addr, ah_attr->roce.dmac, ETH_ALEN);
        return 0;
}
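/* Three cases for the destination MAC: a multicast GID is mapped
 * algorithmically to a multicast MAC, a link-local GID carries the MAC
 * in its EUI-64 interface identifier, and anything else falls back to
 * the unicast MAC already resolved by the RDMA core in
 * ah_attr->roce.dmac.
 */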

static inline char *hca_name(struct ocrdma_dev *dev)
{
        switch (dev->nic_info.pdev->device) {
        case OC_SKH_DEVICE_PF:
        case OC_SKH_DEVICE_VF:
                return OC_NAME_SH;
        default:
                return OC_NAME_UNKNOWN;
        }
}

static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev,
                int eqid)
{
        int indx;

        for (indx = 0; indx < dev->eq_cnt; indx++) {
                if (dev->eq_tbl[indx].q.id == eqid)
                        return indx;
        }

        return -EINVAL;
}

static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev)
{
        if (dev->nic_info.dev_family == 0xF && !dev->asic_id) {
                pci_read_config_dword(
                        dev->nic_info.pdev,
                        OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id);
        }

        return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >>
                                OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
}
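/* Reads the ASIC ID register from PCI config space at most once (and only
 * for the 0xF device family), caching it in dev->asic_id; the generation
 * number is then extracted with the usual mask-and-shift idiom:
 *
 *     gen = (asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK)
 *                   >> OCRDMA_SLI_ASIC_GEN_NUM_SHIFT;
 */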

static inline u8 ocrdma_get_pfc_prio(u8 *pfc, u8 prio)
{
        return *(pfc + prio);
}

static inline u8 ocrdma_get_app_prio(u8 *app_prio, u8 prio)
{
        return *(app_prio + prio);
}

static inline u8 ocrdma_is_enabled_and_synced(u32 state)
{       /* May also be used to interpret TC-state, QCN-state,
         * Appl-state and Logical-link-state in future.
         */
        return (state & OCRDMA_STATE_FLAG_ENABLED) &&
                (state & OCRDMA_STATE_FLAG_SYNC);
}

static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
{
        return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
}

static inline bool ocrdma_is_udp_encap_supported(struct ocrdma_dev *dev)
{
        return (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV4) ||
               (dev->attr.udp_encap & OCRDMA_L3_TYPE_IPV6);
}
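/* UDP encapsulation here corresponds to RoCE v2 (RDMA over UDP/IP):
 * attr.udp_encap carries per-L3-type capability bits reported by the
 * adapter, so the device supports RoCE v2 if either the IPv4 or the
 * IPv6 bit is set.
 */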

#endif /* __OCRDMA_H__ */
