root/drivers/infiniband/hw/cxgb4/iw_cxgb4.h

DEFINITIONS

This source file includes the following definitions.
  1. cplhdr
  2. c4iw_fatal_error
  3. c4iw_num_stags
  4. c4iw_put_wr_wait
  5. c4iw_get_wr_wait
  6. c4iw_init_wr_wait
  7. _c4iw_wake_up
  8. c4iw_wake_up_noref
  9. c4iw_wake_up_deref
  10. c4iw_wait_for_reply
  11. c4iw_ref_send_wait
  12. to_c4iw_dev
  13. rdev_to_c4iw_dev
  14. get_chp
  15. get_qhp
  16. cur_max_read_depth
  17. to_c4iw_pd
  18. to_c4iw_mr
  19. to_c4iw_mw
  20. to_c4iw_cq
  21. to_c4iw_qp
  22. to_c4iw_srq
  23. to_c4iw_ucontext
  24. remove_mmap
  25. insert_mmap
  26. c4iw_convert_state
  27. to_ib_qp_state
  28. c4iw_ib_to_tpt_access
  29. c4iw_ib_to_tpt_bind_access
  30. to_ep
  31. to_listen_ep
  32. ocqp_supported

/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched/mm.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/rdma_netlink.h>
#include <rdma/iw_portmap.h>
#include <rdma/restrack.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include <rdma/cxgb4-abi.h>

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
        return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1       /* Pseudo-randomize the ids returned */
#define C4IW_ID_TABLE_F_EMPTY  2       /* Table is initially empty */

struct c4iw_id_table {
        u32 flags;
        u32 start;              /* logical minimal id */
        u32 last;               /* hint for find */
        u32 max;
        spinlock_t lock;
        unsigned long *table;
};

struct c4iw_resource {
        struct c4iw_id_table tpt_table;
        struct c4iw_id_table qid_table;
        struct c4iw_id_table pdid_table;
        struct c4iw_id_table srq_table;
};

struct c4iw_qid_list {
        struct list_head entry;
        u32 qid;
};

struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
        struct kref kref;
};

enum c4iw_rdev_flags {
        T4_FATAL_ERROR = (1<<0),
        T4_STATUS_PAGE_DISABLED = (1<<1),
};

struct c4iw_stat {
        u64 total;
        u64 cur;
        u64 max;
        u64 fail;
};

struct c4iw_stats {
        struct mutex lock;
        struct c4iw_stat qid;
        struct c4iw_stat pd;
        struct c4iw_stat stag;
        struct c4iw_stat pbl;
        struct c4iw_stat rqt;
        struct c4iw_stat srqt;
        struct c4iw_stat srq;
        struct c4iw_stat ocqp;
        u64  db_full;
        u64  db_empty;
        u64  db_drop;
        u64  db_state_transitions;
        u64  db_fc_interruptions;
        u64  tcam_full;
        u64  act_ofld_conn_fails;
        u64  pas_ofld_conn_fails;
        u64  neg_adv;
};

struct c4iw_hw_queue {
        int t4_eq_status_entries;
        int t4_max_eq_size;
        int t4_max_iq_size;
        int t4_max_rq_size;
        int t4_max_sq_size;
        int t4_max_qp_depth;
        int t4_max_cq_depth;
        int t4_stat_len;
};

struct wr_log_entry {
        ktime_t post_host_time;
        ktime_t poll_host_time;
        u64 post_sge_ts;
        u64 cqe_sge_ts;
        u64 poll_sge_ts;
        u16 qid;
        u16 wr_id;
        u8 opcode;
        u8 valid;
};

struct c4iw_rdev {
        struct c4iw_resource resource;
        u32 qpmask;
        u32 cqmask;
        struct c4iw_dev_ucontext uctx;
        struct gen_pool *pbl_pool;
        struct gen_pool *rqt_pool;
        struct gen_pool *ocqp_pool;
        u32 flags;
        struct cxgb4_lld_info lldi;
        unsigned long bar2_pa;
        void __iomem *bar2_kva;
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
        struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
        struct workqueue_struct *free_workq;
        struct completion rqt_compl;
        struct completion pbl_compl;
        struct kref rqt_kref;
        struct kref pbl_kref;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
        return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
        return (int)(rdev->lldi.vr->stag.size >> 5);
}
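
/*
 * A note on the shift above: each hardware TPT entry occupies 32 bytes
 * (sizeof(struct fw_ri_tpte)), so dividing the stag region size by 32
 * (>> 5) yields the number of stags the adapter can hold.
 */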

#define C4IW_WR_TO (60*HZ)

struct c4iw_wr_wait {
        struct completion completion;
        int ret;
        struct kref kref;
};

void _c4iw_free_wr_wait(struct kref *kref);

static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
                 kref_read(&wr_waitp->kref));
        WARN_ON(kref_read(&wr_waitp->kref) == 0);
        kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
}

static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
                 kref_read(&wr_waitp->kref));
        WARN_ON(kref_read(&wr_waitp->kref) == 0);
        kref_get(&wr_waitp->kref);
}

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
        wr_waitp->ret = 0;
        init_completion(&wr_waitp->completion);
}

static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
                                 bool deref)
{
        wr_waitp->ret = ret;
        complete(&wr_waitp->completion);
        if (deref)
                c4iw_put_wr_wait(wr_waitp);
}

static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
{
        _c4iw_wake_up(wr_waitp, ret, false);
}

static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
{
        _c4iw_wake_up(wr_waitp, ret, true);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                      struct c4iw_wr_wait *wr_waitp,
                                      u32 hwtid, u32 qpid,
                                      const char *func)
{
        int ret;

        if (c4iw_fatal_error(rdev)) {
                wr_waitp->ret = -EIO;
                goto out;
        }

        ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
        if (!ret) {
                pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
                       func, pci_name(rdev->lldi.pdev), hwtid, qpid);
                rdev->flags |= T4_FATAL_ERROR;
                wr_waitp->ret = -EIO;
                goto out;
        }
        if (wr_waitp->ret)
                pr_debug("%s: FW reply %d tid %u qpid %u\n",
                         pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
out:
        return wr_waitp->ret;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);

static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
                                     struct sk_buff *skb,
                                     struct c4iw_wr_wait *wr_waitp,
                                     u32 hwtid, u32 qpid,
                                     const char *func)
{
        int ret;

        pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
                 qpid);
        c4iw_get_wr_wait(wr_waitp);
        ret = c4iw_ofld_send(rdev, skb);
        if (ret) {
                c4iw_put_wr_wait(wr_waitp);
                return ret;
        }
        return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
}
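
/*
 * Typical calling pattern (a sketch, not a prescribed sequence):
 * callers allocate a refcounted wait object, initialize it, then post
 * the firmware work request and block for the CPL reply:
 *
 *      struct c4iw_wr_wait *wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
 *
 *      if (!wr_waitp)
 *              return -ENOMEM;
 *      c4iw_init_wr_wait(wr_waitp);
 *      ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, qid, __func__);
 *
 * The reply path calls c4iw_wake_up_deref() (or the _noref() variant
 * when the caller keeps its own reference), which completes the waiter.
 */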

enum db_state {
        NORMAL = 0,
        FLOW_CONTROL = 1,
        RECOVERY = 2,
        STOPPED = 3
};

struct c4iw_dev {
        struct ib_device ibdev;
        struct c4iw_rdev rdev;
        u32 device_cap_flags;
        struct xarray cqs;
        struct xarray qps;
        struct xarray mrs;
        struct mutex db_mutex;
        struct dentry *debugfs_root;
        enum db_state db_state;
        struct xarray hwtids;
        struct xarray atids;
        struct xarray stids;
        struct list_head db_fc_list;
        u32 avail_ird;
        wait_queue_head_t wait;
};

struct uld_ctx {
        struct list_head entry;
        struct cxgb4_lld_info lldi;
        struct c4iw_dev *dev;
        struct work_struct reg_work;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
        return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
        return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
        return xa_load(&rhp->cqs, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
        return xa_load(&rhp->qps, qpid);
}

extern uint c4iw_max_read_depth;

static inline int cur_max_read_depth(struct c4iw_dev *dev)
{
        return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
}
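
/*
 * cur_max_read_depth() clamps the c4iw_max_read_depth module parameter
 * to the per-QP ORD/IRD limit the adapter actually advertises, so a
 * user-supplied value can never exceed the hardware capability.
 */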

struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
        struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
        return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
        u64 len;
        u64 va_fbo;
        enum fw_ri_mem_perms perms;
        u32 stag;
        u32 pdid;
        u32 qpid;
        u32 pbl_addr;
        u32 pbl_size;
        u32 state:1;
        u32 type:2;
        u32 rsvd:1;
        u32 remote_invaliate_disable:1;
        u32 zbva:1;
        u32 mw_bind_enable:1;
        u32 page_size:5;
};

struct c4iw_mr {
        struct ib_mr ibmr;
        struct ib_umem *umem;
        struct c4iw_dev *rhp;
        struct sk_buff *dereg_skb;
        u64 kva;
        struct tpt_attributes attr;
        u64 *mpl;
        dma_addr_t mpl_addr;
        u32 max_mpl_len;
        u32 mpl_len;
        struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
        struct ib_mw ibmw;
        struct c4iw_dev *rhp;
        struct sk_buff *dereg_skb;
        u64 kva;
        struct tpt_attributes attr;
        struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
        return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_cq {
        struct ib_cq ibcq;
        struct c4iw_dev *rhp;
        struct sk_buff *destroy_skb;
        struct t4_cq cq;
        spinlock_t lock;
        spinlock_t comp_handler_lock;
        atomic_t refcnt;
        wait_queue_head_t wait;
        struct c4iw_wr_wait *wr_waitp;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
        u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled;
        u8 crc_enabled;
        u8 enhanced_rdma_conn;
        u8 version;
        u8 p2p_type;
};

struct c4iw_qp_attributes {
        u32 scq;
        u32 rcq;
        u32 sq_num_entries;
        u32 rq_num_entries;
        u32 sq_max_sges;
        u32 sq_max_sges_rdma_write;
        u32 rq_max_sges;
        u32 state;
        u8 enable_rdma_read;
        u8 enable_rdma_write;
        u8 enable_bind;
        u8 enable_mmid0_fastreg;
        u32 max_ord;
        u32 max_ird;
        u32 pd;
        u32 next_state;
        char terminate_buffer[52];
        u32 terminate_msg_len;
        u8 is_terminate_local;
        struct c4iw_mpa_attributes mpa_attr;
        struct c4iw_ep *llp_stream_handle;
        u8 layer_etype;
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
        u8 send_term;
};

struct c4iw_qp {
        struct ib_qp ibqp;
        struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct c4iw_ep *ep;
        struct c4iw_qp_attributes attr;
        struct t4_wq wq;
        spinlock_t lock;
        struct mutex mutex;
        wait_queue_head_t wait;
        int sq_sig_all;
        struct c4iw_srq *srq;
        struct c4iw_ucontext *ucontext;
        struct c4iw_wr_wait *wr_waitp;
        struct completion qp_rel_comp;
        refcount_t qp_refcnt;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
        return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_srq {
        struct ib_srq ibsrq;
        struct list_head db_fc_entry;
        struct c4iw_dev *rhp;
        struct t4_srq wq;
        struct sk_buff *destroy_skb;
        u32 srq_limit;
        u32 pdid;
        int idx;
        u32 flags;
        spinlock_t lock; /* protects srq */
        struct c4iw_wr_wait *wr_waitp;
        bool armed;
};

static inline struct c4iw_srq *to_c4iw_srq(struct ib_srq *ibsrq)
{
        return container_of(ibsrq, struct c4iw_srq, ibsrq);
}

struct c4iw_ucontext {
        struct ib_ucontext ibucontext;
        struct c4iw_dev_ucontext uctx;
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
        bool is_32b_cqe;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
        return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
        u32 key;
        unsigned int len;
};

static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
                                                u32 key, unsigned int len)
{
        struct list_head *pos, *nxt;
        struct c4iw_mm_entry *mm;

        spin_lock(&ucontext->mmap_lock);
        list_for_each_safe(pos, nxt, &ucontext->mmaps) {
                mm = list_entry(pos, struct c4iw_mm_entry, entry);
                if (mm->key == key && mm->len == len) {
                        list_del_init(&mm->entry);
                        spin_unlock(&ucontext->mmap_lock);
                        pr_debug("key 0x%x addr 0x%llx len %d\n", key,
                                 (unsigned long long)mm->addr, mm->len);
                        return mm;
                }
        }
        spin_unlock(&ucontext->mmap_lock);
        return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
                               struct c4iw_mm_entry *mm)
{
        spin_lock(&ucontext->mmap_lock);
        pr_debug("key 0x%x addr 0x%llx len %d\n",
                 mm->key, (unsigned long long)mm->addr, mm->len);
        list_add_tail(&mm->entry, &ucontext->mmaps);
        spin_unlock(&ucontext->mmap_lock);
}
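
/*
 * The (key, len) pairs stashed by insert_mmap() act as a rendezvous
 * with userspace: the key is handed out as an mmap offset, and the
 * driver's mmap handler later recovers the physical address.  A sketch
 * of the lookup side (the actual handler lives in provider.c):
 *
 *      key = vma->vm_pgoff << PAGE_SHIFT;
 *      len = vma->vm_end - vma->vm_start;
 *      mm = remove_mmap(ucontext, key, len);
 *      if (!mm)
 *              return -EINVAL;
 */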

enum c4iw_qp_attr_mask {
        C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
        C4IW_QP_ATTR_SQ_DB = 1 << 1,
        C4IW_QP_ATTR_RQ_DB = 1 << 2,
        C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
        C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
        C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
        C4IW_QP_ATTR_MAX_ORD = 1 << 11,
        C4IW_QP_ATTR_MAX_IRD = 1 << 12,
        C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
        C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
        C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
        C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
        C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                                     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                                     C4IW_QP_ATTR_MAX_ORD |
                                     C4IW_QP_ATTR_MAX_IRD |
                                     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
                                     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
                                     C4IW_QP_ATTR_MPA_ATTR |
                                     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
                   struct c4iw_qp *qhp,
                   enum c4iw_qp_attr_mask mask,
                   struct c4iw_qp_attributes *attrs,
                   int internal);

enum c4iw_qp_state {
        C4IW_QP_STATE_IDLE,
        C4IW_QP_STATE_RTS,
        C4IW_QP_STATE_ERROR,
        C4IW_QP_STATE_TERMINATE,
        C4IW_QP_STATE_CLOSING,
        C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
        switch (ib_state) {
        case IB_QPS_RESET:
        case IB_QPS_INIT:
                return C4IW_QP_STATE_IDLE;
        case IB_QPS_RTS:
                return C4IW_QP_STATE_RTS;
        case IB_QPS_SQD:
                return C4IW_QP_STATE_CLOSING;
        case IB_QPS_SQE:
                return C4IW_QP_STATE_TERMINATE;
        case IB_QPS_ERR:
                return C4IW_QP_STATE_ERROR;
        default:
                return -1;
        }
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
        switch (c4iw_qp_state) {
        case C4IW_QP_STATE_IDLE:
                return IB_QPS_INIT;
        case C4IW_QP_STATE_RTS:
                return IB_QPS_RTS;
        case C4IW_QP_STATE_CLOSING:
                return IB_QPS_SQD;
        case C4IW_QP_STATE_TERMINATE:
                return IB_QPS_SQE;
        case C4IW_QP_STATE_ERROR:
                return IB_QPS_ERR;
        }
        return IB_QPS_ERR;
}

static inline u32 c4iw_ib_to_tpt_access(int a)
{
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
               (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
               FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}
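
/*
 * Worked example: registering an MR with
 * IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE yields
 * FW_RI_MEM_ACCESS_LOCAL_WRITE | FW_RI_MEM_ACCESS_REM_WRITE |
 * FW_RI_MEM_ACCESS_LOCAL_READ; local read permission is always
 * granted, matching the IB verbs rule that it is implicit.
 */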

enum c4iw_mmid_state {
        C4IW_STAG_STATE_VALID,
        C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA    256
#define MPA_ENHANCED_RDMA_CONN  0x10
#define MPA_REJECT              0x20
#define MPA_CRC                 0x40
#define MPA_MARKERS             0x80
#define MPA_FLAGS_MASK          0xE0

#define MPA_V2_PEER2PEER_MODEL          0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR        0x4000
#define MPA_V2_RDMA_WRITE_RTR           0x8000
#define MPA_V2_RDMA_READ_RTR            0x4000
#define MPA_V2_IRD_ORD_MASK             0x3FFF

#define c4iw_put_ep(ep) {                                               \
        pr_debug("put_ep ep %p refcnt %d\n",                            \
                 ep, kref_read(&((ep)->kref)));                         \
        WARN_ON(kref_read(&((ep)->kref)) < 1);                          \
        kref_put(&((ep)->kref), _c4iw_free_ep);                         \
}

#define c4iw_get_ep(ep) {                                               \
        pr_debug("get_ep ep %p, refcnt %d\n",                           \
                 ep, kref_read(&((ep)->kref)));                         \
        kref_get(&((ep)->kref));                                        \
}
void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
        u8 key[16];
        u8 flags;
        u8 revision;
        __be16 private_data_size;
        u8 private_data[];
};

struct mpa_v2_conn_params {
        __be16 ird;
        __be16 ord;
};

struct terminate_message {
        u8 layer_etype;
        u8 ecode;
        __be16 hdrct_rsvd;
        u8 len_hdrs[];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
        LAYER_RDMAP             = 0x00,
        LAYER_DDP               = 0x10,
        LAYER_MPA               = 0x20,
        RDMAP_LOCAL_CATA        = 0x00,
        RDMAP_REMOTE_PROT       = 0x01,
        RDMAP_REMOTE_OP         = 0x02,
        DDP_LOCAL_CATA          = 0x00,
        DDP_TAGGED_ERR          = 0x01,
        DDP_UNTAGGED_ERR        = 0x02,
        DDP_LLP                 = 0x03
};

enum c4iw_rdma_ecodes {
        RDMAP_INV_STAG          = 0x00,
        RDMAP_BASE_BOUNDS       = 0x01,
        RDMAP_ACC_VIOL          = 0x02,
        RDMAP_STAG_NOT_ASSOC    = 0x03,
        RDMAP_TO_WRAP           = 0x04,
        RDMAP_INV_VERS          = 0x05,
        RDMAP_INV_OPCODE        = 0x06,
        RDMAP_STREAM_CATA       = 0x07,
        RDMAP_GLOBAL_CATA       = 0x08,
        RDMAP_CANT_INV_STAG     = 0x09,
        RDMAP_UNSPECIFIED       = 0xff
};

enum c4iw_ddp_ecodes {
        DDPT_INV_STAG           = 0x00,
        DDPT_BASE_BOUNDS        = 0x01,
        DDPT_STAG_NOT_ASSOC     = 0x02,
        DDPT_TO_WRAP            = 0x03,
        DDPT_INV_VERS           = 0x04,
        DDPU_INV_QN             = 0x01,
        DDPU_INV_MSN_NOBUF      = 0x02,
        DDPU_INV_MSN_RANGE      = 0x03,
        DDPU_INV_MO             = 0x04,
        DDPU_MSG_TOOBIG         = 0x05,
        DDPU_INV_VERS           = 0x06
};

enum c4iw_mpa_ecodes {
        MPA_CRC_ERR             = 0x02,
        MPA_MARKER_ERR          = 0x03,
        MPA_LOCAL_CATA          = 0x05,
        MPA_INSUFF_IRD          = 0x06,
        MPA_NOMATCH_RTR         = 0x07,
};

enum c4iw_ep_state {
        IDLE = 0,
        LISTEN,
        CONNECTING,
        MPA_REQ_WAIT,
        MPA_REQ_SENT,
        MPA_REQ_RCVD,
        MPA_REP_SENT,
        FPDU_MODE,
        ABORTING,
        CLOSING,
        MORIBUND,
        DEAD,
};

enum c4iw_ep_flags {
        PEER_ABORT_IN_PROGRESS  = 0,
        ABORT_REQ_IN_PROGRESS   = 1,
        RELEASE_RESOURCES       = 2,
        CLOSE_SENT              = 3,
        TIMEOUT                 = 4,
        QP_REFERENCED           = 5,
        STOP_MPA_TIMER          = 7,
};

enum c4iw_ep_history {
        ACT_OPEN_REQ            = 0,
        ACT_OFLD_CONN           = 1,
        ACT_OPEN_RPL            = 2,
        ACT_ESTAB               = 3,
        PASS_ACCEPT_REQ         = 4,
        PASS_ESTAB              = 5,
        ABORT_UPCALL            = 6,
        ESTAB_UPCALL            = 7,
        CLOSE_UPCALL            = 8,
        ULP_ACCEPT              = 9,
        ULP_REJECT              = 10,
        TIMEDOUT                = 11,
        PEER_ABORT              = 12,
        PEER_CLOSE              = 13,
        CONNREQ_UPCALL          = 14,
        ABORT_CONN              = 15,
        DISCONN_UPCALL          = 16,
        EP_DISC_CLOSE           = 17,
        EP_DISC_ABORT           = 18,
        CONN_RPL_UPCALL         = 19,
        ACT_RETRY_NOMEM         = 20,
        ACT_RETRY_INUSE         = 21,
        CLOSE_CON_RPL           = 22,
        EP_DISC_FAIL            = 24,
        QP_REFED                = 25,
        QP_DEREFED              = 26,
        CM_ID_REFED             = 27,
        CM_ID_DEREFED           = 28,
};

enum conn_pre_alloc_buffers {
        CN_ABORT_REQ_BUF,
        CN_ABORT_RPL_BUF,
        CN_CLOSE_CON_REQ_BUF,
        CN_DESTROY_BUF,
        CN_FLOWC_BUF,
        CN_MAX_CON_BUF
};

enum {
        FLOWC_LEN = offsetof(struct fw_flowc_wr, mnemval[FW_FLOWC_MNEM_MAX])
};

union cpl_wr_size {
        struct cpl_abort_req abrt_req;
        struct cpl_abort_rpl abrt_rpl;
        struct fw_ri_wr ri_req;
        struct cpl_close_con_req close_req;
        char flowc_buf[FLOWC_LEN];
};
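
/*
 * The union above sizes one pre-allocated skb per conn_pre_alloc_buffers
 * slot.  The intent (as the CN_*_BUF names suggest) is that teardown
 * messages such as aborts, close requests, and the FLOWC work request
 * can always be sent from ep_skb_list without risking an allocation
 * failure on a dying connection.
 */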

struct c4iw_ep_common {
        struct iw_cm_id *cm_id;
        struct c4iw_qp *qp;
        struct c4iw_dev *dev;
        struct sk_buff_head ep_skb_list;
        enum c4iw_ep_state state;
        struct kref kref;
        struct mutex mutex;
        struct sockaddr_storage local_addr;
        struct sockaddr_storage remote_addr;
        struct c4iw_wr_wait *wr_waitp;
        unsigned long flags;
        unsigned long history;
};

struct c4iw_listen_ep {
        struct c4iw_ep_common com;
        unsigned int stid;
        int backlog;
};

struct c4iw_ep_stats {
        unsigned int connect_neg_adv;
        unsigned int abort_neg_adv;
};

struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
        struct timer_list timer;
        struct list_head entry;
        unsigned int atid;
        u32 hwtid;
        u32 snd_seq;
        u32 rcv_seq;
        struct l2t_entry *l2t;
        struct dst_entry *dst;
        struct sk_buff *mpa_skb;
        struct c4iw_mpa_attributes mpa_attr;
        u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
        unsigned int mpa_pkt_len;
        u32 ird;
        u32 ord;
        u32 smac_idx;
        u32 tx_chan;
        u32 mtu;
        u16 mss;
        u16 emss;
        u16 plen;
        u16 rss_qid;
        u16 txq_idx;
        u16 ctrlq_idx;
        u8 tos;
        u8 retry_with_mpa_v1;
        u8 tried_with_mpa_v1;
        unsigned int retry_count;
        int snd_win;
        int rcv_win;
        u32 snd_wscale;
        struct c4iw_ep_stats stats;
        u32 srqe_idx;
        u32 rx_pdu_out_cnt;
        struct sk_buff *peer_abort_skb;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
        return cm_id->provider_data;
}

static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
{
#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
        return infop->vr->ocq.size > 0;
#else
        return 0;
#endif
}
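
/*
 * On-chip queue (OCQP) support is gated to x86 and PPC64 builds,
 * presumably because on-chip queues depend on write-combining mappings
 * of adapter memory, which are known to behave on these architectures.
 * Elsewhere the feature is compiled out entirely.
 */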

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
                        u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
                     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt,
                       u32 nr_pdid, u32 nr_srqt);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
void c4iw_register_device(struct work_struct *work);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
                            struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
                   const struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                            u32 max_num_sg, struct ib_udata *udata);
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                   unsigned int *sg_offset);
int c4iw_dealloc_mw(struct ib_mw *mw);
void c4iw_dealloc(struct uld_ctx *ctx);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                            struct ib_udata *udata);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
                               u64 length, u64 virt, int acc,
                               struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
int c4iw_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
void c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                   struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
                    enum ib_srq_attr_mask srq_attr_mask,
                    struct ib_udata *udata);
void c4iw_destroy_srq(struct ib_srq *ib_srq, struct ib_udata *udata);
int c4iw_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *attrs,
                    struct ib_udata *udata);
int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
                             struct ib_qp_init_attr *attrs,
                             struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
                              enum cxgb4_bar2_qtype qtype,
                              unsigned int *pbar2_qid, u64 *pbar2_pa);
int c4iw_alloc_srq_idx(struct c4iw_rdev *rdev);
void c4iw_free_srq_idx(struct c4iw_rdev *rdev, int idx);
extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq);
void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16);
void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);
int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
                       const struct ib_recv_wr **bad_wr);
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);

typedef int c4iw_restrack_func(struct sk_buff *msg,
                               struct rdma_restrack_entry *res);
extern c4iw_restrack_func *c4iw_restrack_funcs[RDMA_RESTRACK_MAX];

#endif
