root/drivers/ntb/hw/intel/ntb_hw_gen1.c

DEFINITIONS

This source file includes the following definitions:
  1. ndev_reset_unsafe_flags
  2. ndev_is_unsafe
  3. ndev_ignore_unsafe
  4. ndev_mw_to_bar
  5. ndev_db_addr
  6. ndev_db_read
  7. ndev_db_write
  8. ndev_db_set_mask
  9. ndev_db_clear_mask
  10. ndev_vec_mask
  11. ndev_spad_addr
  12. ndev_spad_read
  13. ndev_spad_write
  14. ndev_interrupt
  15. ndev_vec_isr
  16. ndev_irq_isr
  17. ndev_init_isr
  18. ndev_deinit_isr
  19. ndev_ntb_debugfs_read
  20. ndev_debugfs_read
  21. ndev_init_debugfs
  22. ndev_deinit_debugfs
  23. intel_ntb_mw_count
  24. intel_ntb_mw_get_align
  25. intel_ntb_mw_set_trans
  26. intel_ntb_link_is_up
  27. intel_ntb_link_enable
  28. intel_ntb_link_disable
  29. intel_ntb_peer_mw_count
  30. intel_ntb_peer_mw_get_addr
  31. intel_ntb_db_is_unsafe
  32. intel_ntb_db_valid_mask
  33. intel_ntb_db_vector_count
  34. intel_ntb_db_vector_mask
  35. intel_ntb_db_read
  36. intel_ntb_db_clear
  37. intel_ntb_db_set_mask
  38. intel_ntb_db_clear_mask
  39. intel_ntb_peer_db_addr
  40. intel_ntb_peer_db_set
  41. intel_ntb_spad_is_unsafe
  42. intel_ntb_spad_count
  43. intel_ntb_spad_read
  44. intel_ntb_spad_write
  45. intel_ntb_peer_spad_addr
  46. intel_ntb_peer_spad_read
  47. intel_ntb_peer_spad_write
  48. xeon_db_ioread
  49. xeon_db_iowrite
  50. xeon_poll_link
  51. xeon_link_is_up
  52. xeon_ppd_topo
  53. xeon_ppd_bar4_split
  54. xeon_init_isr
  55. xeon_deinit_isr
  56. xeon_setup_b2b_mw
  57. xeon_init_ntb
  58. xeon_init_dev
  59. xeon_deinit_dev
  60. intel_ntb_init_pci
  61. intel_ntb_deinit_pci
  62. ndev_init_struct
  63. intel_ntb_pci_probe
  64. intel_ntb_pci_remove
  65. intel_ntb_pci_driver_init
  66. intel_ntb_pci_driver_exit

/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 *   redistributing this file, you may do so under either license.
 *
 *   GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   BSD LICENSE
 *
 *   Copyright(c) 2012 Intel Corporation. All rights reserved.
 *   Copyright (C) 2015 EMC Corporation. All Rights Reserved.
 *   Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Intel PCIe NTB Linux driver
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ntb.h>

#include "ntb_hw_intel.h"
#include "ntb_hw_gen1.h"
#include "ntb_hw_gen3.h"

#define NTB_NAME        "ntb_hw_intel"
#define NTB_DESC        "Intel(R) PCI-E Non-Transparent Bridge Driver"
#define NTB_VER         "2.0"

MODULE_DESCRIPTION(NTB_DESC);
MODULE_VERSION(NTB_VER);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");

#define bar0_off(base, bar) ((base) + ((bar) << 2))
#define bar2_off(base, bar) bar0_off(base, (bar) - 2)

static const struct intel_ntb_reg xeon_reg;
static const struct intel_ntb_alt_reg xeon_pri_reg;
static const struct intel_ntb_alt_reg xeon_sec_reg;
static const struct intel_ntb_alt_reg xeon_b2b_reg;
static const struct intel_ntb_xlat_reg xeon_pri_xlat;
static const struct intel_ntb_xlat_reg xeon_sec_xlat;
static const struct ntb_dev_ops intel_ntb_ops;

static const struct file_operations intel_ntb_debugfs_info;
static struct dentry *debugfs_dir;

static int b2b_mw_idx = -1;
module_param(b2b_mw_idx, int, 0644);
MODULE_PARM_DESC(b2b_mw_idx, "Use this mw idx to access the peer ntb.  A "
                 "value of zero or positive starts from first mw idx, and a "
                 "negative value starts from last mw idx.  Both sides MUST "
                 "set the same value here!");

static unsigned int b2b_mw_share;
module_param(b2b_mw_share, uint, 0644);
MODULE_PARM_DESC(b2b_mw_share, "If the b2b mw is large enough, configure the "
                 "ntb so that the peer ntb only occupies the first half of "
                 "the mw, so the second half can still be used as a mw.  Both "
                 "sides MUST set the same value here!");

module_param_named(xeon_b2b_usd_bar2_addr64,
                   xeon_b2b_usd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64,
                 "XEON B2B USD BAR 2 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr64,
                   xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,
                 "XEON B2B USD BAR 4 64-bit address");

module_param_named(xeon_b2b_usd_bar4_addr32,
                   xeon_b2b_usd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32,
                 "XEON B2B USD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_usd_bar5_addr32,
                   xeon_b2b_usd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32,
                 "XEON B2B USD split-BAR 5 32-bit address");

module_param_named(xeon_b2b_dsd_bar2_addr64,
                   xeon_b2b_dsd_addr.bar2_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64,
                 "XEON B2B DSD BAR 2 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr64,
                   xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64,
                 "XEON B2B DSD BAR 4 64-bit address");

module_param_named(xeon_b2b_dsd_bar4_addr32,
                   xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32,
                 "XEON B2B DSD split-BAR 4 32-bit address");

module_param_named(xeon_b2b_dsd_bar5_addr32,
                   xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644);
MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32,
                 "XEON B2B DSD split-BAR 5 32-bit address");

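/*
 * Example usage of the parameters above (illustrative only; both hosts MUST
 * load the module with the same values):
 *
 *   modprobe ntb_hw_intel b2b_mw_idx=-1 b2b_mw_share=1
 *
 * selects the last memory window for peer register access and, if it is
 * large enough, halves it so the second half remains usable as a data
 * window.
 */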

static int xeon_init_isr(struct intel_ntb_dev *ndev);

static inline void ndev_reset_unsafe_flags(struct intel_ntb_dev *ndev)
{
        ndev->unsafe_flags = 0;
        ndev->unsafe_flags_ignore = 0;

        /* Only B2B has a workaround to avoid SDOORBELL */
        if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP)
                if (!ntb_topo_is_b2b(ndev->ntb.topo))
                        ndev->unsafe_flags |= NTB_UNSAFE_DB;

        /* No low level workaround to avoid SB01BASE */
        if (ndev->hwerr_flags & NTB_HWERR_SB01BASE_LOCKUP) {
                ndev->unsafe_flags |= NTB_UNSAFE_DB;
                ndev->unsafe_flags |= NTB_UNSAFE_SPAD;
        }
}

static inline int ndev_is_unsafe(struct intel_ntb_dev *ndev,
                                 unsigned long flag)
{
        return !!(flag & ndev->unsafe_flags & ~ndev->unsafe_flags_ignore);
}

static inline int ndev_ignore_unsafe(struct intel_ntb_dev *ndev,
                                     unsigned long flag)
{
        flag &= ndev->unsafe_flags;
        ndev->unsafe_flags_ignore |= flag;

        return !!flag;
}

int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
{
        if (idx < 0 || idx >= ndev->mw_count)
                return -EINVAL;
        return ndev->reg->mw_bar[idx];
}

void ndev_db_addr(struct intel_ntb_dev *ndev,
                  phys_addr_t *db_addr, resource_size_t *db_size,
                  phys_addr_t reg_addr, unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access\n", __func__);

        if (db_addr) {
                *db_addr = reg_addr + reg;
                dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr);
        }

        if (db_size) {
                *db_size = ndev->reg->db_size;
                dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
        }
}

u64 ndev_db_read(struct intel_ntb_dev *ndev,
                 void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access\n", __func__);

        return ndev->reg->db_ioread(mmio);
}

int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
                  void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access\n", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        ndev->reg->db_iowrite(db_bits, mmio);

        return 0;
}

static inline int ndev_db_set_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                   void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access\n", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask |= db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline int ndev_db_clear_mask(struct intel_ntb_dev *ndev, u64 db_bits,
                                     void __iomem *mmio)
{
        unsigned long irqflags;

        if (ndev_is_unsafe(ndev, NTB_UNSAFE_DB))
                pr_warn_once("%s: NTB unsafe doorbell access\n", __func__);

        if (db_bits & ~ndev->db_valid_mask)
                return -EINVAL;

        spin_lock_irqsave(&ndev->db_mask_lock, irqflags);
        {
                ndev->db_mask &= ~db_bits;
                ndev->reg->db_iowrite(ndev->db_mask, mmio);
        }
        spin_unlock_irqrestore(&ndev->db_mask_lock, irqflags);

        return 0;
}

static inline u64 ndev_vec_mask(struct intel_ntb_dev *ndev, int db_vector)
{
        u64 shift, mask;

        shift = ndev->db_vec_shift;
        mask = BIT_ULL(shift) - 1;

        return mask << (shift * db_vector);
}
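/*
 * Worked example (illustrative): with db_vec_shift == 5, each vector owns a
 * contiguous 5-bit slice of the doorbell register.  mask = BIT_ULL(5) - 1 =
 * 0x1f, so vector 0 maps to bits 0x1f, vector 1 to 0x1f << 5 = 0x3e0, and so
 * on.  Callers AND the result with db_valid_mask to drop bits the hardware
 * does not implement.
 */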

static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx,
                                 phys_addr_t *spad_addr, phys_addr_t reg_addr,
                                 unsigned long reg)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access\n", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        if (spad_addr) {
                *spad_addr = reg_addr + reg + (idx << 2);
                dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n",
                        *spad_addr);
        }

        return 0;
}

static inline u32 ndev_spad_read(struct intel_ntb_dev *ndev, int idx,
                                 void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access\n", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return 0;

        return ioread32(mmio + (idx << 2));
}

static inline int ndev_spad_write(struct intel_ntb_dev *ndev, int idx, u32 val,
                                  void __iomem *mmio)
{
        if (ndev_is_unsafe(ndev, NTB_UNSAFE_SPAD))
                pr_warn_once("%s: NTB unsafe scratchpad access\n", __func__);

        if (idx < 0 || idx >= ndev->spad_count)
                return -EINVAL;

        iowrite32(val, mmio + (idx << 2));

        return 0;
}
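/*
 * Note: the scratchpads are 32-bit registers packed at a 4-byte stride,
 * hence the "idx << 2" offset arithmetic above (spad 0 at +0x0, spad 1 at
 * +0x4, and so on).
 */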

static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec)
{
        u64 vec_mask;

        vec_mask = ndev_vec_mask(ndev, vec);

        if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31))
                vec_mask |= ndev->db_link_mask;

        dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask);

        ndev->last_ts = jiffies;

        if (vec_mask & ndev->db_link_mask) {
                if (ndev->reg->poll_link(ndev))
                        ntb_link_event(&ndev->ntb);
        }

        if (vec_mask & ndev->db_valid_mask)
                ntb_db_event(&ndev->ntb, vec);

        return IRQ_HANDLED;
}
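/*
 * The NTB_HWERR_MSIX_VECTOR32_BAD check above folds the link-status doorbell
 * into vector 31, so a link poll is performed there even though the vector's
 * own slice of doorbell bits would not otherwise include the link bit.
 */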

static irqreturn_t ndev_vec_isr(int irq, void *dev)
{
        struct intel_ntb_vec *nvec = dev;

        dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d  nvec->num: %d\n",
                irq, nvec->num);

        return ndev_interrupt(nvec->ndev, nvec->num);
}

static irqreturn_t ndev_irq_isr(int irq, void *dev)
{
        struct intel_ntb_dev *ndev = dev;

        return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq);
}

int ndev_init_isr(struct intel_ntb_dev *ndev,
                  int msix_min, int msix_max,
                  int msix_shift, int total_shift)
{
        struct pci_dev *pdev;
        int rc, i, msix_count, node;

        pdev = ndev->ntb.pdev;

        node = dev_to_node(&pdev->dev);

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        /* Try to set up msix irq */

        ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec),
                                 GFP_KERNEL, node);
        if (!ndev->vec)
                goto err_msix_vec_alloc;

        ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix),
                                  GFP_KERNEL, node);
        if (!ndev->msix)
                goto err_msix_alloc;

        for (i = 0; i < msix_max; ++i)
                ndev->msix[i].entry = i;

        msix_count = pci_enable_msix_range(pdev, ndev->msix,
                                           msix_min, msix_max);
        if (msix_count < 0)
                goto err_msix_enable;

        for (i = 0; i < msix_count; ++i) {
                ndev->vec[i].ndev = ndev;
                ndev->vec[i].num = i;
                rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0,
                                 "ndev_vec_isr", &ndev->vec[i]);
                if (rc)
                        goto err_msix_request;
        }

        dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count);
        ndev->db_vec_count = msix_count;
        ndev->db_vec_shift = msix_shift;
        return 0;

err_msix_request:
        while (i-- > 0)
                free_irq(ndev->msix[i].vector, &ndev->vec[i]);
        pci_disable_msix(pdev);
err_msix_enable:
        kfree(ndev->msix);
err_msix_alloc:
        kfree(ndev->vec);
err_msix_vec_alloc:
        ndev->msix = NULL;
        ndev->vec = NULL;

        /* Try to set up msi irq */

        rc = pci_enable_msi(pdev);
        if (rc)
                goto err_msi_enable;

        rc = request_irq(pdev->irq, ndev_irq_isr, 0,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_msi_request;

        dev_dbg(&pdev->dev, "Using msi interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_msi_request:
        pci_disable_msi(pdev);
err_msi_enable:

        /* Try to set up intx irq */

        pci_intx(pdev, 1);

        rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED,
                         "ndev_irq_isr", ndev);
        if (rc)
                goto err_intx_request;

        dev_dbg(&pdev->dev, "Using intx interrupts\n");
        ndev->db_vec_count = 1;
        ndev->db_vec_shift = total_shift;
        return 0;

err_intx_request:
        return rc;
}
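/*
 * The error labels above intentionally fall through rather than return: IRQ
 * setup degrades from MSI-X (one vector per group of doorbell bits), to a
 * single MSI vector, to legacy INTx, and only fails if all three are
 * unavailable.  In the single-vector cases, db_vec_shift is widened to
 * total_shift so the one vector's mask covers every doorbell bit.
 */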

static void ndev_deinit_isr(struct intel_ntb_dev *ndev)
{
        struct pci_dev *pdev;
        int i;

        pdev = ndev->ntb.pdev;

        /* Mask all doorbell interrupts */
        ndev->db_mask = ndev->db_valid_mask;
        ndev->reg->db_iowrite(ndev->db_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_mask);

        if (ndev->msix) {
                i = ndev->db_vec_count;
                while (i--)
                        free_irq(ndev->msix[i].vector, &ndev->vec[i]);
                pci_disable_msix(pdev);
                kfree(ndev->msix);
                kfree(ndev->vec);
        } else {
                free_irq(pdev->irq, ndev);
                if (pci_dev_msi_enabled(pdev))
                        pci_disable_msi(pdev);
        }
}

static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf,
                                     size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev;
        struct pci_dev *pdev;
        void __iomem *mmio;
        char *buf;
        size_t buf_size;
        ssize_t ret, off;
        union { u64 v64; u32 v32; u16 v16; u8 v8; } u;

        ndev = filp->private_data;
        pdev = ndev->ntb.pdev;
        mmio = ndev->self_mmio;

        buf_size = min(count, 0x800ul);

        buf = kmalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        off = 0;

        off += scnprintf(buf + off, buf_size - off,
                         "NTB Device Information:\n");

        off += scnprintf(buf + off, buf_size - off,
                         "Connection Topology -\t%s\n",
                         ntb_topo_string(ndev->ntb.topo));

        if (ndev->b2b_idx != UINT_MAX) {
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B MW Idx -\t\t%u\n", ndev->b2b_idx);
                off += scnprintf(buf + off, buf_size - off,
                                 "B2B Offset -\t\t%#lx\n", ndev->b2b_off);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "BAR4 Split -\t\t%s\n",
                         ndev->bar4_split ? "yes" : "no");

        off += scnprintf(buf + off, buf_size - off,
                         "NTB CTL -\t\t%#06x\n", ndev->ntb_ctl);
        off += scnprintf(buf + off, buf_size - off,
                         "LNK STA -\t\t%#06x\n", ndev->lnk_sta);

        if (!ndev->reg->link_is_up(ndev)) {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tDown\n");
        } else {
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Status -\t\tUp\n");
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Speed -\t\tPCI-E Gen %u\n",
                                 NTB_LNK_STA_SPEED(ndev->lnk_sta));
                off += scnprintf(buf + off, buf_size - off,
                                 "Link Width -\t\tx%u\n",
                                 NTB_LNK_STA_WIDTH(ndev->lnk_sta));
        }

        off += scnprintf(buf + off, buf_size - off,
                         "Memory Window Count -\t%u\n", ndev->mw_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Scratchpad Count -\t%u\n", ndev->spad_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Count -\t%u\n", ndev->db_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Count -\t%u\n", ndev->db_vec_count);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Vector Shift -\t%u\n", ndev->db_vec_shift);

        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Link Mask -\t%#llx\n", ndev->db_link_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask Cached -\t%#llx\n", ndev->db_mask);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_mask);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Mask -\t\t%#llx\n", u.v64);

        u.v64 = ndev_db_read(ndev, mmio + ndev->self_reg->db_bell);
        off += scnprintf(buf + off, buf_size - off,
                         "Doorbell Bell -\t\t%#llx\n", u.v64);

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Window Size:\n");

        pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "PBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "PBAR5SZ %hhu\n", u.v8);
        }

        pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &u.v8);
        off += scnprintf(buf + off, buf_size - off,
                         "SBAR23SZ %hhu\n", u.v8);
        if (!ndev->bar4_split) {
                pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR45SZ %hhu\n", u.v8);
        } else {
                pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR4SZ %hhu\n", u.v8);
                pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &u.v8);
                off += scnprintf(buf + off, buf_size - off,
                                 "SBAR5SZ %hhu\n", u.v8);
        }

        off += scnprintf(buf + off, buf_size - off,
                         "\nNTB Incoming XLAT:\n");

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "XLAT23 -\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT4 -\t\t\t%#06x\n", u.v32);

                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_xlat, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "XLAT45 -\t\t%#018llx\n", u.v64);
        }

        u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 2));
        off += scnprintf(buf + off, buf_size - off,
                         "LMT23 -\t\t\t%#018llx\n", u.v64);

        if (ndev->bar4_split) {
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT4 -\t\t\t%#06x\n", u.v32);
                u.v32 = ioread32(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 5));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT5 -\t\t\t%#06x\n", u.v32);
        } else {
                u.v64 = ioread64(mmio + bar2_off(ndev->xlat_reg->bar2_limit, 4));
                off += scnprintf(buf + off, buf_size - off,
                                 "LMT45 -\t\t\t%#018llx\n", u.v64);
        }

        if (pdev_is_gen1(pdev)) {
                if (ntb_topo_is_b2b(ndev->ntb.topo)) {
                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Outgoing B2B XLAT:\n");

                        u.v64 = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B XLAT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B XLAT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        u.v64 = ioread64(mmio + XEON_PBAR23LMT_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "B2B LMT23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_PBAR4LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT4 -\t\t%#06x\n",
                                                 u.v32);
                                u.v32 = ioread32(mmio + XEON_PBAR5LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT5 -\t\t%#06x\n",
                                                 u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_PBAR45LMT_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "B2B LMT45 -\t\t%#018llx\n",
                                                 u.v64);
                        }

                        off += scnprintf(buf + off, buf_size - off,
                                         "\nNTB Secondary BAR:\n");

                        u.v64 = ioread64(mmio + XEON_SBAR0BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR01 -\t\t%#018llx\n", u.v64);

                        u.v64 = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
                        off += scnprintf(buf + off, buf_size - off,
                                         "SBAR23 -\t\t%#018llx\n", u.v64);

                        if (ndev->bar4_split) {
                                u.v32 = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR4 -\t\t\t%#06x\n", u.v32);
                                u.v32 = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR5 -\t\t\t%#06x\n", u.v32);
                        } else {
                                u.v64 = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
                                off += scnprintf(buf + off, buf_size - off,
                                                 "SBAR45 -\t\t%#018llx\n",
                                                 u.v64);
                        }
                }

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Statistics:\n");

                u.v16 = ioread16(mmio + XEON_USMEMMISS_OFFSET);
                off += scnprintf(buf + off, buf_size - off,
                                 "Upstream Memory Miss -\t%u\n", u.v16);

                off += scnprintf(buf + off, buf_size - off,
                                 "\nXEON NTB Hardware Errors:\n");

                if (!pci_read_config_word(pdev,
                                          XEON_DEVSTS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "DEVSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_word(pdev,
                                          XEON_LINK_STATUS_OFFSET, &u.v16))
                        off += scnprintf(buf + off, buf_size - off,
                                         "LNKSTS -\t\t%#06x\n", u.v16);

                if (!pci_read_config_dword(pdev,
                                           XEON_UNCERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "UNCERRSTS -\t\t%#06x\n", u.v32);

                if (!pci_read_config_dword(pdev,
                                           XEON_CORERRSTS_OFFSET, &u.v32))
                        off += scnprintf(buf + off, buf_size - off,
                                         "CORERRSTS -\t\t%#06x\n", u.v32);
        }

        ret = simple_read_from_buffer(ubuf, count, offp, buf, off);
        kfree(buf);
        return ret;
}

static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf,
                                 size_t count, loff_t *offp)
{
        struct intel_ntb_dev *ndev = filp->private_data;

        if (pdev_is_gen1(ndev->ntb.pdev))
                return ndev_ntb_debugfs_read(filp, ubuf, count, offp);
        else if (pdev_is_gen3(ndev->ntb.pdev))
                return ndev_ntb3_debugfs_read(filp, ubuf, count, offp);

        return -ENXIO;
}

static void ndev_init_debugfs(struct intel_ntb_dev *ndev)
{
        if (!debugfs_dir) {
                ndev->debugfs_dir = NULL;
                ndev->debugfs_info = NULL;
        } else {
                ndev->debugfs_dir =
                        debugfs_create_dir(pci_name(ndev->ntb.pdev),
                                           debugfs_dir);
                if (!ndev->debugfs_dir)
                        ndev->debugfs_info = NULL;
                else
                        ndev->debugfs_info =
                                debugfs_create_file("info", S_IRUSR,
                                                    ndev->debugfs_dir, ndev,
                                                    &intel_ntb_debugfs_info);
        }
}

static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev)
{
        debugfs_remove_recursive(ndev->debugfs_dir);
}

int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx)
{
        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        return ntb_ndev(ntb)->mw_count;
}

int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx,
                           resource_size_t *addr_align,
                           resource_size_t *size_align,
                           resource_size_t *size_max)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        resource_size_t bar_size, mw_size;
        int bar;

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        bar_size = pci_resource_len(ndev->ntb.pdev, bar);

        if (idx == ndev->b2b_idx)
                mw_size = bar_size - ndev->b2b_off;
        else
                mw_size = bar_size;

        /* The translated address must be aligned to the full bar size */
        if (addr_align)
                *addr_align = bar_size;

        if (size_align)
                *size_align = 1;

        if (size_max)
                *size_max = mw_size;

        return 0;
}
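/*
 * Consequence of the above (illustrative): for a 64 MiB BAR, a client must
 * hand ntb_mw_set_trans() a DMA address aligned to 0x4000000, while the size
 * may be anything up to mw_size since size_align is 1.
 */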

static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
                                  dma_addr_t addr, resource_size_t size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        unsigned long base_reg, xlat_reg, limit_reg;
        resource_size_t bar_size, mw_size;
        void __iomem *mmio;
        u64 base, limit, reg_val;
        int bar;

        if (pidx != NTB_DEF_PEER_IDX)
                return -EINVAL;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        bar_size = pci_resource_len(ndev->ntb.pdev, bar);

        if (idx == ndev->b2b_idx)
                mw_size = bar_size - ndev->b2b_off;
        else
                mw_size = bar_size;

        /* hardware requires that addr is aligned to bar size */
        if (addr & (bar_size - 1))
                return -EINVAL;

        /* make sure the range fits in the usable mw size */
        if (size > mw_size)
                return -EINVAL;

        mmio = ndev->self_mmio;
        base_reg = bar0_off(ndev->xlat_reg->bar0_base, bar);
        xlat_reg = bar2_off(ndev->xlat_reg->bar2_xlat, bar);
        limit_reg = bar2_off(ndev->xlat_reg->bar2_limit, bar);

        if (bar < 4 || !ndev->bar4_split) {
                base = ioread64(mmio + base_reg) & NTB_BAR_MASK_64;

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite64(addr, mmio + xlat_reg);
                reg_val = ioread64(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite64(limit, mmio + limit_reg);
                reg_val = ioread64(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite64(base, mmio + limit_reg);
                        iowrite64(0, mmio + xlat_reg);
                        return -EIO;
                }
        } else {
                /* split bar addr range must all be 32 bit */
                if (addr & (~0ull << 32))
                        return -EINVAL;
                if ((addr + size) & (~0ull << 32))
                        return -EINVAL;

                base = ioread32(mmio + base_reg) & NTB_BAR_MASK_32;

                /* Set the limit if supported, if size is not mw_size */
                if (limit_reg && size != mw_size)
                        limit = base + size;
                else
                        limit = 0;

                /* set and verify setting the translation address */
                iowrite32(addr, mmio + xlat_reg);
                reg_val = ioread32(mmio + xlat_reg);
                if (reg_val != addr) {
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }

                /* set and verify setting the limit */
                iowrite32(limit, mmio + limit_reg);
                reg_val = ioread32(mmio + limit_reg);
                if (reg_val != limit) {
                        iowrite32(base, mmio + limit_reg);
                        iowrite32(0, mmio + xlat_reg);
                        return -EIO;
                }
        }

        return 0;
}
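/*
 * The write-then-read-back pattern above is deliberate: if the hardware
 * cannot latch the requested translation or limit (for instance, bits beyond
 * what the register implements), the read-back differs from what was
 * written, and the window is unwound to a safe state rather than being left
 * half programmed.
 */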

u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed,
                         enum ntb_width *width)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (ndev->reg->link_is_up(ndev)) {
                if (speed)
                        *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta);
                if (width)
                        *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta);
                return 1;
        } else {
                /* TODO MAYBE: is it possible to observe the link speed and
                 * width while link is training? */
                if (speed)
                        *speed = NTB_SPEED_NONE;
                if (width)
                        *width = NTB_WIDTH_NONE;
                return 0;
        }
}

static int intel_ntb_link_enable(struct ntb_dev *ntb,
                                 enum ntb_speed max_speed,
                                 enum ntb_width max_width)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_ctl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(&ntb->pdev->dev,
                "Enabling link with max_speed %d max_width %d\n",
                max_speed, max_width);
        if (max_speed != NTB_SPEED_AUTO)
                dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed);
        if (max_width != NTB_WIDTH_AUTO)
                dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width);

        ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK);
        ntb_ctl |= NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP;
        ntb_ctl |= NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP;
        if (ndev->bar4_split)
                ntb_ctl |= NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP;
        iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}

int intel_ntb_link_disable(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;
        u32 ntb_cntl;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return -EINVAL;

        dev_dbg(&ntb->pdev->dev, "Disabling link\n");

        /* Bring NTB link down */
        ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR2_SNOOP | NTB_CTL_S2P_BAR2_SNOOP);
        ntb_cntl &= ~(NTB_CTL_P2S_BAR4_SNOOP | NTB_CTL_S2P_BAR4_SNOOP);
        if (ndev->bar4_split)
                ntb_cntl &= ~(NTB_CTL_P2S_BAR5_SNOOP | NTB_CTL_S2P_BAR5_SNOOP);
        ntb_cntl |= NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK;
        iowrite32(ntb_cntl, ndev->self_mmio + ndev->reg->ntb_ctl);

        return 0;
}
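/*
 * Link control on this hardware is a side effect of the NTB control
 * register: enabling clears DISABLE/CFG_LOCK and turns on snoop for the
 * translated BARs, and disabling does the inverse.  There is no explicit
 * link-training knob, which is why max_speed and max_width are acknowledged
 * but ignored above.
 */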

int intel_ntb_peer_mw_count(struct ntb_dev *ntb)
{
        /* Numbers of inbound and outbound memory windows match */
        return ntb_ndev(ntb)->mw_count;
}

int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx,
                               phys_addr_t *base, resource_size_t *size)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);
        int bar;

        if (idx >= ndev->b2b_idx && !ndev->b2b_off)
                idx += 1;

        bar = ndev_mw_to_bar(ndev, idx);
        if (bar < 0)
                return bar;

        if (base)
                *base = pci_resource_start(ndev->ntb.pdev, bar) +
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        if (size)
                *size = pci_resource_len(ndev->ntb.pdev, bar) -
                        (idx == ndev->b2b_idx ? ndev->b2b_off : 0);

        return 0;
}
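/*
 * Example of the b2b carve-out arithmetic above (hypothetical numbers): with
 * a 16 MiB BAR backing the b2b window and b2b_off = 8 MiB, the
 * peer-addressable window starts 8 MiB into the BAR and reports an 8 MiB
 * size; every other window reports the raw BAR start and length.
 */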

static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB);
}

u64 intel_ntb_db_valid_mask(struct ntb_dev *ntb)
{
        return ntb_ndev(ntb)->db_valid_mask;
}

int intel_ntb_db_vector_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->db_vec_count;
}

u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        /* Valid vector numbers are 0 .. db_vec_count - 1 */
        if (db_vector < 0 || db_vector >= ndev->db_vec_count)
                return 0;

        return ndev->db_valid_mask & ndev_vec_mask(ndev, db_vector);
}

static u64 intel_ntb_db_read(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_read(ndev,
                            ndev->self_mmio +
                            ndev->self_reg->db_bell);
}

static int intel_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->self_mmio +
                             ndev->self_reg->db_bell);
}

int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_set_mask(ndev, db_bits,
                                ndev->self_mmio +
                                ndev->self_reg->db_mask);
}

int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_clear_mask(ndev, db_bits,
                                  ndev->self_mmio +
                                  ndev->self_reg->db_mask);
}

static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
                                  resource_size_t *db_size, u64 *db_data,
                                  int db_bit)
{
        u64 db_bits;
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        if (unlikely(db_bit >= BITS_PER_LONG_LONG))
                return -EINVAL;

        db_bits = BIT_ULL(db_bit);

        if (unlikely(db_bits & ~ndev->db_valid_mask))
                return -EINVAL;

        ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
                     ndev->peer_reg->db_bell);

        if (db_data)
                *db_data = db_bits;

        return 0;
}

static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_db_write(ndev, db_bits,
                             ndev->peer_mmio +
                             ndev->peer_reg->db_bell);
}
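/*
 * Sketch of the peer signalling flow (client side, via the generic ntb.h
 * wrappers): the sender calls ntb_peer_db_set(ntb, BIT_ULL(n)), which lands
 * in intel_ntb_peer_db_set() above and writes the peer's doorbell register;
 * the receiver's ISR surfaces it through ntb_db_event(), and the client then
 * acknowledges with ntb_db_clear(ntb, BIT_ULL(n)).
 */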

int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb)
{
        return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_SPAD);
}

int intel_ntb_spad_count(struct ntb_dev *ntb)
{
        struct intel_ntb_dev *ndev;

        ndev = container_of(ntb, struct intel_ntb_dev, ntb);

        return ndev->spad_count;
}

u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, idx,
                              ndev->self_mmio +
                              ndev->self_reg->spad);
}

int intel_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, idx, val,
                               ndev->self_mmio +
                               ndev->self_reg->spad);
}

int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx,
                             phys_addr_t *spad_addr)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr,
                              ndev->peer_reg->spad);
}

u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_read(ndev, sidx,
                              ndev->peer_mmio +
                              ndev->peer_reg->spad);
}

int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx,
                              u32 val)
{
        struct intel_ntb_dev *ndev = ntb_ndev(ntb);

        return ndev_spad_write(ndev, sidx, val,
                               ndev->peer_mmio +
                               ndev->peer_reg->spad);
}

static u64 xeon_db_ioread(void __iomem *mmio)
{
        return (u64)ioread16(mmio);
}

static void xeon_db_iowrite(u64 bits, void __iomem *mmio)
{
        iowrite16((u16)bits, mmio);
}
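/*
 * Xeon exposes its doorbell register as only 16 bits wide, so the generic
 * u64 doorbell value is narrowed here; db_valid_mask keeps clients from ever
 * handing down bits that would be truncated away.
 */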

static int xeon_poll_link(struct intel_ntb_dev *ndev)
{
        u16 reg_val;
        int rc;

        ndev->reg->db_iowrite(ndev->db_link_mask,
                              ndev->self_mmio +
                              ndev->self_reg->db_bell);

        rc = pci_read_config_word(ndev->ntb.pdev,
                                  XEON_LINK_STATUS_OFFSET, &reg_val);
        if (rc)
                return 0;

        if (reg_val == ndev->lnk_sta)
                return 0;

        ndev->lnk_sta = reg_val;

        return 1;
}
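/*
 * xeon_poll_link() returns 1 only when the cached LNKSTS value changed, i.e.
 * when the caller (ndev_interrupt) should raise ntb_link_event().  Writing
 * db_link_mask to the local doorbell register first acknowledges the pending
 * link-status doorbell so that later link changes can be observed.
 */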

int xeon_link_is_up(struct intel_ntb_dev *ndev)
{
        if (ndev->ntb.topo == NTB_TOPO_SEC)
                return 1;

        return NTB_LNK_STA_ACTIVE(ndev->lnk_sta);
}

enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd)
{
        switch (ppd & XEON_PPD_TOPO_MASK) {
        case XEON_PPD_TOPO_B2B_USD:
                return NTB_TOPO_B2B_USD;

        case XEON_PPD_TOPO_B2B_DSD:
                return NTB_TOPO_B2B_DSD;

        case XEON_PPD_TOPO_PRI_USD:
        case XEON_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */
                return NTB_TOPO_PRI;

        case XEON_PPD_TOPO_SEC_USD:
        case XEON_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */
                return NTB_TOPO_SEC;
        }

        return NTB_TOPO_NONE;
}

static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd)
{
        if (ppd & XEON_PPD_SPLIT_BAR_MASK) {
                dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd);
                return 1;
        }
        return 0;
}

static int xeon_init_isr(struct intel_ntb_dev *ndev)
{
        return ndev_init_isr(ndev, XEON_DB_MSIX_VECTOR_COUNT,
                             XEON_DB_MSIX_VECTOR_COUNT,
                             XEON_DB_MSIX_VECTOR_SHIFT,
                             XEON_DB_TOTAL_SHIFT);
}

static void xeon_deinit_isr(struct intel_ntb_dev *ndev)
{
        ndev_deinit_isr(ndev);
}
1287 
1288 static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev,
1289                              const struct intel_b2b_addr *addr,
1290                              const struct intel_b2b_addr *peer_addr)
1291 {
1292         struct pci_dev *pdev;
1293         void __iomem *mmio;
1294         resource_size_t bar_size;
1295         phys_addr_t bar_addr;
1296         int b2b_bar;
1297         u8 bar_sz;
1298 
1299         pdev = ndev->ntb.pdev;
1300         mmio = ndev->self_mmio;
1301 
1302         if (ndev->b2b_idx == UINT_MAX) {
1303                 dev_dbg(&pdev->dev, "not using b2b mw\n");
1304                 b2b_bar = 0;
1305                 ndev->b2b_off = 0;
1306         } else {
1307                 b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx);
1308                 if (b2b_bar < 0)
1309                         return -EIO;
1310 
1311                 dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar);
1312 
1313                 bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar);
1314 
1315                 dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size);
1316 
1317                 if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
1318                         dev_dbg(&pdev->dev, "b2b using first half of bar\n");
1319                         ndev->b2b_off = bar_size >> 1;
1320                 } else if (XEON_B2B_MIN_SIZE <= bar_size) {
1321                         dev_dbg(&pdev->dev, "b2b using whole bar\n");
1322                         ndev->b2b_off = 0;
1323                         --ndev->mw_count;
1324                 } else {
1325                         dev_dbg(&pdev->dev, "b2b bar size is too small\n");
1326                         return -EIO;
1327                 }
1328         }
1329 
1330         /* Reset the secondary bar sizes to match the primary bar sizes,
1331          * except disable or halve the size of the b2b secondary bar.
1332          *
1333          * Note: code for each specific bar size register, because the register
1334          * offsets are not in a consistent order (bar5sz comes after ppd, odd).
1335          */
1336         pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz);
1337         dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz);
1338         if (b2b_bar == 2) {
1339                 if (ndev->b2b_off)
1340                         bar_sz -= 1;
1341                 else
1342                         bar_sz = 0;
1343         }
1344         pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz);
1345         pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz);
1346         dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz);
1347 
1348         if (!ndev->bar4_split) {
1349                 pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz);
1350                 dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz);
1351                 if (b2b_bar == 4) {
1352                         if (ndev->b2b_off)
1353                                 bar_sz -= 1;
1354                         else
1355                                 bar_sz = 0;
1356                 }
1357                 pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz);
1358                 pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz);
1359                 dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz);
1360         } else {
1361                 pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz);
1362                 dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz);
1363                 if (b2b_bar == 4) {
1364                         if (ndev->b2b_off)
1365                                 bar_sz -= 1;
1366                         else
1367                                 bar_sz = 0;
1368                 }
1369                 pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz);
1370                 pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz);
1371                 dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz);
1372 
1373                 pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz);
1374                 dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz);
1375                 if (b2b_bar == 5) {
1376                         if (ndev->b2b_off)
1377                                 bar_sz -= 1;
1378                         else
1379                                 bar_sz = 0;
1380                 }
1381                 pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz);
1382                 pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz);
1383                 dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz);
1384         }
1385 
1386         /* SBAR01 hit by first part of the b2b bar */
1387         if (b2b_bar == 0)
1388                 bar_addr = addr->bar0_addr;
1389         else if (b2b_bar == 2)
1390                 bar_addr = addr->bar2_addr64;
1391         else if (b2b_bar == 4 && !ndev->bar4_split)
1392                 bar_addr = addr->bar4_addr64;
1393         else if (b2b_bar == 4)
1394                 bar_addr = addr->bar4_addr32;
1395         else if (b2b_bar == 5)
1396                 bar_addr = addr->bar5_addr32;
1397         else
1398                 return -EIO;
1399 
1400         dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr);
1401         iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET);
1402 
1403         /* Other SBARs are normally hit by the PBAR xlat, except for the b2b bar.
1404          * The b2b bar is either disabled above, or configured half-size, and
1405          * it starts at the PBAR xlat + offset.
1406          */
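        /*
         * Editor's note: each secondary bar is described by a base/limit
         * pair plus a translation (XLAT) register; inbound accesses that
         * fall between base and limit are redirected to the XLAT address
         * on the local side.
         */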
1407 
1408         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1409         iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET);
1410         bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET);
1411         dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr);
1412 
1413         if (!ndev->bar4_split) {
1414                 bar_addr = addr->bar4_addr64 +
1415                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1416                 iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET);
1417                 bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET);
1418                 dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr);
1419         } else {
1420                 bar_addr = addr->bar4_addr32 +
1421                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1422                 iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET);
1423                 bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET);
1424                 dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr);
1425 
1426                 bar_addr = addr->bar5_addr32 +
1427                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1428                 iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET);
1429                 bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET);
1430                 dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr);
1431         }
1432 
1433         /* set up incoming bar limits == base addrs (zero-length windows) */
1434 
1435         bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0);
1436         iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET);
1437         bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET);
1438         dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr);
1439 
1440         if (!ndev->bar4_split) {
1441                 bar_addr = addr->bar4_addr64 +
1442                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1443                 iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET);
1444                 bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET);
1445                 dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr);
1446         } else {
1447                 bar_addr = addr->bar4_addr32 +
1448                         (b2b_bar == 4 ? ndev->b2b_off : 0);
1449                 iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET);
1450                 bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET);
1451                 dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr);
1452 
1453                 bar_addr = addr->bar5_addr32 +
1454                         (b2b_bar == 5 ? ndev->b2b_off : 0);
1455                 iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET);
1456                 bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET);
1457                 dev_dbg(&pdev->dev, "SBAR5LMT %#010llx\n", bar_addr);
1458         }
1459 
1460         /* zero incoming translation addrs */
1461         iowrite64(0, mmio + XEON_SBAR23XLAT_OFFSET);
1462 
1463         if (!ndev->bar4_split) {
1464                 iowrite64(0, mmio + XEON_SBAR45XLAT_OFFSET);
1465         } else {
1466                 iowrite32(0, mmio + XEON_SBAR4XLAT_OFFSET);
1467                 iowrite32(0, mmio + XEON_SBAR5XLAT_OFFSET);
1468         }
1469 
1470         /* zero outgoing translation limits (whole bar size windows) */
1471         iowrite64(0, mmio + XEON_PBAR23LMT_OFFSET);
1472         if (!ndev->bar4_split) {
1473                 iowrite64(0, mmio + XEON_PBAR45LMT_OFFSET);
1474         } else {
1475                 iowrite32(0, mmio + XEON_PBAR4LMT_OFFSET);
1476                 iowrite32(0, mmio + XEON_PBAR5LMT_OFFSET);
1477         }
1478 
1479         /* set outgoing translation offsets */
1480         bar_addr = peer_addr->bar2_addr64;
1481         iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET);
1482         bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET);
1483         dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr);
1484 
1485         if (!ndev->bar4_split) {
1486                 bar_addr = peer_addr->bar4_addr64;
1487                 iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET);
1488                 bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET);
1489                 dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr);
1490         } else {
1491                 bar_addr = peer_addr->bar4_addr32;
1492                 iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET);
1493                 bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET);
1494                 dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr);
1495 
1496                 bar_addr = peer_addr->bar5_addr32;
1497                 iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET);
1498                 bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET);
1499                 dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr);
1500         }
1501 
1502         /* set the translation offset for b2b registers */
1503         if (b2b_bar == 0)
1504                 bar_addr = peer_addr->bar0_addr;
1505         else if (b2b_bar == 2)
1506                 bar_addr = peer_addr->bar2_addr64;
1507         else if (b2b_bar == 4 && !ndev->bar4_split)
1508                 bar_addr = peer_addr->bar4_addr64;
1509         else if (b2b_bar == 4)
1510                 bar_addr = peer_addr->bar4_addr32;
1511         else if (b2b_bar == 5)
1512                 bar_addr = peer_addr->bar5_addr32;
1513         else
1514                 return -EIO;
1515 
1516         /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */
1517         dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr);
1518         iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL);
1519         iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU);
1520 
1521         if (b2b_bar) {
1522                 /* map peer ntb mmio config space registers */
1523                 ndev->peer_mmio = pci_iomap(pdev, b2b_bar,
1524                                             XEON_B2B_MIN_SIZE);
1525                 if (!ndev->peer_mmio)
1526                         return -EIO;
1527 
1528                 ndev->peer_addr = pci_resource_start(pdev, b2b_bar);
1529         }
1530 
1531         return 0;
1532 }
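
/*
 * Editor's sketch (not part of the driver): the b2b window carve-out
 * decision from xeon_setup_b2b_mw() above, in standalone form.  The helper
 * name is hypothetical and the block is compiled out; XEON_B2B_MIN_SIZE
 * and the b2b_mw_share module parameter are the driver's own.
 */
#if 0
static int example_b2b_off(resource_size_t bar_size, resource_size_t *off)
{
        if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) {
                *off = bar_size >> 1;   /* b2b shares the first half */
                return 0;
        }
        if (XEON_B2B_MIN_SIZE <= bar_size) {
                *off = 0;               /* b2b consumes the whole bar */
                return 0;
        }
        return -EIO;                    /* bar too small for b2b */
}
#endif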
1533 
1534 static int xeon_init_ntb(struct intel_ntb_dev *ndev)
1535 {
1536         struct device *dev = &ndev->ntb.pdev->dev;
1537         int rc;
1538         u32 ntb_ctl;
1539 
1540         if (ndev->bar4_split)
1541                 ndev->mw_count = HSX_SPLIT_BAR_MW_COUNT;
1542         else
1543                 ndev->mw_count = XEON_MW_COUNT;
1544 
1545         ndev->spad_count = XEON_SPAD_COUNT;
1546         ndev->db_count = XEON_DB_COUNT;
1547         ndev->db_link_mask = XEON_DB_LINK_BIT;
1548 
1549         switch (ndev->ntb.topo) {
1550         case NTB_TOPO_PRI:
1551                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1552                         dev_err(dev, "NTB Primary config disabled\n");
1553                         return -EINVAL;
1554                 }
1555 
1556                 /* enable link to allow secondary side device to appear */
1557                 ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl);
1558                 ntb_ctl &= ~NTB_CTL_DISABLE;
1559                 iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
1560 
1561                 /* use half the spads for the peer */
1562                 ndev->spad_count >>= 1;
1563                 ndev->self_reg = &xeon_pri_reg;
1564                 ndev->peer_reg = &xeon_sec_reg;
1565                 ndev->xlat_reg = &xeon_sec_xlat;
1566                 break;
1567 
1568         case NTB_TOPO_SEC:
1569                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1570                         dev_err(dev, "NTB Secondary config disabled\n");
1571                         return -EINVAL;
1572                 }
1573                 /* use half the spads for the peer */
1574                 ndev->spad_count >>= 1;
1575                 ndev->self_reg = &xeon_sec_reg;
1576                 ndev->peer_reg = &xeon_pri_reg;
1577                 ndev->xlat_reg = &xeon_pri_xlat;
1578                 break;
1579 
1580         case NTB_TOPO_B2B_USD:
1581         case NTB_TOPO_B2B_DSD:
1582                 ndev->self_reg = &xeon_pri_reg;
1583                 ndev->peer_reg = &xeon_b2b_reg;
1584                 ndev->xlat_reg = &xeon_sec_xlat;
1585 
1586                 if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) {
1587                         ndev->peer_reg = &xeon_pri_reg;
1588 
1589                         if (b2b_mw_idx < 0)
1590                                 ndev->b2b_idx = b2b_mw_idx + ndev->mw_count;
1591                         else
1592                                 ndev->b2b_idx = b2b_mw_idx;
1593 
1594                         if (ndev->b2b_idx >= ndev->mw_count) {
1595                                 dev_dbg(dev,
1596                                         "b2b_mw_idx %d invalid for mw_count %u\n",
1597                                         b2b_mw_idx, ndev->mw_count);
1598                                 return -EINVAL;
1599                         }
1600 
1601                         dev_dbg(dev, "setting up b2b mw idx %d means %u\n",
1602                                 b2b_mw_idx, ndev->b2b_idx);
1603 
1604                 } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) {
1605                         dev_warn(dev, "Reducing doorbell count by 1\n");
1606                         ndev->db_count -= 1;
1607                 }
1608 
1609                 if (ndev->ntb.topo == NTB_TOPO_B2B_USD) {
1610                         rc = xeon_setup_b2b_mw(ndev,
1611                                                &xeon_b2b_dsd_addr,
1612                                                &xeon_b2b_usd_addr);
1613                 } else {
1614                         rc = xeon_setup_b2b_mw(ndev,
1615                                                &xeon_b2b_usd_addr,
1616                                                &xeon_b2b_dsd_addr);
1617                 }
1618                 if (rc)
1619                         return rc;
1620 
1621                 /* Enable Bus Master and Memory Space on the secondary side */
1622                 iowrite16(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER,
1623                           ndev->self_mmio + XEON_SPCICMD_OFFSET);
1624 
1625                 break;
1626 
1627         default:
1628                 return -EINVAL;
1629         }
1630 
1631         ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1;
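        /* e.g. db_count == 15 gives BIT_ULL(15) - 1 == 0x7fff, one valid
         * bit per doorbell; writing that mask below leaves every doorbell
         * interrupt masked until a client unmasks the bits it uses */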
1632 
1633         ndev->reg->db_iowrite(ndev->db_valid_mask,
1634                               ndev->self_mmio +
1635                               ndev->self_reg->db_mask);
1636 
1637         return 0;
1638 }
1639 
1640 static int xeon_init_dev(struct intel_ntb_dev *ndev)
1641 {
1642         struct pci_dev *pdev;
1643         u8 ppd;
1644         int rc, mem;
1645 
1646         pdev = ndev->ntb.pdev;
1647 
1648         switch (pdev->device) {
1649         /* There is a Xeon hardware erratum related to writes to SDOORBELL or
1650          * B2BDOORBELL in conjunction with inbound access to NTB MMIO Space,
1651          * which may hang the system.  To work around this, use the second memory
1652          * window to access the interrupt and scratch pad registers on the
1653          * remote system.
1654          */
1655         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1656         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1657         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1658         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1659         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1660         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1661         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1662         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1663         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1664         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1665         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1666         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1667         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1668         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1669         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1670                 ndev->hwerr_flags |= NTB_HWERR_SDOORBELL_LOCKUP;
1671                 break;
1672         }
1673 
1674         switch (pdev->device) {
1675         /* There is a hardware erratum related to accessing any register in
1676          * SB01BASE in the presence of bidirectional traffic crossing the NTB.
1677          */
1678         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1679         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1680         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1681         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1682         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1683         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1684         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1685         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1686         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1687                 ndev->hwerr_flags |= NTB_HWERR_SB01BASE_LOCKUP;
1688                 break;
1689         }
1690 
1691         switch (pdev->device) {
1692         /* HW erratum on bit 14 of the b2bdoorbell register.  Writes will not be
1693          * mirrored to the remote system.  Shrink the number of bits by one,
1694          * since bit 14 is the last bit.
1695          */
1696         case PCI_DEVICE_ID_INTEL_NTB_SS_JSF:
1697         case PCI_DEVICE_ID_INTEL_NTB_PS_JSF:
1698         case PCI_DEVICE_ID_INTEL_NTB_B2B_JSF:
1699         case PCI_DEVICE_ID_INTEL_NTB_SS_SNB:
1700         case PCI_DEVICE_ID_INTEL_NTB_PS_SNB:
1701         case PCI_DEVICE_ID_INTEL_NTB_B2B_SNB:
1702         case PCI_DEVICE_ID_INTEL_NTB_SS_IVT:
1703         case PCI_DEVICE_ID_INTEL_NTB_PS_IVT:
1704         case PCI_DEVICE_ID_INTEL_NTB_B2B_IVT:
1705         case PCI_DEVICE_ID_INTEL_NTB_SS_HSX:
1706         case PCI_DEVICE_ID_INTEL_NTB_PS_HSX:
1707         case PCI_DEVICE_ID_INTEL_NTB_B2B_HSX:
1708         case PCI_DEVICE_ID_INTEL_NTB_SS_BDX:
1709         case PCI_DEVICE_ID_INTEL_NTB_PS_BDX:
1710         case PCI_DEVICE_ID_INTEL_NTB_B2B_BDX:
1711                 ndev->hwerr_flags |= NTB_HWERR_B2BDOORBELL_BIT14;
1712                 break;
1713         }
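        /*
         * Editor's note: the hwerr_flags accumulated above gate the errata
         * workarounds later in this file; e.g. SDOORBELL_LOCKUP forces the
         * b2b memory-window workaround and B2BDOORBELL_BIT14 drops the top
         * doorbell bit, both in xeon_init_ntb().
         */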
1714 
1715         ndev->reg = &xeon_reg;
1716 
1717         rc = pci_read_config_byte(pdev, XEON_PPD_OFFSET, &ppd);
1718         if (rc)
1719                 return -EIO;
1720 
1721         ndev->ntb.topo = xeon_ppd_topo(ndev, ppd);
1722         dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd,
1723                 ntb_topo_string(ndev->ntb.topo));
1724         if (ndev->ntb.topo == NTB_TOPO_NONE)
1725                 return -EINVAL;
1726 
1727         if (ndev->ntb.topo != NTB_TOPO_SEC) {
1728                 ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd);
1729                 dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n",
1730                         ppd, ndev->bar4_split);
1731         } else {
1732                 /* This is how the transparent (secondary) side figures out
1733                  * whether we are doing split BAR or not, since the hw on the
1734                  * transparent side has no way to know and set the PPD.
1735                  */
1736                 mem = pci_select_bars(pdev, IORESOURCE_MEM);
1737                 ndev->bar4_split = hweight32(mem) ==
1738                         HSX_SPLIT_BAR_MW_COUNT + 1;
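                /* e.g. with BARs 0, 2, 4 and 5 present, mem == 0x35 and
                 * hweight32(mem) == 4 == HSX_SPLIT_BAR_MW_COUNT + 1
                 * (assuming HSX_SPLIT_BAR_MW_COUNT == 3), so split-BAR
                 * mode is inferred */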
1739                 dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n",
1740                         mem, ndev->bar4_split);
1741         }
1742 
1743         rc = xeon_init_ntb(ndev);
1744         if (rc)
1745                 return rc;
1746 
1747         return xeon_init_isr(ndev);
1748 }
1749 
1750 static void xeon_deinit_dev(struct intel_ntb_dev *ndev)
1751 {
1752         xeon_deinit_isr(ndev);
1753 }
1754 
1755 static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev)
1756 {
1757         int rc;
1758 
1759         pci_set_drvdata(pdev, ndev);
1760 
1761         rc = pci_enable_device(pdev);
1762         if (rc)
1763                 goto err_pci_enable;
1764 
1765         rc = pci_request_regions(pdev, NTB_NAME);
1766         if (rc)
1767                 goto err_pci_regions;
1768 
1769         pci_set_master(pdev);
1770 
1771         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1772         if (rc) {
1773                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1774                 if (rc)
1775                         goto err_dma_mask;
1776                 dev_warn(&pdev->dev, "Cannot DMA highmem\n");
1777         }
1778 
1779         rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1780         if (rc) {
1781                 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1782                 if (rc)
1783                         goto err_dma_mask;
1784                 dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n");
1785         }
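        /* Editor's note: later kernels typically replace the two fallback
         * blocks above with a single dma_set_mask_and_coherent(&pdev->dev,
         * DMA_BIT_MASK(64)) call plus one 32-bit retry; the open-coded
         * logic here is the older equivalent. */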
1786         rc = dma_coerce_mask_and_coherent(&ndev->ntb.dev,
1787                                           dma_get_mask(&pdev->dev));
1788         if (rc)
1789                 goto err_dma_mask;
1790 
1791         ndev->self_mmio = pci_iomap(pdev, 0, 0);
1792         if (!ndev->self_mmio) {
1793                 rc = -EIO;
1794                 goto err_mmio;
1795         }
1796         ndev->peer_mmio = ndev->self_mmio;
1797         ndev->peer_addr = pci_resource_start(pdev, 0);
1798 
1799         return 0;
1800 
1801 err_mmio:
1802 err_dma_mask:
1803         pci_clear_master(pdev);
1804         pci_release_regions(pdev);
1805 err_pci_regions:
1806         pci_disable_device(pdev);
1807 err_pci_enable:
1808         pci_set_drvdata(pdev, NULL);
1809         return rc;
1810 }
1811 
1812 static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev)
1813 {
1814         struct pci_dev *pdev = ndev->ntb.pdev;
1815 
1816         if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio)
1817                 pci_iounmap(pdev, ndev->peer_mmio);
1818         pci_iounmap(pdev, ndev->self_mmio);
1819 
1820         pci_clear_master(pdev);
1821         pci_release_regions(pdev);
1822         pci_disable_device(pdev);
1823         pci_set_drvdata(pdev, NULL);
1824 }
1825 
1826 static inline void ndev_init_struct(struct intel_ntb_dev *ndev,
1827                                     struct pci_dev *pdev)
1828 {
1829         ndev->ntb.pdev = pdev;
1830         ndev->ntb.topo = NTB_TOPO_NONE;
1831         ndev->ntb.ops = &intel_ntb_ops;
1832 
1833         ndev->b2b_off = 0;
1834         ndev->b2b_idx = UINT_MAX;
1835 
1836         ndev->bar4_split = 0;
1837 
1838         ndev->mw_count = 0;
1839         ndev->spad_count = 0;
1840         ndev->db_count = 0;
1841         ndev->db_vec_count = 0;
1842         ndev->db_vec_shift = 0;
1843 
1844         ndev->ntb_ctl = 0;
1845         ndev->lnk_sta = 0;
1846 
1847         ndev->db_valid_mask = 0;
1848         ndev->db_link_mask = 0;
1849         ndev->db_mask = 0;
1850 
1851         spin_lock_init(&ndev->db_mask_lock);
1852 }
1853 
1854 static int intel_ntb_pci_probe(struct pci_dev *pdev,
1855                                const struct pci_device_id *id)
1856 {
1857         struct intel_ntb_dev *ndev;
1858         int rc, node;
1859 
1860         node = dev_to_node(&pdev->dev);
1861 
1862         if (pdev_is_gen1(pdev)) {
1863                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
1864                 if (!ndev) {
1865                         rc = -ENOMEM;
1866                         goto err_ndev;
1867                 }
1868 
1869                 ndev_init_struct(ndev, pdev);
1870 
1871                 rc = intel_ntb_init_pci(ndev, pdev);
1872                 if (rc)
1873                         goto err_init_pci;
1874 
1875                 rc = xeon_init_dev(ndev);
1876                 if (rc)
1877                         goto err_init_dev;
1878 
1879         } else if (pdev_is_gen3(pdev)) {
1880                 ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node);
1881                 if (!ndev) {
1882                         rc = -ENOMEM;
1883                         goto err_ndev;
1884                 }
1885 
1886                 ndev_init_struct(ndev, pdev);
1887                 ndev->ntb.ops = &intel_ntb3_ops;
1888 
1889                 rc = intel_ntb_init_pci(ndev, pdev);
1890                 if (rc)
1891                         goto err_init_pci;
1892 
1893                 rc = gen3_init_dev(ndev);
1894                 if (rc)
1895                         goto err_init_dev;
1896 
1897         } else {
1898                 rc = -EINVAL;
1899                 goto err_ndev;
1900         }
1901 
1902         ndev_reset_unsafe_flags(ndev);
1903 
1904         ndev->reg->poll_link(ndev);
1905 
1906         ndev_init_debugfs(ndev);
1907 
1908         rc = ntb_register_device(&ndev->ntb);
1909         if (rc)
1910                 goto err_register;
1911 
1912         dev_info(&pdev->dev, "NTB device registered.\n");
1913 
1914         return 0;
1915 
1916 err_register:
1917         ndev_deinit_debugfs(ndev);
1918         if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
1919                 xeon_deinit_dev(ndev);
1920 err_init_dev:
1921         intel_ntb_deinit_pci(ndev);
1922 err_init_pci:
1923         kfree(ndev);
1924 err_ndev:
1925         return rc;
1926 }
1927 
1928 static void intel_ntb_pci_remove(struct pci_dev *pdev)
1929 {
1930         struct intel_ntb_dev *ndev = pci_get_drvdata(pdev);
1931 
1932         ntb_unregister_device(&ndev->ntb);
1933         ndev_deinit_debugfs(ndev);
1934         if (pdev_is_gen1(pdev) || pdev_is_gen3(pdev))
1935                 xeon_deinit_dev(ndev);
1936         intel_ntb_deinit_pci(ndev);
1937         kfree(ndev);
1938 }
1939 
1940 static const struct intel_ntb_reg xeon_reg = {
1941         .poll_link              = xeon_poll_link,
1942         .link_is_up             = xeon_link_is_up,
1943         .db_ioread              = xeon_db_ioread,
1944         .db_iowrite             = xeon_db_iowrite,
1945         .db_size                = sizeof(u32),
1946         .ntb_ctl                = XEON_NTBCNTL_OFFSET,
1947         .mw_bar                 = {2, 4, 5},
1948 };
1949 
1950 static const struct intel_ntb_alt_reg xeon_pri_reg = {
1951         .db_bell                = XEON_PDOORBELL_OFFSET,
1952         .db_mask                = XEON_PDBMSK_OFFSET,
1953         .spad                   = XEON_SPAD_OFFSET,
1954 };
1955 
1956 static const struct intel_ntb_alt_reg xeon_sec_reg = {
1957         .db_bell                = XEON_SDOORBELL_OFFSET,
1958         .db_mask                = XEON_SDBMSK_OFFSET,
1959         /* second half of the scratchpads */
1960         .spad                   = XEON_SPAD_OFFSET + (XEON_SPAD_COUNT << 1),
1961 };
1962 
1963 static const struct intel_ntb_alt_reg xeon_b2b_reg = {
1964         .db_bell                = XEON_B2B_DOORBELL_OFFSET,
1965         .spad                   = XEON_B2B_SPAD_OFFSET,
1966 };
1967 
1968 static const struct intel_ntb_xlat_reg xeon_pri_xlat = {
1969         /* Note: no primary .bar0_base visible to the secondary side.
1970          *
1971          * The secondary side cannot get the base address stored in primary
1972          * bars.  The base address is necessary to set the limit register to
1973          * any value other than zero, or unlimited.
1974          *
1975          * WITHOUT THE BASE ADDRESS, THE SECONDARY SIDE CANNOT DISABLE the
1976          * window by setting the limit equal to base, nor can it limit the size
1977          * of the memory window by setting the limit to base + size.
1978          */
1979         .bar2_limit             = XEON_PBAR23LMT_OFFSET,
1980         .bar2_xlat              = XEON_PBAR23XLAT_OFFSET,
1981 };
1982 
1983 static const struct intel_ntb_xlat_reg xeon_sec_xlat = {
1984         .bar0_base              = XEON_SBAR0BASE_OFFSET,
1985         .bar2_limit             = XEON_SBAR23LMT_OFFSET,
1986         .bar2_xlat              = XEON_SBAR23XLAT_OFFSET,
1987 };
1988 
1989 struct intel_b2b_addr xeon_b2b_usd_addr = {
1990         .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
1991         .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
1992         .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
1993         .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
1994 };
1995 
1996 struct intel_b2b_addr xeon_b2b_dsd_addr = {
1997         .bar2_addr64            = XEON_B2B_BAR2_ADDR64,
1998         .bar4_addr64            = XEON_B2B_BAR4_ADDR64,
1999         .bar4_addr32            = XEON_B2B_BAR4_ADDR32,
2000         .bar5_addr32            = XEON_B2B_BAR5_ADDR32,
2001 };
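
/*
 * Editor's note: the usd and dsd tables default to identical addresses;
 * module parameters defined elsewhere in this file (e.g.
 * xeon_b2b_usd_bar2_addr64) allow either side's table to be overridden
 * at load time.
 */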
2002 
2003 /* operations for primary side of local ntb */
2004 static const struct ntb_dev_ops intel_ntb_ops = {
2005         .mw_count               = intel_ntb_mw_count,
2006         .mw_get_align           = intel_ntb_mw_get_align,
2007         .mw_set_trans           = intel_ntb_mw_set_trans,
2008         .peer_mw_count          = intel_ntb_peer_mw_count,
2009         .peer_mw_get_addr       = intel_ntb_peer_mw_get_addr,
2010         .link_is_up             = intel_ntb_link_is_up,
2011         .link_enable            = intel_ntb_link_enable,
2012         .link_disable           = intel_ntb_link_disable,
2013         .db_is_unsafe           = intel_ntb_db_is_unsafe,
2014         .db_valid_mask          = intel_ntb_db_valid_mask,
2015         .db_vector_count        = intel_ntb_db_vector_count,
2016         .db_vector_mask         = intel_ntb_db_vector_mask,
2017         .db_read                = intel_ntb_db_read,
2018         .db_clear               = intel_ntb_db_clear,
2019         .db_set_mask            = intel_ntb_db_set_mask,
2020         .db_clear_mask          = intel_ntb_db_clear_mask,
2021         .peer_db_addr           = intel_ntb_peer_db_addr,
2022         .peer_db_set            = intel_ntb_peer_db_set,
2023         .spad_is_unsafe         = intel_ntb_spad_is_unsafe,
2024         .spad_count             = intel_ntb_spad_count,
2025         .spad_read              = intel_ntb_spad_read,
2026         .spad_write             = intel_ntb_spad_write,
2027         .peer_spad_addr         = intel_ntb_peer_spad_addr,
2028         .peer_spad_read         = intel_ntb_peer_spad_read,
2029         .peer_spad_write        = intel_ntb_peer_spad_write,
2030 };
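
/*
 * Editor's sketch (not part of the driver): clients never call these ops
 * directly; they use the generic wrappers in <linux/ntb.h>, which dispatch
 * through ntb->ops.  The function below is hypothetical and compiled out.
 */
#if 0
static void example_client_ping(struct ntb_dev *ntb)
{
        ntb_spad_write(ntb, 0, 0x12345678);     /* -> intel_ntb_spad_write() */
        ntb_peer_db_set(ntb, BIT_ULL(0));       /* -> intel_ntb_peer_db_set() */
}
#endif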
2031 
2032 static const struct file_operations intel_ntb_debugfs_info = {
2033         .owner = THIS_MODULE,
2034         .open = simple_open,
2035         .read = ndev_debugfs_read,
2036 };
2037 
2038 static const struct pci_device_id intel_ntb_pci_tbl[] = {
2039         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_JSF)},
2040         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SNB)},
2041         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_IVT)},
2042         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_HSX)},
2043         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_BDX)},
2044         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_JSF)},
2045         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_SNB)},
2046         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_IVT)},
2047         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_HSX)},
2048         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_PS_BDX)},
2049         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_JSF)},
2050         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_SNB)},
2051         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_IVT)},
2052         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_HSX)},
2053         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_SS_BDX)},
2054         {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_NTB_B2B_SKX)},
2055         {0}
2056 };
2057 MODULE_DEVICE_TABLE(pci, intel_ntb_pci_tbl);
2058 
2059 static struct pci_driver intel_ntb_pci_driver = {
2060         .name = KBUILD_MODNAME,
2061         .id_table = intel_ntb_pci_tbl,
2062         .probe = intel_ntb_pci_probe,
2063         .remove = intel_ntb_pci_remove,
2064 };
2065 
2066 static int __init intel_ntb_pci_driver_init(void)
2067 {
2068         pr_info("%s %s\n", NTB_DESC, NTB_VER);
2069 
2070         if (debugfs_initialized())
2071                 debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
2072 
2073         return pci_register_driver(&intel_ntb_pci_driver);
2074 }
2075 module_init(intel_ntb_pci_driver_init);
2076 
2077 static void __exit intel_ntb_pci_driver_exit(void)
2078 {
2079         pci_unregister_driver(&intel_ntb_pci_driver);
2080 
2081         debugfs_remove_recursive(debugfs_dir);
2082 }
2083 module_exit(intel_ntb_pci_driver_exit);
