root/drivers/ntb/test/ntb_perf.c


DEFINITIONS

This source file includes the following definitions:
  1. perf_link_is_up
  2. perf_spad_cmd_send
  3. perf_spad_cmd_recv
  4. perf_msg_cmd_send
  5. perf_msg_cmd_recv
  6. perf_cmd_send
  7. perf_cmd_exec
  8. perf_cmd_recv
  9. perf_link_event
  10. perf_db_event
  11. perf_msg_event
  12. perf_free_outbuf
  13. perf_setup_outbuf
  14. perf_free_inbuf
  15. perf_setup_inbuf
  16. perf_service_work
  17. perf_init_service
  18. perf_enable_service
  19. perf_disable_service
  20. perf_dma_copy_callback
  21. perf_copy_chunk
  22. perf_dma_filter
  23. perf_init_test
  24. perf_run_test
  25. perf_sync_test
  26. perf_clear_test
  27. perf_thread_work
  28. perf_set_tcnt
  29. perf_terminate_test
  30. perf_submit_test
  31. perf_read_stats
  32. perf_init_threads
  33. perf_clear_threads
  34. perf_dbgfs_read_info
  35. perf_dbgfs_read_run
  36. perf_dbgfs_write_run
  37. perf_dbgfs_read_tcnt
  38. perf_dbgfs_write_tcnt
  39. perf_setup_dbgfs
  40. perf_clear_dbgfs
  41. perf_create_data
  42. perf_setup_peer_mw
  43. perf_init_peers
  44. perf_probe
  45. perf_remove
  46. perf_init
  47. perf_exit

   1 /*
   2  * This file is provided under a dual BSD/GPLv2 license.  When using or
   3  *   redistributing this file, you may do so under either license.
   4  *
   5  *   GPL LICENSE SUMMARY
   6  *
   7  *   Copyright(c) 2015 Intel Corporation. All rights reserved.
   8  *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
   9  *
  10  *   This program is free software; you can redistribute it and/or modify
  11  *   it under the terms of version 2 of the GNU General Public License as
  12  *   published by the Free Software Foundation.
  13  *
  14  *   BSD LICENSE
  15  *
  16  *   Copyright(c) 2015 Intel Corporation. All rights reserved.
  17  *   Copyright(c) 2017 T-Platforms. All Rights Reserved.
  18  *
  19  *   Redistribution and use in source and binary forms, with or without
  20  *   modification, are permitted provided that the following conditions
  21  *   are met:
  22  *
  23  *     * Redistributions of source code must retain the above copyright
  24  *       notice, this list of conditions and the following disclaimer.
  25  *     * Redistributions in binary form must reproduce the above copyright
  26  *       notice, this list of conditions and the following disclaimer in
  27  *       the documentation and/or other materials provided with the
  28  *       distribution.
  29  *     * Neither the name of Intel Corporation nor the names of its
  30  *       contributors may be used to endorse or promote products derived
  31  *       from this software without specific prior written permission.
  32  *
  33  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  34  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  35  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  36  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  37  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  38  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  39  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  40  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  41  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  42  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  43  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  44  *
  45  * PCIe NTB Perf Linux driver
  46  */
  47 
  48 /*
  49  * How to use this tool, by example.
  50  *
  51  * Assuming $DBG_DIR is something like:
  52  * '/sys/kernel/debug/ntb_perf/0000:00:03.0'
  53  * Suppose that, aside from the local device, there is at least one
  54  * remote device connected to the NTB, with peer index 0.
  55  *-----------------------------------------------------------------------------
  56  * E.g.: install the driver with the specified chunk/total orders and the DMA-enabled flag
  57  *
  58  * root@self# insmod ntb_perf.ko chunk_order=19 total_order=28 use_dma
  59  *-----------------------------------------------------------------------------
  60  * E.g.: check the NTB ports (indexes) and MW mapping information
  61  *
  62  * root@self# cat $DBG_DIR/info
  63  *-----------------------------------------------------------------------------
  64  * E.g.: start a performance test with the peer (index 0) and get the test metrics
  65  *
  66  * root@self# echo 0 > $DBG_DIR/run
  67  * root@self# cat $DBG_DIR/run
  68  */
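
/*
 * E.g.: tune the number of measuring threads before starting the test.
 * A usage sketch based on the 'threads_count' debugfs node created by
 * perf_setup_dbgfs() below:
 *
 * root@self# echo 4 > $DBG_DIR/threads_count
 * root@self# echo 0 > $DBG_DIR/run
 */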
  69 
  70 #include <linux/init.h>
  71 #include <linux/kernel.h>
  72 #include <linux/module.h>
  73 #include <linux/sched.h>
  74 #include <linux/wait.h>
  75 #include <linux/dma-mapping.h>
  76 #include <linux/dmaengine.h>
  77 #include <linux/pci.h>
  78 #include <linux/ktime.h>
  79 #include <linux/slab.h>
  80 #include <linux/delay.h>
  81 #include <linux/sizes.h>
  82 #include <linux/workqueue.h>
  83 #include <linux/debugfs.h>
  84 #include <linux/random.h>
  85 #include <linux/ntb.h>
  86 
  87 #define DRIVER_NAME             "ntb_perf"
  88 #define DRIVER_VERSION          "2.0"
  89 
  90 MODULE_LICENSE("Dual BSD/GPL");
  91 MODULE_VERSION(DRIVER_VERSION);
  92 MODULE_AUTHOR("Dave Jiang <dave.jiang@intel.com>");
  93 MODULE_DESCRIPTION("PCIe NTB Performance Measurement Tool");
  94 
  95 #define MAX_THREADS_CNT         32
  96 #define DEF_THREADS_CNT         1
  97 #define MAX_CHUNK_SIZE          SZ_1M
  98 #define MAX_CHUNK_ORDER         20 /* no larger than 1M */
  99 
 100 #define DMA_TRIES               100
 101 #define DMA_MDELAY              10
 102 
 103 #define MSG_TRIES               1000
 104 #define MSG_UDELAY_LOW          1000
 105 #define MSG_UDELAY_HIGH         2000
 106 
 107 #define PERF_BUF_LEN 1024
 108 
 109 static unsigned long max_mw_size;
 110 module_param(max_mw_size, ulong, 0644);
 111 MODULE_PARM_DESC(max_mw_size, "Upper limit of memory window size");
 112 
 113 static unsigned char chunk_order = 19; /* 512K */
 114 module_param(chunk_order, byte, 0644);
 115 MODULE_PARM_DESC(chunk_order, "Data chunk order [2^n] to transfer");
 116 
 117 static unsigned char total_order = 30; /* 1G */
 118 module_param(total_order, byte, 0644);
 119 MODULE_PARM_DESC(total_order, "Total data order [2^n] to transfer");
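
/*
 * A worked example with the defaults above: each measuring thread copies
 * 2^(total_order - chunk_order) = 2^(30 - 19) = 2048 chunks of 512KiB each,
 * i.e. 1GiB in total (see perf_run_test() below), cycling over the peer
 * outbound buffer if the buffer is smaller than the total size.
 */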
 120 
 121 static bool use_dma; /* default to 0 */
 122 module_param(use_dma, bool, 0644);
 123 MODULE_PARM_DESC(use_dma, "Use DMA engine to measure performance");
 124 
 125 /*==============================================================================
 126  *                         Perf driver data definition
 127  *==============================================================================
 128  */
 129 
 130 enum perf_cmd {
 131         PERF_CMD_INVAL = -1, /* invalid spad command */
 132         PERF_CMD_SSIZE = 0, /* send out buffer size */
 133         PERF_CMD_RSIZE = 1, /* recv in  buffer size */
 134         PERF_CMD_SXLAT = 2, /* send in  buffer xlat */
 135         PERF_CMD_RXLAT = 3, /* recv out buffer xlat */
 136         PERF_CMD_CLEAR = 4, /* clear allocated memory */
 137         PERF_STS_DONE  = 5, /* init is done */
 138         PERF_STS_LNKUP = 6, /* link up state flag */
 139 };
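
/*
 * Command handshake sketch, as implemented by perf_cmd_recv() and
 * perf_service_work() below: on link-up each side sends PERF_CMD_SSIZE
 * with its outbuf size; the receiver handles it as PERF_CMD_RSIZE,
 * allocates the inbuf and replies with PERF_CMD_SXLAT carrying the inbuf
 * DMA address; that in turn is handled as PERF_CMD_RXLAT, which programs
 * the outbound translation and sets PERF_STS_DONE.
 */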
 140 
 141 struct perf_ctx;
 142 
 143 struct perf_peer {
 144         struct perf_ctx *perf;
 145         int pidx;
 146         int gidx;
 147 
 148         /* Outbound MW params */
 149         u64 outbuf_xlat;
 150         resource_size_t outbuf_size;
 151         void __iomem *outbuf;
 152 
 153         /* Inbound MW params */
 154         dma_addr_t inbuf_xlat;
 155         resource_size_t inbuf_size;
 156         void            *inbuf;
 157 
 158         /* NTB connection setup service */
 159         struct work_struct      service;
 160         unsigned long           sts;
 161 };
 162 #define to_peer_service(__work) \
 163         container_of(__work, struct perf_peer, service)
 164 
 165 struct perf_thread {
 166         struct perf_ctx *perf;
 167         int tidx;
 168 
 169         /* DMA-based test sync parameters */
 170         atomic_t dma_sync;
 171         wait_queue_head_t dma_wait;
 172         struct dma_chan *dma_chan;
 173 
 174         /* Data source and measured statistics */
 175         void *src;
 176         u64 copied;
 177         ktime_t duration;
 178         int status;
 179         struct work_struct work;
 180 };
 181 #define to_thread_work(__work) \
 182         container_of(__work, struct perf_thread, work)
 183 
 184 struct perf_ctx {
 185         struct ntb_dev *ntb;
 186 
 187         /* Global device index and peers descriptors */
 188         int gidx;
 189         int pcnt;
 190         struct perf_peer *peers;
 191 
 192         /* Performance measuring work-threads interface */
 193         unsigned long busy_flag;
 194         wait_queue_head_t twait;
 195         atomic_t tsync;
 196         u8 tcnt;
 197         struct perf_peer *test_peer;
 198         struct perf_thread threads[MAX_THREADS_CNT];
 199 
 200         /* Scratchpad/Message IO operations */
 201         int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
 202         int (*cmd_recv)(struct perf_ctx *perf, int *pidx, enum perf_cmd *cmd,
 203                         u64 *data);
 204 
 205         struct dentry *dbgfs_dir;
 206 };
 207 
 208 /*
 209  * Scratchpads-base commands interface
 210  */
 211 #define PERF_SPAD_CNT(_pcnt) \
 212         (3*((_pcnt) + 1))
 213 #define PERF_SPAD_CMD(_gidx) \
 214         (3*(_gidx))
 215 #define PERF_SPAD_LDATA(_gidx) \
 216         (3*(_gidx) + 1)
 217 #define PERF_SPAD_HDATA(_gidx) \
 218         (3*(_gidx) + 2)
 219 #define PERF_SPAD_NOTIFY(_gidx) \
 220         (BIT_ULL(_gidx))
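
/*
 * E.g.: a two-peer setup (_pcnt == 2) needs PERF_SPAD_CNT(2) == 9
 * scratchpads - three per global index - so gidx 1 owns spad[3] (CMD),
 * spad[4] (LDATA) and spad[5] (HDATA).
 */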
 221 
 222 /*
 223  * Messages-base commands interface
 224  */
 225 #define PERF_MSG_CNT            3
 226 #define PERF_MSG_CMD            0
 227 #define PERF_MSG_LDATA          1
 228 #define PERF_MSG_HDATA          2
 229 
 230 /*==============================================================================
 231  *                           Static data declarations
 232  *==============================================================================
 233  */
 234 
 235 static struct dentry *perf_dbgfs_topdir;
 236 
 237 static struct workqueue_struct *perf_wq __read_mostly;
 238 
 239 /*==============================================================================
 240  *                  NTB cross-link commands execution service
 241  *==============================================================================
 242  */
 243 
 244 static void perf_terminate_test(struct perf_ctx *perf);
 245 
 246 static inline bool perf_link_is_up(struct perf_peer *peer)
 247 {
 248         u64 link;
 249 
 250         link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
 251         return !!(link & BIT_ULL_MASK(peer->pidx));
 252 }
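
/*
 * Note: ntb_link_is_up() returns a bitmap of per-peer link states, so,
 * e.g., a value of 0b101 with three peers means the links to peers 0 and
 * 2 are up while the link to peer 1 is down.
 */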
 253 
 254 static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
 255                               u64 data)
 256 {
 257         struct perf_ctx *perf = peer->perf;
 258         int try;
 259         u32 sts;
 260 
 261         dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
 262 
 263         /*
 264          * Perform a predefined number of attempts before giving up.
 265          * The data is sent to the port-specific scratchpad so as to
 266          * prevent a multi-port access race condition. Additionally,
 267          * there is no need for local locking, since only the
 268          * thread-safe service work uses this method.
 269          */
 270         for (try = 0; try < MSG_TRIES; try++) {
 271                 if (!perf_link_is_up(peer))
 272                         return -ENOLINK;
 273 
 274                 sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
 275                                          PERF_SPAD_CMD(perf->gidx));
 276                 if (sts != PERF_CMD_INVAL) {
 277                         usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
 278                         continue;
 279                 }
 280 
 281                 ntb_peer_spad_write(perf->ntb, peer->pidx,
 282                                     PERF_SPAD_LDATA(perf->gidx),
 283                                     lower_32_bits(data));
 284                 ntb_peer_spad_write(perf->ntb, peer->pidx,
 285                                     PERF_SPAD_HDATA(perf->gidx),
 286                                     upper_32_bits(data));
 287                 ntb_peer_spad_write(perf->ntb, peer->pidx,
 288                                     PERF_SPAD_CMD(perf->gidx),
 289                                     cmd);
 290                 ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
 291 
 292                 dev_dbg(&perf->ntb->dev, "DB ring peer %#llx\n",
 293                         PERF_SPAD_NOTIFY(peer->gidx));
 294 
 295                 break;
 296         }
 297 
 298         return try < MSG_TRIES ? 0 : -EAGAIN;
 299 }
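
/*
 * E.g.: a 64-bit payload such as 0x123456789abcdef0 is split above into
 * LDATA 0x9abcdef0 and HDATA 0x12345678, and is reassembled on the
 * receiving side by perf_spad_cmd_recv() below.
 */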
 300 
 301 static int perf_spad_cmd_recv(struct perf_ctx *perf, int *pidx,
 302                               enum perf_cmd *cmd, u64 *data)
 303 {
 304         struct perf_peer *peer;
 305         u32 val;
 306 
 307         ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
 308 
 309         /*
 310          * Start scanning from the beginning, since the cleared DB bit
 311          * may have been set by any peer. Yes, this gives peers with
 312          * smaller indexes a greater service priority, but it keeps the
 313          * spad and message code unified and simple.
 314          */
 315         for (*pidx = 0; *pidx < perf->pcnt; (*pidx)++) {
 316                 peer = &perf->peers[*pidx];
 317 
 318                 if (!perf_link_is_up(peer))
 319                         continue;
 320 
 321                 val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
 322                 if (val == PERF_CMD_INVAL)
 323                         continue;
 324 
 325                 *cmd = val;
 326 
 327                 val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
 328                 *data = val;
 329 
 330                 val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
 331                 *data |= (u64)val << 32;
 332 
 333                 /* The next command can be retrieved from now on */
 334                 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
 335                                PERF_CMD_INVAL);
 336 
 337                 dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
 338 
 339                 return 0;
 340         }
 341 
 342         return -ENODATA;
 343 }
 344 
 345 static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
 346                              u64 data)
 347 {
 348         struct perf_ctx *perf = peer->perf;
 349         int try, ret;
 350         u64 outbits;
 351 
 352         dev_dbg(&perf->ntb->dev, "CMD send: %d 0x%llx\n", cmd, data);
 353 
 354         /*
 355          * Perform a predefined number of attempts before giving up.
 356          * Message registers are free of race conditions when accessed
 357          * from different ports, so the registers don't need to be split
 358          * by global device index. Local locking isn't needed either,
 359          * since this method is used from the service work only.
 360          */
 361         outbits = ntb_msg_outbits(perf->ntb);
 362         for (try = 0; try < MSG_TRIES; try++) {
 363                 if (!perf_link_is_up(peer))
 364                         return -ENOLINK;
 365 
 366                 ret = ntb_msg_clear_sts(perf->ntb, outbits);
 367                 if (ret)
 368                         return ret;
 369 
 370                 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
 371                                    lower_32_bits(data));
 372 
 373                 if (ntb_msg_read_sts(perf->ntb) & outbits) {
 374                         usleep_range(MSG_UDELAY_LOW, MSG_UDELAY_HIGH);
 375                         continue;
 376                 }
 377 
 378                 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
 379                                    upper_32_bits(data));
 380 
 381                 /* This call shall trigger the peer message event */
 382                 ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
 383 
 384                 break;
 385         }
 386 
 387         return try < MSG_TRIES ? 0 : -EAGAIN;
 388 }
 389 
 390 static int perf_msg_cmd_recv(struct perf_ctx *perf, int *pidx,
 391                              enum perf_cmd *cmd, u64 *data)
 392 {
 393         u64 inbits;
 394         u32 val;
 395 
 396         inbits = ntb_msg_inbits(perf->ntb);
 397 
 398         if (hweight64(ntb_msg_read_sts(perf->ntb) & inbits) < 3)
 399                 return -ENODATA;
 400 
 401         val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_CMD);
 402         *cmd = val;
 403 
 404         val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_LDATA);
 405         *data = val;
 406 
 407         val = ntb_msg_read(perf->ntb, pidx, PERF_MSG_HDATA);
 408         *data |= (u64)val << 32;
 409 
 410         /* The next command can be retrieved from now on */
 411         ntb_msg_clear_sts(perf->ntb, inbits);
 412 
 413         dev_dbg(&perf->ntb->dev, "CMD recv: %d 0x%llx\n", *cmd, *data);
 414 
 415         return 0;
 416 }
 417 
 418 static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
 419 {
 420         struct perf_ctx *perf = peer->perf;
 421 
 422         if (cmd == PERF_CMD_SSIZE || cmd == PERF_CMD_SXLAT)
 423                 return perf->cmd_send(peer, cmd, data);
 424 
 425         dev_err(&perf->ntb->dev, "Send invalid command\n");
 426         return -EINVAL;
 427 }
 428 
 429 static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
 430 {
 431         switch (cmd) {
 432         case PERF_CMD_SSIZE:
 433         case PERF_CMD_RSIZE:
 434         case PERF_CMD_SXLAT:
 435         case PERF_CMD_RXLAT:
 436         case PERF_CMD_CLEAR:
 437                 break;
 438         default:
 439                 dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
 440                 return -EINVAL;
 441         }
 442 
 443         /* No need for a memory barrier, since bit ops have internal locking */
 444         set_bit(cmd, &peer->sts);
 445 
 446         dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);
 447 
 448         (void)queue_work(system_highpri_wq, &peer->service);
 449 
 450         return 0;
 451 }
 452 
 453 static int perf_cmd_recv(struct perf_ctx *perf)
 454 {
 455         struct perf_peer *peer;
 456         int ret, pidx, cmd;
 457         u64 data;
 458 
 459         while (!(ret = perf->cmd_recv(perf, &pidx, &cmd, &data))) {
 460                 peer = &perf->peers[pidx];
 461 
 462                 switch (cmd) {
 463                 case PERF_CMD_SSIZE:
 464                         peer->inbuf_size = data;
 465                         return perf_cmd_exec(peer, PERF_CMD_RSIZE);
 466                 case PERF_CMD_SXLAT:
 467                         peer->outbuf_xlat = data;
 468                         return perf_cmd_exec(peer, PERF_CMD_RXLAT);
 469                 default:
 470                         dev_err(&perf->ntb->dev, "Recv invalid command\n");
 471                         return -EINVAL;
 472                 }
 473         }
 474 
 475         /* Return 0 if no data left to process, otherwise an error */
 476         return ret == -ENODATA ? 0 : ret;
 477 }
 478 
 479 static void perf_link_event(void *ctx)
 480 {
 481         struct perf_ctx *perf = ctx;
 482         struct perf_peer *peer;
 483         bool lnk_up;
 484         int pidx;
 485 
 486         for (pidx = 0; pidx < perf->pcnt; pidx++) {
 487                 peer = &perf->peers[pidx];
 488 
 489                 lnk_up = perf_link_is_up(peer);
 490 
 491                 if (lnk_up &&
 492                     !test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
 493                         perf_cmd_exec(peer, PERF_CMD_SSIZE);
 494                 } else if (!lnk_up &&
 495                            test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
 496                         perf_cmd_exec(peer, PERF_CMD_CLEAR);
 497                 }
 498         }
 499 }
 500 
 501 static void perf_db_event(void *ctx, int vec)
 502 {
 503         struct perf_ctx *perf = ctx;
 504 
 505         dev_dbg(&perf->ntb->dev, "DB vec %d mask %#llx bits %#llx\n", vec,
 506                 ntb_db_vector_mask(perf->ntb, vec), ntb_db_read(perf->ntb));
 507 
 508         /* Just receive all available commands */
 509         (void)perf_cmd_recv(perf);
 510 }
 511 
 512 static void perf_msg_event(void *ctx)
 513 {
 514         struct perf_ctx *perf = ctx;
 515 
 516         dev_dbg(&perf->ntb->dev, "Msg status bits %#llx\n",
 517                 ntb_msg_read_sts(perf->ntb));
 518 
 519         /* Messages are only sent one-by-one */
 520         (void)perf_cmd_recv(perf);
 521 }
 522 
 523 static const struct ntb_ctx_ops perf_ops = {
 524         .link_event = perf_link_event,
 525         .db_event = perf_db_event,
 526         .msg_event = perf_msg_event
 527 };
 528 
 529 static void perf_free_outbuf(struct perf_peer *peer)
 530 {
 531         (void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
 532 }
 533 
 534 static int perf_setup_outbuf(struct perf_peer *peer)
 535 {
 536         struct perf_ctx *perf = peer->perf;
 537         int ret;
 538 
 539         /* Outbuf size can be unaligned due to custom max_mw_size */
 540         ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
 541                                     peer->outbuf_xlat, peer->outbuf_size);
 542         if (ret) {
 543                 dev_err(&perf->ntb->dev, "Failed to set outbuf translation\n");
 544                 return ret;
 545         }
 546 
 547         /* Initialization is finally done */
 548         set_bit(PERF_STS_DONE, &peer->sts);
 549 
 550         return 0;
 551 }
 552 
 553 static void perf_free_inbuf(struct perf_peer *peer)
 554 {
 555         if (!peer->inbuf)
 556                 return;
 557 
 558         (void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
 559         dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
 560                           peer->inbuf, peer->inbuf_xlat);
 561         peer->inbuf = NULL;
 562 }
 563 
 564 static int perf_setup_inbuf(struct perf_peer *peer)
 565 {
 566         resource_size_t xlat_align, size_align, size_max;
 567         struct perf_ctx *perf = peer->perf;
 568         int ret;
 569 
 570         /* Get inbound MW parameters */
 571         ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
 572                                &xlat_align, &size_align, &size_max);
 573         if (ret) {
 574                 dev_err(&perf->ntb->dev, "Couldn't get inbuf restrictions\n");
 575                 return ret;
 576         }
 577 
 578         if (peer->inbuf_size > size_max) {
 579                 dev_err(&perf->ntb->dev, "Too big inbuf size %pa > %pa\n",
 580                         &peer->inbuf_size, &size_max);
 581                 return -EINVAL;
 582         }
 583 
 584         peer->inbuf_size = round_up(peer->inbuf_size, size_align);
 585 
 586         perf_free_inbuf(peer);
 587 
 588         peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
 589                                          &peer->inbuf_xlat, GFP_KERNEL);
 590         if (!peer->inbuf) {
 591                 dev_err(&perf->ntb->dev, "Failed to alloc inbuf of %pa\n",
 592                         &peer->inbuf_size);
 593                 return -ENOMEM;
 594         }
 595         if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
                     ret = -EINVAL; /* ret still holds 0 from ntb_mw_get_align() */
 596                 dev_err(&perf->ntb->dev, "Unaligned inbuf allocated\n");
 597                 goto err_free_inbuf;
 598         }
 599 
 600         ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
 601                                peer->inbuf_xlat, peer->inbuf_size);
 602         if (ret) {
 603                 dev_err(&perf->ntb->dev, "Failed to set inbuf translation\n");
 604                 goto err_free_inbuf;
 605         }
 606 
 607         /*
 608          * We submit the inbuf xlat transmission cmd here to follow the
 609          * code architecture, even though this method is called from the
 610          * service work itself, so the command executes right after return.
 611          */
 612         (void)perf_cmd_exec(peer, PERF_CMD_SXLAT);
 613 
 614         return 0;
 615 
 616 err_free_inbuf:
 617         perf_free_inbuf(peer);
 618 
 619         return ret;
 620 }
 621 
 622 static void perf_service_work(struct work_struct *work)
 623 {
 624         struct perf_peer *peer = to_peer_service(work);
 625 
 626         if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
 627                 perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);
 628 
 629         if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
 630                 perf_setup_inbuf(peer);
 631 
 632         if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
 633                 perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);
 634 
 635         if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
 636                 perf_setup_outbuf(peer);
 637 
 638         if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
 639                 clear_bit(PERF_STS_DONE, &peer->sts);
 640                 if (test_bit(0, &peer->perf->busy_flag) &&
 641                     peer == peer->perf->test_peer) {
 642                         dev_warn(&peer->perf->ntb->dev,
 643                                 "Freeing while test on-fly\n");
 644                         perf_terminate_test(peer->perf);
 645                 }
 646                 perf_free_outbuf(peer);
 647                 perf_free_inbuf(peer);
 648         }
 649 }
 650 
 651 static int perf_init_service(struct perf_ctx *perf)
 652 {
 653         u64 mask;
 654 
 655         if (ntb_peer_mw_count(perf->ntb) < perf->pcnt + 1) {
 656                 dev_err(&perf->ntb->dev, "Not enough memory windows\n");
 657                 return -EINVAL;
 658         }
 659 
 660         if (ntb_msg_count(perf->ntb) >= PERF_MSG_CNT) {
 661                 perf->cmd_send = perf_msg_cmd_send;
 662                 perf->cmd_recv = perf_msg_cmd_recv;
 663 
 664                 dev_dbg(&perf->ntb->dev, "Message service initialized\n");
 665 
 666                 return 0;
 667         }
 668 
 669         dev_dbg(&perf->ntb->dev, "Message service unsupported\n");
 670 
 671         mask = GENMASK_ULL(perf->pcnt, 0);
 672         if (ntb_spad_count(perf->ntb) >= PERF_SPAD_CNT(perf->pcnt) &&
 673             (ntb_db_valid_mask(perf->ntb) & mask) == mask) {
 674                 perf->cmd_send = perf_spad_cmd_send;
 675                 perf->cmd_recv = perf_spad_cmd_recv;
 676 
 677                 dev_dbg(&perf->ntb->dev, "Scratchpad service initialized\n");
 678 
 679                 return 0;
 680         }
 681 
 682         dev_dbg(&perf->ntb->dev, "Scratchpad service unsupported\n");
 683 
 684         dev_err(&perf->ntb->dev, "Command services unsupported\n");
 685 
 686         return -EINVAL;
 687 }
 688 
 689 static int perf_enable_service(struct perf_ctx *perf)
 690 {
 691         u64 mask, incmd_bit;
 692         int ret, sidx, scnt;
 693 
 694         mask = ntb_db_valid_mask(perf->ntb);
 695         (void)ntb_db_set_mask(perf->ntb, mask);
 696 
 697         ret = ntb_set_ctx(perf->ntb, perf, &perf_ops);
 698         if (ret)
 699                 return ret;
 700 
 701         if (perf->cmd_send == perf_msg_cmd_send) {
 702                 u64 inbits, outbits;
 703 
 704                 inbits = ntb_msg_inbits(perf->ntb);
 705                 outbits = ntb_msg_outbits(perf->ntb);
 706                 (void)ntb_msg_set_mask(perf->ntb, inbits | outbits);
 707 
 708                 incmd_bit = BIT_ULL(__ffs64(inbits));
 709                 ret = ntb_msg_clear_mask(perf->ntb, incmd_bit);
 710 
 711                 dev_dbg(&perf->ntb->dev, "MSG sts unmasked %#llx\n", incmd_bit);
 712         } else {
 713                 scnt = ntb_spad_count(perf->ntb);
 714                 for (sidx = 0; sidx < scnt; sidx++)
 715                         ntb_spad_write(perf->ntb, sidx, PERF_CMD_INVAL);
 716                 incmd_bit = PERF_SPAD_NOTIFY(perf->gidx);
 717                 ret = ntb_db_clear_mask(perf->ntb, incmd_bit);
 718 
 719                 dev_dbg(&perf->ntb->dev, "DB bits unmasked %#llx\n", incmd_bit);
 720         }
 721         if (ret) {
 722                 ntb_clear_ctx(perf->ntb);
 723                 return ret;
 724         }
 725 
 726         ntb_link_enable(perf->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
 727         /* Might not be necessary */
 728         ntb_link_event(perf->ntb);
 729 
 730         return 0;
 731 }
 732 
 733 static void perf_disable_service(struct perf_ctx *perf)
 734 {
 735         int pidx;
 736 
 737         if (perf->cmd_send == perf_msg_cmd_send) {
 738                 u64 inbits;
 739 
 740                 inbits = ntb_msg_inbits(perf->ntb);
 741                 (void)ntb_msg_set_mask(perf->ntb, inbits);
 742         } else {
 743                 (void)ntb_db_set_mask(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
 744         }
 745 
 746         ntb_clear_ctx(perf->ntb);
 747 
 748         for (pidx = 0; pidx < perf->pcnt; pidx++)
 749                 perf_cmd_exec(&perf->peers[pidx], PERF_CMD_CLEAR);
 750 
 751         for (pidx = 0; pidx < perf->pcnt; pidx++)
 752                 flush_work(&perf->peers[pidx].service);
 753 
 754         for (pidx = 0; pidx < perf->pcnt; pidx++) {
 755                 struct perf_peer *peer = &perf->peers[pidx];
 756 
 757                 ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
 758         }
 759 
 760         ntb_db_clear(perf->ntb, PERF_SPAD_NOTIFY(perf->gidx));
 761 
 762         ntb_link_disable(perf->ntb);
 763 }
 764 
 765 /*==============================================================================
 766  *                      Performance measuring work-thread
 767  *==============================================================================
 768  */
 769 
 770 static void perf_dma_copy_callback(void *data)
 771 {
 772         struct perf_thread *pthr = data;
 773 
 774         atomic_dec(&pthr->dma_sync);
 775         wake_up(&pthr->dma_wait);
 776 }
 777 
 778 static int perf_copy_chunk(struct perf_thread *pthr,
 779                            void __iomem *dst, void *src, size_t len)
 780 {
 781         struct dma_async_tx_descriptor *tx;
 782         struct dmaengine_unmap_data *unmap;
 783         struct device *dma_dev;
 784         int try = 0, ret = 0;
 785 
 786         if (!use_dma) {
 787                 memcpy_toio(dst, src, len);
 788                 goto ret_check_tsync;
 789         }
 790 
 791         dma_dev = pthr->dma_chan->device->dev;
 792 
 793         if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src),
 794                                  offset_in_page(dst), len))
 795                 return -EIO;
 796 
 797         unmap = dmaengine_get_unmap_data(dma_dev, 2, GFP_NOWAIT);
 798         if (!unmap)
 799                 return -ENOMEM;
 800 
 801         unmap->len = len;
 802         unmap->addr[0] = dma_map_page(dma_dev, virt_to_page(src),
 803                 offset_in_page(src), len, DMA_TO_DEVICE);
 804         if (dma_mapping_error(dma_dev, unmap->addr[0])) {
 805                 ret = -EIO;
 806                 goto err_free_resource;
 807         }
 808         unmap->to_cnt = 1;
 809 
 810         unmap->addr[1] = dma_map_page(dma_dev, virt_to_page(dst),
 811                 offset_in_page(dst), len, DMA_FROM_DEVICE);
 812         if (dma_mapping_error(dma_dev, unmap->addr[1])) {
 813                 ret = -EIO;
 814                 goto err_free_resource;
 815         }
 816         unmap->from_cnt = 1;
 817 
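              /* Retry the allocation; the engine may be briefly out of descriptors */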
 818         do {
 819                 tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, unmap->addr[1],
 820                         unmap->addr[0], len, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 821                 if (!tx)
 822                         msleep(DMA_MDELAY);
 823         } while (!tx && (try++ < DMA_TRIES));
 824 
 825         if (!tx) {
 826                 ret = -EIO;
 827                 goto err_free_resource;
 828         }
 829 
 830         tx->callback = perf_dma_copy_callback;
 831         tx->callback_param = pthr;
 832         dma_set_unmap(tx, unmap);
 833 
 834         ret = dma_submit_error(dmaengine_submit(tx));
 835         if (ret) {
 836                 dmaengine_unmap_put(unmap);
 837                 goto err_free_resource;
 838         }
 839 
 840         dmaengine_unmap_put(unmap);
 841 
 842         atomic_inc(&pthr->dma_sync);
 843         dma_async_issue_pending(pthr->dma_chan);
 844 
 845 ret_check_tsync:
 846         return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR;
 847 
 848 err_free_resource:
 849         dmaengine_unmap_put(unmap);
 850 
 851         return ret;
 852 }
 853 
 854 static bool perf_dma_filter(struct dma_chan *chan, void *data)
 855 {
 856         struct perf_ctx *perf = data;
 857         int node;
 858 
 859         node = dev_to_node(&perf->ntb->dev);
 860 
 861         return node == NUMA_NO_NODE || node == dev_to_node(chan->device->dev);
 862 }
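
/*
 * The filter above prefers DMA channels residing on the same NUMA node
 * as the NTB device (any channel is accepted if the node is unknown);
 * e.g. on a two-socket system a channel from the other node is rejected.
 */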
 863 
 864 static int perf_init_test(struct perf_thread *pthr)
 865 {
 866         struct perf_ctx *perf = pthr->perf;
 867         dma_cap_mask_t dma_mask;
 868 
 869         pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL,
 870                                  dev_to_node(&perf->ntb->dev));
 871         if (!pthr->src)
 872                 return -ENOMEM;
 873 
 874         get_random_bytes(pthr->src, perf->test_peer->outbuf_size);
 875 
 876         if (!use_dma)
 877                 return 0;
 878 
 879         dma_cap_zero(dma_mask);
 880         dma_cap_set(DMA_MEMCPY, dma_mask);
 881         pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf);
 882         if (!pthr->dma_chan) {
 883                 dev_err(&perf->ntb->dev, "%d: Failed to get DMA channel\n",
 884                         pthr->tidx);
 885                 atomic_dec(&perf->tsync);
 886                 wake_up(&perf->twait);
 887                 kfree(pthr->src);
 888                 return -ENODEV;
 889         }
 890 
 891         atomic_set(&pthr->dma_sync, 0);
 892 
 893         return 0;
 894 }
 895 
 896 static int perf_run_test(struct perf_thread *pthr)
 897 {
 898         struct perf_peer *peer = pthr->perf->test_peer;
 899         struct perf_ctx *perf = pthr->perf;
 900         void __iomem *flt_dst, *bnd_dst;
 901         u64 total_size, chunk_size;
 902         void *flt_src;
 903         int ret = 0;
 904 
 905         total_size = 1ULL << total_order;
 906         chunk_size = 1ULL << chunk_order;
 907         chunk_size = min_t(u64, peer->outbuf_size, chunk_size);
 908 
 909         flt_src = pthr->src;
 910         bnd_dst = peer->outbuf + peer->outbuf_size;
 911         flt_dst = peer->outbuf;
 912 
 913         pthr->duration = ktime_get();
 914 
 915         /* The copied field is cleared at the test launch stage */
 916         while (pthr->copied < total_size) {
 917                 ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
 918                 if (ret) {
 919                         dev_err(&perf->ntb->dev, "%d: Got error %d on test\n",
 920                                 pthr->tidx, ret);
 921                         return ret;
 922                 }
 923 
 924                 pthr->copied += chunk_size;
 925 
 926                 flt_dst += chunk_size;
 927                 flt_src += chunk_size;
 928                 if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
 929                         flt_dst = peer->outbuf;
 930                         flt_src = pthr->src;
 931                 }
 932 
 933                 /* Yield the CPU to give other threads a chance to run */
 934                 schedule();
 935         }
 936 
 937         return 0;
 938 }
 939 
 940 static int perf_sync_test(struct perf_thread *pthr)
 941 {
 942         struct perf_ctx *perf = pthr->perf;
 943 
 944         if (!use_dma)
 945                 goto no_dma_ret;
 946 
 947         wait_event(pthr->dma_wait,
 948                    (atomic_read(&pthr->dma_sync) == 0 ||
 949                     atomic_read(&perf->tsync) < 0));
 950 
 951         if (atomic_read(&perf->tsync) < 0)
 952                 return -EINTR;
 953 
 954 no_dma_ret:
 955         pthr->duration = ktime_sub(ktime_get(), pthr->duration);
 956 
 957         dev_dbg(&perf->ntb->dev, "%d: copied %llu bytes\n",
 958                 pthr->tidx, pthr->copied);
 959 
 960         dev_dbg(&perf->ntb->dev, "%d: lasted %llu usecs\n",
 961                 pthr->tidx, ktime_to_us(pthr->duration));
 962 
 963         dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
 964                 div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
 965 
 966         return 0;
 967 }
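
/*
 * A note on the units above: the throughput is computed as bytes copied
 * per microsecond, which equals decimal MBytes/s. E.g., 2^30 bytes copied
 * in 1000000 usecs is reported as 1073 MBytes/s.
 */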
 968 
 969 static void perf_clear_test(struct perf_thread *pthr)
 970 {
 971         struct perf_ctx *perf = pthr->perf;
 972 
 973         if (!use_dma)
 974                 goto no_dma_notify;
 975 
 976         /*
 977          * If the test finished without errors, termination isn't needed.
 978          * We call it anyway just to be sure all transfers have completed.
 979          */
 980         (void)dmaengine_terminate_sync(pthr->dma_chan);
 981 
 982         dma_release_channel(pthr->dma_chan);
 983 
 984 no_dma_notify:
 985         atomic_dec(&perf->tsync);
 986         wake_up(&perf->twait);
 987         kfree(pthr->src);
 988 }
 989 
 990 static void perf_thread_work(struct work_struct *work)
 991 {
 992         struct perf_thread *pthr = to_thread_work(work);
 993         int ret;
 994 
 995         /*
 996          * Perform the stages in compliance with the use_dma flag value.
 997          * The test status is changed only if an error happened; otherwise
 998          * the -ENODATA status is kept while the test is in progress.
 999          * Results synchronization is performed only if the test finished
1000          * without an error or interruption.
1001          */
1002         ret = perf_init_test(pthr);
1003         if (ret) {
1004                 pthr->status = ret;
1005                 return;
1006         }
1007 
1008         ret = perf_run_test(pthr);
1009         if (ret) {
1010                 pthr->status = ret;
1011                 goto err_clear_test;
1012         }
1013 
1014         pthr->status = perf_sync_test(pthr);
1015 
1016 err_clear_test:
1017         perf_clear_test(pthr);
1018 }
1019 
1020 static int perf_set_tcnt(struct perf_ctx *perf, u8 tcnt)
1021 {
1022         if (tcnt == 0 || tcnt > MAX_THREADS_CNT)
1023                 return -EINVAL;
1024 
1025         if (test_and_set_bit_lock(0, &perf->busy_flag))
1026                 return -EBUSY;
1027 
1028         perf->tcnt = tcnt;
1029 
1030         clear_bit_unlock(0, &perf->busy_flag);
1031 
1032         return 0;
1033 }
1034 
1035 static void perf_terminate_test(struct perf_ctx *perf)
1036 {
1037         int tidx;
1038 
1039         atomic_set(&perf->tsync, -1);
1040         wake_up(&perf->twait);
1041 
1042         for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1043                 wake_up(&perf->threads[tidx].dma_wait);
1044                 cancel_work_sync(&perf->threads[tidx].work);
1045         }
1046 }
1047 
1048 static int perf_submit_test(struct perf_peer *peer)
1049 {
1050         struct perf_ctx *perf = peer->perf;
1051         struct perf_thread *pthr;
1052         int tidx, ret;
1053 
1054         if (!test_bit(PERF_STS_DONE, &peer->sts))
1055                 return -ENOLINK;
1056 
1057         if (test_and_set_bit_lock(0, &perf->busy_flag))
1058                 return -EBUSY;
1059 
1060         perf->test_peer = peer;
1061         atomic_set(&perf->tsync, perf->tcnt);
1062 
1063         for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1064                 pthr = &perf->threads[tidx];
1065 
1066                 pthr->status = -ENODATA;
1067                 pthr->copied = 0;
1068                 pthr->duration = ktime_set(0, 0);
1069                 if (tidx < perf->tcnt)
1070                         (void)queue_work(perf_wq, &pthr->work);
1071         }
1072 
1073         ret = wait_event_interruptible(perf->twait,
1074                                        atomic_read(&perf->tsync) <= 0);
1075         if (ret == -ERESTARTSYS) {
1076                 perf_terminate_test(perf);
1077                 ret = -EINTR;
1078         }
1079 
1080         clear_bit_unlock(0, &perf->busy_flag);
1081 
1082         return ret;
1083 }
1084 
1085 static int perf_read_stats(struct perf_ctx *perf, char *buf,
1086                            size_t size, ssize_t *pos)
1087 {
1088         struct perf_thread *pthr;
1089         int tidx;
1090 
1091         if (test_and_set_bit_lock(0, &perf->busy_flag))
1092                 return -EBUSY;
1093 
1094         (*pos) += scnprintf(buf + *pos, size - *pos,
1095                 "    Peer %d test statistics:\n", perf->test_peer->pidx);
1096 
1097         for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1098                 pthr = &perf->threads[tidx];
1099 
1100                 if (pthr->status == -ENODATA)
1101                         continue;
1102 
1103                 if (pthr->status) {
1104                         (*pos) += scnprintf(buf + *pos, size - *pos,
1105                                 "%d: error status %d\n", tidx, pthr->status);
1106                         continue;
1107                 }
1108 
1109                 (*pos) += scnprintf(buf + *pos, size - *pos,
1110                         "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
1111                         tidx, pthr->copied, ktime_to_us(pthr->duration),
1112                         div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
1113         }
1114 
1115         clear_bit_unlock(0, &perf->busy_flag);
1116 
1117         return 0;
1118 }
1119 
1120 static void perf_init_threads(struct perf_ctx *perf)
1121 {
1122         struct perf_thread *pthr;
1123         int tidx;
1124 
1125         perf->tcnt = DEF_THREADS_CNT;
1126         perf->test_peer = &perf->peers[0];
1127         init_waitqueue_head(&perf->twait);
1128 
1129         for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
1130                 pthr = &perf->threads[tidx];
1131 
1132                 pthr->perf = perf;
1133                 pthr->tidx = tidx;
1134                 pthr->status = -ENODATA;
1135                 init_waitqueue_head(&pthr->dma_wait);
1136                 INIT_WORK(&pthr->work, perf_thread_work);
1137         }
1138 }
1139 
1140 static void perf_clear_threads(struct perf_ctx *perf)
1141 {
1142         perf_terminate_test(perf);
1143 }
1144 
1145 /*==============================================================================
1146  *                               DebugFS nodes
1147  *==============================================================================
1148  */
1149 
1150 static ssize_t perf_dbgfs_read_info(struct file *filep, char __user *ubuf,
1151                                     size_t size, loff_t *offp)
1152 {
1153         struct perf_ctx *perf = filep->private_data;
1154         struct perf_peer *peer;
1155         size_t buf_size;
1156         ssize_t pos = 0;
1157         int ret, pidx;
1158         char *buf;
1159 
1160         buf_size = min_t(size_t, size, 0x1000U);
1161 
1162         buf = kmalloc(buf_size, GFP_KERNEL);
1163         if (!buf)
1164                 return -ENOMEM;
1165 
1166         pos += scnprintf(buf + pos, buf_size - pos,
1167                 "    Performance measuring tool info:\n\n");
1168 
1169         pos += scnprintf(buf + pos, buf_size - pos,
1170                 "Local port %d, Global index %d\n", ntb_port_number(perf->ntb),
1171                 perf->gidx);
1172         pos += scnprintf(buf + pos, buf_size - pos, "Test status: ");
1173         if (test_bit(0, &perf->busy_flag)) {
1174                 pos += scnprintf(buf + pos, buf_size - pos,
1175                         "on-fly with port %d (%d)\n",
1176                         ntb_peer_port_number(perf->ntb, perf->test_peer->pidx),
1177                         perf->test_peer->pidx);
1178         } else {
1179                 pos += scnprintf(buf + pos, buf_size - pos, "idle\n");
1180         }
1181 
1182         for (pidx = 0; pidx < perf->pcnt; pidx++) {
1183                 peer = &perf->peers[pidx];
1184 
1185                 pos += scnprintf(buf + pos, buf_size - pos,
1186                         "Port %d (%d), Global index %d:\n",
1187                         ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
1188                         peer->gidx);
1189 
1190                 pos += scnprintf(buf + pos, buf_size - pos,
1191                         "\tLink status: %s\n",
1192                         test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");
1193 
1194                 pos += scnprintf(buf + pos, buf_size - pos,
1195                         "\tOut buffer addr 0x%pK\n", peer->outbuf);
1196 
1197                 pos += scnprintf(buf + pos, buf_size - pos,
1198                         "\tOut buffer size %pa\n", &peer->outbuf_size);
1199 
1200                 pos += scnprintf(buf + pos, buf_size - pos,
1201                         "\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);
1202 
1203                 if (!peer->inbuf) {
1204                         pos += scnprintf(buf + pos, buf_size - pos,
1205                                 "\tIn buffer addr: unallocated\n");
1206                         continue;
1207                 }
1208 
1209                 pos += scnprintf(buf + pos, buf_size - pos,
1210                         "\tIn buffer addr 0x%pK\n", peer->inbuf);
1211 
1212                 pos += scnprintf(buf + pos, buf_size - pos,
1213                         "\tIn buffer size %pa\n", &peer->inbuf_size);
1214 
1215                 pos += scnprintf(buf + pos, buf_size - pos,
1216                         "\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
1217         }
1218 
1219         ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
1220         kfree(buf);
1221 
1222         return ret;
1223 }
1224 
1225 static const struct file_operations perf_dbgfs_info = {
1226         .open = simple_open,
1227         .read = perf_dbgfs_read_info
1228 };
1229 
1230 static ssize_t perf_dbgfs_read_run(struct file *filep, char __user *ubuf,
1231                                    size_t size, loff_t *offp)
1232 {
1233         struct perf_ctx *perf = filep->private_data;
1234         ssize_t ret, pos = 0;
1235         char *buf;
1236 
1237         buf = kmalloc(PERF_BUF_LEN, GFP_KERNEL);
1238         if (!buf)
1239                 return -ENOMEM;
1240 
1241         ret = perf_read_stats(perf, buf, PERF_BUF_LEN, &pos);
1242         if (ret)
1243                 goto err_free;
1244 
1245         ret = simple_read_from_buffer(ubuf, size, offp, buf, pos);
1246 err_free:
1247         kfree(buf);
1248 
1249         return ret;
1250 }
1251 
1252 static ssize_t perf_dbgfs_write_run(struct file *filep, const char __user *ubuf,
1253                                     size_t size, loff_t *offp)
1254 {
1255         struct perf_ctx *perf = filep->private_data;
1256         struct perf_peer *peer;
1257         int pidx, ret;
1258 
1259         ret = kstrtoint_from_user(ubuf, size, 0, &pidx);
1260         if (ret)
1261                 return ret;
1262 
1263         if (pidx < 0 || pidx >= perf->pcnt)
1264                 return -EINVAL;
1265 
1266         peer = &perf->peers[pidx];
1267 
1268         ret = perf_submit_test(peer);
1269         if (ret)
1270                 return ret;
1271 
1272         return size;
1273 }
1274 
1275 static const struct file_operations perf_dbgfs_run = {
1276         .open = simple_open,
1277         .read = perf_dbgfs_read_run,
1278         .write = perf_dbgfs_write_run
1279 };
1280 
1281 static ssize_t perf_dbgfs_read_tcnt(struct file *filep, char __user *ubuf,
1282                                     size_t size, loff_t *offp)
1283 {
1284         struct perf_ctx *perf = filep->private_data;
1285         char buf[8];
1286         ssize_t pos;
1287 
1288         pos = scnprintf(buf, sizeof(buf), "%hhu\n", perf->tcnt);
1289 
1290         return simple_read_from_buffer(ubuf, size, offp, buf, pos);
1291 }
1292 
1293 static ssize_t perf_dbgfs_write_tcnt(struct file *filep,
1294                                      const char __user *ubuf,
1295                                      size_t size, loff_t *offp)
1296 {
1297         struct perf_ctx *perf = filep->private_data;
1298         int ret;
1299         u8 val;
1300 
1301         ret = kstrtou8_from_user(ubuf, size, 0, &val);
1302         if (ret)
1303                 return ret;
1304 
1305         ret = perf_set_tcnt(perf, val);
1306         if (ret)
1307                 return ret;
1308 
1309         return size;
1310 }
1311 
1312 static const struct file_operations perf_dbgfs_tcnt = {
1313         .open = simple_open,
1314         .read = perf_dbgfs_read_tcnt,
1315         .write = perf_dbgfs_write_tcnt
1316 };
1317 
1318 static void perf_setup_dbgfs(struct perf_ctx *perf)
1319 {
1320         struct pci_dev *pdev = perf->ntb->pdev;
1321 
1322         perf->dbgfs_dir = debugfs_create_dir(pci_name(pdev), perf_dbgfs_topdir);
1323         if (!perf->dbgfs_dir) {
1324                 dev_warn(&perf->ntb->dev, "DebugFS unsupported\n");
1325                 return;
1326         }
1327 
1328         debugfs_create_file("info", 0600, perf->dbgfs_dir, perf,
1329                             &perf_dbgfs_info);
1330 
1331         debugfs_create_file("run", 0600, perf->dbgfs_dir, perf,
1332                             &perf_dbgfs_run);
1333 
1334         debugfs_create_file("threads_count", 0600, perf->dbgfs_dir, perf,
1335                             &perf_dbgfs_tcnt);
1336 
1337         /* These are made read-only for test execution safety and integrity */
1338         debugfs_create_u8("chunk_order", 0500, perf->dbgfs_dir, &chunk_order);
1339 
1340         debugfs_create_u8("total_order", 0500, perf->dbgfs_dir, &total_order);
1341 
1342         debugfs_create_bool("use_dma", 0500, perf->dbgfs_dir, &use_dma);
1343 }
1344 
1345 static void perf_clear_dbgfs(struct perf_ctx *perf)
1346 {
1347         debugfs_remove_recursive(perf->dbgfs_dir);
1348 }
1349 
1350 /*==============================================================================
1351  *                        Basic driver initialization
1352  *==============================================================================
1353  */
1354 
1355 static struct perf_ctx *perf_create_data(struct ntb_dev *ntb)
1356 {
1357         struct perf_ctx *perf;
1358 
1359         perf = devm_kzalloc(&ntb->dev, sizeof(*perf), GFP_KERNEL);
1360         if (!perf)
1361                 return ERR_PTR(-ENOMEM);
1362 
1363         perf->pcnt = ntb_peer_port_count(ntb);
1364         perf->peers = devm_kcalloc(&ntb->dev, perf->pcnt, sizeof(*perf->peers),
1365                                   GFP_KERNEL);
1366         if (!perf->peers)
1367                 return ERR_PTR(-ENOMEM);
1368 
1369         perf->ntb = ntb;
1370 
1371         return perf;
1372 }
1373 
1374 static int perf_setup_peer_mw(struct perf_peer *peer)
1375 {
1376         struct perf_ctx *perf = peer->perf;
1377         phys_addr_t phys_addr;
1378         int ret;
1379 
1380         /* Get the outbound MW parameters and map the window */
1381         ret = ntb_peer_mw_get_addr(perf->ntb, perf->gidx, &phys_addr,
1382                                    &peer->outbuf_size);
1383         if (ret)
1384                 return ret;
1385 
1386         peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
1387                                         peer->outbuf_size);
1388         if (!peer->outbuf)
1389                 return -ENOMEM;
1390 
1391         if (max_mw_size && peer->outbuf_size > max_mw_size) {
1392                 peer->outbuf_size = max_mw_size;
1393                 dev_warn(&peer->perf->ntb->dev,
1394                         "Peer %d outbuf reduced to %pa\n", peer->pidx,
1395                         &peer->outbuf_size);
1396         }
1397 
1398         return 0;
1399 }
1400 
1401 static int perf_init_peers(struct perf_ctx *perf)
1402 {
1403         struct perf_peer *peer;
1404         int pidx, lport, ret;
1405 
1406         lport = ntb_port_number(perf->ntb);
1407         perf->gidx = -1;
1408         for (pidx = 0; pidx < perf->pcnt; pidx++) {
1409                 peer = &perf->peers[pidx];
1410 
1411                 peer->perf = perf;
1412                 peer->pidx = pidx;
1413                 if (lport < ntb_peer_port_number(perf->ntb, pidx)) {
1414                         if (perf->gidx == -1)
1415                                 perf->gidx = pidx;
1416                         peer->gidx = pidx + 1;
1417                 } else {
1418                         peer->gidx = pidx;
1419                 }
1420                 INIT_WORK(&peer->service, perf_service_work);
1421         }
1422         if (perf->gidx == -1)
1423                 perf->gidx = pidx;
1424 
1425         for (pidx = 0; pidx < perf->pcnt; pidx++) {
1426                 ret = perf_setup_peer_mw(&perf->peers[pidx]);
1427                 if (ret)
1428                         return ret;
1429         }
1430 
1431         dev_dbg(&perf->ntb->dev, "Global port index %d\n", perf->gidx);
1432 
1433         return 0;
1434 }
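
/*
 * A worked example of the index assignment above: with local port number 1
 * and peer ports {0, 2}, peer 0 (port 0) gets gidx 0, the local device takes
 * gidx 1 (the slot of the first peer with a greater port number), and peer 1
 * (port 2) gets gidx 2.
 */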
1435 
1436 static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb)
1437 {
1438         struct perf_ctx *perf;
1439         int ret;
1440 
1441         perf = perf_create_data(ntb);
1442         if (IS_ERR(perf))
1443                 return PTR_ERR(perf);
1444 
1445         ret = perf_init_peers(perf);
1446         if (ret)
1447                 return ret;
1448 
1449         perf_init_threads(perf);
1450 
1451         ret = perf_init_service(perf);
1452         if (ret)
1453                 return ret;
1454 
1455         ret = perf_enable_service(perf);
1456         if (ret)
1457                 return ret;
1458 
1459         perf_setup_dbgfs(perf);
1460 
1461         return 0;
1462 }
1463 
1464 static void perf_remove(struct ntb_client *client, struct ntb_dev *ntb)
1465 {
1466         struct perf_ctx *perf = ntb->ctx;
1467 
1468         perf_clear_dbgfs(perf);
1469 
1470         perf_disable_service(perf);
1471 
1472         perf_clear_threads(perf);
1473 }
1474 
1475 static struct ntb_client perf_client = {
1476         .ops = {
1477                 .probe = perf_probe,
1478                 .remove = perf_remove
1479         }
1480 };
1481 
1482 static int __init perf_init(void)
1483 {
1484         int ret;
1485 
1486         if (chunk_order > MAX_CHUNK_ORDER) {
1487                 chunk_order = MAX_CHUNK_ORDER;
1488                 pr_info("Chunk order reduced to %hhu\n", chunk_order);
1489         }
1490 
1491         if (total_order < chunk_order) {
1492                 total_order = chunk_order;
1493                 pr_info("Total data order reduced to %hhu\n", total_order);
1494         }
1495 
1496         perf_wq = alloc_workqueue("perf_wq", WQ_UNBOUND | WQ_SYSFS, 0);
1497         if (!perf_wq)
1498                 return -ENOMEM;
1499 
1500         if (debugfs_initialized())
1501                 perf_dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
1502 
1503         ret = ntb_register_client(&perf_client);
1504         if (ret) {
1505                 debugfs_remove_recursive(perf_dbgfs_topdir);
1506                 destroy_workqueue(perf_wq);
1507         }
1508 
1509         return ret;
1510 }
1511 module_init(perf_init);
1512 
1513 static void __exit perf_exit(void)
1514 {
1515         ntb_unregister_client(&perf_client);
1516         debugfs_remove_recursive(perf_dbgfs_topdir);
1517         destroy_workqueue(perf_wq);
1518 }
1519 module_exit(perf_exit);
1520 
