root/drivers/soc/fsl/dpio/dpio-service.c


DEFINITIONS

This source file includes the following definitions.
  1. service_select_by_cpu
  2. service_select
  3. dpaa2_io_service_select
  4. dpaa2_io_create
  5. dpaa2_io_down
  6. dpaa2_io_irq
  7. dpaa2_io_get_cpu
  8. dpaa2_io_service_register
  9. dpaa2_io_service_deregister
  10. dpaa2_io_service_rearm
  11. dpaa2_io_service_pull_fq
  12. dpaa2_io_service_pull_channel
  13. dpaa2_io_service_enqueue_fq
  14. dpaa2_io_service_enqueue_qd
  15. dpaa2_io_service_release
  16. dpaa2_io_service_acquire
  17. dpaa2_io_store_create
  18. dpaa2_io_store_destroy
  19. dpaa2_io_store_next
  20. dpaa2_io_query_fq_count
  21. dpaa2_io_query_bp_count

// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 *
 */
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
	struct device *dev;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;    /* unaligned value from kmalloc() */
	unsigned int idx;      /* position of the next-to-be-returned entry */
	struct qbman_swp *swp; /* portal used to issue VDQCR */
	struct device *dev;    /* device used for DMA mapping */
};

/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == -1, choose the current cpu, with no guarantees about
	 * potentially being migrated away.
	 */
	if (unlikely(cpu < 0))
		cpu = smp_processor_id();

	/* If a specific cpu was requested, pick it up immediately */
	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}

/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
 * service.
 */
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);

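/*
 * Example (not part of this file): a minimal sketch of picking a service,
 * preferring the one affined to the current cpu and falling back to
 * round-robin selection. The helper name is hypothetical and the
 * smp_processor_id() use assumes a non-preemptible context.
 */
static struct dpaa2_io *example_get_service(void)
{
	struct dpaa2_io *io = dpaa2_io_service_select(smp_processor_id());

	if (!io)
		io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);

	return io;
}
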
/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an actual
 * DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
				 struct device *dev)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
	obj->swp = qbman_swp_init(&obj->swp_desc);

	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	obj->dev = dev;

	return obj;
}

/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * The "struct dpaa2_io" type can represent an individual DPIO object (as
 * described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
 * which can be used to group/encapsulate multiple DPIO objects. In all cases,
 * each handle obtained should be released using this function.
 */
void dpaa2_io_down(struct dpaa2_io *d)
{
	spin_lock(&dpio_list_lock);
	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
	list_del(&d->node);
	spin_unlock(&dpio_list_lock);

	kfree(d);
}

#define DPAA_POLL_MAX 32

/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 *
 * @obj: the given DPIO object.
 *
 * Return IRQ_HANDLED for success or IRQ_NONE if there
 * were no pending interrupts.
 */
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}

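/*
 * Example (not part of this file): a sketch of how the DPIO driver's
 * interrupt handler can delegate to dpaa2_io_irq(). The handler name and the
 * request_irq() wiring are hypothetical; the real wiring lives in
 * dpio-driver.c.
 */
static irqreturn_t example_dpio_irq_handler(int irq, void *arg)
{
	struct dpaa2_io *io = arg;	/* passed as dev_id to request_irq() */

	return dpaa2_io_irq(io);
}
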
/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 *
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object
 */
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d:   the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the registration
 *
 * The caller should make the MC command to attach a DPAA2 object to
 * a DPIO after this function completes successfully.  In that way:
 *    (a) The DPIO service is "ready" to handle a notification arrival
 *        (which might happen before the "attach" command to MC has
 *        returned control of execution back to the caller)
 *    (b) The DPIO service can provide back to the caller the 'dpio_id' and
 *        'qman64' parameters that it should pass along in the MC command
 *        in order for the object to be configured to produce the right
 *        notification fields to the DPIO service.
 *
 * Return 0 for success, -ENODEV if no service is available, or -EINVAL if
 * the device link cannot be added.
 */
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);

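/*
 * Example (not part of this file): registering a CDAN notification context
 * for a channel. 'example_cb', 'example_register' and the channel id are
 * hypothetical. After this returns 0, ctx.dpio_id and ctx.qman64 would be
 * passed along in the MC "attach" command.
 */
static void example_cb(struct dpaa2_io_notification_ctx *ctx)
{
	/* invoked from dpaa2_io_irq() when a CDAN arrives */
}

static int example_register(struct device *dev, u32 channel_id)
{
	static struct dpaa2_io_notification_ctx ctx;

	ctx.cb = example_cb;
	ctx.is_cdan = 1;
	ctx.id = channel_id;
	ctx.desired_cpu = DPAA2_IO_ANY_CPU;

	return dpaa2_io_service_register(NULL, &ctx, dev);
}
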
/**
 * dpaa2_io_service_deregister - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * This function should be called only after sending the MC command to
 * detach the notification-producing device from the DPIO.
 */
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed", i.e. the user can issue pull dequeue operations on
 * that traffic source for as long as it likes. Eventually it may wish to
 * "rearm" that source to allow it to produce another FQDAN/CDAN; that is what
 * this function achieves.
 *
 * Return 0 for success, or -ENODEV if no service is available.
 */
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);

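/*
 * Example (not part of this file): a notification callback typically pulls
 * the available work and then rearms the source so the next CDAN/FQDAN can
 * be produced. Pull and processing details are elided.
 */
static void example_poll_and_rearm(struct dpaa2_io_notification_ctx *ctx)
{
	/* ... pull dequeue and process frames here ... */

	/* allow the FQ/channel to generate another notification */
	dpaa2_io_service_rearm(NULL, ctx);
}
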
/**
 * dpaa2_io_service_pull_fq() - pull (dequeue) frames from a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;
	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

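/*
 * Example (not part of this file): issuing a pull dequeue into a store
 * created with dpaa2_io_store_create(); the results are consumed with
 * dpaa2_io_store_next() (see the sketch after that function below). -EBUSY
 * means the portal still has an outstanding VDQCR, so a real driver would
 * bound this retry loop.
 */
static int example_pull(struct dpaa2_io_store *s, u32 fqid)
{
	int err;

	do {
		err = dpaa2_io_service_pull_fq(NULL, fqid, s);
	} while (err == -EBUSY);

	return err;
}
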
/**
 * dpaa2_io_service_pull_channel() - pull (dequeue) frames from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);

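/*
 * Example (not part of this file): enqueuing a frame descriptor with a
 * bounded retry on -EBUSY (the enqueue ring being momentarily full). Setup
 * of 'fd' is elided and the retry bound is arbitrary.
 */
static int example_enqueue(u32 fqid, const struct dpaa2_fd *fd)
{
	int i, err = -EBUSY;

	for (i = 0; i < 100 && err == -EBUSY; i++)
		err = dpaa2_io_service_enqueue_fq(NULL, fqid, fd);

	return err;
}
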
/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u16 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise it returns
 * the number of buffers acquired, which may be less than the number requested.
 * E.g. if the buffer pool is empty, this will return zero.
 */
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u16 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);

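/*
 * Example (not part of this file): a round-trip on a buffer pool. The
 * 'dma_addrs' values are QBMan-visible DMA addresses, and the assumption
 * that at most 7 buffers fit in one release command follows
 * qbman_swp_release()'s per-command limit.
 */
static void example_bp_roundtrip(u16 bpid, const u64 *dma_addrs,
				 unsigned int n)
{
	u64 bufs[7];
	int got;

	/* seed the pool, at most 7 buffers per call */
	dpaa2_io_service_release(NULL, bpid, dma_addrs, min(n, 7u));

	/* acquire may return fewer buffers than requested, or 0 if empty */
	got = dpaa2_io_service_acquire(NULL, bpid, bufs, ARRAY_SIZE(bufs));
	if (got > 0)
		pr_info("acquired %d buffers\n", got);
}
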
/*
 * 'Stores' are reusable memory blocks for holding dequeue results and for
 * assisting with parsing those results.
 */

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue results.
 * @max_frames: the maximum number of dequeue results for frames; must be <= 16.
 * @dev:        the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
 * The 'dpaa2_io_store' returned is a DPIO service managed object.
 *
 * Return pointer to dpaa2_io_store struct for successfully created storage
 * memory, or NULL on error.
 */
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 16))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

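/*
 * Example (not part of this file): the lifecycle of a store sized for a
 * maximal 16-frame pull; 'dev' is the requester's device used for the DMA
 * mapping.
 */
static int example_store_lifecycle(struct device *dev)
{
	struct dpaa2_io_store *s = dpaa2_io_store_create(16, dev);

	if (!s)
		return -ENOMEM;

	/* ... issue pulls into 's' and drain the results ... */

	dpaa2_io_store_destroy(s);
	return 0;
}
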
/**
 * dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
 *                            results.
 * @s: the storage memory to be destroyed.
 */
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * When an object driver performs dequeues to a dpaa2_io_store, this function
 * can be used to determine when the next frame result is available. Once
 * this function returns non-NULL, a subsequent call to it will try to find
 * the next dequeue result.
 *
 * Note that if a pull-dequeue has a NULL result because the target FQ/channel
 * was empty, then this function will also return NULL (rather than expecting
 * the caller to always check for this). As such, "is_last" can be used to
 * differentiate between "end-of-empty-dequeue" and "still-waiting".
 *
 * Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
 */
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If we get an empty dequeue result to terminate a zero-results
		 * vdqcr, return NULL to the caller rather than expecting the
		 * caller to check non-NULL results every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		prefetch(&s->vaddr[s->idx]);
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);

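/*
 * Example (not part of this file): draining a store after a pull has been
 * issued. NULL with is_last == 0 means the hardware has not yet produced the
 * next result; a real driver would bound or schedule out of this wait.
 */
static void example_drain(struct dpaa2_io_store *s)
{
	struct dpaa2_dq *dq;
	int is_last = 0;

	while (!is_last) {
		dq = dpaa2_io_store_next(s, &is_last);
		if (!dq)
			continue;	/* still waiting, or an empty final entry */

		/* process the result, e.g. via dpaa2_dq_fd(dq) */
	}
}
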
/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations.
 * The instantaneous frame and byte counts are returned.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

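/*
 * Example (not part of this file): a debug helper dumping the instantaneous
 * frame/byte counts of a frame queue; the helper name is hypothetical.
 */
static void example_dump_fq_count(u32 fqid)
{
	u32 fcnt = 0, bcnt = 0;

	if (!dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt))
		pr_info("fq %u: %u frames, %u bytes\n", fqid, fcnt, bcnt);
}
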
/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 * buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if query fails.
 */
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
