root/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. __resource_add
  2. __resource_del
  3. __release_cpp_area
  4. nfp_cpp_area_put
  5. nfp_cpp_area_get
  6. nfp_cpp_free
  7. nfp_cpp_model
  8. nfp_cpp_interface
  9. nfp_cpp_serial
  10. nfp_cpp_set_mu_locality_lsb
  11. nfp_cpp_mu_locality_lsb
  12. nfp_cpp_area_alloc_with_name
  13. nfp_cpp_area_alloc
  14. nfp_cpp_area_alloc_acquire
  15. nfp_cpp_area_free
  16. nfp_cpp_area_acquire_try
  17. __nfp_cpp_area_acquire
  18. nfp_cpp_area_acquire
  19. nfp_cpp_area_acquire_nonblocking
  20. nfp_cpp_area_release
  21. nfp_cpp_area_release_free
  22. nfp_cpp_area_read
  23. nfp_cpp_area_write
  24. nfp_cpp_area_size
  25. nfp_cpp_area_name
  26. nfp_cpp_area_priv
  27. nfp_cpp_area_cpp
  28. nfp_cpp_area_resource
  29. nfp_cpp_area_phys
  30. nfp_cpp_area_iomem
  31. nfp_cpp_area_readl
  32. nfp_cpp_area_writel
  33. nfp_cpp_area_readq
  34. nfp_cpp_area_writeq
  35. nfp_cpp_area_fill
  36. nfp_cpp_area_cache_add
  37. area_cache_get
  38. area_cache_put
  39. __nfp_cpp_read
  40. nfp_cpp_read
  41. __nfp_cpp_write
  42. nfp_cpp_write
  43. nfp_xpb_to_cpp
  44. nfp_xpb_readl
  45. nfp_xpb_writel
  46. nfp_xpb_writelm
  47. nfp_cpp_dev_release
  48. nfp_cpp_from_operations
  49. nfp_cpp_priv
  50. nfp_cpp_device
  51. nfp_cpp_explicit_acquire
  52. nfp_cpp_explicit_set_target
  53. nfp_cpp_explicit_set_data
  54. nfp_cpp_explicit_set_signal
  55. nfp_cpp_explicit_set_posted
  56. nfp_cpp_explicit_put
  57. nfp_cpp_explicit_do
  58. nfp_cpp_explicit_get
  59. nfp_cpp_explicit_release
  60. nfp_cpp_explicit_cpp
  61. nfp_cpp_explicit_priv

   1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
   3 
   4 /*
   5  * nfp_cppcore.c
   6  * Provides low-level access to the NFP's internal CPP bus
   7  * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
   8  *          Jason McMullan <jason.mcmullan@netronome.com>
   9  *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
  10  */
  11 
  12 #include <asm/unaligned.h>
  13 #include <linux/delay.h>
  14 #include <linux/device.h>
  15 #include <linux/ioport.h>
  16 #include <linux/kernel.h>
  17 #include <linux/module.h>
  18 #include <linux/mutex.h>
  19 #include <linux/sched.h>
  20 #include <linux/slab.h>
  21 #include <linux/wait.h>
  22 
  23 #include "nfp_arm.h"
  24 #include "nfp_cpp.h"
  25 #include "nfp6000/nfp6000.h"
  26 
  27 #define NFP_ARM_GCSR_SOFTMODEL2                              0x0000014c
  28 #define NFP_ARM_GCSR_SOFTMODEL3                              0x00000150
  29 
/* One node on nfp_cpp::resource_list; the list is kept sorted by
 * (cpp_id, start) - see __resource_add().  Embedded in struct nfp_cpp_area.
 */
struct nfp_cpp_resource {
	struct list_head list;
	const char *name;
	u32 cpp_id;
	u64 start;
	u64 end;	/* inclusive last byte (start + size - 1) */
};
  37 
/**
 * struct nfp_cpp - main nfpcore device structure
 * Following fields are read-only after probe() exits or netdevs are spawned.
 * @dev:                embedded device structure
 * @op:                 low-level implementation ops
 * @priv:               private data of the low-level implementation
 * @model:              chip model
 * @interface:          chip interface id we are using to reach it
 * @serial:             chip serial number
 * @imb_cat_table:      CPP Mapping Table
 * @mu_locality_lsb:    MU access type bit offset
 *
 * Following fields use explicit locking:
 * @resource_list:      NFP CPP resource list
 * @resource_lock:      protects @resource_list
 *
 * @area_cache_list:    cached areas for cpp/xpb read/write speed up
 * @area_cache_mutex:   protects @area_cache_list
 *
 * @waitq:              area wait queue
 */
struct nfp_cpp {
	struct device dev;

	void *priv;

	u32 model;
	u16 interface;
	u8 serial[NFP_SERIAL_LEN];

	const struct nfp_cpp_operations *op;
	struct list_head resource_list;	/* sorted by (cpp_id, start) */
	rwlock_t resource_lock;
	wait_queue_head_t waitq;	/* woken when an area/BAR is released */

	u32 imb_cat_table[16];
	unsigned int mu_locality_lsb;

	struct mutex area_cache_mutex;
	struct list_head area_cache_list;	/* LRU order: hottest first */
};
  79 
/* Element of the area_cache_list: a pre-allocated area kept around to
 * speed up cpp/xpb read/write.  id == 0 means the slot is not currently
 * bound to (and acquired for) any target - see area_cache_get().
 */
struct nfp_cpp_area_cache {
	struct list_head entry;	/* LRU position on cpp->area_cache_list */
	u32 id;			/* CPP ID currently mapped, 0 if unbound */
	u64 addr;		/* cache-size-aligned base address */
	u32 size;
	struct nfp_cpp_area *area;
};
  88 
/* A mapping of a CPP target region.  Reference counted two ways: @kref
 * tracks the lifetime of the structure itself, while @refcount counts
 * 'acquire' holders of the underlying mapping.
 */
struct nfp_cpp_area {
	struct nfp_cpp *cpp;
	struct kref kref;
	atomic_t refcount;
	struct mutex mutex;	/* Lock for the area's refcount */
	unsigned long long offset;
	unsigned long size;
	struct nfp_cpp_resource resource;	/* node on cpp->resource_list */
	void __iomem *iomem;
	/* Here follows the 'priv' part of nfp_cpp_area. */
};
 100 
/* Handle for an explicit CPP transaction. */
struct nfp_cpp_explicit {
	struct nfp_cpp *cpp;
	struct nfp_cpp_explicit_command cmd;
	/* Here follows the 'priv' part of nfp_cpp_explicit. */
};
 106 
 107 static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
 108 {
 109         struct nfp_cpp_resource *tmp;
 110         struct list_head *pos;
 111 
 112         list_for_each(pos, head) {
 113                 tmp = container_of(pos, struct nfp_cpp_resource, list);
 114 
 115                 if (tmp->cpp_id > res->cpp_id)
 116                         break;
 117 
 118                 if (tmp->cpp_id == res->cpp_id && tmp->start > res->start)
 119                         break;
 120         }
 121 
 122         list_add_tail(&res->list, pos);
 123 }
 124 
/* Unlink @res from the resource list.  list_del_init() leaves the node
 * self-linked so a later list_del on it is harmless.
 */
static void __resource_del(struct nfp_cpp_resource *res)
{
	list_del_init(&res->list);
}
 129 
/* kref release callback: final teardown of an area once the last struct
 * reference is dropped.  Lets the transport layer clean up first, then
 * unlinks the area from the CPP resource list and frees it.
 */
static void __release_cpp_area(struct kref *kref)
{
	struct nfp_cpp_area *area =
		container_of(kref, struct nfp_cpp_area, kref);
	struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);

	if (area->cpp->op->area_cleanup)
		area->cpp->op->area_cleanup(area);

	write_lock(&cpp->resource_lock);
	__resource_del(&area->resource);
	write_unlock(&cpp->resource_lock);
	kfree(area);
}
 144 
/* Drop one struct reference; frees the area via __release_cpp_area()
 * when this was the last one.
 */
static void nfp_cpp_area_put(struct nfp_cpp_area *area)
{
	kref_put(&area->kref, __release_cpp_area);
}
 149 
/* Take an extra struct reference; returns @area for call chaining. */
static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
{
	kref_get(&area->kref);

	return area;
}
 156 
/**
 * nfp_cpp_free() - free the CPP handle
 * @cpp:        CPP handle
 *
 * Releases all cached areas, warns about (and force-releases) any areas
 * still on the resource list, then tears down the low-level transport
 * and the embedded device.
 */
void nfp_cpp_free(struct nfp_cpp *cpp)
{
	struct nfp_cpp_area_cache *cache, *ctmp;
	struct nfp_cpp_resource *res, *rtmp;

	/* Remove all caches */
	list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
		list_del(&cache->entry);
		/* A non-zero id means the cached area is currently acquired */
		if (cache->id)
			nfp_cpp_area_release(cache->area);
		nfp_cpp_area_free(cache->area);
		kfree(cache);
	}

	/* There should be no dangling areas at this point */
	WARN_ON(!list_empty(&cpp->resource_list));

	/* .. but if they weren't, try to clean up. */
	list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
		struct nfp_cpp_area *area = container_of(res,
							 struct nfp_cpp_area,
							 resource);

		dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
			NFP_CPP_ID_TARGET_of(res->cpp_id),
			NFP_CPP_ID_ACTION_of(res->cpp_id),
			NFP_CPP_ID_TOKEN_of(res->cpp_id),
			res->start, res->end,
			res->name ? " " : "",
			res->name ? res->name : "");

		if (area->cpp->op->area_release)
			area->cpp->op->area_release(area);

		/* Bypass the kref machinery and free the area directly */
		__release_cpp_area(&area->kref);
	}

	if (cpp->op->free)
		cpp->op->free(cpp);

	device_unregister(&cpp->dev);

	kfree(cpp);
}
 205 
/**
 * nfp_cpp_model() - Retrieve the Model ID of the NFP
 * @cpp:        NFP CPP handle
 *
 * @model is read-only after probe, so no locking is needed here.
 *
 * Return: NFP CPP Model ID
 */
u32 nfp_cpp_model(struct nfp_cpp *cpp)
{
	return cpp->model;
}
 216 
/**
 * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
 * @cpp:        NFP CPP handle
 *
 * @interface is read-only after probe, so no locking is needed here.
 *
 * Return: NFP CPP Interface ID
 */
u16 nfp_cpp_interface(struct nfp_cpp *cpp)
{
	return cpp->interface;
}
 227 
/**
 * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
 * @cpp:        NFP CPP handle
 * @serial:     Pointer to NFP serial number
 *
 * Points *@serial at the internal serial buffer; no copy is made.
 *
 * Return:  Length of NFP serial number, in bytes
 */
int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
{
	*serial = &cpp->serial[0];
	return sizeof(cpp->serial);
}
 240 
 241 #define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x)           (((_x) >> 13) & 0x7)
 242 #define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE              BIT(12)
 243 #define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT     0
 244 #define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT     BIT(12)
 245 
 246 static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp)
 247 {
 248         unsigned int mode, addr40;
 249         u32 imbcppat;
 250         int res;
 251 
 252         imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU];
 253         mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
 254         addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
 255 
 256         res = nfp_cppat_mu_locality_lsb(mode, addr40);
 257         if (res < 0)
 258                 return res;
 259         cpp->mu_locality_lsb = res;
 260 
 261         return 0;
 262 }
 263 
/* Return the MU locality bit offset computed at probe time by
 * nfp_cpp_set_mu_locality_lsb().
 */
unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp)
{
	return cpp->mu_locality_lsb;
}
 268 
 269 /**
 270  * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
 271  * @cpp:        CPP device handle
 272  * @dest:       NFP CPP ID
 273  * @name:       Name of region
 274  * @address:    Address of region
 275  * @size:       Size of region
 276  *
 277  * Allocate and initialize a CPP area structure.  The area must later
 278  * be locked down with an 'acquire' before it can be safely accessed.
 279  *
 280  * NOTE: @address and @size must be 32-bit aligned values.
 281  *
 282  * Return: NFP CPP area handle, or NULL
 283  */
 284 struct nfp_cpp_area *
 285 nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
 286                              unsigned long long address, unsigned long size)
 287 {
 288         struct nfp_cpp_area *area;
 289         u64 tmp64 = address;
 290         int err, name_len;
 291 
 292         /* Remap from cpp_island to cpp_target */
 293         err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
 294         if (err < 0)
 295                 return NULL;
 296 
 297         address = tmp64;
 298 
 299         if (!name)
 300                 name = "(reserved)";
 301 
 302         name_len = strlen(name) + 1;
 303         area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
 304                        GFP_KERNEL);
 305         if (!area)
 306                 return NULL;
 307 
 308         area->cpp = cpp;
 309         area->resource.name = (void *)area + sizeof(*area) +
 310                 cpp->op->area_priv_size;
 311         memcpy((char *)area->resource.name, name, name_len);
 312 
 313         area->resource.cpp_id = dest;
 314         area->resource.start = address;
 315         area->resource.end = area->resource.start + size - 1;
 316         INIT_LIST_HEAD(&area->resource.list);
 317 
 318         atomic_set(&area->refcount, 0);
 319         kref_init(&area->kref);
 320         mutex_init(&area->mutex);
 321 
 322         if (cpp->op->area_init) {
 323                 int err;
 324 
 325                 err = cpp->op->area_init(area, dest, address, size);
 326                 if (err < 0) {
 327                         kfree(area);
 328                         return NULL;
 329                 }
 330         }
 331 
 332         write_lock(&cpp->resource_lock);
 333         __resource_add(&cpp->resource_list, &area->resource);
 334         write_unlock(&cpp->resource_lock);
 335 
 336         area->offset = address;
 337         area->size = size;
 338 
 339         return area;
 340 }
 341 
/**
 * nfp_cpp_area_alloc() - allocate a new CPP area
 * @cpp:        CPP handle
 * @dest:       CPP id
 * @address:    Start address on CPP target
 * @size:       Size of area in bytes
 *
 * Allocate and initialize a CPP area structure.  The area must later
 * be locked down with an 'acquire' before it can be safely accessed.
 *
 * Convenience wrapper around nfp_cpp_area_alloc_with_name() with no name.
 *
 * NOTE: @address and @size must be 32-bit aligned values.
 *
 * Return: NFP CPP Area handle, or NULL
 */
struct nfp_cpp_area *
nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
		   unsigned long long address, unsigned long size)
{
	return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
}
 362 
 363 /**
 364  * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
 365  * @cpp:        CPP handle
 366  * @name:       Name of region
 367  * @dest:       CPP id
 368  * @address:    Start address on CPP target
 369  * @size:       Size of area
 370  *
 371  * Allocate and initialize a CPP area structure, and lock it down so
 372  * that it can be accessed directly.
 373  *
 374  * NOTE: @address and @size must be 32-bit aligned values.
 375  * The area must also be 'released' when the structure is freed.
 376  *
 377  * Return: NFP CPP Area handle, or NULL
 378  */
 379 struct nfp_cpp_area *
 380 nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
 381                            unsigned long long address, unsigned long size)
 382 {
 383         struct nfp_cpp_area *area;
 384 
 385         area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
 386         if (!area)
 387                 return NULL;
 388 
 389         if (nfp_cpp_area_acquire(area)) {
 390                 nfp_cpp_area_free(area);
 391                 return NULL;
 392         }
 393 
 394         return area;
 395 }
 396 
/**
 * nfp_cpp_area_free() - free up the CPP area
 * @area:       CPP area handle
 *
 * Frees up memory resources held by the CPP area.
 */
void nfp_cpp_area_free(struct nfp_cpp_area *area)
{
	/* A non-zero refcount means someone still holds an acquire */
	if (atomic_read(&area->refcount))
		nfp_warn(area->cpp, "Warning: freeing busy area\n");
	nfp_cpp_area_put(area);
}
 409 
/* One acquire attempt, used as the wait_event() condition in
 * __nfp_cpp_area_acquire().  Stores the op's result in *@status and
 * returns true unless the transport asked us to retry (-EAGAIN).
 */
static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
{
	*status = area->cpp->op->area_acquire(area);

	return *status != -EAGAIN;
}
 416 
/* Acquire @area with area->mutex already held.  The first holder does
 * the real op->area_acquire(), possibly sleeping until the transport
 * stops returning -EAGAIN; later holders only bump the refcount.
 */
static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
	int err, status;

	/* Already acquired by someone else - just join in */
	if (atomic_inc_return(&area->refcount) > 1)
		return 0;

	if (!area->cpp->op->area_acquire)
		return 0;

	/* Wait is woken by nfp_cpp_area_release() freeing a mapping */
	err = wait_event_interruptible(area->cpp->waitq,
				       nfp_cpp_area_acquire_try(area, &status));
	if (!err)
		err = status;
	if (err) {
		nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
		atomic_dec(&area->refcount);
		return err;
	}

	/* Hold a struct reference for the duration of the acquire */
	nfp_cpp_area_get(area);

	return 0;
}
 441 
/**
 * nfp_cpp_area_acquire() - lock down a CPP area for access
 * @area:       CPP area handle
 *
 * Locks down the CPP area for a potential long term activity.  Area
 * must always be locked down before being accessed.
 *
 * May sleep waiting for a mapping to become available.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
{
	int ret;

	mutex_lock(&area->mutex);
	ret = __nfp_cpp_area_acquire(area);
	mutex_unlock(&area->mutex);

	return ret;
}
 461 
/**
 * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
 * @area:       CPP area handle
 *
 * Locks down the CPP area for a potential long term activity.  Area
 * must always be locked down before being accessed.
 *
 * NOTE: Returns -EAGAIN if no area is available
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
{
	mutex_lock(&area->mutex);
	/* First holder does a single, non-sleeping acquire attempt */
	if (atomic_inc_return(&area->refcount) == 1) {
		if (area->cpp->op->area_acquire) {
			int err;

			err = area->cpp->op->area_acquire(area);
			if (err < 0) {
				atomic_dec(&area->refcount);
				mutex_unlock(&area->mutex);
				return err;
			}
		}
	}
	mutex_unlock(&area->mutex);

	/* Struct reference held for the duration of the acquire */
	nfp_cpp_area_get(area);
	return 0;
}
 493 
/**
 * nfp_cpp_area_release() - release a locked down CPP area
 * @area:       CPP area handle
 *
 * Releases a previously locked down CPP area.
 */
void nfp_cpp_area_release(struct nfp_cpp_area *area)
{
	mutex_lock(&area->mutex);
	/* Only call the release on refcount == 0 */
	if (atomic_dec_and_test(&area->refcount)) {
		if (area->cpp->op->area_release) {
			area->cpp->op->area_release(area);
			/* Let anyone waiting for a BAR try to get one.. */
			wake_up_interruptible_all(&area->cpp->waitq);
		}
	}
	mutex_unlock(&area->mutex);

	/* Drop the struct reference taken by the matching acquire */
	nfp_cpp_area_put(area);
}
 515 
/**
 * nfp_cpp_area_release_free() - release CPP area and free it
 * @area:       CPP area handle
 *
 * Releases CPP area and frees up memory resources held by it.
 * Counterpart to nfp_cpp_area_alloc_acquire().
 */
void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
{
	nfp_cpp_area_release(area);
	nfp_cpp_area_free(area);
}
 527 
/**
 * nfp_cpp_area_read() - read data from CPP area
 * @area:         CPP area handle
 * @offset:       offset into CPP area
 * @kernel_vaddr: kernel address to put data into
 * @length:       number of bytes to read
 *
 * Read data from indicated CPP region.
 *
 * NOTE: @offset and @length must be 32-bit aligned values.
 * Area must have been locked down with an 'acquire'.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_area_read(struct nfp_cpp_area *area,
		      unsigned long offset, void *kernel_vaddr,
		      size_t length)
{
	/* Delegates directly to the transport implementation */
	return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
}
 548 
/**
 * nfp_cpp_area_write() - write data to CPP area
 * @area:       CPP area handle
 * @offset:     offset into CPP area
 * @kernel_vaddr: kernel address to read data from
 * @length:     number of bytes to write
 *
 * Write data to indicated CPP region.
 *
 * NOTE: @offset and @length must be 32-bit aligned values.
 * Area must have been locked down with an 'acquire'.
 *
 * Return: length of io, or -ERRNO
 */
int nfp_cpp_area_write(struct nfp_cpp_area *area,
		       unsigned long offset, const void *kernel_vaddr,
		       size_t length)
{
	/* Delegates directly to the transport implementation */
	return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
}
 569 
/**
 * nfp_cpp_area_size() - return size of a CPP area
 * @cpp_area:   CPP area handle
 *
 * Return: Size of the area, in bytes
 */
size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->size;
}
 580 
/**
 * nfp_cpp_area_name() - return name of a CPP area
 * @cpp_area:   CPP area handle
 *
 * The returned string lives inside the area allocation; do not free it.
 *
 * Return: Name of the area, or NULL
 */
const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->resource.name;
}
 591 
/**
 * nfp_cpp_area_priv() - return private struct for CPP area
 * @cpp_area:   CPP area handle
 *
 * The transport's priv area is laid out directly after the struct.
 *
 * Return: Private data for the CPP area
 */
void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
{
	return &cpp_area[1];
}
 602 
/**
 * nfp_cpp_area_cpp() - return CPP handle for CPP area
 * @cpp_area:   CPP area handle
 *
 * Return: NFP CPP handle
 */
struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
{
	return cpp_area->cpp;
}
 613 
 614 /**
 615  * nfp_cpp_area_resource() - get resource
 616  * @area:       CPP area handle
 617  *
 618  * NOTE: Area must have been locked down with an 'acquire'.
 619  *
 620  * Return: struct resource pointer, or NULL
 621  */
 622 struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
 623 {
 624         struct resource *res = NULL;
 625 
 626         if (area->cpp->op->area_resource)
 627                 res = area->cpp->op->area_resource(area);
 628 
 629         return res;
 630 }
 631 
 632 /**
 633  * nfp_cpp_area_phys() - get physical address of CPP area
 634  * @area:       CPP area handle
 635  *
 636  * NOTE: Area must have been locked down with an 'acquire'.
 637  *
 638  * Return: phy_addr_t of the area, or NULL
 639  */
 640 phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
 641 {
 642         phys_addr_t addr = ~0;
 643 
 644         if (area->cpp->op->area_phys)
 645                 addr = area->cpp->op->area_phys(area);
 646 
 647         return addr;
 648 }
 649 
 650 /**
 651  * nfp_cpp_area_iomem() - get IOMEM region for CPP area
 652  * @area:       CPP area handle
 653  *
 654  * Returns an iomem pointer for use with readl()/writel() style
 655  * operations.
 656  *
 657  * NOTE: Area must have been locked down with an 'acquire'.
 658  *
 659  * Return: __iomem pointer to the area, or NULL
 660  */
 661 void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
 662 {
 663         void __iomem *iomem = NULL;
 664 
 665         if (area->cpp->op->area_iomem)
 666                 iomem = area->cpp->op->area_iomem(area);
 667 
 668         return iomem;
 669 }
 670 
 671 /**
 672  * nfp_cpp_area_readl() - Read a u32 word from an area
 673  * @area:       CPP Area handle
 674  * @offset:     Offset into area
 675  * @value:      Pointer to read buffer
 676  *
 677  * Return: 0 on success, or -ERRNO
 678  */
 679 int nfp_cpp_area_readl(struct nfp_cpp_area *area,
 680                        unsigned long offset, u32 *value)
 681 {
 682         u8 tmp[4];
 683         int n;
 684 
 685         n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
 686         if (n != sizeof(tmp))
 687                 return n < 0 ? n : -EIO;
 688 
 689         *value = get_unaligned_le32(tmp);
 690         return 0;
 691 }
 692 
 693 /**
 694  * nfp_cpp_area_writel() - Write a u32 word to an area
 695  * @area:       CPP Area handle
 696  * @offset:     Offset into area
 697  * @value:      Value to write
 698  *
 699  * Return: 0 on success, or -ERRNO
 700  */
 701 int nfp_cpp_area_writel(struct nfp_cpp_area *area,
 702                         unsigned long offset, u32 value)
 703 {
 704         u8 tmp[4];
 705         int n;
 706 
 707         put_unaligned_le32(value, tmp);
 708         n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 709 
 710         return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 711 }
 712 
 713 /**
 714  * nfp_cpp_area_readq() - Read a u64 word from an area
 715  * @area:       CPP Area handle
 716  * @offset:     Offset into area
 717  * @value:      Pointer to read buffer
 718  *
 719  * Return: 0 on success, or -ERRNO
 720  */
 721 int nfp_cpp_area_readq(struct nfp_cpp_area *area,
 722                        unsigned long offset, u64 *value)
 723 {
 724         u8 tmp[8];
 725         int n;
 726 
 727         n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
 728         if (n != sizeof(tmp))
 729                 return n < 0 ? n : -EIO;
 730 
 731         *value = get_unaligned_le64(tmp);
 732         return 0;
 733 }
 734 
 735 /**
 736  * nfp_cpp_area_writeq() - Write a u64 word to an area
 737  * @area:       CPP Area handle
 738  * @offset:     Offset into area
 739  * @value:      Value to write
 740  *
 741  * Return: 0 on success, or -ERRNO
 742  */
 743 int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
 744                         unsigned long offset, u64 value)
 745 {
 746         u8 tmp[8];
 747         int n;
 748 
 749         put_unaligned_le64(value, tmp);
 750         n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 751 
 752         return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 753 }
 754 
 755 /**
 756  * nfp_cpp_area_fill() - fill a CPP area with a value
 757  * @area:       CPP area
 758  * @offset:     offset into CPP area
 759  * @value:      value to fill with
 760  * @length:     length of area to fill
 761  *
 762  * Fill indicated area with given value.
 763  *
 764  * Return: length of io, or -ERRNO
 765  */
 766 int nfp_cpp_area_fill(struct nfp_cpp_area *area,
 767                       unsigned long offset, u32 value, size_t length)
 768 {
 769         u8 tmp[4];
 770         size_t i;
 771         int k;
 772 
 773         put_unaligned_le32(value, tmp);
 774 
 775         if (offset % sizeof(tmp) || length % sizeof(tmp))
 776                 return -EINVAL;
 777 
 778         for (i = 0; i < length; i += sizeof(tmp)) {
 779                 k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp));
 780                 if (k < 0)
 781                         return k;
 782         }
 783 
 784         return i;
 785 }
 786 
 787 /**
 788  * nfp_cpp_area_cache_add() - Permanently reserve and area for the hot cache
 789  * @cpp:        NFP CPP handle
 790  * @size:       Size of the area - MUST BE A POWER OF 2.
 791  */
 792 int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
 793 {
 794         struct nfp_cpp_area_cache *cache;
 795         struct nfp_cpp_area *area;
 796 
 797         /* Allocate an area - we use the MU target's base as a placeholder,
 798          * as all supported chips have a MU.
 799          */
 800         area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
 801                                   0, size);
 802         if (!area)
 803                 return -ENOMEM;
 804 
 805         cache = kzalloc(sizeof(*cache), GFP_KERNEL);
 806         if (!cache)
 807                 return -ENOMEM;
 808 
 809         cache->id = 0;
 810         cache->addr = 0;
 811         cache->size = size;
 812         cache->area = area;
 813         mutex_lock(&cpp->area_cache_mutex);
 814         list_add_tail(&cache->entry, &cpp->area_cache_list);
 815         mutex_unlock(&cpp->area_cache_mutex);
 816 
 817         return 0;
 818 }
 819 
/* Look up (or rebind) a cached area covering @addr + *@offset for
 * @length bytes of target @id.  On success the cache entry is returned
 * with cpp->area_cache_mutex held - the matching area_cache_put() drops
 * it - and *@offset is rewritten to be relative to the cached area's
 * base.  Returns NULL when the access cannot be serviced from the cache
 * and the caller must allocate a temporary area instead.
 */
static struct nfp_cpp_area_cache *
area_cache_get(struct nfp_cpp *cpp, u32 id,
	       u64 addr, unsigned long *offset, size_t length)
{
	struct nfp_cpp_area_cache *cache;
	int err;

	/* Early exit when length == 0, which prevents
	 * the need for special case code below when
	 * checking against available cache size.
	 */
	if (length == 0 || id == 0)
		return NULL;

	/* Remap from cpp_island to cpp_target */
	err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
	if (err < 0)
		return NULL;

	mutex_lock(&cpp->area_cache_mutex);

	if (list_empty(&cpp->area_cache_list)) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

	addr += *offset;

	/* See if we have a match */
	list_for_each_entry(cache, &cpp->area_cache_list, entry) {
		if (id == cache->id &&
		    addr >= cache->addr &&
		    addr + length <= cache->addr + cache->size)
			goto exit;
	}

	/* No matches - inspect the tail of the LRU */
	cache = list_entry(cpp->area_cache_list.prev,
			   struct nfp_cpp_area_cache, entry);

	/* Can we fit in the cache entry? */
	if (round_down(addr + length - 1, cache->size) !=
	    round_down(addr, cache->size)) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

	/* If id != 0, we will need to release it */
	if (cache->id) {
		nfp_cpp_area_release(cache->area);
		cache->id = 0;
		cache->addr = 0;
	}

	/* Adjust the start address to be cache size aligned */
	cache->id = id;
	cache->addr = addr & ~(u64)(cache->size - 1);

	/* Re-init to the new ID and address */
	if (cpp->op->area_init) {
		err = cpp->op->area_init(cache->area,
					 id, cache->addr, cache->size);
		if (err < 0) {
			mutex_unlock(&cpp->area_cache_mutex);
			return NULL;
		}
	}

	/* Attempt to acquire */
	err = nfp_cpp_area_acquire(cache->area);
	if (err < 0) {
		mutex_unlock(&cpp->area_cache_mutex);
		return NULL;
	}

exit:
	/* Adjust offset */
	*offset = addr - cache->addr;
	return cache;
}
 900 
 901 static void
 902 area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
 903 {
 904         if (!cache)
 905                 return;
 906 
 907         /* Move to front of LRU */
 908         list_del(&cache->entry);
 909         list_add(&cache->entry, &cpp->area_cache_list);
 910 
 911         mutex_unlock(&cpp->area_cache_mutex);
 912 }
 913 
/* Single CPP read transfer: prefer an entry from the hot area cache;
 * otherwise allocate and acquire a temporary area just for this
 * transfer.  Returns the transport's byte count, or -ERRNO.
 */
static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
			  unsigned long long address, void *kernel_vaddr,
			  size_t length)
{
	struct nfp_cpp_area_cache *cache;
	struct nfp_cpp_area *area;
	unsigned long offset = 0;
	int err;

	/* On a hit, the cache mutex is held until area_cache_put() */
	cache = area_cache_get(cpp, destination, address, &offset, length);
	if (cache) {
		area = cache->area;
	} else {
		area = nfp_cpp_area_alloc(cpp, destination, address, length);
		if (!area)
			return -ENOMEM;

		err = nfp_cpp_area_acquire(area);
		if (err) {
			nfp_cpp_area_free(area);
			return err;
		}
	}

	err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);

	if (cache)
		area_cache_put(cpp, cache);
	else
		nfp_cpp_area_release_free(area);

	return err;
}
 947 
 948 /**
 949  * nfp_cpp_read() - read from CPP target
 950  * @cpp:                CPP handle
 951  * @destination:        CPP id
 952  * @address:            offset into CPP target
 953  * @kernel_vaddr:       kernel buffer for result
 954  * @length:             number of bytes to read
 955  *
 956  * Return: length of io, or -ERRNO
 957  */
 958 int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
 959                  unsigned long long address, void *kernel_vaddr,
 960                  size_t length)
 961 {
 962         size_t n, offset;
 963         int ret;
 964 
 965         for (offset = 0; offset < length; offset += n) {
 966                 unsigned long long r_addr = address + offset;
 967 
 968                 /* make first read smaller to align to safe window */
 969                 n = min_t(size_t, length - offset,
 970                           ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr);
 971 
 972                 ret = __nfp_cpp_read(cpp, destination, address + offset,
 973                                      kernel_vaddr + offset, n);
 974                 if (ret < 0)
 975                         return ret;
 976                 if (ret != n)
 977                         return offset + n;
 978         }
 979 
 980         return length;
 981 }
 982 
 983 static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
 984                            unsigned long long address,
 985                            const void *kernel_vaddr, size_t length)
 986 {
 987         struct nfp_cpp_area_cache *cache;
 988         struct nfp_cpp_area *area;
 989         unsigned long offset = 0;
 990         int err;
 991 
 992         cache = area_cache_get(cpp, destination, address, &offset, length);
 993         if (cache) {
 994                 area = cache->area;
 995         } else {
 996                 area = nfp_cpp_area_alloc(cpp, destination, address, length);
 997                 if (!area)
 998                         return -ENOMEM;
 999 
1000                 err = nfp_cpp_area_acquire(area);
1001                 if (err) {
1002                         nfp_cpp_area_free(area);
1003                         return err;
1004                 }
1005         }
1006 
1007         err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
1008 
1009         if (cache)
1010                 area_cache_put(cpp, cache);
1011         else
1012                 nfp_cpp_area_release_free(area);
1013 
1014         return err;
1015 }
1016 
1017 /**
1018  * nfp_cpp_write() - write to CPP target
1019  * @cpp:                CPP handle
1020  * @destination:        CPP id
1021  * @address:            offset into CPP target
1022  * @kernel_vaddr:       kernel buffer to read from
1023  * @length:             number of bytes to write
1024  *
1025  * Return: length of io, or -ERRNO
1026  */
1027 int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
1028                   unsigned long long address,
1029                   const void *kernel_vaddr, size_t length)
1030 {
1031         size_t n, offset;
1032         int ret;
1033 
1034         for (offset = 0; offset < length; offset += n) {
1035                 unsigned long long w_addr = address + offset;
1036 
1037                 /* make first write smaller to align to safe window */
1038                 n = min_t(size_t, length - offset,
1039                           ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr);
1040 
1041                 ret = __nfp_cpp_write(cpp, destination, address + offset,
1042                                       kernel_vaddr + offset, n);
1043                 if (ret < 0)
1044                         return ret;
1045                 if (ret != n)
1046                         return offset + n;
1047         }
1048 
1049         return length;
1050 }
1051 
/* Return the correct CPP address, and fixup xpb_addr as needed.
 *
 * Translates an XPB bus address into the CPP ID to use for the access,
 * rewriting *xpb_addr in place where the raw island encoding must be
 * adjusted.  Bits 24-29 of the XPB address carry the island number;
 * bit 30 routes the access via the global XPBM bus.
 */
static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
{
	int island;
	u32 xpb;

	/* CPP target 14 carries XPB accesses - NOTE(review): looks like
	 * this is the island-XPB target; consider a named constant.
	 */
	xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
	/* Ensure that non-local XPB accesses go
	 * out through the global XPBM bus.
	 */
	island = (*xpb_addr >> 24) & 0x3f;
	if (!island)
		return xpb;

	if (island != 1) {
		/* Any island other than 1 (ARM): force the global bus bit */
		*xpb_addr |= 1 << 30;
		return xpb;
	}

	/* Accesses to the ARM Island overlay uses Island 0 / Global Bit */
	*xpb_addr &= ~0x7f000000;
	if (*xpb_addr < 0x60000) {
		*xpb_addr |= 1 << 30;
	} else {
		/* And only non-ARM interfaces use the island id = 1 */
		if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
		    != NFP_CPP_INTERFACE_TYPE_ARM)
			*xpb_addr |= 1 << 24;
	}

	return xpb;
}
1084 
1085 /**
1086  * nfp_xpb_readl() - Read a u32 word from a XPB location
1087  * @cpp:        CPP device handle
1088  * @xpb_addr:   Address for operation
1089  * @value:      Pointer to read buffer
1090  *
1091  * Return: 0 on success, or -ERRNO
1092  */
1093 int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
1094 {
1095         u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1096 
1097         return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
1098 }
1099 
1100 /**
1101  * nfp_xpb_writel() - Write a u32 word to a XPB location
1102  * @cpp:        CPP device handle
1103  * @xpb_addr:   Address for operation
1104  * @value:      Value to write
1105  *
1106  * Return: 0 on success, or -ERRNO
1107  */
1108 int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
1109 {
1110         u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1111 
1112         return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
1113 }
1114 
1115 /**
1116  * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
1117  * @cpp:        NFP CPP device handle
1118  * @xpb_tgt:    XPB target and address
1119  * @mask:       mask of bits to alter
1120  * @value:      value to modify
1121  *
1122  * KERNEL: This operation is safe to call in interrupt or softirq context.
1123  *
1124  * Return: 0 on success, or -ERRNO
1125  */
1126 int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
1127                     u32 mask, u32 value)
1128 {
1129         int err;
1130         u32 tmp;
1131 
1132         err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
1133         if (err < 0)
1134                 return err;
1135 
1136         tmp &= ~mask;
1137         tmp |= mask & value;
1138         return nfp_xpb_writel(cpp, xpb_tgt, tmp);
1139 }
1140 
/* Lockdep markers: dedicated lock class for cpp->resource_lock
 * (see lockdep_set_class() in nfp_cpp_from_operations()) so lockdep
 * can distinguish it from other rwlocks.
 */
static struct lock_class_key nfp_cpp_resource_lock_key;
1143 
/* Release callback for cpp->dev.  Intentionally empty: the enclosing
 * struct nfp_cpp is freed explicitly (the error paths in
 * nfp_cpp_from_operations() call kfree(); presumably nfp_cpp_free()
 * does the same - not visible here).  The driver core warns if a
 * device has no release callback, hence this stub.
 */
static void nfp_cpp_dev_release(struct device *dev)
{
	/* Nothing to do here - it just makes the kernel happy */
}
1148 
/**
 * nfp_cpp_from_operations() - Create a NFP CPP handle
 *                             from an operations structure
 * @ops:	NFP CPP operations structure
 * @parent:	Parent device
 * @priv:	Private data of low-level implementation
 *
 * NOTE: On failure, cpp_ops->free will be called!
 *
 * Return: NFP CPP handle on success, ERR_PTR on failure
 */
struct nfp_cpp *
nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
			struct device *parent, void *priv)
{
	const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
	struct nfp_cpp *cpp;
	int ifc, err;
	u32 mask[2];
	u32 xpbaddr;
	size_t tgt;

	cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
	if (!cpp) {
		err = -ENOMEM;
		goto err_malloc;
	}

	cpp->op = ops;
	cpp->priv = priv;

	/* Query the low-level driver for the CPP interface ID */
	ifc = ops->get_interface(parent);
	if (ifc < 0) {
		err = ifc;
		goto err_free_cpp;
	}
	cpp->interface = ifc;
	/* Serial number is optional - only read it if the op exists */
	if (ops->read_serial) {
		err = ops->read_serial(parent, cpp->serial);
		if (err)
			goto err_free_cpp;
	}

	rwlock_init(&cpp->resource_lock);
	init_waitqueue_head(&cpp->waitq);
	lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
	INIT_LIST_HEAD(&cpp->resource_list);
	INIT_LIST_HEAD(&cpp->area_cache_list);
	mutex_init(&cpp->area_cache_mutex);
	cpp->dev.init_name = "cpp";
	cpp->dev.parent = parent;
	cpp->dev.release = nfp_cpp_dev_release;
	err = device_register(&cpp->dev);
	if (err < 0) {
		/* After a failed device_register() the device must be
		 * dropped with put_device(); the release callback is a
		 * no-op, so kfree() via err_free_cpp is still required.
		 */
		put_device(&cpp->dev);
		goto err_free_cpp;
	}

	dev_set_drvdata(&cpp->dev, cpp);

	/* NOTE: cpp_lock is NOT locked for op->init,
	 * since it may call NFP CPP API operations
	 */
	if (cpp->op->init) {
		err = cpp->op->init(cpp);
		if (err < 0) {
			dev_err(parent,
				"NFP interface initialization failed\n");
			goto err_out;
		}
	}

	err = nfp_cpp_model_autodetect(cpp, &cpp->model);
	if (err < 0) {
		dev_err(parent, "NFP model detection failed\n");
		goto err_out;
	}

	/* Populate the per-target IMB CPP mapping table from the device */
	for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
		/* Hardcoded XPB IMB Base, island 0 */
		xpbaddr = 0x000a0000 + (tgt * 4);
		err = nfp_xpb_readl(cpp, xpbaddr,
				    &cpp->imb_cat_table[tgt]);
		if (err < 0) {
			dev_err(parent,
				"Can't read CPP mapping from device\n");
			goto err_out;
		}
	}

	/* NOTE(review): mask[] is read here but never used in this
	 * function, and the return values are not checked - looks
	 * vestigial; confirm before removing.
	 */
	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
		      &mask[0]);
	nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
		      &mask[1]);

	err = nfp_cpp_set_mu_locality_lsb(cpp);
	if (err < 0) {
		dev_err(parent, "Can't calculate MU locality bit offset\n");
		goto err_out;
	}

	dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
		 nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));

	return cpp;

err_out:
	device_unregister(&cpp->dev);
err_free_cpp:
	kfree(cpp);
err_malloc:
	return ERR_PTR(err);
}
1262 
/**
 * nfp_cpp_priv() - Get the operations private data of a CPP handle
 * @cpp:	CPP handle
 *
 * Returns the @priv pointer that was supplied to
 * nfp_cpp_from_operations() by the low-level implementation.
 *
 * Return: Private data for the NFP CPP handle
 */
void *nfp_cpp_priv(struct nfp_cpp *cpp)
{
	return cpp->priv;
}
1273 
/**
 * nfp_cpp_device() - Get the Linux device handle of a CPP handle
 * @cpp:	CPP handle
 *
 * Returns the embedded struct device registered by
 * nfp_cpp_from_operations(); no reference is taken.
 *
 * Return: Device for the NFP CPP bus
 */
struct device *nfp_cpp_device(struct nfp_cpp *cpp)
{
	return &cpp->dev;
}
1284 
/* Dispatch an optional explicit-access callback from cpp->op.
 * Evaluates to the callback's return value, or -ENODEV when the
 * low-level implementation does not provide the callback.
 */
#define NFP_EXPL_OP(func, expl, args...)                          \
	({                                                        \
		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
		int err = -ENODEV;                                \
								  \
		if (cpp->op->func)                                \
			err = cpp->op->func(expl, ##args);        \
		err;                                              \
	})

/* As NFP_EXPL_OP(), but for callbacks with no return value */
#define NFP_EXPL_OP_NR(func, expl, args...)                       \
	({                                                        \
		struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
								  \
		if (cpp->op->func)                                \
			cpp->op->func(expl, ##args);              \
								  \
	})
1303 
1304 /**
1305  * nfp_cpp_explicit_acquire() - Acquire explicit access handle
1306  * @cpp:        NFP CPP handle
1307  *
1308  * The 'data_ref' and 'signal_ref' values are useful when
1309  * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
1310  *
1311  * Return: NFP CPP explicit handle
1312  */
1313 struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
1314 {
1315         struct nfp_cpp_explicit *expl;
1316         int err;
1317 
1318         expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
1319         if (!expl)
1320                 return NULL;
1321 
1322         expl->cpp = cpp;
1323         err = NFP_EXPL_OP(explicit_acquire, expl);
1324         if (err < 0) {
1325                 kfree(expl);
1326                 return NULL;
1327         }
1328 
1329         return expl;
1330 }
1331 
/**
 * nfp_cpp_explicit_set_target() - Set target fields for explicit
 * @expl:	Explicit handle
 * @cpp_id:	CPP ID field
 * @len:	CPP Length field
 * @mask:	CPP Mask field
 *
 * Stores the target fields in the pending command; they take effect
 * on the next nfp_cpp_explicit_do().
 *
 * Return: 0, or -ERRNO (currently always 0)
 */
int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
				u32 cpp_id, u8 len, u8 mask)
{
	expl->cmd.cpp_id = cpp_id;
	expl->cmd.len = len;
	expl->cmd.byte_mask = mask;

	return 0;
}
1350 
/**
 * nfp_cpp_explicit_set_data() - Set data fields for explicit
 * @expl:	Explicit handle
 * @data_master: CPP Data Master field
 * @data_ref:	CPP Data Ref field
 *
 * Stores the data fields in the pending command; they take effect
 * on the next nfp_cpp_explicit_do().
 *
 * Return: 0, or -ERRNO (currently always 0)
 */
int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
			      u8 data_master, u16 data_ref)
{
	expl->cmd.data_master = data_master;
	expl->cmd.data_ref = data_ref;

	return 0;
}
1367 
/**
 * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
 * @expl:	Explicit handle
 * @signal_master: CPP Signal Master field
 * @signal_ref:	CPP Signal Ref field
 *
 * Stores the signal fields in the pending command; they take effect
 * on the next nfp_cpp_explicit_do().
 *
 * Return: 0, or -ERRNO (currently always 0)
 */
int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
				u8 signal_master, u8 signal_ref)
{
	expl->cmd.signal_master = signal_master;
	expl->cmd.signal_ref = signal_ref;

	return 0;
}
1384 
1385 /**
1386  * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
1387  * @expl:       Explicit handle
1388  * @posted:     True for signaled completion, false otherwise
1389  * @siga:       CPP Signal A field
1390  * @siga_mode:  CPP Signal A Mode field
1391  * @sigb:       CPP Signal B field
1392  * @sigb_mode:  CPP Signal B Mode field
1393  *
1394  * Return: 0, or -ERRNO
1395  */
1396 int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
1397                                 u8 siga,
1398                                 enum nfp_cpp_explicit_signal_mode siga_mode,
1399                                 u8 sigb,
1400                                 enum nfp_cpp_explicit_signal_mode sigb_mode)
1401 {
1402         expl->cmd.posted = posted;
1403         expl->cmd.siga = siga;
1404         expl->cmd.sigb = sigb;
1405         expl->cmd.siga_mode = siga_mode;
1406         expl->cmd.sigb_mode = sigb_mode;
1407 
1408         return 0;
1409 }
1410 
/**
 * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access
 * @expl:	NFP CPP Explicit handle
 * @buff:	Data to have the target pull in the transaction
 * @len:	Length of data, in bytes
 *
 * The 'len' parameter must be less than or equal to 128 bytes.
 *
 * If this function is called before the configuration
 * registers are set, it will return -EINVAL.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
			 const void *buff, size_t len)
{
	/* Delegates to the op's explicit_put; -ENODEV if not provided */
	return NFP_EXPL_OP(explicit_put, expl, buff, len);
}
1429 
/**
 * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
 * @expl:	NFP CPP Explicit handle
 * @address:	Address to send in the explicit transaction
 *
 * If this function is called before the configuration
 * registers are set, it will return -EINVAL.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
{
	/* Delegates to the op's explicit_do; -ENODEV if not provided */
	return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
}
1444 
/**
 * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access
 * @expl:	NFP CPP Explicit handle
 * @buff:	Data that the target pushed in the transaction
 * @len:	Length of data, in bytes
 *
 * The 'len' parameter must be less than or equal to 128 bytes.
 *
 * If this function is called before all three configuration
 * registers are set, it will return -EINVAL.
 *
 * If this function is called before nfp_cpp_explicit_do()
 * has completed, it will return -EBUSY.
 *
 * Return: 0, or -ERRNO
 */
int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
{
	/* Delegates to the op's explicit_get; -ENODEV if not provided */
	return NFP_EXPL_OP(explicit_get, expl, buff, len);
}
1465 
/**
 * nfp_cpp_explicit_release() - Release explicit access handle
 * @expl:	NFP CPP Explicit handle
 *
 * Gives the low-level implementation a chance to release its
 * resources, then frees the handle allocated by
 * nfp_cpp_explicit_acquire().  @expl must not be used afterwards.
 */
void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
{
	NFP_EXPL_OP_NR(explicit_release, expl);
	kfree(expl);
}
1476 
/**
 * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
 * @cpp_explicit:	CPP explicit handle
 *
 * Returns the CPP handle stored when the explicit handle was
 * acquired; no reference counting is involved.
 *
 * Return: NFP CPP handle of the explicit
 */
struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
{
	return cpp_explicit->cpp;
}
1487 
/**
 * nfp_cpp_explicit_priv() - return private struct for CPP explicit
 * @cpp_explicit:	CPP explicit handle
 *
 * Return: private data of the explicit, or NULL
 */
void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
{
	/* The private area is the explicit_priv_size bytes allocated
	 * immediately after the struct in nfp_cpp_explicit_acquire().
	 */
	return &cpp_explicit[1];
}

/* [<][>][^][v][top][bottom][index][help] */