root/drivers/iommu/fsl_pamu.c

DEFINITIONS

This source file includes the following definitions.
  1. pamu_get_max_subwin_cnt
  2. pamu_get_ppaace
  3. pamu_enable_liodn
  4. pamu_disable_liodn
  5. map_addrspace_size_to_wse
  6. map_subwindow_cnt_to_wce
  7. pamu_init_ppaace
  8. pamu_init_spaace
  9. pamu_get_spaace
  10. pamu_get_fspi_and_allocate
  11. pamu_free_subwins
  12. pamu_update_paace_stash
  13. pamu_disable_spaace
  14. pamu_config_ppaace
  15. pamu_config_spaace
  16. get_ome_index
  17. get_stash_id
  18. setup_qbman_paace
  19. setup_omt
  20. get_pamu_cap_values
  21. setup_one_pamu
  22. setup_liodns
  23. pamu_av_isr
  24. create_csd
  25. fsl_pamu_probe
  26. fsl_pamu_init

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *
   4  * Copyright (C) 2013 Freescale Semiconductor, Inc.
   5  */
   6 
   7 #define pr_fmt(fmt)    "fsl-pamu: %s: " fmt, __func__
   8 
   9 #include "fsl_pamu.h"
  10 
  11 #include <linux/fsl/guts.h>
  12 #include <linux/interrupt.h>
  13 #include <linux/genalloc.h>
  14 
  15 #include <asm/mpc85xx.h>
  16 
  17 /* define indexes for each operation mapping scenario */
  18 #define OMI_QMAN        0x00
  19 #define OMI_FMAN        0x01
  20 #define OMI_QMAN_PRIV   0x02
  21 #define OMI_CAAM        0x03
  22 
  23 #define make64(high, low) (((u64)(high) << 32) | (low))
  24 
  25 struct pamu_isr_data {
  26         void __iomem *pamu_reg_base;    /* Base address of PAMU regs */
  27         unsigned int count;             /* The number of PAMUs */
  28 };
  29 
  30 static struct paace *ppaact;
  31 static struct paace *spaact;
  32 
  33 static bool probed;                     /* Has PAMU been probed? */
  34 
  35 /*
  36  * Table of compatible strings for the device tree guts node
  37  * on QorIQ SOCs.
  38  * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4
  39  * SOCs, while the older SOCs use the
  40  * "fsl,qoriq-device-config-1.0" string.
  41  */
  42 static const struct of_device_id guts_device_ids[] = {
  43         { .compatible = "fsl,qoriq-device-config-1.0", },
  44         { .compatible = "fsl,qoriq-device-config-2.0", },
  45         {}
  46 };
  47 
  48 /*
  49  * Table of compatible strings for the device tree L3 cache
  50  * controller node.
  51  * "fsl,t4240-l3-cache-controller" corresponds to T4,
  52  * "fsl,b4860-l3-cache-controller" corresponds to B4 &
  53  * "fsl,p4080-l3-cache-controller" corresponds to other
  54  * SOCs.
  55  */
  56 static const struct of_device_id l3_device_ids[] = {
  57         { .compatible = "fsl,t4240-l3-cache-controller", },
  58         { .compatible = "fsl,b4860-l3-cache-controller", },
  59         { .compatible = "fsl,p4080-l3-cache-controller", },
  60         {}
  61 };
  62 
  63 /* maximum subwindows permitted per liodn */
  64 static u32 max_subwindow_count;
  65 
  66 /* Pool for fspi allocation */
  67 static struct gen_pool *spaace_pool;
  68 
  69 /**
  70  * pamu_get_max_subwin_cnt() - Return the maximum supported
  71  * subwindow count per liodn.
  72  *
  73  */
  74 u32 pamu_get_max_subwin_cnt(void)
  75 {
  76         return max_subwindow_count;
  77 }
  78 
  79 /**
  80  * pamu_get_ppaace() - Return the primary PAACE
  81  * @liodn: liodn PAACT index for desired PAACE
  82  *
  83  * Returns the ppaace pointer upon success, otherwise
  84  * returns NULL.
  85  */
  86 static struct paace *pamu_get_ppaace(int liodn)
  87 {
  88         if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) {
  89                 pr_debug("PPAACT doesn't exist\n");
  90                 return NULL;
  91         }
  92 
  93         return &ppaact[liodn];
  94 }
  95 
  96 /**
  97  * pamu_enable_liodn() - Set the valid bit of the PAACE
  98  * @liodn: liodn PAACT index for desired PAACE
  99  *
 100  * Returns 0 upon success, otherwise a negative error code.
 101  */
 102 int pamu_enable_liodn(int liodn)
 103 {
 104         struct paace *ppaace;
 105 
 106         ppaace = pamu_get_ppaace(liodn);
 107         if (!ppaace) {
 108                 pr_debug("Invalid primary paace entry\n");
 109                 return -ENOENT;
 110         }
 111 
 112         if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) {
 113                 pr_debug("liodn %d not configured\n", liodn);
 114                 return -EINVAL;
 115         }
 116 
 117         /* Ensure that all other stores to the ppaace complete first */
 118         mb();
 119 
 120         set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
 121         mb();
 122 
 123         return 0;
 124 }
 125 
 126 /**
 127  * pamu_disable_liodn() - Clear the valid bit of the PAACE
 128  * @liodn: liodn PAACT index for desired PAACE
 129  *
 130  * Returns 0 upon success, otherwise a negative error code.
 131  */
 132 int pamu_disable_liodn(int liodn)
 133 {
 134         struct paace *ppaace;
 135 
 136         ppaace = pamu_get_ppaace(liodn);
 137         if (!ppaace) {
 138                 pr_debug("Invalid primary paace entry\n");
 139                 return -ENOENT;
 140         }
 141 
 142         set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
 143         mb();
 144 
 145         return 0;
 146 }
 147 
 148 /* Derive the window size encoding for a particular PAACE entry */
 149 static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
 150 {
 151         /* Bug if not a power of 2 */
 152         BUG_ON(addrspace_size & (addrspace_size - 1));
 153 
 154         /* window size is 2^(WSE+1) bytes */
 155         return fls64(addrspace_size) - 2;
 156 }
 157 
 158 /* Derive the PAACE window count encoding for the subwindow count */
 159 static unsigned int map_subwindow_cnt_to_wce(u32 subwindow_cnt)
 160 {
 161         /* window count is 2^(WCE+1) */
 162         return __ffs(subwindow_cnt) - 1;
 163 }
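
     /*
      * Worked example of the two encodings above (values derived from the
      * formulas in the comments, not from hardware documentation):
      *
      *   map_addrspace_size_to_wse(0x100000) -> fls64(0x100000) - 2 = 19,
      *                                          i.e. a 2^(19+1) = 1 MiB window
      *   map_subwindow_cnt_to_wce(16)        -> __ffs(16) - 1 = 3,
      *                                          i.e. 2^(3+1) = 16 subwindows
      */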
 164 
 165 /*
 166  * Set the PAACE type as primary and set the coherency required domain
 167  * attribute
 168  */
 169 static void pamu_init_ppaace(struct paace *ppaace)
 170 {
 171         set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY);
 172 
 173         set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
 174                PAACE_M_COHERENCE_REQ);
 175 }
 176 
 177 /*
 178  * Set the PAACE type as secondary and set the coherency required domain
 179  * attribute.
 180  */
 181 static void pamu_init_spaace(struct paace *spaace)
 182 {
 183         set_bf(spaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_SECONDARY);
 184         set_bf(spaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
 185                PAACE_M_COHERENCE_REQ);
 186 }
 187 
 188 /*
 189  * Return the spaace (corresponding to the secondary window index)
 190  * for a particular ppaace.
 191  */
 192 static struct paace *pamu_get_spaace(struct paace *paace, u32 wnum)
 193 {
 194         u32 subwin_cnt;
 195         struct paace *spaace = NULL;
 196 
 197         subwin_cnt = 1UL << (get_bf(paace->impl_attr, PAACE_IA_WCE) + 1);
 198 
 199         if (wnum < subwin_cnt)
 200                 spaace = &spaact[paace->fspi + wnum];
 201         else
 202                 pr_debug("secondary paace out of bounds\n");
 203 
 204         return spaace;
 205 }
 206 
 207 /**
 208  * pamu_get_fspi_and_allocate() - Allocates fspi index and reserves subwindows
 209  *                                required for primary PAACE in the secondary
 210  *                                PAACE table.
 211  * @subwin_cnt: Number of subwindows to be reserved.
 212  *
 213  * A PPAACE entry may have a number of associated subwindows. A subwindow
 214  * corresponds to a SPAACE entry in the SPAACT table. Each PAACE entry stores
 215  * the index (fspi) of the first SPAACE entry in the SPAACT table. This
 216  * function returns the index of the first SPAACE entry. The remaining
 217  * SPAACE entries are reserved contiguously from that index.
 218  *
 219  * Returns a valid fspi index in the range 0 - SPAACE_NUMBER_ENTRIES on success.
 220  * If no SPAACE entry is available, or the allocator cannot reserve the required
 221  * number of contiguous entries, the function returns ULONG_MAX, indicating failure.
 222  *
 223  */
 224 static unsigned long pamu_get_fspi_and_allocate(u32 subwin_cnt)
 225 {
 226         unsigned long spaace_addr;
 227 
 228         spaace_addr = gen_pool_alloc(spaace_pool, subwin_cnt * sizeof(struct paace));
 229         if (!spaace_addr)
 230                 return ULONG_MAX;
 231 
 232         return (spaace_addr - (unsigned long)spaact) / (sizeof(struct paace));
 233 }
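
     /*
      * Illustration of the fspi arithmetic above (entry number made up): if
      * gen_pool_alloc() hands back the address of &spaact[8], the returned
      * index is ((unsigned long)&spaact[8] - (unsigned long)spaact) /
      * sizeof(struct paace) == 8, and pamu_get_spaace() later resolves
      * subwindow n of that window to &spaact[fspi + n - 1].
      */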
 234 
 235 /* Release the subwindows reserved for a particular LIODN */
 236 void pamu_free_subwins(int liodn)
 237 {
 238         struct paace *ppaace;
 239         u32 subwin_cnt, size;
 240 
 241         ppaace = pamu_get_ppaace(liodn);
 242         if (!ppaace) {
 243                 pr_debug("Invalid liodn entry\n");
 244                 return;
 245         }
 246 
 247         if (get_bf(ppaace->addr_bitfields, PPAACE_AF_MW)) {
 248                 subwin_cnt = 1UL << (get_bf(ppaace->impl_attr, PAACE_IA_WCE) + 1);
 249                 size = (subwin_cnt - 1) * sizeof(struct paace);
 250                 gen_pool_free(spaace_pool, (unsigned long)&spaact[ppaace->fspi], size);
 251                 set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
 252         }
 253 }
 254 
 255 /*
 256  * Function used for updating the stash destination for the corresponding
 257  * LIODN.
 258  */
 259 int pamu_update_paace_stash(int liodn, u32 subwin, u32 value)
 260 {
 261         struct paace *paace;
 262 
 263         paace = pamu_get_ppaace(liodn);
 264         if (!paace) {
 265                 pr_debug("Invalid liodn entry\n");
 266                 return -ENOENT;
 267         }
 268         if (subwin) {
 269                 paace = pamu_get_spaace(paace, subwin - 1);
 270                 if (!paace)
 271                         return -ENOENT;
 272         }
 273         set_bf(paace->impl_attr, PAACE_IA_CID, value);
 274 
 275         mb();
 276 
 277         return 0;
 278 }
 279 
 280 /* Disable a subwindow corresponding to the LIODN */
 281 int pamu_disable_spaace(int liodn, u32 subwin)
 282 {
 283         struct paace *paace;
 284 
 285         paace = pamu_get_ppaace(liodn);
 286         if (!paace) {
 287                 pr_debug("Invalid liodn entry\n");
 288                 return -ENOENT;
 289         }
 290         if (subwin) {
 291                 paace = pamu_get_spaace(paace, subwin - 1);
 292                 if (!paace)
 293                         return -ENOENT;
 294                 set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID);
 295         } else {
 296                 set_bf(paace->addr_bitfields, PAACE_AF_AP,
 297                        PAACE_AP_PERMS_DENIED);
 298         }
 299 
 300         mb();
 301 
 302         return 0;
 303 }
 304 
 305 /**
 306  * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn
 307  *
 308  * @liodn: Logical IO device number
 309  * @win_addr: starting address of DSA window
 310  * @win_size: size of DSA window
 311  * @omi: Operation mapping index -- if ~omi == 0 then omi not defined
 312  * @rpn: real (true physical) page number
 313  * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
 314  *           snoopid not defined
 315  * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then
 316  *           stashid not defined
 317  * @subwin_cnt: number of sub-windows
 318  * @prot: window permissions
 319  *
 320  * Returns 0 upon success, otherwise a negative error code.
 321  */
 322 int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
 323                        u32 omi, unsigned long rpn, u32 snoopid, u32 stashid,
 324                        u32 subwin_cnt, int prot)
 325 {
 326         struct paace *ppaace;
 327         unsigned long fspi;
 328 
 329         if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
 330                 pr_debug("window size too small or not a power of two %pa\n",
 331                          &win_size);
 332                 return -EINVAL;
 333         }
 334 
 335         if (win_addr & (win_size - 1)) {
 336                 pr_debug("window address is not aligned with window size\n");
 337                 return -EINVAL;
 338         }
 339 
 340         ppaace = pamu_get_ppaace(liodn);
 341         if (!ppaace)
 342                 return -ENOENT;
 343 
 344         /* window size is 2^(WSE+1) bytes */
 345         set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE,
 346                map_addrspace_size_to_wse(win_size));
 347 
 348         pamu_init_ppaace(ppaace);
 349 
 350         ppaace->wbah = win_addr >> (PAMU_PAGE_SHIFT + 20);
 351         set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL,
 352                (win_addr >> PAMU_PAGE_SHIFT));
 353 
 354         /* set up operation mapping if it's configured */
 355         if (omi < OME_NUMBER_ENTRIES) {
 356                 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
 357                 ppaace->op_encode.index_ot.omi = omi;
 358         } else if (~omi != 0) {
 359                 pr_debug("bad operation mapping index: %d\n", omi);
 360                 return -EINVAL;
 361         }
 362 
 363         /* configure stash id */
 364         if (~stashid != 0)
 365                 set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid);
 366 
 367         /* configure snoop id */
 368         if (~snoopid != 0)
 369                 ppaace->domain_attr.to_host.snpid = snoopid;
 370 
 371         if (subwin_cnt) {
 372                 /* The first entry is in the primary PAACE instead */
 373                 fspi = pamu_get_fspi_and_allocate(subwin_cnt - 1);
 374                 if (fspi == ULONG_MAX) {
 375                         pr_debug("spaace indexes exhausted\n");
 376                         return -EINVAL;
 377                 }
 378 
 379                 /* window count is 2^(WCE+1) */
 380                 set_bf(ppaace->impl_attr, PAACE_IA_WCE,
 381                        map_subwindow_cnt_to_wce(subwin_cnt));
 382                 set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0x1);
 383                 ppaace->fspi = fspi;
 384         } else {
 385                 set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
 386                 ppaace->twbah = rpn >> 20;
 387                 set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, rpn);
 388                 set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot);
 389                 set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0);
 390                 set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0);
 391         }
 392         mb();
 393 
 394         return 0;
 395 }
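
     /*
      * Minimal usage sketch (illustrative only; the LIODN, addresses and
      * size below are made up): map a 1 MiB window at bus address
      * 0x20000000 for LIODN 42 straight to physical address 0x90000000,
      * with no subwindows and no operation mapping, stashing or snooping
      * (~(u32)0 marks those fields as "not defined"):
      *
      *   ret = pamu_config_ppaace(42, 0x20000000, 0x100000, ~(u32)0,
      *                            0x90000000 >> PAMU_PAGE_SHIFT,
      *                            ~(u32)0, ~(u32)0, 0, PAACE_AP_PERMS_ALL);
      *   if (!ret)
      *           ret = pamu_enable_liodn(42);
      */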
 396 
 397 /**
 398  * pamu_config_spaace() - Sets up SPAACE entry for specified subwindow
 399  *
 400  * @liodn:  Logical IO device number
 401  * @subwin_cnt:  number of sub-windows associated with dma-window
 402  * @subwin: subwindow index
 403  * @subwin_size: size of subwindow
 404  * @omi: Operation mapping index
 405  * @rpn: real (true physical) page number
 406  * @snoopid: snoop id for hardware coherency -- if ~snoopid == 0 then
 407  *                        snoopid not defined
 408  * @stashid: cache stash id for associated cpu
 409  * @enable: enable/disable subwindow after reconfiguration
 410  * @prot: sub window permissions
 411  *
 412  * Returns 0 upon success, otherwise a negative error code.
 413  */
 414 int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
 415                        phys_addr_t subwin_size, u32 omi, unsigned long rpn,
 416                        u32 snoopid, u32 stashid, int enable, int prot)
 417 {
 418         struct paace *paace;
 419 
 420         /* setup sub-windows */
 421         if (!subwin_cnt) {
 422                 pr_debug("Invalid subwindow count\n");
 423                 return -EINVAL;
 424         }
 425 
 426         paace = pamu_get_ppaace(liodn);
 427         if (subwin > 0 && subwin < subwin_cnt && paace) {
 428                 paace = pamu_get_spaace(paace, subwin - 1);
 429 
 430                 if (paace && !(paace->addr_bitfields & PAACE_V_VALID)) {
 431                         pamu_init_spaace(paace);
 432                         set_bf(paace->addr_bitfields, SPAACE_AF_LIODN, liodn);
 433                 }
 434         }
 435 
 436         if (!paace) {
 437                 pr_debug("Invalid liodn entry\n");
 438                 return -ENOENT;
 439         }
 440 
 441         if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
 442                 pr_debug("subwindow size out of range, or not a power of 2\n");
 443                 return -EINVAL;
 444         }
 445 
 446         if (rpn == ULONG_MAX) {
 447                 pr_debug("real page number out of range\n");
 448                 return -EINVAL;
 449         }
 450 
 451         /* window size is 2^(WSE+1) bytes */
 452         set_bf(paace->win_bitfields, PAACE_WIN_SWSE,
 453                map_addrspace_size_to_wse(subwin_size));
 454 
 455         set_bf(paace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE);
 456         paace->twbah = rpn >> 20;
 457         set_bf(paace->win_bitfields, PAACE_WIN_TWBAL, rpn);
 458         set_bf(paace->addr_bitfields, PAACE_AF_AP, prot);
 459 
 460         /* configure snoop id */
 461         if (~snoopid != 0)
 462                 paace->domain_attr.to_host.snpid = snoopid;
 463 
 464         /* set up operation mapping if it's configured */
 465         if (omi < OME_NUMBER_ENTRIES) {
 466                 set_bf(paace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
 467                 paace->op_encode.index_ot.omi = omi;
 468         } else if (~omi != 0) {
 469                 pr_debug("bad operation mapping index: %d\n", omi);
 470                 return -EINVAL;
 471         }
 472 
 473         if (~stashid != 0)
 474                 set_bf(paace->impl_attr, PAACE_IA_CID, stashid);
 475 
 476         smp_wmb();
 477 
 478         if (enable)
 479                 set_bf(paace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID);
 480 
 481         mb();
 482 
 483         return 0;
 484 }
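
     /*
      * Illustrative call (made-up values): assuming pamu_config_ppaace()
      * reserved 16 subwindows for LIODN 42, subwindow 1 could be pointed
      * at a 64 KiB region and enabled with:
      *
      *   ret = pamu_config_spaace(42, 16, 1, 0x10000, ~(u32)0,
      *                            0x91000000 >> PAMU_PAGE_SHIFT,
      *                            ~(u32)0, ~(u32)0, 1, PAACE_AP_PERMS_ALL);
      */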
 485 
 486 /**
 487  * get_ome_index() - Returns the index in the operation mapping table
 488  *                   for device.
 489  * @omi_index: pointer for storing the index value
 490  * @dev: device whose operation mapping index is wanted
 491  */
 492 void get_ome_index(u32 *omi_index, struct device *dev)
 493 {
 494         if (of_device_is_compatible(dev->of_node, "fsl,qman-portal"))
 495                 *omi_index = OMI_QMAN;
 496         if (of_device_is_compatible(dev->of_node, "fsl,qman"))
 497                 *omi_index = OMI_QMAN_PRIV;
 498 }
 499 
 500 /**
 501  * get_stash_id - Returns stash destination id corresponding to a
 502  *                cache type and vcpu.
 503  * @stash_dest_hint: L1, L2 or L3
 504  * @vcpu: vcpu target for a particular cache type.
 505  *
 506  * Returns the stash id on success or ~(u32)0 on failure.
 507  *
 508  */
 509 u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
 510 {
 511         const u32 *prop;
 512         struct device_node *node;
 513         u32 cache_level;
 514         int len, found = 0;
 515         int i;
 516 
 517         /* Fastpath, exit early if L3/CPC cache is target for stashing */
 518         if (stash_dest_hint == PAMU_ATTR_CACHE_L3) {
 519                 node = of_find_matching_node(NULL, l3_device_ids);
 520                 if (node) {
 521                         prop = of_get_property(node, "cache-stash-id", NULL);
 522                         if (!prop) {
 523                                 pr_debug("missing cache-stash-id at %pOF\n",
 524                                          node);
 525                                 of_node_put(node);
 526                                 return ~(u32)0;
 527                         }
 528                         of_node_put(node);
 529                         return be32_to_cpup(prop);
 530                 }
 531                 return ~(u32)0;
 532         }
 533 
 534         for_each_of_cpu_node(node) {
 535                 prop = of_get_property(node, "reg", &len);
 536                 for (i = 0; i < len / sizeof(u32); i++) {
 537                         if (be32_to_cpup(&prop[i]) == vcpu) {
 538                                 found = 1;
 539                                 goto found_cpu_node;
 540                         }
 541                 }
 542         }
 543 found_cpu_node:
 544 
 545         /* find the hwnode that represents the cache */
 546         for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) {
 547                 if (stash_dest_hint == cache_level) {
 548                         prop = of_get_property(node, "cache-stash-id", NULL);
 549                         if (!prop) {
 550                                 pr_debug("missing cache-stash-id at %pOF\n",
 551                                          node);
 552                                 of_node_put(node);
 553                                 return ~(u32)0;
 554                         }
 555                         of_node_put(node);
 556                         return be32_to_cpup(prop);
 557                 }
 558 
 559                 prop = of_get_property(node, "next-level-cache", NULL);
 560                 if (!prop) {
 561                         pr_debug("can't find next-level-cache at %pOF\n", node);
 562                         of_node_put(node);
 563                         return ~(u32)0;  /* can't traverse any further */
 564                 }
 565                 of_node_put(node);
 566 
 567                 /* advance to next node in cache hierarchy */
 568                 node = of_find_node_by_phandle(*prop);
 569                 if (!node) {
 570                         pr_debug("Invalid node for cache hierarchy\n");
 571                         return ~(u32)0;
 572                 }
 573         }
 574 
 575         pr_debug("stash dest not found for %d on vcpu %d\n",
 576                  stash_dest_hint, vcpu);
 577         return ~(u32)0;
 578 }
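
     /*
      * Example of the lookup above (property names are the ones the code
      * reads; the topology is illustrative): assuming the PAMU_ATTR_CACHE_L2
      * hint from fsl_pamu.h, get_stash_id(PAMU_ATTR_CACHE_L2, 2) finds the
      * cpu node whose "reg" contains 2, skips the L1 level, follows that
      * node's "next-level-cache" phandle to the L2 cache node, and returns
      * the L2 node's "cache-stash-id" value.
      */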
 579 
 580 /* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
 581 #define QMAN_PAACE 1
 582 #define QMAN_PORTAL_PAACE 2
 583 #define BMAN_PAACE 3
 584 
 585 /*
 586  * Setup operation mapping and stash destinations for QMAN and QMAN portal.
 587  * Memory accesses to QMAN and BMAN private memory need not be coherent, so
 588  * clear the PAACE entry coherency attribute for them.
 589  */
 590 static void setup_qbman_paace(struct paace *ppaace, int  paace_type)
 591 {
 592         switch (paace_type) {
 593         case QMAN_PAACE:
 594                 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
 595                 ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
 596                 /* setup QMAN Private data stashing for the L3 cache */
 597                 set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
 598                 set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
 599                        0);
 600                 break;
 601         case QMAN_PORTAL_PAACE:
 602                 set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
 603                 ppaace->op_encode.index_ot.omi = OMI_QMAN;
 604                 /* Set DQRR and Frame stashing for the L3 cache */
 605                 set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
 606                 break;
 607         case BMAN_PAACE:
 608                 set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
 609                        0);
 610                 break;
 611         }
 612 }
 613 
 614 /*
 615  * Setup the operation mapping table for various devices. This is a static
 616  * table where each table index corresponds to a particular device. PAMU uses
 617  * this table to translate device transaction to appropriate corenet
 618  * transaction.
 619  */
 620 static void setup_omt(struct ome *omt)
 621 {
 622         struct ome *ome;
 623 
 624         /* Configure OMI_QMAN */
 625         ome = &omt[OMI_QMAN];
 626 
 627         ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ;
 628         ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
 629         ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
 630         ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO;
 631 
 632         ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC;
 633         ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE;
 634 
 635         /* Configure OMI_FMAN */
 636         ome = &omt[OMI_FMAN];
 637         ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
 638         ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
 639 
 640         /* Configure OMI_QMAN private */
 641         ome = &omt[OMI_QMAN_PRIV];
 642         ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READ;
 643         ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
 644         ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA;
 645         ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA;
 646 
 647         /* Configure OMI_CAAM */
 648         ome = &omt[OMI_CAAM];
 649         ome->moe[IOE_READ_IDX]  = EOE_VALID | EOE_READI;
 650         ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE;
 651 }
 652 
 653 /*
 654  * Get the maximum number of subwindows per liodn
 655  * supported by PAMU
 656  */
 657 static void get_pamu_cap_values(unsigned long pamu_reg_base)
 658 {
 659         u32 pc_val;
 660 
 661         pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3));
 662         /* Maximum number of subwindows per liodn */
 663         max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val));
 664 }
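
     /*
      * For example (field value made up): a PAMU_PC3 MWCE field of 4 gives
      * max_subwindow_count = 1 << (1 + 4) = 32 subwindows per liodn.
      */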
 665 
 666 /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
 667 static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
 668                           phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
 669                           phys_addr_t omt_phys)
 670 {
 671         u32 *pc;
 672         struct pamu_mmap_regs *pamu_regs;
 673 
 674         pc = (u32 *) (pamu_reg_base + PAMU_PC);
 675         pamu_regs = (struct pamu_mmap_regs *)
 676                 (pamu_reg_base + PAMU_MMAP_REGS_BASE);
 677 
 678         /* set up pointers to corenet control blocks */
 679 
 680         out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
 681         out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
 682         ppaact_phys = ppaact_phys + PAACT_SIZE;
 683         out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
 684         out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));
 685 
 686         out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
 687         out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
 688         spaact_phys = spaact_phys + SPAACT_SIZE;
 689         out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
 690         out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));
 691 
 692         out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
 693         out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
 694         omt_phys = omt_phys + OMT_SIZE;
 695         out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
 696         out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));
 697 
 698         /*
 699          * set PAMU enable bit,
 700          * allow ppaact & omt to be cached
 701          * & enable PAMU access violation interrupts.
 702          */
 703 
 704         out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
 705                  PAMU_ACCESS_VIOLATION_ENABLE);
 706         out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
 707         return 0;
 708 }
 709 
 710 /* Enable all device LIODNs */
 711 static void setup_liodns(void)
 712 {
 713         int i, len;
 714         struct paace *ppaace;
 715         struct device_node *node = NULL;
 716         const u32 *prop;
 717 
 718         for_each_node_with_property(node, "fsl,liodn") {
 719                 prop = of_get_property(node, "fsl,liodn", &len);
 720                 for (i = 0; i < len / sizeof(u32); i++) {
 721                         int liodn;
 722 
 723                         liodn = be32_to_cpup(&prop[i]);
 724                         if (liodn >= PAACE_NUMBER_ENTRIES) {
 725                                 pr_debug("Invalid LIODN value %d\n", liodn);
 726                                 continue;
 727                         }
 728                         ppaace = pamu_get_ppaace(liodn);
 729                         pamu_init_ppaace(ppaace);
 730                         /* window size is 2^(WSE+1) bytes */
 731                         set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35);
 732                         ppaace->wbah = 0;
 733                         set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0);
 734                         set_bf(ppaace->impl_attr, PAACE_IA_ATM,
 735                                PAACE_ATM_NO_XLATE);
 736                         set_bf(ppaace->addr_bitfields, PAACE_AF_AP,
 737                                PAACE_AP_PERMS_ALL);
 738                         if (of_device_is_compatible(node, "fsl,qman-portal"))
 739                                 setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE);
 740                         if (of_device_is_compatible(node, "fsl,qman"))
 741                                 setup_qbman_paace(ppaace, QMAN_PAACE);
 742                         if (of_device_is_compatible(node, "fsl,bman"))
 743                                 setup_qbman_paace(ppaace, BMAN_PAACE);
 744                         mb();
 745                         pamu_enable_liodn(liodn);
 746                 }
 747         }
 748 }
 749 
 750 static irqreturn_t pamu_av_isr(int irq, void *arg)
 751 {
 752         struct pamu_isr_data *data = arg;
 753         phys_addr_t phys;
 754         unsigned int i, j, ret;
 755 
 756         pr_emerg("access violation interrupt\n");
 757 
 758         for (i = 0; i < data->count; i++) {
 759                 void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
 760                 u32 pics = in_be32(p + PAMU_PICS);
 761 
 762                 if (pics & PAMU_ACCESS_VIOLATION_STAT) {
 763                         u32 avs1 = in_be32(p + PAMU_AVS1);
 764                         struct paace *paace;
 765 
 766                         pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
 767                         pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
 768                         pr_emerg("AVS1=%08x\n", avs1);
 769                         pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
 770                         pr_emerg("AVA=%016llx\n",
 771                                  make64(in_be32(p + PAMU_AVAH),
 772                                         in_be32(p + PAMU_AVAL)));
 773                         pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
 774                         pr_emerg("POEA=%016llx\n",
 775                                  make64(in_be32(p + PAMU_POEAH),
 776                                         in_be32(p + PAMU_POEAL)));
 777 
 778                         phys = make64(in_be32(p + PAMU_POEAH),
 779                                       in_be32(p + PAMU_POEAL));
 780 
 781                         /* Assume that POEA points to a PAACE */
 782                         if (phys) {
 783                                 u32 *paace = phys_to_virt(phys);
 784 
 785                                 /* Only the first four words are relevant */
 786                                 for (j = 0; j < 4; j++)
 787                                         pr_emerg("PAACE[%u]=%08x\n",
 788                                                  j, in_be32(paace + j));
 789                         }
 790 
 791                         /* clear access violation condition */
 792                         out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
 793                         paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
 794                         BUG_ON(!paace);
 795                         /* check if we got a violation for a disabled LIODN */
 796                         if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
 797                                 /*
 798                                  * As per hardware erratum A-003638, access
 799                                  * violation can be reported for a disabled
 800                                  * LIODN. If we hit that condition, disable
 801                                  * access violation reporting.
 802                                  */
 803                                 pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
 804                         } else {
 805                                 /* Disable the LIODN */
 806                                 ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
 807                                 BUG_ON(ret);
 808                                 pr_emerg("Disabling liodn %x\n",
 809                                          avs1 >> PAMU_AVS1_LIODN_SHIFT);
 810                         }
 811                         out_be32((p + PAMU_PICS), pics);
 812                 }
 813         }
 814 
 815         return IRQ_HANDLED;
 816 }
 817 
 818 #define LAWAR_EN                0x80000000
 819 #define LAWAR_TARGET_MASK       0x0FF00000
 820 #define LAWAR_TARGET_SHIFT      20
 821 #define LAWAR_SIZE_MASK         0x0000003F
 822 #define LAWAR_CSDID_MASK        0x000FF000
 823 #define LAWAR_CSDID_SHIFT       12
 824 
 825 #define LAW_SIZE_4K             0xb
 826 
 827 struct ccsr_law {
 828         u32     lawbarh;        /* LAWn base address high */
 829         u32     lawbarl;        /* LAWn base address low */
 830         u32     lawar;          /* LAWn attributes */
 831         u32     reserved;
 832 };
 833 
 834 /*
 835  * Create a coherence subdomain for a given memory block.
 836  */
 837 static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
 838 {
 839         struct device_node *np;
 840         const __be32 *iprop;
 841         void __iomem *lac = NULL;       /* Local Access Control registers */
 842         struct ccsr_law __iomem *law;
 843         void __iomem *ccm = NULL;
 844         u32 __iomem *csdids;
 845         unsigned int i, num_laws, num_csds;
 846         u32 law_target = 0;
 847         u32 csd_id = 0;
 848         int ret = 0;
 849 
 850         np = of_find_compatible_node(NULL, NULL, "fsl,corenet-law");
 851         if (!np)
 852                 return -ENODEV;
 853 
 854         iprop = of_get_property(np, "fsl,num-laws", NULL);
 855         if (!iprop) {
 856                 ret = -ENODEV;
 857                 goto error;
 858         }
 859 
 860         num_laws = be32_to_cpup(iprop);
 861         if (!num_laws) {
 862                 ret = -ENODEV;
 863                 goto error;
 864         }
 865 
 866         lac = of_iomap(np, 0);
 867         if (!lac) {
 868                 ret = -ENODEV;
 869                 goto error;
 870         }
 871 
 872         /* LAW registers are at offset 0xC00 */
 873         law = lac + 0xC00;
 874 
 875         of_node_put(np);
 876 
 877         np = of_find_compatible_node(NULL, NULL, "fsl,corenet-cf");
 878         if (!np) {
 879                 ret = -ENODEV;
 880                 goto error;
 881         }
 882 
 883         iprop = of_get_property(np, "fsl,ccf-num-csdids", NULL);
 884         if (!iprop) {
 885                 ret = -ENODEV;
 886                 goto error;
 887         }
 888 
 889         num_csds = be32_to_cpup(iprop);
 890         if (!num_csds) {
 891                 ret = -ENODEV;
 892                 goto error;
 893         }
 894 
 895         ccm = of_iomap(np, 0);
 896         if (!ccm) {
 897                 ret = -ENOMEM;
 898                 goto error;
 899         }
 900 
 901         /* The undocumented CSDID registers are at offset 0x600 */
 902         csdids = ccm + 0x600;
 903 
 904         of_node_put(np);
 905         np = NULL;
 906 
 907         /* Find an unused coherence subdomain ID */
 908         for (csd_id = 0; csd_id < num_csds; csd_id++) {
 909                 if (!csdids[csd_id])
 910                         break;
 911         }
 912 
 913         /* Store the Port ID in the (undocumented) proper CIDMRxx register */
 914         csdids[csd_id] = csd_port_id;
 915 
 916         /* Find the DDR LAW that maps to our buffer. */
 917         for (i = 0; i < num_laws; i++) {
 918                 if (law[i].lawar & LAWAR_EN) {
 919                         phys_addr_t law_start, law_end;
 920 
 921                         law_start = make64(law[i].lawbarh, law[i].lawbarl);
 922                         law_end = law_start +
 923                                 (2ULL << (law[i].lawar & LAWAR_SIZE_MASK));
 924 
 925                         if (law_start <= phys && phys < law_end) {
 926                                 law_target = law[i].lawar & LAWAR_TARGET_MASK;
 927                                 break;
 928                         }
 929                 }
 930         }
 931 
 932         if (i == 0 || i == num_laws) {
 933                 /* This should never happen */
 934                 ret = -ENOENT;
 935                 goto error;
 936         }
 937 
 938         /* Find a free LAW entry */
 939         while (law[--i].lawar & LAWAR_EN) {
 940                 if (i == 0) {
 941                         /* No higher priority LAW slots available */
 942                         ret = -ENOENT;
 943                         goto error;
 944                 }
 945         }
 946 
 947         law[i].lawbarh = upper_32_bits(phys);
 948         law[i].lawbarl = lower_32_bits(phys);
 949         wmb();
 950         law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) |
 951                 (LAW_SIZE_4K + get_order(size));
 952         wmb();
 953 
 954 error:
 955         if (ccm)
 956                 iounmap(ccm);
 957 
 958         if (lac)
 959                 iounmap(lac);
 960 
 961         if (np)
 962                 of_node_put(np);
 963 
 964         return ret;
 965 }
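
     /*
      * Note on the LAWAR size arithmetic above (sizes are illustrative): a
      * LAWAR size field s covers 2^(s+1) bytes, so LAW_SIZE_4K (0xb) covers
      * 4 KiB.  For a 64 KiB table block, get_order(64 * 1024) == 4 with
      * 4 KiB pages, and the new LAW is programmed with size field
      * 0xb + 4 = 0xf, i.e. a 64 KiB window.
      */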
 966 
 967 /*
 968  * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a
 969  * bit map of snoopers for a given range of memory mapped by a LAW.
 970  *
 971  * All future CoreNet-enabled SOCs will have this erratum (A-004510) fixed, so this
 972  * table should never need to be updated.  SVRs are guaranteed to be unique, so
 973  * there is no worry that a future SOC will inadvertently have one of these
 974  * values.
 975  */
 976 static const struct {
 977         u32 svr;
 978         u32 port_id;
 979 } port_id_map[] = {
 980         {(SVR_P2040 << 8) | 0x10, 0xFF000000},  /* P2040 1.0 */
 981         {(SVR_P2040 << 8) | 0x11, 0xFF000000},  /* P2040 1.1 */
 982         {(SVR_P2041 << 8) | 0x10, 0xFF000000},  /* P2041 1.0 */
 983         {(SVR_P2041 << 8) | 0x11, 0xFF000000},  /* P2041 1.1 */
 984         {(SVR_P3041 << 8) | 0x10, 0xFF000000},  /* P3041 1.0 */
 985         {(SVR_P3041 << 8) | 0x11, 0xFF000000},  /* P3041 1.1 */
 986         {(SVR_P4040 << 8) | 0x20, 0xFFF80000},  /* P4040 2.0 */
 987         {(SVR_P4080 << 8) | 0x20, 0xFFF80000},  /* P4080 2.0 */
 988         {(SVR_P5010 << 8) | 0x10, 0xFC000000},  /* P5010 1.0 */
 989         {(SVR_P5010 << 8) | 0x20, 0xFC000000},  /* P5010 2.0 */
 990         {(SVR_P5020 << 8) | 0x10, 0xFC000000},  /* P5020 1.0 */
 991         {(SVR_P5021 << 8) | 0x10, 0xFF800000},  /* P5021 1.0 */
 992         {(SVR_P5040 << 8) | 0x10, 0xFF800000},  /* P5040 1.0 */
 993 };
 994 
 995 #define SVR_SECURITY    0x80000 /* The Security (E) bit */
 996 
 997 static int fsl_pamu_probe(struct platform_device *pdev)
 998 {
 999         struct device *dev = &pdev->dev;
1000         void __iomem *pamu_regs = NULL;
1001         struct ccsr_guts __iomem *guts_regs = NULL;
1002         u32 pamubypenr, pamu_counter;
1003         unsigned long pamu_reg_off;
1004         unsigned long pamu_reg_base;
1005         struct pamu_isr_data *data = NULL;
1006         struct device_node *guts_node;
1007         u64 size;
1008         struct page *p;
1009         int ret = 0;
1010         int irq;
1011         phys_addr_t ppaact_phys;
1012         phys_addr_t spaact_phys;
1013         struct ome *omt;
1014         phys_addr_t omt_phys;
1015         size_t mem_size = 0;
1016         unsigned int order = 0;
1017         u32 csd_port_id = 0;
1018         unsigned i;
1019         /*
1020          * Enumerate all PAMUs and allocate and set up PAMU tables
1021          * for each of them.
1022          * NOTE: All PAMUs share the same LIODN tables.
1023          */
1024 
1025         if (WARN_ON(probed))
1026                 return -EBUSY;
1027 
1028         pamu_regs = of_iomap(dev->of_node, 0);
1029         if (!pamu_regs) {
1030                 dev_err(dev, "ioremap of PAMU node failed\n");
1031                 return -ENOMEM;
1032         }
1033         of_get_address(dev->of_node, 0, &size, NULL);
1034 
1035         irq = irq_of_parse_and_map(dev->of_node, 0);
1036         if (irq == NO_IRQ) {
1037                 dev_warn(dev, "no interrupts listed in PAMU node\n");
1038                 goto error;
1039         }
1040 
1041         data = kzalloc(sizeof(*data), GFP_KERNEL);
1042         if (!data) {
1043                 ret = -ENOMEM;
1044                 goto error;
1045         }
1046         data->pamu_reg_base = pamu_regs;
1047         data->count = size / PAMU_OFFSET;
1048 
1049         /* The ISR needs access to the regs, so we won't iounmap them */
1050         ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
1051         if (ret < 0) {
1052                 dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
1053                 goto error;
1054         }
1055 
1056         guts_node = of_find_matching_node(NULL, guts_device_ids);
1057         if (!guts_node) {
1058                 dev_err(dev, "could not find GUTS node %pOF\n", dev->of_node);
1059                 ret = -ENODEV;
1060                 goto error;
1061         }
1062 
1063         guts_regs = of_iomap(guts_node, 0);
1064         of_node_put(guts_node);
1065         if (!guts_regs) {
1066                 dev_err(dev, "ioremap of GUTS node failed\n");
1067                 ret = -ENODEV;
1068                 goto error;
1069         }
1070 
1071         /* read in the PAMU capability registers */
1072         get_pamu_cap_values((unsigned long)pamu_regs);
1073         /*
1074          * To simplify the allocation of a coherency domain, we allocate the
1075          * PAACT and the OMT in the same memory buffer.  Unfortunately, this
1076          * wastes more memory compared to allocating the buffers separately.
1077          */
1078         /* Determine how much memory we need */
1079         mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
1080                 (PAGE_SIZE << get_order(SPAACT_SIZE)) +
1081                 (PAGE_SIZE << get_order(OMT_SIZE));
1082         order = get_order(mem_size);
1083 
1084         p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1085         if (!p) {
1086                 dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
1087                 ret = -ENOMEM;
1088                 goto error;
1089         }
1090 
1091         ppaact = page_address(p);
1092         ppaact_phys = page_to_phys(p);
1093 
1094         /* Make sure the memory is naturally aligned */
1095         if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
1096                 dev_err(dev, "PAACT/OMT block is unaligned\n");
1097                 ret = -ENOMEM;
1098                 goto error;
1099         }
1100 
1101         spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
1102         omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));
1103 
1104         dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);
1105 
1106         /* Check to see if we need to implement the work-around on this SOC */
1107 
1108         /* Determine the Port ID for our coherence subdomain */
1109         for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
1110                 if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
1111                         csd_port_id = port_id_map[i].port_id;
1112                         dev_dbg(dev, "found matching SVR %08x\n",
1113                                 port_id_map[i].svr);
1114                         break;
1115                 }
1116         }
1117 
1118         if (csd_port_id) {
1119                 dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
1120                         &ppaact_phys, mem_size, csd_port_id);
1121 
1122                 ret = create_csd(ppaact_phys, mem_size, csd_port_id);
1123                 if (ret) {
1124                         dev_err(dev, "could not create coherence subdomain\n");
1125                         return ret;
1126                 }
1127         }
1128 
1129         spaact_phys = virt_to_phys(spaact);
1130         omt_phys = virt_to_phys(omt);
1131 
1132         spaace_pool = gen_pool_create(ilog2(sizeof(struct paace)), -1);
1133         if (!spaace_pool) {
1134                 ret = -ENOMEM;
1135                 dev_err(dev, "Failed to allocate spaace gen pool\n");
1136                 goto error;
1137         }
1138 
1139         ret = gen_pool_add(spaace_pool, (unsigned long)spaact, SPAACT_SIZE, -1);
1140         if (ret)
1141                 goto error_genpool;
1142 
1143         pamubypenr = in_be32(&guts_regs->pamubypenr);
1144 
1145         for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
1146              pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {
1147 
1148                 pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
1149                 setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
1150                                spaact_phys, omt_phys);
1151                 /* Disable PAMU bypass for this PAMU */
1152                 pamubypenr &= ~pamu_counter;
1153         }
1154 
1155         setup_omt(omt);
1156 
1157         /* Enable all relevant PAMU(s) */
1158         out_be32(&guts_regs->pamubypenr, pamubypenr);
1159 
1160         iounmap(guts_regs);
1161 
1162         /* Enable DMA for the LIODNs in the device tree */
1163 
1164         setup_liodns();
1165 
1166         probed = true;
1167 
1168         return 0;
1169 
1170 error_genpool:
1171         gen_pool_destroy(spaace_pool);
1172 
1173 error:
1174         if (irq != NO_IRQ)
1175                 free_irq(irq, data);
1176 
1177         if (data) {
1178                 memset(data, 0, sizeof(struct pamu_isr_data));
1179                 kfree(data);
1180         }
1181 
1182         if (pamu_regs)
1183                 iounmap(pamu_regs);
1184 
1185         if (guts_regs)
1186                 iounmap(guts_regs);
1187 
1188         if (ppaact)
1189                 free_pages((unsigned long)ppaact, order);
1190 
1191         ppaact = NULL;
1192 
1193         return ret;
1194 }
1195 
1196 static struct platform_driver fsl_of_pamu_driver = {
1197         .driver = {
1198                 .name = "fsl-of-pamu",
1199         },
1200         .probe = fsl_pamu_probe,
1201 };
1202 
1203 static __init int fsl_pamu_init(void)
1204 {
1205         struct platform_device *pdev = NULL;
1206         struct device_node *np;
1207         int ret;
1208 
1209         /*
1210          * The normal OF process calls the probe function at some
1211          * indeterminate later time, after most drivers have loaded.  This is
1212          * too late for us, because PAMU clients (like the Qman driver)
1213          * depend on PAMU being initialized early.
1214          *
1215          * So instead, we "manually" call our probe function by creating the
1216          * platform devices ourselves.
1217          */
1218 
1219         /*
1220          * We assume that there is only one PAMU node in the device tree.  A
1221          * single PAMU node represents all of the PAMU devices in the SOC
1222          * already.   Everything else already makes that assumption, and the
1223          * binding for the PAMU nodes doesn't allow for any parent-child
1224          * relationships anyway.  In other words, support for more than one
1225          * PAMU node would require significant changes to a lot of code.
1226          */
1227 
1228         np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
1229         if (!np) {
1230                 pr_err("could not find a PAMU node\n");
1231                 return -ENODEV;
1232         }
1233 
1234         ret = platform_driver_register(&fsl_of_pamu_driver);
1235         if (ret) {
1236                 pr_err("could not register driver (err=%i)\n", ret);
1237                 goto error_driver_register;
1238         }
1239 
1240         pdev = platform_device_alloc("fsl-of-pamu", 0);
1241         if (!pdev) {
1242                 pr_err("could not allocate device %pOF\n", np);
1243                 ret = -ENOMEM;
1244                 goto error_device_alloc;
1245         }
1246         pdev->dev.of_node = of_node_get(np);
1247 
1248         ret = pamu_domain_init();
1249         if (ret)
1250                 goto error_device_add;
1251 
1252         ret = platform_device_add(pdev);
1253         if (ret) {
1254                 pr_err("could not add device %pOF (err=%i)\n", np, ret);
1255                 goto error_device_add;
1256         }
1257 
1258         return 0;
1259 
1260 error_device_add:
1261         of_node_put(pdev->dev.of_node);
1262         pdev->dev.of_node = NULL;
1263 
1264         platform_device_put(pdev);
1265 
1266 error_device_alloc:
1267         platform_driver_unregister(&fsl_of_pamu_driver);
1268 
1269 error_driver_register:
1270         of_node_put(np);
1271 
1272         return ret;
1273 }
1274 arch_initcall(fsl_pamu_init);
