/*
 * Copyright (C) 2011-2014 NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <soc/tegra/ahb.h>
#include <soc/tegra/mc.h>

struct tegra_smmu {
        void __iomem *regs;
        struct device *dev;

        struct tegra_mc *mc;
        const struct tegra_smmu_soc *soc;

        unsigned long pfn_mask;
        unsigned long tlb_mask;

        unsigned long *asids;
        struct mutex lock;

        struct list_head list;
};

struct tegra_smmu_as {
        struct iommu_domain domain;
        struct tegra_smmu *smmu;
        unsigned int use_count;
        struct page *count;
        struct page *pd;
        unsigned id;
        u32 attr;
};

static struct tegra_smmu_as *to_smmu_as(struct iommu_domain *dom)
{
        return container_of(dom, struct tegra_smmu_as, domain);
}

static inline void smmu_writel(struct tegra_smmu *smmu, u32 value,
                               unsigned long offset)
{
        writel(value, smmu->regs + offset);
}

static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset)
{
        return readl(smmu->regs + offset);
}

#define SMMU_CONFIG 0x010
#define SMMU_CONFIG_ENABLE (1 << 0)

#define SMMU_TLB_CONFIG 0x14
#define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29)
#define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28)
#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \
        ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask)

#define SMMU_PTC_CONFIG 0x18
#define SMMU_PTC_CONFIG_ENABLE (1 << 29)
#define SMMU_PTC_CONFIG_REQ_LIMIT(x) (((x) & 0x0f) << 24)
#define SMMU_PTC_CONFIG_INDEX_MAP(x) ((x) & 0x3f)

#define SMMU_PTB_ASID 0x01c
#define SMMU_PTB_ASID_VALUE(x) ((x) & 0x7f)

#define SMMU_PTB_DATA 0x020
#define SMMU_PTB_DATA_VALUE(page, attr) (page_to_phys(page) >> 12 | (attr))

#define SMMU_MK_PDE(page, attr) (page_to_phys(page) >> SMMU_PTE_SHIFT | (attr))

#define SMMU_TLB_FLUSH 0x030
#define SMMU_TLB_FLUSH_VA_MATCH_ALL (0 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_SECTION (2 << 0)
#define SMMU_TLB_FLUSH_VA_MATCH_GROUP (3 << 0)
#define SMMU_TLB_FLUSH_ASID(x) (((x) & 0x7f) << 24)
#define SMMU_TLB_FLUSH_VA_SECTION(addr) ((((addr) & 0xffc00000) >> 12) | \
                                         SMMU_TLB_FLUSH_VA_MATCH_SECTION)
#define SMMU_TLB_FLUSH_VA_GROUP(addr) ((((addr) & 0xffffc000) >> 12) | \
                                       SMMU_TLB_FLUSH_VA_MATCH_GROUP)
#define SMMU_TLB_FLUSH_ASID_MATCH (1 << 31)

#define SMMU_PTC_FLUSH 0x034
#define SMMU_PTC_FLUSH_TYPE_ALL (0 << 0)
#define SMMU_PTC_FLUSH_TYPE_ADR (1 << 0)

#define SMMU_PTC_FLUSH_HI 0x9b8
#define SMMU_PTC_FLUSH_HI_MASK 0x3

/* per-SWGROUP SMMU_*_ASID register */
#define SMMU_ASID_ENABLE (1 << 31)
#define SMMU_ASID_MASK 0x7f
#define SMMU_ASID_VALUE(x) ((x) & SMMU_ASID_MASK)

/* page table definitions */
#define SMMU_NUM_PDE 1024
#define SMMU_NUM_PTE 1024

#define SMMU_SIZE_PD (SMMU_NUM_PDE * 4)
#define SMMU_SIZE_PT (SMMU_NUM_PTE * 4)

#define SMMU_PDE_SHIFT 22
#define SMMU_PTE_SHIFT 12

#define SMMU_PD_READABLE (1 << 31)
#define SMMU_PD_WRITABLE (1 << 30)
#define SMMU_PD_NONSECURE (1 << 29)

#define SMMU_PDE_READABLE (1 << 31)
#define SMMU_PDE_WRITABLE (1 << 30)
#define SMMU_PDE_NONSECURE (1 << 29)
#define SMMU_PDE_NEXT (1 << 28)

#define SMMU_PTE_READABLE (1 << 31)
#define SMMU_PTE_WRITABLE (1 << 30)
#define SMMU_PTE_NONSECURE (1 << 29)

#define SMMU_PDE_ATTR (SMMU_PDE_READABLE | SMMU_PDE_WRITABLE | \
                       SMMU_PDE_NONSECURE)
#define SMMU_PTE_ATTR (SMMU_PTE_READABLE | SMMU_PTE_WRITABLE | \
                       SMMU_PTE_NONSECURE)
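
/*
 * Invalidate the SMMU's page table cache (PTC). Given a page table page,
 * only the PTC line covering @offset within that page is flushed (the
 * offset is first rounded down to the memory controller's atom size); a
 * NULL page flushes the entire PTC. On SoCs with more than 32 physical
 * address bits the upper bits of the address are written to
 * SMMU_PTC_FLUSH_HI before the low word triggers the flush.
 */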
static inline void smmu_flush_ptc(struct tegra_smmu *smmu, struct page *page,
                                  unsigned long offset)
{
        phys_addr_t phys = page ? page_to_phys(page) : 0;
        u32 value;

        if (page) {
                offset &= ~(smmu->mc->soc->atom_size - 1);

                if (smmu->mc->soc->num_address_bits > 32) {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
                        value = (phys >> 32) & SMMU_PTC_FLUSH_HI_MASK;
#else
                        value = 0;
#endif
                        smmu_writel(smmu, value, SMMU_PTC_FLUSH_HI);
                }

                value = (phys + offset) | SMMU_PTC_FLUSH_TYPE_ADR;
        } else {
                value = SMMU_PTC_FLUSH_TYPE_ALL;
        }

        smmu_writel(smmu, value, SMMU_PTC_FLUSH);
}

static inline void smmu_flush_tlb(struct tegra_smmu *smmu)
{
        smmu_writel(smmu, SMMU_TLB_FLUSH_VA_MATCH_ALL, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_asid(struct tegra_smmu *smmu,
                                       unsigned long asid)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_MATCH_ALL;
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_section(struct tegra_smmu *smmu,
                                          unsigned long asid,
                                          unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_SECTION(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush_tlb_group(struct tegra_smmu *smmu,
                                        unsigned long asid,
                                        unsigned long iova)
{
        u32 value;

        value = SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_ASID(asid) |
                SMMU_TLB_FLUSH_VA_GROUP(iova);
        smmu_writel(smmu, value, SMMU_TLB_FLUSH);
}

static inline void smmu_flush(struct tegra_smmu *smmu)
{
        smmu_readl(smmu, SMMU_CONFIG);
}

static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp)
{
        unsigned long id;

        mutex_lock(&smmu->lock);

        id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids);
        if (id >= smmu->soc->num_asids) {
                mutex_unlock(&smmu->lock);
                return -ENOSPC;
        }

        set_bit(id, smmu->asids);
        *idp = id;

        mutex_unlock(&smmu->lock);
        return 0;
}

static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
{
        mutex_lock(&smmu->lock);
        clear_bit(id, smmu->asids);
        mutex_unlock(&smmu->lock);
}

static bool tegra_smmu_capable(enum iommu_cap cap)
{
        return false;
}
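
/*
 * Each address space uses a two-level page table: one page directory of
 * SMMU_NUM_PDE entries, each of which may point to a page table of
 * SMMU_NUM_PTE 4 KiB entries, covering a 32-bit (4 GiB) I/O virtual
 * address range. The as->count page mirrors the page directory and counts
 * how many PTEs are in use in each page table, so that empty page tables
 * can be freed again.
 */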
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
        struct tegra_smmu_as *as;
        unsigned int i;
        uint32_t *pd;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;

        as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE;

        as->pd = alloc_page(GFP_KERNEL | __GFP_DMA);
        if (!as->pd) {
                kfree(as);
                return NULL;
        }

        as->count = alloc_page(GFP_KERNEL);
        if (!as->count) {
                __free_page(as->pd);
                kfree(as);
                return NULL;
        }

        /* clear PDEs */
        pd = page_address(as->pd);
        SetPageReserved(as->pd);

        for (i = 0; i < SMMU_NUM_PDE; i++)
                pd[i] = 0;

        /* clear PDE usage counters */
        pd = page_address(as->count);
        SetPageReserved(as->count);

        for (i = 0; i < SMMU_NUM_PDE; i++)
                pd[i] = 0;

        /* setup aperture */
        as->domain.geometry.aperture_start = 0;
        as->domain.geometry.aperture_end = 0xffffffff;
        as->domain.geometry.force_aperture = true;

        return &as->domain;
}

static void tegra_smmu_domain_free(struct iommu_domain *domain)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);

        /* TODO: free page directory and page tables */
        ClearPageReserved(as->pd);

        kfree(as);
}

static const struct tegra_smmu_swgroup *
tegra_smmu_find_swgroup(struct tegra_smmu *smmu, unsigned int swgroup)
{
        const struct tegra_smmu_swgroup *group = NULL;
        unsigned int i;

        for (i = 0; i < smmu->soc->num_swgroups; i++) {
                if (smmu->soc->swgroups[i].swgroup == swgroup) {
                        group = &smmu->soc->swgroups[i];
                        break;
                }
        }

        return group;
}

static void tegra_smmu_enable(struct tegra_smmu *smmu, unsigned int swgroup,
                              unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value |= BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value |= SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }
}

static void tegra_smmu_disable(struct tegra_smmu *smmu, unsigned int swgroup,
                               unsigned int asid)
{
        const struct tegra_smmu_swgroup *group;
        unsigned int i;
        u32 value;

        group = tegra_smmu_find_swgroup(smmu, swgroup);
        if (group) {
                value = smmu_readl(smmu, group->reg);
                value &= ~SMMU_ASID_MASK;
                value |= SMMU_ASID_VALUE(asid);
                value &= ~SMMU_ASID_ENABLE;
                smmu_writel(smmu, value, group->reg);
        }

        for (i = 0; i < smmu->soc->num_clients; i++) {
                const struct tegra_mc_client *client = &smmu->soc->clients[i];

                if (client->swgroup != swgroup)
                        continue;

                value = smmu_readl(smmu, client->smmu.reg);
                value &= ~BIT(client->smmu.bit);
                smmu_writel(smmu, value, client->smmu.reg);
        }
}

static int tegra_smmu_as_prepare(struct tegra_smmu *smmu,
                                 struct tegra_smmu_as *as)
{
        u32 value;
        int err;

        if (as->use_count > 0) {
                as->use_count++;
                return 0;
        }

        err = tegra_smmu_alloc_asid(smmu, &as->id);
        if (err < 0)
                return err;

        smmu->soc->ops->flush_dcache(as->pd, 0, SMMU_SIZE_PD);
        smmu_flush_ptc(smmu, as->pd, 0);
        smmu_flush_tlb_asid(smmu, as->id);

        smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID);
        value = SMMU_PTB_DATA_VALUE(as->pd, as->attr);
        smmu_writel(smmu, value, SMMU_PTB_DATA);
        smmu_flush(smmu);

        as->smmu = smmu;
        as->use_count++;

        return 0;
}

static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu,
                                    struct tegra_smmu_as *as)
{
        if (--as->use_count > 0)
                return;

        tegra_smmu_free_asid(smmu, as->id);
        as->smmu = NULL;
}
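
/*
 * Attaching a device walks its "iommus" device tree property. Each
 * specifier that references this SMMU carries the device's SWGROUP ID in
 * its first cell; the address space is programmed into that SWGROUP's ASID
 * register and translation is enabled for the matching memory controller
 * clients. An entry might look like this (illustrative example only):
 *
 *         iommus = <&mc TEGRA_SWGROUP_DC>;
 */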
static int tegra_smmu_attach_dev(struct iommu_domain *domain,
                                 struct device *dev)
{
        struct tegra_smmu *smmu = dev->archdata.iommu;
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;
        int err = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                /* skip entries that reference other IOMMUs */
                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        index++;
                        continue;
                }

                of_node_put(args.np);

                err = tegra_smmu_as_prepare(smmu, as);
                if (err < 0)
                        return err;

                tegra_smmu_enable(smmu, swgroup, as->id);
                index++;
        }

        if (index == 0)
                return -ENODEV;

        return 0;
}

static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct device_node *np = dev->of_node;
        struct tegra_smmu *smmu = as->smmu;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                           &args)) {
                unsigned int swgroup = args.args[0];

                /* skip entries that reference other IOMMUs */
                if (args.np != smmu->dev->of_node) {
                        of_node_put(args.np);
                        index++;
                        continue;
                }

                of_node_put(args.np);

                tegra_smmu_disable(smmu, swgroup, as->id);
                tegra_smmu_as_unprepare(smmu, as);
                index++;
        }
}

static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
                       struct page **pagep)
{
        u32 *pd = page_address(as->pd), *pt, *count;
        u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
        u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
        struct tegra_smmu *smmu = as->smmu;
        struct page *page;
        unsigned int i;

        if (pd[pde] == 0) {
                page = alloc_page(GFP_KERNEL | __GFP_DMA);
                if (!page)
                        return NULL;

                pt = page_address(page);
                SetPageReserved(page);

                for (i = 0; i < SMMU_NUM_PTE; i++)
                        pt[i] = 0;

                smmu->soc->ops->flush_dcache(page, 0, SMMU_SIZE_PT);

                pd[pde] = SMMU_MK_PDE(page, SMMU_PDE_ATTR | SMMU_PDE_NEXT);

                smmu->soc->ops->flush_dcache(as->pd, pde << 2, 4);
                smmu_flush_ptc(smmu, as->pd, pde << 2);
                smmu_flush_tlb_section(smmu, as->id, iova);
                smmu_flush(smmu);
        } else {
                page = pfn_to_page(pd[pde] & smmu->pfn_mask);
                pt = page_address(page);
        }

        *pagep = page;

        /* Keep track of entries in this page table. */
        count = page_address(as->count);
        if (pt[pte] == 0)
                count[pde]++;

        return &pt[pte];
}

static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova)
{
        u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff;
        u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff;
        u32 *count = page_address(as->count);
        u32 *pd = page_address(as->pd), *pt;
        struct page *page;

        page = pfn_to_page(pd[pde] & as->smmu->pfn_mask);
        pt = page_address(page);

        /*
         * When no entries in this page table are used anymore, return the
         * memory page to the system.
         */
        if (pt[pte] != 0) {
                if (--count[pde] == 0) {
                        ClearPageReserved(page);
                        __free_page(page);
                        pd[pde] = 0;
                }

                pt[pte] = 0;
        }
}
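
/*
 * After a PTE has been modified, the new value must be made visible to the
 * hardware: the entry is written back from the CPU cache, the matching
 * page table cache (PTC) line is invalidated, the TLB entries for the
 * affected IOVA range are flushed, and a final read of SMMU_CONFIG in
 * smmu_flush() ensures that the register writes have been posted.
 */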
static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset;
        struct page *page;
        u32 *pte;

        pte = as_get_pte(as, iova, &page);
        if (!pte)
                return -ENOMEM;

        *pte = __phys_to_pfn(paddr) | SMMU_PTE_ATTR;
        offset = offset_in_page(pte);

        smmu->soc->ops->flush_dcache(page, offset, 4);
        smmu_flush_ptc(smmu, page, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);

        return 0;
}

static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
                               size_t size)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct tegra_smmu *smmu = as->smmu;
        unsigned long offset;
        struct page *page;
        u32 *pte;

        pte = as_get_pte(as, iova, &page);
        if (!pte)
                return 0;

        offset = offset_in_page(pte);
        as_put_pte(as, iova);

        smmu->soc->ops->flush_dcache(page, offset, 4);
        smmu_flush_ptc(smmu, page, offset);
        smmu_flush_tlb_group(smmu, as->id, iova);
        smmu_flush(smmu);

        return size;
}

static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain,
                                           dma_addr_t iova)
{
        struct tegra_smmu_as *as = to_smmu_as(domain);
        struct page *page;
        unsigned long pfn;
        u32 *pte;

        pte = as_get_pte(as, iova, &page);
        if (!pte)
                return 0;

        pfn = *pte & as->smmu->pfn_mask;

        return PFN_PHYS(pfn);
}
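
/*
 * Given the device tree node of a memory controller referenced by an
 * "iommus" specifier, look up the corresponding tegra_mc instance through
 * its platform device and return the SMMU embedded in it, if any.
 */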
static struct tegra_smmu *tegra_smmu_find(struct device_node *np)
{
        struct platform_device *pdev;
        struct tegra_mc *mc;

        pdev = of_find_device_by_node(np);
        if (!pdev)
                return NULL;

        mc = platform_get_drvdata(pdev);
        if (!mc)
                return NULL;

        return mc->smmu;
}

static int tegra_smmu_add_device(struct device *dev)
{
        struct device_node *np = dev->of_node;
        struct of_phandle_args args;
        unsigned int index = 0;

        while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index,
                                          &args) == 0) {
                struct tegra_smmu *smmu;

                smmu = tegra_smmu_find(args.np);
                if (smmu) {
                        /*
                         * Only a single IOMMU master interface is currently
                         * supported by the Linux kernel, so abort after the
                         * first match.
                         */
                        dev->archdata.iommu = smmu;
                        break;
                }

                index++;
        }

        return 0;
}

static void tegra_smmu_remove_device(struct device *dev)
{
        dev->archdata.iommu = NULL;
}

static const struct iommu_ops tegra_smmu_ops = {
        .capable = tegra_smmu_capable,
        .domain_alloc = tegra_smmu_domain_alloc,
        .domain_free = tegra_smmu_domain_free,
        .attach_dev = tegra_smmu_attach_dev,
        .detach_dev = tegra_smmu_detach_dev,
        .add_device = tegra_smmu_add_device,
        .remove_device = tegra_smmu_remove_device,
        .map = tegra_smmu_map,
        .unmap = tegra_smmu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = tegra_smmu_iova_to_phys,

        .pgsize_bitmap = SZ_4K,
};

static void tegra_smmu_ahb_enable(void)
{
        static const struct of_device_id ahb_match[] = {
                { .compatible = "nvidia,tegra30-ahb", },
                { }
        };
        struct device_node *ahb;

        ahb = of_find_matching_node(NULL, ahb_match);
        if (ahb) {
                tegra_ahb_enable_smmu(ahb);
                of_node_put(ahb);
        }
}

struct tegra_smmu *tegra_smmu_probe(struct device *dev,
                                    const struct tegra_smmu_soc *soc,
                                    struct tegra_mc *mc)
{
        struct tegra_smmu *smmu;
        size_t size;
        u32 value;
        int err;

        /* This can happen on Tegra20 which doesn't have an SMMU */
        if (!soc)
                return NULL;

        smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
        if (!smmu)
                return ERR_PTR(-ENOMEM);

        /*
         * This is a bit of a hack. Ideally we'd want to simply return this
         * value. However the IOMMU registration process will attempt to add
         * all devices to the IOMMU when bus_set_iommu() is called. In order
         * not to rely on global variables to track the IOMMU instance, we
         * set it here so that it can be looked up from the .add_device()
         * callback via the IOMMU device's .drvdata field.
         */
        mc->smmu = smmu;

        size = BITS_TO_LONGS(soc->num_asids) * sizeof(long);

        smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL);
        if (!smmu->asids)
                return ERR_PTR(-ENOMEM);

        mutex_init(&smmu->lock);

        smmu->regs = mc->regs;
        smmu->soc = soc;
        smmu->dev = dev;
        smmu->mc = mc;

        smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1;
        dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n",
                mc->soc->num_address_bits, smmu->pfn_mask);
        smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1;
        dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines,
                smmu->tlb_mask);

        value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f);

        if (soc->supports_request_limit)
                value |= SMMU_PTC_CONFIG_REQ_LIMIT(8);

        smmu_writel(smmu, value, SMMU_PTC_CONFIG);

        value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
                SMMU_TLB_CONFIG_ACTIVE_LINES(smmu);

        if (soc->supports_round_robin_arbitration)
                value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION;

        smmu_writel(smmu, value, SMMU_TLB_CONFIG);

        smmu_flush_ptc(smmu, NULL, 0);
        smmu_flush_tlb(smmu);
        smmu_writel(smmu, SMMU_CONFIG_ENABLE, SMMU_CONFIG);
        smmu_flush(smmu);

        tegra_smmu_ahb_enable();

        err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops);
        if (err < 0)
                return ERR_PTR(err);

        return smmu;
}