This source file includes the following definitions:
- lv1ent_offset
- lv2ent_offset
- section_entry
- page_entry
- to_exynos_domain
- sysmmu_unblock
- sysmmu_block
- __sysmmu_tlb_invalidate
- __sysmmu_tlb_invalidate_entry
- __sysmmu_set_ptbase
- __sysmmu_enable_clocks
- __sysmmu_disable_clocks
- __sysmmu_get_version
- show_fault_information
- exynos_sysmmu_irq
- __sysmmu_disable
- __sysmmu_init_config
- __sysmmu_enable
- sysmmu_tlb_invalidate_flpdcache
- sysmmu_tlb_invalidate_entry
- exynos_sysmmu_probe
- exynos_sysmmu_suspend
- exynos_sysmmu_resume
- update_pte
- exynos_iommu_domain_alloc
- exynos_iommu_domain_free
- exynos_iommu_detach_device
- exynos_iommu_attach_device
- alloc_lv2entry
- lv1set_section
- lv2set_page
- exynos_iommu_map
- exynos_iommu_tlb_invalidate_entry
- exynos_iommu_unmap
- exynos_iommu_iova_to_phys
- exynos_iommu_add_device
- exynos_iommu_remove_device
- exynos_iommu_of_xlate
- exynos_iommu_init
   1 
   2 
   3 
   4 
   5 
   6 
   7 #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
   8 #define DEBUG
   9 #endif
  10 
  11 #include <linux/clk.h>
  12 #include <linux/dma-mapping.h>
  13 #include <linux/err.h>
  14 #include <linux/io.h>
  15 #include <linux/iommu.h>
  16 #include <linux/interrupt.h>
  17 #include <linux/kmemleak.h>
  18 #include <linux/list.h>
  19 #include <linux/of.h>
  20 #include <linux/of_iommu.h>
  21 #include <linux/of_platform.h>
  22 #include <linux/platform_device.h>
  23 #include <linux/pm_runtime.h>
  24 #include <linux/slab.h>
  25 #include <linux/dma-iommu.h>
  26 
  27 typedef u32 sysmmu_iova_t;
  28 typedef u32 sysmmu_pte_t;
  29 
  30 
  31 #define SECT_ORDER 20
  32 #define LPAGE_ORDER 16
  33 #define SPAGE_ORDER 12
  34 
  35 #define SECT_SIZE (1 << SECT_ORDER)
  36 #define LPAGE_SIZE (1 << LPAGE_ORDER)
  37 #define SPAGE_SIZE (1 << SPAGE_ORDER)
  38 
  39 #define SECT_MASK (~(SECT_SIZE - 1))
  40 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
  41 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
  42 
  43 #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
  44                            ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
  45 #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
  46 #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
  47 #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
  48                           ((*(sent) & 3) == 1))
  49 #define lv1ent_section(sent) ((*(sent) & 3) == 2)
  50 
  51 #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
  52 #define lv2ent_small(pent) ((*(pent) & 2) == 2)
  53 #define lv2ent_large(pent) ((*(pent) & 3) == 1)
  54 
  55 
  56 
  57 
  58 
  59 
  60 
  61 
  62 
  63 static short PG_ENT_SHIFT = -1;
  64 #define SYSMMU_PG_ENT_SHIFT 0
  65 #define SYSMMU_V5_PG_ENT_SHIFT 4
  66 
  67 static const sysmmu_pte_t *LV1_PROT;
  68 static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
  69         ((0 << 15) | (0 << 10)), /* no access */
  70         ((1 << 15) | (1 << 10)), /* IOMMU_READ only */
  71         ((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
  72         ((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
  73 };
  74 static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
  75         (0 << 4), /* no access */
  76         (1 << 4), /* IOMMU_READ only */
  77         (2 << 4), /* IOMMU_WRITE only */
  78         (3 << 4), /* IOMMU_READ | IOMMU_WRITE */
  79 };
  80 
  81 static const sysmmu_pte_t *LV2_PROT;
  82 static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
  83         ((0 << 9) | (0 << 4)), /* no access */
  84         ((1 << 9) | (1 << 4)), /* IOMMU_READ only */
  85         ((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
  86         ((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
  87 };
  88 static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
  89         (0 << 2), /* no access */
  90         (1 << 2), /* IOMMU_READ only */
  91         (2 << 2), /* IOMMU_WRITE only */
  92         (3 << 2), /* IOMMU_READ | IOMMU_WRITE */
  93 };
  94 
  95 #define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
  96 
  97 #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
  98 #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
  99 #define section_offs(iova) (iova & (SECT_SIZE - 1))
 100 #define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
 101 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
 102 #define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
 103 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
 104 
 105 #define NUM_LV1ENTRIES 4096
 106 #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
 107 
 108 static u32 lv1ent_offset(sysmmu_iova_t iova)
 109 {
 110         return iova >> SECT_ORDER;
 111 }
 112 
 113 static u32 lv2ent_offset(sysmmu_iova_t iova)
 114 {
 115         return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
 116 }
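/*
 * A worked example (illustrative sketch, assuming only the macro values
 * defined above): with SECT_ORDER = 20 and SPAGE_ORDER = 12, an IOVA such
 * as 0x12345678 decomposes as
 *
 *      lv1ent_offset(0x12345678) = 0x12345678 >> 20          = 0x123
 *      lv2ent_offset(0x12345678) = (0x12345678 >> 12) & 0xff = 0x45
 *      spage_offs(0x12345678)    = 0x12345678 & 0xfff        = 0x678
 *
 * i.e. first-level entry 0x123 selects a 1MiB section (or a link to a
 * second-level table), second-level entry 0x45 selects a 4KiB small page
 * within it, and 0x678 is the byte offset inside that page.
 */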
 117 
 118 #define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
 119 #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
 120 
 121 #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
 122 #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
 123 
 124 #define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
 125 #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
 126 #define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
 127 #define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
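/*
 * A hedged sketch of what these helpers produce (illustrative values, not
 * taken from the original file): with prot already masked to
 * SYSMMU_SUPPORTED_PROT_BITS, IOMMU_READ | IOMMU_WRITE selects index 3 of
 * the protection tables, so a 4KiB read/write mapping of physical address
 * 0x40001000 becomes
 *
 *      v1-v3 (PG_ENT_SHIFT = 0):  0x40001000 | SYSMMU_LV2_PROT[3] | 2
 *                               = 0x40001000 | 0x10 | 2 = 0x40001012
 *      v5    (PG_ENT_SHIFT = 4): (0x40001000 >> 4) | SYSMMU_V5_LV2_PROT[3] | 2
 *                               = 0x04000100 | 0xc | 2  = 0x0400010e
 *
 * The low two bits encode the entry type (a small page here, matching the
 * lv2ent_small() test above); the remaining bits carry the (possibly
 * shifted) physical address and the access-protection field.
 */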
 128 
 129 #define CTRL_ENABLE     0x5
 130 #define CTRL_BLOCK      0x7
 131 #define CTRL_DISABLE    0x0
 132 
 133 #define CFG_LRU         0x1
 134 #define CFG_EAP         (1 << 2)
 135 #define CFG_QOS(n)      ((n & 0xF) << 7)
 136 #define CFG_ACGEN       (1 << 24) /* System MMU 3.3 only */
 137 #define CFG_SYSSEL      (1 << 22) /* System MMU 3.2 only */
 138 #define CFG_FLPDCACHE   (1 << 20) /* System MMU 3.2+ only */
 139 
 140 /* common registers */
 141 #define REG_MMU_CTRL            0x000
 142 #define REG_MMU_CFG             0x004
 143 #define REG_MMU_STATUS          0x008
 144 #define REG_MMU_VERSION         0x034
 145 
 146 #define MMU_MAJ_VER(val)        ((val) >> 7)
 147 #define MMU_MIN_VER(val)        ((val) & 0x7F)
 148 #define MMU_RAW_VER(reg)        (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
 149 
 150 #define MAKE_MMU_VER(maj, min)  ((((maj) & 0xF) << 7) | ((min) & 0x7F))
 151 
 152 /* v1.x - v3.x registers */
 153 #define REG_MMU_FLUSH           0x00C
 154 #define REG_MMU_FLUSH_ENTRY     0x010
 155 #define REG_PT_BASE_ADDR        0x014
 156 #define REG_INT_STATUS          0x018
 157 #define REG_INT_CLEAR           0x01C
 158 
 159 #define REG_PAGE_FAULT_ADDR     0x024
 160 #define REG_AW_FAULT_ADDR       0x028
 161 #define REG_AR_FAULT_ADDR       0x02C
 162 #define REG_DEFAULT_SLAVE_ADDR  0x030
 163 
 164 /* v5.x registers */
 165 #define REG_V5_PT_BASE_PFN      0x00C
 166 #define REG_V5_MMU_FLUSH_ALL    0x010
 167 #define REG_V5_MMU_FLUSH_ENTRY  0x014
 168 #define REG_V5_MMU_FLUSH_RANGE  0x018
 169 #define REG_V5_MMU_FLUSH_START  0x020
 170 #define REG_V5_MMU_FLUSH_END    0x024
 171 #define REG_V5_INT_STATUS       0x060
 172 #define REG_V5_INT_CLEAR        0x064
 173 #define REG_V5_FAULT_AR_VA      0x070
 174 #define REG_V5_FAULT_AW_VA      0x080
 175 
 176 #define has_sysmmu(dev)         (dev->archdata.iommu != NULL)
 177 
 178 static struct device *dma_dev;
 179 static struct kmem_cache *lv2table_kmem_cache;
 180 static sysmmu_pte_t *zero_lv2_table;
 181 #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
 182 
 183 static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
 184 {
 185         return pgtable + lv1ent_offset(iova);
 186 }
 187 
 188 static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
 189 {
 190         return (sysmmu_pte_t *)phys_to_virt(
 191                                 lv2table_base(sent)) + lv2ent_offset(iova);
 192 }
 193 
 194 
 195 
 196 
 197 struct sysmmu_fault_info {
 198         unsigned int bit;       /* bit number in STATUS register */
 199         unsigned short addr_reg; /* register to read VA fault address */
 200         const char *name;       /* human readable fault name */
 201         unsigned int type;      /* IOMMU_FAULT_READ or IOMMU_FAULT_WRITE */
 202 };
 203 
 204 static const struct sysmmu_fault_info sysmmu_faults[] = {
 205         { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
 206         { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
 207         { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
 208         { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
 209         { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
 210         { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
 211         { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
 212         { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
 213 };
 214 
 215 static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
 216         { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
 217         { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
 218         { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
 219         { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
 220         { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
 221         { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
 222         { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
 223         { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
 224         { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
 225         { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
 226 };
 227 
 228 /*
 229  * Attached to dev->archdata.iommu of a master device. Keeps the list of
 230  * SYSMMU controllers (taken from the device tree) that serve the master,
 231  * the domain the master is currently attached to, and a mutex that
 232  * serializes runtime PM transitions against attach/detach.
 233  */
 234 struct exynos_iommu_owner {
 235         struct list_head controllers;   /* list of sysmmu_drvdata.owner_node */
 236         struct iommu_domain *domain;    /* domain this device is attached to */
 237         struct mutex rpm_lock;          /* for runtime pm of all sysmmus */
 238 };
 239 
 240 /*
 241  * Exynos-specific wrapper around struct iommu_domain. Holds the list of
 242  * SYSMMU controllers of all master devices attached to this domain and
 243  * the page tables of the I/O address space it defines. Usually referenced
 244  * through the 'domain' pointer.
 245  */
 246 struct exynos_iommu_domain {
 247         struct list_head clients; /* list of sysmmu_drvdata.domain_node */
 248         sysmmu_pte_t *pgtable;  /* lv1 page table, 16KB */
 249         short *lv2entcnt;       /* free lv2 entry counter for each section */
 250         spinlock_t lock;        /* lock for modifying list of clients */
 251         spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
 252         struct iommu_domain domain; /* generic domain data structure */
 253 };
 254 
 255 /*
 256  * Holds all data of a single SYSMMU controller: hardware resources such
 257  * as registers and clocks, the pointers and list nodes connecting it to
 258  * the other structures, its internal state and parameters read from the
 259  * device tree. Usually referenced through the 'data' pointer.
 260  */
 261 struct sysmmu_drvdata {
 262         struct device *sysmmu;          /* SYSMMU controller device */
 263         struct device *master;          /* master device (owner) */
 264         struct device_link *link;       /* runtime PM link to the master */
 265         void __iomem *sfrbase;          /* our registers */
 266         struct clk *clk;                /* SYSMMU's clock */
 267         struct clk *aclk;               /* SYSMMU's aclk clock */
 268         struct clk *pclk;               /* SYSMMU's pclk clock */
 269         struct clk *clk_master;         /* master's device clock */
 270         spinlock_t lock;                /* lock for modifying state */
 271         bool active;                    /* current status */
 272         struct exynos_iommu_domain *domain; /* domain we belong to */
 273         struct list_head domain_node;   /* node for domain clients list */
 274         struct list_head owner_node;    /* node for owner controllers list */
 275         phys_addr_t pgtable;            /* assigned page table structure */
 276         unsigned int version;           /* our version */
 277 
 278         struct iommu_device iommu;      /* IOMMU core handle */
 279 };
 280 
 281 static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
 282 {
 283         return container_of(dom, struct exynos_iommu_domain, domain);
 284 }
 285 
 286 static void sysmmu_unblock(struct sysmmu_drvdata *data)
 287 {
 288         writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
 289 }
 290 
 291 static bool sysmmu_block(struct sysmmu_drvdata *data)
 292 {
 293         int i = 120;
 294 
 295         writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
 296         while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
 297                 --i;
 298 
 299         if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
 300                 sysmmu_unblock(data);
 301                 return false;
 302         }
 303 
 304         return true;
 305 }
 306 
 307 static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
 308 {
 309         if (MMU_MAJ_VER(data->version) < 5)
 310                 writel(0x1, data->sfrbase + REG_MMU_FLUSH);
 311         else
 312                 writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
 313 }
 314 
 315 static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 316                                 sysmmu_iova_t iova, unsigned int num_inv)
 317 {
 318         unsigned int i;
 319 
 320         if (MMU_MAJ_VER(data->version) < 5) {
 321                 for (i = 0; i < num_inv; i++) {
 322                         writel((iova & SPAGE_MASK) | 1,
 323                                      data->sfrbase + REG_MMU_FLUSH_ENTRY);
 324                         iova += SPAGE_SIZE;
 325                 }
 326         } else {
 327                 if (num_inv == 1) {
 328                         writel((iova & SPAGE_MASK) | 1,
 329                                      data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
 330                 } else {
 331                         writel((iova & SPAGE_MASK),
 332                                      data->sfrbase + REG_V5_MMU_FLUSH_START);
 333                         writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
 334                                      data->sfrbase + REG_V5_MMU_FLUSH_END);
 335                         writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
 336                 }
 337         }
 338 }
 339 
 340 static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
 341 {
 342         if (MMU_MAJ_VER(data->version) < 5)
 343                 writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
 344         else
 345                 writel(pgd >> PAGE_SHIFT,
 346                              data->sfrbase + REG_V5_PT_BASE_PFN);
 347 
 348         __sysmmu_tlb_invalidate(data);
 349 }
 350 
 351 static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
 352 {
 353         BUG_ON(clk_prepare_enable(data->clk_master));
 354         BUG_ON(clk_prepare_enable(data->clk));
 355         BUG_ON(clk_prepare_enable(data->pclk));
 356         BUG_ON(clk_prepare_enable(data->aclk));
 357 }
 358 
 359 static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
 360 {
 361         clk_disable_unprepare(data->aclk);
 362         clk_disable_unprepare(data->pclk);
 363         clk_disable_unprepare(data->clk);
 364         clk_disable_unprepare(data->clk_master);
 365 }
 366 
 367 static void __sysmmu_get_version(struct sysmmu_drvdata *data)
 368 {
 369         u32 ver;
 370 
 371         __sysmmu_enable_clocks(data);
 372 
 373         ver = readl(data->sfrbase + REG_MMU_VERSION);
 374 
 375         /* controllers on some SoCs don't report proper version */
 376         if (ver == 0x80000001u)
 377                 data->version = MAKE_MMU_VER(1, 0);
 378         else
 379                 data->version = MMU_RAW_VER(ver);
 380 
 381         dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
 382                 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));
 383 
 384         __sysmmu_disable_clocks(data);
 385 }
 386 
 387 static void show_fault_information(struct sysmmu_drvdata *data,
 388                                    const struct sysmmu_fault_info *finfo,
 389                                    sysmmu_iova_t fault_addr)
 390 {
 391         sysmmu_pte_t *ent;
 392 
 393         dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
 394                 dev_name(data->master), finfo->name, fault_addr);
 395         dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
 396         ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
 397         dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
 398         if (lv1ent_page(ent)) {
 399                 ent = page_entry(ent, fault_addr);
 400                 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
 401         }
 402 }
 403 
 404 static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
 405 {
 406         /* SYSMMU is in blocked state when the interrupt occurred */
 407         struct sysmmu_drvdata *data = dev_id;
 408         const struct sysmmu_fault_info *finfo;
 409         unsigned int i, n, itype;
 410         sysmmu_iova_t fault_addr = -1;
 411         unsigned short reg_status, reg_clear;
 412         int ret = -ENOSYS;
 413 
 414         WARN_ON(!data->active);
 415 
 416         if (MMU_MAJ_VER(data->version) < 5) {
 417                 reg_status = REG_INT_STATUS;
 418                 reg_clear = REG_INT_CLEAR;
 419                 finfo = sysmmu_faults;
 420                 n = ARRAY_SIZE(sysmmu_faults);
 421         } else {
 422                 reg_status = REG_V5_INT_STATUS;
 423                 reg_clear = REG_V5_INT_CLEAR;
 424                 finfo = sysmmu_v5_faults;
 425                 n = ARRAY_SIZE(sysmmu_v5_faults);
 426         }
 427 
 428         spin_lock(&data->lock);
 429 
 430         clk_enable(data->clk_master);
 431 
 432         itype = __ffs(readl(data->sfrbase + reg_status));
 433         for (i = 0; i < n; i++, finfo++)
 434                 if (finfo->bit == itype)
 435                         break;
 436         /* unknown/unsupported fault */
 437         BUG_ON(i == n);
 438 
 439         /* read the faulting address and report the fault */
 440         fault_addr = readl(data->sfrbase + finfo->addr_reg);
 441         show_fault_information(data, finfo, fault_addr);
 442 
 443         if (data->domain)
 444                 ret = report_iommu_fault(&data->domain->domain,
 445                                         data->master, fault_addr, finfo->type);
 446         /* fault is not recovered by fault handler */
 447         BUG_ON(ret != 0);
 448 
 449         writel(1 << itype, data->sfrbase + reg_clear);
 450 
 451         sysmmu_unblock(data);
 452 
 453         clk_disable(data->clk_master);
 454 
 455         spin_unlock(&data->lock);
 456 
 457         return IRQ_HANDLED;
 458 }
 459 
 460 static void __sysmmu_disable(struct sysmmu_drvdata *data)
 461 {
 462         unsigned long flags;
 463 
 464         clk_enable(data->clk_master);
 465 
 466         spin_lock_irqsave(&data->lock, flags);
 467         writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
 468         writel(0, data->sfrbase + REG_MMU_CFG);
 469         data->active = false;
 470         spin_unlock_irqrestore(&data->lock, flags);
 471 
 472         __sysmmu_disable_clocks(data);
 473 }
 474 
 475 static void __sysmmu_init_config(struct sysmmu_drvdata *data)
 476 {
 477         unsigned int cfg;
 478 
 479         if (data->version <= MAKE_MMU_VER(3, 1))
 480                 cfg = CFG_LRU | CFG_QOS(15);
 481         else if (data->version <= MAKE_MMU_VER(3, 2))
 482                 cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
 483         else
 484                 cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
 485 
 486         cfg |= CFG_EAP; 
 487 
 488         writel(cfg, data->sfrbase + REG_MMU_CFG);
 489 }
 490 
 491 static void __sysmmu_enable(struct sysmmu_drvdata *data)
 492 {
 493         unsigned long flags;
 494 
 495         __sysmmu_enable_clocks(data);
 496 
 497         spin_lock_irqsave(&data->lock, flags);
 498         writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
 499         __sysmmu_init_config(data);
 500         __sysmmu_set_ptbase(data, data->pgtable);
 501         writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
 502         data->active = true;
 503         spin_unlock_irqrestore(&data->lock, flags);
 504 
 505         /*
 506          * The SYSMMU driver keeps the master's clock enabled only while it
 507          * accesses the registers; for address translation during DMA
 508          * transactions it relies on the client driver to keep the clock
 509          * enabled.
 510          */
 511         clk_disable(data->clk_master);
 512 }
 513 
 514 static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
 515                                             sysmmu_iova_t iova)
 516 {
 517         unsigned long flags;
 518 
 519         spin_lock_irqsave(&data->lock, flags);
 520         if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
 521                 clk_enable(data->clk_master);
 522                 if (sysmmu_block(data)) {
 523                         if (data->version >= MAKE_MMU_VER(5, 0))
 524                                 __sysmmu_tlb_invalidate(data);
 525                         else
 526                                 __sysmmu_tlb_invalidate_entry(data, iova, 1);
 527                         sysmmu_unblock(data);
 528                 }
 529                 clk_disable(data->clk_master);
 530         }
 531         spin_unlock_irqrestore(&data->lock, flags);
 532 }
 533 
 534 static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
 535                                         sysmmu_iova_t iova, size_t size)
 536 {
 537         unsigned long flags;
 538 
 539         spin_lock_irqsave(&data->lock, flags);
 540         if (data->active) {
 541                 unsigned int num_inv = 1;
 542 
 543                 clk_enable(data->clk_master);
 544 
 545                 /*
 546                  * L2TLB invalidation required
 547                  * 4KB page: 1 invalidation
 548                  * 64KB page: 16 invalidations
 549                  * 1MB page: 64 invalidations
 550                  * because it is set-associative TLB
 551                  * with 8-way and 64 sets.
 552                  * 1MB page can be cached in one of all sets.
 553                  * 64KB page can be cached in one of 16 sets.
 554                  */
 555                 if (MMU_MAJ_VER(data->version) == 2)
 556                         num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
 557 
 558                 if (sysmmu_block(data)) {
 559                         __sysmmu_tlb_invalidate_entry(data, iova, num_inv);
 560                         sysmmu_unblock(data);
 561                 }
 562                 clk_disable(data->clk_master);
 563         }
 564         spin_unlock_irqrestore(&data->lock, flags);
 565 }
 566 
 567 static const struct iommu_ops exynos_iommu_ops;
 568 
 569 static int exynos_sysmmu_probe(struct platform_device *pdev)
 570 {
 571         int irq, ret;
 572         struct device *dev = &pdev->dev;
 573         struct sysmmu_drvdata *data;
 574         struct resource *res;
 575 
 576         data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
 577         if (!data)
 578                 return -ENOMEM;
 579 
 580         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 581         data->sfrbase = devm_ioremap_resource(dev, res);
 582         if (IS_ERR(data->sfrbase))
 583                 return PTR_ERR(data->sfrbase);
 584 
 585         irq = platform_get_irq(pdev, 0);
 586         if (irq <= 0)
 587                 return irq;
 588 
 589         ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
 590                                 dev_name(dev), data);
 591         if (ret) {
 592                 dev_err(dev, "Unable to register handler of irq %d\n", irq);
 593                 return ret;
 594         }
 595 
 596         data->clk = devm_clk_get(dev, "sysmmu");
 597         if (PTR_ERR(data->clk) == -ENOENT)
 598                 data->clk = NULL;
 599         else if (IS_ERR(data->clk))
 600                 return PTR_ERR(data->clk);
 601 
 602         data->aclk = devm_clk_get(dev, "aclk");
 603         if (PTR_ERR(data->aclk) == -ENOENT)
 604                 data->aclk = NULL;
 605         else if (IS_ERR(data->aclk))
 606                 return PTR_ERR(data->aclk);
 607 
 608         data->pclk = devm_clk_get(dev, "pclk");
 609         if (PTR_ERR(data->pclk) == -ENOENT)
 610                 data->pclk = NULL;
 611         else if (IS_ERR(data->pclk))
 612                 return PTR_ERR(data->pclk);
 613 
 614         if (!data->clk && (!data->aclk || !data->pclk)) {
 615                 dev_err(dev, "Failed to get device clock(s)!\n");
 616                 return -ENOSYS;
 617         }
 618 
 619         data->clk_master = devm_clk_get(dev, "master");
 620         if (PTR_ERR(data->clk_master) == -ENOENT)
 621                 data->clk_master = NULL;
 622         else if (IS_ERR(data->clk_master))
 623                 return PTR_ERR(data->clk_master);
 624 
 625         data->sysmmu = dev;
 626         spin_lock_init(&data->lock);
 627 
 628         ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
 629                                      dev_name(data->sysmmu));
 630         if (ret)
 631                 return ret;
 632 
 633         iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
 634         iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
 635 
 636         ret = iommu_device_register(&data->iommu);
 637         if (ret)
 638                 return ret;
 639 
 640         platform_set_drvdata(pdev, data);
 641 
 642         __sysmmu_get_version(data);
 643         if (PG_ENT_SHIFT < 0) {
 644                 if (MMU_MAJ_VER(data->version) < 5) {
 645                         PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
 646                         LV1_PROT = SYSMMU_LV1_PROT;
 647                         LV2_PROT = SYSMMU_LV2_PROT;
 648                 } else {
 649                         PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
 650                         LV1_PROT = SYSMMU_V5_LV1_PROT;
 651                         LV2_PROT = SYSMMU_V5_LV2_PROT;
 652                 }
 653         }
 654 
 655         /*
 656          * Use the first registered sysmmu device for performing
 657          * DMA mapping operations on the IOMMU page tables (CPU cache flush).
 658          */
 659         if (!dma_dev)
 660                 dma_dev = &pdev->dev;
 661 
 662         pm_runtime_enable(dev);
 663 
 664         return 0;
 665 }
 666 
 667 static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
 668 {
 669         struct sysmmu_drvdata *data = dev_get_drvdata(dev);
 670         struct device *master = data->master;
 671 
 672         if (master) {
 673                 struct exynos_iommu_owner *owner = master->archdata.iommu;
 674 
 675                 mutex_lock(&owner->rpm_lock);
 676                 if (data->domain) {
 677                         dev_dbg(data->sysmmu, "saving state\n");
 678                         __sysmmu_disable(data);
 679                 }
 680                 mutex_unlock(&owner->rpm_lock);
 681         }
 682         return 0;
 683 }
 684 
 685 static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
 686 {
 687         struct sysmmu_drvdata *data = dev_get_drvdata(dev);
 688         struct device *master = data->master;
 689 
 690         if (master) {
 691                 struct exynos_iommu_owner *owner = master->archdata.iommu;
 692 
 693                 mutex_lock(&owner->rpm_lock);
 694                 if (data->domain) {
 695                         dev_dbg(data->sysmmu, "restoring state\n");
 696                         __sysmmu_enable(data);
 697                 }
 698                 mutex_unlock(&owner->rpm_lock);
 699         }
 700         return 0;
 701 }
 702 
 703 static const struct dev_pm_ops sysmmu_pm_ops = {
 704         SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
 705         SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
 706                                 pm_runtime_force_resume)
 707 };
 708 
 709 static const struct of_device_id sysmmu_of_match[] = {
 710         { .compatible   = "samsung,exynos-sysmmu", },
 711         { },
 712 };
 713 
 714 static struct platform_driver exynos_sysmmu_driver __refdata = {
 715         .probe  = exynos_sysmmu_probe,
 716         .driver = {
 717                 .name           = "exynos-sysmmu",
 718                 .of_match_table = sysmmu_of_match,
 719                 .pm             = &sysmmu_pm_ops,
 720                 .suppress_bind_attrs = true,
 721         }
 722 };
 723 
 724 static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
 725 {
 726         dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
 727                                 DMA_TO_DEVICE);
 728         *ent = cpu_to_le32(val);
 729         dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
 730                                    DMA_TO_DEVICE);
 731 }
 732 
 733 static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
 734 {
 735         struct exynos_iommu_domain *domain;
 736         dma_addr_t handle;
 737         int i;
 738 
 739         /* Check if correct PTE offsets are initialized */
 740         BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
 741 
 742         domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 743         if (!domain)
 744                 return NULL;
 745 
 746         if (type == IOMMU_DOMAIN_DMA) {
 747                 if (iommu_get_dma_cookie(&domain->domain) != 0)
 748                         goto err_pgtable;
 749         } else if (type != IOMMU_DOMAIN_UNMANAGED) {
 750                 goto err_pgtable;
 751         }
 752 
 753         domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
 754         if (!domain->pgtable)
 755                 goto err_dma_cookie;
 756 
 757         domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
 758         if (!domain->lv2entcnt)
 759                 goto err_counter;
 760 
 761         /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
 762         for (i = 0; i < NUM_LV1ENTRIES; i++)
 763                 domain->pgtable[i] = ZERO_LV2LINK;
 764 
 765         handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
 766                                 DMA_TO_DEVICE);
 767         /* For mapping page table entries we rely on dma == phys */
 768         BUG_ON(handle != virt_to_phys(domain->pgtable));
 769         if (dma_mapping_error(dma_dev, handle))
 770                 goto err_lv2ent;
 771 
 772         spin_lock_init(&domain->lock);
 773         spin_lock_init(&domain->pgtablelock);
 774         INIT_LIST_HEAD(&domain->clients);
 775 
 776         domain->domain.geometry.aperture_start = 0;
 777         domain->domain.geometry.aperture_end   = ~0UL;
 778         domain->domain.geometry.force_aperture = true;
 779 
 780         return &domain->domain;
 781 
 782 err_lv2ent:
 783         free_pages((unsigned long)domain->lv2entcnt, 1);
 784 err_counter:
 785         free_pages((unsigned long)domain->pgtable, 2);
 786 err_dma_cookie:
 787         if (type == IOMMU_DOMAIN_DMA)
 788                 iommu_put_dma_cookie(&domain->domain);
 789 err_pgtable:
 790         kfree(domain);
 791         return NULL;
 792 }
 793 
 794 static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
 795 {
 796         struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 797         struct sysmmu_drvdata *data, *next;
 798         unsigned long flags;
 799         int i;
 800 
 801         WARN_ON(!list_empty(&domain->clients));
 802 
 803         spin_lock_irqsave(&domain->lock, flags);
 804 
 805         list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
 806                 spin_lock(&data->lock);
 807                 __sysmmu_disable(data);
 808                 data->pgtable = 0;
 809                 data->domain = NULL;
 810                 list_del_init(&data->domain_node);
 811                 spin_unlock(&data->lock);
 812         }
 813 
 814         spin_unlock_irqrestore(&domain->lock, flags);
 815 
 816         if (iommu_domain->type == IOMMU_DOMAIN_DMA)
 817                 iommu_put_dma_cookie(iommu_domain);
 818 
 819         dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
 820                          DMA_TO_DEVICE);
 821 
 822         for (i = 0; i < NUM_LV1ENTRIES; i++)
 823                 if (lv1ent_page(domain->pgtable + i)) {
 824                         phys_addr_t base = lv2table_base(domain->pgtable + i);
 825 
 826                         dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
 827                                          DMA_TO_DEVICE);
 828                         kmem_cache_free(lv2table_kmem_cache,
 829                                         phys_to_virt(base));
 830                 }
 831 
 832         free_pages((unsigned long)domain->pgtable, 2);
 833         free_pages((unsigned long)domain->lv2entcnt, 1);
 834         kfree(domain);
 835 }
 836 
 837 static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
 838                                     struct device *dev)
 839 {
 840         struct exynos_iommu_owner *owner = dev->archdata.iommu;
 841         struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 842         phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 843         struct sysmmu_drvdata *data, *next;
 844         unsigned long flags;
 845 
 846         if (!has_sysmmu(dev) || owner->domain != iommu_domain)
 847                 return;
 848 
 849         mutex_lock(&owner->rpm_lock);
 850 
 851         list_for_each_entry(data, &owner->controllers, owner_node) {
 852                 pm_runtime_get_noresume(data->sysmmu);
 853                 if (pm_runtime_active(data->sysmmu))
 854                         __sysmmu_disable(data);
 855                 pm_runtime_put(data->sysmmu);
 856         }
 857 
 858         spin_lock_irqsave(&domain->lock, flags);
 859         list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
 860                 spin_lock(&data->lock);
 861                 data->pgtable = 0;
 862                 data->domain = NULL;
 863                 list_del_init(&data->domain_node);
 864                 spin_unlock(&data->lock);
 865         }
 866         owner->domain = NULL;
 867         spin_unlock_irqrestore(&domain->lock, flags);
 868 
 869         mutex_unlock(&owner->rpm_lock);
 870 
 871         dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
 872                 &pagetable);
 873 }
 874 
 875 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
 876                                    struct device *dev)
 877 {
 878         struct exynos_iommu_owner *owner = dev->archdata.iommu;
 879         struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
 880         struct sysmmu_drvdata *data;
 881         phys_addr_t pagetable = virt_to_phys(domain->pgtable);
 882         unsigned long flags;
 883 
 884         if (!has_sysmmu(dev))
 885                 return -ENODEV;
 886 
 887         if (owner->domain)
 888                 exynos_iommu_detach_device(owner->domain, dev);
 889 
 890         mutex_lock(&owner->rpm_lock);
 891 
 892         spin_lock_irqsave(&domain->lock, flags);
 893         list_for_each_entry(data, &owner->controllers, owner_node) {
 894                 spin_lock(&data->lock);
 895                 data->pgtable = pagetable;
 896                 data->domain = domain;
 897                 list_add_tail(&data->domain_node, &domain->clients);
 898                 spin_unlock(&data->lock);
 899         }
 900         owner->domain = iommu_domain;
 901         spin_unlock_irqrestore(&domain->lock, flags);
 902 
 903         list_for_each_entry(data, &owner->controllers, owner_node) {
 904                 pm_runtime_get_noresume(data->sysmmu);
 905                 if (pm_runtime_active(data->sysmmu))
 906                         __sysmmu_enable(data);
 907                 pm_runtime_put(data->sysmmu);
 908         }
 909 
 910         mutex_unlock(&owner->rpm_lock);
 911 
 912         dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
 913                 &pagetable);
 914 
 915         return 0;
 916 }
 917 
 918 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
 919                 sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
 920 {
 921         if (lv1ent_section(sent)) {
 922                 WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
 923                 return ERR_PTR(-EADDRINUSE);
 924         }
 925 
 926         if (lv1ent_fault(sent)) {
 927                 dma_addr_t handle;
 928                 sysmmu_pte_t *pent;
 929                 bool need_flush_flpd_cache = lv1ent_zero(sent);
 930 
 931                 pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
 932                 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
 933                 if (!pent)
 934                         return ERR_PTR(-ENOMEM);
 935 
 936                 update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
 937                 kmemleak_ignore(pent);
 938                 *pgcounter = NUM_LV2ENTRIES;
 939                 handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
 940                                         DMA_TO_DEVICE);
 941                 if (dma_mapping_error(dma_dev, handle)) {
 942                         kmem_cache_free(lv2table_kmem_cache, pent);
 943                         return ERR_PTR(-EADDRINUSE);
 944                 }
 945 
 946                 /*
 947                  * If the pre-fetched SLPD is the faulty SLPD in
 948                  * zero_lv2_table, the FLPD cache may still hold the
 949                  * address of zero_lv2_table. This function has just
 950                  * replaced that link with a new L2 page table so that
 951                  * valid mappings can be written, but accessing the
 952                  * now-valid area could still fault because the FLPD
 953                  * cache may keep serving zero_lv2_table for it instead
 954                  * of the new L2 page table that carries the real
 955                  * mapping.
 956                  * Therefore any replacement of zero_lv2_table with a
 957                  * valid L2 page table must invalidate the FLPD cache
 958                  * on System MMU v3.3.
 959                  * The invalidation is done via TLB invalidation by VPN
 960                  * without blocking, which is safe here because the
 961                  * target address is not currently mapped.
 962                  */
 963                 if (need_flush_flpd_cache) {
 964                         struct sysmmu_drvdata *data;
 965 
 966                         spin_lock(&domain->lock);
 967                         list_for_each_entry(data, &domain->clients, domain_node)
 968                                 sysmmu_tlb_invalidate_flpdcache(data, iova);
 969                         spin_unlock(&domain->lock);
 970                 }
 971         }
 972 
 973         return page_entry(sent, iova);
 974 }
 975 
 976 static int lv1set_section(struct exynos_iommu_domain *domain,
 977                           sysmmu_pte_t *sent, sysmmu_iova_t iova,
 978                           phys_addr_t paddr, int prot, short *pgcnt)
 979 {
 980         if (lv1ent_section(sent)) {
 981                 WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
 982                         iova);
 983                 return -EADDRINUSE;
 984         }
 985 
 986         if (lv1ent_page(sent)) {
 987                 if (*pgcnt != NUM_LV2ENTRIES) {
 988                         WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
 989                                 iova);
 990                         return -EADDRINUSE;
 991                 }
 992 
 993                 kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
 994                 *pgcnt = 0;
 995         }
 996 
 997         update_pte(sent, mk_lv1ent_sect(paddr, prot));
 998 
 999         spin_lock(&domain->lock);
1000         if (lv1ent_page_zero(sent)) {
1001                 struct sysmmu_drvdata *data;
1002                 /*
1003                  * Flush the FLPD cache in System MMU v3.3, which may have cached
1004                  * this FLPD by a speculative prefetch of an SLPD with no mapping.
1005                  */
1006                 list_for_each_entry(data, &domain->clients, domain_node)
1007                         sysmmu_tlb_invalidate_flpdcache(data, iova);
1008         }
1009         spin_unlock(&domain->lock);
1010 
1011         return 0;
1012 }
1013 
1014 static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
1015                        int prot, short *pgcnt)
1016 {
1017         if (size == SPAGE_SIZE) {
1018                 if (WARN_ON(!lv2ent_fault(pent)))
1019                         return -EADDRINUSE;
1020 
1021                 update_pte(pent, mk_lv2ent_spage(paddr, prot));
1022                 *pgcnt -= 1;
1023         } else { 
1024                 int i;
1025                 dma_addr_t pent_base = virt_to_phys(pent);
1026 
1027                 dma_sync_single_for_cpu(dma_dev, pent_base,
1028                                         sizeof(*pent) * SPAGES_PER_LPAGE,
1029                                         DMA_TO_DEVICE);
1030                 for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
1031                         if (WARN_ON(!lv2ent_fault(pent))) {
1032                                 if (i > 0)
1033                                         memset(pent - i, 0, sizeof(*pent) * i);
1034                                 return -EADDRINUSE;
1035                         }
1036 
1037                         *pent = mk_lv2ent_lpage(paddr, prot);
1038                 }
1039                 dma_sync_single_for_device(dma_dev, pent_base,
1040                                            sizeof(*pent) * SPAGES_PER_LPAGE,
1041                                            DMA_TO_DEVICE);
1042                 *pgcnt -= SPAGES_PER_LPAGE;
1043         }
1044 
1045         return 0;
1046 }
1047 
1048 /*
1049  * Note on System MMU v3.x: the hardware speculatively prefetches page
1050  * table entries and may cache a faulty entry (the shared zero_lv2_table
1051  * link), reporting a fault on it even after the entry has been replaced
1052  * by a valid mapping. The driver works around this by invalidating the
1053  * FLPD cache of every attached SYSMMU whenever a zero_lv2_table link is
1054  * replaced with a real first-level entry; see alloc_lv2entry() and
1055  * lv1set_section() above.
1056  */
1057 
1058 
1059 
1060 
1061 
1062 
1063 
1064 
1065 
1066 
1067 
1068 
1069 
1070 
1071 
1072 
1073 
1074 static int exynos_iommu_map(struct iommu_domain *iommu_domain,
1075                             unsigned long l_iova, phys_addr_t paddr, size_t size,
1076                             int prot)
1077 {
1078         struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1079         sysmmu_pte_t *entry;
1080         sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1081         unsigned long flags;
1082         int ret = -ENOMEM;
1083 
1084         BUG_ON(domain->pgtable == NULL);
1085         prot &= SYSMMU_SUPPORTED_PROT_BITS;
1086 
1087         spin_lock_irqsave(&domain->pgtablelock, flags);
1088 
1089         entry = section_entry(domain->pgtable, iova);
1090 
1091         if (size == SECT_SIZE) {
1092                 ret = lv1set_section(domain, entry, iova, paddr, prot,
1093                                      &domain->lv2entcnt[lv1ent_offset(iova)]);
1094         } else {
1095                 sysmmu_pte_t *pent;
1096 
1097                 pent = alloc_lv2entry(domain, entry, iova,
1098                                       &domain->lv2entcnt[lv1ent_offset(iova)]);
1099 
1100                 if (IS_ERR(pent))
1101                         ret = PTR_ERR(pent);
1102                 else
1103                         ret = lv2set_page(pent, paddr, size, prot,
1104                                        &domain->lv2entcnt[lv1ent_offset(iova)]);
1105         }
1106 
1107         if (ret)
1108                 pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
1109                         __func__, ret, size, iova);
1110 
1111         spin_unlock_irqrestore(&domain->pgtablelock, flags);
1112 
1113         return ret;
1114 }
1115 
1116 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
1117                                               sysmmu_iova_t iova, size_t size)
1118 {
1119         struct sysmmu_drvdata *data;
1120         unsigned long flags;
1121 
1122         spin_lock_irqsave(&domain->lock, flags);
1123 
1124         list_for_each_entry(data, &domain->clients, domain_node)
1125                 sysmmu_tlb_invalidate_entry(data, iova, size);
1126 
1127         spin_unlock_irqrestore(&domain->lock, flags);
1128 }
1129 
1130 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
1131                                  unsigned long l_iova, size_t size,
1132                                  struct iommu_iotlb_gather *gather)
1133 {
1134         struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1135         sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1136         sysmmu_pte_t *ent;
1137         size_t err_pgsize;
1138         unsigned long flags;
1139 
1140         BUG_ON(domain->pgtable == NULL);
1141 
1142         spin_lock_irqsave(&domain->pgtablelock, flags);
1143 
1144         ent = section_entry(domain->pgtable, iova);
1145 
1146         if (lv1ent_section(ent)) {
1147                 if (WARN_ON(size < SECT_SIZE)) {
1148                         err_pgsize = SECT_SIZE;
1149                         goto err;
1150                 }
1151 
1152                 /* workaround for h/w bug in System MMU v3.3 */
1153                 update_pte(ent, ZERO_LV2LINK);
1154                 size = SECT_SIZE;
1155                 goto done;
1156         }
1157 
1158         if (unlikely(lv1ent_fault(ent))) {
1159                 if (size > SECT_SIZE)
1160                         size = SECT_SIZE;
1161                 goto done;
1162         }
1163 
1164         /* lv1ent_page(ent) == true here */
1165 
1166         ent = page_entry(ent, iova);
1167 
1168         if (unlikely(lv2ent_fault(ent))) {
1169                 size = SPAGE_SIZE;
1170                 goto done;
1171         }
1172 
1173         if (lv2ent_small(ent)) {
1174                 update_pte(ent, 0);
1175                 size = SPAGE_SIZE;
1176                 domain->lv2entcnt[lv1ent_offset(iova)] += 1;
1177                 goto done;
1178         }
1179 
1180         /* lv2ent_large(ent) == true here */
1181         if (WARN_ON(size < LPAGE_SIZE)) {
1182                 err_pgsize = LPAGE_SIZE;
1183                 goto err;
1184         }
1185 
1186         dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
1187                                 sizeof(*ent) * SPAGES_PER_LPAGE,
1188                                 DMA_TO_DEVICE);
1189         memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1190         dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
1191                                    sizeof(*ent) * SPAGES_PER_LPAGE,
1192                                    DMA_TO_DEVICE);
1193         size = LPAGE_SIZE;
1194         domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1195 done:
1196         spin_unlock_irqrestore(&domain->pgtablelock, flags);
1197 
1198         exynos_iommu_tlb_invalidate_entry(domain, iova, size);
1199 
1200         return size;
1201 err:
1202         spin_unlock_irqrestore(&domain->pgtablelock, flags);
1203 
1204         pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
1205                 __func__, size, iova, err_pgsize);
1206 
1207         return 0;
1208 }
1209 
1210 static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1211                                           dma_addr_t iova)
1212 {
1213         struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1214         sysmmu_pte_t *entry;
1215         unsigned long flags;
1216         phys_addr_t phys = 0;
1217 
1218         spin_lock_irqsave(&domain->pgtablelock, flags);
1219 
1220         entry = section_entry(domain->pgtable, iova);
1221 
1222         if (lv1ent_section(entry)) {
1223                 phys = section_phys(entry) + section_offs(iova);
1224         } else if (lv1ent_page(entry)) {
1225                 entry = page_entry(entry, iova);
1226 
1227                 if (lv2ent_large(entry))
1228                         phys = lpage_phys(entry) + lpage_offs(iova);
1229                 else if (lv2ent_small(entry))
1230                         phys = spage_phys(entry) + spage_offs(iova);
1231         }
1232 
1233         spin_unlock_irqrestore(&domain->pgtablelock, flags);
1234 
1235         return phys;
1236 }
1237 
1238 static int exynos_iommu_add_device(struct device *dev)
1239 {
1240         struct exynos_iommu_owner *owner = dev->archdata.iommu;
1241         struct sysmmu_drvdata *data;
1242         struct iommu_group *group;
1243 
1244         if (!has_sysmmu(dev))
1245                 return -ENODEV;
1246 
1247         group = iommu_group_get_for_dev(dev);
1248 
1249         if (IS_ERR(group))
1250                 return PTR_ERR(group);
1251 
1252         list_for_each_entry(data, &owner->controllers, owner_node) {
1253                 /*
1254                  * SYSMMU will be runtime activated via the device link
1255                  * (dependency) to its master device, so there are no
1256                  * direct calls to pm_runtime_get/put in this driver.
1257                  */
1258                 data->link = device_link_add(dev, data->sysmmu,
1259                                              DL_FLAG_STATELESS |
1260                                              DL_FLAG_PM_RUNTIME);
1261         }
1262         iommu_group_put(group);
1263 
1264         return 0;
1265 }
1266 
1267 static void exynos_iommu_remove_device(struct device *dev)
1268 {
1269         struct exynos_iommu_owner *owner = dev->archdata.iommu;
1270         struct sysmmu_drvdata *data;
1271 
1272         if (!has_sysmmu(dev))
1273                 return;
1274 
1275         if (owner->domain) {
1276                 struct iommu_group *group = iommu_group_get(dev);
1277 
1278                 if (group) {
1279                         WARN_ON(owner->domain !=
1280                                 iommu_group_default_domain(group));
1281                         exynos_iommu_detach_device(owner->domain, dev);
1282                         iommu_group_put(group);
1283                 }
1284         }
1285         iommu_group_remove_device(dev);
1286 
1287         list_for_each_entry(data, &owner->controllers, owner_node)
1288                 device_link_del(data->link);
1289 }
1290 
1291 static int exynos_iommu_of_xlate(struct device *dev,
1292                                  struct of_phandle_args *spec)
1293 {
1294         struct exynos_iommu_owner *owner = dev->archdata.iommu;
1295         struct platform_device *sysmmu = of_find_device_by_node(spec->np);
1296         struct sysmmu_drvdata *data, *entry;
1297 
1298         if (!sysmmu)
1299                 return -ENODEV;
1300 
1301         data = platform_get_drvdata(sysmmu);
1302         if (!data)
1303                 return -ENODEV;
1304 
1305         if (!owner) {
1306                 owner = kzalloc(sizeof(*owner), GFP_KERNEL);
1307                 if (!owner)
1308                         return -ENOMEM;
1309 
1310                 INIT_LIST_HEAD(&owner->controllers);
1311                 mutex_init(&owner->rpm_lock);
1312                 dev->archdata.iommu = owner;
1313         }
1314 
1315         list_for_each_entry(entry, &owner->controllers, owner_node)
1316                 if (entry == data)
1317                         return 0;
1318 
1319         list_add_tail(&data->owner_node, &owner->controllers);
1320         data->master = dev;
1321 
1322         return 0;
1323 }
1324 
1325 static const struct iommu_ops exynos_iommu_ops = {
1326         .domain_alloc = exynos_iommu_domain_alloc,
1327         .domain_free = exynos_iommu_domain_free,
1328         .attach_dev = exynos_iommu_attach_device,
1329         .detach_dev = exynos_iommu_detach_device,
1330         .map = exynos_iommu_map,
1331         .unmap = exynos_iommu_unmap,
1332         .iova_to_phys = exynos_iommu_iova_to_phys,
1333         .device_group = generic_device_group,
1334         .add_device = exynos_iommu_add_device,
1335         .remove_device = exynos_iommu_remove_device,
1336         .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1337         .of_xlate = exynos_iommu_of_xlate,
1338 };
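/*
 * Illustrative only, a sketch under assumptions rather than part of this
 * driver: once exynos_iommu_init() below registers these ops for the
 * platform bus, a client driver holding a suitable "dev" would exercise
 * the callbacks above through the generic IOMMU API roughly like this
 * ("dev", "phys" and the IOVA are placeholders):
 *
 *      struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *      if (dom && !iommu_attach_device(dom, dev)) {
 *              // a 1MiB-aligned request ends up in lv1set_section()
 *              iommu_map(dom, 0x10000000, phys, SECT_SIZE,
 *                        IOMMU_READ | IOMMU_WRITE);
 *              ...
 *              iommu_unmap(dom, 0x10000000, SECT_SIZE);
 *              iommu_detach_device(dom, dev);
 *              iommu_domain_free(dom);
 *      }
 *
 * In practice most masters instead use the dma-iommu path set up for
 * IOMMU_DOMAIN_DMA domains and never call iommu_map() directly.
 */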
1339 
1340 static int __init exynos_iommu_init(void)
1341 {
1342         struct device_node *np;
1343         int ret;
1344 
1345         np = of_find_matching_node(NULL, sysmmu_of_match);
1346         if (!np)
1347                 return 0;
1348 
1349         of_node_put(np);
1350 
1351         lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1352                                 LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1353         if (!lv2table_kmem_cache) {
1354                 pr_err("%s: Failed to create kmem cache\n", __func__);
1355                 return -ENOMEM;
1356         }
1357 
1358         ret = platform_driver_register(&exynos_sysmmu_driver);
1359         if (ret) {
1360                 pr_err("%s: Failed to register driver\n", __func__);
1361                 goto err_reg_driver;
1362         }
1363 
1364         zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
1365         if (zero_lv2_table == NULL) {
1366                 pr_err("%s: Failed to allocate zero level2 page table\n",
1367                         __func__);
1368                 ret = -ENOMEM;
1369                 goto err_zero_lv2;
1370         }
1371 
1372         ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1373         if (ret) {
1374                 pr_err("%s: Failed to register exynos-iommu driver.\n",
1375                                                                 __func__);
1376                 goto err_set_iommu;
1377         }
1378 
1379         return 0;
1380 err_set_iommu:
1381         kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
1382 err_zero_lv2:
1383         platform_driver_unregister(&exynos_sysmmu_driver);
1384 err_reg_driver:
1385         kmem_cache_destroy(lv2table_kmem_cache);
1386         return ret;
1387 }
1388 core_initcall(exynos_iommu_init);