This source file includes the following definitions:
- mk_pmb_entry
- mk_pmb_addr
- mk_pmb_data
- pmb_ppn_in_range
- pmb_cache_flags
- pgprot_to_pmb_flags
- pmb_can_merge
- pmb_mapping_exists
- pmb_size_valid
- pmb_addr_valid
- pmb_prot_valid
- pmb_size_to_flags
- pmb_alloc_entry
- pmb_alloc
- pmb_free
- __set_pmb_entry
- __clear_pmb_entry
- set_pmb_entry
- pmb_bolt_mapping
- pmb_remap_caller
- pmb_unmap
- __pmb_unmap_entry
- pmb_unmap_entry
- pmb_notify
- pmb_synchronize
- pmb_merge
- pmb_coalesce
- pmb_resize
- early_pmb
- pmb_init
- __in_29bit_mode
- pmb_seq_show
- pmb_debugfs_open
- pmb_debugfs_init
- pmb_syscore_resume
- pmb_sysdev_init
/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        raw_spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};

static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}
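
/*
 * Illustrative example (not part of the upstream file): each PMB slot
 * is programmed through a pair of memory-mapped array registers, and
 * the helpers above simply fold the entry number into the register
 * address. Assuming the usual SH-4A encoding (PMB_ADDR = 0xf6100000,
 * PMB_DATA = 0xf7100000, PMB_E_SHIFT = 8 -- constants taken from the
 * SH-4A manual, not from this file), entry 5 decodes as:
 *
 *      mk_pmb_entry(5) == 5 << 8             == 0x00000500
 *      mk_pmb_addr(5)  == 0xf6100000 | 0x500 == 0xf6100500
 *      mk_pmb_data(5)  == 0xf7100000 | 0x500 == 0xf7100500
 *
 * Only the shift-and-or construction is taken from the code above;
 * the concrete base addresses are assumptions.
 */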

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
        flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
        unsigned long pmb_flags = 0;
        u64 flags = pgprot_val(prot);

        if (flags & _PAGE_CACHABLE)
                pmb_flags |= PMB_C;
        if (flags & _PAGE_WT)
                pmb_flags |= PMB_WT | PMB_UB;

        return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}
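
/*
 * Illustrative example (hypothetical values, not from the upstream
 * file): two entries merge only when b starts exactly where a ends in
 * both address spaces and carries identical flags, e.g.
 *
 *      a: vpn 0x80000000, ppn 0x40000000, size SZ_64M
 *      b: vpn 0x84000000, ppn 0x44000000, size SZ_64M, same flags
 *
 * Here 0x80000000 + SZ_64M == 0x84000000 and 0x40000000 + SZ_64M ==
 * 0x44000000, so pmb_can_merge(a, b) is true; any hole, overlap, or
 * caching-attribute mismatch makes it false.
 */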

static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
                               unsigned long size)
{
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe, *iter;
                unsigned long span;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * See if VPN and PPN are bounded by an existing mapping.
                 */
                if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
                        continue;
                if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
                        continue;

                /*
                 * Now see if we're in range of a simple mapping.
                 */
                if (size <= pmbe->size) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }

                span = pmbe->size;

                /*
                 * Finally for sizes that involve compound mappings,
                 * walk the chain.
                 */
                for (iter = pmbe->link; iter; iter = iter->link)
                        span += iter->size;

                /*
                 * Nothing else to do if the range requirements are met.
                 */
                if (size <= span) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }
        }

        read_unlock(&pmb_rwlock);
        return false;
}

static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
        return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}

static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}

static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        memset(pmbe, 0, sizeof(struct pmb_entry));

        raw_spin_lock_init(&pmbe->lock);

        pmbe->vpn       = vpn;
        pmbe->ppn       = ppn;
        pmbe->flags     = flags;
        pmbe->entry     = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}
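
/*
 * Illustrative note (not from the upstream file): pmb_alloc() has two
 * modes, selected by the last argument. A sketch of both:
 *
 *      // let the bitmap allocator pick the first free slot
 *      pmbe = pmb_alloc(vpn, ppn, flags, PMB_NO_ENTRY);
 *
 *      // claim hardware slot 3 explicitly (-ENOSPC if already taken),
 *      // as pmb_synchronize() does for boot-loader-established entries
 *      pmbe = pmb_alloc(vpn, ppn, flags, 3);
 *
 * Either way the result must be checked with IS_ERR() before use.
 */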

static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry     = PMB_NO_ENTRY;
        pmbe->link      = NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        jump_to_uncached();

        /* Set V-bit */
        __raw_writel(pmbe->vpn | PMB_V, addr);
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

        back_to_cached();
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long orig_addr, orig_size;
        unsigned long flags, pmb_flags;
        int i, mapped;

        if (size < SZ_16M)
                return -EINVAL;
        if (!pmb_addr_valid(vaddr, size))
                return -EFAULT;
        if (pmb_mapping_exists(vaddr, phys, size))
                return 0;

        orig_addr = vaddr;
        orig_size = size;

        flush_tlb_kernel_range(vaddr, vaddr + size);

        pmb_flags = pgprot_to_pmb_flags(prot);
        pmbp = NULL;

        do {
                for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                        if (size < pmb_sizes[i].size)
                                continue;

                        pmbe = pmb_alloc(vaddr, phys, pmb_flags |
                                         pmb_sizes[i].flag, PMB_NO_ENTRY);
                        if (IS_ERR(pmbe)) {
                                pmb_unmap_entry(pmbp, mapped);
                                return PTR_ERR(pmbe);
                        }

                        raw_spin_lock_irqsave(&pmbe->lock, flags);

                        pmbe->size = pmb_sizes[i].size;

                        __set_pmb_entry(pmbe);

                        phys    += pmbe->size;
                        vaddr   += pmbe->size;
                        size    -= pmbe->size;

                        /*
                         * Link adjacent entries that span multiple PMB
                         * entries for easier tear-down.
                         */
                        if (likely(pmbp)) {
                                raw_spin_lock_nested(&pmbp->lock,
                                                     SINGLE_DEPTH_NESTING);
                                pmbp->link = pmbe;
                                raw_spin_unlock(&pmbp->lock);
                        }

                        pmbp = pmbe;

                        /*
                         * Instead of trying smaller sizes on every
                         * iteration (even if we succeed in allocating
                         * space), try using pmb_sizes[i].size again.
                         */
                        i--;
                        mapped++;

                        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
                }
        } while (size >= SZ_16M);

        flush_cache_vmap(orig_addr, orig_addr + orig_size);

        return 0;
}
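
/*
 * Worked example (hypothetical request, not from the upstream file):
 * the loop above is greedy because pmb_sizes[] is sorted largest first
 * and the "i--" retries the same size until it no longer fits. For a
 * 96MB bolt request the decomposition runs:
 *
 *      96MB >= 64MB -> 64MB entry, 32MB left  (retry 64MB: too big)
 *      32MB >= 16MB -> 16MB entry, 16MB left  (retry 16MB)
 *      16MB >= 16MB -> 16MB entry,  0   left
 *
 * yielding three PMB entries chained through ->link, which is exactly
 * the shape that pmb_unmap_entry() and pmb_coalesce() later walk.
 */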

void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                               pgprot_t prot, void *caller)
{
        unsigned long vaddr;
        phys_addr_t offset, last_addr;
        phys_addr_t align_mask;
        unsigned long aligned;
        struct vm_struct *area;
        int i, ret;

        if (!pmb_iomapping_enabled)
                return NULL;

        /*
         * Small mappings need to go through the TLB.
         */
        if (size < SZ_16M)
                return ERR_PTR(-EINVAL);
        if (!pmb_prot_valid(prot))
                return ERR_PTR(-EINVAL);

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (size >= pmb_sizes[i].size)
                        break;

        last_addr = phys + size;
        align_mask = ~(pmb_sizes[i].size - 1);
        offset = phys & ~align_mask;
        phys &= align_mask;
        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

        /*
         * XXX: This should really start from uncached_end, but this
         * causes the MMU to reset, so restrict it to the end of the
         * fixed uncached range for the time being.
         */
        area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
                                    P3SEG, caller);
        if (!area)
                return NULL;

        area->phys_addr = phys;
        vaddr = (unsigned long)area->addr;

        ret = pmb_bolt_mapping(vaddr, phys, size, prot);
        if (unlikely(ret != 0))
                return ERR_PTR(ret);

        return (void __iomem *)(offset + (char *)vaddr);
}
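
/*
 * Worked example (hypothetical addresses, not from the upstream file):
 * the alignment arithmetic above rounds the request out to whole PMB
 * frames of the chosen size. For phys = 0x10400000, size = SZ_64M the
 * largest fitting size is 64MB, so:
 *
 *      align_mask = ~(SZ_64M - 1)                      = 0xfc000000
 *      offset     = 0x10400000 & 0x03ffffff            = 0x00400000
 *      phys      &= align_mask                        -> 0x10000000
 *      last_addr  = 0x10400000 + SZ_64M                = 0x14400000
 *      aligned    = ALIGN(0x14400000, SZ_64M) - phys   = SZ_128M
 *
 * i.e. a 64MB request straddling a 64MB frame boundary reserves 128MB
 * of virtual space, and the caller gets back vaddr + offset so the
 * returned pointer still targets 0x10400000.
 */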

int pmb_unmap(void __iomem *addr)
{
        struct pmb_entry *pmbe = NULL;
        unsigned long vaddr = (unsigned long __force)addr;
        int i, found = 0;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == vaddr) {
                                found = 1;
                                break;
                        }
                }
        }

        read_unlock(&pmb_rwlock);

        if (found) {
                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
                return 0;
        }

        return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe, as
                 * no other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}

static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware,
 * registering whatever the boot loader left behind.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of range.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                raw_spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
                        /*
                         * Compare the previous entry against the current
                         * one to see if the entries span a contiguous
                         * mapping. If so, set up the entry links
                         * accordingly. Matching entries are assumed to
                         * be contiguous in both the virtual and physical
                         * sense.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;
                        raw_spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}

static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!depth || !pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}
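
/*
 * Worked example (hypothetical chain, not from the upstream file):
 * for a 16MB head entry linked to three more 16MB entries, the walk
 * above accumulates
 *
 *      i = 1: span = 32MB  (not a valid PMB size)
 *      i = 2: span = 48MB  (not a valid PMB size)
 *      i = 3: span = 64MB  (valid -> newsize = 64MB, depth = 3)
 *
 * so the head is rewritten as a single 64MB entry and the three
 * trailing entries are torn down, freeing their hardware slots. A
 * chain of 16MB + 16MB + 16MB (48MB total) would merge nothing, since
 * no prefix beyond the head sums to a supported size.
 */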

static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if this entry already uses the largest
                 * possible page size.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it
         * already has the right size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                raw_spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                raw_spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        read_unlock(&pmb_rwlock);
}
#endif

static int __init early_pmb(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "iomap"))
                pmb_iomapping_enabled = 1;

        return 0;
}
early_param("pmb", early_pmb);

void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        /* Log them */
        pmb_notify();

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        local_flush_tlb_all();
        ctrl_barrier();
}

bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}
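
/*
 * Illustrative output (hypothetical entry, not captured from real
 * hardware): with the format string above, a valid, cacheable,
 * copy-back 128MB boot mapping of 0x88000000 -> 0x08000000 in slot 0
 * would render in debugfs as:
 *
 *      ety   vpn  ppn  size   flags
 *      00: V 0x88 0x08 128MB C CB  B
 *
 * since only the top byte of each array register value is printed.
 */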

static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}

static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init pmb_debugfs_init(void)
{
        debugfs_create_file("pmb", S_IFREG | S_IRUGO, arch_debugfs_dir, NULL,
                            &pmb_debugfs_fops);
        return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
static void pmb_syscore_resume(void)
{
        struct pmb_entry *pmbe;
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        set_pmb_entry(pmbe);
                }
        }

        read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
        .resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
        register_syscore_ops(&pmb_syscore_ops);
        return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif