pmbe              145 arch/sh/mm/pmb.c 		struct pmb_entry *pmbe, *iter;
pmbe              151 arch/sh/mm/pmb.c 		pmbe = &pmb_entry_list[i];
pmbe              156 arch/sh/mm/pmb.c 		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
pmbe              158 arch/sh/mm/pmb.c 		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
pmbe              164 arch/sh/mm/pmb.c 		if (size <= pmbe->size) {
pmbe              169 arch/sh/mm/pmb.c 		span = pmbe->size;
pmbe              175 arch/sh/mm/pmb.c 		for (iter = pmbe->link; iter; iter = iter->link)
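
The occurrences at lines 145-175 are the existing-mapping check: a request is only satisfied by a previously bolted entry if both the virtual and the physical address fall inside that entry, and the chain hanging off ->link spans the requested size. A minimal user-space sketch of that pattern follows; the struct and function names are illustrative stand-ins, not the kernel's struct pmb_entry or its lookup helper.

    #include <stdbool.h>

    struct entry {
        unsigned long vpn, ppn, size;
        struct entry *link;             /* next piece of the same mapping */
    };

    /* Does the chain rooted at 'head' already cover vaddr/phys/size? */
    static bool chain_covers(const struct entry *head, unsigned long vaddr,
                             unsigned long phys, unsigned long size)
    {
        const struct entry *iter;
        unsigned long span;

        if (vaddr < head->vpn || vaddr >= head->vpn + head->size)
            return false;
        if (phys < head->ppn || phys >= head->ppn + head->size)
            return false;
        if (size <= head->size)
            return true;

        /* Sum the linked pieces, as the loop at line 175 does. */
        span = head->size;
        for (iter = head->link; iter; iter = iter->link)
            span += iter->size;

        return size <= span;
    }
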
pmbe              239 arch/sh/mm/pmb.c 	struct pmb_entry *pmbe;
pmbe              263 arch/sh/mm/pmb.c 	pmbe = &pmb_entry_list[pos];
pmbe              265 arch/sh/mm/pmb.c 	memset(pmbe, 0, sizeof(struct pmb_entry));
pmbe              267 arch/sh/mm/pmb.c 	raw_spin_lock_init(&pmbe->lock);
pmbe              269 arch/sh/mm/pmb.c 	pmbe->vpn	= vpn;
pmbe              270 arch/sh/mm/pmb.c 	pmbe->ppn	= ppn;
pmbe              271 arch/sh/mm/pmb.c 	pmbe->flags	= flags;
pmbe              272 arch/sh/mm/pmb.c 	pmbe->entry	= pos;
pmbe              274 arch/sh/mm/pmb.c 	return pmbe;
pmbe              281 arch/sh/mm/pmb.c static void pmb_free(struct pmb_entry *pmbe)
pmbe              283 arch/sh/mm/pmb.c 	__clear_bit(pmbe->entry, pmb_map);
pmbe              285 arch/sh/mm/pmb.c 	pmbe->entry	= PMB_NO_ENTRY;
pmbe              286 arch/sh/mm/pmb.c 	pmbe->link	= NULL;
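
Lines 239-286 show the allocate/free pair: a free slot is picked out of the fixed pmb_entry_list table, zeroed, and stamped with its own position, while freeing clears the slot's bit in pmb_map and resets the entry. The sketch below models that fixed-table-plus-bitmap pattern in plain C; the names, the 16-slot table size and the NO_ENTRY value are placeholders rather than the kernel definitions.

    #include <string.h>

    #define NR_ENTRIES  16
    #define NO_ENTRY    (-1)

    struct entry {
        unsigned long vpn, ppn, flags;
        int entry;                      /* slot index, or NO_ENTRY */
        struct entry *link;
    };

    static struct entry entry_list[NR_ENTRIES];
    static unsigned long entry_map;     /* one bit per slot */

    static struct entry *entry_alloc(unsigned long vpn, unsigned long ppn,
                                     unsigned long flags)
    {
        int pos;

        for (pos = 0; pos < NR_ENTRIES; pos++)
            if (!(entry_map & (1UL << pos)))
                break;
        if (pos == NR_ENTRIES)
            return NULL;                /* table full */

        entry_map |= 1UL << pos;

        struct entry *e = &entry_list[pos];

        memset(e, 0, sizeof(*e));
        e->vpn   = vpn;
        e->ppn   = ppn;
        e->flags = flags;
        e->entry = pos;

        return e;
    }

    static void entry_free(struct entry *e)
    {
        entry_map &= ~(1UL << e->entry);
        e->entry = NO_ENTRY;
        e->link  = NULL;
    }
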
pmbe              292 arch/sh/mm/pmb.c static void __set_pmb_entry(struct pmb_entry *pmbe)
pmbe              296 arch/sh/mm/pmb.c 	addr = mk_pmb_addr(pmbe->entry);
pmbe              297 arch/sh/mm/pmb.c 	data = mk_pmb_data(pmbe->entry);
pmbe              302 arch/sh/mm/pmb.c 	__raw_writel(pmbe->vpn | PMB_V, addr);
pmbe              303 arch/sh/mm/pmb.c 	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
pmbe              308 arch/sh/mm/pmb.c static void __clear_pmb_entry(struct pmb_entry *pmbe)
pmbe              313 arch/sh/mm/pmb.c 	addr = mk_pmb_addr(pmbe->entry);
pmbe              314 arch/sh/mm/pmb.c 	data = mk_pmb_data(pmbe->entry);
pmbe              325 arch/sh/mm/pmb.c static void set_pmb_entry(struct pmb_entry *pmbe)
pmbe              329 arch/sh/mm/pmb.c 	raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe              330 arch/sh/mm/pmb.c 	__set_pmb_entry(pmbe);
pmbe              331 arch/sh/mm/pmb.c 	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
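
Lines 292-331 program and tear down one hardware slot: the entry index is turned into the slot's address- and data-array locations, the address word is written as vpn | PMB_V, the data word as ppn | flags | PMB_V, and the locked set_pmb_entry() variant simply wraps the unlocked helper in the entry's spinlock. A small sketch of how the two words are composed, and how invalidation drops the valid bit again, is below; the VALID value is an illustrative placeholder, not the SH-4A bit assignment.

    #define VALID   0x1UL   /* placeholder for the hardware valid bit (PMB_V) */

    struct slot_words {
        unsigned long addr;             /* address-array word: vpn | VALID      */
        unsigned long data;             /* data-array word: ppn | flags | VALID */
    };

    static struct slot_words compose_slot(unsigned long vpn, unsigned long ppn,
                                          unsigned long flags)
    {
        struct slot_words w = {
            .addr = vpn | VALID,
            .data = ppn | flags | VALID,
        };
        return w;
    }

    static void invalidate_slot(struct slot_words *w)
    {
        w->addr &= ~VALID;              /* clearing VALID disables the translation */
        w->data &= ~VALID;
    }
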
pmbe              338 arch/sh/mm/pmb.c 	struct pmb_entry *pmbp, *pmbe;
pmbe              363 arch/sh/mm/pmb.c 			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
pmbe              365 arch/sh/mm/pmb.c 			if (IS_ERR(pmbe)) {
pmbe              367 arch/sh/mm/pmb.c 				return PTR_ERR(pmbe);
pmbe              370 arch/sh/mm/pmb.c 			raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe              372 arch/sh/mm/pmb.c 			pmbe->size = pmb_sizes[i].size;
pmbe              374 arch/sh/mm/pmb.c 			__set_pmb_entry(pmbe);
pmbe              376 arch/sh/mm/pmb.c 			phys	+= pmbe->size;
pmbe              377 arch/sh/mm/pmb.c 			vaddr	+= pmbe->size;
pmbe              378 arch/sh/mm/pmb.c 			size	-= pmbe->size;
pmbe              387 arch/sh/mm/pmb.c 				pmbp->link = pmbe;
pmbe              391 arch/sh/mm/pmb.c 			pmbp = pmbe;
pmbe              401 arch/sh/mm/pmb.c 			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
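
Lines 338-401 are the bolting loop: the requested region is carved into hardware sections by walking the pmb_sizes table, each piece gets its own entry (allocated, sized and written to hardware under the entry's lock), the cursor advances by the piece size, and consecutive pieces are chained through ->link so the whole mapping can later be torn down from its head. A simplified user-space model of that carving-and-chaining loop follows; the section sizes, names and allocation are stand-ins, and the real code additionally checks alignment and unwinds the chain on allocation failure.

    #include <stdlib.h>

    #define SZ_16M  0x01000000UL
    #define SZ_64M  0x04000000UL
    #define SZ_128M 0x08000000UL
    #define SZ_512M 0x20000000UL

    struct entry {
        unsigned long vpn, ppn, size, flags;
        struct entry *link;
    };

    /* Supported section sizes, largest first. */
    static const unsigned long sections[] = { SZ_512M, SZ_128M, SZ_64M, SZ_16M };
    #define NR_SECTIONS (sizeof(sections) / sizeof(sections[0]))

    /* Stand-in for the allocate-and-program step in the listing. */
    static struct entry *map_one(unsigned long vaddr, unsigned long phys,
                                 unsigned long size, unsigned long flags)
    {
        struct entry *e = calloc(1, sizeof(*e));

        if (e) {
            e->vpn   = vaddr;
            e->ppn   = phys;
            e->size  = size;
            e->flags = flags;
        }
        return e;
    }

    static struct entry *bolt_mapping(unsigned long vaddr, unsigned long phys,
                                      unsigned long size, unsigned long flags)
    {
        struct entry *head = NULL, *prev = NULL;
        unsigned int i;

        while (size) {
            /* Pick the largest section that still fits the remainder. */
            for (i = 0; i < NR_SECTIONS; i++)
                if (size >= sections[i])
                    break;
            if (i == NR_SECTIONS)
                break;                  /* remainder below the smallest section */

            struct entry *e = map_one(vaddr, phys, sections[i], flags);
            if (!e)
                break;                  /* the kernel unwinds the chain here */

            phys  += e->size;
            vaddr += e->size;
            size  -= e->size;

            /* Chain the pieces so teardown can follow ->link. */
            if (prev)
                prev->link = e;
            else
                head = e;
            prev = e;
        }
        return head;
    }
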
pmbe              463 arch/sh/mm/pmb.c 	struct pmb_entry *pmbe = NULL;
pmbe              471 arch/sh/mm/pmb.c 			pmbe = &pmb_entry_list[i];
pmbe              472 arch/sh/mm/pmb.c 			if (pmbe->vpn == vaddr) {
pmbe              482 arch/sh/mm/pmb.c 		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
pmbe              489 arch/sh/mm/pmb.c static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
pmbe              492 arch/sh/mm/pmb.c 		struct pmb_entry *pmblink = pmbe;
pmbe              504 arch/sh/mm/pmb.c 		__clear_pmb_entry(pmbe);
pmbe              506 arch/sh/mm/pmb.c 		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
pmbe              508 arch/sh/mm/pmb.c 		pmbe = pmblink->link;
pmbe              511 arch/sh/mm/pmb.c 	} while (pmbe && --depth);
pmbe              514 arch/sh/mm/pmb.c static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
pmbe              518 arch/sh/mm/pmb.c 	if (unlikely(!pmbe))
pmbe              522 arch/sh/mm/pmb.c 	__pmb_unmap_entry(pmbe, depth);
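
Lines 489-522 are the matching teardown: starting from the head entry the walk invalidates the hardware slot, flushes the covered range, releases the entry, and follows ->link until the chain ends or the depth budget runs out (the unmap path at line 482 passes NR_PMB_ENTRIES, i.e. "the whole chain"). A stripped-down sketch of that walk, using a placeholder entry type:

    #include <stdlib.h>

    struct entry {
        unsigned long vpn, size;
        struct entry *link;
    };

    static void unmap_chain(struct entry *e, int depth)
    {
        if (!e)
            return;

        do {
            struct entry *next = e->link;

            /*
             * The kernel invalidates the hardware slot and flushes the
             * mapped range here before returning the entry to the pool;
             * free() stands in for that release.
             */
            free(e);
            e = next;
        } while (e && --depth);
    }
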
pmbe              535 arch/sh/mm/pmb.c 		struct pmb_entry *pmbe;
pmbe              540 arch/sh/mm/pmb.c 		pmbe = &pmb_entry_list[i];
pmbe              543 arch/sh/mm/pmb.c 			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
pmbe              544 arch/sh/mm/pmb.c 			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
pmbe              582 arch/sh/mm/pmb.c 		struct pmb_entry *pmbe;
pmbe              624 arch/sh/mm/pmb.c 		pmbe = pmb_alloc(vpn, ppn, flags, i);
pmbe              625 arch/sh/mm/pmb.c 		if (IS_ERR(pmbe)) {
pmbe              630 arch/sh/mm/pmb.c 		raw_spin_lock_irqsave(&pmbe->lock, irqflags);
pmbe              634 arch/sh/mm/pmb.c 				pmbe->size = pmb_sizes[j].size;
pmbe              644 arch/sh/mm/pmb.c 			if (pmb_can_merge(pmbp, pmbe))
pmbe              645 arch/sh/mm/pmb.c 				pmbp->link = pmbe;
pmbe              649 arch/sh/mm/pmb.c 		pmbp = pmbe;
pmbe              651 arch/sh/mm/pmb.c 		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
pmbe              703 arch/sh/mm/pmb.c 		struct pmb_entry *pmbe;
pmbe              708 arch/sh/mm/pmb.c 		pmbe = &pmb_entry_list[i];
pmbe              713 arch/sh/mm/pmb.c 		if (!pmbe->link)
pmbe              720 arch/sh/mm/pmb.c 		if (pmbe->size == SZ_512M)
pmbe              723 arch/sh/mm/pmb.c 		pmb_merge(pmbe);
pmbe              744 arch/sh/mm/pmb.c 		struct pmb_entry *pmbe;
pmbe              750 arch/sh/mm/pmb.c 		pmbe = &pmb_entry_list[i];
pmbe              752 arch/sh/mm/pmb.c 		if (pmbe->vpn != uncached_start)
pmbe              758 arch/sh/mm/pmb.c 		raw_spin_lock_irqsave(&pmbe->lock, flags);
pmbe              760 arch/sh/mm/pmb.c 		pmbe->size = SZ_16M;
pmbe              761 arch/sh/mm/pmb.c 		pmbe->flags &= ~PMB_SZ_MASK;
pmbe              762 arch/sh/mm/pmb.c 		pmbe->flags |= pmb_size_to_flags(pmbe->size);
pmbe              764 arch/sh/mm/pmb.c 		uncached_resize(pmbe->size);
pmbe              766 arch/sh/mm/pmb.c 		__set_pmb_entry(pmbe);
pmbe              768 arch/sh/mm/pmb.c 		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
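
Lines 744-768 shrink the entry that backs the uncached window down to 16M: its size is rewritten, the size bits are masked out of its flags and replaced via pmb_size_to_flags(), the uncached window itself is resized to match, and the slot is re-programmed. The sketch below shows only the size/flag rewrite; the size-to-flag encodings are illustrative placeholders, not the SH-4A values.

    #define SZ_16M  0x01000000UL
    #define SZ_64M  0x04000000UL
    #define SZ_128M 0x08000000UL
    #define SZ_512M 0x20000000UL

    /* Placeholder size encodings; the hardware uses its own bit layout. */
    #define FL_SZ_16M   0x0UL
    #define FL_SZ_64M   0x1UL
    #define FL_SZ_128M  0x2UL
    #define FL_SZ_512M  0x3UL
    #define FL_SZ_MASK  0x3UL

    struct entry {
        unsigned long vpn, ppn, size, flags;
    };

    static unsigned long size_to_flags(unsigned long size)
    {
        switch (size) {
        case SZ_64M:    return FL_SZ_64M;
        case SZ_128M:   return FL_SZ_128M;
        case SZ_512M:   return FL_SZ_512M;
        case SZ_16M:
        default:        return FL_SZ_16M;
        }
    }

    static void shrink_to_16m(struct entry *e)
    {
        e->size   = SZ_16M;
        e->flags &= ~FL_SZ_MASK;
        e->flags |= size_to_flags(e->size);
        /* the kernel then resizes the uncached window and rewrites the slot */
    }
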
pmbe              873 arch/sh/mm/pmb.c 	struct pmb_entry *pmbe;
pmbe              880 arch/sh/mm/pmb.c 			pmbe = &pmb_entry_list[i];
pmbe              881 arch/sh/mm/pmb.c 			set_pmb_entry(pmbe);