/linux-4.4.14/arch/powerpc/kvm/ |
D | book3s_32_mmu.c |
    81  static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
    87  static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)  in find_sr() argument
    89  return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);  in find_sr()
    92  static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,  in kvmppc_mmu_book3s_32_ea_to_vp() argument
    98  if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false))  in kvmppc_mmu_book3s_32_ea_to_vp()
    101  kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_book3s_32_ea_to_vp()
    102  return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);  in kvmppc_mmu_book3s_32_ea_to_vp()
    111  u32 sre, gva_t eaddr,  in kvmppc_mmu_book3s_32_get_pteg() argument
    118  page = (eaddr & 0x0FFFFFFF) >> 12;  in kvmppc_mmu_book3s_32_get_pteg()
    129  kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg,  in kvmppc_mmu_book3s_32_get_pteg()
    [all …]
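The ea_to_vp path above shows the Book3S-32 split of an effective address: bits 28..31 pick the segment register (find_sr), bits 12..27 form the page index that is combined with the VSID, and the low 12 bits stay as the byte offset. A minimal userspace sketch of that decomposition (the vsid value is illustrative; the kernel reads it from the segment register):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t eaddr = 0xc0012345;            /* example effective address */
            uint32_t sr    = (eaddr >> 28) & 0xf;   /* segment register index, as in find_sr() */
            uint64_t vsid  = 0x123;                 /* illustrative VSID for this segment */
            /* virtual page: VSID in the high bits, EA bits 12..27 below it,
             * mirroring kvmppc_mmu_book3s_32_ea_to_vp() */
            uint64_t vp    = (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);

            printf("sr=%u vp=0x%llx offset=0x%x\n",
                   sr, (unsigned long long)vp, eaddr & 0xfff);
            return 0;
    }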
|
D | book3s_64_mmu.c |
    46  gva_t eaddr)  in kvmppc_mmu_book3s_64_find_slbe() argument
    49  u64 esid = GET_ESID(eaddr);  in kvmppc_mmu_book3s_64_find_slbe()
    50  u64 esid_1t = GET_ESID_1T(eaddr);  in kvmppc_mmu_book3s_64_find_slbe()
    66  eaddr, esid, esid_1t);  in kvmppc_mmu_book3s_64_find_slbe()
    90  static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)  in kvmppc_slb_calc_vpn() argument
    92  eaddr &= kvmppc_slb_offset_mask(slb);  in kvmppc_slb_calc_vpn()
    94  return (eaddr >> VPN_SHIFT) |  in kvmppc_slb_calc_vpn()
    98  static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,  in kvmppc_mmu_book3s_64_ea_to_vp() argument
    103  slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);  in kvmppc_mmu_book3s_64_ea_to_vp()
    107  return kvmppc_slb_calc_vpn(slb, eaddr);  in kvmppc_mmu_book3s_64_ea_to_vp()
    [all …]
|
D | trace_pr.h |
    40  __field( unsigned long, eaddr )
    50  __entry->eaddr = orig_pte->eaddr;
    58  __entry->flag_w, __entry->flag_x, __entry->eaddr,
    71  __field( ulong, eaddr )
    80  __entry->eaddr = pte->pte.eaddr;
    89  __entry->host_vpn, __entry->pfn, __entry->eaddr,
    100  __field( ulong, eaddr )
    109  __entry->eaddr = pte->pte.eaddr;
    118  __entry->host_vpn, __entry->pfn, __entry->eaddr,
|
D | book3s_32_mmu_host.c |
    71  asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");  in kvmppc_mmu_invalidate_pte()
    118  static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,  in kvmppc_mmu_get_pteg() argument
    124  page = (eaddr & ~ESID_MASK) >> 12;  in kvmppc_mmu_get_pteg()
    150  u32 eaddr = orig_pte->eaddr;  in kvmppc_mmu_map_page() local
    170  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    173  kvmppc_mmu_map_segment(vcpu, eaddr);  in kvmppc_mmu_map_page()
    180  ((eaddr & ~ESID_MASK) >> VPN_SHIFT);  in kvmppc_mmu_map_page()
    188  pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);  in kvmppc_mmu_map_page()
    206  pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |  in kvmppc_mmu_map_page()
    255  orig_pte->eaddr, (ulong)pteg, vpn,  in kvmppc_mmu_map_page()
    [all …]
|
D | e500_mmu.c |
    84  gva_t eaddr, int tlbsel, unsigned int pid, int as)  in kvmppc_e500_tlb_index() argument
    91  set_base = gtlb0_set_base(vcpu_e500, eaddr);  in kvmppc_e500_tlb_index()
    94  if (eaddr < vcpu_e500->tlb1_min_eaddr ||  in kvmppc_e500_tlb_index()
    95  eaddr > vcpu_e500->tlb1_max_eaddr)  in kvmppc_e500_tlb_index()
    107  if (eaddr < get_tlb_eaddr(tlbe))  in kvmppc_e500_tlb_index()
    110  if (eaddr > get_tlb_end(tlbe))  in kvmppc_e500_tlb_index()
    130  gva_t eaddr, int as)  in kvmppc_e500_deliver_tlb_miss() argument
    146  vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)  in kvmppc_e500_deliver_tlb_miss()
    158  gva_t eaddr;  in kvmppc_recalc_tlb1map_range() local
    172  eaddr = get_tlb_eaddr(tlbe);  in kvmppc_recalc_tlb1map_range()
    [all …]
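kvmppc_e500_tlb_index() first rejects addresses outside the cached tlb1_min_eaddr..tlb1_max_eaddr window, then checks each guest TLB entry's start (get_tlb_eaddr) and end (get_tlb_end) against the faulting EA. A small sketch of that per-entry range test (the entry layout here is illustrative, not the real tlbe format):

    #include <stdbool.h>

    struct gtlb_entry {
            unsigned long eaddr;   /* first byte covered by the entry */
            unsigned long end;     /* last byte covered by the entry */
    };

    /* does this guest TLB entry cover the effective address?  Mirrors the
     * get_tlb_eaddr()/get_tlb_end() comparisons in kvmppc_e500_tlb_index(). */
    bool gtlb_entry_covers(const struct gtlb_entry *e, unsigned long eaddr)
    {
            return eaddr >= e->eaddr && eaddr <= e->end;
    }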
|
D | book3s_64_mmu_host.c |
    118  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
    121  ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);  in kvmppc_mmu_map_page()
    127  vsid, orig_pte->eaddr);  in kvmppc_mmu_map_page()
    133  vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);  in kvmppc_mmu_map_page()
    222  vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_unmap_page()
    309  int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)  in kvmppc_mmu_map_segment() argument
    312  u64 esid = eaddr >> SID_SHIFT;  in kvmppc_mmu_map_segment()
    313  u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;  in kvmppc_mmu_map_segment()
    320  slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);  in kvmppc_mmu_map_segment()
|
D | book3s_mmu_hpte.c |
    37  static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)  in kvmppc_mmu_hash_pte() argument
    39  return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);  in kvmppc_mmu_hash_pte()
    42  static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)  in kvmppc_mmu_hash_pte_long() argument
    44  return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,  in kvmppc_mmu_hash_pte_long()
    77  index = kvmppc_mmu_hash_pte(pte->pte.eaddr);  in kvmppc_mmu_hpte_cache_map()
    81  index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);  in kvmppc_mmu_hpte_cache_map()
    174  if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)  in kvmppc_mmu_pte_flush_page()
    194  if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)  in kvmppc_mmu_pte_flush_long()
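kvmppc_mmu_hash_pte() buckets cached shadow PTEs by hashing the guest effective address with hash_64(); the _long variant masks the EA down to a larger region first. A rough userspace stand-in (the shift and bucket-bit values are assumptions, and hash_64_sketch() only approximates the kernel's hash_64()):

    #include <stdint.h>

    #define PTE_GRANULE_SHIFT  12   /* assumption: stand-in for PTE_SIZE */
    #define HASH_BITS_PTE      13   /* assumption: stand-in for HPTEG_HASH_BITS_PTE */

    /* simplified multiplicative hash in the spirit of the kernel's hash_64() */
    uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
    {
            return (val * 0x61c8864680b583ebULL) >> (64 - bits);
    }

    /* bucket index for a shadow PTE keyed on its guest EA,
     * mirroring kvmppc_mmu_hash_pte() above */
    uint64_t hash_pte(uint64_t eaddr)
    {
            return hash_64_sketch(eaddr >> PTE_GRANULE_SHIFT, HASH_BITS_PTE);
    }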
|
D | e500_mmu_host.c |
    107  static u32 get_host_mas0(unsigned long eaddr)  in get_host_mas0() argument
    117  asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));  in get_host_mas0()
    588  void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,  in kvmppc_mmu_map() argument
    608  &priv->ref, eaddr, &stlbe);  in kvmppc_mmu_map()
    615  kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,  in kvmppc_mmu_map()
    633  hva_t eaddr;  in kvmppc_load_last_inst() local
    710  eaddr = (unsigned long)kmap_atomic(page);  in kvmppc_load_last_inst()
    711  *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));  in kvmppc_load_last_inst()
    712  kunmap_atomic((u32 *)eaddr);  in kvmppc_load_last_inst()
|
D | booke.c |
    1227  unsigned long eaddr = vcpu->arch.fault_dear;  in kvmppc_handle_exit() local
    1234  (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {  in kvmppc_handle_exit()
    1244  gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);  in kvmppc_handle_exit()
    1258  gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);  in kvmppc_handle_exit()
    1268  kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);  in kvmppc_handle_exit()
    1275  vcpu->arch.vaddr_accessed = eaddr;  in kvmppc_handle_exit()
    1285  unsigned long eaddr = vcpu->arch.pc;  in kvmppc_handle_exit() local
    1293  gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);  in kvmppc_handle_exit()
    1306  gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);  in kvmppc_handle_exit()
    1316  kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);  in kvmppc_handle_exit()
    [all …]
|
D | e500mc.c |
    63  gva_t eaddr;  in kvmppc_e500_tlbil_one() local
    72  eaddr = get_tlb_eaddr(gtlbe);  in kvmppc_e500_tlbil_one()
    79  asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));  in kvmppc_e500_tlbil_one()
|
D | book3s.c |
    395  int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,  in kvmppc_xlate() argument
    404  r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);  in kvmppc_xlate()
    406  pte->eaddr = eaddr;  in kvmppc_xlate()
    407  pte->raddr = eaddr & KVM_PAM;  in kvmppc_xlate()
    408  pte->vpage = VSID_REAL | eaddr >> 12;  in kvmppc_xlate()
    417  ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))  in kvmppc_xlate()
|
D | e500.c |
    242  u32 val, eaddr;  in kvmppc_e500_tlbil_one() local
    274  eaddr = get_tlb_eaddr(gtlbe);  in kvmppc_e500_tlbil_one()
    279  asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));  in kvmppc_e500_tlbil_one()
|
D | powerpc.c |
    312  int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,  in kvmppc_st() argument
    321  r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,  in kvmppc_st()
    326  *eaddr = pte.raddr;  in kvmppc_st()
    336  magic += pte.eaddr & 0xfff;  in kvmppc_st()
    348  int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,  in kvmppc_ld() argument
    357  rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,  in kvmppc_ld()
    362  *eaddr = pte.raddr;  in kvmppc_ld()
    375  magic += pte.eaddr & 0xfff;  in kvmppc_ld()
|
D | book3s_64_mmu_hv.c |
    279  gva_t eaddr)  in kvmppc_mmu_book3s_hv_find_slbe() argument
    293  if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)  in kvmppc_mmu_book3s_hv_find_slbe()
    308  static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,  in kvmppc_mmu_book3s_64_hv_xlate() argument
    322  slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);  in kvmppc_mmu_book3s_64_hv_xlate()
    333  index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,  in kvmppc_mmu_book3s_64_hv_xlate()
    346  gpte->eaddr = eaddr;  in kvmppc_mmu_book3s_64_hv_xlate()
    347  gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);  in kvmppc_mmu_book3s_64_hv_xlate()
    369  gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);  in kvmppc_mmu_book3s_64_hv_xlate()
|
D | book3s_pr.c |
    531  ulong eaddr, int vec)  in kvmppc_handle_pagefault() argument
    550  page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);  in kvmppc_handle_pagefault()
    555  pte.raddr = eaddr & KVM_PAM;  in kvmppc_handle_pagefault()
    556  pte.eaddr = eaddr;  in kvmppc_handle_pagefault()
    557  pte.vpage = eaddr >> 12;  in kvmppc_handle_pagefault()
    572  vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);  in kvmppc_handle_pagefault()
    638  vcpu->arch.vaddr_accessed = pte.eaddr;  in kvmppc_handle_pagefault()
|
D | book3s_hv_rm_mmu.c |
    835  long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,  in kvmppc_hv_find_lock_hpte() argument
    864  hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;  in kvmppc_hv_find_lock_hpte()
    866  avpn |= (eaddr & somask) >> 16;  in kvmppc_hv_find_lock_hpte()
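kvmppc_hv_find_lock_hpte() uses the classic hashed-page-table primary hash: the VSID is XORed with the page-aligned offset of the EA within its segment, then masked to the HPT size. A standalone sketch with illustrative mask values (the real ones come from the SLB entry and the allocated HPT):

    /* illustrative constants for a 256 MB segment and 4 KiB pages */
    #define SEG_OFFSET_MASK  0x0fffffffUL
    #define PAGE_SHIFT_4K    12
    #define HPT_MASK         0x7ffUL        /* example: 2048 PTE groups */

    unsigned long hpt_primary_hash(unsigned long vsid, unsigned long eaddr)
    {
            /* same shape as the hash computation in kvmppc_hv_find_lock_hpte() */
            return (vsid ^ ((eaddr & SEG_OFFSET_MASK) >> PAGE_SHIFT_4K)) & HPT_MASK;
    }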
|
D | book3s_paired_singles.c |
    166  static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)  in kvmppc_inject_pf() argument
    174  kvmppc_set_dar(vcpu, eaddr);  in kvmppc_inject_pf()
|
/linux-4.4.14/arch/sh/mm/ |
D | cache-sh5.c |
    34  sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,  in sh64_setup_dtlb_cache_slot() argument
    38  sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);  in sh64_setup_dtlb_cache_slot()
    87  static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)  in sh64_icache_inv_user_page() argument
    95  addr = eaddr;  in sh64_icache_inv_user_page()
    159  unsigned long eaddr;  in sh64_icache_inv_user_page_range() local
    188  eaddr = aligned_start;  in sh64_icache_inv_user_page_range()
    189  while (eaddr < vma_end) {  in sh64_icache_inv_user_page_range()
    190  sh64_icache_inv_user_page(vma, eaddr);  in sh64_icache_inv_user_page_range()
    191  eaddr += PAGE_SIZE;  in sh64_icache_inv_user_page_range()
    244  unsigned long long eaddr, eaddr0, eaddr1;  in sh64_dcache_purge_sets() local
    [all …]
|
D | tlb-sh5.c |
    120  void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,  in sh64_setup_tlb_slot() argument
    125  pteh = neff_sign_extend(eaddr);  in sh64_setup_tlb_slot()
|
/linux-4.4.14/arch/blackfin/kernel/cplb-nompu/ |
D | cplbinit.c |
    135  dcplb_bounds[i_d].eaddr = uncached_end;  in generate_cplb_tables_all()
    137  dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);  in generate_cplb_tables_all()
    141  dcplb_bounds[i_d].eaddr = _ramend;  in generate_cplb_tables_all()
    146  dcplb_bounds[i_d].eaddr = physical_mem_end;  in generate_cplb_tables_all()
    151  dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;  in generate_cplb_tables_all()
    154  dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;  in generate_cplb_tables_all()
    157  dcplb_bounds[i_d].eaddr = BOOT_ROM_START;  in generate_cplb_tables_all()
    160  dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH;  in generate_cplb_tables_all()
    164  dcplb_bounds[i_d].eaddr = L2_START;  in generate_cplb_tables_all()
    167  dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;  in generate_cplb_tables_all()
    [all …]
|
D | cplbmgr.c |
    101  unsigned long i_data, base, addr1, eaddr;  in icplb_miss() local
    110  eaddr = icplb_bounds[idx].eaddr;  in icplb_miss()
    111  if (addr < eaddr)  in icplb_miss()
    113  base = eaddr;  in icplb_miss()
    126  if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {  in icplb_miss()
    148  unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags;  in dcplb_miss() local
    157  eaddr = dcplb_bounds[idx].eaddr;  in dcplb_miss()
    158  if (addr < eaddr)  in dcplb_miss()
    160  base = eaddr;  in dcplb_miss()
    184  if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) {  in dcplb_miss()
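icplb_miss()/dcplb_miss() walk a table of regions that is sorted by end address (the eaddr member): the first entry whose eaddr exceeds the faulting address is the covering region, and the previous entry's eaddr doubles as its base. A self-contained sketch of that lookup (the table contents are illustrative):

    #include <stddef.h>

    struct cplb_bound {
            unsigned long eaddr;    /* end of this region (exclusive) */
            unsigned long data;     /* CPLB flags for the region */
    };

    /* regions sorted by end address, as generate_cplb_tables_all() builds them */
    static const struct cplb_bound bounds[] = {
            { 0x02000000, 0x1 },    /* cached SDRAM   (illustrative) */
            { 0x20000000, 0x2 },    /* uncached SDRAM (illustrative) */
            { 0x20400000, 0x3 },    /* async bank     (illustrative) */
    };

    /* return the index of the region covering addr, or -1; *base_out gets the
     * region base, computed from the previous entry's end as in dcplb_miss() */
    int find_region(unsigned long addr, unsigned long *base_out)
    {
            unsigned long base = 0;
            size_t i;

            for (i = 0; i < sizeof(bounds) / sizeof(bounds[0]); i++) {
                    if (addr < bounds[i].eaddr) {
                            *base_out = base;
                            return (int)i;
                    }
                    base = bounds[i].eaddr;
            }
            return -1;
    }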
|
/linux-4.4.14/arch/unicore32/mm/ |
D | alignment.c |
    292  unsigned long eaddr, newaddr;  in do_alignment_ldmstm() local
    303  newaddr = eaddr = regs->uregs[rn];  in do_alignment_ldmstm()
    309  eaddr = newaddr;  in do_alignment_ldmstm()
    312  eaddr += 4;  in do_alignment_ldmstm()
    318  if (addr != eaddr) {  in do_alignment_ldmstm()
    321  instruction_pointer(regs), instr, addr, eaddr);  in do_alignment_ldmstm()
    335  uregs[rd + reg_correction], eaddr);  in do_alignment_ldmstm()
    338  uregs[rd + reg_correction], eaddr);  in do_alignment_ldmstm()
    339  eaddr += 4;  in do_alignment_ldmstm()
|
/linux-4.4.14/arch/arm/mm/ |
D | alignment.c |
    506  unsigned long eaddr, newaddr;  in do_alignment_ldmstm() local
    520  newaddr = eaddr = regs->uregs[rn];  in do_alignment_ldmstm()
    526  eaddr = newaddr;  in do_alignment_ldmstm()
    529  eaddr += 4;  in do_alignment_ldmstm()
    543  if (addr != eaddr) {  in do_alignment_ldmstm()
    546  instruction_pointer(regs), instr, addr, eaddr);  in do_alignment_ldmstm()
    558  get32t_unaligned_check(val, eaddr);  in do_alignment_ldmstm()
    561  put32t_unaligned_check(regs->uregs[rd], eaddr);  in do_alignment_ldmstm()
    562  eaddr += 4;  in do_alignment_ldmstm()
    571  get32_unaligned_check(val, eaddr);  in do_alignment_ldmstm()
    [all …]
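do_alignment_ldmstm() recomputes the transfer window of a misaligned LDM/STM: starting from the base register's value, eaddr advances by one word per register in the instruction's register list, and the result is cross-checked against the reported fault address. A compact sketch of that range arithmetic (register-list decoding only, no writeback or addressing-mode handling):

    #include <stdint.h>

    /* number of registers named in bits [15:0] of an LDM/STM encoding */
    unsigned int ldm_reg_count(uint32_t instr)
    {
            unsigned int i, n = 0;

            for (i = 0; i < 16; i++)
                    n += (instr >> i) & 1;
            return n;
    }

    /* one word is transferred per listed register, so the window ends here;
     * this mirrors the repeated "eaddr += 4" steps in do_alignment_ldmstm() */
    unsigned long ldm_end_addr(unsigned long eaddr, uint32_t instr)
    {
            return eaddr + 4UL * ldm_reg_count(instr);
    }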
|
/linux-4.4.14/fs/freevxfs/ |
D | vxfs_olt.c |
    82  char *oaddr, *eaddr;  in vxfs_read_olt() local
    106  eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize);  in vxfs_read_olt()
    108  while (oaddr < eaddr) {  in vxfs_read_olt()
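vxfs_read_olt() computes eaddr as the end of the OLT buffer (start plus vsi_oltsize blocks) and walks records until the cursor reaches it. A generic sketch of that end-pointer walk (the record layout and size field are illustrative, not the on-disk OLT format):

    #include <stddef.h>

    struct olt_rec_sketch {
            unsigned short size;    /* total size of this record in bytes */
            /* payload follows */
    };

    /* visit variable-length records in [buf, buf + len), the pattern behind
     * the oaddr/eaddr loop in vxfs_read_olt() */
    void walk_records(char *buf, size_t len, void (*visit)(struct olt_rec_sketch *))
    {
            char *oaddr = buf;
            char *eaddr = buf + len;

            while (oaddr < eaddr) {
                    struct olt_rec_sketch *rp = (struct olt_rec_sketch *)oaddr;

                    if (rp->size == 0)      /* guard against a corrupt record */
                            break;
                    visit(rp);
                    oaddr += rp->size;
            }
    }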
|
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_vm.c |
    1004  uint64_t eaddr;  in amdgpu_vm_bo_map() local
    1013  eaddr = saddr + size - 1;  in amdgpu_vm_bo_map()
    1014  if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))  in amdgpu_vm_bo_map()
    1017  last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;  in amdgpu_vm_bo_map()
    1025  eaddr /= AMDGPU_GPU_PAGE_SIZE;  in amdgpu_vm_bo_map()
    1028  it = interval_tree_iter_first(&vm->va, saddr, eaddr);  in amdgpu_vm_bo_map()
    1035  "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,  in amdgpu_vm_bo_map()
    1049  mapping->it.last = eaddr;  in amdgpu_vm_bo_map()
    1063  eaddr >>= amdgpu_vm_block_size;  in amdgpu_vm_bo_map()
    1065  BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));  in amdgpu_vm_bo_map()
    [all …]
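amdgpu_vm_bo_map() turns a byte range into an inclusive GPU-page range: eaddr = saddr + size - 1 marks the last mapped byte, and both bounds are then divided by AMDGPU_GPU_PAGE_SIZE before the interval-tree overlap check. A small sketch of that conversion (the page size is an assumed constant):

    #include <stdint.h>

    #define GPU_PAGE_SIZE 4096ULL   /* assumption: stand-in for AMDGPU_GPU_PAGE_SIZE */

    struct page_range {
            uint64_t first;         /* first GPU page, inclusive */
            uint64_t last;          /* last GPU page, inclusive */
    };

    /* mirror the saddr/eaddr handling in amdgpu_vm_bo_map(): the end address is
     * the last byte of the mapping, so both page bounds stay inclusive */
    struct page_range va_to_page_range(uint64_t saddr, uint64_t size)
    {
            uint64_t eaddr = saddr + size - 1;
            struct page_range r = {
                    .first = saddr / GPU_PAGE_SIZE,
                    .last  = eaddr / GPU_PAGE_SIZE,
            };
            return r;
    }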
|
/linux-4.4.14/arch/powerpc/include/asm/ |
D | kvm_book3s.h |
    127  extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
    128  extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
    133  extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
    148  extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
|
D | kvm_ppc.h |
    88  extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    90  extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
    111  extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    112  extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
    114  gva_t eaddr);
    117  extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
|
D | kvm_host.h |
    339  ulong eaddr;  member
    358  int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
    363  u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
|
/linux-4.4.14/arch/blackfin/include/asm/ |
D | cplbinit.h | 27 unsigned long eaddr; /* End of this region. */ member
|
/linux-4.4.14/arch/sh/include/asm/ |
D | tlb_64.h | 59 void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
|
/linux-4.4.14/drivers/net/ethernet/amd/ |
D | ni65.h | 91 unsigned char eaddr[6]; member
|
D | ni65.c | 580 p->ib.eaddr[i] = daddr[i]; in ni65_init_lance()
|
/linux-4.4.14/lib/ |
D | debugobjects.c |
    668  unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;  in __debug_check_no_obj_freed() local
    678  eaddr = saddr + size;  in __debug_check_no_obj_freed()
    680  chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));  in __debug_check_no_obj_freed()
    692  if (oaddr < saddr || oaddr >= eaddr)  in __debug_check_no_obj_freed()
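__debug_check_no_obj_freed() covers the freed range [saddr, eaddr) with fixed-size chunks, rounding the count up from a chunk-aligned start (paddr). A sketch of that round-up (the chunk size is an assumed stand-in, and the kernel performs the final shift on the line after the match shown):

    /* assumptions: stand-ins for ODEBUG_CHUNK_SIZE / ODEBUG_CHUNK_SHIFT */
    #define CHUNK_SIZE   16UL
    #define CHUNK_SHIFT  4

    /* number of chunks needed to cover [saddr, eaddr) when the scan starts
     * at saddr aligned down to a chunk boundary */
    unsigned long chunk_count(unsigned long saddr, unsigned long eaddr)
    {
            unsigned long paddr = saddr & ~(CHUNK_SIZE - 1);

            return ((eaddr - paddr) + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
    }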
|
/linux-4.4.14/arch/mips/txx9/generic/ |
D | setup.c |
    378  unsigned long eaddr = __pa_symbol(&_text);  in prom_free_prom_memory() local
    380  if (saddr < eaddr)  in prom_free_prom_memory()
    381  free_init_pages("prom memory", saddr, eaddr);  in prom_free_prom_memory()
|
/linux-4.4.14/drivers/net/ethernet/broadcom/ |
D | sb1250-mac.c |
    2192  unsigned char *eaddr;  in sbmac_init() local
    2200  eaddr = sc->sbm_hwaddr;  in sbmac_init()
    2210  eaddr[i] = (uint8_t) (ea_reg & 0xFF);  in sbmac_init()
    2215  dev->dev_addr[i] = eaddr[i];  in sbmac_init()
    2287  dev->name, base, eaddr);  in sbmac_init()
|
/linux-4.4.14/tools/perf/util/ |
D | probe-finder.c |
    600  Dwarf_Addr eaddr, highaddr;  in convert_to_trace_point() local
    605  if (dwarf_entrypc(sp_die, &eaddr) != 0) {  in convert_to_trace_point()
    630  eaddr = sym.st_value;  in convert_to_trace_point()
    632  tp->offset = (unsigned long)(paddr - eaddr);  in convert_to_trace_point()
    640  if (eaddr != paddr) {  in convert_to_trace_point()
|
/linux-4.4.14/kernel/ |
D | kexec_core.c |
    351  unsigned long pfn, epfn, addr, eaddr;  in kimage_alloc_normal_control_pages() local
    359  eaddr = epfn << PAGE_SHIFT;  in kimage_alloc_normal_control_pages()
    361  kimage_is_destination_range(image, addr, eaddr)) {  in kimage_alloc_normal_control_pages()
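kimage_alloc_normal_control_pages() converts the allocated page-frame range back to byte addresses (addr = pfn << PAGE_SHIFT, eaddr = epfn << PAGE_SHIFT) before testing it against the destination ranges. A sketch of that pfn-to-byte conversion (PAGE_SHIFT assumed to be 12):

    #define PAGE_SHIFT_SKETCH 12    /* assumption: 4 KiB pages */

    struct byte_range {
            unsigned long start;    /* first byte, inclusive */
            unsigned long end;      /* one past the last byte */
    };

    /* convert a [pfn, epfn) page-frame range to a byte range, as done before
     * the kimage_is_destination_range() check above */
    struct byte_range pfns_to_bytes(unsigned long pfn, unsigned long epfn)
    {
            struct byte_range r = {
                    .start = pfn << PAGE_SHIFT_SKETCH,
                    .end   = epfn << PAGE_SHIFT_SKETCH,
            };
            return r;
    }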
|
/linux-4.4.14/drivers/staging/dgap/ |
D | dgap.c |
    1820  struct ev_t __iomem *eaddr;  in dgap_event() local
    1839  eaddr = (struct ev_t __iomem *)(vaddr + EVBUF);  in dgap_event()
    1842  head = readw(&eaddr->ev_head);  in dgap_event()
    1843  tail = readw(&eaddr->ev_tail);  in dgap_event()
    1985  writew(tail, &eaddr->ev_tail);  in dgap_event()
    2015  struct ev_t __iomem *eaddr;  in dgap_poll_tasklet() local
    2029  eaddr = (struct ev_t __iomem *)(vaddr + EVBUF);  in dgap_poll_tasklet()
    2032  head = readw(&eaddr->ev_head);  in dgap_poll_tasklet()
    2033  tail = readw(&eaddr->ev_tail);  in dgap_poll_tasklet()
|