Searched refs:walker (Results 1 - 42 of 42) sorted by relevance

/linux-4.4.14/arch/x86/kvm/
paging_tmpl.h:92 * table walker.
201 struct guest_walker *walker, update_accessed_dirty_bits()
214 for (level = walker->max_level; level >= walker->level; --level) { update_accessed_dirty_bits()
215 pte = orig_pte = walker->ptes[level - 1]; update_accessed_dirty_bits()
216 table_gfn = walker->table_gfn[level - 1]; update_accessed_dirty_bits()
217 ptep_user = walker->ptep_user[level - 1]; update_accessed_dirty_bits()
223 if (level == walker->level && write_fault && update_accessed_dirty_bits()
244 if (unlikely(!walker->pte_writable[level - 1])) update_accessed_dirty_bits()
252 walker->ptes[level - 1] = pte; update_accessed_dirty_bits()
260 static int FNAME(walk_addr_generic)(struct guest_walker *walker, walk_addr_generic() argument
280 walker->level = mmu->root_level; walk_addr_generic()
284 if (walker->level == PT32E_ROOT_LEVEL) { walk_addr_generic()
286 trace_kvm_mmu_paging_element(pte, walker->level); walk_addr_generic()
289 --walker->level; walk_addr_generic()
292 walker->max_level = walker->level; walk_addr_generic()
297 ++walker->level; walk_addr_generic()
304 --walker->level; walk_addr_generic()
306 index = PT_INDEX(addr, walker->level); walk_addr_generic()
311 walker->table_gfn[walker->level - 1] = table_gfn; walk_addr_generic()
312 walker->pte_gpa[walker->level - 1] = pte_gpa; walk_addr_generic()
316 &walker->fault); walk_addr_generic()
334 &walker->pte_writable[walker->level - 1]); walk_addr_generic()
341 walker->ptep_user[walker->level - 1] = ptep_user; walk_addr_generic()
343 trace_kvm_mmu_paging_element(pte, walker->level); walk_addr_generic()
348 if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) { walk_addr_generic()
356 walker->ptes[walker->level - 1] = pte; walk_addr_generic()
357 } while (!is_last_gpte(mmu, walker->level, pte)); walk_addr_generic()
364 gfn = gpte_to_gfn_lvl(pte, walker->level); walk_addr_generic()
365 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; walk_addr_generic()
367 if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36()) walk_addr_generic()
370 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); walk_addr_generic()
374 walker->gfn = real_gpa >> PAGE_SHIFT; walk_addr_generic()
388 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault); walk_addr_generic()
395 walker->pt_access = pt_access; walk_addr_generic()
396 walker->pte_access = pte_access; walk_addr_generic()
407 walker->fault.vector = PF_VECTOR; walk_addr_generic()
408 walker->fault.error_code_valid = true; walk_addr_generic()
409 walker->fault.error_code = errcode; walk_addr_generic()
429 walker->fault.address = addr; walk_addr_generic()
430 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; walk_addr_generic()
432 trace_kvm_mmu_walker_error(walker->fault.error_code); walk_addr_generic()
436 static int FNAME(walk_addr)(struct guest_walker *walker, walk_addr() argument
439 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr, walk_addr()
444 static int FNAME(walk_addr_nested)(struct guest_walker *walker, walk_addr_nested() argument
448 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, walk_addr_nested()
657 struct guest_walker *walker, int user_fault, is_self_change_mapping()
661 gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1); is_self_change_mapping()
664 if (!(walker->pte_access & ACC_WRITE_MASK || is_self_change_mapping()
668 for (level = walker->level; level <= walker->max_level; level++) { is_self_change_mapping()
669 gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1]; is_self_change_mapping()
697 struct guest_walker walker; page_fault() local
727 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code); page_fault()
735 inject_page_fault(vcpu, &walker.fault); page_fault()
743 &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable); page_fault()
745 if (walker.level >= PT_DIRECTORY_LEVEL && !is_self_change_mapping) { page_fault()
746 level = mapping_level(vcpu, walker.gfn, &force_pt_level); page_fault()
748 level = min(walker.level, level); page_fault()
749 walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1); page_fault()
757 if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault, page_fault()
762 walker.gfn, pfn, walker.pte_access, &r)) page_fault()
769 if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) && page_fault()
772 walker.pte_access |= ACC_WRITE_MASK; page_fault()
773 walker.pte_access &= ~ACC_USER_MASK; page_fault()
782 walker.pte_access &= ~ACC_EXEC_MASK; page_fault()
792 transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level); page_fault()
793 r = FNAME(fetch)(vcpu, addr, &walker, write_fault, page_fault()
877 struct guest_walker walker; gva_to_gpa() local
881 r = FNAME(walk_addr)(&walker, vcpu, vaddr, access); gva_to_gpa()
884 gpa = gfn_to_gpa(walker.gfn); gva_to_gpa()
887 *exception = walker.fault; gva_to_gpa()
897 struct guest_walker walker; gva_to_gpa_nested() local
901 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); gva_to_gpa_nested()
904 gpa = gfn_to_gpa(walker.gfn); gva_to_gpa_nested()
907 *exception = walker.fault; gva_to_gpa_nested()
199 update_accessed_dirty_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, struct guest_walker *walker, int write_fault) update_accessed_dirty_bits() argument
656 is_self_change_mapping(struct kvm_vcpu *vcpu, struct guest_walker *walker, int user_fault, bool *write_fault_to_shadow_pgtable) is_self_change_mapping() argument
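The paging_tmpl.h hits above are KVM's software guest page-table walker: FNAME(walk_addr_generic) starts at mmu->root_level, indexes one table per level with PT_INDEX(addr, level), records each level's table_gfn, pte_gpa, and user pointer in the guest_walker, and stops at the last gpte before composing the final gfn or filling in walker->fault. The following is a minimal sketch of that multi-level descent; every type, constant, and the read_table() helper is a hypothetical stand-in, not the KVM definitions, and large pages and accessed/dirty updates are omitted.

/* Simplified sketch of a multi-level guest page-table walk in the style of
 * FNAME(walk_addr_generic) above. All names here are illustrative only. */
#include <stdbool.h>
#include <stdint.h>

#define LEVELS        4
#define PTES_PER_PAGE 512
#define PAGE_SHIFT    12

struct walk_result {
	int      level;              /* level where the walk stopped */
	uint64_t ptes[LEVELS];       /* raw entry seen at each level */
	uint64_t gfn;                /* final guest frame number */
};

static bool pte_present(uint64_t pte)   { return pte & 1; }
static uint64_t pte_frame(uint64_t pte) { return pte >> PAGE_SHIFT; }

static unsigned int pt_index(uint64_t addr, int level)
{
	/* nine index bits per level, level 1 closest to the data page */
	return (addr >> (PAGE_SHIFT + 9 * (level - 1))) & (PTES_PER_PAGE - 1);
}

extern const uint64_t *read_table(uint64_t frame);   /* assumed guest-memory read */

static int walk_addr(struct walk_result *w, uint64_t root_frame, uint64_t addr)
{
	uint64_t frame = root_frame;

	for (w->level = LEVELS; w->level >= 1; --w->level) {
		uint64_t pte = read_table(frame)[pt_index(addr, w->level)];

		w->ptes[w->level - 1] = pte;
		if (!pte_present(pte))
			return -1;           /* not-present fault at this level */
		frame = pte_frame(pte);
		if (w->level == 1)
			break;               /* last-level entry reached */
	}
	w->gfn = frame;
	return 0;
}

Recording every intermediate entry is what allows update_accessed_dirty_bits() above to revisit walker->ptes[] from max_level down to the terminal level and mark the guest entries afterwards.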
/linux-4.4.14/drivers/pci/host/
pcie-hisi.c:54 void *walker = &reg_val; hisi_pcie_cfg_read() local
56 walker += (where & 0x3); hisi_pcie_cfg_read()
61 *val = *(u8 __force *) walker; hisi_pcie_cfg_read()
63 *val = *(u16 __force *) walker; hisi_pcie_cfg_read()
79 void *walker = &reg_val; hisi_pcie_cfg_write() local
81 walker += (where & 0x3); hisi_pcie_cfg_write()
87 *(u16 __force *) walker = val; hisi_pcie_cfg_write()
91 *(u8 __force *) walker = val; hisi_pcie_cfg_write()
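The pcie-hisi.c hits show a different use of the name: a byte-granular pointer walked into a 32-bit register image so that 1- and 2-byte config-space accesses touch only the addressed sub-word. A small sketch of the read side, with illustrative names and host-endianness caveats ignored:

/* Sketch of the sub-word extraction idiom from hisi_pcie_cfg_read() above:
 * fetch the whole 32-bit register, then point a walker at the byte offset
 * selected by the low bits of the config address. */
#include <stdint.h>

static uint32_t cfg_extract(uint32_t reg_val, unsigned int where, int size)
{
	unsigned char *walker = (unsigned char *)&reg_val;

	walker += where & 0x3;                /* offset within the 32-bit word */
	if (size == 1)
		return *walker;
	if (size == 2)
		return *(uint16_t *)walker;   /* assumes a 2-byte aligned 'where' */
	return reg_val;                       /* full 32-bit access */
}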
/linux-4.4.14/net/sched/
cls_tcindex.c:194 struct tcf_walker *walker) tcindex_destroy_element()
453 static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker) tcindex_walk() argument
459 pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p); tcindex_walk()
464 if (walker->count >= walker->skip) { tcindex_walk()
465 if (walker->fn(tp, tcindex_walk()
466 (unsigned long) (p->perfect+i), walker) tcindex_walk()
468 walker->stop = 1; tcindex_walk()
472 walker->count++; tcindex_walk()
480 if (walker->count >= walker->skip) { tcindex_walk()
481 if (walker->fn(tp, (unsigned long) &f->result, tcindex_walk()
482 walker) < 0) { tcindex_walk()
483 walker->stop = 1; tcindex_walk()
487 walker->count++; tcindex_walk()
495 struct tcf_walker walker; tcindex_destroy() local
501 walker.count = 0; tcindex_destroy()
502 walker.skip = 0; tcindex_destroy()
503 walker.fn = tcindex_destroy_element; tcindex_destroy()
504 tcindex_walk(tp, &walker); tcindex_destroy()
192 tcindex_destroy_element(struct tcf_proto *tp, unsigned long arg, struct tcf_walker *walker) tcindex_destroy_element() argument
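tcindex_destroy() above illustrates the classifier teardown idiom built on the same walker: zero the skip and count fields, point fn at a destroy callback, and hand the walker to the ordinary walk routine so every element is visited and freed. A hedged sketch of that reuse follows; the struct mirrors the count/skip/stop/fn contract of tcf_walker, and delete_element() and walk_all_elements() are assumed helpers, not the cls_tcindex ones.

/* Sketch of the "destroy by walking" idiom from tcindex_destroy() above. */
struct walker {
	int count;                          /* elements visited so far */
	int skip;                           /* elements to skip before calling fn */
	int stop;                           /* set when fn asks to abort the walk */
	int (*fn)(void *elem, struct walker *w);
};

extern void delete_element(void *elem);        /* assumed helper */
extern void walk_all_elements(struct walker *w); /* assumed existing walk routine */

static int destroy_element(void *elem, struct walker *w)
{
	delete_element(elem);
	return 0;                           /* never abort: visit everything */
}

static void destroy_all(void)
{
	struct walker w = {
		.count = 0,
		.skip  = 0,                 /* start from the first element */
		.fn    = destroy_element,
	};

	walk_all_elements(&w);              /* reuse the subsystem's existing walk */
}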
sch_red.c:336 static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) red_walk() argument
338 if (!walker->stop) { red_walk()
339 if (walker->count >= walker->skip) red_walk()
340 if (walker->fn(sch, 1, walker) < 0) { red_walk()
341 walker->stop = 1; red_walk()
344 walker->count++; red_walk()
sch_dsmark.c:160 static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker) dsmark_walk() argument
165 pr_debug("%s(sch %p,[qdisc %p],walker %p)\n", dsmark_walk()
166 __func__, sch, p, walker); dsmark_walk()
168 if (walker->stop) dsmark_walk()
174 if (walker->count >= walker->skip) { dsmark_walk()
175 if (walker->fn(sch, i + 1, walker) < 0) { dsmark_walk()
176 walker->stop = 1; dsmark_walk()
181 walker->count++; dsmark_walk()
sch_sfb.c:641 static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker) sfb_walk() argument
643 if (!walker->stop) { sfb_walk()
644 if (walker->count >= walker->skip) sfb_walk()
645 if (walker->fn(sch, 1, walker) < 0) { sfb_walk()
646 walker->stop = 1; sfb_walk()
649 walker->count++; sfb_walk()
sch_tbf.c:527 static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker) tbf_walk() argument
529 if (!walker->stop) { tbf_walk()
530 if (walker->count >= walker->skip) tbf_walk()
531 if (walker->fn(sch, 1, walker) < 0) { tbf_walk()
532 walker->stop = 1; tbf_walk()
535 walker->count++; tbf_walk()
sch_atm.c:330 static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker) atm_tc_walk() argument
335 pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker); atm_tc_walk()
336 if (walker->stop) atm_tc_walk()
339 if (walker->count >= walker->skip && atm_tc_walk()
340 walker->fn(sch, (unsigned long)flow, walker) < 0) { atm_tc_walk()
341 walker->stop = 1; atm_tc_walk()
344 walker->count++; atm_tc_walk()
sch_ingress.c:39 static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker) ingress_walk() argument
sch_netem.c:1117 static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) netem_walk() argument
1119 if (!walker->stop) { netem_walk()
1120 if (walker->count >= walker->skip) netem_walk()
1121 if (walker->fn(sch, 1, walker) < 0) { netem_walk()
1122 walker->stop = 1; netem_walk()
1125 walker->count++; netem_walk()
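The qdisc hits above (red, dsmark, sfb, tbf, atm, netem) all implement the same qdisc_walker contract: honour skip by counting visited classes, call fn for each class past the skip point, latch stop when fn returns a negative value, and always advance count. A generic sketch of that pattern over n_classes classes follows, in the spirit of dsmark_walk(); the names are illustrative, not the scheduler API.

/* Sketch of the skip/count/stop class walk shared by the qdisc hits above.
 * 'struct qwalker' stands in for struct qdisc_walker; my_walk() plays the
 * role of a qdisc's .walk operation. */
struct qwalker {
	int count;                               /* classes visited so far */
	int skip;                                /* classes to skip before calling fn */
	int stop;                                /* latched when fn aborts the walk */
	int (*fn)(unsigned long classid, struct qwalker *w);
};

static void my_walk(struct qwalker *w, unsigned int n_classes)
{
	unsigned int i;

	if (w->stop)
		return;
	for (i = 0; i < n_classes; i++) {
		if (w->count >= w->skip &&
		    w->fn(i + 1, w) < 0) {       /* class handles start at 1 */
			w->stop = 1;
			return;
		}
		w->count++;
	}
}

With n_classes fixed at 1 this collapses to the red_walk()/tbf_walk()/sfb_walk()/netem_walk() form shown above.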
/linux-4.4.14/lib/
rhashtable.c:246 struct rhashtable_walker *walker; rhashtable_rehash_table() local
260 list_for_each_entry(walker, &old_tbl->walkers, list) rhashtable_rehash_table()
261 walker->tbl = NULL; rhashtable_rehash_table()
517 iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL); rhashtable_walk_init()
518 if (!iter->walker) rhashtable_walk_init()
522 iter->walker->tbl = rhashtable_walk_init()
524 list_add(&iter->walker->list, &iter->walker->tbl->walkers); rhashtable_walk_init()
540 if (iter->walker->tbl) rhashtable_walk_exit()
541 list_del(&iter->walker->list); rhashtable_walk_exit()
543 kfree(iter->walker); rhashtable_walk_exit()
569 if (iter->walker->tbl) __acquires()
570 list_del(&iter->walker->list); __acquires()
573 if (!iter->walker->tbl) { __acquires()
574 iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); __acquires()
596 struct bucket_table *tbl = iter->walker->tbl; rhashtable_walk_next()
629 iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); rhashtable_walk_next()
630 if (iter->walker->tbl) { rhashtable_walk_next()
650 struct bucket_table *tbl = iter->walker->tbl; __releases()
659 list_add(&iter->walker->list, &tbl->walkers); __releases()
661 iter->walker->tbl = NULL; __releases()
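The rhashtable.c fragments implement the resize-safe iterator: each rhashtable_iter owns a rhashtable_walker that is linked onto the bucket table's walkers list, so a concurrent rehash can clear walker->tbl and force the iterator to re-attach to the new table. A caller-side usage sketch follows, assuming the 4.4-era rhashtable_walk_* signatures and a hypothetical object type.

/* Kernel-context sketch of driving the walker implemented above (4.4-era
 * API assumed). 'struct my_obj' and dump_table() are illustrative only. */
#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/printk.h>

struct my_obj {
	struct rhash_head node;
	u32 key;
};

static void dump_table(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;
	int err;

	err = rhashtable_walk_init(ht, &iter);        /* allocates iter.walker */
	if (err)
		return;

	err = rhashtable_walk_start(&iter);           /* enters RCU read side */
	if (err && err != -EAGAIN)
		goto out;

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;             /* table resized under us */
			break;
		}
		pr_info("key %u\n", obj->key);
	}

	rhashtable_walk_stop(&iter);                  /* leaves RCU read side */
out:
	rhashtable_walk_exit(&iter);                  /* frees iter.walker */
}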
/linux-4.4.14/fs/ecryptfs/
super.c:151 struct ecryptfs_global_auth_tok *walker; ecryptfs_show_options() local
154 list_for_each_entry(walker, ecryptfs_show_options()
157 if (walker->flags & ECRYPTFS_AUTH_TOK_FNEK) ecryptfs_show_options()
158 seq_printf(m, ",ecryptfs_fnek_sig=%s", walker->sig); ecryptfs_show_options()
160 seq_printf(m, ",ecryptfs_sig=%s", walker->sig); ecryptfs_show_options()
keystore.c:495 struct ecryptfs_global_auth_tok *walker; ecryptfs_find_global_auth_tok_for_sig() local
501 list_for_each_entry(walker, ecryptfs_find_global_auth_tok_for_sig()
504 if (memcmp(walker->sig, sig, ECRYPTFS_SIG_SIZE_HEX)) ecryptfs_find_global_auth_tok_for_sig()
507 if (walker->flags & ECRYPTFS_AUTH_TOK_INVALID) { ecryptfs_find_global_auth_tok_for_sig()
512 rc = key_validate(walker->global_auth_tok_key); ecryptfs_find_global_auth_tok_for_sig()
519 down_write(&(walker->global_auth_tok_key->sem)); ecryptfs_find_global_auth_tok_for_sig()
521 walker->global_auth_tok_key, auth_tok); ecryptfs_find_global_auth_tok_for_sig()
525 (*auth_tok_key) = walker->global_auth_tok_key; ecryptfs_find_global_auth_tok_for_sig()
532 up_write(&(walker->global_auth_tok_key->sem)); ecryptfs_find_global_auth_tok_for_sig()
535 walker->flags |= ECRYPTFS_AUTH_TOK_INVALID; ecryptfs_find_global_auth_tok_for_sig()
536 key_put(walker->global_auth_tok_key); ecryptfs_find_global_auth_tok_for_sig()
537 walker->global_auth_tok_key = NULL; ecryptfs_find_global_auth_tok_for_sig()
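In the ecryptfs hits, walker is simply the list cursor: ecryptfs_find_global_auth_tok_for_sig() iterates the mount's global auth-token list, matches on the key signature, and invalidates tokens whose keys fail validation. A stripped-down sketch of the lookup, with illustrative struct and field names rather than the ecryptfs ones:

/* Sketch of the list-cursor search used by the ecryptfs hits above. */
#include <linux/list.h>
#include <linux/string.h>

struct auth_tok {
	struct list_head list;
	char sig[16];
	unsigned int flags;
};

static struct auth_tok *find_by_sig(struct list_head *toks, const char *sig)
{
	struct auth_tok *walker;

	list_for_each_entry(walker, toks, list) {
		if (!memcmp(walker->sig, sig, sizeof(walker->sig)))
			return walker;   /* first match wins */
	}
	return NULL;
}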
/linux-4.4.14/security/yama/
yama_lsm.c:214 struct task_struct *walker = child; task_is_descendant() local
222 while (walker->pid > 0) { task_is_descendant()
223 if (!thread_group_leader(walker)) task_is_descendant()
224 walker = rcu_dereference(walker->group_leader); task_is_descendant()
225 if (walker == parent) { task_is_descendant()
229 walker = rcu_dereference(walker->real_parent); task_is_descendant()
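The Yama hit walks upward rather than along a list: task_is_descendant() climbs from the child through real_parent pointers (normalising to thread-group leaders under RCU) until it either meets the candidate ancestor or runs out of parents. A simplified, non-RCU sketch of the ancestry test with a hypothetical node type:

/* Simplified sketch of the ancestry walk in task_is_descendant() above;
 * RCU protection and thread-group handling from the real code are omitted. */
#include <stdbool.h>

struct node {
	int pid;                 /* 0 terminates the walk, as for the idle task */
	struct node *parent;
};

static bool is_descendant(const struct node *parent, const struct node *child)
{
	const struct node *walker = child;

	while (walker && walker->pid > 0) {
		if (walker == parent)
			return true;
		walker = walker->parent;
	}
	return false;
}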
/linux-4.4.14/drivers/iommu/
io-pgtable.c:65 * is no longer accessible to the walker by this point.
io-pgtable.h:45 * page table walker.
arm-smmu.c:1653 * What the page table walker can address actually depends on which arm_smmu_device_cfg_probe()
1659 "failed to set DMA mask for table walker\n"); arm_smmu_device_cfg_probe()
arm-smmu-v3.c:2632 /* Set the DMA mask for our table walker */ arm_smmu_device_probe()
2635 "failed to set DMA mask for table walker\n"); arm_smmu_device_probe()
/linux-4.4.14/arch/s390/mm/
dump_pagetables.c:103 * The actual page table walker functions. In order to keep the
214 * kernel ASCE. We need this to keep the page table walker functions pt_dump_init()
/linux-4.4.14/kernel/
seccomp.c:426 struct seccomp_filter *walker; seccomp_attach_filter() local
432 for (walker = current->seccomp.filter; walker; walker = walker->prev) seccomp_attach_filter()
433 total_insns += walker->prog->len + 4; /* 4 instr penalty */ seccomp_attach_filter()
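The seccomp hit shows a third common shape: a singly linked chain walked through ->prev so that the total BPF instruction budget of all attached filters can be enforced. A sketch with simplified field names, not the seccomp_filter layout itself:

/* Sketch of the chain walk in seccomp_attach_filter() above: every attached
 * filter links to the previously attached one through ->prev. */
struct filter {
	struct filter *prev;     /* previously attached filter, or NULL */
	unsigned int len;        /* number of BPF instructions */
};

static unsigned long total_instructions(const struct filter *newest)
{
	const struct filter *walker;
	unsigned long total = 0;

	for (walker = newest; walker; walker = walker->prev)
		total += walker->len + 4;   /* 4-instruction per-filter penalty, as above */
	return total;
}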
/linux-4.4.14/include/linux/
rhashtable.h:142 * @lock: Spin lock to protect walker list
156 * struct rhashtable_walker - Hash table walker
169 * @walker: Associated rhashtable walker
176 struct rhashtable_walker *walker; member in struct:rhashtable_iter
mmzone.h:1205 * returns true. A walker of the full memmap must then do this additional
netdevice.h:3410 * dev_addrs walker. Should be used only for read access. Call with
/linux-4.4.14/arch/ia64/mm/
fault.c:274 * valid, due to the VHPT walker inserting a non present translation that becomes ia64_do_page_fault()
init.c:362 * size of the table, and bit 0 whether the VHPT walker is ia64_mmu_init()
/linux-4.4.14/mm/
gup.c:1018 * tables directly and avoids taking locks. Thus the walker needs to be
1022 * One way to achieve this is to have the walker disable interrupts, and
1029 * pages. Disabling interrupts will allow the fast_gup walker to both block
rmap.c:253 * child isn't reused even if there was no alive vma, thus rmap walker has a
/linux-4.4.14/net/ipv6/
ip6_fib.c:414 * 2. allocate and initialize walker. inet6_dump_fib()
1417 RT6_TRACE("walker %p adjusted by delroute\n", w); FOR_WALKERS()
1645 * Convenient frontend to tree walker.
/linux-4.4.14/drivers/lguest/x86/
switcher_32.S:26 * If we had a small walker in the Switcher, we could quickly check the Guest
/linux-4.4.14/arch/ia64/include/asm/
processor.h:179 __u64 ve : 1; /* enable hw walker */
pal.h:66 #define PAL_VM_PAGE_SIZE 34 /* return vm TC and page walker page sizes */
/linux-4.4.14/arch/ia64/kernel/
palinfo.c:344 "\nTLB walker : %simplemented\n" vm_info()
setup.c:158 * lowest possible address(walker uses virtual) filter_rsvd_memory()
ivt.S:101 * that the VHPT walker was attempting to access. The latter gets
/linux-4.4.14/arch/arm64/mm/
mmu.c:459 /* Ensure the zero page is visible to the page table walker */ paging_init()
/linux-4.4.14/tools/vm/
page-types.c:539 * page frame walker
/linux-4.4.14/arch/x86/kernel/cpu/
perf_event_intel_ds.c:93 * so it either hit the walker or the OS precise_store_data()
/linux-4.4.14/arch/arc/mm/
tlb.c:645 * - software page walker address split between PGD:PTE:PFN (typical
/linux-4.4.14/tools/perf/util/
dwarf-aux.c:640 /* Line walker internal parameters */
/linux-4.4.14/fs/xfs/
xfs_buf.c:99 * Clear the delwri status so that a delwri queue walker will not xfs_buf_stale()
xfs_icache.c:1156 * background walker having already kicked the IO off on those dirty inodes.
/linux-4.4.14/arch/x86/xen/
mmu.c:596 * (Yet another) pagetable walker. This one is intended for pinning a

Completed in 1613 milliseconds