cache 30 arch/arc/include/asm/tlb-mmu1.h ; -- should be in cache since in same line
cache 39 arch/arm/include/asm/procinfo.h struct cpu_cache_fns *cache;
cache 705 arch/arm/kernel/setup.c cpu_cache = *list->cache;
cache 12 arch/csky/mm/syscache.c int, cache)
cache 14 arch/csky/mm/syscache.c switch (cache) {
cache 117 arch/ia64/kernel/unwind.c struct unw_script cache[UNW_CACHE_SIZE];
cache 129 arch/ia64/kernel/unwind.c } cache;
cache 1227 arch/ia64/kernel/unwind.c struct unw_script *script = unw.cache + info->hint;
cache 1234 arch/ia64/kernel/unwind.c STAT(++unw.stat.cache.lookups);
cache 1240 arch/ia64/kernel/unwind.c STAT(++unw.stat.cache.hinted_hits);
cache 1248 arch/ia64/kernel/unwind.c script = unw.cache + index;
cache 1252 arch/ia64/kernel/unwind.c STAT(++unw.stat.cache.normal_hits);
cache 1253 arch/ia64/kernel/unwind.c unw.cache[info->prev_script].hint = script - unw.cache;
cache 1258 arch/ia64/kernel/unwind.c script = unw.cache + script->coll_chain;
cache 1259 arch/ia64/kernel/unwind.c STAT(++unw.stat.cache.collision_chain_traversals);
cache 1280 arch/ia64/kernel/unwind.c script = unw.cache + head;
cache 1293 arch/ia64/kernel/unwind.c unw.cache[unw.lru_tail].lru_chain = head;
cache 1299 arch/ia64/kernel/unwind.c tmp = unw.cache + unw.hash[index];
cache 1313 arch/ia64/kernel/unwind.c tmp = unw.cache + tmp->coll_chain;
cache 1320 arch/ia64/kernel/unwind.c unw.hash[index] = script - unw.cache;
cache 1557 arch/ia64/kernel/unwind.c unw.cache[info->prev_script].hint = script - unw.cache;
cache 1587 arch/ia64/kernel/unwind.c __func__, ip, unw.cache[info->prev_script].ip);
cache 1862 arch/ia64/kernel/unwind.c info->prev_script = scr - unw.cache;
cache 2168 arch/ia64/kernel/unwind.c tmp = unw.cache + unw.hash[index];
cache 2272 arch/ia64/kernel/unwind.c unw.cache[i].lru_chain = (i - 1);
cache 2273 arch/ia64/kernel/unwind.c unw.cache[i].coll_chain = -1;
cache 2274 arch/ia64/kernel/unwind.c rwlock_init(&unw.cache[i].lock);
cache 68 arch/m68k/kernel/sys_m68k.c cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
cache 75 arch/m68k/kernel/sys_m68k.c switch (cache)
cache 128 arch/m68k/kernel/sys_m68k.c switch (cache)
cache 185 arch/m68k/kernel/sys_m68k.c switch (cache)
cache 228 arch/m68k/kernel/sys_m68k.c cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
cache 241 arch/m68k/kernel/sys_m68k.c switch (cache)
cache 289 arch/m68k/kernel/sys_m68k.c switch (cache)
cache 348 arch/m68k/kernel/sys_m68k.c switch (cache)
cache 378 arch/m68k/kernel/sys_m68k.c sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
cache 383 arch/m68k/kernel/sys_m68k.c cache & ~FLUSH_CACHE_BOTH)
cache 412 arch/m68k/kernel/sys_m68k.c if (cache & FLUSH_CACHE_INSN)
cache 414 arch/m68k/kernel/sys_m68k.c if (cache & FLUSH_CACHE_DATA)
cache 428 arch/m68k/kernel/sys_m68k.c if (cache & FLUSH_CACHE_INSN)
cache 430 arch/m68k/kernel/sys_m68k.c if (cache & FLUSH_CACHE_DATA)
cache 447 arch/m68k/kernel/sys_m68k.c ret = cache_flush_040 (addr, scope, cache, len);
cache 449 arch/m68k/kernel/sys_m68k.c ret = cache_flush_060 (addr, scope, cache, len);
cache 525 arch/m68k/kernel/sys_m68k.c sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
cache 112 arch/m68k/mm/sun3kmap.c void __iomem *__ioremap(unsigned long phys, unsigned long size, int cache)
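The sys_m68k.c entries above show a cacheflush syscall that validates a cache-selector argument against FLUSH_CACHE_BOTH and then dispatches to a per-CPU-model flush helper. A minimal userspace sketch of that dispatch shape, assuming the usual two-bit selector encoding; the helper bodies and the cpu_model enum are illustrative stand-ins, not the kernel's definitions:

/* Hedged sketch of the m68k cacheflush dispatch pattern. */
#include <errno.h>

#define FLUSH_CACHE_INSN 1
#define FLUSH_CACHE_DATA 2
#define FLUSH_CACHE_BOTH (FLUSH_CACHE_INSN | FLUSH_CACHE_DATA)

enum cpu_model { CPU_68040, CPU_68060 };  /* assumption for illustration */

static int cache_flush_040(unsigned long addr, int scope, int cache, unsigned long len) { return 0; }
static int cache_flush_060(unsigned long addr, int scope, int cache, unsigned long len) { return 0; }

static int sys_cacheflush(enum cpu_model cpu, unsigned long addr,
                          int scope, int cache, unsigned long len)
{
        if (cache & ~FLUSH_CACHE_BOTH)  /* reject unknown selector bits */
                return -EINVAL;
        switch (cpu) {
        case CPU_68040: return cache_flush_040(addr, scope, cache, len);
        case CPU_68060: return cache_flush_060(addr, scope, cache, len);
        }
        return -EINVAL;
}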
cache 104 arch/mips/include/asm/asm-eva.h #define kernel_cache(op, base) cache op, base
cache 304 arch/mips/include/asm/asm.h #define R10KCBARRIER(addr) cache Cache_Barrier, addr;
cache 111 arch/mips/include/asm/fw/arc/hinv.h } cache;
cache 71 arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h cache 9, 0($0)
cache 531 arch/mips/include/asm/r4kcache.h static inline void extra##blast_##pfx##cache##lsize(void) \
cache 542 arch/mips/include/asm/r4kcache.h cache##lsize##_unroll32(addr|ws, indexop); \
cache 545 arch/mips/include/asm/r4kcache.h static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
cache 551 arch/mips/include/asm/r4kcache.h cache##lsize##_unroll32(start, hitop); \
cache 556 arch/mips/include/asm/r4kcache.h static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
cache 568 arch/mips/include/asm/r4kcache.h cache##lsize##_unroll32(addr|ws, indexop); \
cache 593 arch/mips/include/asm/r4kcache.h static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
cache 599 arch/mips/include/asm/r4kcache.h cache##lsize##_unroll32_user(start, hitop); \
cache 616 arch/mips/include/asm/r4kcache.h static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
cache 639 arch/mips/include/asm/r4kcache.h static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
cache 680 arch/mips/include/asm/r4kcache.h static inline void blast_##pfx##cache##lsize##_node(long node) \
cache 691 arch/mips/include/asm/r4kcache.h cache##lsize##_unroll32(addr|ws, indexop); \
cache 652 arch/mips/include/uapi/asm/inst.h __BITFIELD_FIELD(unsigned int cache : 2,
cache 8 arch/mips/kernel/cacheinfo.c #define populate_cache(cache, leaf, c_level, c_type) \
cache 12 arch/mips/kernel/cacheinfo.c leaf->coherency_line_size = c->cache.linesz; \
cache 13 arch/mips/kernel/cacheinfo.c leaf->number_of_sets = c->cache.sets; \
cache 14 arch/mips/kernel/cacheinfo.c leaf->ways_of_associativity = c->cache.ways; \
cache 15 arch/mips/kernel/cacheinfo.c leaf->size = c->cache.linesz * c->cache.sets * \
cache 16 arch/mips/kernel/cacheinfo.c c->cache.ways; \
cache 194 arch/mips/kernel/pm-cps.c const struct cache_desc *cache,
cache 197 arch/mips/kernel/pm-cps.c unsigned cache_size = cache->ways << cache->waybit;
cache 202 arch/mips/kernel/pm-cps.c if (cache->flags & MIPS_CACHE_NOT_PRESENT)
cache 221 arch/mips/kernel/pm-cps.c uasm_i_addiu(pp, t0, t0, cache->linesz);
cache 223 arch/mips/kernel/pm-cps.c uasm_i_cache(pp, op, i * cache->linesz, t0);
cache 229 arch/mips/kernel/pm-cps.c uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
cache 1803 arch/mips/kvm/emulate.c u32 cache, op_inst, op, base;
cache 1824 arch/mips/kvm/emulate.c cache = op_inst & CacheOp_Cache;
cache 1830 arch/mips/kvm/emulate.c cache, op, base, arch->gprs[base], offset);
cache 1839 arch/mips/kvm/emulate.c vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
cache 1842 arch/mips/kvm/emulate.c if (cache == Cache_D) {
cache 1856 arch/mips/kvm/emulate.c } else if (cache == Cache_I) {
cache 1916 arch/mips/kvm/emulate.c cache, op, base, arch->gprs[base], offset);
cache 28 arch/mips/kvm/mmu.c static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
cache 34 arch/mips/kvm/mmu.c if (cache->nobjs >= min)
cache 36 arch/mips/kvm/mmu.c while (cache->nobjs < max) {
cache 40 arch/mips/kvm/mmu.c cache->objects[cache->nobjs++] = page;
cache 136 arch/mips/kvm/mmu.c static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
cache 152 arch/mips/kvm/mmu.c if (!cache)
cache 154 arch/mips/kvm/mmu.c new_pmd = mmu_memory_cache_alloc(cache);
cache 165 arch/mips/kvm/mmu.c new_pte = mmu_memory_cache_alloc(cache);
cache 174 arch/mips/kvm/mmu.c struct kvm_mmu_memory_cache *cache,
cache 177 arch/mips/kvm/mmu.c return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
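The arch/mips/kvm/mmu.c entries above (and the arch/x86/kvm/mmu.c entries later in this listing) all use the kvm_mmu_memory_cache pattern: pre-fill a small array of pages while allocation may still sleep, then pop from it in walk paths that must not fail. A minimal userspace sketch under that reading; the capacity and the calloc backend stand in for ARRAY_SIZE(cache->objects) and the kernel page allocator:

/* Hedged sketch of the KVM MMU memory-cache top-up/alloc pattern. */
#include <stdlib.h>

#define CACHE_CAPACITY 4  /* stand-in for ARRAY_SIZE(cache->objects) */

struct mmu_memory_cache {
        int nobjs;
        void *objects[CACHE_CAPACITY];
};

static int mmu_topup_memory_cache(struct mmu_memory_cache *cache, int min, int max)
{
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                void *page = calloc(1, 4096);  /* GFP allocation in the kernel */
                if (!page)
                        return -1;             /* -ENOMEM in the kernel */
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void *mmu_memory_cache_alloc(struct mmu_memory_cache *cache)
{
        /* Callers guarantee a prior successful top-up, so this cannot fail. */
        return cache->objects[--cache->nobjs];
}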
cache 1069 arch/mips/kvm/vz.c u32 cache, op_inst, op, base;
cache 1089 arch/mips/kvm/vz.c cache = op_inst & CacheOp_Cache;
cache 1095 arch/mips/kvm/vz.c cache, op, base, arch->gprs[base], offset);
cache 1098 arch/mips/kvm/vz.c if (cache != Cache_I && cache != Cache_D)
cache 1124 arch/mips/kvm/vz.c curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
cache 72 arch/mips/mm/cache.c unsigned int, cache)
cache 35 arch/nds32/include/asm/nds32.h static inline unsigned long CACHE_SET(unsigned char cache)
cache 38 arch/nds32/include/asm/nds32.h if (cache == ICACHE)
cache 46 arch/nds32/include/asm/nds32.h static inline unsigned long CACHE_WAY(unsigned char cache)
cache 49 arch/nds32/include/asm/nds32.h if (cache == ICACHE)
cache 57 arch/nds32/include/asm/nds32.h static inline unsigned long CACHE_LINE_SIZE(unsigned char cache)
cache 60 arch/nds32/include/asm/nds32.h if (cache == ICACHE)
cache 29 arch/nds32/kernel/sys_nds32.c SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache)
cache 37 arch/nds32/kernel/sys_nds32.c switch (cache) {
cache 48 arch/parisc/include/asm/pdc.h int pdc_cache_info(struct pdc_cache_info *cache);
cache 24 arch/powerpc/include/asm/fsl_pamu_stash.h u32 cache; /* cache to stash to: L1,L2,L3 */
cache 42 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 122 arch/powerpc/kernel/cacheinfo.c struct cache *next_local; /* next cache of >= level */
cache 137 arch/powerpc/kernel/cacheinfo.c static const char *cache_type_string(const struct cache *cache)
cache 139 arch/powerpc/kernel/cacheinfo.c return cache_type_info[cache->type].name;
cache 142 arch/powerpc/kernel/cacheinfo.c static void cache_init(struct cache *cache, int type, int level,
cache 145 arch/powerpc/kernel/cacheinfo.c cache->type = type;
cache 146 arch/powerpc/kernel/cacheinfo.c cache->level = level;
cache 147 arch/powerpc/kernel/cacheinfo.c cache->ofnode = of_node_get(ofnode);
cache 148 arch/powerpc/kernel/cacheinfo.c INIT_LIST_HEAD(&cache->list);
cache 149 arch/powerpc/kernel/cacheinfo.c list_add(&cache->list, &cache_list);
cache 152 arch/powerpc/kernel/cacheinfo.c static struct cache *new_cache(int type, int level, struct device_node *ofnode)
cache 154 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 156 arch/powerpc/kernel/cacheinfo.c cache = kzalloc(sizeof(*cache), GFP_KERNEL);
cache 157 arch/powerpc/kernel/cacheinfo.c if (cache)
cache 158 arch/powerpc/kernel/cacheinfo.c cache_init(cache, type, level, ofnode);
cache 160 arch/powerpc/kernel/cacheinfo.c return cache;
cache 163 arch/powerpc/kernel/cacheinfo.c static void release_cache_debugcheck(struct cache *cache)
cache 165 arch/powerpc/kernel/cacheinfo.c struct cache *iter;
cache 168 arch/powerpc/kernel/cacheinfo.c WARN_ONCE(iter->next_local == cache,
cache 172 arch/powerpc/kernel/cacheinfo.c cache->ofnode,
cache 173 arch/powerpc/kernel/cacheinfo.c cache_type_string(cache));
cache 176 arch/powerpc/kernel/cacheinfo.c static void release_cache(struct cache *cache)
cache 178 arch/powerpc/kernel/cacheinfo.c if (!cache)
cache 181 arch/powerpc/kernel/cacheinfo.c pr_debug("freeing L%d %s cache for %pOF\n", cache->level,
cache 182 arch/powerpc/kernel/cacheinfo.c cache_type_string(cache), cache->ofnode);
cache 184 arch/powerpc/kernel/cacheinfo.c release_cache_debugcheck(cache);
cache 185 arch/powerpc/kernel/cacheinfo.c list_del(&cache->list);
cache 186 arch/powerpc/kernel/cacheinfo.c of_node_put(cache->ofnode);
cache 187 arch/powerpc/kernel/cacheinfo.c kfree(cache);
cache 190 arch/powerpc/kernel/cacheinfo.c static void cache_cpu_set(struct cache *cache, int cpu)
cache 192 arch/powerpc/kernel/cacheinfo.c struct cache *next = cache;
cache 204 arch/powerpc/kernel/cacheinfo.c static int cache_size(const struct cache *cache, unsigned int *ret)
cache 209 arch/powerpc/kernel/cacheinfo.c propname = cache_type_info[cache->type].size_prop;
cache 211 arch/powerpc/kernel/cacheinfo.c cache_size = of_get_property(cache->ofnode, propname, NULL);
cache 219 arch/powerpc/kernel/cacheinfo.c static int cache_size_kb(const struct cache *cache, unsigned int *ret)
cache 223 arch/powerpc/kernel/cacheinfo.c if (cache_size(cache, &size))
cache 231 arch/powerpc/kernel/cacheinfo.c static int cache_get_line_size(const struct cache *cache, unsigned int *ret)
cache 236 arch/powerpc/kernel/cacheinfo.c lim = ARRAY_SIZE(cache_type_info[cache->type].line_size_props);
cache 241 arch/powerpc/kernel/cacheinfo.c propname = cache_type_info[cache->type].line_size_props[i];
cache 242 arch/powerpc/kernel/cacheinfo.c line_size = of_get_property(cache->ofnode, propname, NULL);
cache 254 arch/powerpc/kernel/cacheinfo.c static int cache_nr_sets(const struct cache *cache, unsigned int *ret)
cache 259 arch/powerpc/kernel/cacheinfo.c propname = cache_type_info[cache->type].nr_sets_prop;
cache 261 arch/powerpc/kernel/cacheinfo.c nr_sets = of_get_property(cache->ofnode, propname, NULL);
cache 269 arch/powerpc/kernel/cacheinfo.c static int cache_associativity(const struct cache *cache, unsigned int *ret)
cache 275 arch/powerpc/kernel/cacheinfo.c if (cache_nr_sets(cache, &nr_sets))
cache 286 arch/powerpc/kernel/cacheinfo.c if (cache_get_line_size(cache, &line_size))
cache 288 arch/powerpc/kernel/cacheinfo.c if (cache_size(cache, &size))
cache 301 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_find_first_sibling(struct cache *cache)
cache 303 arch/powerpc/kernel/cacheinfo.c struct cache *iter;
cache 305 arch/powerpc/kernel/cacheinfo.c if (cache->type == CACHE_TYPE_UNIFIED ||
cache 306 arch/powerpc/kernel/cacheinfo.c cache->type == CACHE_TYPE_UNIFIED_D)
cache 307 arch/powerpc/kernel/cacheinfo.c return cache;
cache 310 arch/powerpc/kernel/cacheinfo.c if (iter->ofnode == cache->ofnode && iter->next_local == cache)
cache 313 arch/powerpc/kernel/cacheinfo.c return cache;
cache 317 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_lookup_by_node(const struct device_node *node)
cache 319 arch/powerpc/kernel/cacheinfo.c struct cache *cache = NULL;
cache 320 arch/powerpc/kernel/cacheinfo.c struct cache *iter;
cache 325 arch/powerpc/kernel/cacheinfo.c cache = cache_find_first_sibling(iter);
cache 329 arch/powerpc/kernel/cacheinfo.c return cache;
cache 353 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
cache 360 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_do_one_devnode_split(struct device_node *node,
cache 363 arch/powerpc/kernel/cacheinfo.c struct cache *dcache, *icache;
cache 383 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_do_one_devnode(struct device_node *node, int level)
cache 385 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 388 arch/powerpc/kernel/cacheinfo.c cache = cache_do_one_devnode_unified(node, level);
cache 390 arch/powerpc/kernel/cacheinfo.c cache = cache_do_one_devnode_split(node, level);
cache 392 arch/powerpc/kernel/cacheinfo.c return cache;
cache 395 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_lookup_or_instantiate(struct device_node *node,
cache 398 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 400 arch/powerpc/kernel/cacheinfo.c cache = cache_lookup_by_node(node);
cache 402 arch/powerpc/kernel/cacheinfo.c WARN_ONCE(cache && cache->level != level,
cache 404 arch/powerpc/kernel/cacheinfo.c cache->level, level);
cache 406 arch/powerpc/kernel/cacheinfo.c if (!cache)
cache 407 arch/powerpc/kernel/cacheinfo.c cache = cache_do_one_devnode(node, level);
cache 409 arch/powerpc/kernel/cacheinfo.c return cache;
cache 412 arch/powerpc/kernel/cacheinfo.c static void link_cache_lists(struct cache *smaller, struct cache *bigger)
cache 423 arch/powerpc/kernel/cacheinfo.c static void do_subsidiary_caches_debugcheck(struct cache *cache)
cache 425 arch/powerpc/kernel/cacheinfo.c WARN_ON_ONCE(cache->level != 1);
cache 426 arch/powerpc/kernel/cacheinfo.c WARN_ON_ONCE(!of_node_is_type(cache->ofnode, "cpu"));
cache 429 arch/powerpc/kernel/cacheinfo.c static void do_subsidiary_caches(struct cache *cache)
cache 432 arch/powerpc/kernel/cacheinfo.c int level = cache->level;
cache 434 arch/powerpc/kernel/cacheinfo.c do_subsidiary_caches_debugcheck(cache);
cache 436 arch/powerpc/kernel/cacheinfo.c while ((subcache_node = of_find_next_cache_node(cache->ofnode))) {
cache 437 arch/powerpc/kernel/cacheinfo.c struct cache *subcache;
cache 445 arch/powerpc/kernel/cacheinfo.c link_cache_lists(cache, subcache);
cache 446 arch/powerpc/kernel/cacheinfo.c cache = subcache;
cache 450 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_chain_instantiate(unsigned int cpu_id)
cache 453 arch/powerpc/kernel/cacheinfo.c struct cache *cpu_cache = NULL;
cache 513 arch/powerpc/kernel/cacheinfo.c index->cache->level, cache_type_string(index->cache));
cache 527 arch/powerpc/kernel/cacheinfo.c static struct cache *index_kobj_to_cache(struct kobject *k)
cache 533 arch/powerpc/kernel/cacheinfo.c return index->cache;
cache 539 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 541 arch/powerpc/kernel/cacheinfo.c cache = index_kobj_to_cache(k);
cache 543 arch/powerpc/kernel/cacheinfo.c if (cache_size_kb(cache, &size_kb))
cache 556 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 558 arch/powerpc/kernel/cacheinfo.c cache = index_kobj_to_cache(k);
cache 560 arch/powerpc/kernel/cacheinfo.c if (cache_get_line_size(cache, &line_size))
cache 572 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 574 arch/powerpc/kernel/cacheinfo.c cache = index_kobj_to_cache(k);
cache 576 arch/powerpc/kernel/cacheinfo.c if (cache_nr_sets(cache, &nr_sets))
cache 588 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 590 arch/powerpc/kernel/cacheinfo.c cache = index_kobj_to_cache(k);
cache 592 arch/powerpc/kernel/cacheinfo.c if (cache_associativity(cache, &associativity))
cache 603 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 605 arch/powerpc/kernel/cacheinfo.c cache = index_kobj_to_cache(k);
cache 607 arch/powerpc/kernel/cacheinfo.c return sprintf(buf, "%s\n", cache_type_string(cache));
cache 616 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 619 arch/powerpc/kernel/cacheinfo.c cache = index->cache;
cache 621 arch/powerpc/kernel/cacheinfo.c return sprintf(buf, "%d\n", cache->level);
cache 642 arch/powerpc/kernel/cacheinfo.c static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
cache 644 arch/powerpc/kernel/cacheinfo.c if (cache->level == 1)
cache 647 arch/powerpc/kernel/cacheinfo.c return &cache->shared_cpu_map;
cache 653 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 658 arch/powerpc/kernel/cacheinfo.c cache = index->cache;
cache 662 arch/powerpc/kernel/cacheinfo.c mask = get_big_core_shared_cpu_map(cpu, cache);
cache 664 arch/powerpc/kernel/cacheinfo.c mask = &cache->shared_cpu_map;
cache 711 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 719 arch/powerpc/kernel/cacheinfo.c cache = dir->cache;
cache 720 arch/powerpc/kernel/cacheinfo.c cache_type = cache_type_string(cache);
cache 737 arch/powerpc/kernel/cacheinfo.c attr->attr.name, cache->ofnode,
cache 743 arch/powerpc/kernel/cacheinfo.c attr->attr.name, cache->ofnode, cache_type);
cache 749 arch/powerpc/kernel/cacheinfo.c static void cacheinfo_create_index_dir(struct cache *cache, int index,
cache 759 arch/powerpc/kernel/cacheinfo.c index_dir->cache = cache;
cache 775 arch/powerpc/kernel/cacheinfo.c struct cache *cache_list)
cache 778 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 785 arch/powerpc/kernel/cacheinfo.c cache = cache_list;
cache 786 arch/powerpc/kernel/cacheinfo.c while (cache) {
cache 787 arch/powerpc/kernel/cacheinfo.c cacheinfo_create_index_dir(cache, index, cache_dir);
cache 789 arch/powerpc/kernel/cacheinfo.c cache = cache->next_local;
cache 795 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 797 arch/powerpc/kernel/cacheinfo.c cache = cache_chain_instantiate(cpu_id);
cache 798 arch/powerpc/kernel/cacheinfo.c if (!cache)
cache 801 arch/powerpc/kernel/cacheinfo.c cacheinfo_sysfs_populate(cpu_id, cache);
cache 809 arch/powerpc/kernel/cacheinfo.c static struct cache *cache_lookup_by_cpu(unsigned int cpu_id)
cache 812 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 819 arch/powerpc/kernel/cacheinfo.c cache = cache_lookup_by_node(cpu_node);
cache 822 arch/powerpc/kernel/cacheinfo.c return cache;
cache 852 arch/powerpc/kernel/cacheinfo.c static void cache_cpu_clear(struct cache *cache, int cpu)
cache 854 arch/powerpc/kernel/cacheinfo.c while (cache) {
cache 855 arch/powerpc/kernel/cacheinfo.c struct cache *next = cache->next_local;
cache 857 arch/powerpc/kernel/cacheinfo.c WARN_ONCE(!cpumask_test_cpu(cpu, &cache->shared_cpu_map),
cache 859 arch/powerpc/kernel/cacheinfo.c cpu, cache->ofnode,
cache 860 arch/powerpc/kernel/cacheinfo.c cache_type_string(cache));
cache 862 arch/powerpc/kernel/cacheinfo.c cpumask_clear_cpu(cpu, &cache->shared_cpu_map);
cache 866 arch/powerpc/kernel/cacheinfo.c if (cpumask_empty(&cache->shared_cpu_map))
cache 867 arch/powerpc/kernel/cacheinfo.c release_cache(cache);
cache 869 arch/powerpc/kernel/cacheinfo.c cache = next;
cache 876 arch/powerpc/kernel/cacheinfo.c struct cache *cache;
cache 890 arch/powerpc/kernel/cacheinfo.c cache = cache_lookup_by_cpu(cpu_id);
cache 891 arch/powerpc/kernel/cacheinfo.c if (cache)
cache 892 arch/powerpc/kernel/cacheinfo.c cache_cpu_clear(cache, cpu_id);
cache 98 arch/powerpc/kernel/eeh_cache.c static void eeh_addr_cache_print(struct pci_io_addr_cache *cache)
cache 103 arch/powerpc/kernel/eeh_cache.c n = rb_first(&cache->rb_root);
cache 1112 arch/powerpc/kernel/smp.c struct device_node *cache;
cache 1121 arch/powerpc/kernel/smp.c cache = of_find_next_cache_node(np);
cache 1125 arch/powerpc/kernel/smp.c return cache;
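The arch/powerpc/kernel/cacheinfo.c entries above build a per-CPU chain of struct cache objects: each node's next_local points at the next cache of greater-or-equal level, so sysfs population is a walk from the L1 node. A minimal sketch of that chain shape under simplified types; the device-tree lookup and kobject plumbing are elided:

/* Hedged sketch of the powerpc cacheinfo next_local chain. */
#include <stdio.h>

struct cache {
        int level;
        struct cache *next_local;  /* next cache of >= level */
};

static void link_cache_lists(struct cache *smaller, struct cache *bigger)
{
        smaller->next_local = bigger;  /* e.g. L1 -> L2 -> L3 */
}

static void cacheinfo_sysfs_populate(struct cache *cache_list)
{
        int index = 0;

        /* mirrors the while (cache) { ...; cache = cache->next_local; } loop */
        for (struct cache *cache = cache_list; cache; cache = cache->next_local)
                printf("index%d -> L%d\n", index++, cache->level);
}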
cache 149 arch/powerpc/perf/isa207-common.c unsigned int cache;
cache 151 arch/powerpc/perf/isa207-common.c cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
cache 152 arch/powerpc/perf/isa207-common.c return cache;
cache 244 arch/powerpc/perf/isa207-common.c unsigned int unit, pmc, cache, ebb;
cache 254 arch/powerpc/perf/isa207-common.c cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
cache 293 arch/powerpc/perf/isa207-common.c } else if (cache & 0x7) {
cache 308 arch/powerpc/perf/isa207-common.c value |= CNST_L1_QUAL_VAL(cache);
cache 369 arch/powerpc/perf/isa207-common.c unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
cache 410 arch/powerpc/perf/isa207-common.c cache = dc_ic_rld_quad_l1_sel(event[i]);
cache 411 arch/powerpc/perf/isa207-common.c mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
cache 414 arch/powerpc/perf/isa207-common.c cache = dc_ic_rld_quad_l1_sel(event[i]);
cache 415 arch/powerpc/perf/isa207-common.c mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
cache 378 arch/powerpc/perf/power7-pmu.c GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
cache 379 arch/powerpc/perf/power7-pmu.c GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
cache 129 arch/powerpc/perf/power8-pmu.c GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
cache 130 arch/powerpc/perf/power8-pmu.c GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
cache 158 arch/powerpc/perf/power9-pmu.c GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
cache 159 arch/powerpc/perf/power9-pmu.c GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1_FIN);
cache 113 arch/powerpc/platforms/ps3/spu.c struct priv1_cache cache;
cache 351 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->cache.sr1 = 0x33;
cache 481 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->cache.masks[class] = mask;
cache 483 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->cache.masks[class]);
cache 488 arch/powerpc/platforms/ps3/spu.c return spu_pdata(spu)->cache.masks[class];
cache 539 arch/powerpc/platforms/ps3/spu.c BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed));
cache 541 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->cache.sr1 = sr1;
cache 545 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->cache.sr1);
cache 550 arch/powerpc/platforms/ps3/spu.c return spu_pdata(spu)->cache.sr1;
cache 555 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->cache.tclass_id = tclass_id;
cache 559 arch/powerpc/platforms/ps3/spu.c spu_pdata(spu)->cache.tclass_id);
cache 564 arch/powerpc/platforms/ps3/spu.c return spu_pdata(spu)->cache.tclass_id;
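The ps3/spu.c entries above keep a priv1_cache shadow of register values (sr1, tclass_id, interrupt masks) that are written through hypervisor calls, so reads are answered from the local shadow rather than from the hardware. A hedged sketch of that shadow-copy pattern; the hv call, the per-SPU storage, and the field set are illustrative assumptions:

/* Hedged sketch of the PS3 SPU shadow-register cache. */
#include <stdint.h>

struct priv1_shadow { uint64_t sr1; uint64_t tclass_id; };

static struct priv1_shadow shadow;  /* per-SPU in the real driver */

static void hv_set_spu_privilege_state(int spu_id, uint64_t sr1)
{
        /* hypervisor call; stand-in for the real lv1 interface */
}

static void sr1_set(int spu_id, uint64_t sr1)
{
        hv_set_spu_privilege_state(spu_id, sr1);
        shadow.sr1 = sr1;          /* remember what we wrote */
}

static uint64_t sr1_get(int spu_id)
{
        return shadow.sr1;         /* serve reads from the shadow */
}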
"Shared" : "Private"); cache 84 arch/s390/kernel/cache.c seq_printf(m, "size=%dK ", cache->size >> 10); cache 85 arch/s390/kernel/cache.c seq_printf(m, "line_size=%u ", cache->coherency_line_size); cache 86 arch/s390/kernel/cache.c seq_printf(m, "associativity=%d", cache->ways_of_associativity); cache 28 arch/sh/mm/cache-debugfs.c struct cache_info *cache; cache 49 arch/sh/mm/cache-debugfs.c cache = ¤t_cpu_data.dcache; cache 52 arch/sh/mm/cache-debugfs.c cache = ¤t_cpu_data.icache; cache 55 arch/sh/mm/cache-debugfs.c waysize = cache->sets; cache 64 arch/sh/mm/cache-debugfs.c waysize <<= cache->entry_shift; cache 66 arch/sh/mm/cache-debugfs.c for (way = 0; way < cache->ways; way++) { cache 76 arch/sh/mm/cache-debugfs.c addr += cache->linesz, line++) { cache 89 arch/sh/mm/cache-debugfs.c addrstart += cache->way_incr; cache 1689 arch/x86/events/core.c EVENT_ATTR(cache-references, CACHE_REFERENCES ); cache 1690 arch/x86/events/core.c EVENT_ATTR(cache-misses, CACHE_MISSES ); cache 674 arch/x86/include/asm/kvm_host.h struct gfn_to_pfn_cache cache; cache 71 arch/x86/kernel/cpu/resctrl/core.c .cache = { cache 88 arch/x86/kernel/cpu/resctrl/core.c .cache = { cache 105 arch/x86/kernel/cpu/resctrl/core.c .cache = { cache 122 arch/x86/kernel/cpu/resctrl/core.c .cache = { cache 139 arch/x86/kernel/cpu/resctrl/core.c .cache = { cache 156 arch/x86/kernel/cpu/resctrl/core.c .cache = { cache 178 arch/x86/kernel/cpu/resctrl/core.c return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset; cache 215 arch/x86/kernel/cpu/resctrl/core.c r->cache.cbm_len = 20; cache 216 arch/x86/kernel/cpu/resctrl/core.c r->cache.shareable_bits = 0xc0000; cache 217 arch/x86/kernel/cpu/resctrl/core.c r->cache.min_cbm_bits = 2; cache 311 arch/x86/kernel/cpu/resctrl/core.c r->cache.cbm_len = eax.split.cbm_len + 1; cache 313 arch/x86/kernel/cpu/resctrl/core.c r->cache.shareable_bits = ebx & r->default_ctrl; cache 314 arch/x86/kernel/cpu/resctrl/core.c r->data_width = (r->cache.cbm_len + 3) / 4; cache 325 arch/x86/kernel/cpu/resctrl/core.c r->cache.cbm_len = r_l->cache.cbm_len; cache 327 arch/x86/kernel/cpu/resctrl/core.c r->cache.shareable_bits = r_l->cache.shareable_bits; cache 328 arch/x86/kernel/cpu/resctrl/core.c r->data_width = (r->cache.cbm_len + 3) / 4; cache 134 arch/x86/kernel/cpu/resctrl/ctrlmondata.c unsigned int cbm_len = r->cache.cbm_len; cache 156 arch/x86/kernel/cpu/resctrl/ctrlmondata.c if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { cache 158 arch/x86/kernel/cpu/resctrl/ctrlmondata.c r->cache.min_cbm_bits); cache 453 arch/x86/kernel/cpu/resctrl/internal.h struct rdt_cache cache; cache 799 arch/x86/kernel/cpu/resctrl/pseudo_lock.c cbm_len = d->plr->r->cache.cbm_len; cache 766 arch/x86/kernel/cpu/resctrl/rdtgroup.c seq_printf(seq, "%u\n", r->cache.min_cbm_bits); cache 775 arch/x86/kernel/cpu/resctrl/rdtgroup.c seq_printf(seq, "%x\n", r->cache.shareable_bits); cache 810 arch/x86/kernel/cpu/resctrl/rdtgroup.c hw_shareable = r->cache.shareable_bits; cache 845 arch/x86/kernel/cpu/resctrl/rdtgroup.c for (i = r->cache.cbm_len - 1; i >= 0; i--) { cache 1070 arch/x86/kernel/cpu/resctrl/rdtgroup.c ctrl_b = r->cache.shareable_bits; cache 1071 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) cache 1082 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { cache 1265 arch/x86/kernel/cpu/resctrl/rdtgroup.c num_b = bitmap_weight(&cbm, r->cache.cbm_len); cache 1269 arch/x86/kernel/cpu/resctrl/rdtgroup.c size = ci->info_list[i].size / 
cache 766 arch/x86/kernel/cpu/resctrl/rdtgroup.c seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
cache 775 arch/x86/kernel/cpu/resctrl/rdtgroup.c seq_printf(seq, "%x\n", r->cache.shareable_bits);
cache 810 arch/x86/kernel/cpu/resctrl/rdtgroup.c hw_shareable = r->cache.shareable_bits;
cache 845 arch/x86/kernel/cpu/resctrl/rdtgroup.c for (i = r->cache.cbm_len - 1; i >= 0; i--) {
cache 1070 arch/x86/kernel/cpu/resctrl/rdtgroup.c ctrl_b = r->cache.shareable_bits;
cache 1071 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
cache 1082 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
cache 1265 arch/x86/kernel/cpu/resctrl/rdtgroup.c num_b = bitmap_weight(&cbm, r->cache.cbm_len);
cache 1269 arch/x86/kernel/cpu/resctrl/rdtgroup.c size = ci->info_list[i].size / r->cache.cbm_len * num_b;
cache 2511 arch/x86/kernel/cpu/resctrl/rdtgroup.c unsigned int cbm_len = r->cache.cbm_len;
cache 2545 arch/x86/kernel/cpu/resctrl/rdtgroup.c d->new_ctrl = r->cache.shareable_bits;
cache 2546 arch/x86/kernel/cpu/resctrl/rdtgroup.c used_b = r->cache.shareable_bits;
cache 2574 arch/x86/kernel/cpu/resctrl/rdtgroup.c unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
cache 2575 arch/x86/kernel/cpu/resctrl/rdtgroup.c unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
cache 2587 arch/x86/kernel/cpu/resctrl/rdtgroup.c if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
cache 1060 arch/x86/kvm/mmu.c static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
cache 1065 arch/x86/kvm/mmu.c if (cache->nobjs >= min)
cache 1067 arch/x86/kvm/mmu.c while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
cache 1070 arch/x86/kvm/mmu.c return cache->nobjs >= min ? 0 : -ENOMEM;
cache 1071 arch/x86/kvm/mmu.c cache->objects[cache->nobjs++] = obj;
cache 1076 arch/x86/kvm/mmu.c static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
cache 1078 arch/x86/kvm/mmu.c return cache->nobjs;
cache 1082 arch/x86/kvm/mmu.c struct kmem_cache *cache)
cache 1085 arch/x86/kvm/mmu.c kmem_cache_free(cache, mc->objects[--mc->nobjs]);
cache 1088 arch/x86/kvm/mmu.c static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
cache 1093 arch/x86/kvm/mmu.c if (cache->nobjs >= min)
cache 1095 arch/x86/kvm/mmu.c while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
cache 1098 arch/x86/kvm/mmu.c return cache->nobjs >= min ? 0 : -ENOMEM;
cache 1099 arch/x86/kvm/mmu.c cache->objects[cache->nobjs++] = page;
cache 1491 arch/x86/kvm/mmu.c struct kvm_mmu_memory_cache *cache;
cache 1493 arch/x86/kvm/mmu.c cache = &vcpu->arch.mmu_pte_list_desc_cache;
cache 1494 arch/x86/kvm/mmu.c return mmu_memory_cache_free_objects(cache);
cache 6865 arch/x86/kvm/vmx/vmx.c u8 cache;
cache 6880 arch/x86/kvm/vmx/vmx.c cache = MTRR_TYPE_UNCACHABLE;
cache 6886 arch/x86/kvm/vmx/vmx.c cache = MTRR_TYPE_WRBACK;
cache 6893 arch/x86/kvm/vmx/vmx.c cache = MTRR_TYPE_WRBACK;
cache 6895 arch/x86/kvm/vmx/vmx.c cache = MTRR_TYPE_UNCACHABLE;
cache 6899 arch/x86/kvm/vmx/vmx.c cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
cache 6902 arch/x86/kvm/vmx/vmx.c return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
cache 2657 arch/x86/kvm/x86.c &map, &vcpu->arch.st.cache, false))
cache 2689 arch/x86/kvm/x86.c kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
cache 3555 arch/x86/kvm/x86.c &vcpu->arch.st.cache, true))
cache 3563 arch/x86/kvm/x86.c kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
cache 9142 arch/x86/kvm/x86.c struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
cache 9144 arch/x86/kvm/x86.c kvm_release_pfn(cache->pfn, cache->dirty, cache);
cache 961 arch/x86/mm/init.c void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
cache 964 arch/x86/mm/init.c BUG_ON(!entry && cache != _PAGE_CACHE_MODE_WB);
cache 966 arch/x86/mm/init.c __cachemode2pte_tbl[cache] = __cm_idx2pte(entry);
cache 967 arch/x86/mm/init.c __pte2cachemode_tbl[entry] = cache;
cache 361 arch/x86/mm/init_64.c enum page_cache_mode cache)
cache 370 arch/x86/mm/init_64.c pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
cache 23 arch/x86/mm/mm_internal.h void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache);
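The arch/x86/mm/init.c entries above show update_cache_mode_entry() keeping two translation tables consistent: one maps a logical page_cache_mode to PTE cache-control bits, the reverse table maps a PAT entry index back to a mode. A hedged standalone sketch of that pairing; the enum values and the bit-encoding helper are illustrative stand-ins for the kernel's definitions:

/* Sketch of the paired cachemode<->PTE translation tables. */
enum page_cache_mode { CM_WB, CM_WC, CM_UC_MINUS, CM_UC, CM_WT, CM_WP, CM_NUM };

static unsigned long cachemode2pte_tbl[CM_NUM];
static enum page_cache_mode pte2cachemode_tbl[8];

static unsigned long cm_idx2pte(unsigned entry)
{
        /* PAT index bits -> PWT (bit 3), PCD (bit 4), PAT (bit 7);
         * a stand-in for __cm_idx2pte() */
        return ((entry & 1) << 3) | ((entry & 2) << 3) | ((entry & 4) << 5);
}

void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
{
        cachemode2pte_tbl[cache] = cm_idx2pte(entry);  /* mode -> PTE bits  */
        pte2cachemode_tbl[entry] = cache;              /* PAT slot -> mode */
}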
cache 316 arch/x86/mm/pageattr.c unsigned long cache = (unsigned long)arg;
cache 324 arch/x86/mm/pageattr.c if (cache && boot_cpu_data.x86 >= 4)
cache 328 arch/x86/mm/pageattr.c static void cpa_flush_all(unsigned long cache)
cache 332 arch/x86/mm/pageattr.c on_each_cpu(__cpa_flush_all, (void *) cache, 1);
cache 344 arch/x86/mm/pageattr.c static void cpa_flush(struct cpa_data *data, int cache)
cache 351 arch/x86/mm/pageattr.c if (cache && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
cache 352 arch/x86/mm/pageattr.c cpa_flush_all(cache);
cache 361 arch/x86/mm/pageattr.c if (!cache)
cache 1674 arch/x86/mm/pageattr.c int ret, cache, checkalias;
cache 1745 arch/x86/mm/pageattr.c cache = !!pgprot2cachemode(mask_set);
cache 1751 arch/x86/mm/pageattr.c cpa_flush_all(cache);
cache 1755 arch/x86/mm/pageattr.c cpa_flush(&cpa, cache);
cache 169 arch/x86/mm/pat.c enum page_cache_mode cache;
cache 173 arch/x86/mm/pat.c case PAT_UC: cache = CM(UC); cache_mode = "UC "; break;
cache 174 arch/x86/mm/pat.c case PAT_WC: cache = CM(WC); cache_mode = "WC "; break;
cache 175 arch/x86/mm/pat.c case PAT_WT: cache = CM(WT); cache_mode = "WT "; break;
cache 176 arch/x86/mm/pat.c case PAT_WP: cache = CM(WP); cache_mode = "WP "; break;
cache 177 arch/x86/mm/pat.c case PAT_WB: cache = CM(WB); cache_mode = "WB "; break;
cache 178 arch/x86/mm/pat.c case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break;
cache 179 arch/x86/mm/pat.c default: cache = CM(WB); cache_mode = "WB "; break;
cache 184 arch/x86/mm/pat.c return cache;
cache 196 arch/x86/mm/pat.c enum page_cache_mode cache;
cache 202 arch/x86/mm/pat.c cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
cache 204 arch/x86/mm/pat.c update_cache_mode_entry(i, cache);
cache 203 drivers/acpi/acpica/acdebug.h u32 acpi_db_get_cache_info(struct acpi_memory_list *cache);
cache 391 drivers/acpi/acpica/acobject.h struct acpi_object_cache_list cache;
cache 240 drivers/acpi/acpica/dbexec.c u32 acpi_db_get_cache_info(struct acpi_memory_list *cache)
cache 243 drivers/acpi/acpica/dbexec.c return (cache->total_allocated - cache->total_freed -
cache 244 drivers/acpi/acpica/dbexec.c cache->current_depth);
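The pageattr.c entries above show the flush decision after a page-attribute change: if caches must be flushed but the CPU lacks CLFLUSH, fall back to a full flush on every CPU; otherwise flush only the affected lines. A hedged sketch of that shape, with the feature test and flush primitives as hypothetical stand-ins (the real code also handles TLB invalidation, omitted here):

/* Sketch of the cpa_flush() cache-flush fallback decision. */
#include <stdbool.h>

static bool cpu_has_clflush(void) { return true; }      /* assumption */
static void wbinvd_on_all_cpus(void) { /* full flush, cpa_flush_all() path */ }
static void clflush_range(void *addr, unsigned long len) { /* per-line flush */ }

static void cpa_flush(void *addr, unsigned long len, int cache)
{
        if (!cache)
                return;                 /* attribute change needs no cache flush */
        if (!cpu_has_clflush()) {
                wbinvd_on_all_cpus();   /* no CLFLUSH: flush everything */
                return;
        }
        clflush_range(addr, len);       /* targeted flush of changed lines */
}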
cache 36 drivers/acpi/acpica/utcache.c struct acpi_memory_list *cache;
cache 46 drivers/acpi/acpica/utcache.c cache = acpi_os_allocate(sizeof(struct acpi_memory_list));
cache 47 drivers/acpi/acpica/utcache.c if (!cache) {
cache 53 drivers/acpi/acpica/utcache.c memset(cache, 0, sizeof(struct acpi_memory_list));
cache 54 drivers/acpi/acpica/utcache.c cache->list_name = cache_name;
cache 55 drivers/acpi/acpica/utcache.c cache->object_size = object_size;
cache 56 drivers/acpi/acpica/utcache.c cache->max_depth = max_depth;
cache 58 drivers/acpi/acpica/utcache.c *return_cache = cache;
cache 74 drivers/acpi/acpica/utcache.c acpi_status acpi_os_purge_cache(struct acpi_memory_list *cache)
cache 81 drivers/acpi/acpica/utcache.c if (!cache) {
cache 92 drivers/acpi/acpica/utcache.c while (cache->list_head) {
cache 96 drivers/acpi/acpica/utcache.c next = ACPI_GET_DESCRIPTOR_PTR(cache->list_head);
cache 97 drivers/acpi/acpica/utcache.c ACPI_FREE(cache->list_head);
cache 99 drivers/acpi/acpica/utcache.c cache->list_head = next;
cache 100 drivers/acpi/acpica/utcache.c cache->current_depth--;
cache 120 drivers/acpi/acpica/utcache.c acpi_status acpi_os_delete_cache(struct acpi_memory_list *cache)
cache 128 drivers/acpi/acpica/utcache.c status = acpi_os_purge_cache(cache);
cache 135 drivers/acpi/acpica/utcache.c acpi_os_free(cache);
cache 153 drivers/acpi/acpica/utcache.c acpi_status acpi_os_release_object(struct acpi_memory_list *cache, void *object)
cache 159 drivers/acpi/acpica/utcache.c if (!cache || !object) {
cache 165 drivers/acpi/acpica/utcache.c if (cache->current_depth >= cache->max_depth) {
cache 167 drivers/acpi/acpica/utcache.c ACPI_MEM_TRACKING(cache->total_freed++);
cache 180 drivers/acpi/acpica/utcache.c memset(object, 0xCA, cache->object_size);
cache 185 drivers/acpi/acpica/utcache.c ACPI_SET_DESCRIPTOR_PTR(object, cache->list_head);
cache 186 drivers/acpi/acpica/utcache.c cache->list_head = object;
cache 187 drivers/acpi/acpica/utcache.c cache->current_depth++;
cache 208 drivers/acpi/acpica/utcache.c void *acpi_os_acquire_object(struct acpi_memory_list *cache)
cache 215 drivers/acpi/acpica/utcache.c if (!cache) {
cache 224 drivers/acpi/acpica/utcache.c ACPI_MEM_TRACKING(cache->requests++);
cache 228 drivers/acpi/acpica/utcache.c if (cache->list_head) {
cache 232 drivers/acpi/acpica/utcache.c object = cache->list_head;
cache 233 drivers/acpi/acpica/utcache.c cache->list_head = ACPI_GET_DESCRIPTOR_PTR(object);
cache 235 drivers/acpi/acpica/utcache.c cache->current_depth--;
cache 237 drivers/acpi/acpica/utcache.c ACPI_MEM_TRACKING(cache->hits++);
cache 241 drivers/acpi/acpica/utcache.c cache->list_name));
cache 250 drivers/acpi/acpica/utcache.c memset(object, 0, cache->object_size);
cache 254 drivers/acpi/acpica/utcache.c ACPI_MEM_TRACKING(cache->total_allocated++);
cache 257 drivers/acpi/acpica/utcache.c if ((cache->total_allocated - cache->total_freed) >
cache 258 drivers/acpi/acpica/utcache.c cache->max_occupied) {
cache 259 drivers/acpi/acpica/utcache.c cache->max_occupied =
cache 260 drivers/acpi/acpica/utcache.c cache->total_allocated - cache->total_freed;
cache 271 drivers/acpi/acpica/utcache.c object = ACPI_ALLOCATE_ZEROED(cache->object_size);
cache 62 drivers/acpi/acpica/uttrack.c struct acpi_memory_list *cache;
cache 64 drivers/acpi/acpica/uttrack.c cache = acpi_os_allocate_zeroed(sizeof(struct acpi_memory_list));
cache 65 drivers/acpi/acpica/uttrack.c if (!cache) {
cache 69 drivers/acpi/acpica/uttrack.c cache->list_name = list_name;
cache 70 drivers/acpi/acpica/uttrack.c cache->object_size = object_size;
cache 72 drivers/acpi/acpica/uttrack.c *return_cache = cache;
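The utcache.c entries above implement a bounded single-linked freelist: release pushes the object onto list_head and bumps current_depth (or really frees once max_depth is reached), and acquire pops from the list or falls back to a fresh zeroed allocation. A userspace sketch of that logic, modeling the descriptor-pointer macros as a plain next pointer embedded in the object (so objects must be at least pointer-sized):

/* Hedged sketch of the ACPICA object-cache freelist. */
#include <stdlib.h>
#include <string.h>

struct memory_list {
        void *list_head;                     /* freelist of released objects */
        unsigned short object_size;
        unsigned short max_depth, current_depth;
};

static void cache_release_object(struct memory_list *cache, void *object)
{
        if (cache->current_depth >= cache->max_depth) {
                free(object);                /* cache full: really free it */
                return;
        }
        *(void **)object = cache->list_head; /* ACPI_SET_DESCRIPTOR_PTR() */
        cache->list_head = object;
        cache->current_depth++;
}

static void *cache_acquire_object(struct memory_list *cache)
{
        if (cache->list_head) {
                void *object = cache->list_head;

                cache->list_head = *(void **)object; /* ACPI_GET_DESCRIPTOR_PTR() */
                cache->current_depth--;
                memset(object, 0, cache->object_size);
                return object;
        }
        return calloc(1, cache->object_size);        /* ACPI_ALLOCATE_ZEROED() */
}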
cache 588 drivers/acpi/apei/ghes.c struct ghes_estatus_cache *cache;
cache 594 drivers/acpi/apei/ghes.c cache = rcu_dereference(ghes_estatus_caches[i]);
cache 595 drivers/acpi/apei/ghes.c if (cache == NULL)
cache 597 drivers/acpi/apei/ghes.c if (len != cache->estatus_len)
cache 599 drivers/acpi/apei/ghes.c cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
cache 602 drivers/acpi/apei/ghes.c atomic_inc(&cache->count);
cache 604 drivers/acpi/apei/ghes.c if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
cache 618 drivers/acpi/apei/ghes.c struct ghes_estatus_cache *cache;
cache 628 drivers/acpi/apei/ghes.c cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
cache 629 drivers/acpi/apei/ghes.c if (!cache) {
cache 633 drivers/acpi/apei/ghes.c cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
cache 635 drivers/acpi/apei/ghes.c cache->estatus_len = len;
cache 636 drivers/acpi/apei/ghes.c atomic_set(&cache->count, 0);
cache 637 drivers/acpi/apei/ghes.c cache->generic = generic;
cache 638 drivers/acpi/apei/ghes.c cache->time_in = sched_clock();
cache 639 drivers/acpi/apei/ghes.c return cache;
cache 642 drivers/acpi/apei/ghes.c static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
cache 646 drivers/acpi/apei/ghes.c len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
cache 648 drivers/acpi/apei/ghes.c gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
cache 654 drivers/acpi/apei/ghes.c struct ghes_estatus_cache *cache;
cache 656 drivers/acpi/apei/ghes.c cache = container_of(head, struct ghes_estatus_cache, rcu);
cache 657 drivers/acpi/apei/ghes.c ghes_estatus_cache_free(cache);
cache 666 drivers/acpi/apei/ghes.c struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;
cache 674 drivers/acpi/apei/ghes.c cache = rcu_dereference(ghes_estatus_caches[i]);
cache 675 drivers/acpi/apei/ghes.c if (cache == NULL) {
cache 680 drivers/acpi/apei/ghes.c duration = now - cache->time_in;
cache 683 drivers/acpi/apei/ghes.c slot_cache = cache;
cache 686 drivers/acpi/apei/ghes.c count = atomic_read(&cache->count);
cache 692 drivers/acpi/apei/ghes.c slot_cache = cache;
cache 326 drivers/acpi/hmat/hmat.c struct acpi_hmat_cache *cache = (void *)header;
cache 331 drivers/acpi/hmat/hmat.c if (cache->header.length < sizeof(*cache)) {
cache 333 drivers/acpi/hmat/hmat.c cache->header.length);
cache 337 drivers/acpi/hmat/hmat.c attrs = cache->cache_attributes;
cache 339 drivers/acpi/hmat/hmat.c cache->memory_PD, cache->cache_size, attrs,
cache 340 drivers/acpi/hmat/hmat.c cache->number_of_SMBIOShandles);
cache 342 drivers/acpi/hmat/hmat.c target = find_mem_target(cache->memory_PD);
cache 352 drivers/acpi/hmat/hmat.c tcache->cache_attrs.size = cache->cache_size;
cache 1634 drivers/acpi/osl.c acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
cache 1636 drivers/acpi/osl.c *cache = kmem_cache_create(name, size, 0, 0, NULL);
cache 1637 drivers/acpi/osl.c if (*cache == NULL)
cache 1655 drivers/acpi/osl.c acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
cache 1657 drivers/acpi/osl.c kmem_cache_shrink(cache);
cache 1674 drivers/acpi/osl.c acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
cache 1676 drivers/acpi/osl.c kmem_cache_destroy(cache);
cache 1694 drivers/acpi/osl.c acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
cache 1696 drivers/acpi/osl.c kmem_cache_free(cache, object);
cache 107 drivers/acpi/pptt.c struct acpi_pptt_cache *cache;
cache 112 drivers/acpi/pptt.c cache = (struct acpi_pptt_cache *) res;
cache 113 drivers/acpi/pptt.c while (cache) {
cache 117 drivers/acpi/pptt.c cache->flags & ACPI_PPTT_CACHE_TYPE_VALID &&
cache 118 drivers/acpi/pptt.c acpi_pptt_match_type(cache->attributes, type)) {
cache 119 drivers/acpi/pptt.c if (*found != NULL && cache != *found)
cache 123 drivers/acpi/pptt.c *found = cache;
cache 130 drivers/acpi/pptt.c cache = fetch_pptt_cache(table_hdr, cache->next_level_of_cache);
cache 67 drivers/auxdisplay/ht16k33.c uint8_t *cache;
cache 137 drivers/auxdisplay/ht16k33.c p1 = fbdev->cache;
cache 152 drivers/auxdisplay/ht16k33.c p1 = fbdev->cache + HT16K33_FB_SIZE - 1;
cache 162 drivers/auxdisplay/ht16k33.c p1 = fbdev->cache + first;
cache 428 drivers/auxdisplay/ht16k33.c fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
cache 429 drivers/auxdisplay/ht16k33.c if (!fbdev->cache) {
cache 244 drivers/base/node.c ATTRIBUTE_GROUPS(cache);
cache 140 drivers/base/regmap/internal.h void *cache;
cache 24 drivers/base/regmap/regcache-flat.c unsigned int *cache;
cache 29 drivers/base/regmap/regcache-flat.c map->cache = kcalloc(regcache_flat_get_index(map, map->max_register)
cache 31 drivers/base/regmap/regcache-flat.c if (!map->cache)
cache 34 drivers/base/regmap/regcache-flat.c cache = map->cache;
cache 40 drivers/base/regmap/regcache-flat.c cache[index] = map->reg_defaults[i].def;
cache 48 drivers/base/regmap/regcache-flat.c kfree(map->cache);
cache 49 drivers/base/regmap/regcache-flat.c map->cache = NULL;
cache 57 drivers/base/regmap/regcache-flat.c unsigned int *cache = map->cache;
cache 60 drivers/base/regmap/regcache-flat.c *value = cache[index];
cache 68 drivers/base/regmap/regcache-flat.c unsigned int *cache = map->cache;
cache 71 drivers/base/regmap/regcache-flat.c cache[index] = value;
cache 138 drivers/base/regmap/regcache-lzo.c map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
cache 140 drivers/base/regmap/regcache-lzo.c if (!map->cache)
cache 142 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache;
cache 203 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache;
cache 224 drivers/base/regmap/regcache-lzo.c map->cache = NULL;
cache 240 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache;
cache 277 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache;
cache 332 drivers/base/regmap/regcache-lzo.c lzo_blocks = map->cache;
cache 65 drivers/base/regmap/regcache-rbtree.c struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
cache 135 drivers/base/regmap/regcache-rbtree.c struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
cache 190 drivers/base/regmap/regcache-rbtree.c map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
cache 191 drivers/base/regmap/regcache-rbtree.c if (!map->cache)
cache 194 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache;
cache 220 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache;
cache 236 drivers/base/regmap/regcache-rbtree.c kfree(map->cache);
cache 237 drivers/base/regmap/regcache-rbtree.c map->cache = NULL;
cache 378 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache;
cache 475 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache;
cache 515 drivers/base/regmap/regcache-rbtree.c rbtree_ctx = map->cache;
cache 156 drivers/base/regmap/regcache.c map->cache = NULL;
cache 563 drivers/base/regmap/regcache.c u8 *cache = base;
cache 565 drivers/base/regmap/regcache.c cache[idx] = val;
cache 569 drivers/base/regmap/regcache.c u16 *cache = base;
cache 571 drivers/base/regmap/regcache.c cache[idx] = val;
cache 575 drivers/base/regmap/regcache.c u32 *cache = base;
cache 577 drivers/base/regmap/regcache.c cache[idx] = val;
cache 582 drivers/base/regmap/regcache.c u64 *cache = base;
cache 584 drivers/base/regmap/regcache.c cache[idx] = val;
cache 607 drivers/base/regmap/regcache.c const u8 *cache = base;
cache 609 drivers/base/regmap/regcache.c return cache[idx];
cache 612 drivers/base/regmap/regcache.c const u16 *cache = base;
cache 614 drivers/base/regmap/regcache.c return cache[idx];
cache 617 drivers/base/regmap/regcache.c const u32 *cache = base;
cache 619 drivers/base/regmap/regcache.c return cache[idx];
cache 623 drivers/base/regmap/regcache.c const u64 *cache = base;
cache 625 drivers/base/regmap/regcache.c return cache[idx];
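The regcache-flat.c entries above are the simplest of the three regmap cache backends: one unsigned int slot per register, indexed by register number. A minimal sketch of that layout (the real code maps the register through regcache_flat_get_index(), elided here; names are simplified):

/* Sketch of the flat regmap cache: one slot per register. */
#include <stdlib.h>

struct flat_map {
        unsigned int *cache;
        unsigned int max_register;
};

static int flat_init(struct flat_map *map)
{
        map->cache = calloc(map->max_register + 1, sizeof(unsigned int));
        return map->cache ? 0 : -1;
}

static int flat_read(struct flat_map *map, unsigned int reg, unsigned int *value)
{
        *value = map->cache[reg];   /* cache hit is a plain array read */
        return 0;
}

static int flat_write(struct flat_map *map, unsigned int reg, unsigned int value)
{
        map->cache[reg] = value;    /* writes always land in the cache */
        return 0;
}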
cache 42 drivers/block/null_blk.h struct radix_tree_root cache; /* disk cache data */
cache 513 drivers/block/null_blk_main.c INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
cache 697 drivers/block/null_blk_main.c root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
cache 720 drivers/block/null_blk_main.c root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
cache 739 drivers/block/null_blk_main.c root = is_cache ? &dev->cache : &dev->data;
cache 772 drivers/block/null_blk_main.c root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
cache 871 drivers/block/null_blk_main.c ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
cache 889 drivers/block/null_blk_main.c nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
cache 1031 drivers/block/null_blk_main.c WARN_ON(!radix_tree_empty(&nullb->dev->cache));
cache 84 drivers/block/ps3vram.c struct ps3vram_cache cache;
cache 318 drivers/block/ps3vram.c struct ps3vram_cache *cache = &priv->cache;
cache 320 drivers/block/ps3vram.c if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY))
cache 324 drivers/block/ps3vram.c cache->tags[entry].address);
cache 325 drivers/block/ps3vram.c if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size,
cache 326 drivers/block/ps3vram.c cache->tags[entry].address, DMA_PAGE_SIZE,
cache 327 drivers/block/ps3vram.c cache->page_size / DMA_PAGE_SIZE) < 0) {
cache 330 drivers/block/ps3vram.c entry * cache->page_size, cache->tags[entry].address,
cache 331 drivers/block/ps3vram.c cache->page_size);
cache 333 drivers/block/ps3vram.c cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY;
cache 340 drivers/block/ps3vram.c struct ps3vram_cache *cache = &priv->cache;
cache 344 drivers/block/ps3vram.c CACHE_OFFSET + entry * cache->page_size,
cache 346 drivers/block/ps3vram.c cache->page_size / DMA_PAGE_SIZE) < 0) {
cache 349 drivers/block/ps3vram.c address, entry * cache->page_size, cache->page_size);
cache 352 drivers/block/ps3vram.c cache->tags[entry].address = address;
cache 353 drivers/block/ps3vram.c cache->tags[entry].flags |= CACHE_PAGE_PRESENT;
cache 360 drivers/block/ps3vram.c struct ps3vram_cache *cache = &priv->cache;
cache 364 drivers/block/ps3vram.c for (i = 0; i < cache->page_count; i++) {
cache 366 drivers/block/ps3vram.c cache->tags[i].flags = 0;
cache 374 drivers/block/ps3vram.c struct ps3vram_cache *cache = &priv->cache;
cache 380 drivers/block/ps3vram.c offset = (unsigned int) (address & (cache->page_size - 1));
cache 384 drivers/block/ps3vram.c for (i = 0; i < cache->page_count; i++) {
cache 385 drivers/block/ps3vram.c if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
cache 386 drivers/block/ps3vram.c cache->tags[i].address == base) {
cache 387 drivers/block/ps3vram.c cache->hit++;
cache 389 drivers/block/ps3vram.c cache->tags[i].address);
cache 395 drivers/block/ps3vram.c i = (jiffies + (counter++)) % cache->page_count;
cache 401 drivers/block/ps3vram.c cache->miss++;
cache 409 drivers/block/ps3vram.c priv->cache.page_count = CACHE_PAGE_COUNT;
cache 410 drivers/block/ps3vram.c priv->cache.page_size = CACHE_PAGE_SIZE;
cache 411 drivers/block/ps3vram.c priv->cache.tags = kcalloc(CACHE_PAGE_COUNT,
cache 414 drivers/block/ps3vram.c if (!priv->cache.tags)
cache 428 drivers/block/ps3vram.c kfree(priv->cache.tags);
cache 452 drivers/block/ps3vram.c offset = (unsigned int) (from & (priv->cache.page_size - 1));
cache 453 drivers/block/ps3vram.c avail = priv->cache.page_size - offset;
cache 456 drivers/block/ps3vram.c cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
cache 493 drivers/block/ps3vram.c offset = (unsigned int) (to & (priv->cache.page_size - 1));
cache 494 drivers/block/ps3vram.c avail = priv->cache.page_size - offset;
cache 497 drivers/block/ps3vram.c cached = CACHE_OFFSET + entry * priv->cache.page_size + offset;
cache 507 drivers/block/ps3vram.c priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY;
cache 522 drivers/block/ps3vram.c seq_printf(m, "hit:%u\nmiss:%u\n", priv->cache.hit, priv->cache.miss);
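The ps3vram.c entries above implement a small software page cache over video RAM: a linear scan of page_count tags, matched on the page-aligned address when CACHE_PAGE_PRESENT is set, with a pseudo-random victim on a miss (the real code mixes in jiffies). A sketch of that lookup under assumed flag values:

/* Hedged sketch of the ps3vram tag lookup and victim choice. */
#define CACHE_PAGE_PRESENT 1   /* assumption for illustration */
#define CACHE_PAGE_DIRTY   2

struct ps3vram_tag { unsigned int address; unsigned int flags; };

struct ps3vram_cache {
        unsigned int page_size, page_count, hit, miss;
        struct ps3vram_tag *tags;
};

static unsigned int ps3vram_cache_match(struct ps3vram_cache *cache,
                                        unsigned int address,
                                        unsigned int tick)
{
        unsigned int base = address & ~(cache->page_size - 1);

        for (unsigned int i = 0; i < cache->page_count; i++) {
                if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) &&
                    cache->tags[i].address == base) {
                        cache->hit++;
                        return i;                 /* page already resident */
                }
        }
        cache->miss++;
        return tick % cache->page_count;          /* victim to flush and refill */
}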
cache 608 drivers/crypto/hifn_795x.c struct scatterlist cache[ASYNC_SCATTERLIST_CACHE];
cache 1339 drivers/crypto/hifn_795x.c t = &rctx->walk.cache[0];
cache 1373 drivers/crypto/hifn_795x.c sg_init_table(w->cache, num);
cache 1383 drivers/crypto/hifn_795x.c s = &w->cache[i];
cache 1397 drivers/crypto/hifn_795x.c struct scatterlist *s = &w->cache[i];
cache 1460 drivers/crypto/hifn_795x.c t = &w->cache[idx];
cache 1674 drivers/crypto/hifn_795x.c t = &rctx->walk.cache[idx];
cache 755 drivers/crypto/inside-secure/safexcel.h u8 cache[HASH_CACHE_SIZE];
cache 47 drivers/crypto/inside-secure/safexcel_hash.c u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
cache 223 drivers/crypto/inside-secure/safexcel_hash.c memcpy(sreq->cache, sreq->state,
cache 247 drivers/crypto/inside-secure/safexcel_hash.c memcpy(sreq->cache, sreq->cache_next, cache_len);
cache 301 drivers/crypto/inside-secure/safexcel_hash.c req->cache_dma = dma_map_single(priv->dev, req->cache,
cache 573 drivers/crypto/inside-secure/safexcel_hash.c req->cache + cache_len,
cache 707 drivers/crypto/inside-secure/safexcel_hash.c memset(req->cache, 0, req->block_sz);
cache 709 drivers/crypto/inside-secure/safexcel_hash.c req->cache[0] = 0x80;
cache 713 drivers/crypto/inside-secure/safexcel_hash.c req->cache[req->block_sz-8] = (req->block_sz << 3) &
cache 715 drivers/crypto/inside-secure/safexcel_hash.c req->cache[req->block_sz-7] = (req->block_sz >> 5);
cache 718 drivers/crypto/inside-secure/safexcel_hash.c req->cache[req->block_sz-2] = (req->block_sz >> 5);
cache 719 drivers/crypto/inside-secure/safexcel_hash.c req->cache[req->block_sz-1] = (req->block_sz << 3) &
cache 759 drivers/crypto/inside-secure/safexcel_hash.c memcpy(export->cache, req->cache, HASH_CACHE_SIZE);
cache 779 drivers/crypto/inside-secure/safexcel_hash.c memcpy(req->cache, export->cache, HASH_CACHE_SIZE);
cache 585 drivers/crypto/marvell/cesa.h u8 *cache;
cache 607 drivers/crypto/marvell/cesa.h u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
cache 49 drivers/crypto/marvell/hash.c req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
cache 51 drivers/crypto/marvell/hash.c if (!req->cache)
cache 60 drivers/crypto/marvell/hash.c if (!req->cache)
cache 63 drivers/crypto/marvell/hash.c dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
cache 176 drivers/crypto/marvell/hash.c creq->cache, creq->cache_ptr);
cache 217 drivers/crypto/marvell/hash.c memcpy_fromio(creq->cache,
cache 396 drivers/crypto/marvell/hash.c creq->cache,
cache 449 drivers/crypto/marvell/hash.c creq->cache + creq->cache_ptr,
cache 501 drivers/crypto/marvell/hash.c memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);
cache 807 drivers/crypto/marvell/hash.c u64 *len, void *cache)
cache 818 drivers/crypto/marvell/hash.c memset(cache, 0, blocksize);
cache 819 drivers/crypto/marvell/hash.c memcpy(cache, creq->cache, creq->cache_ptr);
cache 825 drivers/crypto/marvell/hash.c u64 len, const void *cache)
cache 852 drivers/crypto/marvell/hash.c memcpy(creq->cache, cache, cache_ptr);
cache 1169 drivers/dma/dmaengine.c struct kmem_cache *cache;
cache 1247 drivers/dma/dmaengine.c kmem_cache_destroy(p->cache);
cache 1248 drivers/dma/dmaengine.c p->cache = NULL;
cache 1263 drivers/dma/dmaengine.c p->cache = kmem_cache_create(p->name, size, 0,
cache 1265 drivers/dma/dmaengine.c if (!p->cache)
cache 1267 drivers/dma/dmaengine.c p->pool = mempool_create_slab_pool(1, p->cache);
cache 278 drivers/firewire/net.c .cache = fwnet_header_cache,
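The dmaengine.c entries above pair a slab cache with a mempool so at least one descriptor allocation can always succeed under memory pressure. A hedged kernel-style sketch of that construction (not standalone userspace code); the struct name and sizes are illustrative, and only the calls shown in the fragments are relied on:

/* Kernel-style sketch: slab cache backed by a one-element mempool. */
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/errno.h>

struct desc_pool {
        struct kmem_cache *cache;
        mempool_t *pool;
        char name[32];
};

static int desc_pool_init(struct desc_pool *p, size_t size)
{
        p->cache = kmem_cache_create(p->name, size, 0,
                                     SLAB_HWCACHE_ALIGN, NULL);
        if (!p->cache)
                return -ENOMEM;
        /* reserve 1 element so allocation can make forward progress */
        p->pool = mempool_create_slab_pool(1, p->cache);
        if (!p->pool) {
                kmem_cache_destroy(p->cache);
                p->cache = NULL;
                return -ENOMEM;
        }
        return 0;
}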
cache 415 drivers/gpu/drm/amd/amdgpu/kv_dpm.c u32 cache = 0;
cache 422 drivers/gpu/drm/amd/amdgpu/kv_dpm.c cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
cache 438 drivers/gpu/drm/amd/amdgpu/kv_dpm.c data |= cache;
cache 439 drivers/gpu/drm/amd/amdgpu/kv_dpm.c cache = 0;
cache 284 drivers/gpu/drm/amd/amdkfd/kfd_crat.c static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
cache 292 drivers/gpu/drm/amd/amdkfd/kfd_crat.c id = cache->processor_id_low;
cache 318 drivers/gpu/drm/amd/amdkfd/kfd_crat.c props->cache_level = cache->cache_level;
cache 319 drivers/gpu/drm/amd/amdkfd/kfd_crat.c props->cache_size = cache->cache_size;
cache 320 drivers/gpu/drm/amd/amdkfd/kfd_crat.c props->cacheline_size = cache->cache_line_size;
cache 321 drivers/gpu/drm/amd/amdkfd/kfd_crat.c props->cachelines_per_tag = cache->lines_per_tag;
cache 322 drivers/gpu/drm/amd/amdkfd/kfd_crat.c props->cache_assoc = cache->associativity;
cache 323 drivers/gpu/drm/amd/amdkfd/kfd_crat.c props->cache_latency = cache->cache_latency;
cache 324 drivers/gpu/drm/amd/amdkfd/kfd_crat.c memcpy(props->sibling_map, cache->sibling_map,
cache 327 drivers/gpu/drm/amd/amdkfd/kfd_crat.c if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
cache 329 drivers/gpu/drm/amd/amdkfd/kfd_crat.c if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
cache 331 drivers/gpu/drm/amd/amdkfd/kfd_crat.c if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
cache 333 drivers/gpu/drm/amd/amdkfd/kfd_crat.c if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
cache 431 drivers/gpu/drm/amd/amdkfd/kfd_crat.c struct crat_subtype_cache *cache;
cache 445 drivers/gpu/drm/amd/amdkfd/kfd_crat.c cache = (struct crat_subtype_cache *)sub_type_hdr;
cache 446 drivers/gpu/drm/amd/amdkfd/kfd_crat.c ret = kfd_parse_subtype_cache(cache, device_list);
cache 137 drivers/gpu/drm/amd/amdkfd/kfd_topology.c struct kfd_cache_properties *cache;
cache 151 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache = container_of(dev->cache_props.next,
cache 153 drivers/gpu/drm/amd/amdkfd/kfd_topology.c list_del(&cache->list);
cache 154 drivers/gpu/drm/amd/amdkfd/kfd_topology.c kfree(cache);
cache 331 drivers/gpu/drm/amd/amdkfd/kfd_topology.c struct kfd_cache_properties *cache;
cache 336 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache = container_of(attr, struct kfd_cache_properties, attr);
cache 338 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache->processor_id_low);
cache 339 drivers/gpu/drm/amd/amdkfd/kfd_topology.c sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
cache 340 drivers/gpu/drm/amd/amdkfd/kfd_topology.c sysfs_show_32bit_prop(buffer, "size", cache->cache_size);
cache 341 drivers/gpu/drm/amd/amdkfd/kfd_topology.c sysfs_show_32bit_prop(buffer, "cache_line_size", cache->cacheline_size);
cache 343 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache->cachelines_per_tag);
cache 344 drivers/gpu/drm/amd/amdkfd/kfd_topology.c sysfs_show_32bit_prop(buffer, "association", cache->cache_assoc);
cache 345 drivers/gpu/drm/amd/amdkfd/kfd_topology.c sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
cache 346 drivers/gpu/drm/amd/amdkfd/kfd_topology.c sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
cache 349 drivers/gpu/drm/amd/amdkfd/kfd_topology.c for (j = 0; j < sizeof(cache->sibling_map[0])*8; j++) {
cache 351 drivers/gpu/drm/amd/amdkfd/kfd_topology.c if (cache->sibling_map[i] & (1 << j))
cache 533 drivers/gpu/drm/amd/amdkfd/kfd_topology.c struct kfd_cache_properties *cache;
cache 550 drivers/gpu/drm/amd/amdkfd/kfd_topology.c list_for_each_entry(cache, &dev->cache_props, list)
cache 551 drivers/gpu/drm/amd/amdkfd/kfd_topology.c if (cache->kobj) {
cache 552 drivers/gpu/drm/amd/amdkfd/kfd_topology.c kfd_remove_sysfs_file(cache->kobj,
cache 553 drivers/gpu/drm/amd/amdkfd/kfd_topology.c &cache->attr);
cache 554 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache->kobj = NULL;
cache 596 drivers/gpu/drm/amd/amdkfd/kfd_topology.c struct kfd_cache_properties *cache;
cache 676 drivers/gpu/drm/amd/amdkfd/kfd_topology.c list_for_each_entry(cache, &dev->cache_props, list) {
cache 677 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
cache 678 drivers/gpu/drm/amd/amdkfd/kfd_topology.c if (!cache->kobj)
cache 680 drivers/gpu/drm/amd/amdkfd/kfd_topology.c ret = kobject_init_and_add(cache->kobj, &cache_type,
cache 685 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache->attr.name = "properties";
cache 686 drivers/gpu/drm/amd/amdkfd/kfd_topology.c cache->attr.mode = KFD_SYSFS_FILE_MODE;
cache 687 drivers/gpu/drm/amd/amdkfd/kfd_topology.c sysfs_attr_init(&cache->attr);
cache 688 drivers/gpu/drm/amd/amdkfd/kfd_topology.c ret = sysfs_create_file(cache->kobj, &cache->attr);
cache 899 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c uint32_t cache = 0;
cache 906 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
cache 928 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c data |= cache;
cache 947 drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c cache = 0;
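The kv_dpm.c and smu7_powertune.c entries above share a masked OR-accumulate pattern: successive table entries targeting the same register are folded into a local `cache` word, which is committed into the register value when the table moves on to the next offset. A sketch of that shape under simplified types (the real code re-reads each register; here a single data word stands in):

/* Sketch of the config-register OR-accumulate pattern. */
#include <stdint.h>
#include <stddef.h>

struct config_reg { uint32_t offset, mask, shift, value; };

static uint32_t apply_config_regs(const struct config_reg *regs, size_t n,
                                  uint32_t data)
{
        uint32_t cache = 0, mask = 0;

        for (size_t i = 0; i < n; i++) {
                /* cache |= ((value << shift) & mask), as in the fragments */
                cache |= (regs[i].value << regs[i].shift) & regs[i].mask;
                mask |= regs[i].mask;

                /* commit when the next entry targets a different register */
                if (i + 1 == n || regs[i + 1].offset != regs[i].offset) {
                        data = (data & ~mask) | cache;
                        cache = 0;
                        mask = 0;
                }
        }
        return data;
}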
cache->fb.stride = fb->pitches[0]; cache 694 drivers/gpu/drm/i915/display/intel_fbc.c cache->vma = plane_state->vma; cache 695 drivers/gpu/drm/i915/display/intel_fbc.c cache->flags = plane_state->flags; cache 696 drivers/gpu/drm/i915/display/intel_fbc.c if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence)) cache 697 drivers/gpu/drm/i915/display/intel_fbc.c cache->flags &= ~PLANE_HAS_FENCE; cache 704 drivers/gpu/drm/i915/display/intel_fbc.c struct intel_fbc_state_cache *cache = &fbc->state_cache; cache 714 drivers/gpu/drm/i915/display/intel_fbc.c if (!cache->vma) { cache 719 drivers/gpu/drm/i915/display/intel_fbc.c if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) { cache 742 drivers/gpu/drm/i915/display/intel_fbc.c if (!(cache->flags & PLANE_HAS_FENCE)) { cache 747 drivers/gpu/drm/i915/display/intel_fbc.c cache->plane.rotation != DRM_MODE_ROTATE_0) { cache 752 drivers/gpu/drm/i915/display/intel_fbc.c if (!stride_is_valid(dev_priv, cache->fb.stride)) { cache 757 drivers/gpu/drm/i915/display/intel_fbc.c if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { cache 762 drivers/gpu/drm/i915/display/intel_fbc.c if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE && cache 763 drivers/gpu/drm/i915/display/intel_fbc.c cache->fb.format->has_alpha) { cache 770 drivers/gpu/drm/i915/display/intel_fbc.c cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { cache 832 drivers/gpu/drm/i915/display/intel_fbc.c struct intel_fbc_state_cache *cache = &fbc->state_cache; cache 839 drivers/gpu/drm/i915/display/intel_fbc.c params->vma = cache->vma; cache 840 drivers/gpu/drm/i915/display/intel_fbc.c params->flags = cache->flags; cache 846 drivers/gpu/drm/i915/display/intel_fbc.c params->fb.format = cache->fb.format; cache 847 drivers/gpu/drm/i915/display/intel_fbc.c params->fb.stride = cache->fb.stride; cache 849 drivers/gpu/drm/i915/display/intel_fbc.c params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache); cache 852 drivers/gpu/drm/i915/display/intel_fbc.c params->gen9_wa_cfb_stride = DIV_ROUND_UP(cache->plane.src_w, cache 563 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static inline int use_cpu_reloc(const struct reloc_cache *cache, cache 575 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c return (cache->has_llc || cache 897 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static void reloc_cache_init(struct reloc_cache *cache, cache 900 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->page = -1; cache 901 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = 0; cache 903 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->gen = INTEL_GEN(i915); cache 904 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->has_llc = HAS_LLC(i915); cache 905 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); cache 906 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->has_fence = cache->gen < 4; cache 907 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; cache 908 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.allocated = false; cache 909 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq = NULL; cache 910 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq_size = 0; cache 925 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache) cache 928 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c container_of(cache, struct i915_execbuffer, reloc_cache)->i915; 
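[Editorial sketch] The smu7_powertune.c entries just above (cache 899/906/928/947), like the ci_dpm.c and kv_dpm.c entries elsewhere in this listing, all show the same local-accumulator idiom: field values are OR-ed into a scratch word with ((value << shift) & mask), merged into the register contents with data |= cache, and the accumulator is reset with cache = 0 before the next register. Below is a minimal standalone C sketch of that idiom; the struct layout, the mmio array and the program_registers helper are illustrative stand-ins, not the AMD drivers' actual API.

#include <stdint.h>
#include <stdio.h>

struct config_reg {
	uint32_t offset;	/* index of the register to program */
	uint32_t mask;		/* field position within the register */
	uint32_t shift;
	uint32_t value;
};

static uint32_t mmio[16];	/* stand-in for the real register file */

static void program_registers(const struct config_reg *regs, int n)
{
	uint32_t cache = 0;
	int i;

	for (i = 0; i < n; i++) {
		/* accumulate this field into the scratch word */
		cache |= (regs[i].value << regs[i].shift) & regs[i].mask;

		/* flush once the next entry targets a different register */
		if (i + 1 == n || regs[i + 1].offset != regs[i].offset) {
			uint32_t data = mmio[regs[i].offset];

			data |= cache;			/* merge accumulated fields */
			mmio[regs[i].offset] = data;
			cache = 0;			/* restart accumulation */
		}
	}
}

int main(void)
{
	const struct config_reg regs[] = {
		{ .offset = 3, .mask = 0x00ff, .shift = 0, .value = 0x12 },
		{ .offset = 3, .mask = 0xff00, .shift = 8, .value = 0x34 },
	};

	program_registers(regs, 2);
	printf("mmio[3] = 0x%08x\n", mmio[3]);	/* prints 0x00003412 */
	return 0;
}

Batching the fields this way means each hardware register is read and written once per table run instead of once per field.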
cache 932 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static void reloc_gpu_flush(struct reloc_cache *cache) cache 934 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct drm_i915_gem_object *obj = cache->rq->batch->obj; cache 936 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); cache 937 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; cache 939 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1)); cache 942 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c intel_gt_chipset_flush(cache->rq->engine->gt); cache 944 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c i915_request_add(cache->rq); cache 945 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq = NULL; cache 948 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c static void reloc_cache_reset(struct reloc_cache *cache) cache 952 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->rq) cache 953 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c reloc_gpu_flush(cache); cache 955 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (!cache->vaddr) cache 958 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = unmask_page(cache->vaddr); cache 959 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr & KMAP) { cache 960 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr & CLFLUSH_AFTER) cache 964 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm); cache 966 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_ggtt *ggtt = cache_to_ggtt(cache); cache 971 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->node.allocated) { cache 973 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.start, cache 974 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.size); cache 975 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c drm_mm_remove_node(&cache->node); cache 977 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c i915_vma_unpin((struct i915_vma *)cache->node.mm); cache 981 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = 0; cache 982 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->page = -1; cache 986 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct reloc_cache *cache, cache 991 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr) { cache 992 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c kunmap_atomic(unmask_page(cache->vaddr)); cache 1004 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = flushes | KMAP; cache 1005 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.mm = (void *)obj; cache 1011 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr; cache 1012 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->page = page; cache 1018 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct reloc_cache *cache, cache 1021 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct i915_ggtt *ggtt = cache_to_ggtt(cache); cache 1025 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->vaddr) { cache 1027 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr)); cache 1035 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (use_cpu_reloc(cache, obj)) cache 1049 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c memset(&cache->node, 0, sizeof(cache->node)); cache 1051 
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c (&ggtt->vm.mm, &cache->node, cache 1058 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.start = vma->node.start; cache 1059 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->node.mm = (void *)vma; cache 1063 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c offset = cache->node.start; cache 1064 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->node.allocated) { cache 1074 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->page = page; cache 1075 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->vaddr = (unsigned long)vaddr; cache 1081 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct reloc_cache *cache, cache 1086 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->page == page) { cache 1087 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = unmask_page(cache->vaddr); cache 1090 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if ((cache->vaddr & KMAP) == 0) cache 1091 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = reloc_iomap(obj, cache, page); cache 1093 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c vaddr = reloc_kmap(obj, cache, page); cache 1146 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct reloc_cache *cache = &eb->reloc_cache; cache 1158 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->has_llc ? cache 1192 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->gen > 5 ? 0 : I915_DISPATCH_SECURE); cache 1207 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq = rq; cache 1208 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq_cmd = cmd; cache 1209 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq_size = 0; cache 1231 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c struct reloc_cache *cache = &eb->reloc_cache; cache 1234 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1)) cache 1235 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c reloc_gpu_flush(cache); cache 1237 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c if (unlikely(!cache->rq)) { cache 1252 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cmd = cache->rq_cmd + cache->rq_size; cache 1253 drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c cache->rq_size += len; cache 137 drivers/gpu/drm/i915/i915_active.c ref->cache = NULL; cache 195 drivers/gpu/drm/i915/i915_active.c node = READ_ONCE(ref->cache); cache 233 drivers/gpu/drm/i915/i915_active.c ref->cache = node; cache 253 drivers/gpu/drm/i915/i915_active.c ref->cache = NULL; cache 517 drivers/gpu/drm/i915/i915_active.c if (ref->cache && is_idle_barrier(ref->cache, idx)) { cache 518 drivers/gpu/drm/i915/i915_active.c p = &ref->cache->node; cache 578 drivers/gpu/drm/i915/i915_active.c if (p == &ref->cache->node) cache 579 drivers/gpu/drm/i915/i915_active.c ref->cache = NULL; cache 49 drivers/gpu/drm/i915/i915_active_types.h struct active_node *cache; cache 151 drivers/gpu/drm/i915/i915_scheduler.c struct sched_cache *cache) cache 166 drivers/gpu/drm/i915/i915_scheduler.c memset(cache, 0, sizeof(*cache)); cache 237 drivers/gpu/drm/i915/i915_scheduler.c struct sched_cache cache; cache 311 drivers/gpu/drm/i915/i915_scheduler.c memset(&cache, 0, sizeof(cache)); cache 316 drivers/gpu/drm/i915/i915_scheduler.c engine = sched_lock_engine(node, engine, &cache); cache 321 drivers/gpu/drm/i915/i915_scheduler.c engine = sched_lock_engine(node, engine, &cache); cache 346 drivers/gpu/drm/i915/i915_scheduler.c if (!cache.priolist) cache 347 drivers/gpu/drm/i915/i915_scheduler.c cache.priolist = cache 350 
drivers/gpu/drm/i915/i915_scheduler.c list_move_tail(&node->link, cache.priolist); cache 274 drivers/gpu/drm/nouveau/nvkm/engine/fifo/chang84.c &chan->cache); cache 212 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.c nvkm_gpuobj_del(&chan->cache); cache 13 drivers/gpu/drm/nouveau/nvkm/engine/fifo/channv50.h struct nvkm_gpuobj *cache; cache 82 drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmag84.c nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10); cache 83 drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifog84.c nvkm_wo32(chan->ramfc, 0x88, chan->cache->addr >> 10); cache 573 drivers/gpu/drm/radeon/ci_dpm.c u32 cache = 0; cache 580 drivers/gpu/drm/radeon/ci_dpm.c cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); cache 596 drivers/gpu/drm/radeon/ci_dpm.c data |= cache; cache 609 drivers/gpu/drm/radeon/ci_dpm.c cache = 0; cache 289 drivers/gpu/drm/radeon/kv_dpm.c u32 cache = 0; cache 296 drivers/gpu/drm/radeon/kv_dpm.c cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); cache 312 drivers/gpu/drm/radeon/kv_dpm.c data |= cache; cache 313 drivers/gpu/drm/radeon/kv_dpm.c cache = 0; cache 130 drivers/hwmon/applesmc.c struct applesmc_entry *cache; /* cached key entries */ cache 330 drivers/hwmon/applesmc.c struct applesmc_entry *cache = &smcreg.cache[index]; cache 335 drivers/hwmon/applesmc.c if (cache->valid) cache 336 drivers/hwmon/applesmc.c return cache; cache 340 drivers/hwmon/applesmc.c if (cache->valid) cache 350 drivers/hwmon/applesmc.c memcpy(cache->key, key, 4); cache 351 drivers/hwmon/applesmc.c cache->len = info[0]; cache 352 drivers/hwmon/applesmc.c memcpy(cache->type, &info[1], 4); cache 353 drivers/hwmon/applesmc.c cache->flags = info[5]; cache 354 drivers/hwmon/applesmc.c cache->valid = 1; cache 360 drivers/hwmon/applesmc.c return cache; cache 540 drivers/hwmon/applesmc.c if (s->cache && s->key_count != count) { cache 543 drivers/hwmon/applesmc.c kfree(s->cache); cache 544 drivers/hwmon/applesmc.c s->cache = NULL; cache 548 drivers/hwmon/applesmc.c if (!s->cache) cache 549 drivers/hwmon/applesmc.c s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); cache 550 drivers/hwmon/applesmc.c if (!s->cache) cache 601 drivers/hwmon/applesmc.c kfree(smcreg.cache); cache 602 drivers/hwmon/applesmc.c smcreg.cache = NULL; cache 23 drivers/iio/dac/ti-dac7612.c uint16_t cache[2]; cache 40 drivers/iio/dac/ti-dac7612.c priv->cache[channel] = val; cache 76 drivers/iio/dac/ti-dac7612.c *val = priv->cache[chan->channel]; cache 101 drivers/iio/dac/ti-dac7612.c if (val == priv->cache[chan->channel]) cache 146 drivers/iio/dac/ti-dac7612.c iio_dev->num_channels = ARRAY_SIZE(priv->cache); cache 149 drivers/iio/dac/ti-dac7612.c for (i = 0; i < ARRAY_SIZE(priv->cache); i++) { cache 57 drivers/iio/multiplexer/iio-mux.c struct mux_ext_info_cache *cache; cache 59 drivers/iio/multiplexer/iio-mux.c cache = &child->ext_info_cache[i]; cache 61 drivers/iio/multiplexer/iio-mux.c if (cache->size < 0) cache 65 drivers/iio/multiplexer/iio-mux.c cache->data, cache 66 drivers/iio/multiplexer/iio-mux.c cache->size); cache 198 drivers/infiniband/core/cache.c return device->port_data[port].cache.gid; cache 892 drivers/infiniband/core/cache.c release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid); cache 893 drivers/infiniband/core/cache.c ib_dev->port_data[p].cache.gid = NULL; cache 909 drivers/infiniband/core/cache.c ib_dev->port_data[rdma_port].cache.gid = table; cache 924 drivers/infiniband/core/cache.c ib_dev->port_data[p].cache.gid); cache 1012 
drivers/infiniband/core/cache.c table = device->port_data[p].cache.gid; cache 1035 drivers/infiniband/core/cache.c struct ib_pkey_cache *cache; cache 1042 drivers/infiniband/core/cache.c read_lock_irqsave(&device->cache.lock, flags); cache 1044 drivers/infiniband/core/cache.c cache = device->port_data[port_num].cache.pkey; cache 1046 drivers/infiniband/core/cache.c if (index < 0 || index >= cache->table_len) cache 1049 drivers/infiniband/core/cache.c *pkey = cache->table[index]; cache 1051 drivers/infiniband/core/cache.c read_unlock_irqrestore(&device->cache.lock, flags); cache 1066 drivers/infiniband/core/cache.c read_lock_irqsave(&device->cache.lock, flags); cache 1067 drivers/infiniband/core/cache.c *sn_pfx = device->port_data[port_num].cache.subnet_prefix; cache 1068 drivers/infiniband/core/cache.c read_unlock_irqrestore(&device->cache.lock, flags); cache 1079 drivers/infiniband/core/cache.c struct ib_pkey_cache *cache; cache 1088 drivers/infiniband/core/cache.c read_lock_irqsave(&device->cache.lock, flags); cache 1090 drivers/infiniband/core/cache.c cache = device->port_data[port_num].cache.pkey; cache 1094 drivers/infiniband/core/cache.c for (i = 0; i < cache->table_len; ++i) cache 1095 drivers/infiniband/core/cache.c if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) { cache 1096 drivers/infiniband/core/cache.c if (cache->table[i] & 0x8000) { cache 1109 drivers/infiniband/core/cache.c read_unlock_irqrestore(&device->cache.lock, flags); cache 1120 drivers/infiniband/core/cache.c struct ib_pkey_cache *cache; cache 1128 drivers/infiniband/core/cache.c read_lock_irqsave(&device->cache.lock, flags); cache 1130 drivers/infiniband/core/cache.c cache = device->port_data[port_num].cache.pkey; cache 1134 drivers/infiniband/core/cache.c for (i = 0; i < cache->table_len; ++i) cache 1135 drivers/infiniband/core/cache.c if (cache->table[i] == pkey) { cache 1141 drivers/infiniband/core/cache.c read_unlock_irqrestore(&device->cache.lock, flags); cache 1157 drivers/infiniband/core/cache.c read_lock_irqsave(&device->cache.lock, flags); cache 1158 drivers/infiniband/core/cache.c *lmc = device->port_data[port_num].cache.lmc; cache 1159 drivers/infiniband/core/cache.c read_unlock_irqrestore(&device->cache.lock, flags); cache 1175 drivers/infiniband/core/cache.c read_lock_irqsave(&device->cache.lock, flags); cache 1176 drivers/infiniband/core/cache.c *port_state = device->port_data[port_num].cache.port_state; cache 1177 drivers/infiniband/core/cache.c read_unlock_irqrestore(&device->cache.lock, flags); cache 1437 drivers/infiniband/core/cache.c write_lock_irq(&device->cache.lock); cache 1439 drivers/infiniband/core/cache.c old_pkey_cache = device->port_data[port].cache.pkey; cache 1441 drivers/infiniband/core/cache.c device->port_data[port].cache.pkey = pkey_cache; cache 1442 drivers/infiniband/core/cache.c device->port_data[port].cache.lmc = tprops->lmc; cache 1443 drivers/infiniband/core/cache.c device->port_data[port].cache.port_state = tprops->state; cache 1445 drivers/infiniband/core/cache.c device->port_data[port].cache.subnet_prefix = tprops->subnet_prefix; cache 1446 drivers/infiniband/core/cache.c write_unlock_irq(&device->cache.lock); cache 1539 drivers/infiniband/core/cache.c rwlock_init(&device->cache.lock); cache 1565 drivers/infiniband/core/cache.c kfree(device->port_data[p].cache.pkey); cache 229 drivers/infiniband/core/fmr_pool.c if (params->cache) { cache 11056 drivers/infiniband/hw/hfi1/chip.c static void vl_arb_get_cache(struct vl_arb_cache *cache, cache 11059 
drivers/infiniband/hw/hfi1/chip.c memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl)); cache 11062 drivers/infiniband/hw/hfi1/chip.c static void vl_arb_set_cache(struct vl_arb_cache *cache, cache 11065 drivers/infiniband/hw/hfi1/chip.c memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); cache 11068 drivers/infiniband/hw/hfi1/chip.c static int vl_arb_match_cache(struct vl_arb_cache *cache, cache 11071 drivers/infiniband/hw/hfi1/chip.c return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl)); cache 1946 drivers/infiniband/hw/hfi1/firmware.c u8 *cache = ppd->qsfp_info.cache; cache 1985 drivers/infiniband/hw/hfi1/firmware.c if (cache[QSFP_EQ_INFO_OFFS] & 0x4) cache 238 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 247 drivers/infiniband/hw/hfi1/platform.c cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); cache 267 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 270 drivers/infiniband/hw/hfi1/platform.c cache[QSFP_NOM_BIT_RATE_250_OFFS] < 0x64) cache 275 drivers/infiniband/hw/hfi1/platform.c cache[QSFP_NOM_BIT_RATE_100_OFFS] < 0x7D) cache 293 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 296 drivers/infiniband/hw/hfi1/platform.c cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); cache 299 drivers/infiniband/hw/hfi1/platform.c power_ctrl_byte = cache[QSFP_PWR_CTRL_BYTE_OFFS]; cache 330 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 333 drivers/infiniband/hw/hfi1/platform.c if (!((cache[QSFP_MOD_PWR_OFFS] & 0x4) && cache 334 drivers/infiniband/hw/hfi1/platform.c (cache[QSFP_CDR_INFO_OFFS] & 0x40))) cache 338 drivers/infiniband/hw/hfi1/platform.c cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); cache 372 drivers/infiniband/hw/hfi1/platform.c *cdr_ctrl_byte |= (cache[QSFP_CDR_CTRL_BYTE_OFFS] & 0xF0); cache 381 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 384 drivers/infiniband/hw/hfi1/platform.c if (!((cache[QSFP_MOD_PWR_OFFS] & 0x8) && cache 385 drivers/infiniband/hw/hfi1/platform.c (cache[QSFP_CDR_INFO_OFFS] & 0x80))) cache 389 drivers/infiniband/hw/hfi1/platform.c cable_power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); cache 430 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 431 drivers/infiniband/hw/hfi1/platform.c u8 cdr_ctrl_byte = cache[QSFP_CDR_CTRL_BYTE_OFFS]; cache 443 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 446 drivers/infiniband/hw/hfi1/platform.c if (!(cache[QSFP_EQ_INFO_OFFS] & 0x8)) cache 449 drivers/infiniband/hw/hfi1/platform.c tx_eq = cache[(128 * 3) + 241]; cache 456 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 460 drivers/infiniband/hw/hfi1/platform.c if (!(cache[QSFP_EQ_INFO_OFFS] & 0x4)) cache 479 drivers/infiniband/hw/hfi1/platform.c if (((cache[(128 * 3) + 224] & 0xF0) >> 4) < tx_preset) { cache 488 drivers/infiniband/hw/hfi1/platform.c __func__, cache[608] & 0xF0); cache 490 drivers/infiniband/hw/hfi1/platform.c tx_preset = (cache[608] & 0xF0) >> 4; cache 501 drivers/infiniband/hw/hfi1/platform.c u8 rx_eq, *cache = ppd->qsfp_info.cache; cache 503 drivers/infiniband/hw/hfi1/platform.c if (!(cache[QSFP_EQ_INFO_OFFS] & 0x2)) cache 522 drivers/infiniband/hw/hfi1/platform.c if ((cache[(128 * 3) + 224] & 0xF) < rx_preset) { cache 531 drivers/infiniband/hw/hfi1/platform.c __func__, cache[608] & 0xF); cache 533 
drivers/infiniband/hw/hfi1/platform.c rx_preset = cache[608] & 0xF; cache 545 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 548 drivers/infiniband/hw/hfi1/platform.c if (cache[2] & 4) { cache 567 drivers/infiniband/hw/hfi1/platform.c u8 rx_amp = 0, i = 0, preferred = 0, *cache = ppd->qsfp_info.cache; cache 570 drivers/infiniband/hw/hfi1/platform.c if (cache[2] & 4) { cache 576 drivers/infiniband/hw/hfi1/platform.c if (!(cache[QSFP_EQ_INFO_OFFS] & 0x1)) { cache 607 drivers/infiniband/hw/hfi1/platform.c if (cache[(128 * 3) + 225] & (1 << i)) { cache 618 drivers/infiniband/hw/hfi1/platform.c if (!preferred && !(cache[(128 * 3) + 225] & 0x1)) { cache 662 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 670 drivers/infiniband/hw/hfi1/platform.c switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) { cache 675 drivers/infiniband/hw/hfi1/platform.c power_class = get_qsfp_power_class(cache[QSFP_MOD_PWR_OFFS]); cache 689 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 710 drivers/infiniband/hw/hfi1/platform.c ((cache[QSFP_MOD_PWR_OFFS] & 0x4) << 3) | cache 711 drivers/infiniband/hw/hfi1/platform.c ((cache[QSFP_MOD_PWR_OFFS] & 0x8) << 2) | cache 712 drivers/infiniband/hw/hfi1/platform.c ((cache[QSFP_EQ_INFO_OFFS] & 0x2) << 1) | cache 713 drivers/infiniband/hw/hfi1/platform.c (cache[QSFP_EQ_INFO_OFFS] & 0x4); cache 772 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 806 drivers/infiniband/hw/hfi1/platform.c if (cache[QSFP_EQ_INFO_OFFS] & 0x4) { cache 863 drivers/infiniband/hw/hfi1/platform.c u8 *cache = ppd->qsfp_info.cache; cache 865 drivers/infiniband/hw/hfi1/platform.c switch ((cache[QSFP_MOD_TECH_OFFS] & 0xF0) >> 4) { cache 876 drivers/infiniband/hw/hfi1/platform.c cable_atten = cache[QSFP_CU_ATTEN_12G_OFFS]; cache 879 drivers/infiniband/hw/hfi1/platform.c cable_atten = cache[QSFP_CU_ATTEN_7G_OFFS]; cache 563 drivers/infiniband/hw/hfi1/qsfp.c u8 *cache = &cp->cache[0]; cache 566 drivers/infiniband/hw/hfi1/qsfp.c memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128)); cache 576 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 0, cache, QSFP_PAGESIZE); cache 585 drivers/infiniband/hw/hfi1/qsfp.c if (!(cache[2] & 4)) { cache 587 drivers/infiniband/hw/hfi1/qsfp.c if ((cache[195] & 0xC0) == 0xC0) { cache 589 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 384, cache + 256, 128); cache 594 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 640, cache + 384, 128); cache 599 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 896, cache + 512, 128); cache 604 drivers/infiniband/hw/hfi1/qsfp.c } else if ((cache[195] & 0x80) == 0x80) { cache 606 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 640, cache + 384, 128); cache 611 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 896, cache + 512, 128); cache 616 drivers/infiniband/hw/hfi1/qsfp.c } else if ((cache[195] & 0x40) == 0x40) { cache 618 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 384, cache + 256, 128); cache 623 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 896, cache + 512, 128); cache 630 drivers/infiniband/hw/hfi1/qsfp.c ret = qsfp_read(ppd, target, 896, cache + 512, 128); cache 646 drivers/infiniband/hw/hfi1/qsfp.c memset(cache, 0, (QSFP_MAX_NUM_PAGES * 128)); cache 739 drivers/infiniband/hw/hfi1/qsfp.c memcpy(data, &ppd->qsfp_info.cache[addr], (len - excess_len)); cache 744 drivers/infiniband/hw/hfi1/qsfp.c memcpy(data, 
&ppd->qsfp_info.cache[addr], len); cache 792 drivers/infiniband/hw/hfi1/qsfp.c u8 *cache = &ppd->qsfp_info.cache[0]; cache 797 drivers/infiniband/hw/hfi1/qsfp.c u8 *atten = &cache[QSFP_ATTEN_OFFS]; cache 798 drivers/infiniband/hw/hfi1/qsfp.c u8 *vendor_oui = &cache[QSFP_VOUI_OFFS]; cache 806 drivers/infiniband/hw/hfi1/qsfp.c if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) cache 808 drivers/infiniband/hw/hfi1/qsfp.c cache[QSFP_MOD_LEN_OFFS]); cache 810 drivers/infiniband/hw/hfi1/qsfp.c power_byte = cache[QSFP_MOD_PWR_OFFS]; cache 816 drivers/infiniband/hw/hfi1/qsfp.c hfi1_qsfp_devtech[(cache[QSFP_MOD_TECH_OFFS]) >> 4]); cache 819 drivers/infiniband/hw/hfi1/qsfp.c QSFP_VEND_LEN, &cache[QSFP_VEND_OFFS]); cache 825 drivers/infiniband/hw/hfi1/qsfp.c QSFP_PN_LEN, &cache[QSFP_PN_OFFS]); cache 828 drivers/infiniband/hw/hfi1/qsfp.c QSFP_REV_LEN, &cache[QSFP_REV_OFFS]); cache 830 drivers/infiniband/hw/hfi1/qsfp.c if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS])) cache 837 drivers/infiniband/hw/hfi1/qsfp.c QSFP_SN_LEN, &cache[QSFP_SN_OFFS]); cache 840 drivers/infiniband/hw/hfi1/qsfp.c QSFP_DATE_LEN, &cache[QSFP_DATE_OFFS]); cache 843 drivers/infiniband/hw/hfi1/qsfp.c QSFP_LOT_LEN, &cache[QSFP_LOT_OFFS]); cache 848 drivers/infiniband/hw/hfi1/qsfp.c memcpy(bin_buff, &cache[bidx], QSFP_DUMP_CHUNK); cache 215 drivers/infiniband/hw/hfi1/qsfp.h u8 cache[QSFP_MAX_NUM_PAGES * 128]; cache 966 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_mr_cache cache; cache 73 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 75 drivers/infiniband/hw/mlx5/mr.c if (order < cache->ent[0].order) cache 78 drivers/infiniband/hw/mlx5/mr.c return order - cache->ent[0].order; cache 92 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 94 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; cache 117 drivers/infiniband/hw/mlx5/mr.c cache->last_add = jiffies; cache 138 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 139 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; cache 200 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 201 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; cache 342 drivers/infiniband/hw/mlx5/mr.c static int someone_adding(struct mlx5_mr_cache *cache) cache 347 drivers/infiniband/hw/mlx5/mr.c if (cache->ent[i].cur < cache->ent[i].limit) cache 357 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 361 drivers/infiniband/hw/mlx5/mr.c if (cache->stopped) cache 364 drivers/infiniband/hw/mlx5/mr.c ent = &dev->cache.ent[i]; cache 371 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork, cache 376 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork, cache 379 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 395 drivers/infiniband/hw/mlx5/mr.c if (!need_resched() && !someone_adding(cache) && cache 396 drivers/infiniband/hw/mlx5/mr.c time_after(jiffies, cache->last_add + 300 * HZ)) { cache 399 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 401 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); cache 424 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 434 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[entry]; cache 452 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 460 drivers/infiniband/hw/mlx5/mr.c struct 
mlx5_mr_cache *cache = &dev->cache; cache 475 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[i]; cache 487 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 492 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 496 drivers/infiniband/hw/mlx5/mr.c cache->ent[c].miss++; cache 503 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 517 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[c]; cache 519 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 523 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[c]; cache 532 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 537 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 538 drivers/infiniband/hw/mlx5/mr.c struct mlx5_cache_ent *ent = &cache->ent[c]; cache 573 drivers/infiniband/hw/mlx5/mr.c debugfs_remove_recursive(dev->cache.root); cache 574 drivers/infiniband/hw/mlx5/mr.c dev->cache.root = NULL; cache 579 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 587 drivers/infiniband/hw/mlx5/mr.c cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root); cache 590 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[i]; cache 592 drivers/infiniband/hw/mlx5/mr.c dir = debugfs_create_dir(ent->name, cache->root); cache 609 drivers/infiniband/hw/mlx5/mr.c struct mlx5_mr_cache *cache = &dev->cache; cache 614 drivers/infiniband/hw/mlx5/mr.c cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); cache 615 drivers/infiniband/hw/mlx5/mr.c if (!cache->wq) { cache 623 drivers/infiniband/hw/mlx5/mr.c ent = &cache->ent[i]; cache 652 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work); cache 664 drivers/infiniband/hw/mlx5/mr.c if (!dev->cache.wq) cache 667 drivers/infiniband/hw/mlx5/mr.c dev->cache.stopped = 1; cache 668 drivers/infiniband/hw/mlx5/mr.c flush_workqueue(dev->cache.wq); cache 676 drivers/infiniband/hw/mlx5/mr.c destroy_workqueue(dev->cache.wq); cache 5972 drivers/infiniband/hw/qib/qib_iba7322.c ret = qib_refresh_qsfp_cache(ppd, &qd->cache); cache 5981 drivers/infiniband/hw/qib/qib_iba7322.c if (QSFP_IS_ACTIVE_FAR(qd->cache.tech)) cache 5983 drivers/infiniband/hw/qib/qib_iba7322.c else if (qd->cache.atten[1] >= qib_long_atten && cache 5984 drivers/infiniband/hw/qib/qib_iba7322.c QSFP_IS_CU(qd->cache.tech)) cache 7621 drivers/infiniband/hw/qib/qib_iba7322.c struct qib_qsfp_cache *qd = &ppd->cpspec->qsfp_data.cache; cache 179 drivers/infiniband/hw/qib/qib_qsfp.h struct qib_qsfp_cache cache; cache 115 drivers/infiniband/sw/rxe/rxe_pool.c return rxe_type_info[pool->type].cache; cache 126 drivers/infiniband/sw/rxe/rxe_pool.c kmem_cache_destroy(type->cache); cache 127 drivers/infiniband/sw/rxe/rxe_pool.c type->cache = NULL; cache 143 drivers/infiniband/sw/rxe/rxe_pool.c type->cache = cache 147 drivers/infiniband/sw/rxe/rxe_pool.c if (!type->cache) { cache 72 drivers/infiniband/sw/rxe/rxe_pool.h struct kmem_cache *cache; cache 189 drivers/infiniband/ulp/iser/iser_verbs.c params.cache = 0; cache 399 drivers/infiniband/ulp/srp/ib_srp.c fmr_param.cache = 1; cache 762 drivers/iommu/fsl_pamu_domain.c dma_domain->stash_id = get_stash_id(stash_attr->cache, cache 1755 drivers/irqchip/irq-gic-v3-its.c u64 cache, u64 shr, u32 psz, u32 order, cache 1802 drivers/irqchip/irq-gic-v3-its.c cache | cache 1833 drivers/irqchip/irq-gic-v3-its.c cache = GITS_BASER_nC; cache 1951 drivers/irqchip/irq-gic-v3-its.c u64 cache = GITS_BASER_RaWaWb; cache 1957 
drivers/irqchip/irq-gic-v3-its.c cache = GITS_BASER_nCnB; cache 1983 drivers/irqchip/irq-gic-v3-its.c err = its_setup_baser(its, baser, cache, shr, psz, order, indirect); cache 1991 drivers/irqchip/irq-gic-v3-its.c cache = baser->val & GITS_BASER_CACHEABILITY_MASK; cache 32 drivers/macintosh/windfarm_smu_sat.c u8 cache[16]; cache 119 drivers/macintosh/windfarm_smu_sat.c err = i2c_smbus_read_i2c_block_data(sat->i2c, 0x3f, 16, sat->cache); cache 129 drivers/macintosh/windfarm_smu_sat.c 16, 1, sat->cache, 16, false); cache 153 drivers/macintosh/windfarm_smu_sat.c val = ((sat->cache[i] << 8) + sat->cache[i+1]) << sens->shift; cache 157 drivers/macintosh/windfarm_smu_sat.c val = (val * ((sat->cache[i] << 8) + sat->cache[i+1])) >> 4; cache 76 drivers/md/bcache/alloc.c uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) cache 88 drivers/md/bcache/alloc.c struct cache *ca; cache 131 drivers/md/bcache/alloc.c bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) cache 141 drivers/md/bcache/alloc.c void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) cache 154 drivers/md/bcache/alloc.c static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) cache 180 drivers/md/bcache/alloc.c static void invalidate_buckets_lru(struct cache *ca) cache 217 drivers/md/bcache/alloc.c static void invalidate_buckets_fifo(struct cache *ca) cache 240 drivers/md/bcache/alloc.c static void invalidate_buckets_random(struct cache *ca) cache 266 drivers/md/bcache/alloc.c static void invalidate_buckets(struct cache *ca) cache 303 drivers/md/bcache/alloc.c static int bch_allocator_push(struct cache *ca, long bucket) cache 320 drivers/md/bcache/alloc.c struct cache *ca = arg; cache 393 drivers/md/bcache/alloc.c long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait) cache 470 drivers/md/bcache/alloc.c void __bch_bucket_free(struct cache *ca, struct bucket *b) cache 507 drivers/md/bcache/alloc.c struct cache *ca = c->cache_by_alloc[i]; cache 734 drivers/md/bcache/alloc.c int bch_cache_allocator_start(struct cache *ca) cache 520 drivers/md/bcache/bcache.h struct cache *cache[MAX_CACHES_PER_SET]; cache 521 drivers/md/bcache/bcache.h struct cache *cache_by_alloc[MAX_CACHES_PER_SET]; cache 782 drivers/md/bcache/bcache.h static inline struct cache *PTR_CACHE(struct cache_set *c, cache 786 drivers/md/bcache/bcache.h return c->cache[PTR_DEV(k, ptr)]; cache 868 drivers/md/bcache/bcache.h for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++) cache 911 drivers/md/bcache/bcache.h struct cache *ca; cache 948 drivers/md/bcache/bcache.h void bch_count_io_errors(struct cache *ca, blk_status_t error, cache 961 drivers/md/bcache/bcache.h uint8_t bch_inc_gen(struct cache *ca, struct bucket *b); cache 964 drivers/md/bcache/bcache.h bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b); cache 965 drivers/md/bcache/bcache.h void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b); cache 967 drivers/md/bcache/bcache.h void __bch_bucket_free(struct cache *ca, struct bucket *b); cache 970 drivers/md/bcache/bcache.h long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait); cache 983 drivers/md/bcache/bcache.h int bch_prio_write(struct cache *ca, bool wait); cache 1023 drivers/md/bcache/bcache.h int bch_cache_allocator_start(struct cache *ca); cache 1215 drivers/md/bcache/btree.c struct cache *ca; cache 1737 drivers/md/bcache/btree.c struct cache *ca; cache 1764 drivers/md/bcache/btree.c struct cache *ca; cache 1874 
drivers/md/bcache/btree.c struct cache *ca; cache 1964 drivers/md/bcache/btree.c struct cache *ca; cache 53 drivers/md/bcache/extents.c struct cache *ca = PTR_CACHE(c, k, i); cache 74 drivers/md/bcache/extents.c struct cache *ca = PTR_CACHE(c, k, i); cache 81 drivers/md/bcache/io.c void bch_count_io_errors(struct cache *ca, cache 140 drivers/md/bcache/io.c struct cache *ca = PTR_CACHE(c, &b->key, 0); cache 35 drivers/md/bcache/journal.c static int journal_read_bucket(struct cache *ca, struct list_head *list, cache 182 drivers/md/bcache/journal.c struct cache *ca; cache 348 drivers/md/bcache/journal.c struct cache *ca; cache 582 drivers/md/bcache/journal.c struct cache *ca = container_of(ja, struct cache, journal); cache 598 drivers/md/bcache/journal.c static void do_journal_discard(struct cache *ca) cache 642 drivers/md/bcache/journal.c struct cache *ca; cache 759 drivers/md/bcache/journal.c struct cache *ca; cache 190 drivers/md/bcache/movinggc.c static unsigned int bucket_heap_top(struct cache *ca) cache 199 drivers/md/bcache/movinggc.c struct cache *ca; cache 1264 drivers/md/bcache/request.c struct cache *ca; cache 1374 drivers/md/bcache/request.c struct cache *ca; cache 273 drivers/md/bcache/super.c struct cache *ca = bio->bi_private; cache 291 drivers/md/bcache/super.c struct cache *ca; cache 425 drivers/md/bcache/super.c struct cache *ca; cache 504 drivers/md/bcache/super.c struct cache *ca = bio->bi_private; cache 511 drivers/md/bcache/super.c static void prio_io(struct cache *ca, uint64_t bucket, int op, cache 532 drivers/md/bcache/super.c int bch_prio_write(struct cache *ca, bool wait) cache 613 drivers/md/bcache/super.c static void prio_read(struct cache *ca, uint64_t bucket) cache 698 drivers/md/bcache/super.c struct cache *ca; cache 712 drivers/md/bcache/super.c struct cache *ca; cache 1573 drivers/md/bcache/super.c struct cache *ca; cache 1586 drivers/md/bcache/super.c c->cache[ca->sb.nr_this_dev] = NULL; cache 1614 drivers/md/bcache/super.c struct cache *ca; cache 1850 drivers/md/bcache/super.c struct cache *ca; cache 2043 drivers/md/bcache/super.c static bool can_attach_cache(struct cache *ca, struct cache_set *c) cache 2050 drivers/md/bcache/super.c static const char *register_cache_set(struct cache *ca) cache 2058 drivers/md/bcache/super.c if (c->cache[ca->sb.nr_this_dev]) cache 2101 drivers/md/bcache/super.c ca->set->cache[ca->sb.nr_this_dev] = ca; cache 2121 drivers/md/bcache/super.c struct cache *ca = container_of(kobj, struct cache, kobj); cache 2125 drivers/md/bcache/super.c BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca); cache 2126 drivers/md/bcache/super.c ca->set->cache[ca->sb.nr_this_dev] = NULL; cache 2149 drivers/md/bcache/super.c static int cache_alloc(struct cache *ca) cache 2263 drivers/md/bcache/super.c struct block_device *bdev, struct cache *ca) cache 2357 drivers/md/bcache/super.c struct cache *ca; cache 2449 drivers/md/bcache/super.c struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL); cache 995 drivers/md/bcache/sysfs.c struct cache *ca = container_of(kobj, struct cache, kobj); cache 1098 drivers/md/bcache/sysfs.c struct cache *ca = container_of(kobj, struct cache, kobj); cache 499 drivers/md/dm-cache-target.c struct cache *cache; cache 511 drivers/md/dm-cache-target.c static bool writethrough_mode(struct cache *cache) cache 513 drivers/md/dm-cache-target.c return cache->features.io_mode == CM_IO_WRITETHROUGH; cache 516 drivers/md/dm-cache-target.c static bool writeback_mode(struct cache *cache) cache 518 drivers/md/dm-cache-target.c return 
cache->features.io_mode == CM_IO_WRITEBACK; cache 521 drivers/md/dm-cache-target.c static inline bool passthrough_mode(struct cache *cache) cache 523 drivers/md/dm-cache-target.c return unlikely(cache->features.io_mode == CM_IO_PASSTHROUGH); cache 528 drivers/md/dm-cache-target.c static void wake_deferred_bio_worker(struct cache *cache) cache 530 drivers/md/dm-cache-target.c queue_work(cache->wq, &cache->deferred_bio_worker); cache 533 drivers/md/dm-cache-target.c static void wake_migration_worker(struct cache *cache) cache 535 drivers/md/dm-cache-target.c if (passthrough_mode(cache)) cache 538 drivers/md/dm-cache-target.c queue_work(cache->wq, &cache->migration_worker); cache 543 drivers/md/dm-cache-target.c static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache) cache 545 drivers/md/dm-cache-target.c return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO); cache 548 drivers/md/dm-cache-target.c static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell) cache 550 drivers/md/dm-cache-target.c dm_bio_prison_free_cell_v2(cache->prison, cell); cache 553 drivers/md/dm-cache-target.c static struct dm_cache_migration *alloc_migration(struct cache *cache) cache 557 drivers/md/dm-cache-target.c mg = mempool_alloc(&cache->migration_pool, GFP_NOIO); cache 561 drivers/md/dm-cache-target.c mg->cache = cache; cache 562 drivers/md/dm-cache-target.c atomic_inc(&cache->nr_allocated_migrations); cache 569 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 571 drivers/md/dm-cache-target.c if (atomic_dec_and_test(&cache->nr_allocated_migrations)) cache 572 drivers/md/dm-cache-target.c wake_up(&cache->migration_wait); cache 574 drivers/md/dm-cache-target.c mempool_free(mg, &cache->migration_pool); cache 631 drivers/md/dm-cache-target.c static void defer_bio(struct cache *cache, struct bio *bio) cache 635 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 636 drivers/md/dm-cache-target.c bio_list_add(&cache->deferred_bios, bio); cache 637 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 639 drivers/md/dm-cache-target.c wake_deferred_bio_worker(cache); cache 642 drivers/md/dm-cache-target.c static void defer_bios(struct cache *cache, struct bio_list *bios) cache 646 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 647 drivers/md/dm-cache-target.c bio_list_merge(&cache->deferred_bios, bios); cache 649 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 651 drivers/md/dm-cache-target.c wake_deferred_bio_worker(cache); cache 656 drivers/md/dm-cache-target.c static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bio *bio) cache 664 drivers/md/dm-cache-target.c cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */ cache 667 drivers/md/dm-cache-target.c r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell); cache 672 drivers/md/dm-cache-target.c free_prison_cell(cache, cell_prealloc); cache 677 drivers/md/dm-cache-target.c free_prison_cell(cache, cell_prealloc); cache 687 drivers/md/dm-cache-target.c static bool is_dirty(struct cache *cache, dm_cblock_t b) cache 689 drivers/md/dm-cache-target.c return test_bit(from_cblock(b), cache->dirty_bitset); cache 692 drivers/md/dm-cache-target.c static void set_dirty(struct cache *cache, dm_cblock_t cblock) cache 694 drivers/md/dm-cache-target.c if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) { cache 
695 drivers/md/dm-cache-target.c atomic_inc(&cache->nr_dirty); cache 696 drivers/md/dm-cache-target.c policy_set_dirty(cache->policy, cblock); cache 704 drivers/md/dm-cache-target.c static void force_set_dirty(struct cache *cache, dm_cblock_t cblock) cache 706 drivers/md/dm-cache-target.c if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) cache 707 drivers/md/dm-cache-target.c atomic_inc(&cache->nr_dirty); cache 708 drivers/md/dm-cache-target.c policy_set_dirty(cache->policy, cblock); cache 711 drivers/md/dm-cache-target.c static void force_clear_dirty(struct cache *cache, dm_cblock_t cblock) cache 713 drivers/md/dm-cache-target.c if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) { cache 714 drivers/md/dm-cache-target.c if (atomic_dec_return(&cache->nr_dirty) == 0) cache 715 drivers/md/dm-cache-target.c dm_table_event(cache->ti->table); cache 718 drivers/md/dm-cache-target.c policy_clear_dirty(cache->policy, cblock); cache 723 drivers/md/dm-cache-target.c static bool block_size_is_power_of_two(struct cache *cache) cache 725 drivers/md/dm-cache-target.c return cache->sectors_per_block_shift >= 0; cache 739 drivers/md/dm-cache-target.c static dm_block_t oblocks_per_dblock(struct cache *cache) cache 741 drivers/md/dm-cache-target.c dm_block_t oblocks = cache->discard_block_size; cache 743 drivers/md/dm-cache-target.c if (block_size_is_power_of_two(cache)) cache 744 drivers/md/dm-cache-target.c oblocks >>= cache->sectors_per_block_shift; cache 746 drivers/md/dm-cache-target.c oblocks = block_div(oblocks, cache->sectors_per_block); cache 751 drivers/md/dm-cache-target.c static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock) cache 754 drivers/md/dm-cache-target.c oblocks_per_dblock(cache))); cache 757 drivers/md/dm-cache-target.c static void set_discard(struct cache *cache, dm_dblock_t b) cache 761 drivers/md/dm-cache-target.c BUG_ON(from_dblock(b) >= from_dblock(cache->discard_nr_blocks)); cache 762 drivers/md/dm-cache-target.c atomic_inc(&cache->stats.discard_count); cache 764 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 765 drivers/md/dm-cache-target.c set_bit(from_dblock(b), cache->discard_bitset); cache 766 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 769 drivers/md/dm-cache-target.c static void clear_discard(struct cache *cache, dm_dblock_t b) cache 773 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 774 drivers/md/dm-cache-target.c clear_bit(from_dblock(b), cache->discard_bitset); cache 775 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 778 drivers/md/dm-cache-target.c static bool is_discarded(struct cache *cache, dm_dblock_t b) cache 783 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 784 drivers/md/dm-cache-target.c r = test_bit(from_dblock(b), cache->discard_bitset); cache 785 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 790 drivers/md/dm-cache-target.c static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b) cache 795 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 796 drivers/md/dm-cache-target.c r = test_bit(from_dblock(oblock_to_dblock(cache, b)), cache 797 drivers/md/dm-cache-target.c cache->discard_bitset); cache 798 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 806 drivers/md/dm-cache-target.c static void remap_to_origin(struct cache *cache, struct bio *bio) cache 808 
drivers/md/dm-cache-target.c bio_set_dev(bio, cache->origin_dev->bdev); cache 811 drivers/md/dm-cache-target.c static void remap_to_cache(struct cache *cache, struct bio *bio, cache 817 drivers/md/dm-cache-target.c bio_set_dev(bio, cache->cache_dev->bdev); cache 818 drivers/md/dm-cache-target.c if (!block_size_is_power_of_two(cache)) cache 820 drivers/md/dm-cache-target.c (block * cache->sectors_per_block) + cache 821 drivers/md/dm-cache-target.c sector_div(bi_sector, cache->sectors_per_block); cache 824 drivers/md/dm-cache-target.c (block << cache->sectors_per_block_shift) | cache 825 drivers/md/dm-cache-target.c (bi_sector & (cache->sectors_per_block - 1)); cache 828 drivers/md/dm-cache-target.c static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) cache 833 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 834 drivers/md/dm-cache-target.c if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) && cache 838 drivers/md/dm-cache-target.c cache->need_tick_bio = false; cache 840 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 843 drivers/md/dm-cache-target.c static void __remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, cache 847 drivers/md/dm-cache-target.c check_if_tick_bio_needed(cache, bio); cache 848 drivers/md/dm-cache-target.c remap_to_origin(cache, bio); cache 850 drivers/md/dm-cache-target.c clear_discard(cache, oblock_to_dblock(cache, oblock)); cache 853 drivers/md/dm-cache-target.c static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio, cache 857 drivers/md/dm-cache-target.c __remap_to_origin_clear_discard(cache, bio, oblock, true); cache 860 drivers/md/dm-cache-target.c static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, cache 863 drivers/md/dm-cache-target.c check_if_tick_bio_needed(cache, bio); cache 864 drivers/md/dm-cache-target.c remap_to_cache(cache, bio, cblock); cache 866 drivers/md/dm-cache-target.c set_dirty(cache, cblock); cache 867 drivers/md/dm-cache-target.c clear_discard(cache, oblock_to_dblock(cache, oblock)); cache 871 drivers/md/dm-cache-target.c static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) cache 875 drivers/md/dm-cache-target.c if (!block_size_is_power_of_two(cache)) cache 876 drivers/md/dm-cache-target.c (void) sector_div(block_nr, cache->sectors_per_block); cache 878 drivers/md/dm-cache-target.c block_nr >>= cache->sectors_per_block_shift; cache 883 drivers/md/dm-cache-target.c static bool accountable_bio(struct cache *cache, struct bio *bio) cache 888 drivers/md/dm-cache-target.c static void accounted_begin(struct cache *cache, struct bio *bio) cache 892 drivers/md/dm-cache-target.c if (accountable_bio(cache, bio)) { cache 895 drivers/md/dm-cache-target.c iot_io_begin(&cache->tracker, pb->len); cache 899 drivers/md/dm-cache-target.c static void accounted_complete(struct cache *cache, struct bio *bio) cache 903 drivers/md/dm-cache-target.c iot_io_end(&cache->tracker, pb->len); cache 906 drivers/md/dm-cache-target.c static void accounted_request(struct cache *cache, struct bio *bio) cache 908 drivers/md/dm-cache-target.c accounted_begin(cache, bio); cache 914 drivers/md/dm-cache-target.c struct cache *cache = context; cache 915 drivers/md/dm-cache-target.c accounted_request(cache, bio); cache 922 drivers/md/dm-cache-target.c static void remap_to_origin_and_cache(struct cache *cache, struct bio *bio, cache 925 drivers/md/dm-cache-target.c struct bio *origin_bio = bio_clone_fast(bio, GFP_NOIO, 
&cache->bs); cache 934 drivers/md/dm-cache-target.c __remap_to_origin_clear_discard(cache, origin_bio, oblock, false); cache 937 drivers/md/dm-cache-target.c remap_to_cache(cache, bio, cblock); cache 943 drivers/md/dm-cache-target.c static enum cache_metadata_mode get_cache_mode(struct cache *cache) cache 945 drivers/md/dm-cache-target.c return cache->features.mode; cache 948 drivers/md/dm-cache-target.c static const char *cache_device_name(struct cache *cache) cache 950 drivers/md/dm-cache-target.c return dm_device_name(dm_table_get_md(cache->ti->table)); cache 953 drivers/md/dm-cache-target.c static void notify_mode_switch(struct cache *cache, enum cache_metadata_mode mode) cache 961 drivers/md/dm-cache-target.c dm_table_event(cache->ti->table); cache 963 drivers/md/dm-cache-target.c cache_device_name(cache), descs[(int)mode]); cache 966 drivers/md/dm-cache-target.c static void set_cache_mode(struct cache *cache, enum cache_metadata_mode new_mode) cache 969 drivers/md/dm-cache-target.c enum cache_metadata_mode old_mode = get_cache_mode(cache); cache 971 drivers/md/dm-cache-target.c if (dm_cache_metadata_needs_check(cache->cmd, &needs_check)) { cache 973 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 979 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 993 drivers/md/dm-cache-target.c dm_cache_metadata_set_read_only(cache->cmd); cache 997 drivers/md/dm-cache-target.c dm_cache_metadata_set_read_write(cache->cmd); cache 1001 drivers/md/dm-cache-target.c cache->features.mode = new_mode; cache 1004 drivers/md/dm-cache-target.c notify_mode_switch(cache, new_mode); cache 1007 drivers/md/dm-cache-target.c static void abort_transaction(struct cache *cache) cache 1009 drivers/md/dm-cache-target.c const char *dev_name = cache_device_name(cache); cache 1011 drivers/md/dm-cache-target.c if (get_cache_mode(cache) >= CM_READ_ONLY) cache 1014 drivers/md/dm-cache-target.c if (dm_cache_metadata_set_needs_check(cache->cmd)) { cache 1016 drivers/md/dm-cache-target.c set_cache_mode(cache, CM_FAIL); cache 1020 drivers/md/dm-cache-target.c if (dm_cache_metadata_abort(cache->cmd)) { cache 1022 drivers/md/dm-cache-target.c set_cache_mode(cache, CM_FAIL); cache 1026 drivers/md/dm-cache-target.c static void metadata_operation_failed(struct cache *cache, const char *op, int r) cache 1029 drivers/md/dm-cache-target.c cache_device_name(cache), op, r); cache 1030 drivers/md/dm-cache-target.c abort_transaction(cache); cache 1031 drivers/md/dm-cache-target.c set_cache_mode(cache, CM_READ_ONLY); cache 1036 drivers/md/dm-cache-target.c static void load_stats(struct cache *cache) cache 1040 drivers/md/dm-cache-target.c dm_cache_metadata_get_stats(cache->cmd, &stats); cache 1041 drivers/md/dm-cache-target.c atomic_set(&cache->stats.read_hit, stats.read_hits); cache 1042 drivers/md/dm-cache-target.c atomic_set(&cache->stats.read_miss, stats.read_misses); cache 1043 drivers/md/dm-cache-target.c atomic_set(&cache->stats.write_hit, stats.write_hits); cache 1044 drivers/md/dm-cache-target.c atomic_set(&cache->stats.write_miss, stats.write_misses); cache 1047 drivers/md/dm-cache-target.c static void save_stats(struct cache *cache) cache 1051 drivers/md/dm-cache-target.c if (get_cache_mode(cache) >= CM_READ_ONLY) cache 1054 drivers/md/dm-cache-target.c stats.read_hits = atomic_read(&cache->stats.read_hit); cache 1055 drivers/md/dm-cache-target.c stats.read_misses = atomic_read(&cache->stats.read_miss); cache 1056 drivers/md/dm-cache-target.c stats.write_hits = atomic_read(&cache->stats.write_hit); cache 
1057 drivers/md/dm-cache-target.c stats.write_misses = atomic_read(&cache->stats.write_miss); cache 1059 drivers/md/dm-cache-target.c dm_cache_metadata_set_stats(cache->cmd, &stats); cache 1086 drivers/md/dm-cache-target.c static void inc_io_migrations(struct cache *cache) cache 1088 drivers/md/dm-cache-target.c atomic_inc(&cache->nr_io_migrations); cache 1091 drivers/md/dm-cache-target.c static void dec_io_migrations(struct cache *cache) cache 1093 drivers/md/dm-cache-target.c atomic_dec(&cache->nr_io_migrations); cache 1101 drivers/md/dm-cache-target.c static void calc_discard_block_range(struct cache *cache, struct bio *bio, cache 1107 drivers/md/dm-cache-target.c *b = to_dblock(dm_sector_div_up(sb, cache->discard_block_size)); cache 1109 drivers/md/dm-cache-target.c if (se - sb < cache->discard_block_size) cache 1112 drivers/md/dm-cache-target.c *e = to_dblock(block_div(se, cache->discard_block_size)); cache 1117 drivers/md/dm-cache-target.c static void prevent_background_work(struct cache *cache) cache 1120 drivers/md/dm-cache-target.c down_write(&cache->background_work_lock); cache 1124 drivers/md/dm-cache-target.c static void allow_background_work(struct cache *cache) cache 1127 drivers/md/dm-cache-target.c up_write(&cache->background_work_lock); cache 1131 drivers/md/dm-cache-target.c static bool background_work_begin(struct cache *cache) cache 1136 drivers/md/dm-cache-target.c r = down_read_trylock(&cache->background_work_lock); cache 1142 drivers/md/dm-cache-target.c static void background_work_end(struct cache *cache) cache 1145 drivers/md/dm-cache-target.c up_read(&cache->background_work_lock); cache 1151 drivers/md/dm-cache-target.c static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) cache 1154 drivers/md/dm-cache-target.c (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); cache 1157 drivers/md/dm-cache-target.c static bool optimisable_bio(struct cache *cache, struct bio *bio, dm_oblock_t block) cache 1159 drivers/md/dm-cache-target.c return writeback_mode(cache) && cache 1160 drivers/md/dm-cache-target.c (is_discarded_oblock(cache, block) || bio_writes_complete_block(cache, bio)); cache 1167 drivers/md/dm-cache-target.c dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws); cache 1183 drivers/md/dm-cache-target.c queue_continuation(mg->cache->wq, &mg->k); cache 1189 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1191 drivers/md/dm-cache-target.c o_region.bdev = cache->origin_dev->bdev; cache 1192 drivers/md/dm-cache-target.c o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block; cache 1193 drivers/md/dm-cache-target.c o_region.count = cache->sectors_per_block; cache 1195 drivers/md/dm-cache-target.c c_region.bdev = cache->cache_dev->bdev; cache 1196 drivers/md/dm-cache-target.c c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block; cache 1197 drivers/md/dm-cache-target.c c_region.count = cache->sectors_per_block; cache 1200 drivers/md/dm-cache-target.c dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k); cache 1202 drivers/md/dm-cache-target.c dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k); cache 1205 drivers/md/dm-cache-target.c static void bio_drop_shared_lock(struct cache *cache, struct bio *bio) cache 1209 drivers/md/dm-cache-target.c if (pb->cell && dm_cell_put_v2(cache->prison, pb->cell)) cache 1210 drivers/md/dm-cache-target.c free_prison_cell(cache, pb->cell); cache 1217 drivers/md/dm-cache-target.c 
struct cache *cache = mg->cache; cache 1225 drivers/md/dm-cache-target.c queue_continuation(cache->wq, &mg->k); cache 1241 drivers/md/dm-cache-target.c remap_to_cache(mg->cache, bio, mg->op->cblock); cache 1243 drivers/md/dm-cache-target.c remap_to_origin(mg->cache, bio); cache 1246 drivers/md/dm-cache-target.c accounted_request(mg->cache, bio); cache 1263 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1268 drivers/md/dm-cache-target.c update_stats(&cache->stats, op->op); cache 1272 drivers/md/dm-cache-target.c clear_discard(cache, oblock_to_dblock(cache, op->oblock)); cache 1273 drivers/md/dm-cache-target.c policy_complete_background_work(cache->policy, op, success); cache 1277 drivers/md/dm-cache-target.c force_set_dirty(cache, cblock); cache 1285 drivers/md/dm-cache-target.c force_clear_dirty(cache, cblock); cache 1286 drivers/md/dm-cache-target.c dec_io_migrations(cache); cache 1295 drivers/md/dm-cache-target.c force_clear_dirty(cache, cblock); cache 1296 drivers/md/dm-cache-target.c policy_complete_background_work(cache->policy, op, success); cache 1297 drivers/md/dm-cache-target.c dec_io_migrations(cache); cache 1302 drivers/md/dm-cache-target.c force_clear_dirty(cache, cblock); cache 1303 drivers/md/dm-cache-target.c policy_complete_background_work(cache->policy, op, success); cache 1304 drivers/md/dm-cache-target.c dec_io_migrations(cache); cache 1310 drivers/md/dm-cache-target.c if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) cache 1311 drivers/md/dm-cache-target.c free_prison_cell(cache, mg->cell); cache 1315 drivers/md/dm-cache-target.c defer_bios(cache, &bios); cache 1316 drivers/md/dm-cache-target.c wake_migration_worker(cache); cache 1318 drivers/md/dm-cache-target.c background_work_end(cache); cache 1331 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1336 drivers/md/dm-cache-target.c r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock); cache 1339 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 1340 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_insert_mapping", r); cache 1349 drivers/md/dm-cache-target.c r = dm_cache_remove_mapping(cache->cmd, op->cblock); cache 1352 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 1353 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_remove_mapping", r); cache 1379 drivers/md/dm-cache-target.c continue_after_commit(&cache->committer, &mg->k); cache 1380 drivers/md/dm-cache-target.c schedule_commit(&cache->committer); cache 1417 drivers/md/dm-cache-target.c r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell, cache 1433 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1437 drivers/md/dm-cache-target.c if ((!is_policy_promote && !is_dirty(cache, op->cblock)) || cache 1438 drivers/md/dm-cache-target.c is_discarded_oblock(cache, op->oblock)) { cache 1457 drivers/md/dm-cache-target.c if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) { cache 1461 drivers/md/dm-cache-target.c bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio); cache 1464 drivers/md/dm-cache-target.c inc_io_migrations(mg->cache); cache 1486 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1489 drivers/md/dm-cache-target.c prealloc = alloc_prison_cell(cache); cache 1497 drivers/md/dm-cache-target.c r = dm_cell_lock_v2(cache->prison, &key, cache 1501 drivers/md/dm-cache-target.c free_prison_cell(cache, prealloc); cache 1507 drivers/md/dm-cache-target.c 
free_prison_cell(cache, prealloc); cache 1517 drivers/md/dm-cache-target.c static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio) cache 1521 drivers/md/dm-cache-target.c if (!background_work_begin(cache)) { cache 1522 drivers/md/dm-cache-target.c policy_complete_background_work(cache->policy, op, false); cache 1526 drivers/md/dm-cache-target.c mg = alloc_migration(cache); cache 1532 drivers/md/dm-cache-target.c inc_io_migrations(cache); cache 1544 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1547 drivers/md/dm-cache-target.c if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios)) cache 1548 drivers/md/dm-cache-target.c free_prison_cell(cache, mg->cell); cache 1554 drivers/md/dm-cache-target.c defer_bios(cache, &bios); cache 1556 drivers/md/dm-cache-target.c background_work_end(cache); cache 1565 drivers/md/dm-cache-target.c static int invalidate_cblock(struct cache *cache, dm_cblock_t cblock) cache 1567 drivers/md/dm-cache-target.c int r = policy_invalidate_mapping(cache->policy, cblock); cache 1569 drivers/md/dm-cache-target.c r = dm_cache_remove_mapping(cache->cmd, cblock); cache 1572 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 1573 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_remove_mapping", r); cache 1583 drivers/md/dm-cache-target.c DMERR("%s: policy_invalidate_mapping failed", cache_device_name(cache)); cache 1592 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1594 drivers/md/dm-cache-target.c r = invalidate_cblock(cache, mg->invalidate_cblock); cache 1601 drivers/md/dm-cache-target.c continue_after_commit(&cache->committer, &mg->k); cache 1602 drivers/md/dm-cache-target.c remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock); cache 1604 drivers/md/dm-cache-target.c schedule_commit(&cache->committer); cache 1611 drivers/md/dm-cache-target.c struct cache *cache = mg->cache; cache 1614 drivers/md/dm-cache-target.c prealloc = alloc_prison_cell(cache); cache 1617 drivers/md/dm-cache-target.c r = dm_cell_lock_v2(cache->prison, &key, cache 1620 drivers/md/dm-cache-target.c free_prison_cell(cache, prealloc); cache 1626 drivers/md/dm-cache-target.c free_prison_cell(cache, prealloc); cache 1637 drivers/md/dm-cache-target.c queue_work(cache->wq, &mg->k.ws); cache 1643 drivers/md/dm-cache-target.c static int invalidate_start(struct cache *cache, dm_cblock_t cblock, cache 1648 drivers/md/dm-cache-target.c if (!background_work_begin(cache)) cache 1651 drivers/md/dm-cache-target.c mg = alloc_migration(cache); cache 1669 drivers/md/dm-cache-target.c static enum busy spare_migration_bandwidth(struct cache *cache) cache 1671 drivers/md/dm-cache-target.c bool idle = iot_idle_for(&cache->tracker, HZ); cache 1672 drivers/md/dm-cache-target.c sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) * cache 1673 drivers/md/dm-cache-target.c cache->sectors_per_block; cache 1675 drivers/md/dm-cache-target.c if (idle && current_volume <= cache->migration_threshold) cache 1681 drivers/md/dm-cache-target.c static void inc_hit_counter(struct cache *cache, struct bio *bio) cache 1684 drivers/md/dm-cache-target.c &cache->stats.read_hit : &cache->stats.write_hit); cache 1687 drivers/md/dm-cache-target.c static void inc_miss_counter(struct cache *cache, struct bio *bio) cache 1690 drivers/md/dm-cache-target.c &cache->stats.read_miss : &cache->stats.write_miss); cache 1695 drivers/md/dm-cache-target.c static int map_bio(struct cache *cache, struct bio *bio, 
dm_oblock_t block, cache 1704 drivers/md/dm-cache-target.c rb = bio_detain_shared(cache, block, bio); cache 1718 drivers/md/dm-cache-target.c if (optimisable_bio(cache, bio, block)) { cache 1721 drivers/md/dm-cache-target.c r = policy_lookup_with_work(cache->policy, block, &cblock, data_dir, true, &op); cache 1724 drivers/md/dm-cache-target.c cache_device_name(cache), r); cache 1730 drivers/md/dm-cache-target.c bio_drop_shared_lock(cache, bio); cache 1732 drivers/md/dm-cache-target.c mg_start(cache, op, bio); cache 1736 drivers/md/dm-cache-target.c r = policy_lookup(cache->policy, block, &cblock, data_dir, false, &background_queued); cache 1739 drivers/md/dm-cache-target.c cache_device_name(cache), r); cache 1745 drivers/md/dm-cache-target.c wake_migration_worker(cache); cache 1754 drivers/md/dm-cache-target.c inc_miss_counter(cache, bio); cache 1756 drivers/md/dm-cache-target.c accounted_begin(cache, bio); cache 1757 drivers/md/dm-cache-target.c remap_to_origin_clear_discard(cache, bio, block); cache 1770 drivers/md/dm-cache-target.c inc_hit_counter(cache, bio); cache 1776 drivers/md/dm-cache-target.c if (passthrough_mode(cache)) { cache 1778 drivers/md/dm-cache-target.c bio_drop_shared_lock(cache, bio); cache 1779 drivers/md/dm-cache-target.c atomic_inc(&cache->stats.demotion); cache 1780 drivers/md/dm-cache-target.c invalidate_start(cache, cblock, block, bio); cache 1782 drivers/md/dm-cache-target.c remap_to_origin_clear_discard(cache, bio, block); cache 1784 drivers/md/dm-cache-target.c if (bio_data_dir(bio) == WRITE && writethrough_mode(cache) && cache 1785 drivers/md/dm-cache-target.c !is_dirty(cache, cblock)) { cache 1786 drivers/md/dm-cache-target.c remap_to_origin_and_cache(cache, bio, block, cblock); cache 1787 drivers/md/dm-cache-target.c accounted_begin(cache, bio); cache 1789 drivers/md/dm-cache-target.c remap_to_cache_dirty(cache, bio, block, cblock); cache 1801 drivers/md/dm-cache-target.c accounted_complete(cache, bio); cache 1802 drivers/md/dm-cache-target.c issue_after_commit(&cache->committer, bio); cache 1810 drivers/md/dm-cache-target.c static bool process_bio(struct cache *cache, struct bio *bio) cache 1814 drivers/md/dm-cache-target.c if (map_bio(cache, bio, get_bio_block(cache, bio), &commit_needed) == DM_MAPIO_REMAPPED) cache 1823 drivers/md/dm-cache-target.c static int commit(struct cache *cache, bool clean_shutdown) cache 1827 drivers/md/dm-cache-target.c if (get_cache_mode(cache) >= CM_READ_ONLY) cache 1830 drivers/md/dm-cache-target.c atomic_inc(&cache->stats.commit_count); cache 1831 drivers/md/dm-cache-target.c r = dm_cache_commit(cache->cmd, clean_shutdown); cache 1833 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_commit", r); cache 1843 drivers/md/dm-cache-target.c struct cache *cache = context; cache 1845 drivers/md/dm-cache-target.c if (dm_cache_changed_this_transaction(cache->cmd)) cache 1846 drivers/md/dm-cache-target.c return errno_to_blk_status(commit(cache, false)); cache 1853 drivers/md/dm-cache-target.c static bool process_flush_bio(struct cache *cache, struct bio *bio) cache 1858 drivers/md/dm-cache-target.c remap_to_origin(cache, bio); cache 1860 drivers/md/dm-cache-target.c remap_to_cache(cache, bio, 0); cache 1862 drivers/md/dm-cache-target.c issue_after_commit(&cache->committer, bio); cache 1866 drivers/md/dm-cache-target.c static bool process_discard_bio(struct cache *cache, struct bio *bio) cache 1873 drivers/md/dm-cache-target.c calc_discard_block_range(cache, bio, &b, &e); cache 1875 drivers/md/dm-cache-target.c 
set_discard(cache, b); cache 1879 drivers/md/dm-cache-target.c if (cache->features.discard_passdown) { cache 1880 drivers/md/dm-cache-target.c remap_to_origin(cache, bio); cache 1890 drivers/md/dm-cache-target.c struct cache *cache = container_of(ws, struct cache, deferred_bio_worker); cache 1899 drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 1900 drivers/md/dm-cache-target.c bio_list_merge(&bios, &cache->deferred_bios); cache 1901 drivers/md/dm-cache-target.c bio_list_init(&cache->deferred_bios); cache 1902 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 1906 drivers/md/dm-cache-target.c commit_needed = process_flush_bio(cache, bio) || commit_needed; cache 1909 drivers/md/dm-cache-target.c commit_needed = process_discard_bio(cache, bio) || commit_needed; cache 1912 drivers/md/dm-cache-target.c commit_needed = process_bio(cache, bio) || commit_needed; cache 1916 drivers/md/dm-cache-target.c schedule_commit(&cache->committer); cache 1923 drivers/md/dm-cache-target.c static void requeue_deferred_bios(struct cache *cache) cache 1929 drivers/md/dm-cache-target.c bio_list_merge(&bios, &cache->deferred_bios); cache 1930 drivers/md/dm-cache-target.c bio_list_init(&cache->deferred_bios); cache 1944 drivers/md/dm-cache-target.c struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); cache 1946 drivers/md/dm-cache-target.c policy_tick(cache->policy, true); cache 1947 drivers/md/dm-cache-target.c wake_migration_worker(cache); cache 1948 drivers/md/dm-cache-target.c schedule_commit(&cache->committer); cache 1949 drivers/md/dm-cache-target.c queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); cache 1956 drivers/md/dm-cache-target.c struct cache *cache = container_of(ws, struct cache, migration_worker); cache 1960 drivers/md/dm-cache-target.c b = spare_migration_bandwidth(cache); cache 1962 drivers/md/dm-cache-target.c r = policy_get_background_work(cache->policy, b == IDLE, &op); cache 1968 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 1972 drivers/md/dm-cache-target.c r = mg_start(cache, op, NULL); cache 1986 drivers/md/dm-cache-target.c static void destroy(struct cache *cache) cache 1990 drivers/md/dm-cache-target.c mempool_exit(&cache->migration_pool); cache 1992 drivers/md/dm-cache-target.c if (cache->prison) cache 1993 drivers/md/dm-cache-target.c dm_bio_prison_destroy_v2(cache->prison); cache 1995 drivers/md/dm-cache-target.c if (cache->wq) cache 1996 drivers/md/dm-cache-target.c destroy_workqueue(cache->wq); cache 1998 drivers/md/dm-cache-target.c if (cache->dirty_bitset) cache 1999 drivers/md/dm-cache-target.c free_bitset(cache->dirty_bitset); cache 2001 drivers/md/dm-cache-target.c if (cache->discard_bitset) cache 2002 drivers/md/dm-cache-target.c free_bitset(cache->discard_bitset); cache 2004 drivers/md/dm-cache-target.c if (cache->copier) cache 2005 drivers/md/dm-cache-target.c dm_kcopyd_client_destroy(cache->copier); cache 2007 drivers/md/dm-cache-target.c if (cache->cmd) cache 2008 drivers/md/dm-cache-target.c dm_cache_metadata_close(cache->cmd); cache 2010 drivers/md/dm-cache-target.c if (cache->metadata_dev) cache 2011 drivers/md/dm-cache-target.c dm_put_device(cache->ti, cache->metadata_dev); cache 2013 drivers/md/dm-cache-target.c if (cache->origin_dev) cache 2014 drivers/md/dm-cache-target.c dm_put_device(cache->ti, cache->origin_dev); cache 2016 drivers/md/dm-cache-target.c if (cache->cache_dev) cache 2017 drivers/md/dm-cache-target.c dm_put_device(cache->ti, cache->cache_dev); 
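; -- editor's note: entries 1899-1902 and 1929-1930 above (process_deferred_bios and
; -- requeue_deferred_bios) show the usual take-all-under-lock drain of a shared bio
; -- list. A minimal sketch, assuming only the kernel bio_list and spinlock APIs; the
; -- function name and the bio_endio stand-in are illustrative.

/* Steal the whole pending list while holding the irq-safe lock, reset the
 * shared list, then process the private copy with the lock dropped. */
#include <linux/bio.h>
#include <linux/spinlock.h>

static void drain_deferred(spinlock_t *lock, struct bio_list *pending)
{
	struct bio_list bios;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(lock, flags);
	bio_list_merge(&bios, pending);		/* take everything queued so far */
	bio_list_init(pending);			/* leave the shared list empty */
	spin_unlock_irqrestore(lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio);	/* stand-in: the real code dispatches flush/discard/map */
}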
cache 2019 drivers/md/dm-cache-target.c if (cache->policy) cache 2020 drivers/md/dm-cache-target.c dm_cache_policy_destroy(cache->policy); cache 2022 drivers/md/dm-cache-target.c for (i = 0; i < cache->nr_ctr_args ; i++) cache 2023 drivers/md/dm-cache-target.c kfree(cache->ctr_args[i]); cache 2024 drivers/md/dm-cache-target.c kfree(cache->ctr_args); cache 2026 drivers/md/dm-cache-target.c bioset_exit(&cache->bs); cache 2028 drivers/md/dm-cache-target.c kfree(cache); cache 2033 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 2035 drivers/md/dm-cache-target.c destroy(cache); cache 2342 drivers/md/dm-cache-target.c static int process_config_option(struct cache *cache, const char *key, const char *value) cache 2350 drivers/md/dm-cache-target.c cache->migration_threshold = tmp; cache 2357 drivers/md/dm-cache-target.c static int set_config_value(struct cache *cache, const char *key, const char *value) cache 2359 drivers/md/dm-cache-target.c int r = process_config_option(cache, key, value); cache 2362 drivers/md/dm-cache-target.c r = policy_set_config_value(cache->policy, key, value); cache 2370 drivers/md/dm-cache-target.c static int set_config_values(struct cache *cache, int argc, const char **argv) cache 2380 drivers/md/dm-cache-target.c r = set_config_value(cache, argv[0], argv[1]); cache 2391 drivers/md/dm-cache-target.c static int create_cache_policy(struct cache *cache, struct cache_args *ca, cache 2395 drivers/md/dm-cache-target.c cache->cache_size, cache 2396 drivers/md/dm-cache-target.c cache->origin_sectors, cache 2397 drivers/md/dm-cache-target.c cache->sectors_per_block); cache 2402 drivers/md/dm-cache-target.c cache->policy = p; cache 2403 drivers/md/dm-cache-target.c BUG_ON(!cache->policy); cache 2434 drivers/md/dm-cache-target.c static void set_cache_size(struct cache *cache, dm_cblock_t size) cache 2438 drivers/md/dm-cache-target.c if (nr_blocks > (1 << 20) && cache->cache_size != size) cache 2444 drivers/md/dm-cache-target.c cache->cache_size = size; cache 2455 drivers/md/dm-cache-target.c struct cache *cache = container_of(cb, struct cache, callbacks); cache 2457 drivers/md/dm-cache-target.c return is_congested(cache->origin_dev, bdi_bits) || cache 2458 drivers/md/dm-cache-target.c is_congested(cache->cache_dev, bdi_bits); cache 2463 drivers/md/dm-cache-target.c static int cache_create(struct cache_args *ca, struct cache **result) cache 2467 drivers/md/dm-cache-target.c struct cache *cache; cache 2473 drivers/md/dm-cache-target.c cache = kzalloc(sizeof(*cache), GFP_KERNEL); cache 2474 drivers/md/dm-cache-target.c if (!cache) cache 2477 drivers/md/dm-cache-target.c cache->ti = ca->ti; cache 2478 drivers/md/dm-cache-target.c ti->private = cache; cache 2487 drivers/md/dm-cache-target.c cache->features = ca->features; cache 2488 drivers/md/dm-cache-target.c if (writethrough_mode(cache)) { cache 2490 drivers/md/dm-cache-target.c r = bioset_init(&cache->bs, BIO_POOL_SIZE, 0, 0); cache 2495 drivers/md/dm-cache-target.c cache->callbacks.congested_fn = cache_is_congested; cache 2496 drivers/md/dm-cache-target.c dm_table_add_target_callbacks(ti->table, &cache->callbacks); cache 2498 drivers/md/dm-cache-target.c cache->metadata_dev = ca->metadata_dev; cache 2499 drivers/md/dm-cache-target.c cache->origin_dev = ca->origin_dev; cache 2500 drivers/md/dm-cache-target.c cache->cache_dev = ca->cache_dev; cache 2504 drivers/md/dm-cache-target.c origin_blocks = cache->origin_sectors = ca->origin_sectors; cache 2506 drivers/md/dm-cache-target.c cache->origin_blocks = 
to_oblock(origin_blocks); cache 2508 drivers/md/dm-cache-target.c cache->sectors_per_block = ca->block_size; cache 2509 drivers/md/dm-cache-target.c if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) { cache 2517 drivers/md/dm-cache-target.c cache->sectors_per_block_shift = -1; cache 2519 drivers/md/dm-cache-target.c set_cache_size(cache, to_cblock(cache_size)); cache 2521 drivers/md/dm-cache-target.c cache->sectors_per_block_shift = __ffs(ca->block_size); cache 2522 drivers/md/dm-cache-target.c set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift)); cache 2525 drivers/md/dm-cache-target.c r = create_cache_policy(cache, ca, error); cache 2529 drivers/md/dm-cache-target.c cache->policy_nr_args = ca->policy_argc; cache 2530 drivers/md/dm-cache-target.c cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; cache 2532 drivers/md/dm-cache-target.c r = set_config_values(cache, ca->policy_argc, ca->policy_argv); cache 2538 drivers/md/dm-cache-target.c cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, cache 2540 drivers/md/dm-cache-target.c dm_cache_policy_get_hint_size(cache->policy), cache 2547 drivers/md/dm-cache-target.c cache->cmd = cmd; cache 2548 drivers/md/dm-cache-target.c set_cache_mode(cache, CM_WRITE); cache 2549 drivers/md/dm-cache-target.c if (get_cache_mode(cache) != CM_WRITE) { cache 2555 drivers/md/dm-cache-target.c if (passthrough_mode(cache)) { cache 2558 drivers/md/dm-cache-target.c r = dm_cache_metadata_all_clean(cache->cmd, &all_clean); cache 2570 drivers/md/dm-cache-target.c policy_allow_migrations(cache->policy, false); cache 2573 drivers/md/dm-cache-target.c spin_lock_init(&cache->lock); cache 2574 drivers/md/dm-cache-target.c bio_list_init(&cache->deferred_bios); cache 2575 drivers/md/dm-cache-target.c atomic_set(&cache->nr_allocated_migrations, 0); cache 2576 drivers/md/dm-cache-target.c atomic_set(&cache->nr_io_migrations, 0); cache 2577 drivers/md/dm-cache-target.c init_waitqueue_head(&cache->migration_wait); cache 2580 drivers/md/dm-cache-target.c atomic_set(&cache->nr_dirty, 0); cache 2581 drivers/md/dm-cache-target.c cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); cache 2582 drivers/md/dm-cache-target.c if (!cache->dirty_bitset) { cache 2586 drivers/md/dm-cache-target.c clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size)); cache 2588 drivers/md/dm-cache-target.c cache->discard_block_size = cache 2589 drivers/md/dm-cache-target.c calculate_discard_block_size(cache->sectors_per_block, cache 2590 drivers/md/dm-cache-target.c cache->origin_sectors); cache 2591 drivers/md/dm-cache-target.c cache->discard_nr_blocks = to_dblock(dm_sector_div_up(cache->origin_sectors, cache 2592 drivers/md/dm-cache-target.c cache->discard_block_size)); cache 2593 drivers/md/dm-cache-target.c cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks)); cache 2594 drivers/md/dm-cache-target.c if (!cache->discard_bitset) { cache 2598 drivers/md/dm-cache-target.c clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); cache 2600 drivers/md/dm-cache-target.c cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); cache 2601 drivers/md/dm-cache-target.c if (IS_ERR(cache->copier)) { cache 2603 drivers/md/dm-cache-target.c r = PTR_ERR(cache->copier); cache 2607 drivers/md/dm-cache-target.c cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0); cache 2608 drivers/md/dm-cache-target.c if (!cache->wq) { cache 2612 drivers/md/dm-cache-target.c 
INIT_WORK(&cache->deferred_bio_worker, process_deferred_bios); cache 2613 drivers/md/dm-cache-target.c INIT_WORK(&cache->migration_worker, check_migrations); cache 2614 drivers/md/dm-cache-target.c INIT_DELAYED_WORK(&cache->waker, do_waker); cache 2616 drivers/md/dm-cache-target.c cache->prison = dm_bio_prison_create_v2(cache->wq); cache 2617 drivers/md/dm-cache-target.c if (!cache->prison) { cache 2622 drivers/md/dm-cache-target.c r = mempool_init_slab_pool(&cache->migration_pool, MIGRATION_POOL_SIZE, cache 2629 drivers/md/dm-cache-target.c cache->need_tick_bio = true; cache 2630 drivers/md/dm-cache-target.c cache->sized = false; cache 2631 drivers/md/dm-cache-target.c cache->invalidate = false; cache 2632 drivers/md/dm-cache-target.c cache->commit_requested = false; cache 2633 drivers/md/dm-cache-target.c cache->loaded_mappings = false; cache 2634 drivers/md/dm-cache-target.c cache->loaded_discards = false; cache 2636 drivers/md/dm-cache-target.c load_stats(cache); cache 2638 drivers/md/dm-cache-target.c atomic_set(&cache->stats.demotion, 0); cache 2639 drivers/md/dm-cache-target.c atomic_set(&cache->stats.promotion, 0); cache 2640 drivers/md/dm-cache-target.c atomic_set(&cache->stats.copies_avoided, 0); cache 2641 drivers/md/dm-cache-target.c atomic_set(&cache->stats.cache_cell_clash, 0); cache 2642 drivers/md/dm-cache-target.c atomic_set(&cache->stats.commit_count, 0); cache 2643 drivers/md/dm-cache-target.c atomic_set(&cache->stats.discard_count, 0); cache 2645 drivers/md/dm-cache-target.c spin_lock_init(&cache->invalidation_lock); cache 2646 drivers/md/dm-cache-target.c INIT_LIST_HEAD(&cache->invalidation_requests); cache 2648 drivers/md/dm-cache-target.c batcher_init(&cache->committer, commit_op, cache, cache 2649 drivers/md/dm-cache-target.c issue_op, cache, cache->wq); cache 2650 drivers/md/dm-cache-target.c iot_init(&cache->tracker); cache 2652 drivers/md/dm-cache-target.c init_rwsem(&cache->background_work_lock); cache 2653 drivers/md/dm-cache-target.c prevent_background_work(cache); cache 2655 drivers/md/dm-cache-target.c *result = cache; cache 2658 drivers/md/dm-cache-target.c destroy(cache); cache 2662 drivers/md/dm-cache-target.c static int copy_ctr_args(struct cache *cache, int argc, const char **argv) cache 2680 drivers/md/dm-cache-target.c cache->nr_ctr_args = argc; cache 2681 drivers/md/dm-cache-target.c cache->ctr_args = copy; cache 2690 drivers/md/dm-cache-target.c struct cache *cache = NULL; cache 2703 drivers/md/dm-cache-target.c r = cache_create(ca, &cache); cache 2707 drivers/md/dm-cache-target.c r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3); cache 2709 drivers/md/dm-cache-target.c destroy(cache); cache 2713 drivers/md/dm-cache-target.c ti->private = cache; cache 2723 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 2727 drivers/md/dm-cache-target.c dm_oblock_t block = get_bio_block(cache, bio); cache 2730 drivers/md/dm-cache-target.c if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { cache 2736 drivers/md/dm-cache-target.c remap_to_origin(cache, bio); cache 2737 drivers/md/dm-cache-target.c accounted_begin(cache, bio); cache 2742 drivers/md/dm-cache-target.c defer_bio(cache, bio); cache 2746 drivers/md/dm-cache-target.c r = map_bio(cache, bio, block, &commit_needed); cache 2748 drivers/md/dm-cache-target.c schedule_commit(&cache->committer); cache 2755 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 2760 drivers/md/dm-cache-target.c policy_tick(cache->policy, false); cache 2762 
drivers/md/dm-cache-target.c spin_lock_irqsave(&cache->lock, flags); cache 2763 drivers/md/dm-cache-target.c cache->need_tick_bio = true; cache 2764 drivers/md/dm-cache-target.c spin_unlock_irqrestore(&cache->lock, flags); cache 2767 drivers/md/dm-cache-target.c bio_drop_shared_lock(cache, bio); cache 2768 drivers/md/dm-cache-target.c accounted_complete(cache, bio); cache 2773 drivers/md/dm-cache-target.c static int write_dirty_bitset(struct cache *cache) cache 2777 drivers/md/dm-cache-target.c if (get_cache_mode(cache) >= CM_READ_ONLY) cache 2780 drivers/md/dm-cache-target.c r = dm_cache_set_dirty_bits(cache->cmd, from_cblock(cache->cache_size), cache->dirty_bitset); cache 2782 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_set_dirty_bits", r); cache 2787 drivers/md/dm-cache-target.c static int write_discard_bitset(struct cache *cache) cache 2791 drivers/md/dm-cache-target.c if (get_cache_mode(cache) >= CM_READ_ONLY) cache 2794 drivers/md/dm-cache-target.c r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size, cache 2795 drivers/md/dm-cache-target.c cache->discard_nr_blocks); cache 2797 drivers/md/dm-cache-target.c DMERR("%s: could not resize on-disk discard bitset", cache_device_name(cache)); cache 2798 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_discard_bitset_resize", r); cache 2802 drivers/md/dm-cache-target.c for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) { cache 2803 drivers/md/dm-cache-target.c r = dm_cache_set_discard(cache->cmd, to_dblock(i), cache 2804 drivers/md/dm-cache-target.c is_discarded(cache, to_dblock(i))); cache 2806 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_set_discard", r); cache 2814 drivers/md/dm-cache-target.c static int write_hints(struct cache *cache) cache 2818 drivers/md/dm-cache-target.c if (get_cache_mode(cache) >= CM_READ_ONLY) cache 2821 drivers/md/dm-cache-target.c r = dm_cache_write_hints(cache->cmd, cache->policy); cache 2823 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_write_hints", r); cache 2833 drivers/md/dm-cache-target.c static bool sync_metadata(struct cache *cache) cache 2837 drivers/md/dm-cache-target.c r1 = write_dirty_bitset(cache); cache 2839 drivers/md/dm-cache-target.c DMERR("%s: could not write dirty bitset", cache_device_name(cache)); cache 2841 drivers/md/dm-cache-target.c r2 = write_discard_bitset(cache); cache 2843 drivers/md/dm-cache-target.c DMERR("%s: could not write discard bitset", cache_device_name(cache)); cache 2845 drivers/md/dm-cache-target.c save_stats(cache); cache 2847 drivers/md/dm-cache-target.c r3 = write_hints(cache); cache 2849 drivers/md/dm-cache-target.c DMERR("%s: could not write hints", cache_device_name(cache)); cache 2856 drivers/md/dm-cache-target.c r4 = commit(cache, !r1 && !r2 && !r3); cache 2858 drivers/md/dm-cache-target.c DMERR("%s: could not write cache metadata", cache_device_name(cache)); cache 2865 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 2867 drivers/md/dm-cache-target.c prevent_background_work(cache); cache 2868 drivers/md/dm-cache-target.c BUG_ON(atomic_read(&cache->nr_io_migrations)); cache 2870 drivers/md/dm-cache-target.c cancel_delayed_work_sync(&cache->waker); cache 2871 drivers/md/dm-cache-target.c drain_workqueue(cache->wq); cache 2872 drivers/md/dm-cache-target.c WARN_ON(cache->tracker.in_flight); cache 2878 drivers/md/dm-cache-target.c requeue_deferred_bios(cache); cache 2880 drivers/md/dm-cache-target.c if 
(get_cache_mode(cache) == CM_WRITE) cache 2881 drivers/md/dm-cache-target.c (void) sync_metadata(cache); cache 2888 drivers/md/dm-cache-target.c struct cache *cache = context; cache 2891 drivers/md/dm-cache-target.c set_bit(from_cblock(cblock), cache->dirty_bitset); cache 2892 drivers/md/dm-cache-target.c atomic_inc(&cache->nr_dirty); cache 2894 drivers/md/dm-cache-target.c clear_bit(from_cblock(cblock), cache->dirty_bitset); cache 2896 drivers/md/dm-cache-target.c r = policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid); cache 2910 drivers/md/dm-cache-target.c struct cache *cache; cache 2920 drivers/md/dm-cache-target.c static void discard_load_info_init(struct cache *cache, cache 2923 drivers/md/dm-cache-target.c li->cache = cache; cache 2943 drivers/md/dm-cache-target.c b = dm_sector_div_up(b, li->cache->discard_block_size); cache 2944 drivers/md/dm-cache-target.c sector_div(e, li->cache->discard_block_size); cache 2950 drivers/md/dm-cache-target.c if (e > from_dblock(li->cache->discard_nr_blocks)) cache 2951 drivers/md/dm-cache-target.c e = from_dblock(li->cache->discard_nr_blocks); cache 2954 drivers/md/dm-cache-target.c set_discard(li->cache, to_dblock(b)); cache 2987 drivers/md/dm-cache-target.c static dm_cblock_t get_cache_dev_size(struct cache *cache) cache 2989 drivers/md/dm-cache-target.c sector_t size = get_dev_size(cache->cache_dev); cache 2990 drivers/md/dm-cache-target.c (void) sector_div(size, cache->sectors_per_block); cache 2994 drivers/md/dm-cache-target.c static bool can_resize(struct cache *cache, dm_cblock_t new_size) cache 2996 drivers/md/dm-cache-target.c if (from_cblock(new_size) > from_cblock(cache->cache_size)) { cache 2997 drivers/md/dm-cache-target.c if (cache->sized) { cache 2999 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 3007 drivers/md/dm-cache-target.c while (from_cblock(new_size) < from_cblock(cache->cache_size)) { cache 3009 drivers/md/dm-cache-target.c if (is_dirty(cache, new_size)) { cache 3011 drivers/md/dm-cache-target.c cache_device_name(cache), cache 3020 drivers/md/dm-cache-target.c static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size) cache 3024 drivers/md/dm-cache-target.c r = dm_cache_resize(cache->cmd, new_size); cache 3026 drivers/md/dm-cache-target.c DMERR("%s: could not resize cache metadata", cache_device_name(cache)); cache 3027 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_resize", r); cache 3031 drivers/md/dm-cache-target.c set_cache_size(cache, new_size); cache 3039 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 3040 drivers/md/dm-cache-target.c dm_cblock_t csize = get_cache_dev_size(cache); cache 3045 drivers/md/dm-cache-target.c if (!cache->sized) { cache 3046 drivers/md/dm-cache-target.c r = resize_cache_dev(cache, csize); cache 3050 drivers/md/dm-cache-target.c cache->sized = true; cache 3052 drivers/md/dm-cache-target.c } else if (csize != cache->cache_size) { cache 3053 drivers/md/dm-cache-target.c if (!can_resize(cache, csize)) cache 3056 drivers/md/dm-cache-target.c r = resize_cache_dev(cache, csize); cache 3061 drivers/md/dm-cache-target.c if (!cache->loaded_mappings) { cache 3062 drivers/md/dm-cache-target.c r = dm_cache_load_mappings(cache->cmd, cache->policy, cache 3063 drivers/md/dm-cache-target.c load_mapping, cache); cache 3065 drivers/md/dm-cache-target.c DMERR("%s: could not load cache mappings", cache_device_name(cache)); cache 3066 drivers/md/dm-cache-target.c metadata_operation_failed(cache, 
"dm_cache_load_mappings", r); cache 3070 drivers/md/dm-cache-target.c cache->loaded_mappings = true; cache 3073 drivers/md/dm-cache-target.c if (!cache->loaded_discards) { cache 3081 drivers/md/dm-cache-target.c clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks)); cache 3083 drivers/md/dm-cache-target.c discard_load_info_init(cache, &li); cache 3084 drivers/md/dm-cache-target.c r = dm_cache_load_discards(cache->cmd, load_discard, &li); cache 3086 drivers/md/dm-cache-target.c DMERR("%s: could not load origin discards", cache_device_name(cache)); cache 3087 drivers/md/dm-cache-target.c metadata_operation_failed(cache, "dm_cache_load_discards", r); cache 3092 drivers/md/dm-cache-target.c cache->loaded_discards = true; cache 3100 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 3102 drivers/md/dm-cache-target.c cache->need_tick_bio = true; cache 3103 drivers/md/dm-cache-target.c allow_background_work(cache); cache 3104 drivers/md/dm-cache-target.c do_waker(&cache->waker.work); cache 3107 drivers/md/dm-cache-target.c static void emit_flags(struct cache *cache, char *result, cache 3111 drivers/md/dm-cache-target.c struct cache_features *cf = &cache->features; cache 3119 drivers/md/dm-cache-target.c if (writethrough_mode(cache)) cache 3122 drivers/md/dm-cache-target.c else if (passthrough_mode(cache)) cache 3125 drivers/md/dm-cache-target.c else if (writeback_mode(cache)) cache 3131 drivers/md/dm-cache-target.c cache_device_name(cache), (int) cf->io_mode); cache 3160 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 3166 drivers/md/dm-cache-target.c if (get_cache_mode(cache) == CM_FAIL) { cache 3173 drivers/md/dm-cache-target.c (void) commit(cache, false); cache 3175 drivers/md/dm-cache-target.c r = dm_cache_get_free_metadata_block_count(cache->cmd, &nr_free_blocks_metadata); cache 3178 drivers/md/dm-cache-target.c cache_device_name(cache), r); cache 3182 drivers/md/dm-cache-target.c r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata); cache 3185 drivers/md/dm-cache-target.c cache_device_name(cache), r); cache 3189 drivers/md/dm-cache-target.c residency = policy_residency(cache->policy); cache 3195 drivers/md/dm-cache-target.c (unsigned long long)cache->sectors_per_block, cache 3197 drivers/md/dm-cache-target.c (unsigned long long) from_cblock(cache->cache_size), cache 3198 drivers/md/dm-cache-target.c (unsigned) atomic_read(&cache->stats.read_hit), cache 3199 drivers/md/dm-cache-target.c (unsigned) atomic_read(&cache->stats.read_miss), cache 3200 drivers/md/dm-cache-target.c (unsigned) atomic_read(&cache->stats.write_hit), cache 3201 drivers/md/dm-cache-target.c (unsigned) atomic_read(&cache->stats.write_miss), cache 3202 drivers/md/dm-cache-target.c (unsigned) atomic_read(&cache->stats.demotion), cache 3203 drivers/md/dm-cache-target.c (unsigned) atomic_read(&cache->stats.promotion), cache 3204 drivers/md/dm-cache-target.c (unsigned long) atomic_read(&cache->nr_dirty)); cache 3206 drivers/md/dm-cache-target.c emit_flags(cache, result, maxlen, &sz); cache 3208 drivers/md/dm-cache-target.c DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold); cache 3210 drivers/md/dm-cache-target.c DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); cache 3212 drivers/md/dm-cache-target.c r = policy_emit_config_values(cache->policy, result, maxlen, &sz); cache 3215 drivers/md/dm-cache-target.c cache_device_name(cache), r); cache 3218 drivers/md/dm-cache-target.c if (get_cache_mode(cache) == 
CM_READ_ONLY) cache 3223 drivers/md/dm-cache-target.c r = dm_cache_metadata_needs_check(cache->cmd, &needs_check); cache 3233 drivers/md/dm-cache-target.c format_dev_t(buf, cache->metadata_dev->bdev->bd_dev); cache 3235 drivers/md/dm-cache-target.c format_dev_t(buf, cache->cache_dev->bdev->bd_dev); cache 3237 drivers/md/dm-cache-target.c format_dev_t(buf, cache->origin_dev->bdev->bd_dev); cache 3240 drivers/md/dm-cache-target.c for (i = 0; i < cache->nr_ctr_args - 1; i++) cache 3241 drivers/md/dm-cache-target.c DMEMIT(" %s", cache->ctr_args[i]); cache 3242 drivers/md/dm-cache-target.c if (cache->nr_ctr_args) cache 3243 drivers/md/dm-cache-target.c DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]); cache 3267 drivers/md/dm-cache-target.c static int parse_cblock_range(struct cache *cache, const char *str, cache 3300 drivers/md/dm-cache-target.c DMERR("%s: invalid cblock range '%s'", cache_device_name(cache), str); cache 3304 drivers/md/dm-cache-target.c static int validate_cblock_range(struct cache *cache, struct cblock_range *range) cache 3308 drivers/md/dm-cache-target.c uint64_t n = from_cblock(cache->cache_size); cache 3312 drivers/md/dm-cache-target.c cache_device_name(cache), b, n); cache 3318 drivers/md/dm-cache-target.c cache_device_name(cache), e, n); cache 3324 drivers/md/dm-cache-target.c cache_device_name(cache), b, e); cache 3336 drivers/md/dm-cache-target.c static int request_invalidation(struct cache *cache, struct cblock_range *range) cache 3347 drivers/md/dm-cache-target.c r = invalidate_cblock(cache, range->begin); cache 3354 drivers/md/dm-cache-target.c cache->commit_requested = true; cache 3358 drivers/md/dm-cache-target.c static int process_invalidate_cblocks_message(struct cache *cache, unsigned count, cache 3365 drivers/md/dm-cache-target.c if (!passthrough_mode(cache)) { cache 3367 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 3372 drivers/md/dm-cache-target.c r = parse_cblock_range(cache, cblock_ranges[i], &range); cache 3376 drivers/md/dm-cache-target.c r = validate_cblock_range(cache, &range); cache 3383 drivers/md/dm-cache-target.c r = request_invalidation(cache, &range); cache 3402 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 3407 drivers/md/dm-cache-target.c if (get_cache_mode(cache) >= CM_READ_ONLY) { cache 3409 drivers/md/dm-cache-target.c cache_device_name(cache)); cache 3414 drivers/md/dm-cache-target.c return process_invalidate_cblocks_message(cache, argc - 1, (const char **) argv + 1); cache 3419 drivers/md/dm-cache-target.c return set_config_value(cache, argv[0], argv[1]); cache 3426 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 3428 drivers/md/dm-cache-target.c r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data); cache 3430 drivers/md/dm-cache-target.c r = fn(ti, cache->origin_dev, 0, ti->len, data); cache 3446 drivers/md/dm-cache-target.c static void disable_passdown_if_not_supported(struct cache *cache) cache 3448 drivers/md/dm-cache-target.c struct block_device *origin_bdev = cache->origin_dev->bdev; cache 3453 drivers/md/dm-cache-target.c if (!cache->features.discard_passdown) cache 3459 drivers/md/dm-cache-target.c else if (origin_limits->max_discard_sectors < cache->sectors_per_block) cache 3465 drivers/md/dm-cache-target.c cache->features.discard_passdown = false; cache 3469 drivers/md/dm-cache-target.c static void set_discard_limits(struct cache *cache, struct queue_limits *limits) cache 3471 drivers/md/dm-cache-target.c struct block_device *origin_bdev = 
cache->origin_dev->bdev; cache 3474 drivers/md/dm-cache-target.c if (!cache->features.discard_passdown) { cache 3476 drivers/md/dm-cache-target.c limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024, cache 3477 drivers/md/dm-cache-target.c cache->origin_sectors); cache 3478 drivers/md/dm-cache-target.c limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT; cache 3495 drivers/md/dm-cache-target.c struct cache *cache = ti->private; cache 3502 drivers/md/dm-cache-target.c if (io_opt_sectors < cache->sectors_per_block || cache 3503 drivers/md/dm-cache-target.c do_div(io_opt_sectors, cache->sectors_per_block)) { cache 3504 drivers/md/dm-cache-target.c blk_limits_io_min(limits, cache->sectors_per_block << SECTOR_SHIFT); cache 3505 drivers/md/dm-cache-target.c blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT); cache 3508 drivers/md/dm-cache-target.c disable_passdown_if_not_supported(cache); cache 3509 drivers/md/dm-cache-target.c set_discard_limits(cache, limits); cache 555 drivers/md/dm-verity-fec.c kmem_cache_destroy(f->cache); cache 781 drivers/md/dm-verity-fec.c f->cache = kmem_cache_create("dm_verity_fec_buffers", cache 784 drivers/md/dm-verity-fec.c if (!f->cache) { cache 792 drivers/md/dm-verity-fec.c f->cache); cache 798 drivers/md/dm-verity-fec.c ret = mempool_init_slab_pool(&f->extra_pool, 0, f->cache); cache 49 drivers/md/dm-verity-fec.h struct kmem_cache *cache; /* cache for buffers */ cache 1517 drivers/memstick/core/ms_block.c if (!msb->cache) cache 1518 drivers/memstick/core/ms_block.c msb->cache = kzalloc(msb->block_size, GFP_KERNEL); cache 1519 drivers/memstick/core/ms_block.c if (!msb->cache) cache 1545 drivers/memstick/core/ms_block.c sg_init_one(&sg, msb->cache , msb->block_size); cache 1634 drivers/memstick/core/ms_block.c msb->cache + page * msb->page_size, msb->page_size); cache 1657 drivers/memstick/core/ms_block.c msb->cache + msb->page_size * page, cache 1958 drivers/memstick/core/ms_block.c kfree(msb->cache); cache 185 drivers/memstick/core/ms_block.h unsigned char *cache; cache 246 drivers/mfd/htc-i2cpld.c u8 cache; cache 249 drivers/mfd/htc-i2cpld.c cache = chip_data->cache_out; cache 251 drivers/mfd/htc-i2cpld.c cache = chip_data->cache_in; cache 255 drivers/mfd/htc-i2cpld.c return (cache >> offset) & 1; cache 1648 drivers/misc/mic/scif/scif_dma.c bool cache = false; cache 1670 drivers/misc/mic/scif/scif_dma.c cache = scif_is_set_reg_cache(flags); cache 1689 drivers/misc/mic/scif/scif_dma.c if (addr && cache) { cache 1699 drivers/misc/mic/scif/scif_dma.c cache = cache && !scif_rma_tc_can_cache(ep, len); cache 1724 drivers/misc/mic/scif/scif_dma.c if (!cache) cache 1791 drivers/misc/mic/scif/scif_dma.c if (addr && !cache) cache 1807 drivers/misc/mic/scif/scif_dma.c if (addr && !cache) cache 1813 drivers/misc/mic/scif/scif_dma.c if (addr && local_window && !cache) cache 30 drivers/mtd/nand/bbt.c nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache), cache 32 drivers/mtd/nand/bbt.c if (!nand->bbt.cache) cache 47 drivers/mtd/nand/bbt.c kfree(nand->bbt.cache); cache 78 drivers/mtd/nand/bbt.c unsigned long *pos = nand->bbt.cache + cache 111 drivers/mtd/nand/bbt.c unsigned long *pos = nand->bbt.cache + cache 168 drivers/mtd/nand/raw/atmel/pmecc.c struct atmel_pmecc_user_conf_cache cache; cache 397 drivers/mtd/nand/raw/atmel/pmecc.c user->cache.cfg = PMECC_CFG_BCH_STRENGTH(strength) | cache 401 drivers/mtd/nand/raw/atmel/pmecc.c user->cache.cfg |= PMECC_CFG_SECTOR1024; cache 403 
drivers/mtd/nand/raw/atmel/pmecc.c user->cache.sarea = req->oobsize - 1; cache 404 drivers/mtd/nand/raw/atmel/pmecc.c user->cache.saddr = req->ecc.ooboffset; cache 405 drivers/mtd/nand/raw/atmel/pmecc.c user->cache.eaddr = req->ecc.ooboffset + req->ecc.bytes - 1; cache 421 drivers/mtd/nand/raw/atmel/pmecc.c return strengths[user->cache.cfg & PMECC_CFG_BCH_STRENGTH_MASK]; cache 426 drivers/mtd/nand/raw/atmel/pmecc.c return user->cache.cfg & PMECC_CFG_SECTOR1024 ? 1024 : 512; cache 784 drivers/mtd/nand/raw/atmel/pmecc.c cfg = user->cache.cfg; cache 791 drivers/mtd/nand/raw/atmel/pmecc.c writel(user->cache.sarea, pmecc->regs.base + ATMEL_PMECC_SAREA); cache 792 drivers/mtd/nand/raw/atmel/pmecc.c writel(user->cache.saddr, pmecc->regs.base + ATMEL_PMECC_SADDR); cache 793 drivers/mtd/nand/raw/atmel/pmecc.c writel(user->cache.eaddr, pmecc->regs.base + ATMEL_PMECC_EADDR); cache 1034 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c u16 *cache = phy->phy_cache; cache 1066 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16; cache 1067 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c *cache++ = be32_to_cpu(p[i]) & 0xffff; cache 837 drivers/net/ethernet/dec/tulip/de4x5.c } cache; cache 1142 drivers/net/ethernet/dec/tulip/de4x5.c skb_queue_head_init(&lp->cache.queue); cache 1143 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gepc = GEP_INIT; cache 1477 drivers/net/ethernet/dec/tulip/de4x5.c if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) cache 1492 drivers/net/ethernet/dec/tulip/de4x5.c if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) { cache 1516 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.lock = 0; cache 1586 drivers/net/ethernet/dec/tulip/de4x5.c if (!test_and_set_bit(0, (void *)&lp->cache.lock)) { cache 1587 drivers/net/ethernet/dec/tulip/de4x5.c while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) { cache 1590 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.lock = 0; cache 3669 drivers/net/ethernet/dec/tulip/de4x5.c __skb_queue_purge(&lp->cache.queue); cache 3686 drivers/net/ethernet/dec/tulip/de4x5.c if (!lp->cache.save_cnt) { cache 3693 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.save_cnt++; cache 3706 drivers/net/ethernet/dec/tulip/de4x5.c if (lp->cache.save_cnt) { cache 3724 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.save_cnt--; cache 3737 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr0 = inl(DE4X5_BMR); cache 3738 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR)); cache 3739 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr7 = inl(DE4X5_IMR); cache 3743 drivers/net/ethernet/dec/tulip/de4x5.c outl(lp->cache.csr0, DE4X5_BMR); cache 3744 drivers/net/ethernet/dec/tulip/de4x5.c outl(lp->cache.csr6, DE4X5_OMR); cache 3745 drivers/net/ethernet/dec/tulip/de4x5.c outl(lp->cache.csr7, DE4X5_IMR); cache 3747 drivers/net/ethernet/dec/tulip/de4x5.c gep_wr(lp->cache.gepc, dev); cache 3748 drivers/net/ethernet/dec/tulip/de4x5.c gep_wr(lp->cache.gep, dev); cache 3750 drivers/net/ethernet/dec/tulip/de4x5.c reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, cache 3751 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr15); cache 3762 drivers/net/ethernet/dec/tulip/de4x5.c __skb_queue_tail(&lp->cache.queue, skb); cache 3770 drivers/net/ethernet/dec/tulip/de4x5.c __skb_queue_head(&lp->cache.queue, skb); cache 3778 drivers/net/ethernet/dec/tulip/de4x5.c return __skb_dequeue(&lp->cache.queue); cache 3846 
drivers/net/ethernet/dec/tulip/de4x5.c csr15 = lp->cache.csr15; cache 3847 drivers/net/ethernet/dec/tulip/de4x5.c csr14 = lp->cache.csr14; cache 3848 drivers/net/ethernet/dec/tulip/de4x5.c csr13 = lp->cache.csr13; cache 3849 drivers/net/ethernet/dec/tulip/de4x5.c outl(csr15 | lp->cache.gepc, DE4X5_SIGR); cache 3850 drivers/net/ethernet/dec/tulip/de4x5.c outl(csr15 | lp->cache.gep, DE4X5_SIGR); cache 4351 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gepc = (*p++ | GEP_CTRL); cache 4352 drivers/net/ethernet/dec/tulip/de4x5.c gep_wr(lp->cache.gepc, dev); cache 4404 drivers/net/ethernet/dec/tulip/de4x5.c outl(lp->cache.csr14, DE4X5_STRR); cache 4405 drivers/net/ethernet/dec/tulip/de4x5.c outl(lp->cache.csr13, DE4X5_SICR); cache 4432 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gepc = (*p++ | GEP_CTRL); cache 4549 drivers/net/ethernet/dec/tulip/de4x5.c gep_wr(lp->cache.gepc, dev); cache 4551 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gep = *p++; cache 4589 drivers/net/ethernet/dec/tulip/de4x5.c gep_wr(lp->cache.gepc, dev); cache 4592 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gep = *p++; cache 4672 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr13 = get_unaligned_le16(p); p += 2; cache 4673 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr14 = get_unaligned_le16(p); p += 2; cache 4674 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr15 = get_unaligned_le16(p); p += 2; cache 4676 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr13 = CSR13; cache 4677 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr14 = CSR14; cache 4678 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr15 = CSR15; cache 4680 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; cache 4681 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); cache 4753 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr13 = CSR13; /* Hard coded defaults */ cache 4754 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr14 = CSR14; cache 4755 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.csr15 = CSR15; cache 4756 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; cache 4757 drivers/net/ethernet/dec/tulip/de4x5.c lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2; cache 5092 drivers/net/ethernet/dec/tulip/de4x5.c gep_wr(lp->cache.gepc, dev); cache 5093 drivers/net/ethernet/dec/tulip/de4x5.c gep_wr(lp->cache.gep, dev); cache 5095 drivers/net/ethernet/dec/tulip/de4x5.c reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15); cache 5116 drivers/net/ethernet/dec/tulip/de4x5.c outl((data<<16) | lp->cache.csr15, DE4X5_SIGR); cache 929 drivers/net/ethernet/dec/tulip/de4x5.h if (!lp->useSROM) lp->cache.gep = 0;\ cache 938 drivers/net/ethernet/dec/tulip/de4x5.h lp->cache.gep = (lp->fdx ? 0 : GEP_FDXD);\ cache 939 drivers/net/ethernet/dec/tulip/de4x5.h gep_wr(lp->cache.gep, dev);\ cache 958 drivers/net/ethernet/dec/tulip/de4x5.h if (!lp->useSROM) lp->cache.gep = 0;\ cache 967 drivers/net/ethernet/dec/tulip/de4x5.h lp->cache.gep = (lp->fdx ? 
0 : GEP_FDXD) | GEP_MODE;\ cache 968 drivers/net/ethernet/dec/tulip/de4x5.h gep_wr(lp->cache.gep, dev);\ cache 984 drivers/net/ethernet/dec/tulip/de4x5.h lp->cache.gep = (GEP_FDXD | GEP_MODE);\ cache 985 drivers/net/ethernet/dec/tulip/de4x5.h gep_wr(lp->cache.gep, dev);\ cache 1193 drivers/net/ethernet/dec/tulip/tulip_core.c u8 cache; cache 1216 drivers/net/ethernet/dec/tulip/tulip_core.c pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache); cache 1217 drivers/net/ethernet/dec/tulip/tulip_core.c if ((csr0 & MWI) && (cache == 0)) { cache 1225 drivers/net/ethernet/dec/tulip/tulip_core.c switch (cache) { cache 1236 drivers/net/ethernet/dec/tulip/tulip_core.c cache = 0; cache 1243 drivers/net/ethernet/dec/tulip/tulip_core.c if (cache) cache 1261 drivers/net/ethernet/dec/tulip/tulip_core.c cache, csr0); cache 418 drivers/net/ethernet/mellanox/mlx4/en_rx.c struct mlx4_en_page_cache *cache = &ring->page_cache; cache 420 drivers/net/ethernet/mellanox/mlx4/en_rx.c if (cache->index >= MLX4_EN_CACHE_SIZE) cache 423 drivers/net/ethernet/mellanox/mlx4/en_rx.c cache->buf[cache->index].page = frame->page; cache 424 drivers/net/ethernet/mellanox/mlx4/en_rx.c cache->buf[cache->index].dma = frame->dma; cache 425 drivers/net/ethernet/mellanox/mlx4/en_rx.c cache->index++; cache 1642 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ch = &cmd->cache[i]; cache 1826 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ch = &dev->cmd.cache[i]; cache 1856 drivers/net/ethernet/mellanox/mlx5/core/cmd.c ch = &cmd->cache[k]; cache 195 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_page_cache *cache = &rq->page_cache; cache 196 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 tail_next = (cache->tail + 1) & (MLX5E_CACHE_SIZE - 1); cache 199 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (tail_next == cache->head) { cache 209 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c cache->page_cache[cache->tail] = *dma_info; cache 210 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c cache->tail = tail_next; cache 217 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_page_cache *cache = &rq->page_cache; cache 220 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (unlikely(cache->head == cache->tail)) { cache 225 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (page_ref_count(cache->page_cache[cache->head].page) != 1) { cache 230 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c *dma_info = cache->page_cache[cache->head]; cache 231 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c cache->head = (cache->head + 1) & (MLX5E_CACHE_SIZE - 1); cache 67 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; cache 154 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c struct mlx5_fc_cache *cache) cache 161 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c if (cache->packets == packets) cache 164 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c cache->packets = packets; cache 165 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c cache->bytes = bytes; cache 166 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c cache->lastuse = jiffies; cache 200 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c struct mlx5_fc_cache *cache = &counter->cache; cache 207 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c update_counter_cache(counter_index, data, cache); cache 314 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c counter->cache.lastuse = jiffies; cache 315 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c counter->lastbytes = 
counter->cache.bytes; cache 316 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c counter->lastpackets = counter->cache.packets; cache 430 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c return counter->cache.lastuse; cache 438 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c c = counter->cache; cache 12 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports, cache 13 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c sizeof(dmn->cache.recalc_cs_ft[0]), cache 15 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c if (!dmn->cache.recalc_cs_ft) cache 26 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c if (!dmn->cache.recalc_cs_ft[i]) cache 29 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]); cache 32 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c kfree(dmn->cache.recalc_cs_ft); cache 41 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num]; cache 48 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft; cache 659 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5dr_domain_cache cache; cache 219 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c if (!nfp_map->cache) cache 224 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c reply = (void *)nfp_map->cache->data; cache 254 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c dev_consume_skb_any(nfp_map->cache); cache 255 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c nfp_map->cache = NULL; cache 283 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c swap(nfp_map->cache, skb); cache 218 drivers/net/ethernet/netronome/nfp/bpf/main.h struct sk_buff *cache; cache 411 drivers/net/ethernet/netronome/nfp/bpf/offload.c dev_consume_skb_any(nfp_map->cache); cache 163 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c struct nfp_cpp_area_cache *cache, *ctmp; cache 167 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) { cache 168 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c list_del(&cache->entry); cache 169 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (cache->id) cache 170 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c nfp_cpp_area_release(cache->area); cache 171 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c nfp_cpp_area_free(cache->area); cache 172 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c kfree(cache); cache 794 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c struct nfp_cpp_area_cache *cache; cache 805 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache = kzalloc(sizeof(*cache), GFP_KERNEL); cache 806 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (!cache) cache 809 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->id = 0; cache 810 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->addr = 0; cache 811 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->size = size; cache 812 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->area = area; cache 814 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c list_add_tail(&cache->entry, &cpp->area_cache_list); cache 824 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c struct nfp_cpp_area_cache *cache;
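; -- editor's note: the mlx5/core/en_rx.c entries 195-231 listed above implement the
; -- page cache as a power-of-two ring buffer. A minimal sketch of the index
; -- arithmetic follows; RING_SIZE, struct page_ring, and the function names are
; -- illustrative, not the driver's types.

/* tail advances on put, head on get, both wrapped by the mask; the ring is
 * full when tail+1 meets head and empty when head == tail. */
#include <linux/types.h>

#define RING_SIZE 128			/* must be a power of two for the mask */

struct page_ring {
	unsigned int head, tail;
	void *slot[RING_SIZE];
};

static bool ring_put(struct page_ring *r, void *page)
{
	unsigned int tail_next = (r->tail + 1) & (RING_SIZE - 1);

	if (tail_next == r->head)
		return false;		/* full: caller releases the page instead */
	r->slot[r->tail] = page;
	r->tail = tail_next;
	return true;
}

static bool ring_get(struct page_ring *r, void **page)
{
	if (r->head == r->tail)
		return false;		/* empty: caller allocates a fresh page */
	*page = r->slot[r->head];
	r->head = (r->head + 1) & (RING_SIZE - 1);
	return true;
}

cache 849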
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c list_for_each_entry(cache, &cpp->area_cache_list, entry) { cache 850 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (id == cache->id && cache 851 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c addr >= cache->addr && cache 852 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c addr + length <= cache->addr + cache->size) cache 857 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache = list_entry(cpp->area_cache_list.prev, cache 861 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (round_down(addr + length - 1, cache->size) != cache 862 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c round_down(addr, cache->size)) { cache 868 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (cache->id) { cache 869 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c nfp_cpp_area_release(cache->area); cache 870 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->id = 0; cache 871 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->addr = 0; cache 875 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->id = id; cache 876 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache->addr = addr & ~(u64)(cache->size - 1); cache 880 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c err = cpp->op->area_init(cache->area, cache 881 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c id, cache->addr, cache->size); cache 889 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c err = nfp_cpp_area_acquire(cache->area); cache 897 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c *offset = addr - cache->addr; cache 898 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c return cache; cache 902 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache) cache 904 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (!cache) cache 908 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c list_del(&cache->entry); cache 909 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c list_add(&cache->entry, &cpp->area_cache_list); cache 918 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c struct nfp_cpp_area_cache *cache; cache 923 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache = area_cache_get(cpp, destination, address, &offset, length); cache 924 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (cache) { cache 925 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c area = cache->area; cache 940 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (cache) cache 941 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c area_cache_put(cpp, cache); cache 987 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c struct nfp_cpp_area_cache *cache; cache 992 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c cache = area_cache_get(cpp, destination, address, &offset, length); cache 993 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (cache) { cache 994 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c area = cache->area; cache 1009 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c if (cache) cache 1010 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c area_cache_put(cpp, cache); cache 54 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, u32 strtab_size, cache 58 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c 
sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size; cache 101 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c struct nfp_rtsym_table *cache; cache 121 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c size = sizeof(*cache); cache 124 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c cache = kmalloc(size, GFP_KERNEL); cache 125 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c if (!cache) cache 128 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c cache->cpp = cpp; cache 129 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c cache->num = symtab_size / sizeof(*rtsymtab); cache 130 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c cache->strtab = (void *)&cache->symtab[cache->num]; cache 136 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size); cache 139 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c cache->strtab[strtab_size] = '\0'; cache 141 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c for (n = 0; n < cache->num; n++) cache 142 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c nfp_rtsym_sw_entry_init(cache, strtab_size, cache 143 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c &cache->symtab[n], &rtsymtab[n]); cache 147 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c return cache; cache 150 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c kfree(cache); cache 233 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c struct __cache cache; cache 594 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c struct __cache *l1 = &entry->region.cache; cache 620 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c struct __cache *l2 = &entry->region.cache; cache 365 drivers/net/ipvlan/ipvlan_main.c .cache = eth_header_cache, cache 592 drivers/net/macvlan.c .cache = eth_header_cache, cache 261 drivers/net/plip/plip.c .cache = plip_hard_header_cache, cache 460 drivers/net/wireless/ath/carl9170/carl9170.h u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)]; cache 1561 drivers/net/wireless/ath/carl9170/main.c count = ARRAY_SIZE(ar->rng.cache); cache 1571 drivers/net/wireless/ath/carl9170/main.c ar->rng.cache[off + i] = buf[i]; cache 1590 drivers/net/wireless/ath/carl9170/main.c if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) { cache 1598 drivers/net/wireless/ath/carl9170/main.c *data = ar->rng.cache[ar->rng.cache_idx++]; cache 787 drivers/net/wireless/intersil/hostap/hostap_main.c .cache = eth_header_cache, cache 412 drivers/net/wireless/intersil/prism54/oid_mgt.c void *cache, *_data = data; cache 423 drivers/net/wireless/intersil/prism54/oid_mgt.c cache = priv->mib[n]; cache 424 drivers/net/wireless/intersil/prism54/oid_mgt.c cache += (cache ? extra * dlen : 0); cache 429 drivers/net/wireless/intersil/prism54/oid_mgt.c _data = cache; cache 438 drivers/net/wireless/intersil/prism54/oid_mgt.c if (cache) cache 450 drivers/net/wireless/intersil/prism54/oid_mgt.c } else if (!cache) cache 453 drivers/net/wireless/intersil/prism54/oid_mgt.c if (cache) { cache 455 drivers/net/wireless/intersil/prism54/oid_mgt.c memcpy(cache, _data, dlen); cache 512 drivers/net/wireless/intersil/prism54/oid_mgt.c void *cache, *_res = NULL; cache 525 drivers/net/wireless/intersil/prism54/oid_mgt.c cache = priv->mib[n]; cache 526 drivers/net/wireless/intersil/prism54/oid_mgt.c cache += cache ? 
cache 233 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c struct __cache cache;
cache 594 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c struct __cache *l1 = &entry->region.cache;
cache 620 drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c struct __cache *l2 = &entry->region.cache;
cache 365 drivers/net/ipvlan/ipvlan_main.c .cache = eth_header_cache,
cache 592 drivers/net/macvlan.c .cache = eth_header_cache,
cache 261 drivers/net/plip/plip.c .cache = plip_hard_header_cache,
cache 460 drivers/net/wireless/ath/carl9170/carl9170.h u16 cache[CARL9170_HWRNG_CACHE_SIZE / sizeof(u16)];
cache 1561 drivers/net/wireless/ath/carl9170/main.c count = ARRAY_SIZE(ar->rng.cache);
cache 1571 drivers/net/wireless/ath/carl9170/main.c ar->rng.cache[off + i] = buf[i];
cache 1590 drivers/net/wireless/ath/carl9170/main.c if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
cache 1598 drivers/net/wireless/ath/carl9170/main.c *data = ar->rng.cache[ar->rng.cache_idx++];
cache 787 drivers/net/wireless/intersil/hostap/hostap_main.c .cache = eth_header_cache,
cache 412 drivers/net/wireless/intersil/prism54/oid_mgt.c void *cache, *_data = data;
cache 423 drivers/net/wireless/intersil/prism54/oid_mgt.c cache = priv->mib[n];
cache 424 drivers/net/wireless/intersil/prism54/oid_mgt.c cache += (cache ? extra * dlen : 0);
cache 429 drivers/net/wireless/intersil/prism54/oid_mgt.c _data = cache;
cache 438 drivers/net/wireless/intersil/prism54/oid_mgt.c if (cache)
cache 450 drivers/net/wireless/intersil/prism54/oid_mgt.c } else if (!cache)
cache 453 drivers/net/wireless/intersil/prism54/oid_mgt.c if (cache) {
cache 455 drivers/net/wireless/intersil/prism54/oid_mgt.c memcpy(cache, _data, dlen);
cache 512 drivers/net/wireless/intersil/prism54/oid_mgt.c void *cache, *_res = NULL;
cache 525 drivers/net/wireless/intersil/prism54/oid_mgt.c cache = priv->mib[n];
cache 526 drivers/net/wireless/intersil/prism54/oid_mgt.c cache += cache ? extra * dlen : 0;
cache 530 drivers/net/wireless/intersil/prism54/oid_mgt.c if (cache)
cache 546 drivers/net/wireless/intersil/prism54/oid_mgt.c } else if (cache) {
cache 547 drivers/net/wireless/intersil/prism54/oid_mgt.c _res = cache;
cache 563 drivers/net/wireless/intersil/prism54/oid_mgt.c if (cache)
cache 820 drivers/net/wireless/ralink/rt2x00/rt2x00.h void *cache;
cache 92 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
cache 98 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c memcpy(rt2x00dev->csr.cache, buffer, buffer_length);
cache 101 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c offset, 0, rt2x00dev->csr.cache,
cache 105 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c memcpy(buffer, rt2x00dev->csr.cache, buffer_length);
cache 766 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c kfree(rt2x00dev->csr.cache);
cache 767 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c rt2x00dev->csr.cache = NULL;
cache 772 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
cache 773 drivers/net/wireless/ralink/rt2x00/rt2x00usb.c if (!rt2x00dev->csr.cache)
cache 213 drivers/net/wireless/st/cw1200/txrx.c static int tx_policy_find(struct tx_policy_cache *cache,
cache 222 drivers/net/wireless/st/cw1200/txrx.c list_for_each_entry(it, &cache->used, link) {
cache 224 drivers/net/wireless/st/cw1200/txrx.c return it - cache->cache;
cache 227 drivers/net/wireless/st/cw1200/txrx.c list_for_each_entry(it, &cache->free, link) {
cache 229 drivers/net/wireless/st/cw1200/txrx.c return it - cache->cache;
cache 234 drivers/net/wireless/st/cw1200/txrx.c static inline void tx_policy_use(struct tx_policy_cache *cache,
cache 238 drivers/net/wireless/st/cw1200/txrx.c list_move(&entry->link, &cache->used);
cache 241 drivers/net/wireless/st/cw1200/txrx.c static inline int tx_policy_release(struct tx_policy_cache *cache,
cache 246 drivers/net/wireless/st/cw1200/txrx.c list_move(&entry->link, &cache->free);
cache 253 drivers/net/wireless/st/cw1200/txrx.c struct tx_policy_cache *cache = &priv->tx_policy_cache;
cache 257 drivers/net/wireless/st/cw1200/txrx.c spin_lock_bh(&cache->lock);
cache 258 drivers/net/wireless/st/cw1200/txrx.c locked = list_empty(&cache->free);
cache 261 drivers/net/wireless/st/cw1200/txrx.c entry = &cache->cache[idx];
cache 267 drivers/net/wireless/st/cw1200/txrx.c list_move(&entry->link, &cache->free);
cache 275 drivers/net/wireless/st/cw1200/txrx.c spin_unlock_bh(&cache->lock);
cache 283 drivers/net/wireless/st/cw1200/txrx.c struct tx_policy_cache *cache = &priv->tx_policy_cache;
cache 286 drivers/net/wireless/st/cw1200/txrx.c memset(cache, 0, sizeof(*cache));
cache 288 drivers/net/wireless/st/cw1200/txrx.c spin_lock_init(&cache->lock);
cache 289 drivers/net/wireless/st/cw1200/txrx.c INIT_LIST_HEAD(&cache->used);
cache 290 drivers/net/wireless/st/cw1200/txrx.c INIT_LIST_HEAD(&cache->free);
cache 293 drivers/net/wireless/st/cw1200/txrx.c list_add(&cache->cache[i].link, &cache->free);
cache 301 drivers/net/wireless/st/cw1200/txrx.c struct tx_policy_cache *cache = &priv->tx_policy_cache;
cache 306 drivers/net/wireless/st/cw1200/txrx.c spin_lock_bh(&cache->lock);
cache 307 drivers/net/wireless/st/cw1200/txrx.c if (WARN_ON_ONCE(list_empty(&cache->free))) {
cache 308 drivers/net/wireless/st/cw1200/txrx.c spin_unlock_bh(&cache->lock);
cache 311 drivers/net/wireless/st/cw1200/txrx.c idx = tx_policy_find(cache, &wanted);
cache 321 drivers/net/wireless/st/cw1200/txrx.c entry = list_entry(cache->free.prev,
cache 324 drivers/net/wireless/st/cw1200/txrx.c idx = entry - cache->cache;
cache 328 drivers/net/wireless/st/cw1200/txrx.c tx_policy_use(cache, &cache->cache[idx]);
cache 329 drivers/net/wireless/st/cw1200/txrx.c if (list_empty(&cache->free)) {
cache 333 drivers/net/wireless/st/cw1200/txrx.c spin_unlock_bh(&cache->lock);
cache 340 drivers/net/wireless/st/cw1200/txrx.c struct tx_policy_cache *cache = &priv->tx_policy_cache;
cache 342 drivers/net/wireless/st/cw1200/txrx.c spin_lock_bh(&cache->lock);
cache 343 drivers/net/wireless/st/cw1200/txrx.c locked = list_empty(&cache->free);
cache 344 drivers/net/wireless/st/cw1200/txrx.c usage = tx_policy_release(cache, &cache->cache[idx]);
cache 349 drivers/net/wireless/st/cw1200/txrx.c spin_unlock_bh(&cache->lock);
cache 354 drivers/net/wireless/st/cw1200/txrx.c struct tx_policy_cache *cache = &priv->tx_policy_cache;
cache 359 drivers/net/wireless/st/cw1200/txrx.c spin_lock_bh(&cache->lock);
cache 363 drivers/net/wireless/st/cw1200/txrx.c struct tx_policy *src = &cache->cache[i].policy;
cache 379 drivers/net/wireless/st/cw1200/txrx.c spin_unlock_bh(&cache->lock);
cache 39 drivers/net/wireless/st/cw1200/txrx.h struct tx_policy_cache_entry cache[TX_POLICY_CACHE_SIZE];
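
The cw1200 tx_policy hits form a fixed-size cache whose entries migrate between a used and a free list, with the slot index recovered by pointer arithmetic against the embedded array ("it - cache->cache" at lines 224, 229 and 324). A small sketch of that index recovery; the struct layout here is assumed, not copied from the driver:

    #define POLICY_CACHE_SIZE 8

    struct tx_entry { int in_use; };

    struct policy_cache {
        struct tx_entry cache[POLICY_CACHE_SIZE];   /* entries live in place */
    };

    /* Subtracting the array base from an element pointer yields the slot
     * number, so the lists can carry bare entry pointers and still report
     * an index to the hardware. */
    static int policy_find(struct policy_cache *pc)
    {
        for (struct tx_entry *it = pc->cache;
             it != pc->cache + POLICY_CACHE_SIZE; ++it)
            if (it->in_use)
                return (int)(it - pc->cache);
        return -1;
    }
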
cache 248 drivers/net/xen-netback/common.h struct xenvif_hash_cache cache;
cache 50 drivers/net/xen-netback/hash.c spin_lock_irqsave(&vif->hash.cache.lock, flags);
cache 54 drivers/net/xen-netback/hash.c list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
cache 64 drivers/net/xen-netback/hash.c new->seq = atomic_inc_return(&vif->hash.cache.seq);
cache 65 drivers/net/xen-netback/hash.c list_add_rcu(&new->link, &vif->hash.cache.list);
cache 67 drivers/net/xen-netback/hash.c if (++vif->hash.cache.count > xenvif_hash_cache_size) {
cache 69 drivers/net/xen-netback/hash.c vif->hash.cache.count--;
cache 74 drivers/net/xen-netback/hash.c spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
cache 103 drivers/net/xen-netback/hash.c spin_lock_irqsave(&vif->hash.cache.lock, flags);
cache 105 drivers/net/xen-netback/hash.c list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
cache 107 drivers/net/xen-netback/hash.c vif->hash.cache.count--;
cache 111 drivers/net/xen-netback/hash.c spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
cache 131 drivers/net/xen-netback/hash.c list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
cache 135 drivers/net/xen-netback/hash.c entry->seq = atomic_inc_return(&vif->hash.cache.seq);
cache 457 drivers/net/xen-netback/hash.c BUG_ON(vif->hash.cache.count);
cache 459 drivers/net/xen-netback/hash.c spin_lock_init(&vif->hash.cache.lock);
cache 460 drivers/net/xen-netback/hash.c INIT_LIST_HEAD(&vif->hash.cache.list);
cache 309 drivers/pcmcia/cistpl.c memcpy(ptr, cis->cache, len);
cache 324 drivers/pcmcia/cistpl.c memcpy(cis->cache, ptr, len);
cache 392 drivers/pcmcia/cistpl.c if (ret || memcmp(buf, cis->cache, len) != 0) {
cache 43 drivers/pcmcia/cs_internal.h unsigned char cache[0];
cache 282 drivers/platform/mellanox/mlxreg-hotplug.c asserted = item->cache ^ regval;
cache 283 drivers/platform/mellanox/mlxreg-hotplug.c item->cache = regval;
cache 337 drivers/platform/mellanox/mlxreg-hotplug.c if (item->cache == regval)
cache 368 drivers/platform/mellanox/mlxreg-hotplug.c item->cache = regval;
cache 534 drivers/platform/mellanox/mlxreg-hotplug.c item->cache = item->mask;
cache 1561 drivers/power/supply/bq27xxx_battery.c if (unlikely(bq27xxx_battery_overtemp(di, di->cache.flags)))
cache 1563 drivers/power/supply/bq27xxx_battery.c if (unlikely(bq27xxx_battery_undertemp(di, di->cache.flags)))
cache 1565 drivers/power/supply/bq27xxx_battery.c if (unlikely(bq27xxx_battery_dead(di, di->cache.flags)))
cache 1573 drivers/power/supply/bq27xxx_battery.c struct bq27xxx_reg_cache cache = {0, };
cache 1577 drivers/power/supply/bq27xxx_battery.c cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
cache 1578 drivers/power/supply/bq27xxx_battery.c if ((cache.flags & 0xff) == 0xff)
cache 1579 drivers/power/supply/bq27xxx_battery.c cache.flags = -1; /* read error */
cache 1580 drivers/power/supply/bq27xxx_battery.c if (cache.flags >= 0) {
cache 1581 drivers/power/supply/bq27xxx_battery.c cache.temperature = bq27xxx_battery_read_temperature(di);
cache 1582 drivers/power/supply/bq27xxx_battery.c if (has_ci_flag && (cache.flags & BQ27000_FLAG_CI)) {
cache 1584 drivers/power/supply/bq27xxx_battery.c cache.capacity = -ENODATA;
cache 1585 drivers/power/supply/bq27xxx_battery.c cache.energy = -ENODATA;
cache 1586 drivers/power/supply/bq27xxx_battery.c cache.time_to_empty = -ENODATA;
cache 1587 drivers/power/supply/bq27xxx_battery.c cache.time_to_empty_avg = -ENODATA;
cache 1588 drivers/power/supply/bq27xxx_battery.c cache.time_to_full = -ENODATA;
cache 1589 drivers/power/supply/bq27xxx_battery.c cache.charge_full = -ENODATA;
cache 1590 drivers/power/supply/bq27xxx_battery.c cache.health = -ENODATA;
cache 1593 drivers/power/supply/bq27xxx_battery.c cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
cache 1595 drivers/power/supply/bq27xxx_battery.c cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
cache 1597 drivers/power/supply/bq27xxx_battery.c cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
cache 1598 drivers/power/supply/bq27xxx_battery.c cache.charge_full = bq27xxx_battery_read_fcc(di);
cache 1599 drivers/power/supply/bq27xxx_battery.c cache.capacity = bq27xxx_battery_read_soc(di);
cache 1601 drivers/power/supply/bq27xxx_battery.c cache.energy = bq27xxx_battery_read_energy(di);
cache 1602 drivers/power/supply/bq27xxx_battery.c di->cache.flags = cache.flags;
cache 1603 drivers/power/supply/bq27xxx_battery.c cache.health = bq27xxx_battery_read_health(di);
cache 1606 drivers/power/supply/bq27xxx_battery.c cache.cycle_count = bq27xxx_battery_read_cyct(di);
cache 1608 drivers/power/supply/bq27xxx_battery.c cache.power_avg = bq27xxx_battery_read_pwr_avg(di);
cache 1615 drivers/power/supply/bq27xxx_battery.c if ((di->cache.capacity != cache.capacity) ||
cache 1616 drivers/power/supply/bq27xxx_battery.c (di->cache.flags != cache.flags))
cache 1619 drivers/power/supply/bq27xxx_battery.c if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
cache 1620 drivers/power/supply/bq27xxx_battery.c di->cache = cache;
cache 1677 drivers/power/supply/bq27xxx_battery.c if (di->cache.flags & BQ27000_FLAG_FC)
cache 1679 drivers/power/supply/bq27xxx_battery.c else if (di->cache.flags & BQ27000_FLAG_CHGS)
cache 1686 drivers/power/supply/bq27xxx_battery.c if (di->cache.flags & BQ27XXX_FLAG_FC)
cache 1688 drivers/power/supply/bq27xxx_battery.c else if (di->cache.flags & BQ27XXX_FLAG_DSC)
cache 1705 drivers/power/supply/bq27xxx_battery.c if (di->cache.flags & BQ27000_FLAG_FC)
cache 1707 drivers/power/supply/bq27xxx_battery.c else if (di->cache.flags & BQ27000_FLAG_EDV1)
cache 1709 drivers/power/supply/bq27xxx_battery.c else if (di->cache.flags & BQ27000_FLAG_EDVF)
cache 1714 drivers/power/supply/bq27xxx_battery.c if (di->cache.flags & BQ27XXX_FLAG_FC)
cache 1716 drivers/power/supply/bq27xxx_battery.c else if (di->cache.flags & BQ27XXX_FLAG_SOC1)
cache 1718 drivers/power/supply/bq27xxx_battery.c else if (di->cache.flags & BQ27XXX_FLAG_SOCF)
cache 1774 drivers/power/supply/bq27xxx_battery.c if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
cache 1785 drivers/power/supply/bq27xxx_battery.c val->intval = di->cache.flags < 0 ? 0 : 1;
cache 1791 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.capacity, val);
cache 1797 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.temperature, val);
cache 1802 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.time_to_empty, val);
cache 1805 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.time_to_empty_avg, val);
cache 1808 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.time_to_full, val);
cache 1817 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.charge_full, val);
cache 1830 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.cycle_count, val);
cache 1833 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.energy, val);
cache 1836 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.power_avg, val);
cache 1839 drivers/power/supply/bq27xxx_battery.c ret = bq27xxx_simple_value(di->cache.health, val);
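
The bq27xxx hits read every gauge register into a local struct bq27xxx_reg_cache, then publish it only if it differs from the stored copy, using a whole-struct memcmp() (lines 1615-1620). A hedged sketch of that snapshot-compare-store step; the field set is reduced to a few stand-ins:

    #include <string.h>
    #include <stdbool.h>

    struct reg_cache {
        int flags;
        int capacity;
        int temperature;
    };

    /* Both copies start zero-initialized (the driver's "= {0, }" snapshot),
     * so any padding bytes compare equal and memcmp() is a safe change
     * detector here. */
    static bool cache_update(struct reg_cache *cached,
                             const struct reg_cache *fresh)
    {
        bool changed = memcmp(cached, fresh, sizeof(*fresh)) != 0;
        if (changed)
            *cached = *fresh;       /* di->cache = cache; (line 1620) */
        return changed;             /* caller would signal the change */
    }
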
cache 269 drivers/s390/block/dasd_eckd.h unsigned char cache:1;
cache 256 drivers/scsi/aacraid/aachba.c module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
cache 257 drivers/scsi/aacraid/aachba.c MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
cache 1117 drivers/scsi/gdth.c cmd_ptr->u.cache.DeviceNo = (u16)p1;
cache 1118 drivers/scsi/gdth.c cmd_ptr->u.cache.BlockNo = (u32)p2;
cache 2037 drivers/scsi/gdth.c cmdp->u.cache.DeviceNo = hdrive;
cache 2038 drivers/scsi/gdth.c cmdp->u.cache.BlockNo = 1;
cache 2039 drivers/scsi/gdth.c cmdp->u.cache.sg_canz = 0;
cache 2062 drivers/scsi/gdth.c cmdp->u.cache.BlockNo = (u32)blockno;
cache 2063 drivers/scsi/gdth.c cmdp->u.cache.BlockCnt = blockcnt;
cache 2083 drivers/scsi/gdth.c cmdp->u.cache.DestAddr= 0xffffffff;
cache 2084 drivers/scsi/gdth.c cmdp->u.cache.sg_canz = sgcnt;
cache 2086 drivers/scsi/gdth.c cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
cache 2087 drivers/scsi/gdth.c cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
cache 2112 drivers/scsi/gdth.c cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
cache 2113 drivers/scsi/gdth.c cmdp->u.cache.sg_lst[0].sg_ptr,
cache 2114 drivers/scsi/gdth.c cmdp->u.cache.sg_lst[0].sg_len));
cache 2116 drivers/scsi/gdth.c cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
cache 2117 drivers/scsi/gdth.c ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
cache 2118 drivers/scsi/gdth.c (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
cache 2328 drivers/scsi/gdth.c GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
cache 3538 drivers/scsi/gdth.c cmd.u.cache.DeviceNo = res.number;
cache 3555 drivers/scsi/gdth.c gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
cache 3556 drivers/scsi/gdth.c gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
cache 3557 drivers/scsi/gdth.c gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;
cache 3571 drivers/scsi/gdth.c gen->command.u.cache.DestAddr = 0xffffffff;
cache 3572 drivers/scsi/gdth.c gen->command.u.cache.sg_canz = 1;
cache 3573 drivers/scsi/gdth.c gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
cache 3574 drivers/scsi/gdth.c gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
cache 3575 drivers/scsi/gdth.c gen->command.u.cache.sg_lst[1].sg_len = 0;
cache 3577 drivers/scsi/gdth.c gen->command.u.cache.DestAddr = paddr;
cache 3578 drivers/scsi/gdth.c gen->command.u.cache.sg_canz = 0;
cache 3730 drivers/scsi/gdth.c cmd->u.cache.DeviceNo = i;
cache 3778 drivers/scsi/gdth.c cmd->u.cache.DeviceNo = LINUX_OS;
cache 3795 drivers/scsi/gdth.c cmd->u.cache.DeviceNo = i;
cache 3829 drivers/scsi/gdth.c cmd->u.cache.DeviceNo = i;
cache 3842 drivers/scsi/gdth.c cmd->u.cache.DeviceNo = i;
cache 3857 drivers/scsi/gdth.c cmd->u.cache.DeviceNo = i;
cache 4052 drivers/scsi/gdth.c gdtcmd.u.cache.DeviceNo = i;
cache 4053 drivers/scsi/gdth.c gdtcmd.u.cache.BlockNo = 1;
cache 4054 drivers/scsi/gdth.c gdtcmd.u.cache.sg_canz = 0;
cache 57 drivers/scsi/gdth_ioctl.h } __attribute__((packed)) cache; /* cache service cmd. str. */
cache 70 drivers/scsi/gdth_proc.c gdtcmd.u.cache.DeviceNo = i;
cache 71 drivers/scsi/gdth_proc.c gdtcmd.u.cache.BlockNo = 1;
cache 316 drivers/scsi/hisi_sas/hisi_sas.h u32 *cache);
cache 2917 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c u32 *cache)
cache 2921 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c u32 *buf = cache;
cache 84 drivers/scsi/scsi_lib.c struct kmem_cache *cache;
cache 88 drivers/scsi/scsi_lib.c cache = scsi_select_sense_cache(shost->unchecked_isa_dma);
cache 89 drivers/scsi/scsi_lib.c if (cache)
cache 78 drivers/soc/fsl/qbman/qman_portal.c stash_attr.cache = PAMU_ATTR_CACHE_L1;
cache 177 drivers/soc/fsl/qbman/qman_portal.c stash_attr.cache = PAMU_ATTR_CACHE_L1;
cache 77 drivers/soc/qcom/rpmh-internal.h struct list_head cache;
cache 670 drivers/soc/qcom/rpmh-rsc.c INIT_LIST_HEAD(&drv->client.cache);
cache 106 drivers/soc/qcom/rpmh.c list_for_each_entry(p, &ctrlr->cache, list) {
cache 137 drivers/soc/qcom/rpmh.c list_add_tail(&req->list, &ctrlr->cache);
cache 477 drivers/soc/qcom/rpmh.c list_for_each_entry(p, &ctrlr->cache, list) {
cache 267 drivers/staging/fieldbus/anybuss/host.c struct kmem_cache *cache;
cache 280 drivers/staging/fieldbus/anybuss/host.c static struct ab_task *ab_task_create_get(struct kmem_cache *cache,
cache 285 drivers/staging/fieldbus/anybuss/host.c t = kmem_cache_alloc(cache, GFP_KERNEL);
cache 288 drivers/staging/fieldbus/anybuss/host.c t->cache = cache;
cache 300 drivers/staging/fieldbus/anybuss/host.c struct kmem_cache *cache = t->cache;
cache 302 drivers/staging/fieldbus/anybuss/host.c kmem_cache_free(cache, t);
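
The fieldbus host.c hits stash the owning kmem_cache pointer inside each allocated task (t->cache = cache at line 288) so the release path needs no extra context (kmem_cache_free() through t->cache, lines 300-302). A userspace analogue with a hypothetical pool type standing in for the slab cache:

    #include <stdlib.h>

    struct pool { const char *name; int outstanding; };

    struct ab_task {
        struct pool *cache;     /* which pool this object came from */
    };

    static struct ab_task *task_create(struct pool *cache)
    {
        struct ab_task *t = malloc(sizeof(*t));   /* kmem_cache_alloc() */
        if (t) {
            t->cache = cache;   /* remember the owner, as at line 288 */
            cache->outstanding++;
        }
        return t;
    }

    static void task_put(struct ab_task *t)
    {
        t->cache->outstanding--;   /* object knows where to return itself */
        free(t);                   /* kmem_cache_free(cache, t) */
    }
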
cache 613 drivers/staging/rtl8723bs/core/rtw_wlan_util.c struct cam_entry_cache cache;
cache 616 drivers/staging/rtl8723bs/core/rtw_wlan_util.c memcpy(&cache, &dvobj->cam_cache[id], sizeof(struct cam_entry_cache));
cache 619 drivers/staging/rtl8723bs/core/rtw_wlan_util.c _write_cam(adapter, id, cache.ctrl, cache.mac, cache.key);
cache 185 drivers/tty/serial/atmel_serial.c } cache;
cache 2729 drivers/tty/serial/atmel_serial.c atmel_port->cache.mr = atmel_uart_readl(port, ATMEL_US_MR);
cache 2730 drivers/tty/serial/atmel_serial.c atmel_port->cache.imr = atmel_uart_readl(port, ATMEL_US_IMR);
cache 2731 drivers/tty/serial/atmel_serial.c atmel_port->cache.brgr = atmel_uart_readl(port, ATMEL_US_BRGR);
cache 2732 drivers/tty/serial/atmel_serial.c atmel_port->cache.rtor = atmel_uart_readl(port,
cache 2734 drivers/tty/serial/atmel_serial.c atmel_port->cache.ttgr = atmel_uart_readl(port, ATMEL_US_TTGR);
cache 2735 drivers/tty/serial/atmel_serial.c atmel_port->cache.fmr = atmel_uart_readl(port, ATMEL_US_FMR);
cache 2736 drivers/tty/serial/atmel_serial.c atmel_port->cache.fimr = atmel_uart_readl(port, ATMEL_US_FIMR);
cache 2762 drivers/tty/serial/atmel_serial.c atmel_uart_writel(port, ATMEL_US_MR, atmel_port->cache.mr);
cache 2763 drivers/tty/serial/atmel_serial.c atmel_uart_writel(port, ATMEL_US_IER, atmel_port->cache.imr);
cache 2764 drivers/tty/serial/atmel_serial.c atmel_uart_writel(port, ATMEL_US_BRGR, atmel_port->cache.brgr);
cache 2766 drivers/tty/serial/atmel_serial.c atmel_port->cache.rtor);
cache 2767 drivers/tty/serial/atmel_serial.c atmel_uart_writel(port, ATMEL_US_TTGR, atmel_port->cache.ttgr);
cache 2773 drivers/tty/serial/atmel_serial.c atmel_port->cache.fmr);
cache 2775 drivers/tty/serial/atmel_serial.c atmel_port->cache.fimr);
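
The atmel_serial hits bracket suspend and resume: the live values of MR, IMR, BRGR, RTOR, TTGR, FMR and FIMR are parked in atmel_port->cache before the port powers down and written back afterwards. A compressed sketch with a hypothetical three-register file:

    #include <stdint.h>

    struct uart_regs  { uint32_t mr, imr, brgr; };   /* memory-mapped file */
    struct uart_cache { uint32_t mr, imr, brgr; };   /* shadow for suspend */

    static void uart_suspend(const volatile struct uart_regs *r,
                             struct uart_cache *c)
    {
        c->mr   = r->mr;     /* atmel_uart_readl(port, ATMEL_US_MR) */
        c->imr  = r->imr;    /* interrupt mask is captured here ... */
        c->brgr = r->brgr;
    }

    static void uart_resume(volatile struct uart_regs *r,
                            const struct uart_cache *c)
    {
        r->mr   = c->mr;     /* atmel_uart_writel(port, ATMEL_US_MR, ...) */
        r->imr  = c->imr;    /* ... the driver restores it by writing IER */
        r->brgr = c->brgr;
    }
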
cache 538 drivers/usb/core/quirks.c struct usb_interface_cache *cache;
cache 541 drivers/usb/core/quirks.c cache = cfg->intf_cache[j];
cache 542 drivers/usb/core/quirks.c if (cache->num_altsetting == 0)
cache 545 drivers/usb/core/quirks.c intf = &cache->altsetting[0];
cache 352 drivers/usb/dwc3/core.c evt->cache = devm_kzalloc(dwc->dev, length, GFP_KERNEL);
cache 353 drivers/usb/dwc3/core.c if (!evt->cache)
cache 630 drivers/usb/dwc3/core.h void *cache;
cache 3212 drivers/usb/dwc3/gadget.c event.raw = *(u32 *) (evt->cache + evt->lpos);
cache 3297 drivers/usb/dwc3/gadget.c memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
cache 3300 drivers/usb/dwc3/gadget.c memcpy(evt->cache, evt->buf, count - amount);
cache 473 drivers/video/console/sticore.c sti->regions[i].region_desc.cache,
cache 379 drivers/video/fbdev/ffb.c int cache = par->fifo_cache;
cache 381 drivers/video/fbdev/ffb.c if (cache - n < 0) {
cache 384 drivers/video/fbdev/ffb.c cache = (upa_readl(&fbc->ucsr) & FFB_UCSR_FIFO_MASK);
cache 385 drivers/video/fbdev/ffb.c cache -= 8;
cache 386 drivers/video/fbdev/ffb.c } while (cache - n < 0);
cache 388 drivers/video/fbdev/ffb.c par->fifo_cache = cache - n;
cache 395 drivers/video/fbdev/matrox/g450_pll.c ci = &minfo->cache.pixel;
cache 407 drivers/video/fbdev/matrox/g450_pll.c ci = &minfo->cache.system;
cache 427 drivers/video/fbdev/matrox/g450_pll.c ci = &minfo->cache.video;
cache 472 drivers/video/fbdev/matrox/matroxfb_base.h } cache;
cache 68 drivers/video/fbdev/sticore.h u32 cache : 1; /* map to data cache */
cache 126 fs/9p/v9fs.c if (v9ses->cache)
cache 127 fs/9p/v9fs.c seq_printf(m, ",%s", v9fs_cache_modes[v9ses->cache]);
cache 129 fs/9p/v9fs.c if (v9ses->cachetag && v9ses->cache == CACHE_FSCACHE)
cache 174 fs/9p/v9fs.c v9ses->cache = CACHE_NONE;
cache 270 fs/9p/v9fs.c v9ses->cache = CACHE_LOOSE;
cache 273 fs/9p/v9fs.c v9ses->cache = CACHE_FSCACHE;
cache 276 fs/9p/v9fs.c v9ses->cache = CACHE_MMAP;
cache 300 fs/9p/v9fs.c v9ses->cache = r;
cache 89 fs/9p/v9fs.h unsigned int cache;
cache 78 fs/9p/vfs_file.c if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
cache 97 fs/9p/vfs_file.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
cache 281 fs/9p/vfs_inode.c if (v9ses->cache == CACHE_LOOSE ||
cache 282 fs/9p/vfs_inode.c v9ses->cache == CACHE_FSCACHE)
cache 285 fs/9p/vfs_inode.c else if (v9ses->cache == CACHE_MMAP)
cache 291 fs/9p/vfs_inode.c if (v9ses->cache == CACHE_LOOSE ||
cache 292 fs/9p/vfs_inode.c v9ses->cache == CACHE_FSCACHE)
cache 295 fs/9p/vfs_inode.c else if (v9ses->cache == CACHE_MMAP)
cache 816 fs/9p/vfs_inode.c else if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
cache 879 fs/9p/vfs_inode.c if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
cache 903 fs/9p/vfs_inode.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
cache 1062 fs/9p/vfs_inode.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
cache 1419 fs/9p/vfs_inode.c flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
cache 319 fs/9p/vfs_inode_dotl.c if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
cache 343 fs/9p/vfs_inode_dotl.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
cache 421 fs/9p/vfs_inode_dotl.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
cache 468 fs/9p/vfs_inode_dotl.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
cache 702 fs/9p/vfs_inode_dotl.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
cache 778 fs/9p/vfs_inode_dotl.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
cache 854 fs/9p/vfs_inode_dotl.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
cache 938 fs/9p/vfs_inode_dotl.c flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
cache 83 fs/9p/vfs_super.c if (v9ses->cache)
cache 87 fs/9p/vfs_super.c if (!v9ses->cache)
cache 139 fs/9p/vfs_super.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
cache 277 fs/9p/vfs_super.c if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
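
Nearly every fs/9p hit is the same open-coded disjunction, v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE. As a purely hypothetical refactoring sketch (this helper does not exist in fs/9p, and the enum values below are stand-ins):

    #include <stdbool.h>

    enum p9_cache_mode { CACHE_NONE, CACHE_MMAP, CACHE_LOOSE, CACHE_FSCACHE };

    /* True for the modes that may serve data from the page cache, which is
     * what the repeated test at the call sites above gates on. */
    static inline bool v9fs_cache_loose(enum p9_cache_mode cache)
    {
        return cache == CACHE_LOOSE || cache == CACHE_FSCACHE;
    }
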
cache 594 fs/afs/cell.c cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
cache 637 fs/afs/cell.c fscache_relinquish_cookie(cell->cache, NULL, false);
cache 638 fs/afs/cell.c cell->cache = NULL;
cache 290 fs/afs/file.c ret = fscache_read_or_alloc_page(vnode->cache,
cache 345 fs/afs/file.c fscache_uncache_page(vnode->cache, page);
cache 362 fs/afs/file.c fscache_write_page(vnode->cache, page, vnode->status.size,
cache 364 fs/afs/file.c fscache_uncache_page(vnode->cache, page);
cache 427 fs/afs/file.c fscache_write_page(vnode->cache, page, vnode->status.size,
cache 429 fs/afs/file.c fscache_uncache_page(vnode->cache, page);
cache 492 fs/afs/file.c fscache_uncache_page(vnode->cache, page);
cache 527 fs/afs/file.c fscache_uncache_page(vnode->cache, page);
cache 561 fs/afs/file.c ret = fscache_read_or_alloc_pages(vnode->cache,
cache 621 fs/afs/file.c fscache_wait_on_page_write(vnode->cache, page);
cache 622 fs/afs/file.c fscache_uncache_page(vnode->cache, page);
cache 654 fs/afs/file.c if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
cache 484 fs/afs/inode.c vnode->cache = NULL;
cache 494 fs/afs/inode.c vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
cache 577 fs/afs/inode.c fscache_invalidate(vnode->cache);
cache 797 fs/afs/inode.c fscache_relinquish_cookie(vnode->cache, &aux,
cache 799 fs/afs/inode.c vnode->cache = NULL;
cache 376 fs/afs/internal.h struct fscache_cookie *cache; /* caching cookie */
cache 611 fs/afs/internal.h struct fscache_cookie *cache; /* caching cookie */
cache 652 fs/afs/internal.h struct fscache_cookie *cache; /* caching cookie */
cache 694 fs/afs/internal.h return vnode->cache;
cache 685 fs/afs/super.c vnode->cache = NULL;
cache 156 fs/afs/volume.c ASSERTCMP(volume->cache, ==, NULL);
cache 185 fs/afs/volume.c volume->cache = fscache_acquire_cookie(volume->cell->cache,
cache 209 fs/afs/volume.c fscache_relinquish_cookie(volume->cache, NULL,
cache 211 fs/afs/volume.c volume->cache = NULL;
cache 792 fs/afs/write.c fscache_wait_on_page_write(vnode->cache, vmf->page);
cache 881 fs/afs/write.c fscache_wait_on_page_write(vnode->cache, page);
cache 882 fs/afs/write.c fscache_uncache_page(vnode->cache, page);
cache 123 fs/btrfs/block-group.c void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
cache 125 fs/btrfs/block-group.c atomic_inc(&cache->count);
cache 128 fs/btrfs/block-group.c void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
cache 130 fs/btrfs/block-group.c if (atomic_dec_and_test(&cache->count)) {
cache 131 fs/btrfs/block-group.c WARN_ON(cache->pinned > 0);
cache 132 fs/btrfs/block-group.c WARN_ON(cache->reserved > 0);
cache 142 fs/btrfs/block-group.c WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
cache 143 fs/btrfs/block-group.c kfree(cache->free_space_ctl);
cache 144 fs/btrfs/block-group.c kfree(cache);
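
The first btrfs block-group.c hits are the reference-counting pair: btrfs_get_block_group() is an atomic_inc(), and btrfs_put_block_group() frees the structure when atomic_dec_and_test() drops the count to zero (lines 123-144). The same shape in portable C11 atomics, with abbreviated names:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct block_group {
        atomic_int count;
        /* keys, lists, free-space controller ... */
    };

    static struct block_group *bg_alloc(void)
    {
        struct block_group *bg = calloc(1, sizeof(*bg));
        if (bg)
            atomic_init(&bg->count, 1);   /* creator holds the first ref */
        return bg;
    }

    static void bg_get(struct block_group *bg)
    {
        atomic_fetch_add(&bg->count, 1);              /* atomic_inc() */
    }

    static void bg_put(struct block_group *bg)
    {
        if (atomic_fetch_sub(&bg->count, 1) == 1)     /* dec_and_test() */
            free(bg);                                  /* kfree(cache) */
    }
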
cache 156 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache;
cache 163 fs/btrfs/block-group.c cache = rb_entry(parent, struct btrfs_block_group_cache,
cache 165 fs/btrfs/block-group.c if (block_group->key.objectid < cache->key.objectid) {
cache 167 fs/btrfs/block-group.c } else if (block_group->key.objectid > cache->key.objectid) {
cache 194 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache, *ret = NULL;
cache 202 fs/btrfs/block-group.c cache = rb_entry(n, struct btrfs_block_group_cache,
cache 204 fs/btrfs/block-group.c end = cache->key.objectid + cache->key.offset - 1;
cache 205 fs/btrfs/block-group.c start = cache->key.objectid;
cache 209 fs/btrfs/block-group.c ret = cache;
cache 213 fs/btrfs/block-group.c ret = cache;
cache 218 fs/btrfs/block-group.c ret = cache;
cache 251 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache)
cache 253 fs/btrfs/block-group.c struct btrfs_fs_info *fs_info = cache->fs_info;
cache 259 fs/btrfs/block-group.c if (RB_EMPTY_NODE(&cache->cache_node)) {
cache 260 fs/btrfs/block-group.c const u64 next_bytenr = cache->key.objectid + cache->key.offset;
cache 263 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 264 fs/btrfs/block-group.c cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
cache 266 fs/btrfs/block-group.c node = rb_next(&cache->cache_node);
cache 267 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 269 fs/btrfs/block-group.c cache = rb_entry(node, struct btrfs_block_group_cache,
cache 271 fs/btrfs/block-group.c btrfs_get_block_group(cache);
cache 273 fs/btrfs/block-group.c cache = NULL;
cache 275 fs/btrfs/block-group.c return cache;
cache 360 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache)
cache 364 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 365 fs/btrfs/block-group.c if (!cache->caching_ctl) {
cache 366 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 370 fs/btrfs/block-group.c ctl = cache->caching_ctl;
cache 372 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 395 fs/btrfs/block-group.c void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
cache 400 fs/btrfs/block-group.c caching_ctl = btrfs_get_caching_control(cache);
cache 404 fs/btrfs/block-group.c wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache) ||
cache 405 fs/btrfs/block-group.c (cache->free_space_ctl->free_space >= num_bytes));
cache 410 fs/btrfs/block-group.c int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
cache 415 fs/btrfs/block-group.c caching_ctl = btrfs_get_caching_control(cache);
cache 417 fs/btrfs/block-group.c return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
cache 419 fs/btrfs/block-group.c wait_event(caching_ctl->wait, btrfs_block_group_cache_done(cache));
cache 420 fs/btrfs/block-group.c if (cache->cached == BTRFS_CACHE_ERROR)
cache 680 fs/btrfs/block-group.c int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
cache 684 fs/btrfs/block-group.c struct btrfs_fs_info *fs_info = cache->fs_info;
cache 695 fs/btrfs/block-group.c caching_ctl->block_group = cache;
cache 696 fs/btrfs/block-group.c caching_ctl->progress = cache->key.objectid;
cache 700 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 713 fs/btrfs/block-group.c while (cache->cached == BTRFS_CACHE_FAST) {
cache 716 fs/btrfs/block-group.c ctl = cache->caching_ctl;
cache 719 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 725 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 728 fs/btrfs/block-group.c if (cache->cached != BTRFS_CACHE_NO) {
cache 729 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 733 fs/btrfs/block-group.c WARN_ON(cache->caching_ctl);
cache 734 fs/btrfs/block-group.c cache->caching_ctl = caching_ctl;
cache 735 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_FAST;
cache 736 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 740 fs/btrfs/block-group.c ret = load_free_space_cache(cache);
cache 742 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 744 fs/btrfs/block-group.c cache->caching_ctl = NULL;
cache 745 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_FINISHED;
cache 746 fs/btrfs/block-group.c cache->last_byte_to_unpin = (u64)-1;
cache 750 fs/btrfs/block-group.c cache->caching_ctl = NULL;
cache 751 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_NO;
cache 753 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_STARTED;
cache 754 fs/btrfs/block-group.c cache->has_caching_ctl = 1;
cache 757 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 760 fs/btrfs/block-group.c btrfs_should_fragment_free_space(cache)) {
cache 763 fs/btrfs/block-group.c spin_lock(&cache->space_info->lock);
cache 764 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 765 fs/btrfs/block-group.c bytes_used = cache->key.offset -
cache 766 fs/btrfs/block-group.c btrfs_block_group_used(&cache->item);
cache 767 fs/btrfs/block-group.c cache->space_info->bytes_used += bytes_used >> 1;
cache 768 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 769 fs/btrfs/block-group.c spin_unlock(&cache->space_info->lock);
cache 770 fs/btrfs/block-group.c fragment_free_space(cache);
cache 778 fs/btrfs/block-group.c btrfs_free_excluded_extents(cache);
cache 786 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 788 fs/btrfs/block-group.c cache->caching_ctl = NULL;
cache 789 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_NO;
cache 791 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_STARTED;
cache 792 fs/btrfs/block-group.c cache->has_caching_ctl = 1;
cache 794 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 808 fs/btrfs/block-group.c btrfs_get_block_group(cache);
cache 1186 fs/btrfs/block-group.c static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
cache 1188 fs/btrfs/block-group.c struct btrfs_space_info *sinfo = cache->space_info;
cache 1207 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 1209 fs/btrfs/block-group.c if (cache->ro) {
cache 1210 fs/btrfs/block-group.c cache->ro++;
cache 1215 fs/btrfs/block-group.c num_bytes = cache->key.offset - cache->reserved - cache->pinned -
cache 1216 fs/btrfs/block-group.c cache->bytes_super - btrfs_block_group_used(&cache->item);
cache 1228 fs/btrfs/block-group.c cache->ro++;
cache 1229 fs/btrfs/block-group.c list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
cache 1233 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 1235 fs/btrfs/block-group.c if (ret == -ENOSPC && btrfs_test_opt(cache->fs_info, ENOSPC_DEBUG)) {
cache 1236 fs/btrfs/block-group.c btrfs_info(cache->fs_info,
cache 1238 fs/btrfs/block-group.c cache->key.objectid);
cache 1239 fs/btrfs/block-group.c btrfs_info(cache->fs_info,
cache 1242 fs/btrfs/block-group.c btrfs_dump_space_info(cache->fs_info, cache->space_info, 0, 0);
cache 1524 fs/btrfs/block-group.c static int exclude_super_stripes(struct btrfs_block_group_cache *cache)
cache 1526 fs/btrfs/block-group.c struct btrfs_fs_info *fs_info = cache->fs_info;
cache 1532 fs/btrfs/block-group.c if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
cache 1533 fs/btrfs/block-group.c stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
cache 1534 fs/btrfs/block-group.c cache->bytes_super += stripe_len;
cache 1535 fs/btrfs/block-group.c ret = btrfs_add_excluded_extent(fs_info, cache->key.objectid,
cache 1543 fs/btrfs/block-group.c ret = btrfs_rmap_block(fs_info, cache->key.objectid,
cache 1551 fs/btrfs/block-group.c if (logical[nr] > cache->key.objectid +
cache 1552 fs/btrfs/block-group.c cache->key.offset)
cache 1555 fs/btrfs/block-group.c if (logical[nr] + stripe_len <= cache->key.objectid)
cache 1559 fs/btrfs/block-group.c if (start < cache->key.objectid) {
cache 1560 fs/btrfs/block-group.c start = cache->key.objectid;
cache 1564 fs/btrfs/block-group.c cache->key.objectid +
cache 1565 fs/btrfs/block-group.c cache->key.offset - start);
cache 1568 fs/btrfs/block-group.c cache->bytes_super += len;
cache 1581 fs/btrfs/block-group.c static void link_block_group(struct btrfs_block_group_cache *cache)
cache 1583 fs/btrfs/block-group.c struct btrfs_space_info *space_info = cache->space_info;
cache 1584 fs/btrfs/block-group.c int index = btrfs_bg_flags_to_raid_index(cache->flags);
cache 1590 fs/btrfs/block-group.c list_add_tail(&cache->list, &space_info->block_groups[index]);
cache 1594 fs/btrfs/block-group.c btrfs_sysfs_add_block_group_type(cache);
cache 1600 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache;
cache 1602 fs/btrfs/block-group.c cache = kzalloc(sizeof(*cache), GFP_NOFS);
cache 1603 fs/btrfs/block-group.c if (!cache)
cache 1606 fs/btrfs/block-group.c cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
cache 1608 fs/btrfs/block-group.c if (!cache->free_space_ctl) {
cache 1609 fs/btrfs/block-group.c kfree(cache);
cache 1613 fs/btrfs/block-group.c cache->key.objectid = start;
cache 1614 fs/btrfs/block-group.c cache->key.offset = size;
cache 1615 fs/btrfs/block-group.c cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
cache 1617 fs/btrfs/block-group.c cache->fs_info = fs_info;
cache 1618 fs/btrfs/block-group.c cache->full_stripe_len = btrfs_full_stripe_len(fs_info, start);
cache 1619 fs/btrfs/block-group.c set_free_space_tree_thresholds(cache);
cache 1621 fs/btrfs/block-group.c atomic_set(&cache->count, 1);
cache 1622 fs/btrfs/block-group.c spin_lock_init(&cache->lock);
cache 1623 fs/btrfs/block-group.c init_rwsem(&cache->data_rwsem);
cache 1624 fs/btrfs/block-group.c INIT_LIST_HEAD(&cache->list);
cache 1625 fs/btrfs/block-group.c INIT_LIST_HEAD(&cache->cluster_list);
cache 1626 fs/btrfs/block-group.c INIT_LIST_HEAD(&cache->bg_list);
cache 1627 fs/btrfs/block-group.c INIT_LIST_HEAD(&cache->ro_list);
cache 1628 fs/btrfs/block-group.c INIT_LIST_HEAD(&cache->dirty_list);
cache 1629 fs/btrfs/block-group.c INIT_LIST_HEAD(&cache->io_list);
cache 1630 fs/btrfs/block-group.c btrfs_init_free_space_ctl(cache);
cache 1631 fs/btrfs/block-group.c atomic_set(&cache->trimming, 0);
cache 1632 fs/btrfs/block-group.c mutex_init(&cache->free_space_lock);
cache 1633 fs/btrfs/block-group.c btrfs_init_full_stripe_locks_tree(&cache->full_stripe_locks_root);
cache 1635 fs/btrfs/block-group.c return cache;
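
btrfs_create_block_group_cache() is a two-step allocation with explicit unwind: the free-space controller is allocated second, and its failure frees the half-built group (lines 1606-1609) before anything is published. The skeleton, reduced to the error path:

    #include <stdlib.h>

    struct free_space_ctl { int stub; };

    struct block_group {
        struct free_space_ctl *free_space_ctl;
    };

    static struct block_group *bg_create(void)
    {
        struct block_group *bg = calloc(1, sizeof(*bg));    /* kzalloc */
        if (!bg)
            return NULL;
        bg->free_space_ctl = calloc(1, sizeof(*bg->free_space_ctl));
        if (!bg->free_space_ctl) {
            free(bg);           /* kfree(cache), line 1609: undo step one */
            return NULL;
        }
        /* only now initialize lists, locks, counters and return */
        return bg;
    }
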
cache 1697 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache;
cache 1735 fs/btrfs/block-group.c cache = btrfs_create_block_group_cache(info, found_key.objectid,
cache 1737 fs/btrfs/block-group.c if (!cache) {
cache 1754 fs/btrfs/block-group.c cache->disk_cache_state = BTRFS_DC_CLEAR;
cache 1757 fs/btrfs/block-group.c read_extent_buffer(leaf, &cache->item,
cache 1759 fs/btrfs/block-group.c sizeof(cache->item));
cache 1760 fs/btrfs/block-group.c cache->flags = btrfs_block_group_flags(&cache->item);
cache 1762 fs/btrfs/block-group.c ((cache->flags & BTRFS_BLOCK_GROUP_METADATA) &&
cache 1763 fs/btrfs/block-group.c (cache->flags & BTRFS_BLOCK_GROUP_DATA))) {
cache 1766 fs/btrfs/block-group.c cache->key.objectid);
cache 1767 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 1780 fs/btrfs/block-group.c ret = exclude_super_stripes(cache);
cache 1786 fs/btrfs/block-group.c btrfs_free_excluded_extents(cache);
cache 1787 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 1798 fs/btrfs/block-group.c if (found_key.offset == btrfs_block_group_used(&cache->item)) {
cache 1799 fs/btrfs/block-group.c cache->last_byte_to_unpin = (u64)-1;
cache 1800 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_FINISHED;
cache 1801 fs/btrfs/block-group.c btrfs_free_excluded_extents(cache);
cache 1802 fs/btrfs/block-group.c } else if (btrfs_block_group_used(&cache->item) == 0) {
cache 1803 fs/btrfs/block-group.c cache->last_byte_to_unpin = (u64)-1;
cache 1804 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_FINISHED;
cache 1805 fs/btrfs/block-group.c add_new_free_space(cache, found_key.objectid,
cache 1808 fs/btrfs/block-group.c btrfs_free_excluded_extents(cache);
cache 1811 fs/btrfs/block-group.c ret = btrfs_add_block_group_cache(info, cache);
cache 1813 fs/btrfs/block-group.c btrfs_remove_free_space_cache(cache);
cache 1814 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 1818 fs/btrfs/block-group.c trace_btrfs_add_block_group(info, cache, 0);
cache 1819 fs/btrfs/block-group.c btrfs_update_space_info(info, cache->flags, found_key.offset,
cache 1820 fs/btrfs/block-group.c btrfs_block_group_used(&cache->item),
cache 1821 fs/btrfs/block-group.c cache->bytes_super, &space_info);
cache 1823 fs/btrfs/block-group.c cache->space_info = space_info;
cache 1825 fs/btrfs/block-group.c link_block_group(cache);
cache 1827 fs/btrfs/block-group.c set_avail_alloc_bits(info, cache->flags);
cache 1828 fs/btrfs/block-group.c if (btrfs_chunk_readonly(info, cache->key.objectid)) {
cache 1829 fs/btrfs/block-group.c inc_block_group_ro(cache, 1);
cache 1830 fs/btrfs/block-group.c } else if (btrfs_block_group_used(&cache->item) == 0) {
cache 1831 fs/btrfs/block-group.c ASSERT(list_empty(&cache->bg_list));
cache 1832 fs/btrfs/block-group.c btrfs_mark_bg_unused(cache);
cache 1848 fs/btrfs/block-group.c list_for_each_entry(cache,
cache 1851 fs/btrfs/block-group.c inc_block_group_ro(cache, 1);
cache 1852 fs/btrfs/block-group.c list_for_each_entry(cache,
cache 1855 fs/btrfs/block-group.c inc_block_group_ro(cache, 1);
cache 1910 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache;
cache 1915 fs/btrfs/block-group.c cache = btrfs_create_block_group_cache(fs_info, chunk_offset, size);
cache 1916 fs/btrfs/block-group.c if (!cache)
cache 1919 fs/btrfs/block-group.c btrfs_set_block_group_used(&cache->item, bytes_used);
cache 1920 fs/btrfs/block-group.c btrfs_set_block_group_chunk_objectid(&cache->item,
cache 1922 fs/btrfs/block-group.c btrfs_set_block_group_flags(&cache->item, type);
cache 1924 fs/btrfs/block-group.c cache->flags = type;
cache 1925 fs/btrfs/block-group.c cache->last_byte_to_unpin = (u64)-1;
cache 1926 fs/btrfs/block-group.c cache->cached = BTRFS_CACHE_FINISHED;
cache 1927 fs/btrfs/block-group.c cache->needs_free_space = 1;
cache 1928 fs/btrfs/block-group.c ret = exclude_super_stripes(cache);
cache 1931 fs/btrfs/block-group.c btrfs_free_excluded_extents(cache);
cache 1932 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 1936 fs/btrfs/block-group.c add_new_free_space(cache, chunk_offset, chunk_offset + size);
cache 1938 fs/btrfs/block-group.c btrfs_free_excluded_extents(cache);
cache 1941 fs/btrfs/block-group.c if (btrfs_should_fragment_free_space(cache)) {
cache 1945 fs/btrfs/block-group.c fragment_free_space(cache);
cache 1953 fs/btrfs/block-group.c cache->space_info = btrfs_find_space_info(fs_info, cache->flags);
cache 1954 fs/btrfs/block-group.c ASSERT(cache->space_info);
cache 1956 fs/btrfs/block-group.c ret = btrfs_add_block_group_cache(fs_info, cache);
cache 1958 fs/btrfs/block-group.c btrfs_remove_free_space_cache(cache);
cache 1959 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 1967 fs/btrfs/block-group.c trace_btrfs_add_block_group(fs_info, cache, 1);
cache 1968 fs/btrfs/block-group.c btrfs_update_space_info(fs_info, cache->flags, size, bytes_used,
cache 1969 fs/btrfs/block-group.c cache->bytes_super, &cache->space_info);
cache 1972 fs/btrfs/block-group.c link_block_group(cache);
cache 1974 fs/btrfs/block-group.c list_add_tail(&cache->bg_list, &trans->new_bgs);
cache 2030 fs/btrfs/block-group.c int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache)
cache 2033 fs/btrfs/block-group.c struct btrfs_fs_info *fs_info = cache->fs_info;
cache 2065 fs/btrfs/block-group.c alloc_flags = update_block_group_flags(fs_info, cache->flags);
cache 2066 fs/btrfs/block-group.c if (alloc_flags != cache->flags) {
cache 2079 fs/btrfs/block-group.c ret = inc_block_group_ro(cache, 0);
cache 2082 fs/btrfs/block-group.c alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
cache 2086 fs/btrfs/block-group.c ret = inc_block_group_ro(cache, 0);
cache 2088 fs/btrfs/block-group.c if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
cache 2089 fs/btrfs/block-group.c alloc_flags = update_block_group_flags(fs_info, cache->flags);
cache 2100 fs/btrfs/block-group.c void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
cache 2102 fs/btrfs/block-group.c struct btrfs_space_info *sinfo = cache->space_info;
cache 2105 fs/btrfs/block-group.c BUG_ON(!cache->ro);
cache 2108 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 2109 fs/btrfs/block-group.c if (!--cache->ro) {
cache 2110 fs/btrfs/block-group.c num_bytes = cache->key.offset - cache->reserved -
cache 2111 fs/btrfs/block-group.c cache->pinned - cache->bytes_super -
cache 2112 fs/btrfs/block-group.c btrfs_block_group_used(&cache->item);
cache 2114 fs/btrfs/block-group.c list_del_init(&cache->ro_list);
cache 2116 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 2122 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache)
cache 2130 fs/btrfs/block-group.c ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
cache 2139 fs/btrfs/block-group.c write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
cache 2311 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache, *tmp;
cache 2324 fs/btrfs/block-group.c list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
cache 2326 fs/btrfs/block-group.c if (cache->disk_cache_state == BTRFS_DC_CLEAR)
cache 2327 fs/btrfs/block-group.c cache_save_setup(cache, trans, path);
cache 2349 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache;
cache 2386 fs/btrfs/block-group.c cache = list_first_entry(&dirty,
cache 2394 fs/btrfs/block-group.c if (!list_empty(&cache->io_list)) {
cache 2395 fs/btrfs/block-group.c list_del_init(&cache->io_list);
cache 2396 fs/btrfs/block-group.c btrfs_wait_cache_io(trans, cache, path);
cache 2397 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 2410 fs/btrfs/block-group.c list_del_init(&cache->dirty_list);
cache 2415 fs/btrfs/block-group.c cache_save_setup(cache, trans, path);
cache 2417 fs/btrfs/block-group.c if (cache->disk_cache_state == BTRFS_DC_SETUP) {
cache 2418 fs/btrfs/block-group.c cache->io_ctl.inode = NULL;
cache 2419 fs/btrfs/block-group.c ret = btrfs_write_out_cache(trans, cache, path);
cache 2420 fs/btrfs/block-group.c if (ret == 0 && cache->io_ctl.inode) {
cache 2429 fs/btrfs/block-group.c list_add_tail(&cache->io_list, io);
cache 2439 fs/btrfs/block-group.c ret = write_one_cache_group(trans, path, cache);
cache 2452 fs/btrfs/block-group.c if (list_empty(&cache->dirty_list)) {
cache 2453 fs/btrfs/block-group.c list_add_tail(&cache->dirty_list,
cache 2455 fs/btrfs/block-group.c btrfs_get_block_group(cache);
cache 2466 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 2512 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache;
cache 2541 fs/btrfs/block-group.c cache = list_first_entry(&cur_trans->dirty_bgs,
cache 2550 fs/btrfs/block-group.c if (!list_empty(&cache->io_list)) {
cache 2552 fs/btrfs/block-group.c list_del_init(&cache->io_list);
cache 2553 fs/btrfs/block-group.c btrfs_wait_cache_io(trans, cache, path);
cache 2554 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 2562 fs/btrfs/block-group.c list_del_init(&cache->dirty_list);
cache 2566 fs/btrfs/block-group.c cache_save_setup(cache, trans, path);
cache 2572 fs/btrfs/block-group.c if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
cache 2573 fs/btrfs/block-group.c cache->io_ctl.inode = NULL;
cache 2574 fs/btrfs/block-group.c ret = btrfs_write_out_cache(trans, cache, path);
cache 2575 fs/btrfs/block-group.c if (ret == 0 && cache->io_ctl.inode) {
cache 2578 fs/btrfs/block-group.c list_add_tail(&cache->io_list, io);
cache 2588 fs/btrfs/block-group.c ret = write_one_cache_group(trans, path, cache);
cache 2605 fs/btrfs/block-group.c ret = write_one_cache_group(trans, path, cache);
cache 2613 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 2624 fs/btrfs/block-group.c cache = list_first_entry(io, struct btrfs_block_group_cache,
cache 2626 fs/btrfs/block-group.c list_del_init(&cache->io_list);
cache 2627 fs/btrfs/block-group.c btrfs_wait_cache_io(trans, cache, path);
cache 2628 fs/btrfs/block-group.c btrfs_put_block_group(cache);
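
Both write-out loops above drain dirty_bgs by repeatedly taking list_first_entry() and list_del_init()-ing it rather than iterating in place, because the list can gain members while a group is being written. The control shape, with a singly linked stand-in list:

    #include <stddef.h>

    struct bg { struct bg *next; int dirty; };

    static void write_dirty(struct bg **dirty_head)
    {
        while (*dirty_head) {                  /* re-check head every pass */
            struct bg *cache = *dirty_head;    /* list_first_entry()       */
            *dirty_head = cache->next;         /* list_del_init()          */
            cache->next = NULL;
            cache->dirty = 0;   /* stand-in for write_one_cache_group()    */
        }
    }
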
cache 2639 fs/btrfs/block-group.c struct btrfs_block_group_cache *cache = NULL;
cache 2657 fs/btrfs/block-group.c cache = btrfs_lookup_block_group(info, bytenr);
cache 2658 fs/btrfs/block-group.c if (!cache) {
cache 2662 fs/btrfs/block-group.c factor = btrfs_bg_type_to_factor(cache->flags);
cache 2670 fs/btrfs/block-group.c if (!alloc && !btrfs_block_group_cache_done(cache))
cache 2671 fs/btrfs/block-group.c btrfs_cache_block_group(cache, 1);
cache 2673 fs/btrfs/block-group.c byte_in_group = bytenr - cache->key.objectid;
cache 2674 fs/btrfs/block-group.c WARN_ON(byte_in_group > cache->key.offset);
cache 2676 fs/btrfs/block-group.c spin_lock(&cache->space_info->lock);
cache 2677 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 2680 fs/btrfs/block-group.c cache->disk_cache_state < BTRFS_DC_CLEAR)
cache 2681 fs/btrfs/block-group.c cache->disk_cache_state = BTRFS_DC_CLEAR;
cache 2683 fs/btrfs/block-group.c old_val = btrfs_block_group_used(&cache->item);
cache 2684 fs/btrfs/block-group.c num_bytes = min(total, cache->key.offset - byte_in_group);
cache 2687 fs/btrfs/block-group.c btrfs_set_block_group_used(&cache->item, old_val);
cache 2688 fs/btrfs/block-group.c cache->reserved -= num_bytes;
cache 2689 fs/btrfs/block-group.c cache->space_info->bytes_reserved -= num_bytes;
cache 2690 fs/btrfs/block-group.c cache->space_info->bytes_used += num_bytes;
cache 2691 fs/btrfs/block-group.c cache->space_info->disk_used += num_bytes * factor;
cache 2692 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 2693 fs/btrfs/block-group.c spin_unlock(&cache->space_info->lock);
cache 2696 fs/btrfs/block-group.c btrfs_set_block_group_used(&cache->item, old_val);
cache 2697 fs/btrfs/block-group.c cache->pinned += num_bytes;
cache 2699 fs/btrfs/block-group.c cache->space_info, num_bytes);
cache 2700 fs/btrfs/block-group.c cache->space_info->bytes_used -= num_bytes;
cache 2701 fs/btrfs/block-group.c cache->space_info->disk_used -= num_bytes * factor;
cache 2702 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 2703 fs/btrfs/block-group.c spin_unlock(&cache->space_info->lock);
cache 2706 fs/btrfs/block-group.c &cache->space_info->total_bytes_pinned,
cache 2715 fs/btrfs/block-group.c if (list_empty(&cache->dirty_list)) {
cache 2716 fs/btrfs/block-group.c list_add_tail(&cache->dirty_list,
cache 2719 fs/btrfs/block-group.c btrfs_get_block_group(cache);
cache 2730 fs/btrfs/block-group.c btrfs_mark_bg_unused(cache);
cache 2732 fs/btrfs/block-group.c btrfs_put_block_group(cache);
cache 2754 fs/btrfs/block-group.c int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
cache 2757 fs/btrfs/block-group.c struct btrfs_space_info *space_info = cache->space_info;
cache 2761 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 2762 fs/btrfs/block-group.c if (cache->ro) {
cache 2765 fs/btrfs/block-group.c cache->reserved += num_bytes;
cache 2767 fs/btrfs/block-group.c trace_btrfs_space_reservation(cache->fs_info, "space_info",
cache 2769 fs/btrfs/block-group.c btrfs_space_info_update_bytes_may_use(cache->fs_info,
cache 2772 fs/btrfs/block-group.c cache->delalloc_bytes += num_bytes;
cache 2774 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 2790 fs/btrfs/block-group.c void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
cache 2793 fs/btrfs/block-group.c struct btrfs_space_info *space_info = cache->space_info;
cache 2796 fs/btrfs/block-group.c spin_lock(&cache->lock);
cache 2797 fs/btrfs/block-group.c if (cache->ro)
cache 2799 fs/btrfs/block-group.c cache->reserved -= num_bytes;
cache 2804 fs/btrfs/block-group.c cache->delalloc_bytes -= num_bytes;
cache 2805 fs/btrfs/block-group.c spin_unlock(&cache->lock);
cache 178 fs/btrfs/block-group.h struct btrfs_block_group_cache *cache);
cache 179 fs/btrfs/block-group.h void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
cache 180 fs/btrfs/block-group.h void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
cache 187 fs/btrfs/block-group.h void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
cache 189 fs/btrfs/block-group.h int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache);
cache 190 fs/btrfs/block-group.h int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
cache 194 fs/btrfs/block-group.h struct btrfs_block_group_cache *cache);
cache 208 fs/btrfs/block-group.h int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
cache 209 fs/btrfs/block-group.h void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
cache 215 fs/btrfs/block-group.h int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
cache 217 fs/btrfs/block-group.h void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
cache 243 fs/btrfs/block-group.h struct btrfs_block_group_cache *cache)
cache 246 fs/btrfs/block-group.h return cache->cached == BTRFS_CACHE_FINISHED ||
cache 247 fs/btrfs/block-group.h cache->cached == BTRFS_CACHE_ERROR;
cache 2398 fs/btrfs/ctree.h void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache);
cache 2454 fs/btrfs/ctree.h void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
cache 2455 fs/btrfs/ctree.h void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
cache 4462 fs/btrfs/disk-io.c static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
cache 4466 fs/btrfs/disk-io.c inode = cache->io_ctl.inode;
cache 4470 fs/btrfs/disk-io.c cache->io_ctl.inode = NULL;
cache 4473 fs/btrfs/disk-io.c btrfs_put_block_group(cache);
cache 4479 fs/btrfs/disk-io.c struct btrfs_block_group_cache *cache;
cache 4483 fs/btrfs/disk-io.c cache = list_first_entry(&cur_trans->dirty_bgs,
cache 4487 fs/btrfs/disk-io.c if (!list_empty(&cache->io_list)) {
cache 4489 fs/btrfs/disk-io.c list_del_init(&cache->io_list);
cache 4490 fs/btrfs/disk-io.c btrfs_cleanup_bg_io(cache);
cache 4494 fs/btrfs/disk-io.c list_del_init(&cache->dirty_list);
cache 4495 fs/btrfs/disk-io.c spin_lock(&cache->lock);
cache 4496 fs/btrfs/disk-io.c cache->disk_cache_state = BTRFS_DC_ERROR;
cache 4497 fs/btrfs/disk-io.c spin_unlock(&cache->lock);
cache 4500 fs/btrfs/disk-io.c btrfs_put_block_group(cache);
cache 4511 fs/btrfs/disk-io.c cache = list_first_entry(&cur_trans->io_bgs,
cache 4515 fs/btrfs/disk-io.c list_del_init(&cache->io_list);
cache 4516 fs/btrfs/disk-io.c spin_lock(&cache->lock);
cache 4517 fs/btrfs/disk-io.c cache->disk_cache_state = BTRFS_DC_ERROR;
cache 4518 fs/btrfs/disk-io.c spin_unlock(&cache->lock);
cache 4519 fs/btrfs/disk-io.c btrfs_cleanup_bg_io(cache);
cache 57 fs/btrfs/extent-tree.c static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
cache 59 fs/btrfs/extent-tree.c return (cache->flags & bits) == bits;
cache 73 fs/btrfs/extent-tree.c void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache)
cache 75 fs/btrfs/extent-tree.c struct btrfs_fs_info *fs_info = cache->fs_info;
cache 78 fs/btrfs/extent-tree.c start = cache->key.objectid;
cache 79 fs/btrfs/extent-tree.c end = start + cache->key.offset - 1;
cache 2549 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache;
cache 2559 fs/btrfs/extent-tree.c cache = btrfs_lookup_first_block_group(fs_info, search_start);
cache 2560 fs/btrfs/extent-tree.c if (!cache)
cache 2563 fs/btrfs/extent-tree.c bytenr = cache->key.objectid;
cache 2564 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 2569 fs/btrfs/extent-tree.c static int pin_down_extent(struct btrfs_block_group_cache *cache,
cache 2572 fs/btrfs/extent-tree.c struct btrfs_fs_info *fs_info = cache->fs_info;
cache 2574 fs/btrfs/extent-tree.c spin_lock(&cache->space_info->lock);
cache 2575 fs/btrfs/extent-tree.c spin_lock(&cache->lock);
cache 2576 fs/btrfs/extent-tree.c cache->pinned += num_bytes;
cache 2577 fs/btrfs/extent-tree.c btrfs_space_info_update_bytes_pinned(fs_info, cache->space_info,
cache 2580 fs/btrfs/extent-tree.c cache->reserved -= num_bytes;
cache 2581 fs/btrfs/extent-tree.c cache->space_info->bytes_reserved -= num_bytes;
cache 2583 fs/btrfs/extent-tree.c spin_unlock(&cache->lock);
cache 2584 fs/btrfs/extent-tree.c spin_unlock(&cache->space_info->lock);
cache 2586 fs/btrfs/extent-tree.c percpu_counter_add_batch(&cache->space_info->total_bytes_pinned,
cache 2599 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache;
cache 2601 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, bytenr);
cache 2602 fs/btrfs/extent-tree.c BUG_ON(!cache); /* Logic error */
cache 2604 fs/btrfs/extent-tree.c pin_down_extent(cache, bytenr, num_bytes, reserved);
cache 2606 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 2616 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache;
cache 2619 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, bytenr);
cache 2620 fs/btrfs/extent-tree.c if (!cache)
cache 2629 fs/btrfs/extent-tree.c btrfs_cache_block_group(cache, 1);
cache 2631 fs/btrfs/extent-tree.c pin_down_extent(cache, bytenr, num_bytes, 0);
cache 2634 fs/btrfs/extent-tree.c ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
cache 2635 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 2729 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache;
cache 2735 fs/btrfs/extent-tree.c cache = caching_ctl->block_group;
cache 2736 fs/btrfs/extent-tree.c if (btrfs_block_group_cache_done(cache)) {
cache 2737 fs/btrfs/extent-tree.c cache->last_byte_to_unpin = (u64)-1;
cache 2741 fs/btrfs/extent-tree.c cache->last_byte_to_unpin = caching_ctl->progress;
cache 2788 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache = NULL;
cache 2799 fs/btrfs/extent-tree.c if (!cache ||
cache 2800 fs/btrfs/extent-tree.c start >= cache->key.objectid + cache->key.offset) {
cache 2801 fs/btrfs/extent-tree.c if (cache)
cache 2802 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 2804 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, start);
cache 2805 fs/btrfs/extent-tree.c BUG_ON(!cache); /* Logic error */
cache 2808 fs/btrfs/extent-tree.c cache->space_info,
cache 2813 fs/btrfs/extent-tree.c len = cache->key.objectid + cache->key.offset - start;
cache 2816 fs/btrfs/extent-tree.c if (start < cache->last_byte_to_unpin) {
cache 2817 fs/btrfs/extent-tree.c len = min(len, cache->last_byte_to_unpin - start);
cache 2819 fs/btrfs/extent-tree.c btrfs_add_free_space(cache, start, len);
cache 2824 fs/btrfs/extent-tree.c space_info = cache->space_info;
cache 2840 fs/btrfs/extent-tree.c spin_lock(&cache->lock);
cache 2841 fs/btrfs/extent-tree.c cache->pinned -= len;
cache 2846 fs/btrfs/extent-tree.c if (cache->ro) {
cache 2850 fs/btrfs/extent-tree.c spin_unlock(&cache->lock);
cache 2875 fs/btrfs/extent-tree.c if (cache)
cache 2876 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 3266 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache;
cache 3275 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, buf->start);
cache 3278 fs/btrfs/extent-tree.c pin_down_extent(cache, buf->start, buf->len, 1);
cache 3279 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 3285 fs/btrfs/extent-tree.c btrfs_add_free_space(cache, buf->start, buf->len);
cache 3286 fs/btrfs/extent-tree.c btrfs_free_reserved_bytes(cache, buf->len, 0);
cache 3287 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 3353 fs/btrfs/extent-tree.c btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
cache 3357 fs/btrfs/extent-tree.c down_read(&cache->data_rwsem);
cache 3361 fs/btrfs/extent-tree.c btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
cache 3364 fs/btrfs/extent-tree.c btrfs_get_block_group(cache);
cache 3366 fs/btrfs/extent-tree.c down_read(&cache->data_rwsem);
cache 3408 fs/btrfs/extent-tree.c btrfs_release_block_group(struct btrfs_block_group_cache *cache,
cache 3412 fs/btrfs/extent-tree.c up_read(&cache->data_rwsem);
cache 3413 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 4153 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache;
cache 4156 fs/btrfs/extent-tree.c cache = btrfs_lookup_block_group(fs_info, start);
cache 4157 fs/btrfs/extent-tree.c if (!cache) {
cache 4164 fs/btrfs/extent-tree.c pin_down_extent(cache, start, len, 1);
cache 4168 fs/btrfs/extent-tree.c btrfs_add_free_space(cache, start, len);
cache 4169 fs/btrfs/extent-tree.c btrfs_free_reserved_bytes(cache, len, delalloc);
cache 4173 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 5645 fs/btrfs/extent-tree.c struct btrfs_block_group_cache *cache = NULL;
cache 5667 fs/btrfs/extent-tree.c cache = btrfs_lookup_first_block_group(fs_info, range->start);
cache 5668 fs/btrfs/extent-tree.c for (; cache; cache = btrfs_next_block_group(cache)) {
cache 5669 fs/btrfs/extent-tree.c if (cache->key.objectid >= range_end) {
cache 5670 fs/btrfs/extent-tree.c btrfs_put_block_group(cache);
cache 5674 fs/btrfs/extent-tree.c start = max(range->start, cache->key.objectid);
cache 5675 fs/btrfs/extent-tree.c end = min(range_end, cache->key.objectid + cache->key.offset);
cache 5678 fs/btrfs/extent-tree.c if (!btrfs_block_group_cache_done(cache)) {
cache 5679 fs/btrfs/extent-tree.c ret = btrfs_cache_block_group(cache, 0);
cache 5685 fs/btrfs/extent-tree.c ret = btrfs_wait_block_group_cache_done(cache);
cache 5692 fs/btrfs/extent-tree.c ret = btrfs_trim_block_group(cache,
cache 4548 fs/btrfs/extent_io.c struct fiemap_cache *cache,
cache 4553 fs/btrfs/extent_io.c if (!cache->cached)
cache 4563 fs/btrfs/extent_io.c if (cache->offset + cache->len > offset) {
cache 4579 fs/btrfs/extent_io.c if (cache->offset + cache->len == offset &&
cache 4580 fs/btrfs/extent_io.c cache->phys + cache->len == phys &&
cache 4581 fs/btrfs/extent_io.c (cache->flags & ~FIEMAP_EXTENT_LAST) ==
cache 4583 fs/btrfs/extent_io.c cache->len += len;
cache 4584 fs/btrfs/extent_io.c cache->flags |= flags;
cache 4589 fs/btrfs/extent_io.c ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
cache 4590 fs/btrfs/extent_io.c cache->len, cache->flags);
cache 4591 fs/btrfs/extent_io.c cache->cached = false;
cache 4595 fs/btrfs/extent_io.c cache->cached = true;
cache 4596 fs/btrfs/extent_io.c cache->offset = offset;
cache 4597 fs/btrfs/extent_io.c cache->phys = phys;
cache 4598 fs/btrfs/extent_io.c cache->len = len;
cache 4599 fs/btrfs/extent_io.c cache->flags = flags;
cache 4601 fs/btrfs/extent_io.c if (cache->flags & FIEMAP_EXTENT_LAST) {
cache 4602 fs/btrfs/extent_io.c ret = fiemap_fill_next_extent(fieinfo, cache->offset,
cache 4603 fs/btrfs/extent_io.c cache->phys, cache->len, cache->flags);
cache 4604 fs/btrfs/extent_io.c cache->cached = false;
cache 4621 fs/btrfs/extent_io.c struct fiemap_cache *cache)
cache 4625 fs/btrfs/extent_io.c if (!cache->cached)
cache 4628 fs/btrfs/extent_io.c ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys,
cache 4629 fs/btrfs/extent_io.c cache->len, cache->flags);
cache 4630 fs/btrfs/extent_io.c cache->cached = false;
cache 4653 fs/btrfs/extent_io.c struct fiemap_cache cache = { 0 };
cache 4825 fs/btrfs/extent_io.c ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko,
cache 4835 fs/btrfs/extent_io.c ret = emit_last_fiemap_cache(fieinfo, &cache);
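
The extent_io.c hits implement a one-entry coalescing cache for fiemap: the last extent is held back, widened while new extents are logically and physically contiguous with matching flags (lines 4579-4584), and flushed to userspace only when merging fails or the run ends. A self-contained sketch with printf standing in for fiemap_fill_next_extent():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fiemap_cache {
        bool cached;
        uint64_t offset, phys, len;
        uint32_t flags;
    };

    static void cache_flush(struct fiemap_cache *c)
    {
        if (c->cached)
            printf("extent off=%llu phys=%llu len=%llu\n",
                   (unsigned long long)c->offset,
                   (unsigned long long)c->phys,
                   (unsigned long long)c->len);
        c->cached = false;
    }

    static void cache_emit(struct fiemap_cache *c, uint64_t offset,
                           uint64_t phys, uint64_t len, uint32_t flags)
    {
        if (c->cached && c->offset + c->len == offset &&
            c->phys + c->len == phys && c->flags == flags) {
            c->len += len;              /* merge, as at line 4583 */
            return;
        }
        cache_flush(c);                 /* emit the previous extent */
        c->cached = true;
        c->offset = offset;
        c->phys   = phys;
        c->len    = len;
        c->flags  = flags;
    }
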
fs/btrfs/extent_io.c cache->phys, cache->len, cache->flags); cache 4604 fs/btrfs/extent_io.c cache->cached = false; cache 4621 fs/btrfs/extent_io.c struct fiemap_cache *cache) cache 4625 fs/btrfs/extent_io.c if (!cache->cached) cache 4628 fs/btrfs/extent_io.c ret = fiemap_fill_next_extent(fieinfo, cache->offset, cache->phys, cache 4629 fs/btrfs/extent_io.c cache->len, cache->flags); cache 4630 fs/btrfs/extent_io.c cache->cached = false; cache 4653 fs/btrfs/extent_io.c struct fiemap_cache cache = { 0 }; cache 4825 fs/btrfs/extent_io.c ret = emit_fiemap_extent(fieinfo, &cache, em_start, disko, cache 4835 fs/btrfs/extent_io.c ret = emit_last_fiemap_cache(fieinfo, &cache); cache 3361 fs/btrfs/free-space-cache.c void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache) cache 3363 fs/btrfs/free-space-cache.c atomic_inc(&cache->trimming); cache 3599 fs/btrfs/free-space-cache.c int test_add_free_space_entry(struct btrfs_block_group_cache *cache, cache 3602 fs/btrfs/free-space-cache.c struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; cache 3667 fs/btrfs/free-space-cache.c int test_check_exists(struct btrfs_block_group_cache *cache, cache 3670 fs/btrfs/free-space-cache.c struct btrfs_free_space_ctl *ctl = cache->free_space_ctl; cache 116 fs/btrfs/free-space-cache.h int test_add_free_space_entry(struct btrfs_block_group_cache *cache, cache 118 fs/btrfs/free-space-cache.h int test_check_exists(struct btrfs_block_group_cache *cache, cache 19 fs/btrfs/free-space-tree.c void set_free_space_tree_thresholds(struct btrfs_block_group_cache *cache) cache 29 fs/btrfs/free-space-tree.c bitmap_range = cache->fs_info->sectorsize * BTRFS_FREE_SPACE_BITMAP_BITS; cache 30 fs/btrfs/free-space-tree.c num_bitmaps = div_u64(cache->key.offset + bitmap_range - 1, cache 34 fs/btrfs/free-space-tree.c cache->bitmap_high_thresh = div_u64(total_bitmap_size, cache 41 fs/btrfs/free-space-tree.c if (cache->bitmap_high_thresh > 100) cache 42 fs/btrfs/free-space-tree.c cache->bitmap_low_thresh = cache->bitmap_high_thresh - 100; cache 44 fs/btrfs/free-space-tree.c cache->bitmap_low_thresh = 0; cache 3091 fs/btrfs/inode.c struct btrfs_block_group_cache *cache; cache 3093 fs/btrfs/inode.c cache = btrfs_lookup_block_group(fs_info, start); cache 3094 fs/btrfs/inode.c ASSERT(cache); cache 3096 fs/btrfs/inode.c spin_lock(&cache->lock); cache 3097 fs/btrfs/inode.c cache->delalloc_bytes -= len; cache 3098 fs/btrfs/inode.c spin_unlock(&cache->lock); cache 3100 fs/btrfs/inode.c btrfs_put_block_group(cache); cache 993 fs/btrfs/ordered-data.c struct extent_state *cache = NULL; cache 994 fs/btrfs/ordered-data.c struct extent_state **cachedp = &cache; cache 1010 fs/btrfs/ordered-data.c refcount_dec(&cache->refs); cache 230 fs/btrfs/reada.c struct btrfs_block_group_cache *cache = NULL; cache 247 fs/btrfs/reada.c cache = btrfs_lookup_block_group(fs_info, logical); cache 248 fs/btrfs/reada.c if (!cache) cache 251 fs/btrfs/reada.c start = cache->key.objectid; cache 252 fs/btrfs/reada.c end = start + cache->key.offset - 1; cache 253 fs/btrfs/reada.c btrfs_put_block_group(cache); cache 189 fs/btrfs/relocation.c static void remove_backref_node(struct backref_cache *cache, cache 200 fs/btrfs/relocation.c static void backref_cache_init(struct backref_cache *cache) cache 203 fs/btrfs/relocation.c cache->rb_root = RB_ROOT; cache 205 fs/btrfs/relocation.c INIT_LIST_HEAD(&cache->pending[i]); cache 206 fs/btrfs/relocation.c INIT_LIST_HEAD(&cache->changed); cache 207 fs/btrfs/relocation.c INIT_LIST_HEAD(&cache->detached); cache 208 
fs/btrfs/relocation.c INIT_LIST_HEAD(&cache->leaves); cache 211 fs/btrfs/relocation.c static void backref_cache_cleanup(struct backref_cache *cache) cache 216 fs/btrfs/relocation.c while (!list_empty(&cache->detached)) { cache 217 fs/btrfs/relocation.c node = list_entry(cache->detached.next, cache 219 fs/btrfs/relocation.c remove_backref_node(cache, node); cache 222 fs/btrfs/relocation.c while (!list_empty(&cache->leaves)) { cache 223 fs/btrfs/relocation.c node = list_entry(cache->leaves.next, cache 225 fs/btrfs/relocation.c remove_backref_node(cache, node); cache 228 fs/btrfs/relocation.c cache->last_trans = 0; cache 231 fs/btrfs/relocation.c ASSERT(list_empty(&cache->pending[i])); cache 232 fs/btrfs/relocation.c ASSERT(list_empty(&cache->changed)); cache 233 fs/btrfs/relocation.c ASSERT(list_empty(&cache->detached)); cache 234 fs/btrfs/relocation.c ASSERT(RB_EMPTY_ROOT(&cache->rb_root)); cache 235 fs/btrfs/relocation.c ASSERT(!cache->nr_nodes); cache 236 fs/btrfs/relocation.c ASSERT(!cache->nr_edges); cache 239 fs/btrfs/relocation.c static struct backref_node *alloc_backref_node(struct backref_cache *cache) cache 249 fs/btrfs/relocation.c cache->nr_nodes++; cache 254 fs/btrfs/relocation.c static void free_backref_node(struct backref_cache *cache, cache 258 fs/btrfs/relocation.c cache->nr_nodes--; cache 263 fs/btrfs/relocation.c static struct backref_edge *alloc_backref_edge(struct backref_cache *cache) cache 269 fs/btrfs/relocation.c cache->nr_edges++; cache 273 fs/btrfs/relocation.c static void free_backref_edge(struct backref_cache *cache, cache 277 fs/btrfs/relocation.c cache->nr_edges--; cache 418 fs/btrfs/relocation.c static void remove_backref_node(struct backref_cache *cache, cache 434 fs/btrfs/relocation.c free_backref_edge(cache, edge); cache 438 fs/btrfs/relocation.c drop_backref_node(cache, node); cache 448 fs/btrfs/relocation.c list_add_tail(&upper->lower, &cache->leaves); cache 453 fs/btrfs/relocation.c drop_backref_node(cache, node); cache 456 fs/btrfs/relocation.c static void update_backref_node(struct backref_cache *cache, cache 460 fs/btrfs/relocation.c rb_erase(&node->rb_node, &cache->rb_root); cache 462 fs/btrfs/relocation.c rb_node = tree_insert(&cache->rb_root, node->bytenr, &node->rb_node); cache 471 fs/btrfs/relocation.c struct backref_cache *cache) cache 476 fs/btrfs/relocation.c if (cache->last_trans == 0) { cache 477 fs/btrfs/relocation.c cache->last_trans = trans->transid; cache 481 fs/btrfs/relocation.c if (cache->last_trans == trans->transid) cache 489 fs/btrfs/relocation.c while (!list_empty(&cache->detached)) { cache 490 fs/btrfs/relocation.c node = list_entry(cache->detached.next, cache 492 fs/btrfs/relocation.c remove_backref_node(cache, node); cache 495 fs/btrfs/relocation.c while (!list_empty(&cache->changed)) { cache 496 fs/btrfs/relocation.c node = list_entry(cache->changed.next, cache 500 fs/btrfs/relocation.c update_backref_node(cache, node, node->new_bytenr); cache 508 fs/btrfs/relocation.c list_for_each_entry(node, &cache->pending[level], list) { cache 512 fs/btrfs/relocation.c update_backref_node(cache, node, node->new_bytenr); cache 516 fs/btrfs/relocation.c cache->last_trans = 0; cache 686 fs/btrfs/relocation.c struct backref_cache *cache = &rc->backref_cache; cache 717 fs/btrfs/relocation.c node = alloc_backref_node(cache); cache 843 fs/btrfs/relocation.c edge = alloc_backref_edge(cache); cache 848 fs/btrfs/relocation.c rb_node = tree_search(&cache->rb_root, key.offset); cache 850 fs/btrfs/relocation.c upper = alloc_backref_node(cache); cache 
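
The relocation.c fragments above show that backref_cache pairs every allocation helper with a counter (nr_nodes, nr_edges) so that backref_cache_cleanup() can assert the cache drained completely. A condensed sketch of that alloc/free pairing; the kzalloc/GFP_NOFS and kfree calls are assumptions, since the grep shows only the counter updates:

static struct backref_node *alloc_backref_node_sketch(struct backref_cache *cache)
{
    struct backref_node *node = kzalloc(sizeof(*node), GFP_NOFS);

    if (node)
        cache->nr_nodes++;      /* cleanup asserts this returns to zero */
    return node;
}

static void free_backref_node_sketch(struct backref_cache *cache,
                                     struct backref_node *node)
{
    if (node) {
        cache->nr_nodes--;
        kfree(node);
    }
}
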
852 fs/btrfs/relocation.c free_backref_edge(cache, edge); cache 951 fs/btrfs/relocation.c edge = alloc_backref_edge(cache); cache 958 fs/btrfs/relocation.c rb_node = tree_search(&cache->rb_root, eb->start); cache 960 fs/btrfs/relocation.c upper = alloc_backref_node(cache); cache 962 fs/btrfs/relocation.c free_backref_edge(cache, edge); cache 1047 fs/btrfs/relocation.c rb_node = tree_insert(&cache->rb_root, node->bytenr, cache 1051 fs/btrfs/relocation.c list_add_tail(&node->lower, &cache->leaves); cache 1064 fs/btrfs/relocation.c free_backref_edge(cache, edge); cache 1096 fs/btrfs/relocation.c rb_node = tree_insert(&cache->rb_root, upper->bytenr, cache 1130 fs/btrfs/relocation.c free_backref_edge(cache, edge); cache 1137 fs/btrfs/relocation.c list_add(&upper->list, &cache->detached); cache 1140 fs/btrfs/relocation.c rb_erase(&upper->rb_node, &cache->rb_root); cache 1141 fs/btrfs/relocation.c free_backref_node(cache, upper); cache 1160 fs/btrfs/relocation.c free_backref_edge(cache, edge); cache 1186 fs/btrfs/relocation.c free_backref_node(cache, lower); cache 1189 fs/btrfs/relocation.c remove_backref_node(cache, node); cache 1207 fs/btrfs/relocation.c struct backref_cache *cache = &rc->backref_cache; cache 1214 fs/btrfs/relocation.c if (cache->last_trans > 0) cache 1215 fs/btrfs/relocation.c update_backref_cache(trans, cache); cache 1217 fs/btrfs/relocation.c rb_node = tree_search(&cache->rb_root, src->commit_root->start); cache 1227 fs/btrfs/relocation.c rb_node = tree_search(&cache->rb_root, cache 1239 fs/btrfs/relocation.c new_node = alloc_backref_node(cache); cache 1251 fs/btrfs/relocation.c new_edge = alloc_backref_edge(cache); cache 1261 fs/btrfs/relocation.c list_add_tail(&new_node->lower, &cache->leaves); cache 1264 fs/btrfs/relocation.c rb_node = tree_insert(&cache->rb_root, new_node->bytenr, cache 1281 fs/btrfs/relocation.c free_backref_edge(cache, new_edge); cache 1283 fs/btrfs/relocation.c free_backref_node(cache, new_node); cache 2971 fs/btrfs/relocation.c struct backref_cache *cache = &rc->backref_cache; cache 2977 fs/btrfs/relocation.c while (!list_empty(&cache->pending[level])) { cache 2978 fs/btrfs/relocation.c node = list_entry(cache->pending[level].next, cache 2989 fs/btrfs/relocation.c list_splice_init(&list, &cache->pending[level]); cache 392 fs/btrfs/scrub.c static u64 get_full_stripe_logical(struct btrfs_block_group_cache *cache, cache 401 fs/btrfs/scrub.c WARN_ON_ONCE(cache->full_stripe_len >= U32_MAX); cache 407 fs/btrfs/scrub.c ret = div64_u64(bytenr - cache->key.objectid, cache->full_stripe_len) * cache 408 fs/btrfs/scrub.c cache->full_stripe_len + cache->key.objectid; cache 3420 fs/btrfs/scrub.c struct btrfs_block_group_cache *cache) cache 3438 fs/btrfs/scrub.c spin_lock(&cache->lock); cache 3439 fs/btrfs/scrub.c if (!cache->removed) cache 3441 fs/btrfs/scrub.c spin_unlock(&cache->lock); cache 3484 fs/btrfs/scrub.c struct btrfs_block_group_cache *cache; cache 3547 fs/btrfs/scrub.c cache = btrfs_lookup_block_group(fs_info, chunk_offset); cache 3551 fs/btrfs/scrub.c if (!cache) cache 3563 fs/btrfs/scrub.c ret = btrfs_inc_block_group_ro(cache); cache 3583 fs/btrfs/scrub.c btrfs_wait_block_group_reservations(cache); cache 3584 fs/btrfs/scrub.c btrfs_wait_nocow_writers(cache); cache 3586 fs/btrfs/scrub.c cache->key.objectid, cache 3587 fs/btrfs/scrub.c cache->key.offset); cache 3598 fs/btrfs/scrub.c btrfs_put_block_group(cache); cache 3619 fs/btrfs/scrub.c btrfs_put_block_group(cache); cache 3630 fs/btrfs/scrub.c found_key.offset, cache); cache 3670 fs/btrfs/scrub.c 
btrfs_dec_block_group_ro(cache); cache 3679 fs/btrfs/scrub.c spin_lock(&cache->lock); cache 3680 fs/btrfs/scrub.c if (!cache->removed && !cache->ro && cache->reserved == 0 && cache 3681 fs/btrfs/scrub.c btrfs_block_group_used(&cache->item) == 0) { cache 3682 fs/btrfs/scrub.c spin_unlock(&cache->lock); cache 3683 fs/btrfs/scrub.c btrfs_mark_bg_unused(cache); cache 3685 fs/btrfs/scrub.c spin_unlock(&cache->lock); cache 3688 fs/btrfs/scrub.c btrfs_put_block_group(cache); cache 288 fs/btrfs/space-info.c struct btrfs_block_group_cache *cache; cache 300 fs/btrfs/space-info.c list_for_each_entry(cache, &info->block_groups[index], list) { cache 301 fs/btrfs/space-info.c spin_lock(&cache->lock); cache 304 fs/btrfs/space-info.c cache->key.objectid, cache->key.offset, cache 305 fs/btrfs/space-info.c btrfs_block_group_used(&cache->item), cache->pinned, cache 306 fs/btrfs/space-info.c cache->reserved, cache->ro ? "[readonly]" : ""); cache 307 fs/btrfs/space-info.c btrfs_dump_free_space(cache, bytes); cache 308 fs/btrfs/space-info.c spin_unlock(&cache->lock); cache 825 fs/btrfs/sysfs.c void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache) cache 827 fs/btrfs/sysfs.c struct btrfs_fs_info *fs_info = cache->fs_info; cache 828 fs/btrfs/sysfs.c struct btrfs_space_info *space_info = cache->space_info; cache 830 fs/btrfs/sysfs.c const int index = btrfs_bg_flags_to_raid_index(cache->flags); cache 846 fs/btrfs/sysfs.c btrfs_warn(cache->fs_info, cache 851 fs/btrfs/sysfs.c rkobj->flags = cache->flags; cache 35 fs/btrfs/sysfs.h void btrfs_sysfs_add_block_group_type(struct btrfs_block_group_cache *cache); cache 208 fs/btrfs/tests/btrfs-tests.c struct btrfs_block_group_cache *cache; cache 210 fs/btrfs/tests/btrfs-tests.c cache = kzalloc(sizeof(*cache), GFP_KERNEL); cache 211 fs/btrfs/tests/btrfs-tests.c if (!cache) cache 213 fs/btrfs/tests/btrfs-tests.c cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), cache 215 fs/btrfs/tests/btrfs-tests.c if (!cache->free_space_ctl) { cache 216 fs/btrfs/tests/btrfs-tests.c kfree(cache); cache 220 fs/btrfs/tests/btrfs-tests.c cache->key.objectid = 0; cache 221 fs/btrfs/tests/btrfs-tests.c cache->key.offset = length; cache 222 fs/btrfs/tests/btrfs-tests.c cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; cache 223 fs/btrfs/tests/btrfs-tests.c cache->full_stripe_len = fs_info->sectorsize; cache 224 fs/btrfs/tests/btrfs-tests.c cache->fs_info = fs_info; cache 226 fs/btrfs/tests/btrfs-tests.c INIT_LIST_HEAD(&cache->list); cache 227 fs/btrfs/tests/btrfs-tests.c INIT_LIST_HEAD(&cache->cluster_list); cache 228 fs/btrfs/tests/btrfs-tests.c INIT_LIST_HEAD(&cache->bg_list); cache 229 fs/btrfs/tests/btrfs-tests.c btrfs_init_free_space_ctl(cache); cache 230 fs/btrfs/tests/btrfs-tests.c mutex_init(&cache->free_space_lock); cache 232 fs/btrfs/tests/btrfs-tests.c return cache; cache 235 fs/btrfs/tests/btrfs-tests.c void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache) cache 237 fs/btrfs/tests/btrfs-tests.c if (!cache) cache 239 fs/btrfs/tests/btrfs-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 240 fs/btrfs/tests/btrfs-tests.c kfree(cache->free_space_ctl); cache 241 fs/btrfs/tests/btrfs-tests.c kfree(cache); cache 46 fs/btrfs/tests/btrfs-tests.h void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); cache 20 fs/btrfs/tests/free-space-tests.c static int test_extents(struct btrfs_block_group_cache *cache) cache 27 fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, 0, SZ_4M); cache 33 
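
The btrfs-tests.c fragments above (lines 208-241) build a dummy block group for the self-tests: a kzalloc'd cache with its own free_space_ctl, a key spanning [0, length), and empty list heads. A sketch of that fixture assembled from the fragments; only the function name is invented here:

struct btrfs_block_group_cache *alloc_dummy_bg_sketch(
                struct btrfs_fs_info *fs_info, u64 length)
{
    struct btrfs_block_group_cache *cache;

    cache = kzalloc(sizeof(*cache), GFP_KERNEL);
    if (!cache)
        return NULL;
    cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
                                    GFP_KERNEL);
    if (!cache->free_space_ctl) {
        kfree(cache);
        return NULL;
    }

    /* Key covers the whole dummy block group: [0, length). */
    cache->key.objectid = 0;
    cache->key.offset = length;
    cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
    cache->full_stripe_len = fs_info->sectorsize;
    cache->fs_info = fs_info;

    INIT_LIST_HEAD(&cache->list);
    INIT_LIST_HEAD(&cache->cluster_list);
    INIT_LIST_HEAD(&cache->bg_list);
    btrfs_init_free_space_ctl(cache);
    mutex_init(&cache->free_space_lock);
    return cache;
}
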
fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, 0, SZ_4M); cache 39 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, 0, SZ_4M)) { cache 45 fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, 0, SZ_4M); cache 51 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_1M); cache 57 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, 0, SZ_1M); cache 63 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, SZ_2M, 4096); cache 69 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, 0, SZ_1M)) { cache 74 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, SZ_2M, 4096)) { cache 79 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, 3 * SZ_1M, SZ_1M)) { cache 85 fs/btrfs/tests/free-space-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 90 fs/btrfs/tests/free-space-tests.c static int test_bitmaps(struct btrfs_block_group_cache *cache, cache 98 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, 0, SZ_4M, 1); cache 104 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, 0, SZ_4M); cache 110 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, 0, SZ_4M)) { cache 115 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, 0, SZ_4M, 1); cache 121 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, SZ_1M, SZ_2M); cache 134 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, cache 142 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, next_bitmap_offset - SZ_1M, SZ_2M); cache 148 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, next_bitmap_offset - SZ_1M, SZ_2M)) { cache 153 fs/btrfs/tests/free-space-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 159 fs/btrfs/tests/free-space-tests.c static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache, cache 172 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_4M, SZ_1M, 1); cache 178 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, 0, SZ_1M, 0); cache 184 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, 0, SZ_1M); cache 190 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, 0, SZ_1M)) { cache 196 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, 0, SZ_1M, 0); cache 202 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, SZ_4M, SZ_1M); cache 208 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, SZ_4M, SZ_1M)) { cache 217 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_1M, SZ_4M, 1); cache 223 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, SZ_512K, 3 * SZ_1M); cache 229 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, SZ_512K, 3 * SZ_1M)) { cache 234 fs/btrfs/tests/free-space-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 237 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_4M, SZ_4M, 1); cache 243 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_2M, SZ_2M, 0); cache 249 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_4M); cache 255 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, 3 * SZ_1M, SZ_4M)) { cache 270 fs/btrfs/tests/free-space-tests.c 
__btrfs_remove_free_space_cache(cache->free_space_ctl); cache 271 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, bitmap_offset + SZ_4M, SZ_4M, 1); cache 277 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, bitmap_offset - SZ_1M, cache 284 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, bitmap_offset + SZ_1M, 5 * SZ_1M); cache 290 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, bitmap_offset + SZ_1M, 5 * SZ_1M)) { cache 295 fs/btrfs/tests/free-space-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 303 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_1M, SZ_2M, 1); cache 309 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, 3 * SZ_1M, SZ_1M, 0); cache 315 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, SZ_1M, 3 * SZ_1M); cache 321 fs/btrfs/tests/free-space-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 334 fs/btrfs/tests/free-space-tests.c check_num_extents_and_bitmaps(const struct btrfs_block_group_cache *cache, cache 338 fs/btrfs/tests/free-space-tests.c if (cache->free_space_ctl->free_extents != num_extents) { cache 341 fs/btrfs/tests/free-space-tests.c cache->free_space_ctl->free_extents, num_extents); cache 344 fs/btrfs/tests/free-space-tests.c if (cache->free_space_ctl->total_bitmaps != num_bitmaps) { cache 347 fs/btrfs/tests/free-space-tests.c cache->free_space_ctl->total_bitmaps, num_bitmaps); cache 354 fs/btrfs/tests/free-space-tests.c static int check_cache_empty(struct btrfs_block_group_cache *cache) cache 363 fs/btrfs/tests/free-space-tests.c if (cache->free_space_ctl->free_space != 0) { cache 369 fs/btrfs/tests/free-space-tests.c offset = btrfs_find_space_for_alloc(cache, 0, 4096, 0, cache 378 fs/btrfs/tests/free-space-tests.c return check_num_extents_and_bitmaps(cache, 0, 0); cache 396 fs/btrfs/tests/free-space-tests.c test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache, cache 403 fs/btrfs/tests/free-space-tests.c .recalc_thresholds = cache->free_space_ctl->op->recalc_thresholds, cache 428 fs/btrfs/tests/free-space-tests.c orig_free_space_ops = cache->free_space_ctl->op; cache 429 fs/btrfs/tests/free-space-tests.c cache->free_space_ctl->op = &test_free_space_ops; cache 434 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_128M - SZ_256K, SZ_128K, 0); cache 441 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_128M + SZ_512K, cache 448 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 2, 1); cache 459 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, cache 468 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_128K)) { cache 472 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M + SZ_512K, SZ_256K)) { cache 481 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, SZ_128M + 768 * SZ_1K, cache 491 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, SZ_128M + SZ_256K, SZ_256K)) { cache 500 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, SZ_128M, SZ_256K)) { cache 510 fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, SZ_128M, SZ_512K); cache 516 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M, SZ_512K)) { cache 525 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 2, 1); cache 535 
fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize); cache 545 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 2, 1); cache 554 fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, SZ_128M - SZ_128K, SZ_128K); cache 560 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M - SZ_128K, SZ_128K)) { cache 569 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 2, 1); cache 588 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M - SZ_256K, SZ_1M)) { cache 593 fs/btrfs/tests/free-space-tests.c if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) { cache 598 fs/btrfs/tests/free-space-tests.c offset = btrfs_find_space_for_alloc(cache, cache 612 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 1, 1); cache 616 fs/btrfs/tests/free-space-tests.c if (cache->free_space_ctl->free_space != sectorsize) { cache 621 fs/btrfs/tests/free-space-tests.c offset = btrfs_find_space_for_alloc(cache, cache 630 fs/btrfs/tests/free-space-tests.c ret = check_cache_empty(cache); cache 634 fs/btrfs/tests/free-space-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 645 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, SZ_128M + SZ_128K, SZ_128K, 0); cache 652 fs/btrfs/tests/free-space-tests.c ret = test_add_free_space_entry(cache, 0, SZ_128M - SZ_512K, 1); cache 658 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 2, 1); cache 669 fs/btrfs/tests/free-space-tests.c ret = btrfs_remove_free_space(cache, 0, SZ_128M - 768 * SZ_1K); cache 676 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M + SZ_128K, SZ_128K)) { cache 680 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_256K)) { cache 689 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, 0, SZ_128M - 768 * SZ_1K)) { cache 698 fs/btrfs/tests/free-space-tests.c if (test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) { cache 708 fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, SZ_128M - SZ_512K, SZ_512K); cache 714 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M - SZ_512K, SZ_512K)) { cache 723 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 2, 1); cache 733 fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize); cache 744 fs/btrfs/tests/free-space-tests.c ret = btrfs_add_free_space(cache, SZ_128M, SZ_128K); cache 750 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M, SZ_128K)) { cache 759 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 2, 1); cache 778 fs/btrfs/tests/free-space-tests.c if (!test_check_exists(cache, SZ_128M - 768 * SZ_1K, SZ_1M)) { cache 783 fs/btrfs/tests/free-space-tests.c if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) { cache 788 fs/btrfs/tests/free-space-tests.c offset = btrfs_find_space_for_alloc(cache, 0, SZ_1M, 0, cache 801 fs/btrfs/tests/free-space-tests.c ret = check_num_extents_and_bitmaps(cache, 1, 1); cache 805 fs/btrfs/tests/free-space-tests.c if (cache->free_space_ctl->free_space != 2 * sectorsize) { cache 810 fs/btrfs/tests/free-space-tests.c offset = btrfs_find_space_for_alloc(cache, cache 819 fs/btrfs/tests/free-space-tests.c ret = check_cache_empty(cache); cache 823 fs/btrfs/tests/free-space-tests.c cache->free_space_ctl->op = 
orig_free_space_ops; cache 824 fs/btrfs/tests/free-space-tests.c __btrfs_remove_free_space_cache(cache->free_space_ctl); cache 832 fs/btrfs/tests/free-space-tests.c struct btrfs_block_group_cache *cache; cache 848 fs/btrfs/tests/free-space-tests.c cache = btrfs_alloc_dummy_block_group(fs_info, cache 850 fs/btrfs/tests/free-space-tests.c if (!cache) { cache 865 fs/btrfs/tests/free-space-tests.c ret = test_extents(cache); cache 868 fs/btrfs/tests/free-space-tests.c ret = test_bitmaps(cache, sectorsize); cache 871 fs/btrfs/tests/free-space-tests.c ret = test_bitmaps_and_extents(cache, sectorsize); cache 875 fs/btrfs/tests/free-space-tests.c ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize); cache 877 fs/btrfs/tests/free-space-tests.c btrfs_free_dummy_block_group(cache); cache 21 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 34 fs/btrfs/tests/free-space-tree-tests.c info = search_free_space_info(trans, cache, path, 0); cache 51 fs/btrfs/tests/free-space-tree-tests.c end = cache->key.objectid + cache->key.offset; cache 59 fs/btrfs/tests/free-space-tree-tests.c bit = free_space_test_bit(cache, path, offset); cache 110 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 119 fs/btrfs/tests/free-space-tree-tests.c info = search_free_space_info(trans, cache, path, 0); cache 128 fs/btrfs/tests/free-space-tree-tests.c ret = __check_free_space_extents(trans, fs_info, cache, path, extents, cache 135 fs/btrfs/tests/free-space-tree-tests.c ret = convert_free_space_to_extents(trans, cache, path); cache 141 fs/btrfs/tests/free-space-tree-tests.c ret = convert_free_space_to_bitmaps(trans, cache, path); cache 147 fs/btrfs/tests/free-space-tree-tests.c return __check_free_space_extents(trans, fs_info, cache, path, extents, cache 153 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 158 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid, cache->key.offset}, cache 161 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 167 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 174 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 175 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid, cache 176 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset); cache 182 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 188 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 193 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid + alignment, cache 194 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset - alignment}, cache 198 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 199 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid, alignment); cache 205 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 212 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 217 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid, cache->key.offset - alignment}, cache 221 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 222 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + cache 223 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset - alignment, cache 230 
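
The driver fragments from free-space-tests.c (lines 832-877) show the harness shape: build the dummy block group, run each test case against it in sequence, and free the fixture at the end. A sketch of that sequencing; the short-circuit chaining is an assumption, since the grep shows only the individual calls:

static int run_free_space_tests_sketch(struct btrfs_block_group_cache *cache,
                                       u32 sectorsize)
{
    int ret;

    ret = test_extents(cache);
    if (!ret)
        ret = test_bitmaps(cache, sectorsize);
    if (!ret)
        ret = test_bitmaps_and_extents(cache, sectorsize);
    if (!ret)
        ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize);

    btrfs_free_dummy_block_group(cache);    /* fixture freed either way */
    return ret;
}
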
fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 236 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 241 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid, alignment}, cache 242 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid + 2 * alignment, cache 243 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset - 2 * alignment}, cache 247 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 248 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + alignment, cache 255 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 261 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 266 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid, 2 * alignment}, cache 270 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 271 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid, cache 272 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset); cache 278 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, cache 285 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache 286 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + alignment, cache 293 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 299 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 304 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid + alignment, 2 * alignment}, cache 308 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 309 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid, cache 310 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset); cache 316 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache 317 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + 2 * alignment, cache 324 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache 325 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + alignment, cache 332 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 338 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache, cache 343 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid, 3 * alignment}, cache 347 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 348 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid, cache 349 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset); cache 355 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, cache 362 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache 363 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + 2 * alignment, cache 370 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache 371 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + alignment, cache 378 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 384 fs/btrfs/tests/free-space-tree-tests.c struct 
btrfs_block_group_cache *cache, cache 389 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid, alignment}, cache 390 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid + 2 * alignment, alignment}, cache 391 fs/btrfs/tests/free-space-tree-tests.c {cache->key.objectid + 4 * alignment, alignment}, cache 395 fs/btrfs/tests/free-space-tree-tests.c ret = __remove_from_free_space_tree(trans, cache, path, cache 396 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid, cache 397 fs/btrfs/tests/free-space-tree-tests.c cache->key.offset); cache 403 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache->key.objectid, cache 410 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache 411 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + 4 * alignment, cache 418 fs/btrfs/tests/free-space-tree-tests.c ret = __add_to_free_space_tree(trans, cache, path, cache 419 fs/btrfs/tests/free-space-tree-tests.c cache->key.objectid + 2 * alignment, cache 426 fs/btrfs/tests/free-space-tree-tests.c return check_free_space_extents(trans, fs_info, cache, path, cache 441 fs/btrfs/tests/free-space-tree-tests.c struct btrfs_block_group_cache *cache = NULL; cache 475 fs/btrfs/tests/free-space-tree-tests.c cache = btrfs_alloc_dummy_block_group(fs_info, 8 * alignment); cache 476 fs/btrfs/tests/free-space-tree-tests.c if (!cache) { cache 481 fs/btrfs/tests/free-space-tree-tests.c cache->bitmap_low_thresh = 0; cache 482 fs/btrfs/tests/free-space-tree-tests.c cache->bitmap_high_thresh = (u32)-1; cache 483 fs/btrfs/tests/free-space-tree-tests.c cache->needs_free_space = 1; cache 484 fs/btrfs/tests/free-space-tree-tests.c cache->fs_info = root->fs_info; cache 495 fs/btrfs/tests/free-space-tree-tests.c ret = add_block_group_free_space(&trans, cache); cache 502 fs/btrfs/tests/free-space-tree-tests.c ret = convert_free_space_to_bitmaps(&trans, cache, path); cache 509 fs/btrfs/tests/free-space-tree-tests.c ret = test_func(&trans, root->fs_info, cache, path, alignment); cache 513 fs/btrfs/tests/free-space-tree-tests.c ret = remove_block_group_free_space(&trans, cache); cache 528 fs/btrfs/tests/free-space-tree-tests.c btrfs_free_dummy_block_group(cache); cache 68 fs/btrfs/transaction.c struct btrfs_block_group_cache *cache; cache 70 fs/btrfs/transaction.c cache = list_first_entry(&transaction->deleted_bgs, cache 73 fs/btrfs/transaction.c list_del_init(&cache->bg_list); cache 74 fs/btrfs/transaction.c btrfs_put_block_group_trimming(cache); cache 75 fs/btrfs/transaction.c btrfs_put_block_group(cache); cache 3225 fs/btrfs/volumes.c struct btrfs_block_group_cache *cache; cache 3229 fs/btrfs/volumes.c cache = btrfs_lookup_block_group(fs_info, chunk_offset); cache 3230 fs/btrfs/volumes.c ASSERT(cache); cache 3231 fs/btrfs/volumes.c chunk_type = cache->flags; cache 3232 fs/btrfs/volumes.c btrfs_put_block_group(cache); cache 3433 fs/btrfs/volumes.c struct btrfs_block_group_cache *cache; cache 3439 fs/btrfs/volumes.c cache = btrfs_lookup_block_group(fs_info, chunk_offset); cache 3440 fs/btrfs/volumes.c chunk_used = btrfs_block_group_used(&cache->item); cache 3445 fs/btrfs/volumes.c user_thresh_min = div_factor_fine(cache->key.offset, cache 3451 fs/btrfs/volumes.c user_thresh_max = cache->key.offset; cache 3453 fs/btrfs/volumes.c user_thresh_max = div_factor_fine(cache->key.offset, cache 3459 fs/btrfs/volumes.c btrfs_put_block_group(cache); cache 3466 fs/btrfs/volumes.c struct btrfs_block_group_cache *cache; cache 3470 
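
The free-space-tree-tests.c harness fragments above force bitmap behavior by pinning the thresholds before populating the tree: a low threshold of 0 and a high threshold of (u32)-1 keep the free space in whichever representation the test converts to. The setup sequence per the fragments; the error-path shape and -ENOMEM value are assumptions:

cache = btrfs_alloc_dummy_block_group(fs_info, 8 * alignment);
if (!cache)
    return -ENOMEM;                 /* error value assumed */

/* Pin thresholds so conversion only happens when the test asks. */
cache->bitmap_low_thresh = 0;
cache->bitmap_high_thresh = (u32)-1;
cache->needs_free_space = 1;

ret = add_block_group_free_space(&trans, cache);
if (!ret)
    ret = convert_free_space_to_bitmaps(&trans, cache, path);
if (!ret)
    ret = test_func(&trans, root->fs_info, cache, path, alignment);
if (!ret)
    ret = remove_block_group_free_space(&trans, cache);

btrfs_free_dummy_block_group(cache);
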
fs/btrfs/volumes.c cache = btrfs_lookup_block_group(fs_info, chunk_offset); cache 3471 fs/btrfs/volumes.c chunk_used = btrfs_block_group_used(&cache->item); cache 3476 fs/btrfs/volumes.c user_thresh = cache->key.offset; cache 3478 fs/btrfs/volumes.c user_thresh = div_factor_fine(cache->key.offset, cache 3484 fs/btrfs/volumes.c btrfs_put_block_group(cache); cache 27 fs/cachefiles/bind.c int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) cache 30 fs/cachefiles/bind.c cache->frun_percent, cache 31 fs/cachefiles/bind.c cache->fcull_percent, cache 32 fs/cachefiles/bind.c cache->fstop_percent, cache 33 fs/cachefiles/bind.c cache->brun_percent, cache 34 fs/cachefiles/bind.c cache->bcull_percent, cache 35 fs/cachefiles/bind.c cache->bstop_percent, cache 39 fs/cachefiles/bind.c ASSERT(cache->fstop_percent >= 0 && cache 40 fs/cachefiles/bind.c cache->fstop_percent < cache->fcull_percent && cache 41 fs/cachefiles/bind.c cache->fcull_percent < cache->frun_percent && cache 42 fs/cachefiles/bind.c cache->frun_percent < 100); cache 44 fs/cachefiles/bind.c ASSERT(cache->bstop_percent >= 0 && cache 45 fs/cachefiles/bind.c cache->bstop_percent < cache->bcull_percent && cache 46 fs/cachefiles/bind.c cache->bcull_percent < cache->brun_percent && cache 47 fs/cachefiles/bind.c cache->brun_percent < 100); cache 54 fs/cachefiles/bind.c if (!cache->rootdirname) { cache 60 fs/cachefiles/bind.c if (test_bit(CACHEFILES_READY, &cache->flags)) { cache 66 fs/cachefiles/bind.c if (!cache->tag) { cache 69 fs/cachefiles/bind.c cache->tag = kstrdup("CacheFiles", GFP_KERNEL); cache 70 fs/cachefiles/bind.c if (!cache->tag) cache 75 fs/cachefiles/bind.c return cachefiles_daemon_add_cache(cache); cache 81 fs/cachefiles/bind.c static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache) cache 93 fs/cachefiles/bind.c ret = cachefiles_get_security_ID(cache); cache 97 fs/cachefiles/bind.c cachefiles_begin_secure(cache, &saved_cred); cache 114 fs/cachefiles/bind.c ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path); cache 118 fs/cachefiles/bind.c cache->mnt = path.mnt; cache 137 fs/cachefiles/bind.c ret = cachefiles_determine_cache_security(cache, root, &saved_cred); cache 154 fs/cachefiles/bind.c cache->bsize = stats.f_bsize; cache 155 fs/cachefiles/bind.c cache->bshift = 0; cache 157 fs/cachefiles/bind.c cache->bshift = PAGE_SHIFT - ilog2(stats.f_bsize); cache 160 fs/cachefiles/bind.c cache->bsize, cache->bshift); cache 168 fs/cachefiles/bind.c cache->fstop = stats.f_files * cache->fstop_percent; cache 169 fs/cachefiles/bind.c cache->fcull = stats.f_files * cache->fcull_percent; cache 170 fs/cachefiles/bind.c cache->frun = stats.f_files * cache->frun_percent; cache 173 fs/cachefiles/bind.c (unsigned long long) cache->frun, cache 174 fs/cachefiles/bind.c (unsigned long long) cache->fcull, cache 175 fs/cachefiles/bind.c (unsigned long long) cache->fstop); cache 177 fs/cachefiles/bind.c stats.f_blocks >>= cache->bshift; cache 179 fs/cachefiles/bind.c cache->bstop = stats.f_blocks * cache->bstop_percent; cache 180 fs/cachefiles/bind.c cache->bcull = stats.f_blocks * cache->bcull_percent; cache 181 fs/cachefiles/bind.c cache->brun = stats.f_blocks * cache->brun_percent; cache 184 fs/cachefiles/bind.c (unsigned long long) cache->brun, cache 185 fs/cachefiles/bind.c (unsigned long long) cache->bcull, cache 186 fs/cachefiles/bind.c (unsigned long long) cache->bstop); cache 189 fs/cachefiles/bind.c cachedir = cachefiles_get_directory(cache, root, "cache"); cache 203 fs/cachefiles/bind.c graveyard = 
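
The cachefiles_daemon_bind() fragments above encode the ordering invariant stop < cull < run for both the file and block limits, and cachefiles_daemon_add_cache() then turns the percentages into absolute limits from statfs results, with block counts scaled by bshift. In sketch form; the grep shows only the multiply lines, so the scale-down by 100 here is an assumption:

/* Ordering invariant from the bind.c fragments. */
ASSERT(cache->fstop_percent < cache->fcull_percent &&
       cache->fcull_percent < cache->frun_percent &&
       cache->frun_percent < 100);

/* Percentages become absolute file-count limits. */
cache->fstop = stats.f_files * cache->fstop_percent / 100;
cache->fcull = stats.f_files * cache->fcull_percent / 100;
cache->frun  = stats.f_files * cache->frun_percent  / 100;

stats.f_blocks >>= cache->bshift;   /* scale fs blocks per the fragments */
cache->bstop = stats.f_blocks * cache->bstop_percent / 100;
cache->bcull = stats.f_blocks * cache->bcull_percent / 100;
cache->brun  = stats.f_blocks * cache->brun_percent  / 100;
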
cachefiles_get_directory(cache, root, "graveyard"); cache 209 fs/cachefiles/bind.c cache->graveyard = graveyard; cache 212 fs/cachefiles/bind.c fscache_init_cache(&cache->cache, cache 218 fs/cachefiles/bind.c &cache->cache); cache 220 fs/cachefiles/bind.c ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag); cache 225 fs/cachefiles/bind.c set_bit(CACHEFILES_READY, &cache->flags); cache 228 fs/cachefiles/bind.c pr_info("File cache on %s registered\n", cache->cache.identifier); cache 231 fs/cachefiles/bind.c cachefiles_has_space(cache, 0, 0); cache 232 fs/cachefiles/bind.c cachefiles_end_secure(cache, saved_cred); cache 236 fs/cachefiles/bind.c dput(cache->graveyard); cache 237 fs/cachefiles/bind.c cache->graveyard = NULL; cache 239 fs/cachefiles/bind.c mntput(cache->mnt); cache 240 fs/cachefiles/bind.c cache->mnt = NULL; cache 247 fs/cachefiles/bind.c cachefiles_end_secure(cache, saved_cred); cache 255 fs/cachefiles/bind.c void cachefiles_daemon_unbind(struct cachefiles_cache *cache) cache 259 fs/cachefiles/bind.c if (test_bit(CACHEFILES_READY, &cache->flags)) { cache 261 fs/cachefiles/bind.c cache->cache.identifier); cache 263 fs/cachefiles/bind.c fscache_withdraw_cache(&cache->cache); cache 266 fs/cachefiles/bind.c dput(cache->graveyard); cache 267 fs/cachefiles/bind.c mntput(cache->mnt); cache 269 fs/cachefiles/bind.c kfree(cache->rootdirname); cache 270 fs/cachefiles/bind.c kfree(cache->secctx); cache 271 fs/cachefiles/bind.c kfree(cache->tag); cache 59 fs/cachefiles/daemon.c int (*handler)(struct cachefiles_cache *cache, char *args); cache 85 fs/cachefiles/daemon.c struct cachefiles_cache *cache; cache 98 fs/cachefiles/daemon.c cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL); cache 99 fs/cachefiles/daemon.c if (!cache) { cache 104 fs/cachefiles/daemon.c mutex_init(&cache->daemon_mutex); cache 105 fs/cachefiles/daemon.c cache->active_nodes = RB_ROOT; cache 106 fs/cachefiles/daemon.c rwlock_init(&cache->active_lock); cache 107 fs/cachefiles/daemon.c init_waitqueue_head(&cache->daemon_pollwq); cache 114 fs/cachefiles/daemon.c cache->frun_percent = 7; cache 115 fs/cachefiles/daemon.c cache->fcull_percent = 5; cache 116 fs/cachefiles/daemon.c cache->fstop_percent = 1; cache 117 fs/cachefiles/daemon.c cache->brun_percent = 7; cache 118 fs/cachefiles/daemon.c cache->bcull_percent = 5; cache 119 fs/cachefiles/daemon.c cache->bstop_percent = 1; cache 121 fs/cachefiles/daemon.c file->private_data = cache; cache 122 fs/cachefiles/daemon.c cache->cachefilesd = file; cache 131 fs/cachefiles/daemon.c struct cachefiles_cache *cache = file->private_data; cache 135 fs/cachefiles/daemon.c ASSERT(cache); cache 137 fs/cachefiles/daemon.c set_bit(CACHEFILES_DEAD, &cache->flags); cache 139 fs/cachefiles/daemon.c cachefiles_daemon_unbind(cache); cache 141 fs/cachefiles/daemon.c ASSERT(!cache->active_nodes.rb_node); cache 144 fs/cachefiles/daemon.c cache->cachefilesd = NULL; cache 148 fs/cachefiles/daemon.c kfree(cache); cache 160 fs/cachefiles/daemon.c struct cachefiles_cache *cache = file->private_data; cache 168 fs/cachefiles/daemon.c if (!test_bit(CACHEFILES_READY, &cache->flags)) cache 172 fs/cachefiles/daemon.c cachefiles_has_space(cache, 0, 0); cache 175 fs/cachefiles/daemon.c f_released = atomic_xchg(&cache->f_released, 0); cache 176 fs/cachefiles/daemon.c b_released = atomic_long_xchg(&cache->b_released, 0); cache 177 fs/cachefiles/daemon.c clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags); cache 189 fs/cachefiles/daemon.c test_bit(CACHEFILES_CULLING, &cache->flags) ? 
'1' : '0', cache 190 fs/cachefiles/daemon.c (unsigned long long) cache->frun, cache 191 fs/cachefiles/daemon.c (unsigned long long) cache->fcull, cache 192 fs/cachefiles/daemon.c (unsigned long long) cache->fstop, cache 193 fs/cachefiles/daemon.c (unsigned long long) cache->brun, cache 194 fs/cachefiles/daemon.c (unsigned long long) cache->bcull, cache 195 fs/cachefiles/daemon.c (unsigned long long) cache->bstop, cache 217 fs/cachefiles/daemon.c struct cachefiles_cache *cache = file->private_data; cache 223 fs/cachefiles/daemon.c ASSERT(cache); cache 225 fs/cachefiles/daemon.c if (test_bit(CACHEFILES_DEAD, &cache->flags)) cache 273 fs/cachefiles/daemon.c mutex_lock(&cache->daemon_mutex); cache 276 fs/cachefiles/daemon.c if (!test_bit(CACHEFILES_DEAD, &cache->flags)) cache 277 fs/cachefiles/daemon.c ret = cmd->handler(cache, args); cache 279 fs/cachefiles/daemon.c mutex_unlock(&cache->daemon_mutex); cache 293 fs/cachefiles/daemon.c struct cachefiles_cache *cache = file->private_data; cache 296 fs/cachefiles/daemon.c poll_wait(file, &cache->daemon_pollwq, poll); cache 299 fs/cachefiles/daemon.c if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) cache 302 fs/cachefiles/daemon.c if (test_bit(CACHEFILES_CULLING, &cache->flags)) cache 312 fs/cachefiles/daemon.c static int cachefiles_daemon_range_error(struct cachefiles_cache *cache, cache 324 fs/cachefiles/daemon.c static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args) cache 337 fs/cachefiles/daemon.c if (frun <= cache->fcull_percent || frun >= 100) cache 338 fs/cachefiles/daemon.c return cachefiles_daemon_range_error(cache, args); cache 340 fs/cachefiles/daemon.c cache->frun_percent = frun; cache 348 fs/cachefiles/daemon.c static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args) cache 361 fs/cachefiles/daemon.c if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent) cache 362 fs/cachefiles/daemon.c return cachefiles_daemon_range_error(cache, args); cache 364 fs/cachefiles/daemon.c cache->fcull_percent = fcull; cache 372 fs/cachefiles/daemon.c static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args) cache 385 fs/cachefiles/daemon.c if (fstop < 0 || fstop >= cache->fcull_percent) cache 386 fs/cachefiles/daemon.c return cachefiles_daemon_range_error(cache, args); cache 388 fs/cachefiles/daemon.c cache->fstop_percent = fstop; cache 396 fs/cachefiles/daemon.c static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args) cache 409 fs/cachefiles/daemon.c if (brun <= cache->bcull_percent || brun >= 100) cache 410 fs/cachefiles/daemon.c return cachefiles_daemon_range_error(cache, args); cache 412 fs/cachefiles/daemon.c cache->brun_percent = brun; cache 420 fs/cachefiles/daemon.c static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args) cache 433 fs/cachefiles/daemon.c if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent) cache 434 fs/cachefiles/daemon.c return cachefiles_daemon_range_error(cache, args); cache 436 fs/cachefiles/daemon.c cache->bcull_percent = bcull; cache 444 fs/cachefiles/daemon.c static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args) cache 457 fs/cachefiles/daemon.c if (bstop < 0 || bstop >= cache->bcull_percent) cache 458 fs/cachefiles/daemon.c return cachefiles_daemon_range_error(cache, args); cache 460 fs/cachefiles/daemon.c cache->bstop_percent = bstop; cache 468 fs/cachefiles/daemon.c static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args) cache 479 
fs/cachefiles/daemon.c if (cache->rootdirname) { cache 488 fs/cachefiles/daemon.c cache->rootdirname = dir; cache 496 fs/cachefiles/daemon.c static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args) cache 507 fs/cachefiles/daemon.c if (cache->secctx) { cache 516 fs/cachefiles/daemon.c cache->secctx = secctx; cache 524 fs/cachefiles/daemon.c static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args) cache 535 fs/cachefiles/daemon.c if (cache->tag) cache 542 fs/cachefiles/daemon.c cache->tag = tag; cache 550 fs/cachefiles/daemon.c static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) cache 561 fs/cachefiles/daemon.c if (!test_bit(CACHEFILES_READY, &cache->flags)) { cache 566 fs/cachefiles/daemon.c if (test_bit(CACHEFILES_DEAD, &cache->flags)) { cache 577 fs/cachefiles/daemon.c cachefiles_begin_secure(cache, &saved_cred); cache 578 fs/cachefiles/daemon.c ret = cachefiles_cull(cache, path.dentry, args); cache 579 fs/cachefiles/daemon.c cachefiles_end_secure(cache, saved_cred); cache 599 fs/cachefiles/daemon.c static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args) cache 622 fs/cachefiles/daemon.c static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) cache 633 fs/cachefiles/daemon.c if (!test_bit(CACHEFILES_READY, &cache->flags)) { cache 638 fs/cachefiles/daemon.c if (test_bit(CACHEFILES_DEAD, &cache->flags)) { cache 649 fs/cachefiles/daemon.c cachefiles_begin_secure(cache, &saved_cred); cache 650 fs/cachefiles/daemon.c ret = cachefiles_check_in_use(cache, path.dentry, args); cache 651 fs/cachefiles/daemon.c cachefiles_end_secure(cache, saved_cred); cache 671 fs/cachefiles/daemon.c int cachefiles_has_space(struct cachefiles_cache *cache, cache 676 fs/cachefiles/daemon.c .mnt = cache->mnt, cache 677 fs/cachefiles/daemon.c .dentry = cache->mnt->mnt_root, cache 696 fs/cachefiles/daemon.c cachefiles_io_error(cache, "statfs failed"); cache 701 fs/cachefiles/daemon.c stats.f_bavail >>= cache->bshift; cache 719 fs/cachefiles/daemon.c if (stats.f_ffree < cache->fstop || cache 720 fs/cachefiles/daemon.c stats.f_bavail < cache->bstop) cache 724 fs/cachefiles/daemon.c if (stats.f_ffree < cache->fcull || cache 725 fs/cachefiles/daemon.c stats.f_bavail < cache->bcull) cache 728 fs/cachefiles/daemon.c if (test_bit(CACHEFILES_CULLING, &cache->flags) && cache 729 fs/cachefiles/daemon.c stats.f_ffree >= cache->frun && cache 730 fs/cachefiles/daemon.c stats.f_bavail >= cache->brun && cache 731 fs/cachefiles/daemon.c test_and_clear_bit(CACHEFILES_CULLING, &cache->flags) cache 734 fs/cachefiles/daemon.c cachefiles_state_changed(cache); cache 741 fs/cachefiles/daemon.c if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) { cache 743 fs/cachefiles/daemon.c cachefiles_state_changed(cache); cache 28 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 34 fs/cachefiles/interface.c cache = container_of(_cache, struct cachefiles_cache, cache); cache 36 fs/cachefiles/interface.c _enter("{%s},%p,", cache->cache.identifier, cookie); cache 52 fs/cachefiles/interface.c fscache_object_init(&object->fscache, cookie, &cache->cache); cache 107 fs/cachefiles/interface.c fscache_object_destroyed(&cache->cache); cache 123 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 129 fs/cachefiles/interface.c cache = container_of(_object->cache, struct cachefiles_cache, cache); cache 138 fs/cachefiles/interface.c cachefiles_begin_secure(cache, &saved_cred); cache 142 fs/cachefiles/interface.c 
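
The cachefiles_has_space() fragments above sketch a three-band hysteresis: below the stop limits the cache refuses space outright, below the cull limits the daemon is asked to start culling, and culling stops only once free space climbs back above the run limits. Reassembled from the fragments; the -ENOBUFS return value is an assumption:

/* Three-band check from the cachefiles_has_space() fragments. */
stats.f_bavail >>= cache->bshift;

if (stats.f_ffree < cache->fstop || stats.f_bavail < cache->bstop)
    return -ENOBUFS;                /* hard stop: refuse the space */

if (stats.f_ffree < cache->fcull || stats.f_bavail < cache->bcull) {
    /* entering the cull band: poke the daemon once */
    if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags))
        cachefiles_state_changed(cache);
    return 0;
}

/* back above the run band: culling may stop */
if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
    stats.f_ffree >= cache->frun && stats.f_bavail >= cache->brun &&
    test_and_clear_bit(CACHEFILES_CULLING, &cache->flags))
    cachefiles_state_changed(cache);
return 0;
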
cachefiles_end_secure(cache, saved_cred); cache 208 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 217 fs/cachefiles/interface.c cache = container_of(object->fscache.cache, struct cachefiles_cache, cache 218 fs/cachefiles/interface.c cache); cache 250 fs/cachefiles/interface.c cachefiles_begin_secure(cache, &saved_cred); cache 252 fs/cachefiles/interface.c cachefiles_end_secure(cache, saved_cred); cache 264 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 276 fs/cachefiles/interface.c cache = container_of(object->fscache.cache, cache 277 fs/cachefiles/interface.c struct cachefiles_cache, cache); cache 291 fs/cachefiles/interface.c _object != cache->cache.fsdef cache 298 fs/cachefiles/interface.c cachefiles_begin_secure(cache, &saved_cred); cache 299 fs/cachefiles/interface.c cachefiles_delete_object(cache, object); cache 300 fs/cachefiles/interface.c cachefiles_end_secure(cache, saved_cred); cache 311 fs/cachefiles/interface.c cachefiles_mark_object_inactive(cache, object, i_blocks); cache 326 fs/cachefiles/interface.c struct fscache_cache *cache; cache 364 fs/cachefiles/interface.c cache = object->fscache.cache; cache 367 fs/cachefiles/interface.c fscache_object_destroyed(cache); cache 378 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 384 fs/cachefiles/interface.c cache = container_of(_cache, struct cachefiles_cache, cache); cache 388 fs/cachefiles/interface.c cachefiles_begin_secure(cache, &saved_cred); cache 389 fs/cachefiles/interface.c down_read(&cache->mnt->mnt_sb->s_umount); cache 390 fs/cachefiles/interface.c ret = sync_filesystem(cache->mnt->mnt_sb); cache 391 fs/cachefiles/interface.c up_read(&cache->mnt->mnt_sb->s_umount); cache 392 fs/cachefiles/interface.c cachefiles_end_secure(cache, saved_cred); cache 395 fs/cachefiles/interface.c cachefiles_io_error(cache, cache 408 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 415 fs/cachefiles/interface.c cache = container_of(object->fscache.cache, cache 416 fs/cachefiles/interface.c struct cachefiles_cache, cache); cache 418 fs/cachefiles/interface.c cachefiles_begin_secure(cache, &saved_cred); cache 420 fs/cachefiles/interface.c cachefiles_end_secure(cache, saved_cred); cache 433 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 446 fs/cachefiles/interface.c cache = container_of(object->fscache.cache, cache 447 fs/cachefiles/interface.c struct cachefiles_cache, cache); cache 463 fs/cachefiles/interface.c cachefiles_begin_secure(cache, &saved_cred); cache 484 fs/cachefiles/interface.c cachefiles_end_secure(cache, saved_cred); cache 502 fs/cachefiles/interface.c struct cachefiles_cache *cache; cache 509 fs/cachefiles/interface.c cache = container_of(object->fscache.cache, cache 510 fs/cachefiles/interface.c struct cachefiles_cache, cache); cache 523 fs/cachefiles/interface.c path.mnt = cache->mnt; cache 525 fs/cachefiles/interface.c cachefiles_begin_secure(cache, &saved_cred); cache 529 fs/cachefiles/interface.c cachefiles_end_secure(cache, saved_cred); cache 546 fs/cachefiles/interface.c static void cachefiles_dissociate_pages(struct fscache_cache *cache) cache 56 fs/cachefiles/internal.h struct fscache_cache cache; /* FS-Cache record */ cache 128 fs/cachefiles/internal.h static inline void cachefiles_state_changed(struct cachefiles_cache *cache) cache 130 fs/cachefiles/internal.h set_bit(CACHEFILES_STATE_CHANGED, &cache->flags); cache 131 fs/cachefiles/internal.h wake_up_all(&cache->daemon_pollwq); cache 137 fs/cachefiles/internal.h extern int 
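
The interface.c fragments above (around lines 384-395) show the cache sync path: impersonate the cache credentials, then run sync_filesystem() on the backing mount under the superblock's s_umount read lock. Condensed from the fragments; the error-message wording is an assumption:

cachefiles_begin_secure(cache, &saved_cred);
down_read(&cache->mnt->mnt_sb->s_umount);
ret = sync_filesystem(cache->mnt->mnt_sb);
up_read(&cache->mnt->mnt_sb->s_umount);
cachefiles_end_secure(cache, saved_cred);

if (ret < 0)
    cachefiles_io_error(cache,      /* message wording assumed */
                        "Sync of backing fs failed: %d", ret);
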
cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args); cache 138 fs/cachefiles/internal.h extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache); cache 145 fs/cachefiles/internal.h extern int cachefiles_has_space(struct cachefiles_cache *cache, cache 161 fs/cachefiles/internal.h extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache, cache 164 fs/cachefiles/internal.h extern int cachefiles_delete_object(struct cachefiles_cache *cache, cache 170 fs/cachefiles/internal.h extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, cache 174 fs/cachefiles/internal.h extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, cache 177 fs/cachefiles/internal.h extern int cachefiles_check_in_use(struct cachefiles_cache *cache, cache 223 fs/cachefiles/internal.h extern int cachefiles_get_security_ID(struct cachefiles_cache *cache); cache 224 fs/cachefiles/internal.h extern int cachefiles_determine_cache_security(struct cachefiles_cache *cache, cache 228 fs/cachefiles/internal.h static inline void cachefiles_begin_secure(struct cachefiles_cache *cache, cache 231 fs/cachefiles/internal.h *_saved_cred = override_creds(cache->cache_cred); cache 234 fs/cachefiles/internal.h static inline void cachefiles_end_secure(struct cachefiles_cache *cache, cache 251 fs/cachefiles/internal.h extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, cache 262 fs/cachefiles/internal.h fscache_io_error(&(___cache)->cache); \ cache 270 fs/cachefiles/internal.h ___cache = container_of((object)->fscache.cache, \ cache 271 fs/cachefiles/internal.h struct cachefiles_cache, cache); \ cache 84 fs/cachefiles/namei.c static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, cache 93 fs/cachefiles/namei.c write_lock(&cache->active_lock); cache 95 fs/cachefiles/namei.c p = cache->active_nodes.rb_node; cache 106 fs/cachefiles/namei.c write_unlock(&cache->active_lock); cache 129 fs/cachefiles/namei.c write_unlock(&cache->active_lock); cache 136 fs/cachefiles/namei.c static int cachefiles_mark_object_active(struct cachefiles_cache *cache, cache 146 fs/cachefiles/namei.c write_lock(&cache->active_lock); cache 157 fs/cachefiles/namei.c _p = &cache->active_nodes.rb_node; cache 174 fs/cachefiles/namei.c rb_insert_color(&object->active_node, &cache->active_nodes); cache 176 fs/cachefiles/namei.c write_unlock(&cache->active_lock); cache 192 fs/cachefiles/namei.c write_unlock(&cache->active_lock); cache 242 fs/cachefiles/namei.c cache->cache.ops->put_object(&xobject->fscache, cache 247 fs/cachefiles/namei.c cache->cache.ops->put_object(&xobject->fscache, cache 256 fs/cachefiles/namei.c void cachefiles_mark_object_inactive(struct cachefiles_cache *cache, cache 265 fs/cachefiles/namei.c write_lock(&cache->active_lock); cache 266 fs/cachefiles/namei.c rb_erase(&object->active_node, &cache->active_nodes); cache 268 fs/cachefiles/namei.c write_unlock(&cache->active_lock); cache 275 fs/cachefiles/namei.c atomic_long_add(i_blocks, &cache->b_released); cache 276 fs/cachefiles/namei.c if (atomic_inc_return(&cache->f_released)) cache 277 fs/cachefiles/namei.c cachefiles_state_changed(cache); cache 287 fs/cachefiles/namei.c static int cachefiles_bury_object(struct cachefiles_cache *cache, cache 307 fs/cachefiles/namei.c path.mnt = cache->mnt; cache 311 fs/cachefiles/namei.c cachefiles_io_error(cache, "Unlink security error"); cache 317 fs/cachefiles/namei.c cachefiles_mark_object_buried(cache, rep, why); cache 323 
fs/cachefiles/namei.c cachefiles_io_error(cache, "Unlink failed"); cache 337 fs/cachefiles/namei.c (uint32_t) atomic_inc_return(&cache->gravecounter)); cache 340 fs/cachefiles/namei.c trap = lock_rename(cache->graveyard, dir); cache 346 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 351 fs/cachefiles/namei.c if (!d_can_lookup(cache->graveyard)) { cache 352 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 353 fs/cachefiles/namei.c cachefiles_io_error(cache, "Graveyard no longer a directory"); cache 358 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 359 fs/cachefiles/namei.c cachefiles_io_error(cache, "May not make directory loop"); cache 364 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 365 fs/cachefiles/namei.c cachefiles_io_error(cache, "Mountpoint in cache"); cache 369 fs/cachefiles/namei.c grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer)); cache 371 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 378 fs/cachefiles/namei.c cachefiles_io_error(cache, "Lookup error %ld", cache 384 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 392 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 394 fs/cachefiles/namei.c cachefiles_io_error(cache, "Mountpoint in graveyard"); cache 400 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 402 fs/cachefiles/namei.c cachefiles_io_error(cache, "May not make directory loop"); cache 407 fs/cachefiles/namei.c path.mnt = cache->mnt; cache 409 fs/cachefiles/namei.c path_to_graveyard.mnt = cache->mnt; cache 410 fs/cachefiles/namei.c path_to_graveyard.dentry = cache->graveyard; cache 413 fs/cachefiles/namei.c cachefiles_io_error(cache, "Rename security error %d", ret); cache 417 fs/cachefiles/namei.c d_inode(cache->graveyard), grave, NULL, 0); cache 419 fs/cachefiles/namei.c cachefiles_io_error(cache, cache 423 fs/cachefiles/namei.c cachefiles_mark_object_buried(cache, rep, why); cache 426 fs/cachefiles/namei.c unlock_rename(cache->graveyard, dir); cache 435 fs/cachefiles/namei.c int cachefiles_delete_object(struct cachefiles_cache *cache, cache 461 fs/cachefiles/namei.c ret = cachefiles_bury_object(cache, object, dir, cache 487 fs/cachefiles/namei.c struct cachefiles_cache *cache; cache 499 fs/cachefiles/namei.c cache = container_of(parent->fscache.cache, cache 500 fs/cachefiles/namei.c struct cachefiles_cache, cache); cache 501 fs/cachefiles/namei.c path.mnt = cache->mnt; cache 555 fs/cachefiles/namei.c ret = cachefiles_has_space(cache, 1, 0); cache 591 fs/cachefiles/namei.c ret = cachefiles_has_space(cache, 1, 0); cache 645 fs/cachefiles/namei.c ret = cachefiles_bury_object(cache, object, dir, next, cache 661 fs/cachefiles/namei.c ret = cachefiles_mark_object_active(cache, object); cache 715 fs/cachefiles/namei.c cachefiles_io_error(cache, "Create/mkdir failed"); cache 725 fs/cachefiles/namei.c cache, object, d_backing_inode(object->dentry)->i_blocks); cache 739 fs/cachefiles/namei.c cachefiles_io_error(cache, "Lookup failed"); cache 754 fs/cachefiles/namei.c struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, cache 783 fs/cachefiles/namei.c ret = cachefiles_has_space(cache, 1, 0); cache 789 fs/cachefiles/namei.c path.mnt = cache->mnt; cache 863 fs/cachefiles/namei.c static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache, cache 899 fs/cachefiles/namei.c read_lock(&cache->active_lock); cache 901 fs/cachefiles/namei.c _n = cache->active_nodes.rb_node; cache 914 
cache 920 fs/cachefiles/namei.c read_unlock(&cache->active_lock);
cache 936 fs/cachefiles/namei.c cachefiles_io_error(cache, "Lookup failed");
cache 950 fs/cachefiles/namei.c int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
cache 958 fs/cachefiles/namei.c victim = cachefiles_check_active(cache, dir, filename);
cache 970 fs/cachefiles/namei.c ret = cachefiles_remove_object_xattr(cache, victim);
cache 977 fs/cachefiles/namei.c ret = cachefiles_bury_object(cache, NULL, dir, victim, false,
cache 1010 fs/cachefiles/namei.c int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
cache 1018 fs/cachefiles/namei.c victim = cachefiles_check_active(cache, dir, filename);
cache 397 fs/cachefiles/rdwr.c struct cachefiles_cache *cache;
cache 405 fs/cachefiles/rdwr.c cache = container_of(object->fscache.cache,
cache 406 fs/cachefiles/rdwr.c struct cachefiles_cache, cache);
cache 443 fs/cachefiles/rdwr.c } else if (cachefiles_has_space(cache, 0, 1) == 0) {
cache 688 fs/cachefiles/rdwr.c struct cachefiles_cache *cache;
cache 698 fs/cachefiles/rdwr.c cache = container_of(object->fscache.cache,
cache 699 fs/cachefiles/rdwr.c struct cachefiles_cache, cache);
cache 709 fs/cachefiles/rdwr.c if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
cache 802 fs/cachefiles/rdwr.c struct cachefiles_cache *cache;
cache 807 fs/cachefiles/rdwr.c cache = container_of(object->fscache.cache,
cache 808 fs/cachefiles/rdwr.c struct cachefiles_cache, cache);
cache 812 fs/cachefiles/rdwr.c ret = cachefiles_has_space(cache, 0, 1);
cache 840 fs/cachefiles/rdwr.c struct cachefiles_cache *cache;
cache 847 fs/cachefiles/rdwr.c cache = container_of(object->fscache.cache,
cache 848 fs/cachefiles/rdwr.c struct cachefiles_cache, cache);
cache 852 fs/cachefiles/rdwr.c ret = cachefiles_has_space(cache, 0, *nr_pages);
cache 883 fs/cachefiles/rdwr.c struct cachefiles_cache *cache;
cache 906 fs/cachefiles/rdwr.c cache = container_of(object->fscache.cache,
cache 907 fs/cachefiles/rdwr.c struct cachefiles_cache, cache);
cache 920 fs/cachefiles/rdwr.c path.mnt = cache->mnt;
cache 922 fs/cachefiles/rdwr.c file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
cache 16 fs/cachefiles/security.c int cachefiles_get_security_ID(struct cachefiles_cache *cache)
cache 21 fs/cachefiles/security.c _enter("{%s}", cache->secctx);
cache 29 fs/cachefiles/security.c if (cache->secctx) {
cache 30 fs/cachefiles/security.c ret = set_security_override_from_ctx(new, cache->secctx);
cache 39 fs/cachefiles/security.c cache->cache_cred = new;
cache 49 fs/cachefiles/security.c static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
cache 75 fs/cachefiles/security.c int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
cache 90 fs/cachefiles/security.c cachefiles_end_secure(cache, *_saved_cred);
cache 97 fs/cachefiles/security.c cachefiles_begin_secure(cache, _saved_cred);
cache 102 fs/cachefiles/security.c put_cred(cache->cache_cred);
cache 103 fs/cachefiles/security.c cache->cache_cred = new;
cache 105 fs/cachefiles/security.c cachefiles_begin_secure(cache, _saved_cred);
cache 106 fs/cachefiles/security.c ret = cachefiles_check_cache_dir(cache, root);
cache 307 fs/cachefiles/xattr.c int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
cache 317 fs/cachefiles/xattr.c cachefiles_io_error(cache,
cache 646 fs/ecryptfs/main.c struct kmem_cache **cache;
cache 653 fs/ecryptfs/main.c .cache = &ecryptfs_auth_tok_list_item_cache,
cache 658 fs/ecryptfs/main.c .cache = &ecryptfs_file_info_cache,
cache 663 fs/ecryptfs/main.c .cache = &ecryptfs_dentry_info_cache,
cache 668 fs/ecryptfs/main.c .cache = &ecryptfs_inode_info_cache,
cache 675 fs/ecryptfs/main.c .cache = &ecryptfs_sb_info_cache,
cache 680 fs/ecryptfs/main.c .cache = &ecryptfs_header_cache,
cache 685 fs/ecryptfs/main.c .cache = &ecryptfs_xattr_cache,
cache 690 fs/ecryptfs/main.c .cache = &ecryptfs_key_record_cache,
cache 695 fs/ecryptfs/main.c .cache = &ecryptfs_key_sig_cache,
cache 700 fs/ecryptfs/main.c .cache = &ecryptfs_global_auth_tok_cache,
cache 705 fs/ecryptfs/main.c .cache = &ecryptfs_key_tfm_cache,
cache 725 fs/ecryptfs/main.c kmem_cache_destroy(*(info->cache));
cache 742 fs/ecryptfs/main.c *(info->cache) = kmem_cache_create(info->name, info->size, 0,
cache 744 fs/ecryptfs/main.c if (!*(info->cache)) {
cache 859 fs/ext2/xattr.c ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
cache 864 fs/ext2/xattr.c error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
cache 1056 fs/ext2/xattr.c void ext2_xattr_destroy_cache(struct mb_cache *cache)
cache 1058 fs/ext2/xattr.c if (cache)
cache 1059 fs/ext2/xattr.c mb_cache_destroy(cache);
cache 73 fs/ext2/xattr.h extern void ext2_xattr_destroy_cache(struct mb_cache *cache);
cache 98 fs/ext2/xattr.h static inline void ext2_xattr_destroy_cache(struct mb_cache *cache)
cache 3143 fs/ext4/xattr.c void ext4_xattr_destroy_cache(struct mb_cache *cache)
cache 3145 fs/ext4/xattr.c if (cache)
cache 3146 fs/ext4/xattr.c mb_cache_destroy(cache);
cache 556 fs/f2fs/node.c goto cache;
cache 571 fs/f2fs/node.c cache:
cache 41 fs/fat/cache.c struct fat_cache *cache = (struct fat_cache *)foo;
cache 43 fs/fat/cache.c INIT_LIST_HEAD(&cache->cache_list);
cache 67 fs/fat/cache.c static inline void fat_cache_free(struct fat_cache *cache)
cache 69 fs/fat/cache.c BUG_ON(!list_empty(&cache->cache_list));
cache 70 fs/fat/cache.c kmem_cache_free(fat_cache_cachep, cache);
cache 74 fs/fat/cache.c struct fat_cache *cache)
cache 76 fs/fat/cache.c if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)
cache 77 fs/fat/cache.c list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);
cache 136 fs/fat/cache.c struct fat_cache *cache, *tmp;
cache 146 fs/fat/cache.c cache = fat_cache_merge(inode, new);
cache 147 fs/fat/cache.c if (cache == NULL) {
cache 161 fs/fat/cache.c cache = fat_cache_merge(inode, new);
cache 162 fs/fat/cache.c if (cache != NULL) {
cache 167 fs/fat/cache.c cache = tmp;
cache 170 fs/fat/cache.c cache = list_entry(p, struct fat_cache, cache_list);
cache 172 fs/fat/cache.c cache->fcluster = new->fcluster;
cache 173 fs/fat/cache.c cache->dcluster = new->dcluster;
cache 174 fs/fat/cache.c cache->nr_contig = new->nr_contig;
cache 177 fs/fat/cache.c fat_cache_update_lru(inode, cache);
cache 189 fs/fat/cache.c struct fat_cache *cache;
cache 192 fs/fat/cache.c cache = list_entry(i->cache_lru.next,
cache 194 fs/fat/cache.c list_del_init(&cache->cache_list);
cache 196 fs/fat/cache.c fat_cache_free(cache);
cache 95 fs/fscache/cache.c struct fscache_cache *cache;
cache 113 fs/fscache/cache.c cache = object->cache;
cache 115 fs/fscache/cache.c test_bit(FSCACHE_IOERROR, &cache->flags))
cache 116 fs/fscache/cache.c cache = NULL;
cache 119 fs/fscache/cache.c _leave(" = %p [parent]", cache);
cache 120 fs/fscache/cache.c return cache;
cache 147 fs/fscache/cache.c if (!tag->cache) {
cache 152 fs/fscache/cache.c if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
cache 155 fs/fscache/cache.c _leave(" = %p [specific]", tag->cache);
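The fs/ecryptfs/main.c hits above come from a table-driven allocator setup: an array of descriptors, each holding a struct kmem_cache ** slot to fill, walked once at module init and once at teardown. A condensed sketch of that pattern, with illustrative descriptor names rather than the ecryptfs ones:

	#include <linux/kernel.h>
	#include <linux/slab.h>

	static struct kmem_cache *my_inode_info_cache;

	static struct cache_info {
		struct kmem_cache **cache;	/* slot to fill in */
		const char *name;
		size_t size;
	} cache_infos[] = {
		{ .cache = &my_inode_info_cache, .name = "my_inode_info", .size = 128 },
	};

	static int init_caches(void)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(cache_infos); i++) {
			struct cache_info *info = &cache_infos[i];

			*(info->cache) = kmem_cache_create(info->name, info->size,
							   0, SLAB_HWCACHE_ALIGN, NULL);
			if (!*(info->cache))
				return -ENOMEM;	/* caller unwinds via kmem_cache_destroy() */
		}
		return 0;
	}

The teardown loop is the mirror image: kmem_cache_destroy(*(info->cache)) for each populated slot, exactly as the line-725 hit shows.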
cache 156 fs/fscache/cache.c return tag->cache;
cache 160 fs/fscache/cache.c cache = list_entry(fscache_cache_list.next,
cache 162 fs/fscache/cache.c _leave(" = %p [first]", cache);
cache 163 fs/fscache/cache.c return cache;
cache 178 fs/fscache/cache.c void fscache_init_cache(struct fscache_cache *cache,
cache 185 fs/fscache/cache.c memset(cache, 0, sizeof(*cache));
cache 187 fs/fscache/cache.c cache->ops = ops;
cache 190 fs/fscache/cache.c vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va);
cache 193 fs/fscache/cache.c INIT_WORK(&cache->op_gc, fscache_operation_gc);
cache 194 fs/fscache/cache.c INIT_LIST_HEAD(&cache->link);
cache 195 fs/fscache/cache.c INIT_LIST_HEAD(&cache->object_list);
cache 196 fs/fscache/cache.c INIT_LIST_HEAD(&cache->op_gc_list);
cache 197 fs/fscache/cache.c spin_lock_init(&cache->object_list_lock);
cache 198 fs/fscache/cache.c spin_lock_init(&cache->op_gc_list_lock);
cache 213 fs/fscache/cache.c int fscache_add_cache(struct fscache_cache *cache,
cache 220 fs/fscache/cache.c BUG_ON(!cache->ops);
cache 223 fs/fscache/cache.c cache->flags = 0;
cache 230 fs/fscache/cache.c tagname = cache->identifier;
cache 234 fs/fscache/cache.c _enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname);
cache 244 fs/fscache/cache.c cache->kobj = kobject_create_and_add(tagname, fscache_root);
cache 245 fs/fscache/cache.c if (!cache->kobj)
cache 248 fs/fscache/cache.c ifsdef->cache = cache;
cache 249 fs/fscache/cache.c cache->fsdef = ifsdef;
cache 253 fs/fscache/cache.c tag->cache = cache;
cache 254 fs/fscache/cache.c cache->tag = tag;
cache 257 fs/fscache/cache.c list_add(&cache->link, &fscache_cache_list);
cache 261 fs/fscache/cache.c spin_lock(&cache->object_list_lock);
cache 262 fs/fscache/cache.c list_add_tail(&ifsdef->cache_link, &cache->object_list);
cache 263 fs/fscache/cache.c spin_unlock(&cache->object_list_lock);
cache 280 fs/fscache/cache.c cache->tag->name, cache->ops->name);
cache 281 fs/fscache/cache.c kobject_uevent(cache->kobj, KOBJ_ADD);
cache 283 fs/fscache/cache.c _leave(" = 0 [%s]", cache->identifier);
cache 313 fs/fscache/cache.c void fscache_io_error(struct fscache_cache *cache)
cache 315 fs/fscache/cache.c if (!test_and_set_bit(FSCACHE_IOERROR, &cache->flags))
cache 317 fs/fscache/cache.c cache->ops->name);
cache 325 fs/fscache/cache.c static void fscache_withdraw_all_objects(struct fscache_cache *cache,
cache 330 fs/fscache/cache.c while (!list_empty(&cache->object_list)) {
cache 331 fs/fscache/cache.c spin_lock(&cache->object_list_lock);
cache 333 fs/fscache/cache.c if (!list_empty(&cache->object_list)) {
cache 334 fs/fscache/cache.c object = list_entry(cache->object_list.next,
cache 346 fs/fscache/cache.c spin_unlock(&cache->object_list_lock);
cache 361 fs/fscache/cache.c void fscache_withdraw_cache(struct fscache_cache *cache)
cache 368 fs/fscache/cache.c cache->tag->name);
cache 371 fs/fscache/cache.c if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags))
cache 375 fs/fscache/cache.c list_del_init(&cache->link);
cache 376 fs/fscache/cache.c cache->tag->cache = NULL;
cache 382 fs/fscache/cache.c cache->ops->sync_cache(cache);
cache 388 fs/fscache/cache.c cache->ops->dissociate_pages(cache);
cache 396 fs/fscache/cache.c fscache_withdraw_all_objects(cache, &dying_objects);
cache 402 fs/fscache/cache.c atomic_read(&cache->object_count) == 0);
cache 405 fs/fscache/cache.c list_empty(&cache->object_list));
cache 409 fs/fscache/cache.c kobject_put(cache->kobj);
cache 411 fs/fscache/cache.c clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags);
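Taken together, the fs/fscache/cache.c entries trace the backend registration lifecycle: fscache_init_cache() zeroes the struct, sets ->ops, and formats ->identifier; fscache_add_cache() publishes the cache on fscache_cache_list under a tag; fscache_io_error() latches FSCACHE_IOERROR; and fscache_withdraw_cache() tears everything down. A rough sketch of how a backend such as cachefiles drives that API, with the fsdef object setup and the ops table contents elided:

	#include <linux/fscache-cache.h>

	/* Sketch: a real backend must fully populate this ops table. */
	static const struct fscache_cache_ops my_backend_ops;
	static struct fscache_cache my_backend_cache;

	static int my_backend_bind(struct fscache_object *fsdef, const char *tagname)
	{
		/* Zeroes the struct, sets ops, formats ->identifier. */
		fscache_init_cache(&my_backend_cache, &my_backend_ops,
				   "mybackend-%u", 0);
		/* Publishes the cache on fscache_cache_list under the tag. */
		return fscache_add_cache(&my_backend_cache, fsdef, tagname);
	}

	static void my_backend_unbind(void)
	{
		/* Syncs, dissociates pages, and withdraws every object. */
		fscache_withdraw_cache(&my_backend_cache);
	}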
cache 412 fs/fscache/cache.c fscache_release_cache_tag(cache->tag);
cache 413 fs/fscache/cache.c cache->tag = NULL;
cache 25 fs/fscache/cookie.c static int fscache_alloc_object(struct fscache_cache *cache,
cache 397 fs/fscache/cookie.c struct fscache_cache *cache;
cache 415 fs/fscache/cookie.c cache = fscache_select_cache_for_object(cookie->parent);
cache 416 fs/fscache/cookie.c if (!cache) {
cache 423 fs/fscache/cookie.c _debug("cache %s", cache->tag->name);
cache 429 fs/fscache/cookie.c ret = fscache_alloc_object(cache, cookie);
cache 477 fs/fscache/cookie.c static int fscache_alloc_object(struct fscache_cache *cache,
cache 483 fs/fscache/cookie.c _enter("%p,%p{%s}", cache, cookie, cookie->def->name);
cache 488 fs/fscache/cookie.c if (object->cache == cache)
cache 496 fs/fscache/cookie.c object = cache->ops->alloc_object(cache, cookie);
cache 512 fs/fscache/cookie.c ret = fscache_alloc_object(cache, cookie->parent);
cache 521 fs/fscache/cookie.c cache->ops->put_object(object, fscache_obj_put_attach_fail);
cache 541 fs/fscache/cookie.c cache->ops->put_object(object, fscache_obj_put_alloc_fail);
cache 555 fs/fscache/cookie.c struct fscache_cache *cache = object->cache;
cache 568 fs/fscache/cookie.c if (p->cache == object->cache) {
cache 579 fs/fscache/cookie.c if (p->cache == object->cache) {
cache 596 fs/fscache/cookie.c spin_lock(&cache->object_list_lock);
cache 597 fs/fscache/cookie.c list_add(&object->cache_link, &cache->object_list);
cache 598 fs/fscache/cookie.c spin_unlock(&cache->object_list_lock);
cache 925 fs/fscache/cookie.c if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
cache 940 fs/fscache/cookie.c ret = object->cache->ops->check_consistency(op);
cache 303 fs/fscache/object.c struct fscache_cache *cache)
cache 307 fs/fscache/object.c atomic_inc(&cache->object_count);
cache 324 fs/fscache/object.c object->cache = cache;
cache 406 fs/fscache/object.c object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
cache 470 fs/fscache/object.c test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
cache 477 fs/fscache/object.c cookie->def->name, object->cache->tag->name);
cache 481 fs/fscache/object.c ret = object->cache->ops->lookup_object(object);
cache 596 fs/fscache/object.c object->cache->ops->lookup_complete(object);
cache 632 fs/fscache/object.c object->cache->ops->lookup_complete(object);
cache 704 fs/fscache/object.c struct fscache_cache *cache = object->cache;
cache 740 fs/fscache/object.c spin_lock(&cache->object_list_lock);
cache 742 fs/fscache/object.c spin_unlock(&cache->object_list_lock);
cache 745 fs/fscache/object.c cache->ops->drop_object(object);
cache 778 fs/fscache/object.c ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
cache 790 fs/fscache/object.c object->cache->ops->put_object(object, why);
cache 992 fs/fscache/object.c fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
cache 1053 fs/fscache/object.c object->cache->ops->update_object(object);
cache 1097 fs/fscache/object.c object->cache->identifier);
cache 496 fs/fscache/operation.c struct fscache_cache *cache;
cache 536 fs/fscache/operation.c cache = object->cache;
cache 537 fs/fscache/operation.c spin_lock(&cache->op_gc_list_lock);
cache 538 fs/fscache/operation.c list_add_tail(&op->pend_link, &cache->op_gc_list);
cache 539 fs/fscache/operation.c spin_unlock(&cache->op_gc_list_lock);
cache 540 fs/fscache/operation.c schedule_work(&cache->op_gc);
cache 565 fs/fscache/operation.c struct fscache_cache *cache =
cache 572 fs/fscache/operation.c spin_lock(&cache->op_gc_list_lock);
cache 573 fs/fscache/operation.c if (list_empty(&cache->op_gc_list)) {
cache 574 fs/fscache/operation.c spin_unlock(&cache->op_gc_list_lock);
cache 578 fs/fscache/operation.c op = list_entry(cache->op_gc_list.next,
cache 581 fs/fscache/operation.c spin_unlock(&cache->op_gc_list_lock);
cache 605 fs/fscache/operation.c if (!list_empty(&cache->op_gc_list))
cache 606 fs/fscache/operation.c schedule_work(&cache->op_gc);
cache 198 fs/fscache/page.c ret = object->cache->ops->attr_changed(object);
cache 499 fs/fscache/page.c ret = object->cache->ops->allocate_page(op, page, gfp);
cache 505 fs/fscache/page.c ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
cache 626 fs/fscache/page.c ret = object->cache->ops->allocate_pages(
cache 631 fs/fscache/page.c ret = object->cache->ops->read_or_alloc_pages(
cache 732 fs/fscache/page.c ret = object->cache->ops->allocate_page(op, page, gfp);
cache 860 fs/fscache/page.c ret = object->cache->ops->write_page(op, page);
cache 1006 fs/fscache/page.c if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
cache 1142 fs/fscache/page.c object->cache->ops->uncache_page) {
cache 1145 fs/fscache/page.c object->cache->ops->uncache_page(object, page);
cache 522 fs/lockd/host.c static struct nlm_host *next_host_state(struct hlist_head *cache,
cache 530 fs/lockd/host.c for_each_host(host, chain, cache) {
cache 47 fs/mbcache.c static unsigned long mb_cache_shrink(struct mb_cache *cache,
cache 50 fs/mbcache.c static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
cache 53 fs/mbcache.c return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
cache 74 fs/mbcache.c int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
cache 82 fs/mbcache.c if (cache->c_entry_count >= cache->c_max_entries)
cache 83 fs/mbcache.c schedule_work(&cache->c_shrink_work);
cache 85 fs/mbcache.c if (cache->c_entry_count >= 2*cache->c_max_entries)
cache 86 fs/mbcache.c mb_cache_shrink(cache, SYNC_SHRINK_BATCH);
cache 99 fs/mbcache.c head = mb_cache_entry_head(cache, key);
cache 111 fs/mbcache.c spin_lock(&cache->c_list_lock);
cache 112 fs/mbcache.c list_add_tail(&entry->e_list, &cache->c_list);
cache 115 fs/mbcache.c cache->c_entry_count++;
cache 116 fs/mbcache.c spin_unlock(&cache->c_list_lock);
cache 128 fs/mbcache.c static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
cache 136 fs/mbcache.c head = mb_cache_entry_head(cache, key);
cache 155 fs/mbcache.c mb_cache_entry_put(cache, old_entry);
cache 168 fs/mbcache.c struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
cache 171 fs/mbcache.c return __entry_find(cache, NULL, key);
cache 185 fs/mbcache.c struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
cache 188 fs/mbcache.c return __entry_find(cache, entry, entry->e_key);
cache 198 fs/mbcache.c struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
cache 205 fs/mbcache.c head = mb_cache_entry_head(cache, key);
cache 227 fs/mbcache.c void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
cache 233 fs/mbcache.c head = mb_cache_entry_head(cache, key);
cache 240 fs/mbcache.c spin_lock(&cache->c_list_lock);
cache 243 fs/mbcache.c if (!WARN_ONCE(cache->c_entry_count == 0,
cache 245 fs/mbcache.c cache->c_entry_count--;
cache 248 fs/mbcache.c spin_unlock(&cache->c_list_lock);
cache 249 fs/mbcache.c mb_cache_entry_put(cache, entry);
cache 263 fs/mbcache.c void mb_cache_entry_touch(struct mb_cache *cache,
cache 273 fs/mbcache.c struct mb_cache *cache = container_of(shrink, struct mb_cache,
cache 276 fs/mbcache.c return cache->c_entry_count;
cache 280 fs/mbcache.c static unsigned long mb_cache_shrink(struct mb_cache *cache,
cache 287 fs/mbcache.c spin_lock(&cache->c_list_lock);
cache 288 fs/mbcache.c while (nr_to_scan-- && !list_empty(&cache->c_list)) {
cache 289 fs/mbcache.c entry = list_first_entry(&cache->c_list,
cache 293 fs/mbcache.c list_move_tail(&entry->e_list, &cache->c_list);
cache 297 fs/mbcache.c cache->c_entry_count--;
cache 302 fs/mbcache.c spin_unlock(&cache->c_list_lock);
cache 303 fs/mbcache.c head = mb_cache_entry_head(cache, entry->e_key);
cache 310 fs/mbcache.c if (mb_cache_entry_put(cache, entry))
cache 313 fs/mbcache.c spin_lock(&cache->c_list_lock);
cache 315 fs/mbcache.c spin_unlock(&cache->c_list_lock);
cache 323 fs/mbcache.c struct mb_cache *cache = container_of(shrink, struct mb_cache,
cache 325 fs/mbcache.c return mb_cache_shrink(cache, sc->nr_to_scan);
cache 333 fs/mbcache.c struct mb_cache *cache = container_of(work, struct mb_cache,
cache 335 fs/mbcache.c mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
cache 346 fs/mbcache.c struct mb_cache *cache;
cache 350 fs/mbcache.c cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
cache 351 fs/mbcache.c if (!cache)
cache 353 fs/mbcache.c cache->c_bucket_bits = bucket_bits;
cache 354 fs/mbcache.c cache->c_max_entries = bucket_count << 4;
cache 355 fs/mbcache.c INIT_LIST_HEAD(&cache->c_list);
cache 356 fs/mbcache.c spin_lock_init(&cache->c_list_lock);
cache 357 fs/mbcache.c cache->c_hash = kmalloc_array(bucket_count,
cache 360 fs/mbcache.c if (!cache->c_hash) {
cache 361 fs/mbcache.c kfree(cache);
cache 365 fs/mbcache.c INIT_HLIST_BL_HEAD(&cache->c_hash[i]);
cache 367 fs/mbcache.c cache->c_shrink.count_objects = mb_cache_count;
cache 368 fs/mbcache.c cache->c_shrink.scan_objects = mb_cache_scan;
cache 369 fs/mbcache.c cache->c_shrink.seeks = DEFAULT_SEEKS;
cache 370 fs/mbcache.c if (register_shrinker(&cache->c_shrink)) {
cache 371 fs/mbcache.c kfree(cache->c_hash);
cache 372 fs/mbcache.c kfree(cache);
cache 376 fs/mbcache.c INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);
cache 378 fs/mbcache.c return cache;
cache 392 fs/mbcache.c void mb_cache_destroy(struct mb_cache *cache)
cache 396 fs/mbcache.c unregister_shrinker(&cache->c_shrink);
cache 402 fs/mbcache.c list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
cache 410 fs/mbcache.c mb_cache_entry_put(cache, entry);
cache 412 fs/mbcache.c kfree(cache->c_hash);
cache 413 fs/mbcache.c kfree(cache);
cache 2182 fs/nfs/dir.c struct nfs_access_entry *cache;
cache 2185 fs/nfs/dir.c cache = list_entry(head->next, struct nfs_access_entry, lru);
cache 2186 fs/nfs/dir.c list_del(&cache->lru);
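The mb_cache API indexed above (declared in include/linux/mbcache.h, also listed further down) is how ext2/ext4 deduplicate xattr blocks: entries are keyed by a 32-bit hash and carry a 64-bit value such as a block number, and lookups return a referenced entry. A minimal consumer sketch, assuming the entry's value field is e_value as in the current mbcache layout:

	#include <linux/mbcache.h>

	/* Sketch: look up a previously cached block by xattr hash. */
	static u64 find_dup_block(struct mb_cache *cache, u32 hash)
	{
		struct mb_cache_entry *entry;
		u64 block = 0;

		entry = mb_cache_entry_find_first(cache, hash);
		if (entry) {
			block = entry->e_value;
			mb_cache_entry_put(cache, entry);	/* drop our reference */
		}
		return block;
	}

A producer would pair this with mb_cache_entry_create(cache, GFP_NOFS, hash, block, 1), as in the ext2_xattr_cache_insert() hit above, and tear everything down with mb_cache_destroy().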
cache 2187 fs/nfs/dir.c nfs_access_free_entry(cache);
cache 2196 fs/nfs/dir.c struct nfs_access_entry *cache;
cache 2209 fs/nfs/dir.c cache = list_entry(nfsi->access_cache_entry_lru.next,
cache 2211 fs/nfs/dir.c list_move(&cache->lru, &head);
cache 2212 fs/nfs/dir.c rb_erase(&cache->rb_node, &nfsi->access_cache);
cache 2321 fs/nfs/dir.c struct nfs_access_entry *cache;
cache 2329 fs/nfs/dir.c cache = nfs_access_search_rbtree(inode, cred);
cache 2331 fs/nfs/dir.c if (cache == NULL)
cache 2348 fs/nfs/dir.c res->cred = cache->cred;
cache 2349 fs/nfs/dir.c res->mask = cache->mask;
cache 2350 fs/nfs/dir.c list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
cache 2367 fs/nfs/dir.c struct nfs_access_entry *cache;
cache 2375 fs/nfs/dir.c cache = list_entry(lh, struct nfs_access_entry, lru);
cache 2377 fs/nfs/dir.c cred != cache->cred)
cache 2378 fs/nfs/dir.c cache = NULL;
cache 2379 fs/nfs/dir.c if (cache == NULL)
cache 2383 fs/nfs/dir.c res->cred = cache->cred;
cache 2384 fs/nfs/dir.c res->mask = cache->mask;
cache 2428 fs/nfs/dir.c struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
cache 2429 fs/nfs/dir.c if (cache == NULL)
cache 2431 fs/nfs/dir.c RB_CLEAR_NODE(&cache->rb_node);
cache 2432 fs/nfs/dir.c cache->cred = get_cred(set->cred);
cache 2433 fs/nfs/dir.c cache->mask = set->mask;
cache 2440 fs/nfs/dir.c nfs_access_add_rbtree(inode, cache);
cache 2498 fs/nfs/dir.c struct nfs_access_entry cache;
cache 2505 fs/nfs/dir.c status = nfs_access_get_cached_rcu(inode, cred, &cache);
cache 2507 fs/nfs/dir.c status = nfs_access_get_cached(inode, cred, &cache, may_block);
cache 2518 fs/nfs/dir.c cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
cache 2520 fs/nfs/dir.c cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
cache 2522 fs/nfs/dir.c cache.mask |= NFS_ACCESS_EXECUTE;
cache 2523 fs/nfs/dir.c cache.cred = cred;
cache 2524 fs/nfs/dir.c status = NFS_PROTO(inode)->access(inode, &cache);
cache 2533 fs/nfs/dir.c nfs_access_add_cache(inode, &cache);
cache 2535 fs/nfs/dir.c cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode);
cache 2564 fs/nfs/nfs4proc.c struct nfs_access_entry cache;
cache 2586 fs/nfs/nfs4proc.c cache.cred = cred;
cache 2587 fs/nfs/nfs4proc.c nfs_access_set_mask(&cache, opendata->o_res.access_result);
cache 2588 fs/nfs/nfs4proc.c nfs_access_add_cache(state->inode, &cache);
cache 2591 fs/nfs/nfs4proc.c if ((mask & ~cache.mask & flags) == 0)
cache 357 fs/nfsd/nfs2acl.c #define PROC(name, argt, rest, relt, cache, respsize) \
cache 365 fs/nfsd/nfs2acl.c .pc_cachetype = cache, \
cache 244 fs/nfsd/nfs3acl.c #define PROC(name, argt, rest, relt, cache, respsize) \
cache 252 fs/nfsd/nfs3acl.c .pc_cachetype = cache, \
cache 263 fs/nilfs2/alloc.c struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
cache 268 fs/nilfs2/alloc.c bhp, &cache->prev_desc, &cache->lock);
cache 282 fs/nilfs2/alloc.c struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
cache 287 fs/nilfs2/alloc.c &cache->prev_bitmap, &cache->lock);
cache 298 fs/nilfs2/alloc.c struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
cache 303 fs/nilfs2/alloc.c &cache->prev_bitmap, &cache->lock);
cache 316 fs/nilfs2/alloc.c struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
cache 321 fs/nilfs2/alloc.c &cache->prev_entry, &cache->lock);
cache 331 fs/nilfs2/alloc.c struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
cache 335 fs/nilfs2/alloc.c &cache->prev_entry, &cache->lock);
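The fs/nfs/dir.c sequence above is nfs_do_access() in outline: probe the per-inode access cache (an RCU-protected LRU first, then the cred-keyed rbtree), and only on a miss fill a struct nfs_access_entry over the wire via NFS_PROTO(inode)->access() before inserting the result. A compressed, stubbed sketch of that lookup-or-fetch shape; the helpers here are illustrative stand-ins, not the NFS internals:

	#include <linux/fs.h>
	#include <linux/cred.h>
	#include <linux/errno.h>

	struct access_entry { u32 mask; };

	static int cache_lookup(struct inode *inode, const struct cred *cred,
				struct access_entry *res)
	{
		return -ENOENT;		/* stub: pretend the cache missed */
	}

	static u32 server_access_rpc(struct inode *inode, const struct cred *cred)
	{
		return 0;		/* stub: would be an ACCESS round trip */
	}

	static int check_access(struct inode *inode, const struct cred *cred, u32 want)
	{
		struct access_entry res;

		if (cache_lookup(inode, cred, &res) == -ENOENT) {
			res.mask = server_access_rpc(inode, cred);
			/* real code: nfs_access_add_cache(inode, &cache); */
		}
		return (want & ~res.mask) ? -EACCES : 0;
	}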
cache 838 fs/nilfs2/alloc.c struct nilfs_palloc_cache *cache)
cache 840 fs/nilfs2/alloc.c NILFS_MDT(inode)->mi_palloc_cache = cache;
cache 841 fs/nilfs2/alloc.c spin_lock_init(&cache->lock);
cache 846 fs/nilfs2/alloc.c struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
cache 848 fs/nilfs2/alloc.c spin_lock(&cache->lock);
cache 849 fs/nilfs2/alloc.c brelse(cache->prev_desc.bh);
cache 850 fs/nilfs2/alloc.c brelse(cache->prev_bitmap.bh);
cache 851 fs/nilfs2/alloc.c brelse(cache->prev_entry.bh);
cache 852 fs/nilfs2/alloc.c cache->prev_desc.bh = NULL;
cache 853 fs/nilfs2/alloc.c cache->prev_bitmap.bh = NULL;
cache 854 fs/nilfs2/alloc.c cache->prev_entry.bh = NULL;
cache 855 fs/nilfs2/alloc.c spin_unlock(&cache->lock);
cache 93 fs/nilfs2/alloc.h struct nilfs_palloc_cache *cache);
cache 233 fs/overlayfs/overlayfs.h void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache);
cache 99 fs/overlayfs/ovl_entry.h struct ovl_dir_cache *cache; /* directory */
cache 55 fs/overlayfs/readdir.c struct ovl_dir_cache *cache;
cache 230 fs/overlayfs/readdir.c struct ovl_dir_cache *cache = ovl_dir_cache(inode);
cache 232 fs/overlayfs/readdir.c if (cache) {
cache 233 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries);
cache 234 fs/overlayfs/readdir.c kfree(cache);
cache 240 fs/overlayfs/readdir.c struct ovl_dir_cache *cache = od->cache;
cache 242 fs/overlayfs/readdir.c WARN_ON(cache->refcount <= 0);
cache 243 fs/overlayfs/readdir.c cache->refcount--;
cache 244 fs/overlayfs/readdir.c if (!cache->refcount) {
cache 245 fs/overlayfs/readdir.c if (ovl_dir_cache(d_inode(dentry)) == cache)
cache 248 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries);
cache 249 fs/overlayfs/readdir.c kfree(cache);
cache 337 fs/overlayfs/readdir.c struct ovl_dir_cache *cache = od->cache;
cache 341 fs/overlayfs/readdir.c if (cache && ovl_dentry_version_get(dentry) != cache->version) {
cache 343 fs/overlayfs/readdir.c od->cache = NULL;
cache 396 fs/overlayfs/readdir.c list_for_each(p, &od->cache->entries) {
cache 408 fs/overlayfs/readdir.c struct ovl_dir_cache *cache;
cache 410 fs/overlayfs/readdir.c cache = ovl_dir_cache(d_inode(dentry));
cache 411 fs/overlayfs/readdir.c if (cache && ovl_dentry_version_get(dentry) == cache->version) {
cache 412 fs/overlayfs/readdir.c WARN_ON(!cache->refcount);
cache 413 fs/overlayfs/readdir.c cache->refcount++;
cache 414 fs/overlayfs/readdir.c return cache;
cache 418 fs/overlayfs/readdir.c cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
cache 419 fs/overlayfs/readdir.c if (!cache)
cache 422 fs/overlayfs/readdir.c cache->refcount = 1;
cache 423 fs/overlayfs/readdir.c INIT_LIST_HEAD(&cache->entries);
cache 424 fs/overlayfs/readdir.c cache->root = RB_ROOT;
cache 426 fs/overlayfs/readdir.c res = ovl_dir_read_merged(dentry, &cache->entries, &cache->root);
cache 428 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries);
cache 429 fs/overlayfs/readdir.c kfree(cache);
cache 433 fs/overlayfs/readdir.c cache->version = ovl_dentry_version_get(dentry);
cache 434 fs/overlayfs/readdir.c ovl_set_dir_cache(d_inode(dentry), cache);
cache 436 fs/overlayfs/readdir.c return cache;
cache 600 fs/overlayfs/readdir.c struct ovl_dir_cache *cache;
cache 602 fs/overlayfs/readdir.c cache = ovl_dir_cache(d_inode(dentry));
cache 603 fs/overlayfs/readdir.c if (cache && ovl_dentry_version_get(dentry) == cache->version)
cache 604 fs/overlayfs/readdir.c return cache;
cache 610 fs/overlayfs/readdir.c cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
cache 611 fs/overlayfs/readdir.c if (!cache)
cache 614 fs/overlayfs/readdir.c res = ovl_dir_read_impure(path, &cache->entries, &cache->root);
cache 616 fs/overlayfs/readdir.c ovl_cache_free(&cache->entries);
cache 617 fs/overlayfs/readdir.c kfree(cache);
cache 620 fs/overlayfs/readdir.c if (list_empty(&cache->entries)) {
cache 631 fs/overlayfs/readdir.c kfree(cache);
cache 635 fs/overlayfs/readdir.c cache->version = ovl_dentry_version_get(dentry);
cache 636 fs/overlayfs/readdir.c ovl_set_dir_cache(d_inode(dentry), cache);
cache 638 fs/overlayfs/readdir.c return cache;
cache 643 fs/overlayfs/readdir.c struct ovl_dir_cache *cache;
cache 660 fs/overlayfs/readdir.c } else if (rdt->cache) {
cache 663 fs/overlayfs/readdir.c p = ovl_cache_entry_find(&rdt->cache->root, name, namelen);
cache 718 fs/overlayfs/readdir.c rdt.cache = ovl_cache_get_impure(&file->f_path);
cache 719 fs/overlayfs/readdir.c if (IS_ERR(rdt.cache))
cache 720 fs/overlayfs/readdir.c return PTR_ERR(rdt.cache);
cache 755 fs/overlayfs/readdir.c if (!od->cache) {
cache 756 fs/overlayfs/readdir.c struct ovl_dir_cache *cache;
cache 758 fs/overlayfs/readdir.c cache = ovl_cache_get(dentry);
cache 759 fs/overlayfs/readdir.c if (IS_ERR(cache))
cache 760 fs/overlayfs/readdir.c return PTR_ERR(cache);
cache 762 fs/overlayfs/readdir.c od->cache = cache;
cache 766 fs/overlayfs/readdir.c while (od->cursor != &od->cache->entries) {
cache 812 fs/overlayfs/readdir.c if (od->cache)
cache 871 fs/overlayfs/readdir.c if (od->cache) {
cache 178 fs/overlayfs/super.c oi->cache = NULL;
cache 271 fs/overlayfs/util.c return OVL_I(inode)->cache;
cache 274 fs/overlayfs/util.c void ovl_set_dir_cache(struct inode *inode, struct ovl_dir_cache *cache)
cache 276 fs/overlayfs/util.c OVL_I(inode)->cache = cache;
cache 53 fs/squashfs/cache.c struct squashfs_cache *cache, u64 block, int length)
cache 58 fs/squashfs/cache.c spin_lock(&cache->lock);
cache 61 fs/squashfs/cache.c for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
cache 62 fs/squashfs/cache.c if (cache->entry[i].block == block) {
cache 63 fs/squashfs/cache.c cache->curr_blk = i;
cache 66 fs/squashfs/cache.c i = (i + 1) % cache->entries;
cache 69 fs/squashfs/cache.c if (n == cache->entries) {
cache 74 fs/squashfs/cache.c if (cache->unused == 0) {
cache 75 fs/squashfs/cache.c cache->num_waiters++;
cache 76 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 77 fs/squashfs/cache.c wait_event(cache->wait_queue, cache->unused);
cache 78 fs/squashfs/cache.c spin_lock(&cache->lock);
cache 79 fs/squashfs/cache.c cache->num_waiters--;
cache 88 fs/squashfs/cache.c i = cache->next_blk;
cache 89 fs/squashfs/cache.c for (n = 0; n < cache->entries; n++) {
cache 90 fs/squashfs/cache.c if (cache->entry[i].refcount == 0)
cache 92 fs/squashfs/cache.c i = (i + 1) % cache->entries;
cache 95 fs/squashfs/cache.c cache->next_blk = (i + 1) % cache->entries;
cache 96 fs/squashfs/cache.c entry = &cache->entry[i];
cache 102 fs/squashfs/cache.c cache->unused--;
cache 108 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 113 fs/squashfs/cache.c spin_lock(&cache->lock);
cache 126 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 129 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 140 fs/squashfs/cache.c entry = &cache->entry[i];
cache 142 fs/squashfs/cache.c cache->unused--;
cache 151 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 154 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 161 fs/squashfs/cache.c cache->name, i, entry->block, entry->refcount, entry->error);
cache 164 fs/squashfs/cache.c ERROR("Unable to read %s cache entry [%llx]\n", cache->name,
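The ovl_cache_get() hits above show overlayfs's invalidation scheme: a directory cache is stamped with ovl_dentry_version_get(dentry) when built, reused (with a refcount bump) only while the stamp still matches, and rebuilt otherwise. A stripped-down sketch of that version-stamped, refcounted lookup, with illustrative names:

	#include <linux/slab.h>
	#include <linux/err.h>

	struct dir_cache {
		long refcount;
		u64 version;	/* directory version when the cache was filled */
	};

	static struct dir_cache *dir_cache_get(struct dir_cache **slot, u64 cur_version)
	{
		struct dir_cache *cache = *slot;

		if (cache && cache->version == cur_version) {
			cache->refcount++;	/* still fresh: share it */
			return cache;
		}
		cache = kzalloc(sizeof(struct dir_cache), GFP_KERNEL);
		if (!cache)
			return ERR_PTR(-ENOMEM);
		cache->refcount = 1;
		cache->version = cur_version;
		/* ... read and populate the directory entries here ... */
		*slot = cache;
		return cache;
	}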
cache 175 fs/squashfs/cache.c struct squashfs_cache *cache = entry->cache;
cache 177 fs/squashfs/cache.c spin_lock(&cache->lock);
cache 180 fs/squashfs/cache.c cache->unused++;
cache 185 fs/squashfs/cache.c if (cache->num_waiters) {
cache 186 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 187 fs/squashfs/cache.c wake_up(&cache->wait_queue);
cache 191 fs/squashfs/cache.c spin_unlock(&cache->lock);
cache 197 fs/squashfs/cache.c void squashfs_cache_delete(struct squashfs_cache *cache)
cache 201 fs/squashfs/cache.c if (cache == NULL)
cache 204 fs/squashfs/cache.c for (i = 0; i < cache->entries; i++) {
cache 205 fs/squashfs/cache.c if (cache->entry[i].data) {
cache 206 fs/squashfs/cache.c for (j = 0; j < cache->pages; j++)
cache 207 fs/squashfs/cache.c kfree(cache->entry[i].data[j]);
cache 208 fs/squashfs/cache.c kfree(cache->entry[i].data);
cache 210 fs/squashfs/cache.c kfree(cache->entry[i].actor);
cache 213 fs/squashfs/cache.c kfree(cache->entry);
cache 214 fs/squashfs/cache.c kfree(cache);
cache 227 fs/squashfs/cache.c struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL);
cache 229 fs/squashfs/cache.c if (cache == NULL) {
cache 234 fs/squashfs/cache.c cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL);
cache 235 fs/squashfs/cache.c if (cache->entry == NULL) {
cache 240 fs/squashfs/cache.c cache->curr_blk = 0;
cache 241 fs/squashfs/cache.c cache->next_blk = 0;
cache 242 fs/squashfs/cache.c cache->unused = entries;
cache 243 fs/squashfs/cache.c cache->entries = entries;
cache 244 fs/squashfs/cache.c cache->block_size = block_size;
cache 245 fs/squashfs/cache.c cache->pages = block_size >> PAGE_SHIFT;
cache 246 fs/squashfs/cache.c cache->pages = cache->pages ? cache->pages : 1;
cache 247 fs/squashfs/cache.c cache->name = name;
cache 248 fs/squashfs/cache.c cache->num_waiters = 0;
cache 249 fs/squashfs/cache.c spin_lock_init(&cache->lock);
cache 250 fs/squashfs/cache.c init_waitqueue_head(&cache->wait_queue);
cache 253 fs/squashfs/cache.c struct squashfs_cache_entry *entry = &cache->entry[i];
cache 255 fs/squashfs/cache.c init_waitqueue_head(&cache->entry[i].wait_queue);
cache 256 fs/squashfs/cache.c entry->cache = cache;
cache 258 fs/squashfs/cache.c entry->data = kcalloc(cache->pages, sizeof(void *), GFP_KERNEL);
cache 264 fs/squashfs/cache.c for (j = 0; j < cache->pages; j++) {
cache 273 fs/squashfs/cache.c cache->pages, 0);
cache 280 fs/squashfs/cache.c return cache;
cache 283 fs/squashfs/cache.c squashfs_cache_delete(cache);
cache 38 fs/squashfs/squashfs_fs_sb.h struct squashfs_cache *cache;
cache 207 include/acpi/acpiosxf.h acpi_status acpi_os_delete_cache(acpi_cache_t * cache);
cache 211 include/acpi/acpiosxf.h acpi_status acpi_os_purge_cache(acpi_cache_t * cache);
cache 215 include/acpi/acpiosxf.h void *acpi_os_acquire_object(acpi_cache_t * cache);
cache 219 include/acpi/acpiosxf.h acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object);
cache 65 include/acpi/platform/aclinuxex.h static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
cache 67 include/acpi/platform/aclinuxex.h return kmem_cache_zalloc(cache,
cache 51 include/linux/bch.h int *cache;
cache 45 include/linux/fscache-cache.h struct fscache_cache *cache; /* cache referred to by this tag */
cache 230 include/linux/fscache-cache.h struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
cache 270 include/linux/fscache-cache.h void (*sync_cache)(struct fscache_cache *cache);
cache 306 include/linux/fscache-cache.h void (*dissociate_pages)(struct fscache_cache *cache);
cache 377 include/linux/fscache-cache.h struct fscache_cache *cache; /* cache that supplied this object */
cache 415 include/linux/fscache-cache.h return test_bit(FSCACHE_IOERROR, &object->cache->flags);
cache 431 include/linux/fscache-cache.h static inline void fscache_object_destroyed(struct fscache_cache *cache)
cache 433 include/linux/fscache-cache.h if (atomic_dec_and_test(&cache->object_count))
cache 530 include/linux/fscache-cache.h void fscache_init_cache(struct fscache_cache *cache,
cache 534 include/linux/fscache-cache.h extern int fscache_add_cache(struct fscache_cache *cache,
cache 537 include/linux/fscache-cache.h extern void fscache_withdraw_cache(struct fscache_cache *cache);
cache 539 include/linux/fscache-cache.h extern void fscache_io_error(struct fscache_cache *cache);
cache 46 include/linux/kasan.h void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
cache 50 include/linux/kasan.h void kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
cache 51 include/linux/kasan.h void kasan_poison_object_data(struct kmem_cache *cache, void *object);
cache 52 include/linux/kasan.h void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
cache 84 include/linux/kasan.h size_t kasan_metadata_size(struct kmem_cache *cache);
cache 102 include/linux/kasan.h static inline void kasan_cache_create(struct kmem_cache *cache,
cache 107 include/linux/kasan.h static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
cache 109 include/linux/kasan.h static inline void kasan_poison_object_data(struct kmem_cache *cache,
cache 111 include/linux/kasan.h static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
cache 157 include/linux/kasan.h static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
cache 165 include/linux/kasan.h void kasan_cache_shrink(struct kmem_cache *cache);
cache 166 include/linux/kasan.h void kasan_cache_shutdown(struct kmem_cache *cache);
cache 170 include/linux/kasan.h static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
cache 171 include/linux/kasan.h static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
cache 731 include/linux/kvm_host.h void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
cache 763 include/linux/kvm_host.h struct gfn_to_pfn_cache *cache, bool atomic);
cache 767 include/linux/kvm_host.h struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
cache 239 include/linux/lru_cache.h extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
cache 28 include/linux/mbcache.h void mb_cache_destroy(struct mb_cache *cache);
cache 30 include/linux/mbcache.h int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
cache 33 include/linux/mbcache.h static inline int mb_cache_entry_put(struct mb_cache *cache,
cache 42 include/linux/mbcache.h void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
cache 43 include/linux/mbcache.h struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
cache 45 include/linux/mbcache.h struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
cache 47 include/linux/mbcache.h struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
cache 49 include/linux/mbcache.h void mb_cache_entry_touch(struct mb_cache *cache,
cache 305 include/linux/mlx5/driver.h struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
cache 382 include/linux/mroute_base.h struct list_head *cache;
cache 413 include/linux/mroute_base.h it->cache = NULL;
cache 426 include/linux/mroute_base.h if (it->cache == &mrt->mfc_unres_queue)
cache 428 include/linux/mroute_base.h else if (it->cache == &mrt->mfc_cache_list)
cache 133 include/linux/mtd/nand.h unsigned long *cache;
cache 756 include/linux/mtd/nand.h return !!nand->bbt.cache;
cache 270 include/linux/netdevice.h int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
cache 115 include/linux/platform_data/mlxreg.h u32 cache;
cache 66 include/linux/power/bq27xxx_battery.h struct bq27xxx_reg_cache cache;
cache 92 include/linux/slab_def.h static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
cache 95 include/linux/slab_def.h void *object = x - (x - page->s_mem) % cache->size;
cache 96 include/linux/slab_def.h void *last_object = page->s_mem + (cache->num - 1) * cache->size;
cache 110 include/linux/slab_def.h static inline unsigned int obj_to_index(const struct kmem_cache *cache,
cache 114 include/linux/slab_def.h return reciprocal_divide(offset, cache->reciprocal_buffer_size);
cache 174 include/linux/slub_def.h static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
cache 176 include/linux/slub_def.h void *object = x - (x - page_address(page)) % cache->size;
cache 178 include/linux/slub_def.h (page->objects - 1) * cache->size;
cache 181 include/linux/slub_def.h result = fixup_red_left(cache, result);
cache 732 include/linux/syscalls.h asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
cache 549 include/net/9p/9p.h struct kmem_cache *cache;
cache 12 include/net/dst_cache.h struct dst_cache_pcpu __percpu *cache;
cache 23 include/net/netfilter/nf_conntrack_ecache.h unsigned long cache; /* bitops want long */
cache 118 include/net/netfilter/nf_conntrack_ecache.h set_bit(event, &e->cache);
cache 198 include/net/netlabel.h struct netlbl_lsm_cache *cache;
cache 280 include/net/netlabel.h struct netlbl_lsm_cache *cache;
cache 282 include/net/netlabel.h cache = kzalloc(sizeof(*cache), flags);
cache 283 include/net/netlabel.h if (cache)
cache 284 include/net/netlabel.h refcount_set(&cache->refcount, 1);
cache 285 include/net/netlabel.h return cache;
cache 296 include/net/netlabel.h static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
cache 298 include/net/netlabel.h if (!refcount_dec_and_test(&cache->refcount))
cache 301 include/net/netlabel.h if (cache->free)
cache 302 include/net/netlabel.h cache->free(cache->data);
cache 303 include/net/netlabel.h kfree(cache);
cache 366 include/net/netlabel.h netlbl_secattr_cache_free(secattr->cache);
cache 58 include/net/page_pool.h void *cache[PP_ALLOC_CACHE_SIZE];
cache 64 include/rdma/ib_fmr_pool.h unsigned cache:1;
cache 2166 include/rdma/ib_verbs.h struct ib_port_cache cache;
cache 2602 include/rdma/ib_verbs.h struct ib_cache cache;
cache 428 include/trace/events/bcache.h TP_PROTO(struct cache *ca, size_t bucket),
cache 449 include/trace/events/bcache.h TP_PROTO(struct cache *ca, size_t bucket),
cache 467 include/trace/events/bcache.h TP_PROTO(struct cache *ca, unsigned reserve),
cache 86 kernel/kprobes.c struct kprobe_insn_cache *cache;
cache 183 kernel/kprobes.c kip->cache = c;
cache 206 kernel/kprobes.c kip->cache->free(kip->insns);
cache 734 lib/bch.c rep = bch->cache;
cache 821 lib/bch.c gf_poly_logrep(bch, f, bch->cache);
cache 836 lib/bch.c gf_poly_mod(bch, z, f, bch->cache);
cache 932 lib/bch.c gf_poly_logrep(bch, p, bch->cache);
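The include/net/netlabel.h inlines indexed above are a compact refcounted-cache idiom: allocation starts the refcount at one, and the free path only releases storage (plus an optional LSM-supplied destructor) once the last reference drops. A sketch in the same shape, with illustrative names:

	#include <linux/refcount.h>
	#include <linux/slab.h>

	/* Same shape as the netlbl_lsm_cache helpers above (sketch). */
	struct lsm_cache {
		refcount_t refcount;
		void (*free)(const void *data);
		void *data;
	};

	static void lsm_cache_free(struct lsm_cache *cache)
	{
		if (!refcount_dec_and_test(&cache->refcount))
			return;			/* another holder remains */
		if (cache->free)
			cache->free(cache->data);
		kfree(cache);
	}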
cache 933 lib/bch.c bch->cache[p->deg] = 0;
cache 939 lib/bch.c m = bch->cache[j];
cache 1322 lib/bch.c bch->cache = bch_alloc(2*t*sizeof(*bch->cache), &err);
cache 1371 lib/bch.c kfree(bch->cache);
cache 87 lib/lru_cache.c struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
cache 95 lib/lru_cache.c unsigned cache_obj_size = kmem_cache_size(cache);
cache 128 lib/lru_cache.c lc->lc_cache = cache;
cache 134 lib/lru_cache.c void *p = kmem_cache_alloc(cache, GFP_KERNEL);
cache 151 lib/lru_cache.c kmem_cache_free(cache, p - e_off);
cache 384 lib/test_kasan.c struct kmem_cache *cache = kmem_cache_create("test_cache",
cache 387 lib/test_kasan.c if (!cache) {
cache 392 lib/test_kasan.c p = kmem_cache_alloc(cache, GFP_KERNEL);
cache 395 lib/test_kasan.c kmem_cache_destroy(cache);
cache 400 lib/test_kasan.c kmem_cache_free(cache, p);
cache 401 lib/test_kasan.c kmem_cache_destroy(cache);
cache 409 lib/test_kasan.c struct kmem_cache *cache;
cache 411 lib/test_kasan.c cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
cache 412 lib/test_kasan.c if (!cache) {
cache 423 lib/test_kasan.c p = kmem_cache_alloc(cache, GFP_KERNEL);
cache 427 lib/test_kasan.c kmem_cache_free(cache, p);
cache 432 lib/test_kasan.c kmem_cache_destroy(cache);
cache 544 lib/test_kasan.c struct kmem_cache *cache;
cache 546 lib/test_kasan.c cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
cache 547 lib/test_kasan.c if (!cache) {
cache 552 lib/test_kasan.c p = kmem_cache_alloc(cache, GFP_KERNEL);
cache 555 lib/test_kasan.c kmem_cache_destroy(cache);
cache 559 lib/test_kasan.c kmem_cache_free(cache, p);
cache 560 lib/test_kasan.c kmem_cache_free(cache, p);
cache 561 lib/test_kasan.c kmem_cache_destroy(cache);
cache 568 lib/test_kasan.c struct kmem_cache *cache;
cache 570 lib/test_kasan.c cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
cache 572 lib/test_kasan.c if (!cache) {
cache 577 lib/test_kasan.c p = kmem_cache_alloc(cache, GFP_KERNEL);
cache 580 lib/test_kasan.c kmem_cache_destroy(cache);
cache 585 lib/test_kasan.c kmem_cache_free(cache, p + 1);
cache 591 lib/test_kasan.c kmem_cache_free(cache, p);
cache 593 lib/test_kasan.c kmem_cache_destroy(cache);
cache 255 mm/kasan/common.c void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
cache 263 mm/kasan/common.c cache->kasan_info.alloc_meta_offset = *size;
cache 268 mm/kasan/common.c (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
cache 269 mm/kasan/common.c cache->object_size < sizeof(struct kasan_free_meta))) {
cache 270 mm/kasan/common.c cache->kasan_info.free_meta_offset = *size;
cache 274 mm/kasan/common.c redzone_size = optimal_redzone(cache->object_size);
cache 275 mm/kasan/common.c redzone_adjust = redzone_size - (*size - cache->object_size);
cache 280 mm/kasan/common.c max(*size, cache->object_size + redzone_size));
cache 285 mm/kasan/common.c if (*size <= cache->kasan_info.alloc_meta_offset ||
cache 286 mm/kasan/common.c *size <= cache->kasan_info.free_meta_offset) {
cache 287 mm/kasan/common.c cache->kasan_info.alloc_meta_offset = 0;
cache 288 mm/kasan/common.c cache->kasan_info.free_meta_offset = 0;
cache 296 mm/kasan/common.c size_t kasan_metadata_size(struct kmem_cache *cache)
cache 298 mm/kasan/common.c return (cache->kasan_info.alloc_meta_offset ?
cache 300 mm/kasan/common.c (cache->kasan_info.free_meta_offset ?
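The lib/test_kasan.c entries above all follow one template: build a private kmem_cache, trigger exactly one bad access, and tear the cache down so KASAN can attribute the report. The double-free case (lines 544-561 in the listing), reconstructed as a self-contained sketch:

	#include <linux/slab.h>

	static void kmem_cache_double_free_demo(void)
	{
		struct kmem_cache *cache;
		char *p;

		cache = kmem_cache_create("test_cache", 200, 0, 0, NULL);
		if (!cache)
			return;
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p) {
			kmem_cache_destroy(cache);
			return;
		}
		kmem_cache_free(cache, p);
		kmem_cache_free(cache, p);	/* KASAN reports this double-free */
		kmem_cache_destroy(cache);
	}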
cache 304 mm/kasan/common.c struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
cache 307 mm/kasan/common.c return (void *)object + cache->kasan_info.alloc_meta_offset;
cache 310 mm/kasan/common.c struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
cache 314 mm/kasan/common.c return (void *)object + cache->kasan_info.free_meta_offset;
cache 318 mm/kasan/common.c static void kasan_set_free_info(struct kmem_cache *cache,
cache 324 mm/kasan/common.c alloc_meta = get_alloc_info(cache, object);
cache 345 mm/kasan/common.c void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
cache 347 mm/kasan/common.c kasan_unpoison_shadow(object, cache->object_size);
cache 350 mm/kasan/common.c void kasan_poison_object_data(struct kmem_cache *cache, void *object)
cache 353 mm/kasan/common.c round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
cache 371 mm/kasan/common.c static u8 assign_tag(struct kmem_cache *cache, const void *object,
cache 387 mm/kasan/common.c if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
cache 393 mm/kasan/common.c return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
cache 403 mm/kasan/common.c void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
cache 408 mm/kasan/common.c if (!(cache->flags & SLAB_KASAN))
cache 411 mm/kasan/common.c alloc_info = get_alloc_info(cache, object);
cache 416 mm/kasan/common.c assign_tag(cache, object, true, false));
cache 436 mm/kasan/common.c static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
cache 448 mm/kasan/common.c if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
cache 455 mm/kasan/common.c if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
cache 464 mm/kasan/common.c rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
cache 468 mm/kasan/common.c unlikely(!(cache->flags & SLAB_KASAN)))
cache 471 mm/kasan/common.c kasan_set_free_info(cache, object, tag);
cache 473 mm/kasan/common.c quarantine_put(get_free_info(cache, object), cache);
cache 478 mm/kasan/common.c bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
cache 480 mm/kasan/common.c return __kasan_slab_free(cache, object, ip, true);
cache 483 mm/kasan/common.c static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
cache 498 mm/kasan/common.c redzone_end = round_up((unsigned long)object + cache->object_size,
cache 502 mm/kasan/common.c tag = assign_tag(cache, object, false, keep_tag);
cache 509 mm/kasan/common.c if (cache->flags & SLAB_KASAN)
cache 510 mm/kasan/common.c set_track(&get_alloc_info(cache, object)->alloc_track, flags);
cache 515 mm/kasan/common.c void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
cache 518 mm/kasan/common.c return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
cache 521 mm/kasan/common.c void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
cache 524 mm/kasan/common.c return __kasan_kmalloc(cache, object, size, flags, true);
cache 194 mm/kasan/generic.c void kasan_cache_shrink(struct kmem_cache *cache)
cache 196 mm/kasan/generic.c quarantine_remove_cache(cache);
cache 199 mm/kasan/generic.c void kasan_cache_shutdown(struct kmem_cache *cache)
cache 201 mm/kasan/generic.c if (!__kmem_cache_empty(cache))
cache 202 mm/kasan/generic.c quarantine_remove_cache(cache);
cache 123 mm/kasan/kasan.h struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
cache 125 mm/kasan/kasan.h struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
cache 163 mm/kasan/kasan.h void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache);
cache 165 mm/kasan/kasan.h void quarantine_remove_cache(struct kmem_cache *cache);
cache 168 mm/kasan/kasan.h struct kmem_cache *cache) { }
cache 170 mm/kasan/kasan.h static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
cache 131 mm/kasan/quarantine.c static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
cache 137 mm/kasan/quarantine.c return ((void *)free_info) - cache->kasan_info.free_meta_offset;
cache 140 mm/kasan/quarantine.c static void qlink_free(struct qlist_node *qlink, struct kmem_cache *cache)
cache 142 mm/kasan/quarantine.c void *object = qlink_to_object(qlink, cache);
cache 148 mm/kasan/quarantine.c ___cache_free(cache, object, _THIS_IP_);
cache 154 mm/kasan/quarantine.c static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache)
cache 164 mm/kasan/quarantine.c cache ? cache : qlink_to_cache(qlink);
cache 173 mm/kasan/quarantine.c void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
cache 190 mm/kasan/quarantine.c qlist_put(q, &info->quarantine_link, cache->size);
cache 266 mm/kasan/quarantine.c struct kmem_cache *cache)
cache 279 mm/kasan/quarantine.c if (obj_cache == cache)
cache 290 mm/kasan/quarantine.c struct kmem_cache *cache = arg;
cache 295 mm/kasan/quarantine.c qlist_move_cache(q, &to_free, cache);
cache 296 mm/kasan/quarantine.c qlist_free_all(&to_free, cache);
cache 300 mm/kasan/quarantine.c void quarantine_remove_cache(struct kmem_cache *cache)
cache 312 mm/kasan/quarantine.c on_each_cpu(per_cpu_remove_cache, cache, 1);
cache 318 mm/kasan/quarantine.c qlist_move_cache(&global_quarantine[i], &to_free, cache);
cache 326 mm/kasan/quarantine.c qlist_free_all(&to_free, cache);
cache 122 mm/kasan/report.c static void describe_object_addr(struct kmem_cache *cache, void *object,
cache 132 mm/kasan/report.c object, cache->name, cache->object_size);
cache 140 mm/kasan/report.c } else if (access_addr >= object_addr + cache->object_size) {
cache 142 mm/kasan/report.c rel_bytes = access_addr - (object_addr + cache->object_size);
cache 150 mm/kasan/report.c rel_bytes, rel_type, cache->object_size, (void *)object_addr,
cache 151 mm/kasan/report.c (void *)(object_addr + cache->object_size));
cache 154 mm/kasan/report.c static struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
cache 160 mm/kasan/report.c alloc_meta = get_alloc_info(cache, object);
cache 174 mm/kasan/report.c static void describe_object(struct kmem_cache *cache, void *object,
cache 177 mm/kasan/report.c struct kasan_alloc_meta *alloc_info = get_alloc_info(cache, object);
cache 179 mm/kasan/report.c if (cache->flags & SLAB_KASAN) {
cache 184 mm/kasan/report.c free_track = kasan_get_free_track(cache, object, tag);
cache 189 mm/kasan/report.c describe_object_addr(cache, object, addr);
cache 378 mm/kasan/report.c struct kmem_cache *cache = page->slab_cache;
cache 379 mm/kasan/report.c void *object = nearest_obj(cache, page, addr);
cache 381 mm/kasan/report.c describe_object(cache, object, addr, tag);
cache 41 mm/kasan/tags_report.c struct kmem_cache *cache;
cache 52 mm/kasan/tags_report.c cache = page->slab_cache;
cache 53 mm/kasan/tags_report.c object = nearest_obj(cache, page, (void *)addr);
cache 54 mm/kasan/tags_report.c alloc_meta = get_alloc_info(cache, object);
cache 209 mm/slab.c static int drain_freelist(struct kmem_cache *cache,
cache 374 mm/slab.c static inline void *index_to_obj(struct kmem_cache *cache, struct page *page,
cache 377 mm/slab.c return page->s_mem + cache->size * idx;
cache 2181 mm/slab.c static int drain_freelist(struct kmem_cache *cache,
cache 2206 mm/slab.c n->free_objects -= cache->num;
cache 2208 mm/slab.c slab_destroy(cache, page);
cache 2556 mm/slab.c static void slab_map_pages(struct kmem_cache *cache, struct page *page,
cache 2559 mm/slab.c page->slab_cache = cache;
cache 2691 mm/slab.c static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
cache 2695 mm/slab.c redzone1 = *dbg_redzone1(cache, obj);
cache 2696 mm/slab.c redzone2 = *dbg_redzone2(cache, obj);
cache 2705 mm/slab.c slab_error(cache, "double free detected");
cache 2707 mm/slab.c slab_error(cache, "memory outside object was overwritten");
cache 3103 mm/slab.c static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
cache 3130 mm/slab.c get_node(cache, nid) &&
cache 3131 mm/slab.c get_node(cache, nid)->free_objects) {
cache 3132 mm/slab.c obj = ____cache_alloc_node(cache,
cache 3146 mm/slab.c page = cache_grow_begin(cache, flags, numa_mem_id());
cache 3147 mm/slab.c cache_grow_end(cache, page);
cache 3150 mm/slab.c obj = ____cache_alloc_node(cache,
cache 3267 mm/slab.c __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
cache 3272 mm/slab.c objp = alternate_node_alloc(cache, flags);
cache 3276 mm/slab.c objp = ____cache_alloc(cache, flags);
cache 3283 mm/slab.c objp = ____cache_alloc_node(cache, flags, numa_mem_id());
cache 659 mm/slab.h void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
cache 3019 mm/slub.c void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
cache 3021 mm/slub.c do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
cache 117 mm/swap_slots.c struct swap_slots_cache *cache;
cache 138 mm/swap_slots.c cache = &per_cpu(swp_slots, cpu);
cache 139 mm/swap_slots.c if (cache->slots || cache->slots_ret)
cache 142 mm/swap_slots.c if (!cache->lock_initialized) {
cache 143 mm/swap_slots.c mutex_init(&cache->alloc_lock);
cache 144 mm/swap_slots.c spin_lock_init(&cache->free_lock);
cache 145 mm/swap_slots.c cache->lock_initialized = true;
cache 147 mm/swap_slots.c cache->nr = 0;
cache 148 mm/swap_slots.c cache->cur = 0;
cache 149 mm/swap_slots.c cache->n_ret = 0;
cache 157 mm/swap_slots.c cache->slots = slots;
cache 159 mm/swap_slots.c cache->slots_ret = slots_ret;
cache 173 mm/swap_slots.c struct swap_slots_cache *cache;
cache 176 mm/swap_slots.c cache = &per_cpu(swp_slots, cpu);
cache 177 mm/swap_slots.c if ((type & SLOTS_CACHE) && cache->slots) {
cache 178 mm/swap_slots.c mutex_lock(&cache->alloc_lock);
cache 179 mm/swap_slots.c swapcache_free_entries(cache->slots + cache->cur, cache->nr);
cache 180 mm/swap_slots.c cache->cur = 0;
cache 181 mm/swap_slots.c cache->nr = 0;
cache 182 mm/swap_slots.c if (free_slots && cache->slots) {
cache 183 mm/swap_slots.c kvfree(cache->slots);
cache 184 mm/swap_slots.c cache->slots = NULL;
cache 186 mm/swap_slots.c mutex_unlock(&cache->alloc_lock);
cache 188 mm/swap_slots.c if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
cache 189 mm/swap_slots.c spin_lock_irq(&cache->free_lock);
cache 190 mm/swap_slots.c swapcache_free_entries(cache->slots_ret, cache->n_ret);
cache 191 mm/swap_slots.c cache->n_ret = 0;
cache 192 mm/swap_slots.c if (free_slots && cache->slots_ret) {
cache 193 mm/swap_slots.c slots = cache->slots_ret;
cache 194 mm/swap_slots.c cache->slots_ret = NULL;
cache 196 mm/swap_slots.c spin_unlock_irq(&cache->free_lock);
cache 265 mm/swap_slots.c static int refill_swap_slots_cache(struct swap_slots_cache *cache)
cache 267 mm/swap_slots.c if (!use_swap_slot_cache || cache->nr)
cache 270 mm/swap_slots.c cache->cur = 0;
cache 272 mm/swap_slots.c cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
cache 273 mm/swap_slots.c cache->slots, 1);
cache 275 mm/swap_slots.c return cache->nr;
cache 280 mm/swap_slots.c struct swap_slots_cache *cache;
cache 282 mm/swap_slots.c cache = raw_cpu_ptr(&swp_slots);
cache 283 mm/swap_slots.c if (likely(use_swap_slot_cache && cache->slots_ret)) {
cache 284 mm/swap_slots.c spin_lock_irq(&cache->free_lock);
cache 286 mm/swap_slots.c if (!use_swap_slot_cache || !cache->slots_ret) {
cache 287 mm/swap_slots.c spin_unlock_irq(&cache->free_lock);
cache 290 mm/swap_slots.c if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
cache 297 mm/swap_slots.c swapcache_free_entries(cache->slots_ret, cache->n_ret);
cache 298 mm/swap_slots.c cache->n_ret = 0;
cache 300 mm/swap_slots.c cache->slots_ret[cache->n_ret++] = entry;
cache 301 mm/swap_slots.c spin_unlock_irq(&cache->free_lock);
cache 313 mm/swap_slots.c struct swap_slots_cache *cache;
cache 332 mm/swap_slots.c cache = raw_cpu_ptr(&swp_slots);
cache 334 mm/swap_slots.c if (likely(check_cache_active() && cache->slots)) {
cache 335 mm/swap_slots.c mutex_lock(&cache->alloc_lock);
cache 336 mm/swap_slots.c if (cache->slots) {
cache 338 mm/swap_slots.c if (cache->nr) {
cache 339 mm/swap_slots.c pentry = &cache->slots[cache->cur++];
cache 342 mm/swap_slots.c cache->nr--;
cache 344 mm/swap_slots.c if (refill_swap_slots_cache(cache))
cache 348 mm/swap_slots.c mutex_unlock(&cache->alloc_lock);
cache 230 net/9p/client.c fc->cache = c->fcall_cache;
cache 233 net/9p/client.c fc->cache = NULL;
cache 249 net/9p/client.c if (fc->cache)
cache 250 net/9p/client.c kmem_cache_free(fc->cache, fc->sdata);
cache 1072 net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery;
cache 1075 net/bluetooth/hci_core.c list_for_each_entry_safe(p, n, &cache->all, all) {
cache 1080 net/bluetooth/hci_core.c INIT_LIST_HEAD(&cache->unknown);
cache 1081 net/bluetooth/hci_core.c INIT_LIST_HEAD(&cache->resolve);
cache 1087 net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery;
cache 1090 net/bluetooth/hci_core.c BT_DBG("cache %p, %pMR", cache, bdaddr);
cache 1092 net/bluetooth/hci_core.c list_for_each_entry(e, &cache->all, all) {
cache 1103 net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery;
cache 1106 net/bluetooth/hci_core.c BT_DBG("cache %p, %pMR", cache, bdaddr);
cache 1108 net/bluetooth/hci_core.c list_for_each_entry(e, &cache->unknown, list) {
cache 1120 net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery;
cache 1123 net/bluetooth/hci_core.c BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
cache 1125 net/bluetooth/hci_core.c list_for_each_entry(e, &cache->resolve, list) {
cache 1138 net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery;
cache 1139 net/bluetooth/hci_core.c struct list_head *pos = &cache->resolve;
cache 1144 net/bluetooth/hci_core.c list_for_each_entry(p, &cache->resolve, list) {
cache 1157 net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery;
cache 1161 net/bluetooth/hci_core.c BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
cache 1189 net/bluetooth/hci_core.c list_add(&ie->all, &cache->all);
cache 1195 net/bluetooth/hci_core.c list_add(&ie->list, &cache->unknown);
cache 1207 net/bluetooth/hci_core.c cache->timestamp = jiffies;
cache 1218 net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery;
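mm/swap_slots.c, indexed above, is a per-CPU front end to get_swap_pages(): each CPU keeps an array of preallocated swap entries, hands them out under a mutex, and refills the array in batch when it runs dry. The shape of that consume-or-refill step, with illustrative names and a stubbed refill:

	#include <linux/mutex.h>
	#include <linux/swap.h>

	/* Illustrative per-CPU slot cache: hand out one entry, refill in batch. */
	struct slot_cache {
		struct mutex alloc_lock;
		unsigned int nr;	/* entries still available */
		unsigned int cur;	/* index of the next entry to hand out */
		swp_entry_t *slots;
	};

	static unsigned int refill_slots(struct slot_cache *cache)
	{
		cache->cur = 0;
		return 0;		/* stub: would batch-call get_swap_pages() */
	}

	static int slot_cache_get(struct slot_cache *cache, swp_entry_t *out)
	{
		int ret = 0;

		mutex_lock(&cache->alloc_lock);
		if (!cache->nr)
			cache->nr = refill_slots(cache);
		if (cache->nr) {
			*out = cache->slots[cache->cur++];
			cache->nr--;
			ret = 1;
		}
		mutex_unlock(&cache->alloc_lock);
		return ret;
	}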
net/bluetooth/hci_core.c struct discovery_state *cache = &hdev->discovery; cache 1223 net/bluetooth/hci_core.c list_for_each_entry(e, &cache->all, all) { cache 1240 net/bluetooth/hci_core.c BT_DBG("cache %p, copied %d", cache, copied); cache 340 net/bluetooth/hci_debugfs.c struct discovery_state *cache = &hdev->discovery; cache 345 net/bluetooth/hci_debugfs.c list_for_each_entry(e, &cache->all, all) { cache 84 net/core/bpf_sk_storage.c struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE]; cache 187 net/core/bpf_sk_storage.c if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) == cache 189 net/core/bpf_sk_storage.c RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL); cache 269 net/core/bpf_sk_storage.c sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]); cache 290 net/core/bpf_sk_storage.c rcu_assign_pointer(sk_storage->cache[smap->cache_idx], cache 65 net/core/dst_cache.c if (!dst_cache->cache) cache 68 net/core/dst_cache.c return dst_cache_per_cpu_get(dst_cache, this_cpu_ptr(dst_cache->cache)); cache 77 net/core/dst_cache.c if (!dst_cache->cache) cache 80 net/core/dst_cache.c idst = this_cpu_ptr(dst_cache->cache); cache 95 net/core/dst_cache.c if (!dst_cache->cache) cache 98 net/core/dst_cache.c idst = this_cpu_ptr(dst_cache->cache); cache 110 net/core/dst_cache.c if (!dst_cache->cache) cache 113 net/core/dst_cache.c idst = this_cpu_ptr(dst_cache->cache); cache 114 net/core/dst_cache.c dst_cache_per_cpu_dst_set(this_cpu_ptr(dst_cache->cache), dst, cache 126 net/core/dst_cache.c if (!dst_cache->cache) cache 129 net/core/dst_cache.c idst = this_cpu_ptr(dst_cache->cache); cache 142 net/core/dst_cache.c dst_cache->cache = alloc_percpu_gfp(struct dst_cache_pcpu, cache 144 net/core/dst_cache.c if (!dst_cache->cache) cache 156 net/core/dst_cache.c if (!dst_cache->cache) cache 160 net/core/dst_cache.c dst_release(per_cpu_ptr(dst_cache->cache, i)->dst); cache 162 net/core/dst_cache.c free_percpu(dst_cache->cache); cache 1460 net/core/neighbour.c dev->header_ops->cache(n, hh, prot); cache 1476 net/core/neighbour.c if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len)) cache 95 net/core/page_pool.c page = pool->alloc.cache[--pool->alloc.count]; cache 112 net/core/page_pool.c pool->alloc.cache, cache 280 net/core/page_pool.c pool->alloc.cache[pool->alloc.count++] = page; cache 361 net/core/page_pool.c page = pool->alloc.cache[--pool->alloc.count]; cache 184 net/core/skbuff.c struct kmem_cache *cache; cache 190 net/core/skbuff.c cache = (flags & SKB_ALLOC_FCLONE) cache 197 net/core/skbuff.c skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); cache 254 net/core/skbuff.c kmem_cache_free(cache, skb); cache 366 net/ethernet/eth.c .cache = eth_header_cache, cache 278 net/ipv4/arp.c if (dev->header_ops->cache) cache 255 net/ipv4/cipso_ipv4.c secattr->cache = entry->lsm_data; cache 321 net/ipv4/cipso_ipv4.c refcount_inc(&secattr->cache->refcount); cache 322 net/ipv4/cipso_ipv4.c entry->lsm_data = secattr->cache; cache 102 net/ipv4/ipmr.c struct mfc_cache *cache, int local); cache 809 net/ipv4/ipmr.c static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache, cache 814 net/ipv4/ipmr.c cache->mfc_un.res.minvif = MAXVIFS; cache 815 net/ipv4/ipmr.c cache->mfc_un.res.maxvif = 0; cache 816 net/ipv4/ipmr.c memset(cache->mfc_un.res.ttls, 255, MAXVIFS); cache 821 net/ipv4/ipmr.c cache->mfc_un.res.ttls[vifi] = ttls[vifi]; cache 822 net/ipv4/ipmr.c if (cache->mfc_un.res.minvif > vifi) cache 823 net/ipv4/ipmr.c cache->mfc_un.res.minvif = vifi; cache 
824 net/ipv4/ipmr.c if (cache->mfc_un.res.maxvif <= vifi) cache 825 net/ipv4/ipmr.c cache->mfc_un.res.maxvif = vifi + 1; cache 828 net/ipv4/ipmr.c cache->mfc_un.res.lastuse = jiffies; cache 1298 net/ipv4/ipmr.c struct mfc_cache *cache; cache 1322 net/ipv4/ipmr.c cache = (struct mfc_cache *)c; cache 1323 net/ipv4/ipmr.c call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache, cache 1325 net/ipv4/ipmr.c mroute_netlink_event(mrt, cache, RTM_DELROUTE); cache 1335 net/ipv4/ipmr.c cache = (struct mfc_cache *)c; cache 1336 net/ipv4/ipmr.c mroute_netlink_event(mrt, cache, RTM_DELROUTE); cache 1337 net/ipv4/ipmr.c ipmr_destroy_unres(mrt, cache); cache 2098 net/ipv4/ipmr.c struct mfc_cache *cache; cache 2151 net/ipv4/ipmr.c cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); cache 2152 net/ipv4/ipmr.c if (!cache) { cache 2156 net/ipv4/ipmr.c cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, cache 2161 net/ipv4/ipmr.c if (!cache) { cache 2186 net/ipv4/ipmr.c ip_mr_forward(net, mrt, dev, skb, cache, local); cache 2261 net/ipv4/ipmr.c struct mfc_cache *cache; cache 2270 net/ipv4/ipmr.c cache = ipmr_cache_find(mrt, saddr, daddr); cache 2271 net/ipv4/ipmr.c if (!cache && skb->dev) { cache 2275 net/ipv4/ipmr.c cache = ipmr_cache_find_any(mrt, daddr, vif); cache 2277 net/ipv4/ipmr.c if (!cache) { cache 2315 net/ipv4/ipmr.c err = mr_fill_mroute(mrt, skb, &cache->_c, rtm); cache 2544 net/ipv4/ipmr.c struct mfc_cache *cache; cache 2566 net/ipv4/ipmr.c cache = ipmr_cache_find(mrt, src, grp); cache 2568 net/ipv4/ipmr.c if (!cache) { cache 2580 net/ipv4/ipmr.c nlh->nlmsg_seq, cache, cache 2998 net/ipv4/ipmr.c if (it->cache != &mrt->mfc_unres_queue) { cache 154 net/ipv4/ipmr_base.c it->cache = &mrt->mfc_cache_list; cache 161 net/ipv4/ipmr_base.c it->cache = &mrt->mfc_unres_queue; cache 162 net/ipv4/ipmr_base.c list_for_each_entry(mfc, it->cache, list) cache 167 net/ipv4/ipmr_base.c it->cache = NULL; cache 185 net/ipv4/ipmr_base.c if (c->list.next != it->cache) cache 188 net/ipv4/ipmr_base.c if (it->cache == &mrt->mfc_unres_queue) cache 193 net/ipv4/ipmr_base.c it->cache = &mrt->mfc_unres_queue; cache 196 net/ipv4/ipmr_base.c if (!list_empty(it->cache)) cache 197 net/ipv4/ipmr_base.c return list_first_entry(it->cache, struct mr_mfc, list); cache 201 net/ipv4/ipmr_base.c it->cache = NULL; cache 1653 net/ipv4/tcp_input.c static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) cache 1655 net/ipv4/tcp_input.c return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); cache 1667 net/ipv4/tcp_input.c struct tcp_sack_block *cache; cache 1759 net/ipv4/tcp_input.c cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); cache 1761 net/ipv4/tcp_input.c cache = tp->recv_sack_cache; cache 1763 net/ipv4/tcp_input.c while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && cache 1764 net/ipv4/tcp_input.c !cache->end_seq) cache 1765 net/ipv4/tcp_input.c cache++; cache 1778 net/ipv4/tcp_input.c while (tcp_sack_cache_ok(tp, cache) && cache 1779 net/ipv4/tcp_input.c !before(start_seq, cache->end_seq)) cache 1780 net/ipv4/tcp_input.c cache++; cache 1783 net/ipv4/tcp_input.c if (tcp_sack_cache_ok(tp, cache) && !dup_sack && cache 1784 net/ipv4/tcp_input.c after(end_seq, cache->start_seq)) { cache 1787 net/ipv4/tcp_input.c if (before(start_seq, cache->start_seq)) { cache 1792 net/ipv4/tcp_input.c cache->start_seq, cache 1797 net/ipv4/tcp_input.c if (!after(end_seq, cache->end_seq)) cache 1802 net/ipv4/tcp_input.c cache->end_seq); cache 1805 
net/ipv4/tcp_input.c if (tcp_highest_sack_seq(tp) == cache->end_seq) { cache 1810 net/ipv4/tcp_input.c cache++; cache 1814 net/ipv4/tcp_input.c skb = tcp_sacktag_skip(skb, sk, cache->end_seq); cache 1816 net/ipv4/tcp_input.c cache++; cache 217 net/ipv6/calipso.c secattr->cache = entry->lsm_data; cache 285 net/ipv6/calipso.c refcount_inc(&secattr->cache->refcount); cache 286 net/ipv6/calipso.c entry->lsm_data = secattr->cache; cache 87 net/ipv6/ip6mr.c struct mfc6_cache *cache); cache 483 net/ipv6/ip6mr.c if (it->cache != &mrt->mfc_unres_queue) { cache 834 net/ipv6/ip6mr.c struct mr_mfc *cache, cache 839 net/ipv6/ip6mr.c cache->mfc_un.res.minvif = MAXMIFS; cache 840 net/ipv6/ip6mr.c cache->mfc_un.res.maxvif = 0; cache 841 net/ipv6/ip6mr.c memset(cache->mfc_un.res.ttls, 255, MAXMIFS); cache 846 net/ipv6/ip6mr.c cache->mfc_un.res.ttls[vifi] = ttls[vifi]; cache 847 net/ipv6/ip6mr.c if (cache->mfc_un.res.minvif > vifi) cache 848 net/ipv6/ip6mr.c cache->mfc_un.res.minvif = vifi; cache 849 net/ipv6/ip6mr.c if (cache->mfc_un.res.maxvif <= vifi) cache 850 net/ipv6/ip6mr.c cache->mfc_un.res.maxvif = vifi + 1; cache 853 net/ipv6/ip6mr.c cache->mfc_un.res.lastuse = jiffies; cache 2185 net/ipv6/ip6mr.c struct mfc6_cache *cache; cache 2214 net/ipv6/ip6mr.c cache = ip6mr_cache_find(mrt, cache 2216 net/ipv6/ip6mr.c if (!cache) { cache 2220 net/ipv6/ip6mr.c cache = ip6mr_cache_find_any(mrt, cache 2228 net/ipv6/ip6mr.c if (!cache) { cache 2243 net/ipv6/ip6mr.c ip6_mr_forward(net, mrt, dev, skb, cache); cache 2255 net/ipv6/ip6mr.c struct mfc6_cache *cache; cache 2263 net/ipv6/ip6mr.c cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr); cache 2264 net/ipv6/ip6mr.c if (!cache && skb->dev) { cache 2268 net/ipv6/ip6mr.c cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr, cache 2272 net/ipv6/ip6mr.c if (!cache) { cache 2315 net/ipv6/ip6mr.c err = mr_fill_mroute(mrt, skb, &cache->_c, rtm); cache 357 net/ipv6/ndisc.c if (dev->header_ops->cache) cache 31 net/ipv6/seg6_iptunnel.c struct dst_cache cache; cache 301 net/ipv6/seg6_iptunnel.c dst = dst_cache_get(&slwt->cache); cache 311 net/ipv6/seg6_iptunnel.c dst_cache_set_ip6(&slwt->cache, dst, cache 340 net/ipv6/seg6_iptunnel.c dst = dst_cache_get(&slwt->cache); cache 362 net/ipv6/seg6_iptunnel.c dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr); cache 438 net/ipv6/seg6_iptunnel.c err = dst_cache_init(&slwt->cache, GFP_ATOMIC); cache 461 net/ipv6/seg6_iptunnel.c dst_cache_destroy(&seg6_lwt_lwtunnel(lwt)->cache); cache 196 net/netfilter/nf_conntrack_ecache.c events = xchg(&e->cache, 0); cache 1208 net/openvswitch/conntrack.c struct nf_conntrack_ecache *cache = nf_ct_ecache_find(ct); cache 1210 net/openvswitch/conntrack.c if (cache) cache 1211 net/openvswitch/conntrack.c cache->ctmask = info->eventmask; cache 88 net/rds/ib_recv.c static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache) cache 92 net/rds/ib_recv.c tmp = xchg(&cache->xfer, NULL); cache 94 net/rds/ib_recv.c if (cache->ready) cache 95 net/rds/ib_recv.c list_splice_entire_tail(tmp, cache->ready); cache 97 net/rds/ib_recv.c cache->ready = tmp; cache 101 net/rds/ib_recv.c static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp) cache 106 net/rds/ib_recv.c cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp); cache 107 net/rds/ib_recv.c if (!cache->percpu) cache 111 net/rds/ib_recv.c head = per_cpu_ptr(cache->percpu, cpu); cache 115 net/rds/ib_recv.c cache->xfer = NULL; cache 116 net/rds/ib_recv.c cache->ready = NULL; cache 135 
net/rds/ib_recv.c static void rds_ib_cache_splice_all_lists(struct rds_ib_refill_cache *cache, cache 142 net/rds/ib_recv.c head = per_cpu_ptr(cache->percpu, cpu); cache 149 net/rds/ib_recv.c if (cache->ready) { cache 150 net/rds/ib_recv.c list_splice_entire_tail(cache->ready, caller_list); cache 151 net/rds/ib_recv.c cache->ready = NULL; cache 187 net/rds/ib_recv.c struct rds_ib_refill_cache *cache); cache 188 net/rds/ib_recv.c static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache); cache 478 net/rds/ib_recv.c struct rds_ib_refill_cache *cache) cache 485 net/rds/ib_recv.c chpfirst = __this_cpu_read(cache->percpu->first); cache 491 net/rds/ib_recv.c __this_cpu_write(cache->percpu->first, new_item); cache 492 net/rds/ib_recv.c __this_cpu_inc(cache->percpu->count); cache 494 net/rds/ib_recv.c if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) cache 504 net/rds/ib_recv.c old = xchg(&cache->xfer, NULL); cache 507 net/rds/ib_recv.c old = cmpxchg(&cache->xfer, NULL, chpfirst); cache 511 net/rds/ib_recv.c __this_cpu_write(cache->percpu->first, NULL); cache 512 net/rds/ib_recv.c __this_cpu_write(cache->percpu->count, 0); cache 517 net/rds/ib_recv.c static struct list_head *rds_ib_recv_cache_get(struct rds_ib_refill_cache *cache) cache 519 net/rds/ib_recv.c struct list_head *head = cache->ready; cache 523 net/rds/ib_recv.c cache->ready = head->next; cache 526 net/rds/ib_recv.c cache->ready = NULL; cache 430 net/sunrpc/auth.c rpcauth_clear_credcache(struct rpc_cred_cache *cache) cache 435 net/sunrpc/auth.c unsigned int hashsize = 1U << cache->hashbits; cache 439 net/sunrpc/auth.c spin_lock(&cache->lock); cache 441 net/sunrpc/auth.c head = &cache->hashtable[i]; cache 450 net/sunrpc/auth.c spin_unlock(&cache->lock); cache 461 net/sunrpc/auth.c struct rpc_cred_cache *cache = auth->au_credcache; cache 463 net/sunrpc/auth.c if (cache) { cache 465 net/sunrpc/auth.c rpcauth_clear_credcache(cache); cache 466 net/sunrpc/auth.c kfree(cache->hashtable); cache 467 net/sunrpc/auth.c kfree(cache); cache 570 net/sunrpc/auth.c struct rpc_cred_cache *cache = auth->au_credcache; cache 575 net/sunrpc/auth.c nr = auth->au_ops->hash_cred(acred, cache->hashbits); cache 578 net/sunrpc/auth.c hlist_for_each_entry_rcu(entry, &cache->hashtable[nr], cr_hash) { cache 596 net/sunrpc/auth.c spin_lock(&cache->lock); cache 597 net/sunrpc/auth.c hlist_for_each_entry(entry, &cache->hashtable[nr], cr_hash) { cache 608 net/sunrpc/auth.c hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]); cache 611 net/sunrpc/auth.c spin_unlock(&cache->lock); cache 92 net/tipc/monitor.c struct tipc_mon_domain cache; cache 243 net/tipc/monitor.c struct tipc_mon_domain *cache = &mon->cache; cache 263 net/tipc/monitor.c cache->members[i] = htonl(peer->addr); cache 269 net/tipc/monitor.c cache->len = htons(dom->len); cache 270 net/tipc/monitor.c cache->gen = htons(dom->gen); cache 271 net/tipc/monitor.c cache->member_cnt = htons(member_cnt); cache 272 net/tipc/monitor.c cache->up_map = cpu_to_be64(dom->up_map); cache 551 net/tipc/monitor.c len = ntohs(mon->cache.len); cache 553 net/tipc/monitor.c memcpy(data, &mon->cache, len); cache 162 net/tipc/udp_media.c struct udp_media_addr *dst, struct dst_cache *cache) cache 168 net/tipc/udp_media.c ndst = dst_cache_get(cache); cache 184 net/tipc/udp_media.c dst_cache_set_ip4(cache, &rt->dst, fl.saddr); cache 207 net/tipc/udp_media.c dst_cache_set_ip6(cache, ndst, &fl6.saddr); cache 3555 security/selinux/ss/services.c secattr->cache = 
netlbl_secattr_cache_alloc(GFP_ATOMIC); cache 3556 security/selinux/ss/services.c if (secattr->cache == NULL) { cache 3562 security/selinux/ss/services.c secattr->cache->free = kfree; cache 3563 security/selinux/ss/services.c secattr->cache->data = sid_cache; cache 3600 security/selinux/ss/services.c *sid = *(u32 *)secattr->cache->data; cache 47 sound/aoa/codecs/onyx.c u8 cache[16]; cache 70 sound/aoa/codecs/onyx.c *value = onyx->cache[reg-FIRSTREGISTER]; cache 79 sound/aoa/codecs/onyx.c onyx->cache[ONYX_REG_CONTROL-FIRSTREGISTER] = *value; cache 89 sound/aoa/codecs/onyx.c onyx->cache[reg-FIRSTREGISTER] = value; cache 593 sound/aoa/codecs/onyx.c regs[i] = onyx->cache[register_map[i]-FIRSTREGISTER]; cache 589 sound/pci/emu10k1/emumixer.c unsigned int val, cache; cache 591 sound/pci/emu10k1/emumixer.c cache = emu->emu1010.adc_pads; cache 593 sound/pci/emu10k1/emumixer.c cache = cache | mask; cache 595 sound/pci/emu10k1/emumixer.c cache = cache & ~mask; cache 596 sound/pci/emu10k1/emumixer.c if (cache != emu->emu1010.adc_pads) { cache 597 sound/pci/emu10k1/emumixer.c snd_emu1010_fpga_write(emu, EMU_HANA_ADC_PADS, cache ); cache 598 sound/pci/emu10k1/emumixer.c emu->emu1010.adc_pads = cache; cache 637 sound/pci/emu10k1/emumixer.c unsigned int val, cache; cache 639 sound/pci/emu10k1/emumixer.c cache = emu->emu1010.dac_pads; cache 641 sound/pci/emu10k1/emumixer.c cache = cache | mask; cache 643 sound/pci/emu10k1/emumixer.c cache = cache & ~mask; cache 644 sound/pci/emu10k1/emumixer.c if (cache != emu->emu1010.dac_pads) { cache 645 sound/pci/emu10k1/emumixer.c snd_emu1010_fpga_write(emu, EMU_HANA_DAC_PADS, cache ); cache 646 sound/pci/emu10k1/emumixer.c emu->emu1010.dac_pads = cache; cache 705 sound/pci/rme9652/hdsp.c const u32 *cache; cache 708 sound/pci/rme9652/hdsp.c cache = hdsp->fw_uploaded; cache 712 sound/pci/rme9652/hdsp.c cache = (u32 *)hdsp->firmware->data; cache 713 sound/pci/rme9652/hdsp.c if (!cache) cache 734 sound/pci/rme9652/hdsp.c hdsp_write(hdsp, HDSP_fifoData, cache[i]); cache 35 sound/soc/codecs/sigmadsp.c uint8_t cache[]; cache 143 sound/soc/codecs/sigmadsp.c memcpy(ctrl->cache, data, ctrl->num_bytes); cache 162 sound/soc/codecs/sigmadsp.c ret = sigmadsp_read(sigmadsp, ctrl->addr, ctrl->cache, cache 168 sound/soc/codecs/sigmadsp.c memcpy(ucontrol->value.bytes.data, ctrl->cache, cache 684 sound/soc/codecs/sigmadsp.c sigmadsp_ctrl_write(sigmadsp, ctrl, ctrl->cache); cache 164 sound/soc/codecs/tlv320dac33.c u8 *cache = dac33->reg_cache; cache 168 sound/soc/codecs/tlv320dac33.c return cache[reg]; cache 175 sound/soc/codecs/tlv320dac33.c u8 *cache = dac33->reg_cache; cache 179 sound/soc/codecs/tlv320dac33.c cache[reg] = value; cache 70 sound/soc/codecs/tscs454.c u8 cache[COEFF_RAM_SIZE]; cache 75 sound/soc/codecs/tscs454.c static inline void init_coeff_ram_cache(u8 *cache) cache 85 sound/soc/codecs/tscs454.c cache[((norm_addrs[i] + 1) * COEFF_SIZE) - 1] = 0x40; cache 90 sound/soc/codecs/tscs454.c init_coeff_ram_cache(ram->cache); cache 320 sound/soc/codecs/tscs454.c coeff_ram = tscs454->dac_ram.cache; cache 323 sound/soc/codecs/tscs454.c coeff_ram = tscs454->spk_ram.cache; cache 326 sound/soc/codecs/tscs454.c coeff_ram = tscs454->sub_ram.cache; cache 413 sound/soc/codecs/tscs454.c coeff_ram = tscs454->dac_ram.cache; cache 420 sound/soc/codecs/tscs454.c coeff_ram = tscs454->spk_ram.cache; cache 427 sound/soc/codecs/tscs454.c coeff_ram = tscs454->sub_ram.cache; cache 481 sound/soc/codecs/tscs454.c ret = write_coeff_ram(component, tscs454->dac_ram.cache, cache 493 
sound/soc/codecs/tscs454.c ret = write_coeff_ram(component, tscs454->spk_ram.cache, cache 505 sound/soc/codecs/tscs454.c ret = write_coeff_ram(component, tscs454->sub_ram.cache, cache 65 sound/soc/codecs/uda1380.c u16 *cache = uda1380->reg_cache; cache 71 sound/soc/codecs/uda1380.c return cache[reg]; cache 81 sound/soc/codecs/uda1380.c u16 *cache = uda1380->reg_cache; cache 85 sound/soc/codecs/uda1380.c if ((reg >= 0x10) && (cache[reg] != value)) cache 87 sound/soc/codecs/uda1380.c cache[reg] = value; cache 138 sound/soc/codecs/uda1380.c u16 *cache = uda1380->reg_cache; cache 143 sound/soc/codecs/uda1380.c data[1] = (cache[reg] & 0xff00) >> 8; cache 144 sound/soc/codecs/uda1380.c data[2] = cache[reg] & 0x00ff; cache 607 sound/soc/codecs/wm_adsp.c void *cache; cache 1075 sound/soc/codecs/wm_adsp.c memcpy(ctl->cache, p, ctl->len); cache 1096 sound/soc/codecs/wm_adsp.c if (copy_from_user(ctl->cache, bytes, size)) { cache 1101 sound/soc/codecs/wm_adsp.c ret = wm_coeff_write_control(ctl, ctl->cache, size); cache 1184 sound/soc/codecs/wm_adsp.c ret = wm_coeff_read_control(ctl, ctl->cache, ctl->len); cache 1186 sound/soc/codecs/wm_adsp.c memcpy(p, ctl->cache, ctl->len); cache 1206 sound/soc/codecs/wm_adsp.c ret = wm_coeff_read_control(ctl, ctl->cache, size); cache 1211 sound/soc/codecs/wm_adsp.c ret = wm_coeff_read_control(ctl, ctl->cache, size); cache 1214 sound/soc/codecs/wm_adsp.c if (!ret && copy_to_user(bytes, ctl->cache, size)) cache 1340 sound/soc/codecs/wm_adsp.c ret = wm_coeff_read_control(ctl, ctl->cache, ctl->len); cache 1358 sound/soc/codecs/wm_adsp.c ret = wm_coeff_write_control(ctl, ctl->cache, ctl->len); cache 1400 sound/soc/codecs/wm_adsp.c kfree(ctl->cache); cache 1485 sound/soc/codecs/wm_adsp.c ctl->cache = kzalloc(ctl->len, GFP_KERNEL); cache 1486 sound/soc/codecs/wm_adsp.c if (!ctl->cache) { cache 1510 sound/soc/codecs/wm_adsp.c kfree(ctl->cache); cache 152 sound/soc/codecs/wm_hubs.c struct wm_hubs_dcs_cache *cache; cache 161 sound/soc/codecs/wm_hubs.c list_for_each_entry(cache, &hubs->dcs_cache, list) { cache 162 sound/soc/codecs/wm_hubs.c if (cache->left != left || cache->right != right) cache 165 sound/soc/codecs/wm_hubs.c *entry = cache; cache 175 sound/soc/codecs/wm_hubs.c struct wm_hubs_dcs_cache *cache; cache 180 sound/soc/codecs/wm_hubs.c cache = devm_kzalloc(component->dev, sizeof(*cache), GFP_KERNEL); cache 181 sound/soc/codecs/wm_hubs.c if (!cache) cache 184 sound/soc/codecs/wm_hubs.c cache->left = snd_soc_component_read32(component, WM8993_LEFT_OUTPUT_VOLUME); cache 185 sound/soc/codecs/wm_hubs.c cache->left &= WM8993_HPOUT1L_VOL_MASK; cache 187 sound/soc/codecs/wm_hubs.c cache->right = snd_soc_component_read32(component, WM8993_RIGHT_OUTPUT_VOLUME); cache 188 sound/soc/codecs/wm_hubs.c cache->right &= WM8993_HPOUT1R_VOL_MASK; cache 190 sound/soc/codecs/wm_hubs.c cache->dcs_cfg = dcs_cfg; cache 192 sound/soc/codecs/wm_hubs.c list_add_tail(&cache->list, &hubs->dcs_cache); cache 244 sound/soc/codecs/wm_hubs.c struct wm_hubs_dcs_cache *cache; cache 260 sound/soc/codecs/wm_hubs.c wm_hubs_dcs_cache_get(component, &cache)) { cache 262 sound/soc/codecs/wm_hubs.c cache->dcs_cfg, cache->left, cache->right); cache 263 sound/soc/codecs/wm_hubs.c snd_soc_component_write(component, dcs_reg, cache->dcs_cfg); cache 393 tools/perf/builtin-probe.c struct probe_cache *cache; cache 406 tools/perf/builtin-probe.c cache = probe_cache__new(nd->s, NULL); cache 407 tools/perf/builtin-probe.c if (!cache) cache 409 tools/perf/builtin-probe.c if (probe_cache__filter_purge(cache, filter) < 0 
|| cache 410 tools/perf/builtin-probe.c probe_cache__commit(cache) < 0) cache 412 tools/perf/builtin-probe.c probe_cache__delete(cache); cache 429 tools/perf/builtin-probe.c if (probe_conf.cache) cache 574 tools/perf/builtin-probe.c OPT_BOOLEAN(0, "cache", &probe_conf.cache, "Manipulate probe cache"), cache 62 tools/perf/tests/sdt.c struct probe_cache *cache = probe_cache__new(target, NULL); cache 65 tools/perf/tests/sdt.c if (!cache) { cache 70 tools/perf/tests/sdt.c if (!probe_cache__find_by_name(cache, group, event)) { cache 74 tools/perf/tests/sdt.c probe_cache__delete(cache); cache 594 tools/perf/util/build-id.c struct probe_cache *cache; cache 598 tools/perf/util/build-id.c cache = probe_cache__new(sbuild_id, nsi); cache 599 tools/perf/util/build-id.c if (!cache) cache 603 tools/perf/util/build-id.c ret = probe_cache__scan_sdt(cache, realname); cache 607 tools/perf/util/build-id.c if (probe_cache__commit(cache) < 0) cache 610 tools/perf/util/build-id.c probe_cache__delete(cache); cache 756 tools/perf/util/dso.c struct rb_root *root = &dso->data.cache; cache 761 tools/perf/util/dso.c struct dso_cache *cache; cache 763 tools/perf/util/dso.c cache = rb_entry(next, struct dso_cache, rb_node); cache 764 tools/perf/util/dso.c next = rb_next(&cache->rb_node); cache 765 tools/perf/util/dso.c rb_erase(&cache->rb_node, root); cache 766 tools/perf/util/dso.c free(cache); cache 773 tools/perf/util/dso.c const struct rb_root *root = &dso->data.cache; cache 776 tools/perf/util/dso.c struct dso_cache *cache; cache 782 tools/perf/util/dso.c cache = rb_entry(parent, struct dso_cache, rb_node); cache 783 tools/perf/util/dso.c end = cache->offset + DSO__DATA_CACHE_SIZE; cache 785 tools/perf/util/dso.c if (offset < cache->offset) cache 790 tools/perf/util/dso.c return cache; cache 799 tools/perf/util/dso.c struct rb_root *root = &dso->data.cache; cache 802 tools/perf/util/dso.c struct dso_cache *cache; cache 810 tools/perf/util/dso.c cache = rb_entry(parent, struct dso_cache, rb_node); cache 811 tools/perf/util/dso.c end = cache->offset + DSO__DATA_CACHE_SIZE; cache 813 tools/perf/util/dso.c if (offset < cache->offset) cache 824 tools/perf/util/dso.c cache = NULL; cache 827 tools/perf/util/dso.c return cache; cache 831 tools/perf/util/dso.c dso_cache__memcpy(struct dso_cache *cache, u64 offset, cache 834 tools/perf/util/dso.c u64 cache_offset = offset - cache->offset; cache 835 tools/perf/util/dso.c u64 cache_size = min(cache->size - cache_offset, size); cache 837 tools/perf/util/dso.c memcpy(data, cache->data + cache_offset, cache_size); cache 871 tools/perf/util/dso.c struct dso_cache *cache; cache 875 tools/perf/util/dso.c cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE); cache 876 tools/perf/util/dso.c if (!cache) cache 880 tools/perf/util/dso.c ret = bpf_read(dso, cache_offset, cache->data); cache 882 tools/perf/util/dso.c ret = file_read(dso, machine, cache_offset, cache->data); cache 885 tools/perf/util/dso.c cache->offset = cache_offset; cache 886 tools/perf/util/dso.c cache->size = ret; cache 888 tools/perf/util/dso.c old = dso_cache__insert(dso, cache); cache 891 tools/perf/util/dso.c free(cache); cache 892 tools/perf/util/dso.c cache = old; cache 895 tools/perf/util/dso.c ret = dso_cache__memcpy(cache, offset, data, size); cache 899 tools/perf/util/dso.c free(cache); cache 907 tools/perf/util/dso.c struct dso_cache *cache; cache 909 tools/perf/util/dso.c cache = dso_cache__find(dso, offset); cache 910 tools/perf/util/dso.c if (cache) cache 911 tools/perf/util/dso.c return 
dso_cache__memcpy(cache, offset, data, size); cache 1174 tools/perf/util/dso.c dso->data.cache = RB_ROOT; cache 178 tools/perf/util/dso.h struct rb_root cache; cache 290 tools/perf/util/env.c void cpu_cache_level__free(struct cpu_cache_level *cache) cache 292 tools/perf/util/env.c zfree(&cache->type); cache 293 tools/perf/util/env.c zfree(&cache->map); cache 294 tools/perf/util/env.c zfree(&cache->size); cache 109 tools/perf/util/env.h void cpu_cache_level__free(struct cpu_cache_level *cache); cache 1031 tools/perf/util/header.c static int cpu_cache_level__read(struct cpu_cache_level *cache, u32 cpu, u16 level) cache 1044 tools/perf/util/header.c if (sysfs__read_int(file, (int *) &cache->level)) cache 1048 tools/perf/util/header.c if (sysfs__read_int(file, (int *) &cache->line_size)) cache 1052 tools/perf/util/header.c if (sysfs__read_int(file, (int *) &cache->sets)) cache 1056 tools/perf/util/header.c if (sysfs__read_int(file, (int *) &cache->ways)) cache 1060 tools/perf/util/header.c if (sysfs__read_str(file, &cache->type, &len)) cache 1063 tools/perf/util/header.c cache->type[len] = 0; cache 1064 tools/perf/util/header.c cache->type = strim(cache->type); cache 1067 tools/perf/util/header.c if (sysfs__read_str(file, &cache->size, &len)) { cache 1068 tools/perf/util/header.c zfree(&cache->type); cache 1072 tools/perf/util/header.c cache->size[len] = 0; cache 1073 tools/perf/util/header.c cache->size = strim(cache->size); cache 1076 tools/perf/util/header.c if (sysfs__read_str(file, &cache->map, &len)) { cache 1077 tools/perf/util/header.c zfree(&cache->size); cache 1078 tools/perf/util/header.c zfree(&cache->type); cache 1082 tools/perf/util/header.c cache->map[len] = 0; cache 1083 tools/perf/util/header.c cache->map = strim(cache->map); cache 2861 tools/perf/util/header.c FEAT_OPN(CACHE, cache, true), cache 102 tools/perf/util/ordered-events.c struct list_head *cache = &oe->cache; cache 140 tools/perf/util/ordered-events.c if (!list_empty(cache)) { cache 141 tools/perf/util/ordered-events.c new = list_entry(cache->next, struct ordered_event, list); cache 188 tools/perf/util/ordered-events.c list_move(&event->list, &oe->cache); cache 364 tools/perf/util/ordered-events.c INIT_LIST_HEAD(&oe->cache); cache 42 tools/perf/util/ordered-events.h struct list_head cache; cache 2571 tools/perf/util/probe-event.c if (probe_conf.cache) cache 2762 tools/perf/util/probe-event.c struct probe_cache *cache = NULL; cache 2807 tools/perf/util/probe-event.c if (ret == 0 && probe_conf.cache) { cache 2808 tools/perf/util/probe-event.c cache = probe_cache__new(pev->target, pev->nsi); cache 2809 tools/perf/util/probe-event.c if (!cache || cache 2810 tools/perf/util/probe-event.c probe_cache__add_entry(cache, pev, tevs, ntevs) < 0 || cache 2811 tools/perf/util/probe-event.c probe_cache__commit(cache) < 0) cache 2813 tools/perf/util/probe-event.c probe_cache__delete(cache); cache 3176 tools/perf/util/probe-event.c struct probe_cache *cache; cache 3182 tools/perf/util/probe-event.c cache = probe_cache__new(target, pev->nsi); cache 3184 tools/perf/util/probe-event.c if (!cache) cache 3187 tools/perf/util/probe-event.c for_each_probe_cache_entry(entry, cache) { cache 3201 tools/perf/util/probe-event.c probe_cache__delete(cache); cache 3259 tools/perf/util/probe-event.c struct probe_cache *cache; cache 3272 tools/perf/util/probe-event.c cache = probe_cache__new(pev->target, pev->nsi); cache 3273 tools/perf/util/probe-event.c if (!cache) cache 3276 tools/perf/util/probe-event.c entry = probe_cache__find(cache, pev); 
cache 3309 tools/perf/util/probe-event.c probe_cache__delete(cache); cache 17 tools/perf/util/probe-event.h bool cache; cache 119 tools/testing/selftests/x86/test_vdso.c void* cache) cache 121 tools/testing/selftests/x86/test_vdso.c return syscall(__NR_getcpu, cpu, node, cache); cache 175 tools/testing/selftests/x86/test_vsyscall.c void* cache) cache 177 tools/testing/selftests/x86/test_vsyscall.c return syscall(SYS_getcpu, cpu, node, cache); cache 128 virt/kvm/arm/mmu.c static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, cache 134 virt/kvm/arm/mmu.c if (cache->nobjs >= min) cache 136 virt/kvm/arm/mmu.c while (cache->nobjs < max) { cache 140 virt/kvm/arm/mmu.c cache->objects[cache->nobjs++] = page; cache 1016 virt/kvm/arm/mmu.c static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, cache 1024 virt/kvm/arm/mmu.c if (!cache) cache 1026 virt/kvm/arm/mmu.c pud = mmu_memory_cache_alloc(cache); cache 1034 virt/kvm/arm/mmu.c static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, cache 1040 virt/kvm/arm/mmu.c pud = stage2_get_pud(kvm, cache, addr); cache 1045 virt/kvm/arm/mmu.c if (!cache) cache 1047 virt/kvm/arm/mmu.c pmd = mmu_memory_cache_alloc(cache); cache 1056 virt/kvm/arm/mmu.c *cache, phys_addr_t addr, const pmd_t *new_pmd) cache 1061 virt/kvm/arm/mmu.c pmd = stage2_get_pmd(kvm, cache, addr); cache 1119 virt/kvm/arm/mmu.c static int stage2_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, cache 1125 virt/kvm/arm/mmu.c pudp = stage2_get_pud(kvm, cache, addr); cache 1221 virt/kvm/arm/mmu.c static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, cache 1231 virt/kvm/arm/mmu.c VM_BUG_ON(logging_active && !cache); cache 1234 virt/kvm/arm/mmu.c pud = stage2_get_pud(kvm, cache, addr); cache 1251 virt/kvm/arm/mmu.c if (!cache) cache 1253 virt/kvm/arm/mmu.c pmd = mmu_memory_cache_alloc(cache); cache 1276 virt/kvm/arm/mmu.c if (!cache) cache 1278 virt/kvm/arm/mmu.c pte = mmu_memory_cache_alloc(cache); cache 1345 virt/kvm/arm/mmu.c struct kvm_mmu_memory_cache cache = { 0, }; cache 1356 virt/kvm/arm/mmu.c ret = mmu_topup_memory_cache(&cache, cache 1362 virt/kvm/arm/mmu.c ret = stage2_set_pte(kvm, &cache, addr, &pte, cache 1372 virt/kvm/arm/mmu.c mmu_free_memory_cache(&cache); cache 205 virt/kvm/arm/trace.h TP_PROTO(unsigned long vcpu_pc, bool cache), cache 206 virt/kvm/arm/trace.h TP_ARGS(vcpu_pc, cache), cache 210 virt/kvm/arm/trace.h __field( bool, cache ) cache 215 virt/kvm/arm/trace.h __entry->cache = cache; cache 219 virt/kvm/arm/trace.h __entry->vcpu_pc, __entry->cache ? 
"on" : "off") cache 1818 virt/kvm/kvm_main.c void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache) cache 1823 virt/kvm/kvm_main.c if (cache) cache 1824 virt/kvm/kvm_main.c cache->pfn = cache->gfn = 0; cache 1833 virt/kvm/kvm_main.c struct gfn_to_pfn_cache *cache, u64 gen) cache 1835 virt/kvm/kvm_main.c kvm_release_pfn(cache->pfn, cache->dirty, cache); cache 1837 virt/kvm/kvm_main.c cache->pfn = gfn_to_pfn_memslot(slot, gfn); cache 1838 virt/kvm/kvm_main.c cache->gfn = gfn; cache 1839 virt/kvm/kvm_main.c cache->dirty = false; cache 1840 virt/kvm/kvm_main.c cache->generation = gen; cache 1845 virt/kvm/kvm_main.c struct gfn_to_pfn_cache *cache, cache 1857 virt/kvm/kvm_main.c if (cache) { cache 1858 virt/kvm/kvm_main.c if (!cache->pfn || cache->gfn != gfn || cache 1859 virt/kvm/kvm_main.c cache->generation != gen) { cache 1862 virt/kvm/kvm_main.c kvm_cache_gfn_to_pfn(slot, gfn, cache, gen); cache 1864 virt/kvm/kvm_main.c pfn = cache->pfn; cache 1899 virt/kvm/kvm_main.c struct gfn_to_pfn_cache *cache, bool atomic) cache 1902 virt/kvm/kvm_main.c cache, atomic); cache 1915 virt/kvm/kvm_main.c struct gfn_to_pfn_cache *cache, cache 1940 virt/kvm/kvm_main.c if (cache) cache 1941 virt/kvm/kvm_main.c cache->dirty |= dirty; cache 1950 virt/kvm/kvm_main.c struct gfn_to_pfn_cache *cache, bool dirty, bool atomic) cache 1953 virt/kvm/kvm_main.c cache, dirty, atomic);