/linux-4.1.27/drivers/md/ |
D | dm-cache-target.c | 178 struct cache { struct 304 struct cache *cache; member 311 struct cache *cache; member 341 static void wake_worker(struct cache *cache) in wake_worker() argument 343 queue_work(cache->wq, &cache->worker); in wake_worker() 348 static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache) in alloc_prison_cell() argument 351 return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT); in alloc_prison_cell() 354 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell) in free_prison_cell() argument 356 dm_bio_prison_free_cell(cache->prison, cell); in free_prison_cell() 359 static struct dm_cache_migration *alloc_migration(struct cache *cache) in alloc_migration() argument [all …]
|
D | Makefile | 14 dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o 15 dm-cache-mq-y += dm-cache-policy-mq.o 16 dm-cache-cleaner-y += dm-cache-policy-cleaner.o 55 obj-$(CONFIG_DM_CACHE) += dm-cache.o 56 obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o 57 obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
|
/linux-4.1.27/fs/btrfs/tests/ |
D | free-space-tests.c | 27 struct btrfs_block_group_cache *cache; in init_test_block_group() local 29 cache = kzalloc(sizeof(*cache), GFP_NOFS); in init_test_block_group() 30 if (!cache) in init_test_block_group() 32 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), in init_test_block_group() 34 if (!cache->free_space_ctl) { in init_test_block_group() 35 kfree(cache); in init_test_block_group() 39 cache->key.objectid = 0; in init_test_block_group() 40 cache->key.offset = 1024 * 1024 * 1024; in init_test_block_group() 41 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; in init_test_block_group() 42 cache->sectorsize = 4096; in init_test_block_group() [all …]
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | cacheinfo.c | 43 struct cache *cache; member 117 struct cache { struct 123 struct cache *next_local; /* next cache of >= level */ argument 138 static const char *cache_type_string(const struct cache *cache) in cache_type_string() argument 140 return cache_type_info[cache->type].name; in cache_type_string() 143 static void cache_init(struct cache *cache, int type, int level, in cache_init() argument 146 cache->type = type; in cache_init() 147 cache->level = level; in cache_init() 148 cache->ofnode = of_node_get(ofnode); in cache_init() 149 INIT_LIST_HEAD(&cache->list); in cache_init() [all …]
|
/linux-4.1.27/fs/cachefiles/ |
D | bind.c | 30 int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args) in cachefiles_daemon_bind() argument 33 cache->frun_percent, in cachefiles_daemon_bind() 34 cache->fcull_percent, in cachefiles_daemon_bind() 35 cache->fstop_percent, in cachefiles_daemon_bind() 36 cache->brun_percent, in cachefiles_daemon_bind() 37 cache->bcull_percent, in cachefiles_daemon_bind() 38 cache->bstop_percent, in cachefiles_daemon_bind() 42 ASSERT(cache->fstop_percent >= 0 && in cachefiles_daemon_bind() 43 cache->fstop_percent < cache->fcull_percent && in cachefiles_daemon_bind() 44 cache->fcull_percent < cache->frun_percent && in cachefiles_daemon_bind() [all …]
|
D | daemon.c | 63 int (*handler)(struct cachefiles_cache *cache, char *args); 89 struct cachefiles_cache *cache; in cachefiles_daemon_open() local 102 cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL); in cachefiles_daemon_open() 103 if (!cache) { in cachefiles_daemon_open() 108 mutex_init(&cache->daemon_mutex); in cachefiles_daemon_open() 109 cache->active_nodes = RB_ROOT; in cachefiles_daemon_open() 110 rwlock_init(&cache->active_lock); in cachefiles_daemon_open() 111 init_waitqueue_head(&cache->daemon_pollwq); in cachefiles_daemon_open() 118 cache->frun_percent = 7; in cachefiles_daemon_open() 119 cache->fcull_percent = 5; in cachefiles_daemon_open() [all …]
|
D | interface.c | 32 struct cachefiles_cache *cache; in cachefiles_alloc_object() local 38 cache = container_of(_cache, struct cachefiles_cache, cache); in cachefiles_alloc_object() 40 _enter("{%s},%p,", cache->cache.identifier, cookie); in cachefiles_alloc_object() 56 fscache_object_init(&object->fscache, cookie, &cache->cache); in cachefiles_alloc_object() 105 fscache_object_destroyed(&cache->cache); in cachefiles_alloc_object() 121 struct cachefiles_cache *cache; in cachefiles_lookup_object() local 127 cache = container_of(_object->cache, struct cachefiles_cache, cache); in cachefiles_lookup_object() 136 cachefiles_begin_secure(cache, &saved_cred); in cachefiles_lookup_object() 140 cachefiles_end_secure(cache, saved_cred); in cachefiles_lookup_object() 202 struct cachefiles_cache *cache; in cachefiles_update_object() local [all …]
|
D | namei.c | 99 static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, in cachefiles_mark_object_buried() argument 107 write_lock(&cache->active_lock); in cachefiles_mark_object_buried() 109 p = cache->active_nodes.rb_node; in cachefiles_mark_object_buried() 120 write_unlock(&cache->active_lock); in cachefiles_mark_object_buried() 139 write_unlock(&cache->active_lock); in cachefiles_mark_object_buried() 146 static int cachefiles_mark_object_active(struct cachefiles_cache *cache, in cachefiles_mark_object_active() argument 156 write_lock(&cache->active_lock); in cachefiles_mark_object_active() 165 _p = &cache->active_nodes.rb_node; in cachefiles_mark_object_active() 182 rb_insert_color(&object->active_node, &cache->active_nodes); in cachefiles_mark_object_active() 184 write_unlock(&cache->active_lock); in cachefiles_mark_object_active() [all …]
|
D | security.c | 20 int cachefiles_get_security_ID(struct cachefiles_cache *cache) in cachefiles_get_security_ID() argument 25 _enter("{%s}", cache->secctx); in cachefiles_get_security_ID() 33 if (cache->secctx) { in cachefiles_get_security_ID() 34 ret = set_security_override_from_ctx(new, cache->secctx); in cachefiles_get_security_ID() 43 cache->cache_cred = new; in cachefiles_get_security_ID() 53 static int cachefiles_check_cache_dir(struct cachefiles_cache *cache, in cachefiles_check_cache_dir() argument 79 int cachefiles_determine_cache_security(struct cachefiles_cache *cache, in cachefiles_determine_cache_security() argument 94 cachefiles_end_secure(cache, *_saved_cred); in cachefiles_determine_cache_security() 101 cachefiles_begin_secure(cache, _saved_cred); in cachefiles_determine_cache_security() 106 put_cred(cache->cache_cred); in cachefiles_determine_cache_security() [all …]
|
D | internal.h | 60 struct fscache_cache cache; /* FS-Cache record */ member 128 static inline void cachefiles_state_changed(struct cachefiles_cache *cache) in cachefiles_state_changed() argument 130 set_bit(CACHEFILES_STATE_CHANGED, &cache->flags); in cachefiles_state_changed() 131 wake_up_all(&cache->daemon_pollwq); in cachefiles_state_changed() 137 extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args); 138 extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache); 145 extern int cachefiles_has_space(struct cachefiles_cache *cache, 161 extern int cachefiles_delete_object(struct cachefiles_cache *cache, 167 extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, 171 extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, [all …]
|
D | rdwr.c | 395 struct cachefiles_cache *cache; in cachefiles_read_or_alloc_page() local 403 cache = container_of(object->fscache.cache, in cachefiles_read_or_alloc_page() 404 struct cachefiles_cache, cache); in cachefiles_read_or_alloc_page() 444 } else if (cachefiles_has_space(cache, 0, 1) == 0) { in cachefiles_read_or_alloc_page() 684 struct cachefiles_cache *cache; in cachefiles_read_or_alloc_pages() local 694 cache = container_of(object->fscache.cache, in cachefiles_read_or_alloc_pages() 695 struct cachefiles_cache, cache); in cachefiles_read_or_alloc_pages() 705 if (cachefiles_has_space(cache, 0, *nr_pages) < 0) in cachefiles_read_or_alloc_pages() 801 struct cachefiles_cache *cache; in cachefiles_allocate_page() local 806 cache = container_of(object->fscache.cache, in cachefiles_allocate_page() [all …]
|
/linux-4.1.27/drivers/acpi/acpica/ |
D | utcache.c | 70 struct acpi_memory_list *cache; in acpi_os_create_cache() local 80 cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); in acpi_os_create_cache() 81 if (!cache) { in acpi_os_create_cache() 87 ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); in acpi_os_create_cache() 88 cache->list_name = cache_name; in acpi_os_create_cache() 89 cache->object_size = object_size; in acpi_os_create_cache() 90 cache->max_depth = max_depth; in acpi_os_create_cache() 92 *return_cache = cache; in acpi_os_create_cache() 108 acpi_status acpi_os_purge_cache(struct acpi_memory_list * cache) in acpi_os_purge_cache() argument 115 if (!cache) { in acpi_os_purge_cache() [all …]
|
D | uttrack.c | 96 struct acpi_memory_list *cache; in acpi_ut_create_list() local 98 cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); in acpi_ut_create_list() 99 if (!cache) { in acpi_ut_create_list() 103 ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); in acpi_ut_create_list() 105 cache->list_name = list_name; in acpi_ut_create_list() 106 cache->object_size = object_size; in acpi_ut_create_list() 108 *return_cache = cache; in acpi_ut_create_list()
|
/linux-4.1.27/fs/fscache/ |
D | cache.c | 99 struct fscache_cache *cache; in fscache_select_cache_for_object() local 117 cache = object->cache; in fscache_select_cache_for_object() 119 test_bit(FSCACHE_IOERROR, &cache->flags)) in fscache_select_cache_for_object() 120 cache = NULL; in fscache_select_cache_for_object() 123 _leave(" = %p [parent]", cache); in fscache_select_cache_for_object() 124 return cache; in fscache_select_cache_for_object() 151 if (!tag->cache) { in fscache_select_cache_for_object() 156 if (test_bit(FSCACHE_IOERROR, &tag->cache->flags)) in fscache_select_cache_for_object() 159 _leave(" = %p [specific]", tag->cache); in fscache_select_cache_for_object() 160 return tag->cache; in fscache_select_cache_for_object() [all …]
|
D | cookie.c | 25 static int fscache_alloc_object(struct fscache_cache *cache, 194 struct fscache_cache *cache; in fscache_acquire_non_index_cookie() local 213 cache = fscache_select_cache_for_object(cookie->parent); in fscache_acquire_non_index_cookie() 214 if (!cache) { in fscache_acquire_non_index_cookie() 221 _debug("cache %s", cache->tag->name); in fscache_acquire_non_index_cookie() 227 ret = fscache_alloc_object(cache, cookie); in fscache_acquire_non_index_cookie() 278 static int fscache_alloc_object(struct fscache_cache *cache, in fscache_alloc_object() argument 284 _enter("%p,%p{%s}", cache, cookie, cookie->def->name); in fscache_alloc_object() 289 if (object->cache == cache) in fscache_alloc_object() 297 object = cache->ops->alloc_object(cache, cookie); in fscache_alloc_object() [all …]
|
D | operation.c | 133 ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags)); in fscache_submit_exclusive_op() 236 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { in fscache_submit_op() 401 struct fscache_cache *cache; in fscache_put_operation() local 437 cache = object->cache; in fscache_put_operation() 438 spin_lock(&cache->op_gc_list_lock); in fscache_put_operation() 439 list_add_tail(&op->pend_link, &cache->op_gc_list); in fscache_put_operation() 440 spin_unlock(&cache->op_gc_list_lock); in fscache_put_operation() 441 schedule_work(&cache->op_gc); in fscache_put_operation() 465 struct fscache_cache *cache = in fscache_operation_gc() local 472 spin_lock(&cache->op_gc_list_lock); in fscache_operation_gc() [all …]
|
D | object.c | 293 struct fscache_cache *cache) in fscache_object_init() argument 297 atomic_inc(&cache->object_count); in fscache_object_init() 314 object->cache = cache; in fscache_object_init() 384 object->cache->ops->grab_object(object)) { in fscache_initialise_object() 448 test_bit(FSCACHE_IOERROR, &object->cache->flags) || in fscache_look_up_object() 455 cookie->def->name, object->cache->tag->name); in fscache_look_up_object() 459 ret = object->cache->ops->lookup_object(object); in fscache_look_up_object() 574 object->cache->ops->lookup_complete(object); in fscache_object_available() 610 object->cache->ops->lookup_complete(object); in fscache_lookup_failure() 674 struct fscache_cache *cache = object->cache; in fscache_drop_object() local [all …]
|
/linux-4.1.27/drivers/staging/lustre/lustre/fld/ |
D | fld_cache.c | 67 struct fld_cache *cache; in fld_cache_init() local 72 OBD_ALLOC_PTR(cache); in fld_cache_init() 73 if (cache == NULL) in fld_cache_init() 76 INIT_LIST_HEAD(&cache->fci_entries_head); in fld_cache_init() 77 INIT_LIST_HEAD(&cache->fci_lru); in fld_cache_init() 79 cache->fci_cache_count = 0; in fld_cache_init() 80 rwlock_init(&cache->fci_lock); in fld_cache_init() 82 strlcpy(cache->fci_name, name, in fld_cache_init() 83 sizeof(cache->fci_name)); in fld_cache_init() 85 cache->fci_cache_size = cache_size; in fld_cache_init() [all …]
|
D | fld_internal.h | 153 void fld_cache_fini(struct fld_cache *cache); 155 void fld_cache_flush(struct fld_cache *cache); 157 int fld_cache_insert(struct fld_cache *cache, 163 int fld_cache_insert_nolock(struct fld_cache *cache, 165 void fld_cache_delete(struct fld_cache *cache, 167 void fld_cache_delete_nolock(struct fld_cache *cache, 169 int fld_cache_lookup(struct fld_cache *cache, 173 fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range); 174 void fld_cache_entry_delete(struct fld_cache *cache, 176 void fld_dump_cache_entries(struct fld_cache *cache); [all …]
|
/linux-4.1.27/fs/squashfs/ |
D | cache.c | 66 struct squashfs_cache *cache, u64 block, int length) in squashfs_cache_get() argument 71 spin_lock(&cache->lock); in squashfs_cache_get() 74 for (i = cache->curr_blk, n = 0; n < cache->entries; n++) { in squashfs_cache_get() 75 if (cache->entry[i].block == block) { in squashfs_cache_get() 76 cache->curr_blk = i; in squashfs_cache_get() 79 i = (i + 1) % cache->entries; in squashfs_cache_get() 82 if (n == cache->entries) { in squashfs_cache_get() 87 if (cache->unused == 0) { in squashfs_cache_get() 88 cache->num_waiters++; in squashfs_cache_get() 89 spin_unlock(&cache->lock); in squashfs_cache_get() [all …]
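A minimal userspace sketch of the lookup loop excerpted above: scan a small fixed array of cache slots for a matching block number, starting from the slot that hit last time and wrapping around; on a miss the caller must fill a slot itself. All names below are illustrative stand-ins, not the squashfs API.

#include <stdint.h>
#include <stddef.h>

struct slot {
	uint64_t block;		/* block number cached in this slot */
	int	 valid;		/* slot holds usable data */
};

struct block_cache {
	struct slot *entry;	/* array of 'entries' slots */
	int	     entries;
	int	     curr_blk;	/* where the last hit was found */
};

/* Return the slot index caching 'block', or -1 on a miss. */
static int cache_find(struct block_cache *c, uint64_t block)
{
	int i = c->curr_blk;

	for (int n = 0; n < c->entries; n++) {
		if (c->entry[i].valid && c->entry[i].block == block) {
			c->curr_blk = i;	/* remember for the next lookup */
			return i;
		}
		i = (i + 1) % c->entries;	/* wrap around the array */
	}
	return -1;				/* caller reads the block and fills a slot */
}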
|
/linux-4.1.27/drivers/infiniband/core/ |
D | cache.c | 77 struct ib_gid_cache *cache; in ib_get_cached_gid() local 84 read_lock_irqsave(&device->cache.lock, flags); in ib_get_cached_gid() 86 cache = device->cache.gid_cache[port_num - start_port(device)]; in ib_get_cached_gid() 88 if (index < 0 || index >= cache->table_len) in ib_get_cached_gid() 91 *gid = cache->table[index]; in ib_get_cached_gid() 93 read_unlock_irqrestore(&device->cache.lock, flags); in ib_get_cached_gid() 104 struct ib_gid_cache *cache; in ib_find_cached_gid() local 113 read_lock_irqsave(&device->cache.lock, flags); in ib_find_cached_gid() 116 cache = device->cache.gid_cache[p]; in ib_find_cached_gid() 117 for (i = 0; i < cache->table_len; ++i) { in ib_find_cached_gid() [all …]
|
/linux-4.1.27/arch/mn10300/mm/ |
D | Makefile | 5 cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o 7 cacheflush-y := cache.o 8 cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y) 9 cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o 10 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o 11 cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o 12 cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o 13 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o 14 cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o 17 cache-dbg-flush-by-tag.o cache-dbg-inv-by-tag.o [all …]
|
D | Kconfig.cache | 2 # MN10300 CPU cache options 12 the affected cacheline to be read into the cache first before being 13 operated upon. Memory is not then updated by a write until the cache 14 is filled and a cacheline needs to be displaced from the cache to 19 cacheline is also in cache, it will be updated too. 35 cache. This means that the written data is immediately available for 38 This is not available for use with an SMP kernel if cache flushing 53 prompt "CPU cache flush/invalidate method" 58 This determines the method by which CPU cache flushing and 62 bool "Use the cache tag registers directly" [all …]
|
D | cache.inc | 15 # Invalidate the instruction cache. 27 # don't want an interrupt routine seeing a disabled cache 49 # wait for the cache to finish 76 # Invalidate the data cache. 88 # don't want an interrupt routine seeing a disabled cache 110 # wait for the cache to finish
|
D | cache-dbg-flush-by-tag.S | 26 # Flush the entire data cache back to RAM and invalidate the icache 43 # read the addresses tagged in the cache's tag RAM and attempt to flush 103 # retain valid entries in the cache
|
D | cache-flush-by-tag.S | 45 # Flush the entire data cache back to RAM 56 # read the addresses tagged in the cache's tag RAM and attempt to flush
|
/linux-4.1.27/Documentation/devicetree/bindings/arm/ |
D | l2cc.txt | 3 ARM cores often have a separate level 2 cache controller. There are various 4 implementations of the L2 cache controller with compatible programming models. 5 Some of the properties that are just prefixed "cache-*" are taken from section 9 The ARM L2 cache representation in the device tree should be done as follows: 14 "arm,pl310-cache" 15 "arm,l220-cache" 16 "arm,l210-cache" 17 "bcm,bcm11351-a2-pl310-cache": DEPRECATED by "brcm,bcm11351-a2-pl310-cache" 18 "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an 20 cache controller [all …]
|
/linux-4.1.27/drivers/base/regmap/ |
D | regcache-flat.c | 22 unsigned int *cache; in regcache_flat_init() local 24 map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1), in regcache_flat_init() 26 if (!map->cache) in regcache_flat_init() 29 cache = map->cache; in regcache_flat_init() 32 cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def; in regcache_flat_init() 39 kfree(map->cache); in regcache_flat_exit() 40 map->cache = NULL; in regcache_flat_exit() 48 unsigned int *cache = map->cache; in regcache_flat_read() local 50 *value = cache[reg]; in regcache_flat_read() 58 unsigned int *cache = map->cache; in regcache_flat_write() local [all …]
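A sketch of the "flat" register cache idea visible above: one array slot per register number, allocated up to max_register + 1, so reads and writes are a single index operation. Userspace rendition with illustrative names; reg_default here only mimics the shape of the regmap defaults table.

#include <stdlib.h>

struct reg_default { unsigned int reg, def; };

struct flat_cache {
	unsigned int *val;		/* val[reg] caches the register value */
	unsigned int  max_register;
};

static int flat_init(struct flat_cache *c, unsigned int max_register,
		     const struct reg_default *defaults, size_t ndefaults)
{
	c->val = calloc(max_register + 1, sizeof(*c->val));
	if (!c->val)
		return -1;
	c->max_register = max_register;
	for (size_t i = 0; i < ndefaults; i++)	/* seed known power-on values */
		c->val[defaults[i].reg] = defaults[i].def;
	return 0;
}

static unsigned int flat_read(struct flat_cache *c, unsigned int reg)
{
	return c->val[reg];	/* O(1), but memory grows with max_register */
}

static void flat_write(struct flat_cache *c, unsigned int reg, unsigned int v)
{
	c->val[reg] = v;
}

The trade-off against the rbtree and LZO caches listed below is the usual one: constant-time access versus memory proportional to the whole register range, even for sparsely used maps.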
|
D | regcache.c | 128 map->cache = NULL; in regcache_init() 516 u8 *cache = base; in regcache_set_val() local 517 cache[idx] = val; in regcache_set_val() 521 u16 *cache = base; in regcache_set_val() local 522 cache[idx] = val; in regcache_set_val() 526 u32 *cache = base; in regcache_set_val() local 527 cache[idx] = val; in regcache_set_val() 549 const u8 *cache = base; in regcache_get_val() local 550 return cache[idx]; in regcache_get_val() 553 const u16 *cache = base; in regcache_get_val() local [all …]
|
D | regcache-lzo.c | 142 map->cache = kzalloc(blkcount * sizeof *lzo_blocks, in regcache_lzo_init() 144 if (!map->cache) in regcache_lzo_init() 146 lzo_blocks = map->cache; in regcache_lzo_init() 209 lzo_blocks = map->cache; in regcache_lzo_exit() 230 map->cache = NULL; in regcache_lzo_exit() 248 lzo_blocks = map->cache; in regcache_lzo_read() 287 lzo_blocks = map->cache; in regcache_lzo_write() 342 lzo_blocks = map->cache; in regcache_lzo_sync()
|
D | regcache-rbtree.c | 69 struct regcache_rbtree_ctx *rbtree_ctx = map->cache; in regcache_rbtree_lookup() 140 struct regcache_rbtree_ctx *rbtree_ctx = map->cache; in rbtree_show() 205 map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); in regcache_rbtree_init() 206 if (!map->cache) in regcache_rbtree_init() 209 rbtree_ctx = map->cache; in regcache_rbtree_init() 235 rbtree_ctx = map->cache; in regcache_rbtree_exit() 251 kfree(map->cache); in regcache_rbtree_exit() 252 map->cache = NULL; in regcache_rbtree_exit() 392 rbtree_ctx = map->cache; in regcache_rbtree_write() 466 rbtree_ctx = map->cache; in regcache_rbtree_sync() [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/fsl/ |
D | l2cache.txt | 3 L2 cache is present in Freescale's QorIQ and QorIQ Qonverge platforms. 4 The cache bindings explained below are ePAPR compliant 8 - compatible : Should include "fsl,chip-l2-cache-controller" and "cache" 10 - reg : Address and size of L2 cache controller registers 11 - cache-size : Size of the entire L2 cache 13 - cache-line-size : Size of L2 cache lines 17 L2: l2-cache-controller@20000 { 18 compatible = "fsl,bsc9132-l2-cache-controller", "cache"; 20 cache-line-size = <32>; // 32 bytes 21 cache-size = <0x40000>; // L2,256K
|
D | cache_sram.txt | 4 option of configuring a part of (or full) cache memory 5 as SRAM. This cache SRAM representation in the device 10 - compatible : should be "fsl,p2020-cache-sram" 11 - fsl,cache-sram-ctlr-handle : points to the L2 controller 12 - reg : offset and length of the cache-sram. 16 cache-sram@fff00000 { 17 fsl,cache-sram-ctlr-handle = <&L2>; 19 compatible = "fsl,p2020-cache-sram";
|
D | pamu.txt | 59 - fsl,primary-cache-geometry 62 cache. The first is the number of cache lines, and the 65 - fsl,secondary-cache-geometry 68 cache. The first is the number of cache lines, and the 83 best LIODN values to minimize PAMU cache thrashing. 109 fsl,primary-cache-geometry = <32 1>; 110 fsl,secondary-cache-geometry = <128 2>; 115 fsl,primary-cache-geometry = <32 1>; 116 fsl,secondary-cache-geometry = <128 2>; 121 fsl,primary-cache-geometry = <32 1>; [all …]
|
/linux-4.1.27/fs/ |
D | mbcache.c | 193 struct mb_cache *cache = ce->e_cache; in __mb_cache_entry_forget() local 196 kmem_cache_free(cache->c_entry_cache, ce); in __mb_cache_entry_forget() 197 atomic_dec(&cache->c_entry_count); in __mb_cache_entry_forget() 293 struct mb_cache *cache; in mb_cache_shrink_count() local 297 list_for_each_entry(cache, &mb_cache_list, c_cache_list) { in mb_cache_shrink_count() 298 mb_debug("cache %s (%d)", cache->c_name, in mb_cache_shrink_count() 299 atomic_read(&cache->c_entry_count)); in mb_cache_shrink_count() 300 count += atomic_read(&cache->c_entry_count); in mb_cache_shrink_count() 328 struct mb_cache *cache = NULL; in mb_cache_create() local 338 cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL); in mb_cache_create() [all …]
|
/linux-4.1.27/drivers/video/fbdev/ |
D | sh_mobile_meram.c | 133 struct sh_mobile_meram_icb *cache; member 227 plane->cache = &priv->icbs[idx]; in meram_plane_alloc() 239 __set_bit(plane->cache->index, &priv->used_icb); in meram_plane_alloc() 255 __clear_bit(plane->cache->index, &priv->used_icb); in meram_plane_free() 269 struct sh_mobile_meram_fb_cache *cache, in meram_set_next_addr() argument 273 struct sh_mobile_meram_icb *icb = cache->planes[0].marker; in meram_set_next_addr() 280 meram_write_icb(priv->base, cache->planes[0].cache->index, target, in meram_set_next_addr() 282 meram_write_icb(priv->base, cache->planes[0].marker->index, target, in meram_set_next_addr() 283 base_addr_y + cache->planes[0].marker->cache_unit); in meram_set_next_addr() 285 if (cache->nplanes == 2) { in meram_set_next_addr() [all …]
|
/linux-4.1.27/Documentation/device-mapper/ |
D | cache.txt | 4 dm-cache is a device mapper target written by Joe Thornber, Heinz 33 may be out of date or kept in sync with the copy on the cache device 47 2. A cache device - the small, fast one. 49 3. A small metadata device - records which blocks are in the cache, 51 This information could be put on the cache device, but having it 54 be used by a single cache device. 60 is configurable when you first create the cache. Typically we've been 66 getting hit a lot, yet the whole block will be promoted to the cache. 67 So large block sizes are bad because they waste cache space. And small 74 The cache has three operating modes: writeback, writethrough and [all …]
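A back-of-the-envelope sizing sketch for the small metadata device this document describes, using the rule of thumb given in the dm-cache documentation (roughly 4MB plus 16 bytes per cache block); the device sizes chosen here are arbitrary examples and the constants should be treated as approximate, not an ABI.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cache_bytes = 64ULL << 30;	/* 64 GiB fast (cache) device */
	uint64_t block_bytes = 256ULL << 10;	/* 256 KiB cache block size */
	uint64_t nr_blocks   = cache_bytes / block_bytes;
	uint64_t metadata    = (4ULL << 20) + 16 * nr_blocks;

	printf("%llu cache blocks, ~%llu MiB of metadata\n",
	       (unsigned long long)nr_blocks,
	       (unsigned long long)(metadata >> 20));
	return 0;
}

Smaller block sizes mean more blocks to track, so the metadata device must grow accordingly; that is the flip side of the "large blocks waste cache space" point made above.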
|
D | cache-policies.txt | 25 Overview of supplied cache replacement policies 34 waiting for the cache and another two for those in the cache (a set for 38 the cache is based on variable thresholds and queue selection is based 39 on hit count on entry. The policy aims to take different cache miss 51 considered sequential it will bypass the cache. The random threshold 61 promote sequential blocks to the cache (e.g. fast application startup). 63 disabled and sequential I/O will no longer implicitly bypass the cache. 68 count of a block not in the cache goes above this threshold it gets 69 promoted to the cache. The read, write and discard promote adjustment 72 If you're trying to quickly warm a new cache device you may wish to [all …]
|
D | era.txt | 11 partially invalidating the contents of a cache to restore cache 64 The scenario of invalidating a cache when rolling back a vendor 78 - Cache enters passthrough mode (see: dm-cache's docs in cache.txt)
|
/linux-4.1.27/fs/overlayfs/ |
D | readdir.c | 51 struct ovl_dir_cache *cache; member 175 struct ovl_dir_cache *cache = od->cache; in ovl_cache_put() local 177 WARN_ON(cache->refcount <= 0); in ovl_cache_put() 178 cache->refcount--; in ovl_cache_put() 179 if (!cache->refcount) { in ovl_cache_put() 180 if (ovl_dir_cache(dentry) == cache) in ovl_cache_put() 183 ovl_cache_free(&cache->entries); in ovl_cache_put() 184 kfree(cache); in ovl_cache_put() 270 struct ovl_dir_cache *cache = od->cache; in ovl_dir_reset() local 274 if (cache && ovl_dentry_version_get(dentry) != cache->version) { in ovl_dir_reset() [all …]
|
/linux-4.1.27/arch/powerpc/boot/dts/fsl/ |
D | p4080si-pre.dtsi | 85 next-level-cache = <&L2_0>; 87 L2_0: l2-cache { 88 next-level-cache = <&cpc>; 95 next-level-cache = <&L2_1>; 97 L2_1: l2-cache { 98 next-level-cache = <&cpc>; 105 next-level-cache = <&L2_2>; 107 L2_2: l2-cache { 108 next-level-cache = <&cpc>; 115 next-level-cache = <&L2_3>; [all …]
|
D | t104xsi-pre.dtsi | 71 next-level-cache = <&L2_1>; 72 L2_1: l2-cache { 73 next-level-cache = <&cpc>; 80 next-level-cache = <&L2_2>; 81 L2_2: l2-cache { 82 next-level-cache = <&cpc>; 89 next-level-cache = <&L2_3>; 90 L2_3: l2-cache { 91 next-level-cache = <&cpc>; 98 next-level-cache = <&L2_4>; [all …]
|
D | p2041si-pre.dtsi | 85 next-level-cache = <&L2_0>; 87 L2_0: l2-cache { 88 next-level-cache = <&cpc>; 95 next-level-cache = <&L2_1>; 97 L2_1: l2-cache { 98 next-level-cache = <&cpc>; 105 next-level-cache = <&L2_2>; 107 L2_2: l2-cache { 108 next-level-cache = <&cpc>; 115 next-level-cache = <&L2_3>; [all …]
|
D | p3041si-pre.dtsi | 86 next-level-cache = <&L2_0>; 88 L2_0: l2-cache { 89 next-level-cache = <&cpc>; 96 next-level-cache = <&L2_1>; 98 L2_1: l2-cache { 99 next-level-cache = <&cpc>; 106 next-level-cache = <&L2_2>; 108 L2_2: l2-cache { 109 next-level-cache = <&cpc>; 116 next-level-cache = <&L2_3>; [all …]
|
D | p5040si-pre.dtsi | 85 next-level-cache = <&L2_0>; 87 L2_0: l2-cache { 88 next-level-cache = <&cpc>; 95 next-level-cache = <&L2_1>; 97 L2_1: l2-cache { 98 next-level-cache = <&cpc>; 105 next-level-cache = <&L2_2>; 107 L2_2: l2-cache { 108 next-level-cache = <&cpc>; 115 next-level-cache = <&L2_3>; [all …]
|
D | t4240si-pre.dtsi | 72 next-level-cache = <&L2_1>; 79 next-level-cache = <&L2_1>; 86 next-level-cache = <&L2_1>; 93 next-level-cache = <&L2_1>; 100 next-level-cache = <&L2_2>; 107 next-level-cache = <&L2_2>; 114 next-level-cache = <&L2_2>; 121 next-level-cache = <&L2_2>; 128 next-level-cache = <&L2_3>; 135 next-level-cache = <&L2_3>; [all …]
|
D | b4si-post.dtsi | 236 cpc: l3-cache-controller@10000 { 237 compatible = "fsl,b4-l3-cache-controller", "cache"; 264 fsl,primary-cache-geometry = <8 1>; 265 fsl,secondary-cache-geometry = <32 2>; 271 fsl,primary-cache-geometry = <32 1>; 272 fsl,secondary-cache-geometry = <32 2>; 278 fsl,primary-cache-geometry = <32 1>; 279 fsl,secondary-cache-geometry = <32 2>; 285 fsl,primary-cache-geometry = <32 1>; 286 fsl,secondary-cache-geometry = <32 2>; [all …]
|
D | p5020si-pre.dtsi | 92 next-level-cache = <&L2_0>; 94 L2_0: l2-cache { 95 next-level-cache = <&cpc>; 102 next-level-cache = <&L2_1>; 104 L2_1: l2-cache { 105 next-level-cache = <&cpc>;
|
D | p5040si-post.dtsi | 234 cpc: l3-cache-controller@10000 { 235 compatible = "fsl,p5040-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; 262 fsl,primary-cache-geometry = <32 1>; 263 fsl,secondary-cache-geometry = <128 2>; 268 fsl,primary-cache-geometry = <32 1>; 269 fsl,secondary-cache-geometry = <128 2>; 274 fsl,primary-cache-geometry = <32 1>; 275 fsl,secondary-cache-geometry = <128 2>; 280 fsl,primary-cache-geometry = <32 1>; 281 fsl,secondary-cache-geometry = <128 2>; [all …]
|
D | p3041si-post.dtsi | 276 cpc: l3-cache-controller@10000 { 277 compatible = "fsl,p3041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; 303 fsl,primary-cache-geometry = <32 1>; 304 fsl,secondary-cache-geometry = <128 2>; 309 fsl,primary-cache-geometry = <32 1>; 310 fsl,secondary-cache-geometry = <128 2>; 315 fsl,primary-cache-geometry = <32 1>; 316 fsl,secondary-cache-geometry = <128 2>; 321 fsl,primary-cache-geometry = <32 1>; 322 fsl,secondary-cache-geometry = <128 2>;
|
D | p5020si-post.dtsi | 279 cpc: l3-cache-controller@10000 { 280 compatible = "fsl,p5020-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; 308 fsl,primary-cache-geometry = <32 1>; 309 fsl,secondary-cache-geometry = <128 2>; 314 fsl,primary-cache-geometry = <32 1>; 315 fsl,secondary-cache-geometry = <128 2>; 320 fsl,primary-cache-geometry = <32 1>; 321 fsl,secondary-cache-geometry = <128 2>; 326 fsl,primary-cache-geometry = <32 1>; 327 fsl,secondary-cache-geometry = <128 2>;
|
D | p2041si-post.dtsi | 249 cpc: l3-cache-controller@10000 { 250 compatible = "fsl,p2041-l3-cache-controller", "fsl,p4080-l3-cache-controller", "cache"; 276 fsl,primary-cache-geometry = <32 1>; 277 fsl,secondary-cache-geometry = <128 2>; 282 fsl,primary-cache-geometry = <32 1>; 283 fsl,secondary-cache-geometry = <128 2>; 288 fsl,primary-cache-geometry = <32 1>; 289 fsl,secondary-cache-geometry = <128 2>; 294 fsl,primary-cache-geometry = <32 1>; 295 fsl,secondary-cache-geometry = <128 2>;
|
D | t2081si-post.dtsi | 353 cpc: l3-cache-controller@10000 { 354 compatible = "fsl,t2080-l3-cache-controller", "cache"; 384 fsl,primary-cache-geometry = <32 1>; 385 fsl,secondary-cache-geometry = <128 2>; 390 fsl,primary-cache-geometry = <32 1>; 391 fsl,secondary-cache-geometry = <128 2>; 396 fsl,primary-cache-geometry = <32 1>; 397 fsl,secondary-cache-geometry = <128 2>; 507 L2_1: l2-cache-controller@c20000 { 508 /* Cluster 0 L2 cache */ [all …]
|
D | p4080si-post.dtsi | 282 cpc: l3-cache-controller@10000 { 283 compatible = "fsl,p4080-l3-cache-controller", "cache"; 311 fsl,primary-cache-geometry = <32 1>; 312 fsl,secondary-cache-geometry = <128 2>; 317 fsl,primary-cache-geometry = <32 1>; 318 fsl,secondary-cache-geometry = <128 2>; 323 fsl,primary-cache-geometry = <32 1>; 324 fsl,secondary-cache-geometry = <128 2>; 329 fsl,primary-cache-geometry = <32 1>; 330 fsl,secondary-cache-geometry = <128 2>; [all …]
|
D | b4420si-post.dtsi | 75 cpc: l3-cache-controller@10000 { 76 compatible = "fsl,b4420-l3-cache-controller", "cache"; 103 L2: l2-cache-controller@c20000 { 104 compatible = "fsl,b4420-l2-cache-controller";
|
D | t208xsi-pre.dtsi | 74 next-level-cache = <&L2_1>; 81 next-level-cache = <&L2_1>; 88 next-level-cache = <&L2_1>; 95 next-level-cache = <&L2_1>;
|
D | b4860si-pre.dtsi | 68 next-level-cache = <&L2>; 75 next-level-cache = <&L2>; 82 next-level-cache = <&L2>; 89 next-level-cache = <&L2>;
|
/linux-4.1.27/drivers/power/ |
D | bq27x00_battery.c | 113 struct bq27x00_reg_cache cache; member 473 struct bq27x00_reg_cache cache = {0, }; in bq27x00_update() local 480 cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, flags_1b); in bq27x00_update() 481 if ((cache.flags & 0xff) == 0xff) in bq27x00_update() 483 cache.flags = -1; in bq27x00_update() 484 if (cache.flags >= 0) { in bq27x00_update() 486 && (cache.flags & BQ27000_FLAG_CI)) { in bq27x00_update() 488 cache.capacity = -ENODATA; in bq27x00_update() 489 cache.energy = -ENODATA; in bq27x00_update() 490 cache.time_to_empty = -ENODATA; in bq27x00_update() [all …]
|
/linux-4.1.27/tools/perf/Documentation/ |
D | perf-buildid-cache.txt | 1 perf-buildid-cache(1) 6 perf-buildid-cache - Manage build-id cache. 11 'perf buildid-cache <options>' 15 This command manages the build-id cache. It can add, remove, update and purge 16 files to/from the cache. In the future it should as well set upper limits for 17 the space used by the cache, etc. 23 Add specified file to the cache. 26 Add specified kcore file to the cache. For the current host that is 28 running 'perf buildid-cache' as root may update root's build-id cache 34 kcore in the cache (with the same build-id) that has the same modules at [all …]
|
/linux-4.1.27/fs/fat/ |
D | cache.c | 40 struct fat_cache *cache = (struct fat_cache *)foo; in init_once() local 42 INIT_LIST_HEAD(&cache->cache_list); in init_once() 66 static inline void fat_cache_free(struct fat_cache *cache) in fat_cache_free() argument 68 BUG_ON(!list_empty(&cache->cache_list)); in fat_cache_free() 69 kmem_cache_free(fat_cache_cachep, cache); in fat_cache_free() 73 struct fat_cache *cache) in fat_cache_update_lru() argument 75 if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list) in fat_cache_update_lru() 76 list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru); in fat_cache_update_lru() 135 struct fat_cache *cache, *tmp; in fat_cache_add() local 145 cache = fat_cache_merge(inode, new); in fat_cache_add() [all …]
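A sketch of the LRU bookkeeping shown in fat_cache_update_lru() above: on every hit an entry is moved to the front of a per-inode list, so the least recently used extent ends up at the tail and is the one recycled. Plain userspace doubly linked list with a circular sentinel head; names are illustrative, not the FAT driver's.

#include <stddef.h>

struct lru_node { struct lru_node *prev, *next; };

/* 'head' is a circular sentinel, initialised with head->next = head->prev = head. */
static void lru_del(struct lru_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

static void lru_add_front(struct lru_node *head, struct lru_node *n)
{
	n->next = head->next;
	n->prev = head;
	head->next->prev = n;
	head->next = n;
}

/* Equivalent of the update-on-hit step: nothing to do if already newest. */
static void lru_touch(struct lru_node *head, struct lru_node *n)
{
	if (head->next != n) {
		lru_del(n);
		lru_add_front(head, n);
	}
}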
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-block-bcache | 5 A write to this file causes the backing device or cache to be 6 unregistered. If a backing device had dirty data in the cache, 17 What: /sys/block/<disk>/bcache/cache 21 For a backing device that has cache, a symlink to 22 the bcache/ dir of that cache. 28 For backing devices: integer number of full cache hits, 29 counted per bio. A partial cache hit counts as a miss. 35 For backing devices: integer number of cache misses. 41 For backing devices: cache hits as a percentage. 48 skip the cache. Read and written as bytes in human readable [all …]
|
D | sysfs-kernel-slab | 8 internal state of the SLUB allocator for each cache. Certain 9 files may be modified to change the behavior of the cache (and 10 any cache it aliases, if any). 13 What: /sys/kernel/slab/cache/aliases 20 have merged into this cache. 22 What: /sys/kernel/slab/cache/align 28 The align file is read-only and specifies the cache's object 31 What: /sys/kernel/slab/cache/alloc_calls 38 locations from which allocations for this cache were performed. 40 enabled for that cache (see Documentation/vm/slub.txt). [all …]
|
D | sysfs-devices-system-cpu | 162 What: /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1} 166 Description: Disable L3 cache indices 168 These files exist in every CPU's cache/index3 directory. Each 170 can be used to disable a cache index. Reading from these files 174 index to one of these files will cause the specificed cache 228 What: /sys/devices/system/cpu/cpu*/cache/index*/<set_of_attributes_mentioned_below> 232 Description: Parameters for the CPU cache attributes 235 - WriteAllocate: allocate a memory location to a cache line 236 on a cache miss because of a write 237 - ReadAllocate: allocate a memory location to a cache line [all …]
|
D | sysfs-class-bdi | 36 total write-back cache that relates to its current average 40 percentage of the write-back cache to a particular device. 46 given percentage of the write-back cache. This is useful in 48 most of the write-back cache. For example in case of an NFS
|
/linux-4.1.27/Documentation/ |
D | bcache.txt | 2 nice if you could use them as cache... Hence bcache. 21 Writeback caching can use most of the cache for buffering writes - writing 30 thus entirely bypass the cache. 33 from disk or invalidating cache entries. For unrecoverable errors (meta data 35 in the cache it first disables writeback caching and waits for all dirty data 39 You'll need make-bcache from the bcache-tools repository. Both the cache device 45 you format your backing devices and cache device at the same time, you won't 57 device, it'll be running in passthrough mode until you attach it to a cache. 78 cache set shows up as /sys/fs/bcache/<UUID> 82 After your cache device and backing device are registered, the backing device [all …]
|
D | cachetlb.txt | 6 This document describes the cache/tlb flushing interfaces called 16 thinking SMP cache/tlb flushing must be so inefficient, this is in 23 "TLB" is abstracted under Linux as something the cpu uses to cache 26 possible for stale translations to exist in this "TLB" cache. 113 Next, we have the cache flushing interfaces. In general, when Linux 129 The cache level flush will always be first, because this allows 132 when that virtual address is flushed from the cache. The HyperSparc 135 The cache flushing routines below need only deal with cache flushing 149 the caches. That is, after running, there will be no cache 158 the caches. That is, after running, there will be no cache [all …]
|
/linux-4.1.27/drivers/block/ |
D | ps3vram.c | 83 struct ps3vram_cache cache; member 317 struct ps3vram_cache *cache = &priv->cache; in ps3vram_cache_evict() local 319 if (!(cache->tags[entry].flags & CACHE_PAGE_DIRTY)) in ps3vram_cache_evict() 323 cache->tags[entry].address); in ps3vram_cache_evict() 324 if (ps3vram_upload(dev, CACHE_OFFSET + entry * cache->page_size, in ps3vram_cache_evict() 325 cache->tags[entry].address, DMA_PAGE_SIZE, in ps3vram_cache_evict() 326 cache->page_size / DMA_PAGE_SIZE) < 0) { in ps3vram_cache_evict() 329 entry * cache->page_size, cache->tags[entry].address, in ps3vram_cache_evict() 330 cache->page_size); in ps3vram_cache_evict() 332 cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; in ps3vram_cache_evict() [all …]
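A sketch of the eviction path excerpted above: a cache page is only written back to the backing store if its dirty flag is set, and the flag is cleared once the write-back succeeds. write_back() is a hypothetical callback standing in for the DMA upload; the flag name merely echoes the excerpt.

#include <stdint.h>
#include <stddef.h>

#define CACHE_PAGE_DIRTY 0x1

struct tag {
	uint64_t address;	/* backing-store address this page caches */
	unsigned flags;
};

/* Returns 0 on success, negative on write-back failure. */
static int cache_evict(struct tag *tags, int entry, size_t page_size,
		       int (*write_back)(uint64_t addr, size_t len))
{
	if (!(tags[entry].flags & CACHE_PAGE_DIRTY))
		return 0;			/* clean pages can just be dropped */

	if (write_back(tags[entry].address, page_size) < 0)
		return -1;			/* keep the dirty flag so a retry is possible */

	tags[entry].flags &= ~CACHE_PAGE_DIRTY;
	return 0;
}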
|
/linux-4.1.27/fs/btrfs/ |
D | extent-tree.c | 108 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache, 117 block_group_cache_done(struct btrfs_block_group_cache *cache) in block_group_cache_done() argument 120 return cache->cached == BTRFS_CACHE_FINISHED || in block_group_cache_done() 121 cache->cached == BTRFS_CACHE_ERROR; in block_group_cache_done() 124 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) in block_group_bits() argument 126 return (cache->flags & bits) == bits; in block_group_bits() 129 static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) in btrfs_get_block_group() argument 131 atomic_inc(&cache->count); in btrfs_get_block_group() 134 void btrfs_put_block_group(struct btrfs_block_group_cache *cache) in btrfs_put_block_group() argument 136 if (atomic_dec_and_test(&cache->count)) { in btrfs_put_block_group() [all …]
|
D | relocation.c | 196 static void remove_backref_node(struct backref_cache *cache, 207 static void backref_cache_init(struct backref_cache *cache) in backref_cache_init() argument 210 cache->rb_root = RB_ROOT; in backref_cache_init() 212 INIT_LIST_HEAD(&cache->pending[i]); in backref_cache_init() 213 INIT_LIST_HEAD(&cache->changed); in backref_cache_init() 214 INIT_LIST_HEAD(&cache->detached); in backref_cache_init() 215 INIT_LIST_HEAD(&cache->leaves); in backref_cache_init() 218 static void backref_cache_cleanup(struct backref_cache *cache) in backref_cache_cleanup() argument 223 while (!list_empty(&cache->detached)) { in backref_cache_cleanup() 224 node = list_entry(cache->detached.next, in backref_cache_cleanup() [all …]
|
/linux-4.1.27/Documentation/devicetree/bindings/arm/mrvl/ |
D | feroceon.txt | 4 - compatible : Should be either "marvell,feroceon-cache" or 5 "marvell,kirkwood-cache". 8 - reg : Address of the L2 cache control register. Mandatory for 9 "marvell,kirkwood-cache", not used by "marvell,feroceon-cache" 13 l2: l2-cache@20128 { 14 compatible = "marvell,kirkwood-cache";
|
D | tauros2.txt | 4 - compatible : Should be "marvell,tauros2-cache". 5 - marvell,tauros2-cache-features : Specify the features supported for the 6 tauros2 cache. 11 arch/arm/include/asm/hardware/cache-tauros2.h 14 L2: l2-cache { 15 compatible = "marvell,tauros2-cache"; 16 marvell,tauros2-cache-features = <0x3>;
|
/linux-4.1.27/arch/powerpc/boot/dts/ |
D | iss4xx-mpic.dts | 40 i-cache-line-size = <32>; 41 d-cache-line-size = <32>; 42 i-cache-size = <32768>; 43 d-cache-size = <32768>; 54 i-cache-line-size = <32>; 55 d-cache-line-size = <32>; 56 i-cache-size = <32768>; 57 d-cache-size = <32768>; 70 i-cache-line-size = <32>; 71 d-cache-line-size = <32>; [all …]
|
D | sbc8548-pre.dtsi | 36 d-cache-line-size = <0x20>; // 32 bytes 37 i-cache-line-size = <0x20>; // 32 bytes 38 d-cache-size = <0x8000>; // L1, 32K 39 i-cache-size = <0x8000>; // L1, 32K 43 next-level-cache = <&L2>;
|
D | stx_gp3_8560.dts | 34 d-cache-line-size = <32>; 35 i-cache-line-size = <32>; 36 d-cache-size = <32768>; 37 i-cache-size = <32768>; 41 next-level-cache = <&L2>; 78 L2: l2-cache-controller@20000 { 79 compatible = "fsl,mpc8540-l2-cache-controller"; 81 cache-line-size = <32>; 82 cache-size = <0x40000>; // L2, 256K
|
D | tqm8555.dts | 35 d-cache-line-size = <32>; 36 i-cache-line-size = <32>; 37 d-cache-size = <32768>; 38 i-cache-size = <32768>; 42 next-level-cache = <&L2>; 79 L2: l2-cache-controller@20000 { 80 compatible = "fsl,mpc8540-l2-cache-controller"; 82 cache-line-size = <32>; 83 cache-size = <0x40000>; // L2, 256K
|
D | tqm8540.dts | 36 d-cache-line-size = <32>; 37 i-cache-line-size = <32>; 38 d-cache-size = <32768>; 39 i-cache-size = <32768>; 43 next-level-cache = <&L2>; 80 L2: l2-cache-controller@20000 { 81 compatible = "fsl,mpc8540-l2-cache-controller"; 83 cache-line-size = <32>; 84 cache-size = <0x40000>; // L2, 256K
|
D | tqm8541.dts | 35 d-cache-line-size = <32>; 36 i-cache-line-size = <32>; 37 d-cache-size = <32768>; 38 i-cache-size = <32768>; 42 next-level-cache = <&L2>; 79 L2: l2-cache-controller@20000 { 80 compatible = "fsl,mpc8540-l2-cache-controller"; 82 cache-line-size = <32>; 83 cache-size = <0x40000>; // L2, 256K
|
D | socrates.dts | 36 d-cache-line-size = <32>; 37 i-cache-line-size = <32>; 38 d-cache-size = <0x8000>; // L1, 32K 39 i-cache-size = <0x8000>; // L1, 32K 43 next-level-cache = <&L2>; 81 L2: l2-cache-controller@20000 { 82 compatible = "fsl,mpc8544-l2-cache-controller"; 84 cache-line-size = <32>; 85 cache-size = <0x40000>; // L2, 256K
|
D | gamecube.dts | 42 i-cache-line-size = <32>; 43 d-cache-line-size = <32>; 44 i-cache-size = <32768>; 45 d-cache-size = <32768>;
|
D | ksi8560.dts | 36 d-cache-line-size = <32>; 37 i-cache-line-size = <32>; 38 d-cache-size = <0x8000>; /* L1, 32K */ 39 i-cache-size = <0x8000>; /* L1, 32K */ 43 next-level-cache = <&L2>; 79 L2: l2-cache-controller@20000 { 80 compatible = "fsl,mpc8540-l2-cache-controller"; 82 cache-line-size = <0x20>; /* 32 bytes */ 83 cache-size = <0x40000>; /* L2, 256K */
|
D | ps3.dts | 64 i-cache-size = <32768>; 65 d-cache-size = <32768>; 66 i-cache-line-size = <128>; 67 d-cache-line-size = <128>;
|
D | xpedite5370.dts | 35 d-cache-line-size = <32>; // 32 bytes 36 i-cache-line-size = <32>; // 32 bytes 37 d-cache-size = <0x8000>; // L1, 32K 38 i-cache-size = <0x8000>; // L1, 32K 42 next-level-cache = <&L2>; 48 d-cache-line-size = <32>; // 32 bytes 49 i-cache-line-size = <32>; // 32 bytes 50 d-cache-size = <0x8000>; // L1, 32K 51 i-cache-size = <0x8000>; // L1, 32K 55 next-level-cache = <&L2>; [all …]
|
D | xpedite5301.dts | 37 d-cache-line-size = <32>; // 32 bytes 38 i-cache-line-size = <32>; // 32 bytes 39 d-cache-size = <0x8000>; // L1, 32K 40 i-cache-size = <0x8000>; // L1, 32K 44 next-level-cache = <&L2>; 50 d-cache-line-size = <32>; // 32 bytes 51 i-cache-line-size = <32>; // 32 bytes 52 d-cache-size = <0x8000>; // L1, 32K 53 i-cache-size = <0x8000>; // L1, 32K 57 next-level-cache = <&L2>; [all …]
|
D | holly.dts | 27 d-cache-line-size = <32>; 28 i-cache-line-size = <32>; 29 d-cache-size = <32768>; 30 i-cache-size = <32768>; 31 d-cache-sets = <128>; 32 i-cache-sets = <128>;
|
D | tqm8560.dts | 37 d-cache-line-size = <32>; 38 i-cache-line-size = <32>; 39 d-cache-size = <32768>; 40 i-cache-size = <32768>; 44 next-level-cache = <&L2>; 81 L2: l2-cache-controller@20000 { 82 compatible = "fsl,mpc8540-l2-cache-controller"; 84 cache-line-size = <32>; 85 cache-size = <0x40000>; // L2, 256K
|
D | arches.dts | 57 i-cache-line-size = <32>; 58 d-cache-line-size = <32>; 59 i-cache-size = <32768>; 60 d-cache-size = <32768>; 63 next-level-cache = <&L2C0>; 129 compatible = "ibm,l2-cache-460gt", "ibm,l2-cache"; 131 0x030 0x008>; /* L2 cache DCR's */ 132 cache-line-size = <32>; /* 32 bytes */ 133 cache-size = <262144>; /* L2, 256K */
|
D | storcenter.dts | 38 i-cache-line-size = <32>; 39 d-cache-line-size = <32>; 40 i-cache-size = <16384>; 41 d-cache-size = <16384>;
|
D | xpedite5330.dts | 73 d-cache-line-size = <32>; // 32 bytes 74 i-cache-line-size = <32>; // 32 bytes 75 d-cache-size = <0x8000>; // L1, 32K 76 i-cache-size = <0x8000>; // L1, 32K 80 next-level-cache = <&L2>; 86 d-cache-line-size = <32>; // 32 bytes 87 i-cache-line-size = <32>; // 32 bytes 88 d-cache-size = <0x8000>; // L1, 32K 89 i-cache-size = <0x8000>; // L1, 32K 93 next-level-cache = <&L2>; [all …]
|
D | xcalibur1501.dts | 36 d-cache-line-size = <32>; // 32 bytes 37 i-cache-line-size = <32>; // 32 bytes 38 d-cache-size = <0x8000>; // L1, 32K 39 i-cache-size = <0x8000>; // L1, 32K 43 next-level-cache = <&L2>; 49 d-cache-line-size = <32>; // 32 bytes 50 i-cache-line-size = <32>; // 32 bytes 51 d-cache-size = <0x8000>; // L1, 32K 52 i-cache-size = <0x8000>; // L1, 32K 56 next-level-cache = <&L2>; [all …]
|
D | mpc8572ds_camp_core0.dts | 4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache 44 l2-cache-controller@20000 { 45 cache-size = <0x80000>; // L2, 512K
|
D | iss4xx.dts | 38 i-cache-line-size = <32>; // may need fixup in sim 39 d-cache-line-size = <32>; // may need fixup in sim 40 i-cache-size = <32768>; /* may need fixup in sim */ 41 d-cache-size = <32768>; /* may need fixup in sim */
|
D | xpedite5200.dts | 38 d-cache-line-size = <32>; // 32 bytes 39 i-cache-line-size = <32>; // 32 bytes 40 d-cache-size = <0x8000>; // L1, 32K 41 i-cache-size = <0x8000>; // L1, 32K 42 next-level-cache = <&L2>; 79 L2: l2-cache-controller@20000 { 80 compatible = "fsl,mpc8548-l2-cache-controller"; 82 cache-line-size = <32>; // 32 bytes 83 cache-size = <0x80000>; // L2, 512K
|
D | mpc8540ads.dts | 38 d-cache-line-size = <32>; // 32 bytes 39 i-cache-line-size = <32>; // 32 bytes 40 d-cache-size = <0x8000>; // L1, 32K 41 i-cache-size = <0x8000>; // L1, 32K 45 next-level-cache = <&L2>; 82 L2: l2-cache-controller@20000 { 83 compatible = "fsl,mpc8540-l2-cache-controller"; 85 cache-line-size = <32>; // 32 bytes 86 cache-size = <0x40000>; // L2, 256K
|
D | mpc8555cds.dts | 38 d-cache-line-size = <32>; // 32 bytes 39 i-cache-line-size = <32>; // 32 bytes 40 d-cache-size = <0x8000>; // L1, 32K 41 i-cache-size = <0x8000>; // L1, 32K 45 next-level-cache = <&L2>; 82 L2: l2-cache-controller@20000 { 83 compatible = "fsl,mpc8555-l2-cache-controller"; 85 cache-line-size = <32>; // 32 bytes 86 cache-size = <0x40000>; // L2, 256K
|
D | stxssa8555.dts | 37 d-cache-line-size = <32>; // 32 bytes 38 i-cache-line-size = <32>; // 32 bytes 39 d-cache-size = <0x8000>; // L1, 32K 40 i-cache-size = <0x8000>; // L1, 32K 44 next-level-cache = <&L2>; 81 L2: l2-cache-controller@20000 { 82 compatible = "fsl,mpc8555-l2-cache-controller"; 84 cache-line-size = <32>; // 32 bytes 85 cache-size = <0x40000>; // L2, 256K
|
D | mpc8541cds.dts | 38 d-cache-line-size = <32>; // 32 bytes 39 i-cache-line-size = <32>; // 32 bytes 40 d-cache-size = <0x8000>; // L1, 32K 41 i-cache-size = <0x8000>; // L1, 32K 45 next-level-cache = <&L2>; 82 L2: l2-cache-controller@20000 { 83 compatible = "fsl,mpc8541-l2-cache-controller"; 85 cache-line-size = <32>; // 32 bytes 86 cache-size = <0x40000>; // L2, 256K
|
D | currituck.dts | 36 i-cache-line-size = <32>; 37 d-cache-line-size = <32>; 38 i-cache-size = <32768>; 39 d-cache-size = <32768>; 50 i-cache-line-size = <32>; 51 d-cache-line-size = <32>; 52 i-cache-size = <32768>; 53 d-cache-size = <32768>;
|
/linux-4.1.27/arch/sh/mm/ |
D | cache-debugfs.c | 28 struct cache_info *cache; in cache_seq_show() local 49 cache = ¤t_cpu_data.dcache; in cache_seq_show() 52 cache = ¤t_cpu_data.icache; in cache_seq_show() 55 waysize = cache->sets; in cache_seq_show() 64 waysize <<= cache->entry_shift; in cache_seq_show() 66 for (way = 0; way < cache->ways; way++) { in cache_seq_show() 76 addr += cache->linesz, line++) { in cache_seq_show() 89 addrstart += cache->way_incr; in cache_seq_show()
|
D | Makefile | 5 obj-y := alignment.o cache.o init.o consistent.o mmap.o 7 cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o 8 cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o 9 cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o 10 cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o 11 cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o 12 cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o 13 cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o 25 debugfs-$(CONFIG_CPU_SH4) += cache-debugfs.o
|
/linux-4.1.27/Documentation/filesystems/caching/ |
D | backend-api.txt | 9 This API is declared in <linux/fscache-cache.h>. 16 To start off, a cache definition must be initialised and registered for each 17 cache the backend wants to make available. For instance, CacheFS does this in 20 The cache definition (struct fscache_cache) should be initialised by calling: 22 void fscache_init_cache(struct fscache_cache *cache, 29 (*) "cache" is a pointer to the cache definition; 32 this cache; and 35 for the cache. 38 The cache should then be registered with FS-Cache by passing a pointer to the 39 previously initialised cache definition to: [all …]
|
D | cachefiles.txt | 13 (*) Starting the cache. 34 CacheFiles is a caching backend that's meant to use as a cache a directory on 37 CacheFiles uses a userspace daemon to do some of the cache management - such as 41 The filesystem and data integrity of the cache are only as good as those of the 48 and whilst it is open, a cache is at least partially in existence. The daemon 49 opens this and sends commands down it to control the cache. 51 CacheFiles is currently limited to a single cache. 54 the filesystem, shrinking the cache by culling the objects it contains to make 66 available in the system and in the cache filesystem: 79 filesystems being used as a cache. [all …]
|
D | netfs-api.txt | 67 entire in-cache hierarchy for this netfs will be scrapped and begun 106 cache. Any such objects created within an index will be created in the 107 first cache only. The cache in which an index is created can be 108 controlled by cache tags (see below). 179 (3) A function to select the cache in which to store an index [optional]. 181 This function is invoked when an index needs to be instantiated in a cache 189 cache in the parent's list will be chosen, or failing that, the first 190 cache in the master list. 203 this is a data file. The size may be used to govern how much cache must 204 be reserved for this file in the cache. [all …]
|
D | fscache.txt | 9 This facility is a general purpose cache for network filesystems, though it 12 FS-Cache mediates between cache backends (such as CacheFS) and network 29 | ISOFS |--+ | /var/cache | 34 facility to a network filesystem such that the cache is transparent to the 54 +---------+ | /var/cache | | /dev/sda6 | 71 opened in its entirety into a cache before permitting it to be accessed and 72 then serving the pages out of that cache rather than the netfs inode because: 74 (1) It must be practical to operate without a cache. 77 cache. 80 must not be limited to the size of the cache. [all …]
|
D | object.txt | 30 a cache backend is currently actively caching. Such objects are represented by 31 the fscache_object struct. The cache backends allocate these upon request, and 36 represented by multiple objects - an index may exist in more than one cache - 108 (2) Initialisation: states that perform lookups in the cache and validate 149 to disconnecting the netfs's representation of a cache object (fscache_cookie) 150 from the cache backend's representation (fscache_object) - which may be 165 the cache, it is expected that it will not be possible to look an object 175 FS-Cache expects the cache backend to probe the cache to see whether this 179 The cache should call fscache_object_lookup_negative() to indicate lookup 187 to be read out of the cache for that file that isn't currently also held [all …]
|
/linux-4.1.27/arch/m68k/kernel/ |
D | sys_m68k.c | 67 cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len) in cache_flush_040() argument 74 switch (cache) in cache_flush_040() 127 switch (cache) in cache_flush_040() 184 switch (cache) in cache_flush_040() 227 cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len) in cache_flush_060() argument 240 switch (cache) in cache_flush_060() 288 switch (cache) in cache_flush_060() 347 switch (cache) in cache_flush_060() 377 sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) in sys_cacheflush() argument 382 cache & ~FLUSH_CACHE_BOTH) in sys_cacheflush() [all …]
|
/linux-4.1.27/Documentation/filesystems/nfs/ |
D | rpc-cache.txt | 24 - general cache lookup with correct locking 26 - allowing an EXPIRED time on cache items, and removing 28 - making requests to user-space to fill in cache entries 29 - allowing user-space to directly set entries in the cache 31 cache entries, and replaying those requests when the cache entry 38 1/ A cache needs a datum to store. This is in the form of a 43 Each cache element is reference counted and contains 44 expiry and update times for use in cache management. 45 2/ A cache needs a "cache_detail" structure that 46 describes the cache. This stores the hash table, some [all …]
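A small sketch of the per-entry bookkeeping this document describes: each cached item carries a reference count plus expiry and last-refresh times, and a lookup treats an expired entry as a miss that must be refilled from userspace. Field names are illustrative, not the sunrpc structures.

#include <time.h>

struct cache_item {
	int	refcount;	/* holders of a pointer to this entry */
	time_t	expiry_time;	/* after this, the content is not trusted */
	time_t	last_refresh;	/* when the entry was last filled in */
};

/* Usable only while unexpired; expired entries must be refreshed upstream. */
static int cache_item_valid(const struct cache_item *it, time_t now)
{
	return now < it->expiry_time;
}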
|
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_topology.c | 220 static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache) in kfd_parse_subtype_cache() argument 226 BUG_ON(!cache); in kfd_parse_subtype_cache() 228 id = cache->processor_id_low; in kfd_parse_subtype_cache() 239 props->cache_level = cache->cache_level; in kfd_parse_subtype_cache() 240 props->cache_size = cache->cache_size; in kfd_parse_subtype_cache() 241 props->cacheline_size = cache->cache_line_size; in kfd_parse_subtype_cache() 242 props->cachelines_per_tag = cache->lines_per_tag; in kfd_parse_subtype_cache() 243 props->cache_assoc = cache->associativity; in kfd_parse_subtype_cache() 244 props->cache_latency = cache->cache_latency; in kfd_parse_subtype_cache() 246 if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE) in kfd_parse_subtype_cache() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | mr.c | 72 struct mlx5_mr_cache *cache = &dev->cache; in order2idx() local 74 if (order < cache->ent[0].order) in order2idx() 77 return order - cache->ent[0].order; in order2idx() 84 struct mlx5_mr_cache *cache = &dev->cache; in reg_mr_callback() local 86 struct mlx5_cache_ent *ent = &cache->ent[c]; in reg_mr_callback() 118 cache->last_add = jiffies; in reg_mr_callback() 136 struct mlx5_mr_cache *cache = &dev->cache; in add_keys() local 137 struct mlx5_cache_ent *ent = &cache->ent[c]; in add_keys() 190 struct mlx5_mr_cache *cache = &dev->cache; in remove_keys() local 191 struct mlx5_cache_ent *ent = &cache->ent[c]; in remove_keys() [all …]
|
/linux-4.1.27/include/net/ |
D | netlabel.h | 209 struct netlbl_lsm_cache *cache; member 234 struct netlbl_lsm_cache *cache; in netlbl_secattr_cache_alloc() local 236 cache = kzalloc(sizeof(*cache), flags); in netlbl_secattr_cache_alloc() 237 if (cache) in netlbl_secattr_cache_alloc() 238 atomic_set(&cache->refcount, 1); in netlbl_secattr_cache_alloc() 239 return cache; in netlbl_secattr_cache_alloc() 250 static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache) in netlbl_secattr_cache_free() argument 252 if (!atomic_dec_and_test(&cache->refcount)) in netlbl_secattr_cache_free() 255 if (cache->free) in netlbl_secattr_cache_free() 256 cache->free(cache->data); in netlbl_secattr_cache_free() [all …]
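A userspace sketch of the refcounted-cache-entry pattern visible in the header excerpt above: allocate with a reference count of one, and only run the supplied destructor on the cached data once the last reference is dropped. The names below are illustrative stand-ins, not the NetLabel API.

#include <stdlib.h>
#include <stdatomic.h>

struct lsm_cache {
	atomic_int refcount;
	void (*free)(void *data);	/* destructor for the cached blob */
	void *data;
};

static struct lsm_cache *cache_alloc(void)
{
	struct lsm_cache *c = calloc(1, sizeof(*c));

	if (c)
		atomic_init(&c->refcount, 1);	/* caller holds the first ref */
	return c;
}

static void cache_put(struct lsm_cache *c)
{
	/* atomic_fetch_sub returns the old value: we dropped the last
	 * reference only if it was 1 before the decrement. */
	if (atomic_fetch_sub(&c->refcount, 1) != 1)
		return;
	if (c->free)
		c->free(c->data);
	free(c);
}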
|
/linux-4.1.27/tools/perf/util/ |
D | dso.c | 502 struct dso_cache *cache; in dso_cache__free() local 504 cache = rb_entry(next, struct dso_cache, rb_node); in dso_cache__free() 505 next = rb_next(&cache->rb_node); in dso_cache__free() 506 rb_erase(&cache->rb_node, root); in dso_cache__free() 507 free(cache); in dso_cache__free() 515 struct dso_cache *cache; in dso_cache__find() local 521 cache = rb_entry(parent, struct dso_cache, rb_node); in dso_cache__find() 522 end = cache->offset + DSO__DATA_CACHE_SIZE; in dso_cache__find() 524 if (offset < cache->offset) in dso_cache__find() 529 return cache; in dso_cache__find() [all …]
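A sketch of the lookup excerpted above: cached file data is held in fixed-size windows keyed by their starting offset, and a lookup asks whether some window covers the requested offset. A sorted array plus binary search stands in here for the kernel-style rbtree perf actually uses; the window size and names are illustrative.

#include <stdint.h>
#include <stddef.h>

#define CACHE_WINDOW (1u << 13)		/* illustrative window size */

struct window {
	uint64_t offset;		/* start of the cached range */
	/* char data[CACHE_WINDOW] would live here */
};

/* 'win' is sorted by offset; returns the covering window or NULL (miss). */
static struct window *cache_find(struct window *win, size_t n, uint64_t offset)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		uint64_t start = win[mid].offset;

		if (offset < start)
			hi = mid;
		else if (offset >= start + CACHE_WINDOW)
			lo = mid + 1;
		else
			return &win[mid];	/* offset falls inside this window */
	}
	return NULL;	/* caller reads from the file and inserts a new window */
}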
|
D | ordered-events.c | 92 struct list_head *cache = &oe->cache; in alloc_event() local 100 if (!list_empty(cache)) { in alloc_event() 101 new = list_entry(cache->next, struct ordered_event, list); in alloc_event() 150 list_move(&event->list, &oe->cache); in ordered_events__delete() 290 INIT_LIST_HEAD(&oe->cache); in ordered_events__init()
|
/linux-4.1.27/arch/arm/boot/dts/ |
D | bcm63138.dtsi | 27 next-level-cache = <&L2>; 34 next-level-cache = <&L2>; 64 L2: cache-controller@1d000 { 65 compatible = "arm,pl310-cache"; 67 cache-unified; 68 cache-level = <2>; 69 cache-size = <524288>; 70 cache-sets = <1024>; 71 cache-line-size = <32>;
|
D | vf610.dtsi | 13 next-level-cache = <&L2>; 17 L2: l2-cache@40006000 { 18 compatible = "arm,pl310-cache"; 20 cache-unified; 21 cache-level = <2>;
|
D | highbank.dts | 37 next-level-cache = <&L2>; 56 next-level-cache = <&L2>; 65 next-level-cache = <&L2>; 74 next-level-cache = <&L2>; 119 L2: l2-cache { 120 compatible = "arm,pl310-cache"; 123 cache-unified; 124 cache-level = <2>;
|
D | vexpress-v2p-ca9.dts | 40 next-level-cache = <&L2>; 47 next-level-cache = <&L2>; 54 next-level-cache = <&L2>; 61 next-level-cache = <&L2>; 169 L2: cache-controller@1e00a000 { 170 compatible = "arm,pl310-cache"; 173 cache-unified; 174 cache-level = <2>; 230 /* PL310, L2 cache, RAM cell supply (not PL310 logic) */ 275 /* PL310, L2 cache, RAM cell supply (not PL310 logic) */ [all …]
|
D | qcom-apq8084.dtsi | 22 next-level-cache = <&L2>; 33 next-level-cache = <&L2>; 44 next-level-cache = <&L2>; 55 next-level-cache = <&L2>; 61 L2: l2-cache { 62 compatible = "qcom,arch-cache"; 63 cache-level = <2>;
|
D | qcom-msm8660.dtsi | 23 next-level-cache = <&L2>; 31 next-level-cache = <&L2>; 34 L2: l2-cache { 35 compatible = "cache"; 36 cache-level = <2>;
|
D | qcom-msm8974.dtsi | 22 next-level-cache = <&L2>; 33 next-level-cache = <&L2>; 44 next-level-cache = <&L2>; 55 next-level-cache = <&L2>; 61 L2: l2-cache { 62 compatible = "cache"; 63 cache-level = <2>;
|
D | bcm5301x.dtsi | 77 L2: cache-controller@2000 { 78 compatible = "arm,pl310-cache"; 80 cache-unified; 81 cache-level = <2>;
|
D | vexpress-v2p-ca5s.dts | 40 next-level-cache = <&L2>; 47 next-level-cache = <&L2>; 114 L2: cache-controller@2c0f0000 { 115 compatible = "arm,pl310-cache"; 118 cache-level = <2>;
|
D | bcm4708.dtsi | 22 next-level-cache = <&L2>; 29 next-level-cache = <&L2>;
|
/linux-4.1.27/net/rds/ |
D | ib_recv.c | 87 static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache) in rds_ib_cache_xfer_to_ready() argument 91 tmp = xchg(&cache->xfer, NULL); in rds_ib_cache_xfer_to_ready() 93 if (cache->ready) in rds_ib_cache_xfer_to_ready() 94 list_splice_entire_tail(tmp, cache->ready); in rds_ib_cache_xfer_to_ready() 96 cache->ready = tmp; in rds_ib_cache_xfer_to_ready() 100 static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache) in rds_ib_recv_alloc_cache() argument 105 cache->percpu = alloc_percpu(struct rds_ib_cache_head); in rds_ib_recv_alloc_cache() 106 if (!cache->percpu) in rds_ib_recv_alloc_cache() 110 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache() 114 cache->xfer = NULL; in rds_ib_recv_alloc_cache() [all …]
|
/linux-4.1.27/drivers/net/wireless/cw1200/ |
D | txrx.c | 216 static int tx_policy_find(struct tx_policy_cache *cache, in tx_policy_find() argument 225 list_for_each_entry(it, &cache->used, link) { in tx_policy_find() 227 return it - cache->cache; in tx_policy_find() 230 list_for_each_entry(it, &cache->free, link) { in tx_policy_find() 232 return it - cache->cache; in tx_policy_find() 237 static inline void tx_policy_use(struct tx_policy_cache *cache, in tx_policy_use() argument 241 list_move(&entry->link, &cache->used); in tx_policy_use() 244 static inline int tx_policy_release(struct tx_policy_cache *cache, in tx_policy_release() argument 249 list_move(&entry->link, &cache->free); in tx_policy_release() 256 struct tx_policy_cache *cache = &priv->tx_policy_cache; in tx_policy_clean() local [all …]
|
/linux-4.1.27/include/linux/ |
D | fscache-cache.h | 37 struct fscache_cache *cache; /* cache referred to by this tag */ member 234 struct fscache_object *(*alloc_object)(struct fscache_cache *cache, 272 void (*sync_cache)(struct fscache_cache *cache); 308 void (*dissociate_pages)(struct fscache_cache *cache); 377 struct fscache_cache *cache; /* cache that supplied this object */ member 417 !test_bit(FSCACHE_IOERROR, &object->cache->flags); in fscache_object_is_active() 423 test_bit(FSCACHE_IOERROR, &object->cache->flags); in fscache_object_is_dead() 432 static inline void fscache_object_destroyed(struct fscache_cache *cache) in fscache_object_destroyed() argument 434 if (atomic_dec_and_test(&cache->object_count)) in fscache_object_destroyed() 531 void fscache_init_cache(struct fscache_cache *cache, [all …]
|
D | kasan.h | 42 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object); 43 void kasan_poison_object_data(struct kmem_cache *cache, void *object); 68 static inline void kasan_unpoison_object_data(struct kmem_cache *cache, in kasan_unpoison_object_data() argument 70 static inline void kasan_poison_object_data(struct kmem_cache *cache, in kasan_poison_object_data() argument
|
/linux-4.1.27/drivers/gpio/ |
D | gpio-mcp23s08.c | 70 u16 cache[11]; member 197 if ((n + reg) > sizeof(mcp->cache)) in mcp23s08_read_regs() 239 if ((n + reg) > sizeof(mcp->cache)) in mcp23s17_read_regs() 276 mcp->cache[MCP_IODIR] |= (1 << offset); in mcp23s08_direction_input() 277 status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]); in mcp23s08_direction_input() 294 mcp->cache[MCP_GPIO] = status; in mcp23s08_get() 303 unsigned olat = mcp->cache[MCP_OLAT]; in __mcp23s08_set() 309 mcp->cache[MCP_OLAT] = olat; in __mcp23s08_set() 333 mcp->cache[MCP_IODIR] &= ~mask; in mcp23s08_direction_output() 334 status = mcp->ops->write(mcp, MCP_IODIR, mcp->cache[MCP_IODIR]); in mcp23s08_direction_output() [all …]
|
/linux-4.1.27/arch/arm/mm/ |
D | Makefile | 38 obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o 39 obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o 40 obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o 41 obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o 42 obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o 43 obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o 44 obj-$(CONFIG_CPU_CACHE_NOP) += cache-nop.o 99 obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o 100 obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o 101 obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o [all …]
|
D | cache-v7.S | 77 ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable 78 ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate 104 beq start_flush_levels @ start flushing cache levels 125 mov r10, #0 @ start clean at cache level 0 127 add r2, r10, r10, lsr #1 @ work out 3x current cache level 128 mov r1, r0, lsr r2 @ extract cache type bits from clidr 129 and r1, r1, #7 @ mask of the bits for current cache only 130 cmp r1, #2 @ see what cache we have at this level 131 blt skip @ skip if no cache, or just i-cache 135 mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr [all …]
|
D | proc-arm940.S | 41 bic r0, r0, #0x00001000 @ i-cache 42 bic r0, r0, #0x00000004 @ d-cache 54 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 55 mcr p15, 0, ip, c7, c6, 0 @ flush D cache 59 bic ip, ip, #0x00001000 @ i-cache 80 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 112 mcr p15, 0, ip, c7, c6, 0 @ flush D cache 123 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 171 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 279 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache [all …]
|
D | proc-arm946.S | 48 bic r0, r0, #0x00001000 @ i-cache 49 bic r0, r0, #0x00000004 @ d-cache 61 mcr p15, 0, ip, c7, c5, 0 @ flush I cache 62 mcr p15, 0, ip, c7, c6, 0 @ flush D cache 66 bic ip, ip, #0x00001000 @ i-cache 87 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 107 mcr p15, 0, ip, c7, c6, 0 @ flush D cache 118 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache 213 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 332 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache [all …]
|
D | Kconfig | 17 which has no memory control unit and cache. 50 A 32-bit RISC processor with 8KB cache or 4KB variants, 67 which has no memory control unit and cache. 140 instruction sequences for cache and TLB operations. Curiously, 159 Branch Target Buffer, Unified TLB and cache line size 16. 242 based upon the ARM10 integer core with a 16KiB L1 Harvard cache, 494 # The cache model 543 ARM Architecture Version 4 TLB with writethrough cache. 548 ARM Architecture Version 4 TLB with writeback cache. 553 ARM Architecture Version 4 TLB with writeback cache and invalidate [all …]
|
D | proc-xscale.S | 94 1: mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line 96 mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line 98 mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line 100 mcr p15, 0, \rd, c7, c2, 5 @ allocate D cache line 155 @ *** cache line aligned *** 193 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 217 mcrne p15, 0, ip, c7, c5, 0 @ Invalidate I cache & BTB 239 mcrne p15, 0, r0, c7, c5, 1 @ Invalidate I cache line 240 mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line 241 mcr p15, 0, r0, c7, c6, 1 @ Invalidate D cache line [all …]
|
D | cache-v6.S | 43 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache 44 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache 45 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache 46 mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache 52 mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache 67 mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate 69 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate 146 mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate
|
D | cache-v4wb.S | 30 # error Unknown cache size 61 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 80 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 114 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 117 bhs __flush_whole_cache @ flush whole D cache 172 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
|
D | cache-v4.S | 43 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 62 mcr p15, 0, ip, c7, c7, 0 @ flush ID cache 118 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
|
D | cache-v4wt.S | 51 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 73 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 74 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache 143 mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache
|
D | proc-feroceon.S | 60 mrc p15, 0, r0, c0, c0, 1 @ read cache type register 64 mov r0, r0, lsr #18 @ get cache size order 68 mov r2, r2, lsl r0 @ actual cache size 136 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 170 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 255 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 269 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 488 mcreq p15, 0, ip, c7, c5, 0 @ invalidate I cache 589 .macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req 612 .long \cache [all …]
|
D | proc-mohawk.S | 105 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 127 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache 129 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 215 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 330 mcr p15, 0, ip, c7, c14, 0 @ clean & invalidate all D cache 331 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache 333 orr r0, r0, #0x18 @ cache the page table in L2 378 orr r1, r1, #0x18 @ cache the page table in L2 392 orr r4, r4, #0x18 @ cache the page table in L2
|
D | cache-fa.S | 48 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache 69 mcr p15, 0, ip, c7, c14, 0 @ clean/invalidate D cache 71 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache 91 bhs __flush_whole_cache @ flush whole D cache 157 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
|
/linux-4.1.27/scripts/ |
D | decode_stacktrace.sh | 13 declare -A cache 30 local base_addr=${cache[$name]} 33 cache["$name"]="$base_addr" 50 local code=${cache[$address]} 53 cache[$address]=$code
|
/linux-4.1.27/fs/nilfs2/ |
D | alloc.c | 253 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; in nilfs_palloc_get_desc_block() local 258 bhp, &cache->prev_desc, &cache->lock); in nilfs_palloc_get_desc_block() 272 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; in nilfs_palloc_get_bitmap_block() local 277 &cache->prev_bitmap, &cache->lock); in nilfs_palloc_get_bitmap_block() 290 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; in nilfs_palloc_get_entry_block() local 295 &cache->prev_entry, &cache->lock); in nilfs_palloc_get_entry_block() 761 struct nilfs_palloc_cache *cache) in nilfs_palloc_setup_cache() argument 763 NILFS_MDT(inode)->mi_palloc_cache = cache; in nilfs_palloc_setup_cache() 764 spin_lock_init(&cache->lock); in nilfs_palloc_setup_cache() 769 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache; in nilfs_palloc_clear_cache() local [all …]
|
/linux-4.1.27/arch/s390/kernel/ |
D | cache.c | 70 struct cacheinfo *cache; in show_cacheinfo() local 78 cache = this_cpu_ci->info_list + idx; in show_cacheinfo() 80 seq_printf(m, "level=%d ", cache->level); in show_cacheinfo() 81 seq_printf(m, "type=%s ", cache_type_string[cache->type]); in show_cacheinfo() 83 cache->disable_sysfs ? "Shared" : "Private"); in show_cacheinfo() 84 seq_printf(m, "size=%dK ", cache->size >> 10); in show_cacheinfo() 85 seq_printf(m, "line_size=%u ", cache->coherency_line_size); in show_cacheinfo() 86 seq_printf(m, "associativity=%d", cache->ways_of_associativity); in show_cacheinfo()
|
/linux-4.1.27/drivers/acpi/apei/ |
D | ghes.c | 528 struct ghes_estatus_cache *cache; in ghes_estatus_cached() local 534 cache = rcu_dereference(ghes_estatus_caches[i]); in ghes_estatus_cached() 535 if (cache == NULL) in ghes_estatus_cached() 537 if (len != cache->estatus_len) in ghes_estatus_cached() 539 cache_estatus = GHES_ESTATUS_FROM_CACHE(cache); in ghes_estatus_cached() 542 atomic_inc(&cache->count); in ghes_estatus_cached() 544 if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC) in ghes_estatus_cached() 558 struct ghes_estatus_cache *cache; in ghes_estatus_cache_alloc() local 568 cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len); in ghes_estatus_cache_alloc() 569 if (!cache) { in ghes_estatus_cache_alloc() [all …]
|
/linux-4.1.27/arch/unicore32/mm/ |
D | Kconfig | 15 Say Y here to disable the processor instruction cache. Unless 21 Say Y here to disable the processor data cache. Unless 25 bool "Force write through D-cache" 27 Say Y here to use the data cache in writethrough mode. Unless you 31 bool "Disable D-cache line ops" 34 Say Y here to disable the data cache line operations.
|
/linux-4.1.27/drivers/md/bcache/ |
D | bcache.h | 395 struct cache { struct 501 struct cache *cache[MAX_CACHES_PER_SET]; member 502 struct cache *cache_by_alloc[MAX_CACHES_PER_SET]; 732 static inline struct cache *PTR_CACHE(struct cache_set *c, in PTR_CACHE() 736 return c->cache[PTR_DEV(k, ptr)]; in PTR_CACHE() 817 for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++) 860 struct cache *ca; in wake_up_allocators() 869 void bch_count_io_errors(struct cache *, int, const char *); 880 uint8_t bch_inc_gen(struct cache *, struct bucket *); 883 bool bch_can_invalidate_bucket(struct cache *, struct bucket *); [all …]
|
D | alloc.c | 74 uint8_t bch_inc_gen(struct cache *ca, struct bucket *b) in bch_inc_gen() 86 struct cache *ca; in bch_rescale_priorities() 129 bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b) in bch_can_invalidate_bucket() 139 void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in __bch_invalidate_one_bucket() 152 static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b) in bch_invalidate_one_bucket() 178 static void invalidate_buckets_lru(struct cache *ca) in invalidate_buckets_lru() 215 static void invalidate_buckets_fifo(struct cache *ca) in invalidate_buckets_fifo() 238 static void invalidate_buckets_random(struct cache *ca) in invalidate_buckets_random() 263 static void invalidate_buckets(struct cache *ca) in invalidate_buckets() 298 static int bch_allocator_push(struct cache *ca, long bucket) in bch_allocator_push() [all …]
|
/linux-4.1.27/arch/metag/kernel/ |
D | cachepart.c | 58 static int get_thread_cache_size(unsigned int cache, int thread_id) in get_thread_cache_size() argument 64 isEnabled = (cache == DCACHE ? metag_in32(MMCU_DCACHE_CTRL_ADDR) & 0x1 : in get_thread_cache_size() 70 cache_size = (cache == DCACHE ? get_global_dcache_size() : in get_thread_cache_size() 74 cache_size = (cache == DCACHE ? get_dcache_size() : in get_thread_cache_size() 77 t_cache_part = (cache == DCACHE ? in get_thread_cache_size()
|
/linux-4.1.27/arch/mips/alchemy/common/ |
D | sleeper.S | 99 cache 0x14, 0(t0) 100 cache 0x14, 32(t0) 101 cache 0x14, 64(t0) 102 cache 0x14, 96(t0) 125 cache 0x14, 0(t0) 126 cache 0x14, 32(t0) 127 cache 0x14, 64(t0) 128 cache 0x14, 96(t0) 168 1: cache 0x14, 0(t0)
|
/linux-4.1.27/net/ipv6/ |
D | ip6mr.c | 112 struct sk_buff *skb, struct mfc6_cache *cache); 347 struct list_head *cache; member 360 it->cache = &mrt->mfc6_cache_array[it->ct]; in ipmr_mfc_seq_idx() 361 list_for_each_entry(mfc, it->cache, list) in ipmr_mfc_seq_idx() 368 it->cache = &mrt->mfc6_unres_queue; in ipmr_mfc_seq_idx() 369 list_for_each_entry(mfc, it->cache, list) in ipmr_mfc_seq_idx() 374 it->cache = NULL; in ipmr_mfc_seq_idx() 515 if (mfc->list.next != it->cache) in ipmr_mfc_seq_next() 518 if (it->cache == &mrt->mfc6_unres_queue) in ipmr_mfc_seq_next() 521 BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]); in ipmr_mfc_seq_next() [all …]
|
/linux-4.1.27/arch/arm/boot/compressed/ |
D | head.S | 622 mcr p15, 0, r0, c2, c0, 0 @ D-cache on 623 mcr p15, 0, r0, c2, c0, 1 @ I-cache on 651 mcr p15, 0, r0, c2, c0, 0 @ cache on 658 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 671 mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3 745 orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement 768 orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement 796 mcr p15, 0, r0, c7, c7, 0 @ Invalidate whole cache 800 orr r0, r0, #0x1000 @ I-cache enable 815 .align 5 @ cache line aligned [all …]
|
D | head-xscale.S | 16 @ Data cache might be active. 17 @ Be sure to flush kernel binary out of the cache, 20 @ memory to be sure we hit the same cache.
|
/linux-4.1.27/mm/kasan/ |
D | kasan.c | 310 void kasan_unpoison_object_data(struct kmem_cache *cache, void *object) in kasan_unpoison_object_data() argument 312 kasan_unpoison_shadow(object, cache->object_size); in kasan_unpoison_object_data() 315 void kasan_poison_object_data(struct kmem_cache *cache, void *object) in kasan_poison_object_data() argument 318 round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE), in kasan_poison_object_data() 322 void kasan_slab_alloc(struct kmem_cache *cache, void *object) in kasan_slab_alloc() argument 324 kasan_kmalloc(cache, object, cache->object_size); in kasan_slab_alloc() 327 void kasan_slab_free(struct kmem_cache *cache, void *object) in kasan_slab_free() argument 329 unsigned long size = cache->object_size; in kasan_slab_free() 333 if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU)) in kasan_slab_free() 339 void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size) in kasan_kmalloc() argument [all …]
|
D | report.c | 110 struct kmem_cache *cache = page->slab_cache; in print_address_description() local 113 object = virt_to_obj(cache, page_address(page), addr); in print_address_description() 115 page->objects * cache->size; in print_address_description() 120 object_err(cache, page, object, in print_address_description()
|
/linux-4.1.27/arch/m32r/boot/ |
D | setup.S | 67 ldi r1, #0x0101 ; cache on (with invalidation) 68 ; ldi r1, #0x00 ; cache off 73 ldi r1, #0x73 ; cache on (with invalidation) 74 ; ldi r1, #0x00 ; cache off 78 ldi r1, #0x101 ; cache on (with invalidation) 79 ; ldi r1, #0x00 ; cache off 93 ldi r1, #0x703 ; cache on (with invalidation)
|
/linux-4.1.27/net/ipv4/ |
D | ipmr.c | 129 struct sk_buff *skb, struct mfc_cache *cache, 700 static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache, in ipmr_update_thresholds() argument 705 cache->mfc_un.res.minvif = MAXVIFS; in ipmr_update_thresholds() 706 cache->mfc_un.res.maxvif = 0; in ipmr_update_thresholds() 707 memset(cache->mfc_un.res.ttls, 255, MAXVIFS); in ipmr_update_thresholds() 712 cache->mfc_un.res.ttls[vifi] = ttls[vifi]; in ipmr_update_thresholds() 713 if (cache->mfc_un.res.minvif > vifi) in ipmr_update_thresholds() 714 cache->mfc_un.res.minvif = vifi; in ipmr_update_thresholds() 715 if (cache->mfc_un.res.maxvif <= vifi) in ipmr_update_thresholds() 716 cache->mfc_un.res.maxvif = vifi + 1; in ipmr_update_thresholds() [all …]
|
/linux-4.1.27/net/sunrpc/ |
D | auth.c | 399 rpcauth_clear_credcache(struct rpc_cred_cache *cache) in rpcauth_clear_credcache() argument 404 unsigned int hashsize = 1U << cache->hashbits; in rpcauth_clear_credcache() 408 spin_lock(&cache->lock); in rpcauth_clear_credcache() 410 head = &cache->hashtable[i]; in rpcauth_clear_credcache() 422 spin_unlock(&cache->lock); in rpcauth_clear_credcache() 433 struct rpc_cred_cache *cache = auth->au_credcache; in rpcauth_destroy_credcache() local 435 if (cache) { in rpcauth_destroy_credcache() 437 rpcauth_clear_credcache(cache); in rpcauth_destroy_credcache() 438 kfree(cache->hashtable); in rpcauth_destroy_credcache() 439 kfree(cache); in rpcauth_destroy_credcache() [all …]
|
/linux-4.1.27/fs/ecryptfs/ |
D | main.c | 663 struct kmem_cache **cache; member 669 .cache = &ecryptfs_auth_tok_list_item_cache, 674 .cache = &ecryptfs_file_info_cache, 679 .cache = &ecryptfs_dentry_info_cache, 684 .cache = &ecryptfs_inode_info_cache, 690 .cache = &ecryptfs_sb_info_cache, 695 .cache = &ecryptfs_header_cache, 700 .cache = &ecryptfs_xattr_cache, 705 .cache = &ecryptfs_key_record_cache, 710 .cache = &ecryptfs_key_sig_cache, [all …]
|
/linux-4.1.27/net/irda/ |
D | irlmp_frame.c | 394 lap->cache.valid = FALSE; in irlmp_update_cache() 396 lap->cache.dlsap_sel = lsap->dlsap_sel; in irlmp_update_cache() 397 lap->cache.slsap_sel = lsap->slsap_sel; in irlmp_update_cache() 398 lap->cache.lsap = lsap; in irlmp_update_cache() 399 lap->cache.valid = TRUE; in irlmp_update_cache() 433 if ((self->cache.valid) && in irlmp_find_lsap() 434 (self->cache.slsap_sel == slsap_sel) && in irlmp_find_lsap() 435 (self->cache.dlsap_sel == dlsap_sel)) in irlmp_find_lsap() 437 return self->cache.lsap; in irlmp_find_lsap()
|
/linux-4.1.27/arch/mips/mm/ |
D | cex-sb1.S | 121 cache Index_Invalidate_I,(0<<13)(k0) 122 cache Index_Invalidate_I,(1<<13)(k0) 123 cache Index_Invalidate_I,(2<<13)(k0) 124 cache Index_Invalidate_I,(3<<13)(k0)
|
/linux-4.1.27/lib/ |
D | test_kasan.c | 216 struct kmem_cache *cache = kmem_cache_create("test_cache", in kmem_cache_oob() local 219 if (!cache) { in kmem_cache_oob() 224 p = kmem_cache_alloc(cache, GFP_KERNEL); in kmem_cache_oob() 227 kmem_cache_destroy(cache); in kmem_cache_oob() 232 kmem_cache_free(cache, p); in kmem_cache_oob() 233 kmem_cache_destroy(cache); in kmem_cache_oob()
|
/linux-4.1.27/arch/powerpc/platforms/ps3/ |
D | spu.c | 125 struct priv1_cache cache; member 368 spu_pdata(spu)->cache.sr1 = 0x33; in ps3_create_spu() 498 spu_pdata(spu)->cache.masks[class] = mask; in int_mask_set() 500 spu_pdata(spu)->cache.masks[class]); in int_mask_set() 505 return spu_pdata(spu)->cache.masks[class]; in int_mask_get() 556 BUG_ON((sr1 & allowed) != (spu_pdata(spu)->cache.sr1 & allowed)); in mfc_sr1_set() 558 spu_pdata(spu)->cache.sr1 = sr1; in mfc_sr1_set() 562 spu_pdata(spu)->cache.sr1); in mfc_sr1_set() 567 return spu_pdata(spu)->cache.sr1; in mfc_sr1_get() 572 spu_pdata(spu)->cache.tclass_id = tclass_id; in mfc_tclass_id_set() [all …]
|
/linux-4.1.27/sound/soc/codecs/ |
D | wm_hubs.c | 157 struct wm_hubs_dcs_cache *cache; in wm_hubs_dcs_cache_get() local 166 list_for_each_entry(cache, &hubs->dcs_cache, list) { in wm_hubs_dcs_cache_get() 167 if (cache->left != left || cache->right != right) in wm_hubs_dcs_cache_get() 170 *entry = cache; in wm_hubs_dcs_cache_get() 180 struct wm_hubs_dcs_cache *cache; in wm_hubs_dcs_cache_set() local 185 cache = devm_kzalloc(codec->dev, sizeof(*cache), GFP_KERNEL); in wm_hubs_dcs_cache_set() 186 if (!cache) in wm_hubs_dcs_cache_set() 189 cache->left = snd_soc_read(codec, WM8993_LEFT_OUTPUT_VOLUME); in wm_hubs_dcs_cache_set() 190 cache->left &= WM8993_HPOUT1L_VOL_MASK; in wm_hubs_dcs_cache_set() 192 cache->right = snd_soc_read(codec, WM8993_RIGHT_OUTPUT_VOLUME); in wm_hubs_dcs_cache_set() [all …]
|
D | wm9705.c | 207 u16 *cache = codec->reg_cache; in ac97_read() local 220 return cache[reg]; in ac97_read() 228 u16 *cache = codec->reg_cache; in ac97_write() local 233 cache[reg] = val; in ac97_write() 325 u16 *cache = codec->reg_cache; in wm9705_soc_resume() local 332 soc_ac97_ops->write(ac97, i, cache[i>>1]); in wm9705_soc_resume()
|
/linux-4.1.27/fs/afs/ |
D | file.c | 144 ret = fscache_read_or_alloc_page(vnode->cache, in afs_page_filler() 182 fscache_uncache_page(vnode->cache, page); in afs_page_filler() 193 fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) { in afs_page_filler() 194 fscache_uncache_page(vnode->cache, page); in afs_page_filler() 260 ret = fscache_read_or_alloc_pages(vnode->cache, in afs_readpages() 326 fscache_wait_on_page_write(vnode->cache, page); in afs_invalidatepage() 327 fscache_uncache_page(vnode->cache, page); in afs_invalidatepage() 361 if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) { in afs_releasepage()
|
D | Makefile | 5 afs-cache-$(CONFIG_AFS_FSCACHE) := cache.o 8 $(afs-cache-y) \
|
/linux-4.1.27/arch/tile/kernel/ |
D | tlb.c | 57 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; in flush_tlb_page_mm() local 58 flush_remote(0, cache, mm_cpumask(mm), in flush_tlb_page_mm() 73 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; in flush_tlb_range() local 74 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size, in flush_tlb_range()
|
/linux-4.1.27/arch/mips/include/asm/ |
D | r4kcache.h | 559 static inline void extra##blast_##pfx##cache##lsize(void) \ 572 cache##lsize##_unroll32(addr|ws, indexop); \ 577 static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \ 585 cache##lsize##_unroll32(start, hitop); \ 592 static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \ 606 cache##lsize##_unroll32(addr|ws, indexop); \ 633 static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \ 641 cache##lsize##_unroll32_user(start, hitop); \ 660 static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \ 687 static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
|
/linux-4.1.27/fs/nfs/ |
D | dir.c | 2105 struct nfs_access_entry *cache; in nfs_access_free_list() local 2108 cache = list_entry(head->next, struct nfs_access_entry, lru); in nfs_access_free_list() 2109 list_del(&cache->lru); in nfs_access_free_list() 2110 nfs_access_free_entry(cache); in nfs_access_free_list() 2119 struct nfs_access_entry *cache; in nfs_do_access_cache_scan() local 2132 cache = list_entry(nfsi->access_cache_entry_lru.next, in nfs_do_access_cache_scan() 2134 list_move(&cache->lru, &head); in nfs_do_access_cache_scan() 2135 rb_erase(&cache->rb_node, &nfsi->access_cache); in nfs_do_access_cache_scan() 2243 struct nfs_access_entry *cache; in nfs_access_get_cached() local 2249 cache = nfs_access_search_rbtree(inode, cred); in nfs_access_get_cached() [all …]
|
/linux-4.1.27/arch/arm64/boot/dts/arm/ |
D | juno.dts | 42 next-level-cache = <&A57_L2>; 50 next-level-cache = <&A57_L2>; 58 next-level-cache = <&A53_L2>; 66 next-level-cache = <&A53_L2>; 74 next-level-cache = <&A53_L2>; 82 next-level-cache = <&A53_L2>; 86 compatible = "cache"; 90 compatible = "cache";
|
D | rtsm_ve-aemv8a.dts | 40 next-level-cache = <&L2_0>; 48 next-level-cache = <&L2_0>; 56 next-level-cache = <&L2_0>; 64 next-level-cache = <&L2_0>; 68 compatible = "cache";
|
D | foundation-v8.dts | 37 next-level-cache = <&L2_0>; 45 next-level-cache = <&L2_0>; 53 next-level-cache = <&L2_0>; 61 next-level-cache = <&L2_0>; 65 compatible = "cache";
|
/linux-4.1.27/drivers/staging/lustre/lustre/llite/ |
D | xattr_cache.c | 77 static int ll_xattr_cache_find(struct list_head *cache, in ll_xattr_cache_find() argument 85 list_for_each_entry(entry, cache, xe_list) { in ll_xattr_cache_find() 109 static int ll_xattr_cache_add(struct list_head *cache, in ll_xattr_cache_add() argument 118 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { in ll_xattr_cache_add() 140 list_add(&xattr->xe_list, cache); in ll_xattr_cache_add() 162 static int ll_xattr_cache_del(struct list_head *cache, in ll_xattr_cache_del() argument 171 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) { in ll_xattr_cache_del() 193 static int ll_xattr_cache_list(struct list_head *cache, in ll_xattr_cache_list() argument 202 list_for_each_entry_safe(xattr, tmp, cache, xe_list) { in ll_xattr_cache_list()
|
/linux-4.1.27/Documentation/devicetree/bindings/cpufreq/ |
D | arm_big_little_dt.txt | 31 next-level-cache = <&L2>; 44 next-level-cache = <&L2>; 50 next-level-cache = <&L2>; 63 next-level-cache = <&L2>;
|
D | cpufreq-dt.txt | 34 next-level-cache = <&L2>; 50 next-level-cache = <&L2>; 56 next-level-cache = <&L2>; 62 next-level-cache = <&L2>;
|
/linux-4.1.27/Documentation/block/ |
D | writeback_cache_control.txt | 2 Explicit volatile write back cache control 17 a forced cache flush, and the Force Unit Access (FUA) flag for requests. 20 Explicit cache flushes 24 the filesystem and will make sure the volatile cache of the storage device 28 set on an otherwise empty bio structure, which causes only an explicit cache 30 the blkdev_issue_flush() helper for a pure cache flush. 45 worry if the underlying devices need any explicit cache flushing and how 57 drivers that do not have a volatile cache the REQ_FLUSH and REQ_FUA bits
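The writeback_cache_control.txt excerpt above explains that an empty bio with the REQ_FLUSH bit forces the device's volatile write cache out, and that blkdev_issue_flush() is the helper for a pure cache flush. Below is a minimal sketch assuming the 4.1-era signature of blkdev_issue_flush(); the wrapper name is hypothetical.

	#include <linux/blkdev.h>

	/*
	 * Sketch: flush the device's volatile write cache and wait for it to
	 * complete.  In this kernel generation blkdev_issue_flush() also takes
	 * an optional error_sector pointer, which is not needed here.
	 */
	static int demo_flush_volatile_cache(struct block_device *bdev)
	{
		return blkdev_issue_flush(bdev, GFP_KERNEL, NULL);
	}

Filesystems that need ordering for individual writes would instead set REQ_FLUSH and/or REQ_FUA on the bio itself, as the excerpt notes, rather than issuing a separate flush.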
|
/linux-4.1.27/drivers/staging/lustre/lustre/osc/ |
D | osc_page.c | 595 struct cl_client_cache *cache = cli->cl_cache; in osc_cache_too_much() local 605 if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) { in osc_cache_too_much() 608 tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users); in osc_cache_too_much() 816 struct cl_client_cache *cache = cli->cl_cache; in osc_lru_reclaim() local 820 LASSERT(cache != NULL); in osc_lru_reclaim() 821 LASSERT(!list_empty(&cache->ccc_lru)); in osc_lru_reclaim() 837 spin_lock(&cache->ccc_lru_lock); in osc_lru_reclaim() 838 cache->ccc_lru_shrinkers++; in osc_lru_reclaim() 839 list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru); in osc_lru_reclaim() 841 max_scans = atomic_read(&cache->ccc_users); in osc_lru_reclaim() [all …]
|
/linux-4.1.27/drivers/hwmon/ |
D | applesmc.c | 142 struct applesmc_entry *cache; /* cached key entries */ member 342 struct applesmc_entry *cache = &smcreg.cache[index]; in applesmc_get_entry_by_index() local 347 if (cache->valid) in applesmc_get_entry_by_index() 348 return cache; in applesmc_get_entry_by_index() 352 if (cache->valid) in applesmc_get_entry_by_index() 362 memcpy(cache->key, key, 4); in applesmc_get_entry_by_index() 363 cache->len = info[0]; in applesmc_get_entry_by_index() 364 memcpy(cache->type, &info[1], 4); in applesmc_get_entry_by_index() 365 cache->flags = info[5]; in applesmc_get_entry_by_index() 366 cache->valid = 1; in applesmc_get_entry_by_index() [all …]
|
/linux-4.1.27/drivers/net/wireless/prism54/ |
D | oid_mgt.c | 424 void *cache, *_data = data; in mgt_set_request() local 435 cache = priv->mib[n]; in mgt_set_request() 436 cache += (cache ? extra * dlen : 0); in mgt_set_request() 441 _data = cache; in mgt_set_request() 450 if (cache) in mgt_set_request() 462 } else if (!cache) in mgt_set_request() 465 if (cache) { in mgt_set_request() 467 memcpy(cache, _data, dlen); in mgt_set_request() 524 void *cache, *_res = NULL; in mgt_get_request() local 537 cache = priv->mib[n]; in mgt_get_request() [all …]
|
/linux-4.1.27/arch/x86/mm/ |
D | pat.c | 152 enum page_cache_mode cache; in pat_get_cache_mode() local 156 case PAT_UC: cache = CM(UC); cache_mode = "UC "; break; in pat_get_cache_mode() 157 case PAT_WC: cache = CM(WC); cache_mode = "WC "; break; in pat_get_cache_mode() 158 case PAT_WT: cache = CM(WT); cache_mode = "WT "; break; in pat_get_cache_mode() 159 case PAT_WP: cache = CM(WP); cache_mode = "WP "; break; in pat_get_cache_mode() 160 case PAT_WB: cache = CM(WB); cache_mode = "WB "; break; in pat_get_cache_mode() 161 case PAT_UC_MINUS: cache = CM(UC_MINUS); cache_mode = "UC- "; break; in pat_get_cache_mode() 162 default: cache = CM(WB); cache_mode = "WB "; break; in pat_get_cache_mode() 167 return cache; in pat_get_cache_mode() 180 enum page_cache_mode cache; in pat_init_cache_modes() local [all …]
|
/linux-4.1.27/drivers/macintosh/ |
D | windfarm_smu_sat.c | 41 u8 cache[16]; member 131 err = i2c_smbus_read_i2c_block_data(sat->i2c, 0x3f, 16, sat->cache); in wf_sat_read_cache() 140 DBG(" %.2x", sat->cache[i]); in wf_sat_read_cache() 165 val = ((sat->cache[i] << 8) + sat->cache[i+1]) << sens->shift; in wf_sat_sensor_get() 169 val = (val * ((sat->cache[i] << 8) + sat->cache[i+1])) >> 4; in wf_sat_sensor_get()
|
/linux-4.1.27/Documentation/cris/ |
D | README | 30 The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad 61 I-cache and 16kB D-cache and with a wide range of device interfaces 93 Dentry-cache hash table entries: 2048 (order: 1, 16384 bytes) 94 Buffer-cache hash table entries: 2048 (order: 0, 8192 bytes) 95 Page-cache hash table entries: 2048 (order: 0, 8192 bytes) 98 Inode-cache hash table entries: 1024 (order: 0, 8192 bytes) 104 kmem_create: Forcing size word alignment - file lock cache 127 IP: routing cache hash table of 1024 buckets, 8Kbytes 151 cache size : 8 kB
|
/linux-4.1.27/Documentation/filesystems/ |
D | 9p.txt | 71 cache=mode specifies a caching policy. By default, no caches are used. 72 none = default no cache policy, metadata and data 77 cache backend. 78 mmap = minimal cache that is only used for read-write 79 mmap. Nothing else is cached, like cache=none 132 cachetag cache tag to use the specified persistent cache. 133 cache tags for existing cache sessions can be listed at 134 /sys/fs/9p/caches. (applies only to cache=fscache)
|
/linux-4.1.27/drivers/gpu/drm/shmobile/ |
D | shmob_drm_crtc.c | 273 if (scrtc->cache) { in shmob_drm_crtc_stop() 274 sh_mobile_meram_cache_free(sdev->meram, scrtc->cache); in shmob_drm_crtc_stop() 275 scrtc->cache = NULL; in shmob_drm_crtc_stop() 325 if (scrtc->cache) in shmob_drm_crtc_compute_base() 326 sh_mobile_meram_cache_update(sdev->meram, scrtc->cache, in shmob_drm_crtc_compute_base() 384 void *cache; in shmob_drm_crtc_mode_set() local 400 if (scrtc->cache) { in shmob_drm_crtc_mode_set() 401 sh_mobile_meram_cache_free(sdev->meram, scrtc->cache); in shmob_drm_crtc_mode_set() 402 scrtc->cache = NULL; in shmob_drm_crtc_mode_set() 405 cache = sh_mobile_meram_cache_alloc(sdev->meram, mdata, in shmob_drm_crtc_mode_set() [all …]
|
/linux-4.1.27/Documentation/xtensa/ |
D | atomctl.txt | 11 The Core comes up with a default value for the three types of cache ops: 21 For systems without a coherent cache controller, non-MX, we always 31 with the cache being bypassed; for example studying cache alias problems.
|
/linux-4.1.27/arch/m32r/boot/compressed/ |
D | head.S | 77 ; Touch memory for the no-write-allocating cache. 138 ldi r1, 0xd0 ; invalidate i-cache, copy back d-cache 148 ldi r1, 0x0700 ; invalidate i-cache, copy back d-cache
|
/linux-4.1.27/fs/9p/ |
D | vfs_inode_dotl.c | 332 if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) && in v9fs_vfs_atomic_open_dotl() 356 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) in v9fs_vfs_atomic_open_dotl() 436 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { in v9fs_vfs_mkdir_dotl() 482 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { in v9fs_vfs_getattr_dotl() 712 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { in v9fs_vfs_symlink_dotl() 790 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { in v9fs_vfs_link_dotl() 871 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { in v9fs_vfs_mknod_dotl() 967 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) in v9fs_refresh_inode_dotl()
|
D | vfs_super.c | 89 if (v9ses->cache) in v9fs_fill_super() 93 if (!v9ses->cache) in v9fs_fill_super() 147 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) in v9fs_mount() 285 if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) in v9fs_drop_inode()
|
/linux-4.1.27/arch/arm/kvm/ |
D | trace.h | 282 TP_PROTO(unsigned long vcpu_pc, bool cache), 283 TP_ARGS(vcpu_pc, cache), 287 __field( bool, cache ) 292 __entry->cache = cache; 296 __entry->vcpu_pc, __entry->cache ? "on" : "off")
|
D | mmu.c | 125 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, in mmu_topup_memory_cache() argument 131 if (cache->nobjs >= min) in mmu_topup_memory_cache() 133 while (cache->nobjs < max) { in mmu_topup_memory_cache() 137 cache->objects[cache->nobjs++] = page; in mmu_topup_memory_cache() 833 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, in stage2_get_pud() argument 841 if (!cache) in stage2_get_pud() 843 pud = mmu_memory_cache_alloc(cache); in stage2_get_pud() 851 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, in stage2_get_pmd() argument 857 pud = stage2_get_pud(kvm, cache, addr); in stage2_get_pmd() 859 if (!cache) in stage2_get_pmd() [all …]
|
/linux-4.1.27/arch/mn10300/include/asm/ |
D | mmu_context.h | 87 unsigned long mc = MMU_NO_CONTEXT, cache; in get_mmu_context() local 90 cache = mmu_context_cache[smp_processor_id()]; in get_mmu_context() 94 if ((mc ^ cache) & MMU_CONTEXT_VERSION_MASK) in get_mmu_context()
|
/linux-4.1.27/arch/mips/cavium-octeon/ |
D | Kconfig | 14 int "Number of L1 cache lines reserved for CVMSEG memory" 19 local memory; the larger CVMSEG is, the smaller the cache is. 20 This selects the size of CVMSEG LM, which is in cache blocks. The 21 legal range is from zero to 54 cache blocks (i.e. CVMSEG LM is 41 Enable locking parts of the kernel into the L2 cache.
|
/linux-4.1.27/arch/m32r/mm/ |
D | Makefile | 6 obj-y := init.o fault.o mmu.o extable.o ioremap.o cache.o page.o 8 obj-y := init.o fault-nommu.o mmu.o extable.o ioremap-nommu.o cache.o page.o
|
/linux-4.1.27/arch/blackfin/mach-common/ |
D | Makefile | 6 cache.o cache-c.o entry.o head.o \
|
/linux-4.1.27/arch/nios2/platform/ |
D | Kconfig.platform | 12 or cache bits set. 99 bool "Custom cache settings" 101 This option allows you to tweak the cache settings used during early 113 Maximum possible data cache size. 120 Minimum possible data cache line size. 127 Maximum possible instruction cache size.
|
/linux-4.1.27/tools/perf/tests/attr/ |
D | test-record-group-sampling | 3 args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1 16 # cache-misses
|
/linux-4.1.27/arch/powerpc/perf/ |
D | power8-pmu.c | 287 unsigned int unit, pmc, cache, ebb; in power8_get_constraint() local 297 cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK; in power8_get_constraint() 337 if (cache & 0x7) in power8_get_constraint() 342 value |= CNST_L1_QUAL_VAL(cache); in power8_get_constraint() 405 unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val; in power8_compute_mmcr() local 445 cache = event[i] >> EVENT_CACHE_SEL_SHIFT; in power8_compute_mmcr() 446 mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT; in power8_compute_mmcr() 447 cache >>= 1; in power8_compute_mmcr() 448 mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; in power8_compute_mmcr()
|
/linux-4.1.27/arch/mips/sgi-ip22/ |
D | Platform | 28 ifeq ($(call cc-option-yn,-mr10k-cache-barrier=store), n) 29 $(error gcc doesn't support needed option -mr10k-cache-barrier=store) 33 cflags-$(CONFIG_SGI_IP28) += -mr10k-cache-barrier=store -I$(srctree)/arch/mips/include/asm/mach-ip28
|
/linux-4.1.27/drivers/usb/core/ |
D | quirks.c | 244 struct usb_interface_cache *cache; in usb_match_any_interface() local 247 cache = cfg->intf_cache[j]; in usb_match_any_interface() 248 if (cache->num_altsetting == 0) in usb_match_any_interface() 251 intf = &cache->altsetting[0]; in usb_match_any_interface()
|
/linux-4.1.27/tools/usb/usbip/ |
D | INSTALL | 27 It can also use an optional file (typically called `config.cache' 28 and enabled with `--cache-file=config.cache' or simply `-C') that saves 31 cache files. 36 be considered for the next release. If you are using the cache, and at 37 some point `config.cache' contains results you don't want to keep, you 215 `--cache-file=FILE' 216 Enable the cache: use and save the results of the tests in FILE, 217 traditionally `config.cache'. FILE defaults to `/dev/null' to 220 `--config-cache' 222 Alias for `--cache-file=config.cache'.
|
/linux-4.1.27/Documentation/devicetree/bindings/arc/ |
D | pct.txt | 4 CPU and cache events like cache misses and hits. Like conventional PCT there
|