/linux-4.4.14/arch/powerpc/mm/

  tlb_hash64.c
      47  struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);   in hpte_need_flush() [local]
      54  i = batch->index;   in hpte_need_flush()
     100  if (!batch->active) {   in hpte_need_flush()
     116  if (i != 0 && (mm != batch->mm || batch->psize != psize ||   in hpte_need_flush()
     117  batch->ssize != ssize)) {   in hpte_need_flush()
     118  __flush_tlb_pending(batch);   in hpte_need_flush()
     122  batch->mm = mm;   in hpte_need_flush()
     123  batch->psize = psize;   in hpte_need_flush()
     124  batch->ssize = ssize;   in hpte_need_flush()
     126  batch->pte[i] = rpte;   in hpte_need_flush()
     [all …]

  hash_native_64.c
     644  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);   in native_flush_hash_range() [local]
     645  unsigned long psize = batch->psize;   in native_flush_hash_range()
     646  int ssize = batch->ssize;   in native_flush_hash_range()
     652  vpn = batch->vpn[i];   in native_flush_hash_range()
     653  pte = batch->pte[i];   in native_flush_hash_range()
     678  vpn = batch->vpn[i];   in native_flush_hash_range()
     679  pte = batch->pte[i];   in native_flush_hash_range()
     695  vpn = batch->vpn[i];   in native_flush_hash_range()
     696  pte = batch->pte[i];   in native_flush_hash_range()

  hugetlbpage.c
     475  struct hugepd_freelist *batch =   in hugepd_free_rcu_callback() [local]
     479  for (i = 0; i < batch->index; i++)   in hugepd_free_rcu_callback()
     480  kmem_cache_free(hugepte_cache, batch->ptes[i]);   in hugepd_free_rcu_callback()
     482  free_page((unsigned long)batch);   in hugepd_free_rcu_callback()

  hash_utils_64.c
    1410  struct ppc64_tlb_batch *batch =   in flush_hash_range() [local]
    1414  flush_hash_page(batch->vpn[i], batch->pte[i],   in flush_hash_range()
    1415  batch->psize, batch->ssize, local);   in flush_hash_range()
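All four powerpc/mm hits revolve around one per-CPU ppc64_tlb_batch: hpte_need_flush() queues stale translations and drains the queue whenever the batch fills or the mm/psize/ssize attributes change, and the flush_hash_range() paths consume the queued entries. A minimal, kernel-independent sketch of that queue-then-drain pattern (all names and the BATCH_MAX size here are illustrative, not the kernel's):

    #include <stdio.h>

    #define BATCH_MAX 16

    struct tlb_batch {
        int active;                    /* inside a lazy-MMU section? */
        int index;                     /* entries queued so far */
        void *mm;                      /* attributes that must match ... */
        int psize, ssize;              /* ... for entries to share one flush */
        unsigned long vpn[BATCH_MAX];  /* the queued translations */
    };

    static void flush_pending(struct tlb_batch *b)
    {
        if (b->index)
            printf("flushing %d entries\n", b->index);
        b->index = 0;
    }

    /* Queue one stale translation; drain first if it cannot share the batch. */
    static void need_flush(struct tlb_batch *b, void *mm, int psize, int ssize,
                           unsigned long vpn)
    {
        if (!b->active) {                 /* not batching: flush immediately */
            printf("flushing 1 entry\n");
            return;
        }
        if (b->index != 0 &&
            (mm != b->mm || psize != b->psize || ssize != b->ssize))
            flush_pending(b);             /* attribute change forces a drain */
        b->mm = mm;
        b->psize = psize;
        b->ssize = ssize;
        b->vpn[b->index++] = vpn;
        if (b->index == BATCH_MAX)        /* full batch: drain now */
            flush_pending(b);
    }

    int main(void)
    {
        struct tlb_batch b = { .active = 1 };
        int mm1, mm2;

        need_flush(&b, &mm1, 0, 0, 0x1000);
        need_flush(&b, &mm1, 0, 0, 0x2000);
        need_flush(&b, &mm2, 0, 0, 0x3000);  /* new mm: drains the first two */
        flush_pending(&b);                   /* like leaving lazy-MMU mode */
        return 0;
    }

The attribute check mirrors hit lines 116-118 above: entries that could not be flushed by one call are never allowed to share a batch.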
/linux-4.4.14/drivers/gpu/drm/vmwgfx/

  vmwgfx_mob.c
     238  struct vmw_otable_batch *batch)   in vmw_otable_batch_setup() [argument]
     242  struct vmw_otable *otables = batch->otables;   in vmw_otable_batch_setup()
     247  for (i = 0; i < batch->num_otables; ++i) {   in vmw_otable_batch_setup()
     260  &batch->otable_bo);   in vmw_otable_batch_setup()
     265  ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL);   in vmw_otable_batch_setup()
     267  ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);   in vmw_otable_batch_setup()
     270  ret = vmw_bo_map_dma(batch->otable_bo);   in vmw_otable_batch_setup()
     274  ttm_bo_unreserve(batch->otable_bo);   in vmw_otable_batch_setup()
     277  for (i = 0; i < batch->num_otables; ++i) {   in vmw_otable_batch_setup()
     278  if (!batch->otables[i].enabled)   in vmw_otable_batch_setup()
     [all …]
/linux-4.4.14/arch/powerpc/include/asm/

  tlbflush.h
     104  extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
     110  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);   in arch_enter_lazy_mmu_mode() [local]
     112  batch->active = 1;   in arch_enter_lazy_mmu_mode()
     117  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);   in arch_leave_lazy_mmu_mode() [local]
     119  if (batch->index)   in arch_leave_lazy_mmu_mode()
     120  __flush_tlb_pending(batch);   in arch_leave_lazy_mmu_mode()
     121  batch->active = 0;   in arch_leave_lazy_mmu_mode()
/linux-4.4.14/drivers/gpu/drm/i915/

  intel_lrc.c
    1114  #define wa_ctx_emit(batch, index, cmd) \   [argument]
    1120  batch[__index] = (cmd); \
    1141  uint32_t *const batch,   in gen8_emit_flush_coherentl3_wa() [argument]
    1155  wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |   in gen8_emit_flush_coherentl3_wa()
    1157  wa_ctx_emit(batch, index, GEN8_L3SQCREG4);   in gen8_emit_flush_coherentl3_wa()
    1158  wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256);   in gen8_emit_flush_coherentl3_wa()
    1159  wa_ctx_emit(batch, index, 0);   in gen8_emit_flush_coherentl3_wa()
    1161  wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));   in gen8_emit_flush_coherentl3_wa()
    1162  wa_ctx_emit(batch, index, GEN8_L3SQCREG4);   in gen8_emit_flush_coherentl3_wa()
    1163  wa_ctx_emit(batch, index, l3sqc4_flush);   in gen8_emit_flush_coherentl3_wa()
    [all …]

  i915_gem_render_state.c
      85  #define OUT_BATCH(batch, i, val) \   [argument]
      91  (batch)[(i)++] = (val); \
     110  u32 s = rodata->batch[i];   in render_state_setup()
     117  rodata->batch[i + 1] != 0) {   in render_state_setup()

  intel_renderstate.h
      37  .batch = gen ## _g ## _null_state_batch, \

  i915_gem_render_state.h
      31  const u32 *batch;   [member]
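wa_ctx_emit() and OUT_BATCH() above are the same idiom: write one dword of a GPU command stream into the batch buffer, advance the index, and guard against running off the end. A hedged sketch of such an emit macro (the buffer size and opcode values are made up; the real macros also WARN and return an error code):

    #include <stdint.h>
    #include <stdio.h>

    #define BATCH_DWORDS 8   /* illustrative; the driver checks a page worth */

    /* Append one command dword at index, or report overflow. */
    #define batch_emit(batch, index, cmd) do {          \
            if ((index) >= BATCH_DWORDS) {              \
                fprintf(stderr, "batch overflow\n");    \
                break;                                  \
            }                                           \
            (batch)[(index)++] = (cmd);                 \
    } while (0)

    int main(void)
    {
        uint32_t batch[BATCH_DWORDS];
        uint32_t index = 0;

        batch_emit(batch, index, 0x11000001u);  /* made-up opcode dword */
        batch_emit(batch, index, 0x0000b118u);  /* made-up register offset */
        batch_emit(batch, index, 0x00000040u);  /* made-up value */
        printf("emitted %u dwords\n", index);
        return 0;
    }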
/linux-4.4.14/lib/

  percpu_counter.c
      75  void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)   in __percpu_counter_add() [argument]
      81  if (count >= batch || count <= -batch) {   in __percpu_counter_add()
     200  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)   in __percpu_counter_compare() [argument]
     206  if (abs(count - rhs) > (batch * num_online_cpus())) {   in __percpu_counter_compare()
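__percpu_counter_add() accumulates per-CPU deltas and folds them into the shared count only once a delta crosses the batch threshold, which is why __percpu_counter_compare() must allow an error margin of batch * num_online_cpus() on its fast path. A simplified, single-threaded sketch of the add path (plain array indexing stands in for this_cpu accesses; not the kernel API):

    #include <stdio.h>

    #define NCPUS 4

    struct pcpu_counter {
        long long count;       /* shared, slow-path total */
        int counters[NCPUS];   /* per-CPU fast-path deltas */
    };

    /* Cheap in the common case: touch only this CPU's slot. */
    static void counter_add(struct pcpu_counter *c, int cpu, int amount,
                            int batch)
    {
        int count = c->counters[cpu] + amount;

        if (count >= batch || count <= -batch) {
            c->count += count;       /* fold the delta into the shared total */
            c->counters[cpu] = 0;
        } else {
            c->counters[cpu] = count;
        }
    }

    int main(void)
    {
        struct pcpu_counter c = { 0 };
        int unfolded = 0;

        for (int i = 0; i < 100; i++)
            counter_add(&c, i % NCPUS, 1, 8);
        for (int cpu = 0; cpu < NCPUS; cpu++)
            unfolded += c.counters[cpu];
        printf("folded %lld, unfolded %d, true total %lld\n",
               c.count, unfolded, c.count + unfolded);
        return 0;
    }

Raising batch trades read accuracy for fewer updates of the shared cacheline; the xfs_mod_fdblocks() hits further down pick their batch per call for exactly that reason.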
/linux-4.4.14/mm/

  memory.c
     186  struct mmu_gather_batch *batch;   in tlb_next_batch() [local]
     188  batch = tlb->active;   in tlb_next_batch()
     189  if (batch->next) {   in tlb_next_batch()
     190  tlb->active = batch->next;   in tlb_next_batch()
     197  batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);   in tlb_next_batch()
     198  if (!batch)   in tlb_next_batch()
     202  batch->next = NULL;   in tlb_next_batch()
     203  batch->nr = 0;   in tlb_next_batch()
     204  batch->max = MAX_GATHER_BATCH;   in tlb_next_batch()
     206  tlb->active->next = batch;   in tlb_next_batch()
     [all …]
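tlb_next_batch() grows the mmu_gather by reusing an already-chained next batch when one exists, and only otherwise allocates a fresh one and links it in for later reuse. A malloc-based sketch of that reuse-or-allocate step (simplified: no GFP flags and no cap on the number of batches):

    #include <stdlib.h>

    #define MAX_GATHER_BATCH 16

    struct gather_batch {
        struct gather_batch *next;
        int nr, max;
        void *pages[MAX_GATHER_BATCH];
    };

    struct gather {
        struct gather_batch *active;
    };

    /* Advance to the next batch, reusing one before allocating. */
    static int next_batch(struct gather *tlb)
    {
        struct gather_batch *batch = tlb->active;

        if (batch->next) {                  /* a spare batch already exists */
            tlb->active = batch->next;
            return 1;
        }
        batch = calloc(1, sizeof(*batch));  /* kernel: __get_free_pages() */
        if (!batch)
            return 0;                       /* caller falls back to flushing */
        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;
        tlb->active->next = batch;          /* chain it for later reuse */
        tlb->active = batch;
        return 1;
    }

    int main(void)
    {
        struct gather_batch first = { .max = MAX_GATHER_BATCH };
        struct gather tlb = { .active = &first };
        int ok = next_batch(&tlb);

        free(first.next);
        return ok ? 0 : 1;
    }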
  mm_init.c
     152  s32 batch = max_t(s32, nr*2, 32);   in mm_compute_batch() [local]
     157  vm_committed_as_batch = max_t(s32, memsized_batch, batch);   in mm_compute_batch()

  page_alloc.c
    1880  int to_drain, batch;   in drain_zone_pages() [local]
    1883  batch = READ_ONCE(pcp->batch);   in drain_zone_pages()
    1884  to_drain = min(pcp->count, batch);   in drain_zone_pages()
    2082  unsigned long batch = READ_ONCE(pcp->batch);   in free_hot_cold_page() [local]
    2083  free_pcppages_bulk(zone, batch, pcp);   in free_hot_cold_page()
    2084  pcp->count -= batch;   in free_hot_cold_page()
    2228  pcp->batch, list,   in buffered_rmqueue()
    4296  static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
    4557  int batch;   in zone_batchsize() [local]
    4565  batch = zone->managed_pages / 1024;   in zone_batchsize()
    [all …]
  memcontrol.c
    2030  unsigned int batch = max(CHARGE_BATCH, nr_pages);   in try_charge() [local]
    2045  page_counter_try_charge(&memcg->memsw, batch, &counter)) {   in try_charge()
    2046  if (page_counter_try_charge(&memcg->memory, batch, &counter))   in try_charge()
    2049  page_counter_uncharge(&memcg->memsw, batch);   in try_charge()
    2056  if (batch > nr_pages) {   in try_charge()
    2057  batch = nr_pages;   in try_charge()
    2142  css_get_many(&memcg->css, batch);   in try_charge()
    2143  if (batch > nr_pages)   in try_charge()
    2144  refill_stock(memcg, batch - nr_pages);   in try_charge()
    2157  current->memcg_nr_pages_over_high += batch;   in try_charge()

  slab.c
     661  static void init_arraycache(struct array_cache *ac, int limit, int batch)   in init_arraycache() [argument]
     674  ac->batchcount = batch;   in init_arraycache()
     872  int batch, gfp_t gfp)   in __alloc_alien_cache() [argument]
     878  init_arraycache(&alc->ac, entries, batch);   in __alloc_alien_cache()

  vmscan.c
     278  long batch_size = shrinker->batch ? shrinker->batch   in do_shrink_slab()

  vmstat.c
    1252  pageset->pcp.batch);   in zoneinfo_show_print()

  zsmalloc.c
    1871  pool->shrinker.batch = 0;   in zs_register_shrinker()
/linux-4.4.14/include/linux/

  percpu_counter.h
      42  void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
      44  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
     125  __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)   in __percpu_counter_compare() [argument]
     139  __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)   in __percpu_counter_add() [argument]

  shrinker.h
      56  long batch; /* reclaim batch size, 0 = default */   [member]
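The shrinker.h member above is consumed by the do_shrink_slab() hit in mm/vmscan.c: the total scan target is walked in chunks of shrinker->batch, with 0 selecting a default. A sketch of that chunked-scan loop (hypothetical names; the real loop also handles the sub-batch remainder):

    #include <stdio.h>

    #define DEFAULT_BATCH 128

    struct shrinker {
        long batch;   /* reclaim batch size, 0 = default */
    };

    /* Stand-in scan callback: pretend every requested object is freed. */
    static long scan_objects(long nr)
    {
        return nr;
    }

    static long do_shrink(struct shrinker *s, long total_scan)
    {
        long batch_size = s->batch ? s->batch : DEFAULT_BATCH;
        long freed = 0;

        /* Never ask the callback for more than one batch at a time. */
        while (total_scan >= batch_size) {
            freed += scan_objects(batch_size);
            total_scan -= batch_size;
        }
        return freed;
    }

    int main(void)
    {
        struct shrinker s = { .batch = 0 };

        printf("freed %ld of 1000\n", do_shrink(&s, 1000));
        return 0;
    }

The super.c, bcache, raid5, dm-bufio, ion and zsmalloc hits further down are all just different choices for this one field.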
  mmzone.h
     255  int batch; /* chunk size for buddy add/remove */   [member]
/linux-4.4.14/drivers/staging/lustre/lnet/selftest/

  console.c
    1140  lstcon_batch_t *batch;   in lstcon_testrpc_condition() [local]
    1148  batch = test->tes_batch;   in lstcon_testrpc_condition()
    1149  LASSERT(batch != NULL);   in lstcon_testrpc_condition()
    1159  hash = batch->bat_cli_hash;   in lstcon_testrpc_condition()
    1160  head = &batch->bat_cli_list;   in lstcon_testrpc_condition()
    1165  hash = batch->bat_srv_hash;   in lstcon_testrpc_condition()
    1166  head = &batch->bat_srv_list;   in lstcon_testrpc_condition()
    1232  lstcon_verify_batch(const char *name, lstcon_batch_t **batch)   in lstcon_verify_batch() [argument]
    1236  rc = lstcon_batch_find(name, batch);   in lstcon_verify_batch()
    1242  if ((*batch)->bat_state != LST_BATCH_IDLE) {   in lstcon_verify_batch()
    [all …]

  conrpc.c
     658  lstcon_batch_t *batch;   in lstcon_batrpc_prep() [local]
     681  batch = (lstcon_batch_t *)tsb;   in lstcon_batrpc_prep()
     682  brq->bar_arg = batch->bat_arg;   in lstcon_batrpc_prep()

  framework.c
     662  sfw_batch_t *batch;   in sfw_destroy_session() [local]
     668  batch = list_entry(sn->sn_batches.next,   in sfw_destroy_session()
     670  list_del_init(&batch->bat_list);   in sfw_destroy_session()
     671  sfw_destroy_batch(batch);   in sfw_destroy_session()
/linux-4.4.14/arch/s390/mm/

  pgtable.c
    1060  struct mmu_table_batch *batch;   in tlb_remove_table_rcu() [local]
    1063  batch = container_of(head, struct mmu_table_batch, rcu);   in tlb_remove_table_rcu()
    1065  for (i = 0; i < batch->nr; i++)   in tlb_remove_table_rcu()
    1066  __tlb_remove_table(batch->tables[i]);   in tlb_remove_table_rcu()
    1068  free_page((unsigned long)batch);   in tlb_remove_table_rcu()
    1073  struct mmu_table_batch **batch = &tlb->batch;   in tlb_table_flush() [local]
    1075  if (*batch) {   in tlb_table_flush()
    1076  call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);   in tlb_table_flush()
    1077  *batch = NULL;   in tlb_table_flush()
    1083  struct mmu_table_batch **batch = &tlb->batch;   in tlb_remove_table() [local]
    [all …]
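The s390 hits show RCU-deferred freeing of page-table pages: tlb_remove_table() fills an mmu_table_batch, tlb_table_flush() hands it to call_rcu_sched(), and the callback recovers the batch via container_of() and frees every entry plus the batch itself. A sketch of the callback side, invoking the function directly where the kernel would defer it past a grace period (illustrative):

    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head {
        void (*func)(struct rcu_head *);
    };

    #define MAX_TABLE_BATCH 8

    struct table_batch {
        struct rcu_head rcu;
        unsigned int nr;
        void *tables[MAX_TABLE_BATCH];
    };

    /* Runs after the grace period: free each table, then the batch page. */
    static void remove_table_rcu(struct rcu_head *head)
    {
        struct table_batch *batch =
            container_of(head, struct table_batch, rcu);

        for (unsigned int i = 0; i < batch->nr; i++)
            free(batch->tables[i]);
        free(batch);
    }

    int main(void)
    {
        struct table_batch *batch = calloc(1, sizeof(*batch));

        if (!batch)
            return 1;
        batch->tables[batch->nr++] = malloc(64);
        batch->rcu.func = remove_table_rcu;
        batch->rcu.func(&batch->rcu);  /* call_rcu_sched() would defer this */
        return 0;
    }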
/linux-4.4.14/fs/xfs/

  xfs_icache.c
     532  struct xfs_inode *batch[XFS_LOOKUP_BATCH];   in xfs_inode_ag_walk() [local]
     540  (void **)batch, first_index,   in xfs_inode_ag_walk()
     545  (void **) batch, first_index,   in xfs_inode_ag_walk()
     558  struct xfs_inode *ip = batch[i];   in xfs_inode_ag_walk()
     561  batch[i] = NULL;   in xfs_inode_ag_walk()
     586  if (!batch[i])   in xfs_inode_ag_walk()
     588  error = execute(batch[i], flags, args);   in xfs_inode_ag_walk()
     589  IRELE(batch[i]);   in xfs_inode_ag_walk()
    1053  struct xfs_inode *batch[XFS_LOOKUP_BATCH];   in xfs_reclaim_inodes_ag() [local]
    1059  (void **)batch, first_index,   in xfs_reclaim_inodes_ag()
    [all …]

  xfs_mount.c
    1175  s32 batch;   in xfs_mod_fdblocks() [local]
    1211  batch = 1;   in xfs_mod_fdblocks()
    1213  batch = XFS_FDBLOCKS_BATCH;   in xfs_mod_fdblocks()
    1215  __percpu_counter_add(&mp->m_fdblocks, delta, batch);   in xfs_mod_fdblocks()

  xfs_qm.c
      81  struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];   in xfs_qm_dquot_walk() [local]
      86  nr_found = radix_tree_gang_lookup(tree, (void **)batch,   in xfs_qm_dquot_walk()
      94  struct xfs_dquot *dqp = batch[i];   in xfs_qm_dquot_walk()
      98  error = execute(batch[i], data);   in xfs_qm_dquot_walk()
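xfs_inode_ag_walk(), xfs_reclaim_inodes_ag() and xfs_qm_dquot_walk() share one shape: gang-look-up to XFS_LOOKUP_BATCH pointers from a radix tree into a stack array, run the callback over the batch, then resume from the next index. A sketch of that walk over a plain array standing in for the radix tree (hypothetical names):

    #include <stdio.h>

    #define LOOKUP_BATCH 4

    /* Stand-in for radix_tree_gang_lookup(): copy up to max items
     * starting at first into batch; return how many were found. */
    static int gang_lookup(int *tree, int nitems, void **batch,
                           int first, int max)
    {
        int nr = 0;

        while (first < nitems && nr < max)
            batch[nr++] = &tree[first++];
        return nr;
    }

    static int walk(int *tree, int nitems, int (*execute)(int *))
    {
        void *batch[LOOKUP_BATCH];
        int first = 0;

        for (;;) {
            int nr_found = gang_lookup(tree, nitems, batch,
                                       first, LOOKUP_BATCH);
            if (!nr_found)
                break;
            first += nr_found;          /* resume past this batch */
            for (int i = 0; i < nr_found; i++) {
                int error = execute(batch[i]);
                if (error)
                    return error;
            }
        }
        return 0;
    }

    static int print_item(int *p)
    {
        printf("%d\n", *p);
        return 0;
    }

    int main(void)
    {
        int tree[] = { 1, 2, 3, 4, 5, 6, 7 };

        return walk(tree, 7, print_item);
    }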
/linux-4.4.14/arch/s390/include/asm/

  tlb.h
      33  struct mmu_table_batch *batch;   [member]
      59  tlb->batch = NULL;   in tlb_gather_mmu()
/linux-4.4.14/drivers/xen/

  grant-table.c
     753  void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)   in gnttab_batch_map() [argument]
     757  if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))   in gnttab_batch_map()
     759  for (op = batch; op < batch + count; op++)   in gnttab_batch_map()
     766  void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)   in gnttab_batch_copy() [argument]
     770  if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))   in gnttab_batch_copy()
     772  for (op = batch; op < batch + count; op++)   in gnttab_batch_copy()
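gnttab_batch_map() and gnttab_batch_copy() issue a single batched operation, then sweep the per-op status words and retry any entry the hypervisor reported as busy. A sketch of that submit-then-sweep shape with a stub in place of the hypercall (status codes and the failure pattern are illustrative):

    #include <stdio.h>

    enum { ST_OK = 0, ST_AGAIN = 1 };  /* cf. GNTST_okay / GNTST_eagain */

    struct gnt_op {
        int status;
    };

    /* Stub hypercall: ops being retried succeed; on a fresh submit,
     * every third op is told to try again. */
    static void submit(struct gnt_op *batch, unsigned int count)
    {
        for (unsigned int i = 0; i < count; i++)
            batch[i].status = batch[i].status ? ST_OK
                            : (i % 3 == 2 ? ST_AGAIN : ST_OK);
    }

    static void batch_map(struct gnt_op *batch, unsigned int count)
    {
        struct gnt_op *op;

        submit(batch, count);               /* one call for the whole batch */
        for (op = batch; op < batch + count; op++)
            if (op->status == ST_AGAIN)
                submit(op, 1);              /* retry busy entries singly */
    }

    int main(void)
    {
        struct gnt_op ops[5] = { { 0 } };

        batch_map(ops, 5);
        for (int i = 0; i < 5; i++)
            printf("op %d status %d\n", i, ops[i].status);
        return 0;
    }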
/linux-4.4.14/Documentation/networking/timestamping/

  txtimestamp.c
     184  int batch = 0;   in __recv_errmsg_cmsg() [local]
     223  batch++;   in __recv_errmsg_cmsg()
     227  if (batch > 1)   in __recv_errmsg_cmsg()
     228  fprintf(stderr, "batched %d timestamps\n", batch);   in __recv_errmsg_cmsg()
/linux-4.4.14/arch/powerpc/platforms/pseries/

  lpar.c
     524  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);   in pSeries_lpar_flush_hash_range() [local]
     534  psize = batch->psize;   in pSeries_lpar_flush_hash_range()
     535  ssize = batch->ssize;   in pSeries_lpar_flush_hash_range()
     538  vpn = batch->vpn[i];   in pSeries_lpar_flush_hash_range()
     539  pte = batch->pte[i];   in pSeries_lpar_flush_hash_range()
/linux-4.4.14/include/xen/

  grant_table.h
     217  void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
     218  void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
/linux-4.4.14/virt/kvm/

  Kconfig
      28  # Toggle to switch between direct notification and batch job
/linux-4.4.14/Documentation/features/vm/TLB/

  arch-support.txt
       2  # Feature name: batch-unmap-tlb-flush
/linux-4.4.14/arch/arm/include/asm/

  tlb.h
      68  struct mmu_table_batch *batch;   [member]
     164  tlb->batch = NULL;   in tlb_gather_mmu()
/linux-4.4.14/drivers/misc/

  vmw_balloon.c
     182  static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)   in vmballoon_batch_get_pa() [argument]
     184  return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;   in vmballoon_batch_get_pa()
     187  static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,   in vmballoon_batch_get_status() [argument]
     190  return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);   in vmballoon_batch_get_status()
     193  static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,   in vmballoon_batch_set_pa() [argument]
     196  batch->pages[idx] = pa;   in vmballoon_batch_set_pa()
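The vmw_balloon accessors pack each batch entry's page address and status code into one u64, separated by two masks. A sketch of that pack/unpack, assuming a low-bits status field (the mask layout is an assumption for illustration, not the driver's actual constants):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed split: low 12 bits carry the status, the rest the address. */
    #define STATUS_MASK 0x0fffULL
    #define PA_MASK     (~STATUS_MASK)

    static uint64_t entry_pack(uint64_t pa, int status)
    {
        return (pa & PA_MASK) | ((uint64_t)status & STATUS_MASK);
    }

    static uint64_t entry_get_pa(uint64_t e)
    {
        return e & PA_MASK;
    }

    static int entry_get_status(uint64_t e)
    {
        return (int)(e & STATUS_MASK);
    }

    int main(void)
    {
        uint64_t e = entry_pack(0x123456000ULL, 5);

        printf("pa=%#llx status=%d\n",
               (unsigned long long)entry_get_pa(e), entry_get_status(e));
        return 0;
    }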
/linux-4.4.14/tools/vm/

  page-types.c
     598  unsigned long batch;   in walk_pfn() [local]
     603  batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);   in walk_pfn()
     604  pages = kpageflags_read(buf, index, batch);   in walk_pfn()
     620  unsigned long batch;   in walk_vma() [local]
     626  batch = min_t(unsigned long, count, PAGEMAP_BATCH);   in walk_vma()
     627  pages = pagemap_read(buf, index, batch);   in walk_vma()
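walk_pfn() and walk_vma() read large pfn ranges in bounded chunks: clamp the remaining count to a batch limit, read, and advance by however many items actually arrived, so a short read never skips data. A sketch of that loop with a stub reader (hypothetical names):

    #include <stdio.h>

    #define READ_BATCH 64

    /* Stub reader: returns how many items it produced (may be short). */
    static long read_items(long *buf, unsigned long index, unsigned long n)
    {
        (void)buf;
        (void)index;
        return (long)n;
    }

    static void walk_range(unsigned long index, unsigned long count)
    {
        long buf[READ_BATCH];

        while (count) {
            unsigned long batch = count < READ_BATCH ? count : READ_BATCH;
            long pages = read_items(buf, index, batch);

            if (pages <= 0)
                break;                 /* error or end of data */
            /* ... process 'pages' items from buf here ... */
            index += (unsigned long)pages;
            count -= (unsigned long)pages;
        }
    }

    int main(void)
    {
        walk_range(0, 1000);
        return 0;
    }

The xen-blkback unmap loop and the x86/xen do_remap_gfn() hits below follow the same clamp-process-advance shape.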
/linux-4.4.14/arch/powerpc/kernel/

  process.c
     769  struct ppc64_tlb_batch *batch;   in __switch_to() [local]
     884  batch = this_cpu_ptr(&ppc64_tlb_batch);   in __switch_to()
     885  if (batch->active) {   in __switch_to()
     887  if (batch->index)   in __switch_to()
     888  __flush_tlb_pending(batch);   in __switch_to()
     889  batch->active = 0;   in __switch_to()
     907  batch = this_cpu_ptr(&ppc64_tlb_batch);   in __switch_to()
     908  batch->active = 1;   in __switch_to()
/linux-4.4.14/certs/

  Makefile
      53  -batch -x509 -config $(obj)/x509.genkey \
/linux-4.4.14/include/asm-generic/

  tlb.h
      95  struct mmu_table_batch *batch;   [member]
/linux-4.4.14/block/

  Kconfig.iosched
      21  a new point in the service tree and doing a batch of IO from there
/linux-4.4.14/drivers/target/iscsi/

  iscsi_target_erl1.c
    1073  int batch = 0;   in iscsit_handle_ooo_cmdsn() [local]
    1081  batch = 1;   in iscsit_handle_ooo_cmdsn()
    1086  batch = 1;   in iscsit_handle_ooo_cmdsn()
    1094  ooo_cmdsn->batch_count = (batch) ?   in iscsit_handle_ooo_cmdsn()
/linux-4.4.14/Documentation/cgroups/

  freezer-subsystem.txt
       1  The cgroup freezer is useful to batch job management system which start
       6  be started/stopped by the batch job management system. It also provides

  cpuacct.txt
      49  due to the batch processing nature of percpu_counter.

  cpusets.txt
     253  This enables batch managers monitoring jobs running in dedicated
     264  This mechanism provides a very economical way for the batch manager
     266  batch manager or other user code to decide what to do about it and
     278  the system load imposed by a batch scheduler monitoring this
     283  counter, a batch scheduler can detect memory pressure with a
     288  the batch scheduler can obtain the key information, memory
/linux-4.4.14/Documentation/trace/

  events-kmem.txt
      60  When pages are freed in batch, the also mm_page_free_batched is triggered.
      62  freed in batch with a page list. Significant amounts of activity here could
/linux-4.4.14/Documentation/ABI/testing/

  sysfs-fs-f2fs
      82  Controls the trimming rate in batch mode.
/linux-4.4.14/Documentation/block/

  deadline-iosched.txt
      39  maximum number of requests per batch.

  biodoc.txt
     983  The generic i/o scheduler algorithm attempts to sort/merge/batch requests for
    1011  iii. Plugging the queue to batch requests in anticipation of opportunities for
/linux-4.4.14/drivers/block/xen-blkback/

  blkback.c
     770  unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);   in xen_blkbk_unmap() [local]
     772  invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,   in xen_blkbk_unmap()
     779  pages += batch;   in xen_blkbk_unmap()
     780  num -= batch;   in xen_blkbk_unmap()
/linux-4.4.14/drivers/staging/android/ion/

  ion_heap.c
     314  heap->shrinker.batch = 0;   in ion_heap_init_shrinker()
/linux-4.4.14/arch/x86/xen/

  mmu.c
    2844  int batch = min(REMAP_BATCH_SIZE, nr);   in do_remap_gfn() [local]
    2845  int batch_left = batch;   in do_remap_gfn()
    2846  range = (unsigned long)batch << PAGE_SHIFT;   in do_remap_gfn()
    2882  nr -= batch;   in do_remap_gfn()
    2885  err_ptr += batch;   in do_remap_gfn()
/linux-4.4.14/Documentation/scheduler/

  sched-bwc.txt
      54  "silos" in a batch fashion. This greatly reduces global accounting pressure

  sched-design-CFS.txt
     125  batch jobs.
/linux-4.4.14/Documentation/

  dynamic-debug-howto.txt
     107  If your query set is big, you can batch them too:
     109  ~# cat query-batch-file > <debugfs>/dynamic_debug/control

  module-signing.txt
     163  openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \

  kprobes.txt
      43  There are also register_/unregister_*probes() functions for batch

  kernel-parameters.txt
    3084  process in one batch.
    3160  batch limiting is disabled.
    3164  batch limiting is re-enabled.

  devices.txt
      67  in "batch mode", so there is likely additional registrations that
/linux-4.4.14/arch/arm/lib/

  lib1funcs.S
     171  @ Do comparisons in batch of 4 first.
/linux-4.4.14/Documentation/DocBook/

  gpu.xml.db
     595  API-i915-gem-batch-pool-init
     596  API-i915-gem-batch-pool-fini
     597  API-i915-gem-batch-pool-get
/linux-4.4.14/Documentation/vm/

  page_migration
      28  a process to a processor on a distant node. A batch scheduler or an
/linux-4.4.14/Documentation/crypto/

  async-tx-api.txt
      92  async_<operation> call. Offload engine drivers batch operations to
/linux-4.4.14/Documentation/filesystems/

  btrfs.txt
     109  initiate batch trims from userspace).

  f2fs.txt
     212  to be trimmed out in batch mode when FITRIM

  ext4.txt
     277  additional filesystem operations to be batch

  vfs.txt
     318  appropriate scan batch sizes without having to worry about whether
     319  implementations will cause holdoff problems due to large scan batch

  xfs-delayed-logging-design.txt
      82  the current batch completes. It is now common for a single current CPU core to
/linux-4.4.14/fs/

  super.c
     247  s->s_shrink.batch = 1024;   in alloc_super()
/linux-4.4.14/drivers/staging/lustre/lustre/obdecho/

  echo_client.c
    1647  u64 batch, struct obd_trans_info *oti,   in echo_client_prep_commit() [argument]
    1662  npages = batch >> PAGE_CACHE_SHIFT;   in echo_client_prep_commit()
/linux-4.4.14/arch/x86/kvm/

  mmu.c
    4735  int batch = 0;   in kvm_zap_obsolete_pages() [local]
    4761  if (batch >= BATCH_ZAP_PAGES &&   in kvm_zap_obsolete_pages()
    4763  batch = 0;   in kvm_zap_obsolete_pages()
    4769  batch += ret;   in kvm_zap_obsolete_pages()
/linux-4.4.14/Documentation/locking/

  ww-mutex-design.txt
      16  there is no way to guarantee that buffers appear in a execbuf/batch in
/linux-4.4.14/Documentation/sysctl/

  vm.txt
     712  The batch value of each per cpu pagelist is also updated as a result. It is
     713  set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
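The vm.txt lines state the rule outright: when percpu_pagelist_fraction is written, each per-cpu pagelist batch is recomputed as pcp->high/4, capped at PAGE_SHIFT * 8. As a check of the stated formula only, assuming 4 KiB pages (this sketches the documented rule, not the kernel's full setup path):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages assumed */

    /* batch = high/4, capped at PAGE_SHIFT * 8, per the text above. */
    static int pcp_batch(int high)
    {
        int batch = high / 4;
        int cap = PAGE_SHIFT * 8;

        return batch > cap ? cap : batch;
    }

    int main(void)
    {
        printf("high=200 -> batch=%d\n", pcp_batch(200));  /* 50 */
        printf("high=800 -> batch=%d\n", pcp_batch(800));  /* capped at 96 */
        return 0;
    }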
/linux-4.4.14/drivers/md/

  raid5.c
    5745  struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;   in handle_active_stripes() [local]
    5751  batch[batch_size++] = sh;   in handle_active_stripes()
    5777  handle_stripe(batch[i]);   in handle_active_stripes()
    5784  hash = batch[i]->hash_lock_index;   in handle_active_stripes()
    5785  __release_stripe(conf, batch[i], &temp_inactive_list[hash]);   in handle_active_stripes()
    6637  conf->shrinker.batch = 128;   in setup_conf()

  dm-bufio.c
    1674  c->shrinker.batch = 0;   in dm_bufio_client_create()
/linux-4.4.14/Documentation/RCU/

  checklist.txt
     190  a single non-expedited primitive to cover the entire batch.

  trace.txt
     166  o "b" is the batch limit for this CPU. If more than this number

  RTFP.txt
    1030  Add per-cpu batch counter"
/linux-4.4.14/Documentation/networking/

  cs89x0.txt
      89  build - batch file to compile cs89x0.c.
/linux-4.4.14/drivers/message/fusion/lsi/

  mpi_history.txt
     230  * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
/linux-4.4.14/fs/btrfs/

  tree-log.c
    2757  int batch = atomic_read(&root->log_batch);   in btrfs_sync_log() [local]
    2766  if (batch == atomic_read(&root->log_batch))   in btrfs_sync_log()
/linux-4.4.14/drivers/md/bcache/

  btree.c
     810  c->shrink.batch = c->btree_pages * 2;   in bch_btree_cache_alloc()
/linux-4.4.14/drivers/scsi/aic7xxx/

  aic7xxx.seq
    1078  * we can batch the clearing of HADDR with the fixup.

  aic79xx.seq
     376  * order is preserved even if we batch.