/linux-4.1.27/arch/powerpc/mm/ |
D | tlb_hash64.c |
      47  struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);  in hpte_need_flush() local
      54  i = batch->index;  in hpte_need_flush()
     100  if (!batch->active) {  in hpte_need_flush()
     116  if (i != 0 && (mm != batch->mm || batch->psize != psize ||  in hpte_need_flush()
     117  batch->ssize != ssize)) {  in hpte_need_flush()
     118  __flush_tlb_pending(batch);  in hpte_need_flush()
     122  batch->mm = mm;  in hpte_need_flush()
     123  batch->psize = psize;  in hpte_need_flush()
     124  batch->ssize = ssize;  in hpte_need_flush()
     126  batch->pte[i] = rpte;  in hpte_need_flush()
     [all …]
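The hpte_need_flush() hits above are the heart of the powerpc hash-TLB batching idiom: invalidations accumulate in a per-CPU batch, and the batch is flushed early whenever the mm, page size, or segment size no longer matches what is already queued. A minimal user-space sketch of that idiom follows — the names (tlb_batch, batch_add, flush_pending) are hypothetical, and the BATCH_MAX value of 192 is assumed from PPC64_TLB_BATCH_NR in this tree:

    #include <stdio.h>

    #define BATCH_MAX 192   /* assumed: PPC64_TLB_BATCH_NR in this tree */

    struct tlb_batch {
        int active;                 /* set inside a lazy-MMU section */
        int index;                  /* entries queued so far */
        const void *mm;             /* address space the entries belong to */
        int psize, ssize;           /* page-size / segment-size context */
        unsigned long vpn[BATCH_MAX];
    };

    static void flush_pending(struct tlb_batch *b)
    {
        printf("flushing %d queued invalidations\n", b->index);
        b->index = 0;
    }

    /* Queue one invalidation; flush first if the context changed, and
     * flush immediately once the batch is full. */
    static void batch_add(struct tlb_batch *b, const void *mm,
                          int psize, int ssize, unsigned long vpn)
    {
        if (b->index != 0 &&
            (mm != b->mm || psize != b->psize || ssize != b->ssize))
            flush_pending(b);
        if (b->index == 0) {
            b->mm = mm;
            b->psize = psize;
            b->ssize = ssize;
        }
        b->vpn[b->index++] = vpn;
        if (b->index == BATCH_MAX)
            flush_pending(b);
    }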
|
D | hash_native_64.c |
     645  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);  in native_flush_hash_range() local
     646  unsigned long psize = batch->psize;  in native_flush_hash_range()
     647  int ssize = batch->ssize;  in native_flush_hash_range()
     653  vpn = batch->vpn[i];  in native_flush_hash_range()
     654  pte = batch->pte[i];  in native_flush_hash_range()
     679  vpn = batch->vpn[i];  in native_flush_hash_range()
     680  pte = batch->pte[i];  in native_flush_hash_range()
     696  vpn = batch->vpn[i];  in native_flush_hash_range()
     697  pte = batch->pte[i];  in native_flush_hash_range()
|
D | hugetlbpage.c |
     461  struct hugepd_freelist *batch =  in hugepd_free_rcu_callback() local
     465  for (i = 0; i < batch->index; i++)  in hugepd_free_rcu_callback()
     466  kmem_cache_free(hugepte_cache, batch->ptes[i]);  in hugepd_free_rcu_callback()
     468  free_page((unsigned long)batch);  in hugepd_free_rcu_callback()
|
D | hash_utils_64.c |
    1407  struct ppc64_tlb_batch *batch =  in flush_hash_range() local
    1411  flush_hash_page(batch->vpn[i], batch->pte[i],  in flush_hash_range()
    1412  batch->psize, batch->ssize, local);  in flush_hash_range()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | tlbflush.h |
     104  extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
     110  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);  in arch_enter_lazy_mmu_mode() local
     112  batch->active = 1;  in arch_enter_lazy_mmu_mode()
     117  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);  in arch_leave_lazy_mmu_mode() local
     119  if (batch->index)  in arch_leave_lazy_mmu_mode()
     120  __flush_tlb_pending(batch);  in arch_leave_lazy_mmu_mode()
     121  batch->active = 0;  in arch_leave_lazy_mmu_mode()
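arch_enter/leave_lazy_mmu_mode() above form the bracket that drives the batch: enter marks it active, leave flushes whatever is still queued. Continuing the hypothetical tlb_batch sketch under tlb_hash64.c above, the same bracket reduces to (a sketch, not the kernel's code):

    static void enter_lazy(struct tlb_batch *b)
    {
        b->active = 1;              /* updates may now be queued */
    }

    static void leave_lazy(struct tlb_batch *b)
    {
        if (b->index)               /* anything still pending? */
            flush_pending(b);
        b->active = 0;
    }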
|
/linux-4.1.27/mm/ |
D | memory.c |
     185  struct mmu_gather_batch *batch;  in tlb_next_batch() local
     187  batch = tlb->active;  in tlb_next_batch()
     188  if (batch->next) {  in tlb_next_batch()
     189  tlb->active = batch->next;  in tlb_next_batch()
     196  batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);  in tlb_next_batch()
     197  if (!batch)  in tlb_next_batch()
     201  batch->next = NULL;  in tlb_next_batch()
     202  batch->nr = 0;  in tlb_next_batch()
     203  batch->max = MAX_GATHER_BATCH;  in tlb_next_batch()
     205  tlb->active->next = batch;  in tlb_next_batch()
     [all …]
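tlb_next_batch() above grows the munmap gather by chaining page-sized batches, and deliberately allocates with GFP_NOWAIT so that allocation failure simply means "flush what you have now". A runnable sketch of the same chaining, with hypothetical names (gather, gather_batch) and malloc standing in for __get_free_pages():

    #include <stdlib.h>

    #define MAX_GATHER_BATCH 16   /* the kernel derives this from PAGE_SIZE */

    struct gather_batch {
        struct gather_batch *next;
        int nr, max;
        void *pages[MAX_GATHER_BATCH];
    };

    struct gather {
        struct gather_batch *active;
        struct gather_batch first;   /* embedded batch, always available */
    };

    /* Move to the next batch, reusing a previously chained one when it
     * exists; returns 0 on allocation failure, telling the caller to
     * flush early instead of blocking. */
    static int next_batch(struct gather *tlb)
    {
        struct gather_batch *b = tlb->active;

        if (b->next) {
            tlb->active = b->next;
            return 1;
        }
        b = malloc(sizeof(*b));
        if (!b)
            return 0;
        b->next = NULL;
        b->nr = 0;
        b->max = MAX_GATHER_BATCH;
        tlb->active->next = b;
        tlb->active = b;
        return 1;
    }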
|
D | mm_init.c |
     159  s32 batch = max_t(s32, nr*2, 32);  in mm_compute_batch() local
     164  vm_committed_as_batch = max_t(s32, memsized_batch, batch);  in mm_compute_batch()
|
D | page_alloc.c |
    1399  int to_drain, batch;  in drain_zone_pages() local
    1402  batch = READ_ONCE(pcp->batch);  in drain_zone_pages()
    1403  to_drain = min(pcp->count, batch);  in drain_zone_pages()
    1601  unsigned long batch = READ_ONCE(pcp->batch);  in free_hot_cold_page() local
    1602  free_pcppages_bulk(zone, batch, pcp);  in free_hot_cold_page()
    1603  pcp->count -= batch;  in free_hot_cold_page()
    1743  pcp->batch, list,  in buffered_rmqueue()
    3892  static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
    4273  int batch;  in zone_batchsize() local
    4281  batch = zone->managed_pages / 1024;  in zone_batchsize()
     [all …]
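The free_hot_cold_page() hits show the per-cpu pagelist discipline: freed pages collect on a per-CPU list, and only when the list passes its high watermark is one batch handed back to the buddy allocator under the zone lock. A hedged stand-alone sketch (pcp_list and free_bulk are made-up names):

    #include <stdio.h>

    struct pcp_list {
        int count;   /* pages currently cached on this CPU */
        int high;    /* watermark that triggers a drain */
        int batch;   /* pages returned per drain */
    };

    static void free_bulk(struct pcp_list *pcp, int nr)
    {
        /* the kernel takes the zone lock here and calls free_pcppages_bulk() */
        printf("returning %d pages to the buddy allocator\n", nr);
        pcp->count -= nr;
    }

    static void free_one_page(struct pcp_list *pcp)
    {
        pcp->count++;                    /* page parked on the local list */
        if (pcp->count >= pcp->high)
            free_bulk(pcp, pcp->batch);
    }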
|
D | memcontrol.c |
    2221  unsigned int batch = max(CHARGE_BATCH, nr_pages);  in try_charge() local
    2237  !page_counter_try_charge(&memcg->memsw, batch, &counter)) {  in try_charge()
    2238  if (!page_counter_try_charge(&memcg->memory, batch, &counter))  in try_charge()
    2241  page_counter_uncharge(&memcg->memsw, batch);  in try_charge()
    2248  if (batch > nr_pages) {  in try_charge()
    2249  batch = nr_pages;  in try_charge()
    2323  css_get_many(&memcg->css, batch);  in try_charge()
    2324  if (batch > nr_pages)  in try_charge()
    2325  refill_stock(memcg, batch - nr_pages);  in try_charge()
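try_charge() above charges in units of max(CHARGE_BATCH, nr_pages) and parks the surplus in a per-CPU stock via refill_stock(), so the next few small charges on that CPU skip the shared page counter entirely. A simplified single-threaded analog (counter and stock are plain globals here, and the memsw/memory double charge is collapsed into one counter; the kernel's are a page_counter and a per-CPU structure):

    #include <stdio.h>

    #define CHARGE_BATCH 32   /* assumed value for the demo */

    static long used, limit = 1024;   /* stand-in for the page_counter */
    static long stock;                /* per-CPU surplus in the kernel */

    static int counter_try_charge(long nr)
    {
        if (used + nr > limit)
            return -1;
        used += nr;
        return 0;
    }

    /* Charge a whole batch when possible and cache the excess; fall back
     * to the exact amount when the batch would overshoot the limit. */
    static int try_charge(long nr_pages)
    {
        long batch = nr_pages > CHARGE_BATCH ? nr_pages : CHARGE_BATCH;

        if (counter_try_charge(batch)) {
            if (counter_try_charge(nr_pages))
                return -1;          /* genuinely at the limit */
            batch = nr_pages;
        }
        if (batch > nr_pages)
            stock += batch - nr_pages;  /* refill_stock() in the kernel */
        return 0;
    }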
|
D | slab.c |
     660  static void init_arraycache(struct array_cache *ac, int limit, int batch)  in init_arraycache() argument
     673  ac->batchcount = batch;  in init_arraycache()
     871  int batch, gfp_t gfp)  in __alloc_alien_cache() argument
     877  init_arraycache(&alc->ac, entries, batch);  in __alloc_alien_cache()
|
D | vmstat.c | 1230 pageset->pcp.batch); in zoneinfo_show_print()
|
D | vmscan.c | 247 long batch_size = shrinker->batch ? shrinker->batch in do_shrink_slab()
|
/linux-4.1.27/lib/ |
D | percpu_counter.c |
      75  void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)  in __percpu_counter_add() argument
      81  if (count >= batch || count <= -batch) {  in __percpu_counter_add()
     200  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
     206  if (abs(count - rhs) > (batch * num_online_cpus())) {  in __percpu_counter_compare()
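__percpu_counter_add() is the canonical batch threshold: each CPU accumulates a private delta and folds it into the shared count only once it drifts more than `batch` from zero, which is also why __percpu_counter_compare() can only trust the approximate value outside an error margin of batch * num_online_cpus(). A single-threaded sketch of the add path (one local counter stands in for the per-CPU array):

    #define BATCH 32

    static long global_count;   /* fbc->count, lock-protected in the kernel */
    static long local_delta;    /* one per CPU in the real structure */

    static void percpu_counter_add(long amount)
    {
        local_delta += amount;
        if (local_delta >= BATCH || local_delta <= -BATCH) {
            /* rare slow path: take the lock, publish, reset */
            global_count += local_delta;
            local_delta = 0;
        }
    }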
|
/linux-4.1.27/include/linux/ |
D | percpu_counter.h |
      42  void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
      44  int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
     125  __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)  in __percpu_counter_compare() argument
     139  __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)  in __percpu_counter_add() argument
|
D | shrinker.h | 56 long batch; /* reclaim batch size, 0 = default */ member
|
D | mmzone.h | 255 int batch; /* chunk size for buddy add/remove */ member
|
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/ |
D | console.c |
    1141  lstcon_batch_t *batch;  in lstcon_testrpc_condition() local
    1149  batch = test->tes_batch;  in lstcon_testrpc_condition()
    1150  LASSERT(batch != NULL);  in lstcon_testrpc_condition()
    1160  hash = batch->bat_cli_hash;  in lstcon_testrpc_condition()
    1161  head = &batch->bat_cli_list;  in lstcon_testrpc_condition()
    1166  hash = batch->bat_srv_hash;  in lstcon_testrpc_condition()
    1167  head = &batch->bat_srv_list;  in lstcon_testrpc_condition()
    1233  lstcon_verify_batch(const char *name, lstcon_batch_t **batch)  in lstcon_verify_batch() argument
    1237  rc = lstcon_batch_find(name, batch);  in lstcon_verify_batch()
    1243  if ((*batch)->bat_state != LST_BATCH_IDLE) {  in lstcon_verify_batch()
     [all …]
|
D | conrpc.c |
     659  lstcon_batch_t *batch;  in lstcon_batrpc_prep() local
     682  batch = (lstcon_batch_t *)tsb;  in lstcon_batrpc_prep()
     683  brq->bar_arg = batch->bat_arg;  in lstcon_batrpc_prep()
|
D | framework.c |
     666  sfw_batch_t *batch;  in sfw_destroy_session() local
     672  batch = list_entry(sn->sn_batches.next,  in sfw_destroy_session()
     674  list_del_init(&batch->bat_list);  in sfw_destroy_session()
     675  sfw_destroy_batch(batch);  in sfw_destroy_session()
|
/linux-4.1.27/arch/s390/mm/ |
D | pgtable.c |
    1136  struct mmu_table_batch *batch;  in tlb_remove_table_rcu() local
    1139  batch = container_of(head, struct mmu_table_batch, rcu);  in tlb_remove_table_rcu()
    1141  for (i = 0; i < batch->nr; i++)  in tlb_remove_table_rcu()
    1142  __tlb_remove_table(batch->tables[i]);  in tlb_remove_table_rcu()
    1144  free_page((unsigned long)batch);  in tlb_remove_table_rcu()
    1149  struct mmu_table_batch **batch = &tlb->batch;  in tlb_table_flush() local
    1151  if (*batch) {  in tlb_table_flush()
    1152  call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);  in tlb_table_flush()
    1153  *batch = NULL;  in tlb_table_flush()
    1159  struct mmu_table_batch **batch = &tlb->batch;  in tlb_remove_table() local
     [all …]
|
/linux-4.1.27/fs/xfs/ |
D | xfs_icache.c |
     530  struct xfs_inode *batch[XFS_LOOKUP_BATCH];  in xfs_inode_ag_walk() local
     538  (void **)batch, first_index,  in xfs_inode_ag_walk()
     543  (void **) batch, first_index,  in xfs_inode_ag_walk()
     556  struct xfs_inode *ip = batch[i];  in xfs_inode_ag_walk()
     559  batch[i] = NULL;  in xfs_inode_ag_walk()
     584  if (!batch[i])  in xfs_inode_ag_walk()
     586  error = execute(batch[i], flags, args);  in xfs_inode_ag_walk()
     587  IRELE(batch[i]);  in xfs_inode_ag_walk()
    1051  struct xfs_inode *batch[XFS_LOOKUP_BATCH];  in xfs_reclaim_inodes_ag() local
    1057  (void **)batch, first_index,  in xfs_reclaim_inodes_ag()
     [all …]
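xfs_inode_ag_walk() shows the standard radix-tree gang-lookup loop: grab up to XFS_LOOKUP_BATCH pointers per lookup, process them, then restart the lookup just past the last key seen. The cursor logic is the interesting part, so this runnable sketch fakes the tree with a sorted array (gang_lookup here is a stand-in, not the radix-tree API):

    #include <stdio.h>

    #define LOOKUP_BATCH 4   /* kept small so the demo loops a few times */

    /* Stand-in for radix_tree_gang_lookup(): copy up to max keys that are
     * >= first out of a sorted table, returning how many were found. */
    static int gang_lookup(const int *keys, int nkeys, int first,
                           int *out, int max)
    {
        int n = 0;

        for (int i = 0; i < nkeys && n < max; i++)
            if (keys[i] >= first)
                out[n++] = keys[i];
        return n;
    }

    int main(void)
    {
        const int inodes[] = { 3, 7, 9, 40, 41, 42, 99, 100 };
        int batch[LOOKUP_BATCH];
        int first = 0, nr;

        /* Walk everything one batch at a time, moving the cursor past
         * the last entry so the next lookup resumes where we stopped. */
        while ((nr = gang_lookup(inodes, 8, first, batch, LOOKUP_BATCH)) > 0) {
            for (int i = 0; i < nr; i++)
                printf("visit inode %d\n", batch[i]);
            first = batch[nr - 1] + 1;
        }
        return 0;
    }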
|
D | xfs_mount.c |
    1136  s32 batch;  in xfs_mod_fdblocks() local
    1172  batch = 1;  in xfs_mod_fdblocks()
    1174  batch = XFS_FDBLOCKS_BATCH;  in xfs_mod_fdblocks()
    1176  __percpu_counter_add(&mp->m_fdblocks, delta, batch);  in xfs_mod_fdblocks()
|
D | xfs_qm.c |
      81  struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];  in xfs_qm_dquot_walk() local
      86  nr_found = radix_tree_gang_lookup(tree, (void **)batch,  in xfs_qm_dquot_walk()
      94  struct xfs_dquot *dqp = batch[i];  in xfs_qm_dquot_walk()
      98  error = execute(batch[i], data);  in xfs_qm_dquot_walk()
|
/linux-4.1.27/arch/s390/include/asm/ |
D | tlb.h |
      33  struct mmu_table_batch *batch;  member
      59  tlb->batch = NULL;  in tlb_gather_mmu()
|
/linux-4.1.27/drivers/xen/ |
D | grant-table.c |
     754  void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)  in gnttab_batch_map() argument
     758  if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))  in gnttab_batch_map()
     760  for (op = batch; op < batch + count; op++)  in gnttab_batch_map()
     767  void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)  in gnttab_batch_copy() argument
     771  if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))  in gnttab_batch_copy()
     773  for (op = batch; op < batch + count; op++)  in gnttab_batch_copy()
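gnttab_batch_map()/gnttab_batch_copy() submit a whole array of operations in one hypercall and then walk the array to handle per-op status individually. A hedged sketch of that submit-then-inspect shape (map_op and the simulated hypercall are invented for the demo; the real ops carry Xen status codes and failed ops get retried):

    #include <stdio.h>

    struct map_op {
        int arg;
        int status;   /* filled in per op by the batched call */
    };

    /* Pretend hypercall: process every op in one go, recording an
     * individual result for each. */
    static void hypercall_batch(struct map_op *batch, unsigned count)
    {
        for (unsigned i = 0; i < count; i++)
            batch[i].status = (batch[i].arg % 3 == 0) ? -1 : 0;
    }

    static void batch_map(struct map_op *batch, unsigned count)
    {
        hypercall_batch(batch, count);
        for (struct map_op *op = batch; op < batch + count; op++)
            if (op->status != 0)
                fprintf(stderr, "op %ld needs individual handling\n",
                        (long)(op - batch));
    }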
|
/linux-4.1.27/drivers/gpu/drm/i915/ |
D | i915_gem_render_state.c |
      92  u32 s = rodata->batch[i];  in render_state_setup()
      99  rodata->batch[i + 1] != 0)  in render_state_setup()
|
D | i915_gem_render_state.h | 31 const u32 *batch; member
|
D | intel_renderstate.h | 37 .batch = gen ## _g ## _null_state_batch, \
|
/linux-4.1.27/include/xen/ |
D | grant_table.h |
     206  void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
     207  void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
|
/linux-4.1.27/Documentation/networking/timestamping/ |
D | txtimestamp.c |
     184  int batch = 0;  in __recv_errmsg_cmsg() local
     223  batch++;  in __recv_errmsg_cmsg()
     227  if (batch > 1)  in __recv_errmsg_cmsg()
     228  fprintf(stderr, "batched %d timestamps\n", batch);  in __recv_errmsg_cmsg()
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
D | lpar.c |
     524  struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);  in pSeries_lpar_flush_hash_range() local
     534  psize = batch->psize;  in pSeries_lpar_flush_hash_range()
     535  ssize = batch->ssize;  in pSeries_lpar_flush_hash_range()
     538  vpn = batch->vpn[i];  in pSeries_lpar_flush_hash_range()
     539  pte = batch->pte[i];  in pSeries_lpar_flush_hash_range()
|
/linux-4.1.27/virt/kvm/ |
D | Kconfig | 28 # Toggle to switch between direct notification and batch job
|
/linux-4.1.27/arch/arm/include/asm/ |
D | tlb.h |
      68  struct mmu_table_batch *batch;  member
     164  tlb->batch = NULL;  in tlb_gather_mmu()
|
/linux-4.1.27/tools/vm/ |
D | page-types.c |
     596  unsigned long batch;  in walk_pfn() local
     601  batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH);  in walk_pfn()
     602  pages = kpageflags_read(buf, index, batch);  in walk_pfn()
     618  unsigned long batch;  in walk_vma() local
     624  batch = min_t(unsigned long, count, PAGEMAP_BATCH);  in walk_vma()
     625  pages = pagemap_read(buf, index, batch);  in walk_vma()
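walk_pfn()/walk_vma() read page flags in chunks of min(count, BATCH) so one fixed buffer serves arbitrarily large ranges. A runnable sketch of the chunking loop (flags_read is a stub for the tool's kpageflags_read(), and the batch size of 1024 is assumed for the demo):

    #include <stdio.h>
    #include <stdint.h>

    #define FLAGS_BATCH 1024   /* assumed demo value */

    /* Stub: pretend every requested page arrived. */
    static long flags_read(uint64_t *buf, unsigned long index,
                           unsigned long pages)
    {
        (void)buf;
        (void)index;
        return (long)pages;
    }

    static void walk_pfn(unsigned long index, unsigned long count)
    {
        uint64_t buf[FLAGS_BATCH];

        while (count) {
            unsigned long batch =
                count < FLAGS_BATCH ? count : FLAGS_BATCH;
            long pages = flags_read(buf, index, batch);

            if (pages <= 0)
                break;        /* hole or read error: stop */
            index += pages;
            count -= pages;
        }
    }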
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | process.c |
     769  struct ppc64_tlb_batch *batch;  in __switch_to() local
     884  batch = this_cpu_ptr(&ppc64_tlb_batch);  in __switch_to()
     885  if (batch->active) {  in __switch_to()
     887  if (batch->index)  in __switch_to()
     888  __flush_tlb_pending(batch);  in __switch_to()
     889  batch->active = 0;  in __switch_to()
     907  batch = this_cpu_ptr(&ppc64_tlb_batch);  in __switch_to()
     908  batch->active = 1;  in __switch_to()
|
/linux-4.1.27/include/asm-generic/ |
D | tlb.h | 95 struct mmu_table_batch *batch; member
|
/linux-4.1.27/block/ |
D | Kconfig.iosched | 21 a new point in the service tree and doing a batch of IO from there
|
/linux-4.1.27/drivers/target/iscsi/ |
D | iscsi_target_erl1.c |
    1073  int batch = 0;  in iscsit_handle_ooo_cmdsn() local
    1081  batch = 1;  in iscsit_handle_ooo_cmdsn()
    1086  batch = 1;  in iscsit_handle_ooo_cmdsn()
    1094  ooo_cmdsn->batch_count = (batch) ?  in iscsit_handle_ooo_cmdsn()
|
/linux-4.1.27/Documentation/trace/ |
D | events-kmem.txt |
      60  When pages are freed in batch, the also mm_page_free_batched is triggered.
      62  freed in batch with a page list. Significant amounts of activity here could
|
/linux-4.1.27/Documentation/cgroups/ |
D | freezer-subsystem.txt |
       1  The cgroup freezer is useful to batch job management system which start
       6  be started/stopped by the batch job management system. It also provides
|
D | cpuacct.txt | 49 due to the batch processing nature of percpu_counter.
|
D | cpusets.txt |
     253  This enables batch managers monitoring jobs running in dedicated
     264  This mechanism provides a very economical way for the batch manager
     266  batch manager or other user code to decide what to do about it and
     278  the system load imposed by a batch scheduler monitoring this
     283  counter, a batch scheduler can detect memory pressure with a
     288  the batch scheduler can obtain the key information, memory
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-fs-f2fs | 82 Controls the trimming rate in batch mode.
|
/linux-4.1.27/drivers/block/xen-blkback/ |
D | blkback.c |
     763  unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);  in xen_blkbk_unmap() local
     765  invcount = xen_blkbk_unmap_prepare(blkif, pages, batch,  in xen_blkbk_unmap()
     772  pages += batch;  in xen_blkbk_unmap()
     773  num -= batch;  in xen_blkbk_unmap()
|
/linux-4.1.27/Documentation/block/ |
D | deadline-iosched.txt | 39 maximum number of requests per batch.
|
D | biodoc.txt |
     983  The generic i/o scheduler algorithm attempts to sort/merge/batch requests for
    1011  iii. Plugging the queue to batch requests in anticipation of opportunities for
|
/linux-4.1.27/arch/x86/xen/ |
D | mmu.c |
    2502  int batch = min(REMAP_BATCH_SIZE, nr);  in do_remap_mfn() local
    2503  int batch_left = batch;  in do_remap_mfn()
    2504  range = (unsigned long)batch << PAGE_SHIFT;  in do_remap_mfn()
    2540  nr -= batch;  in do_remap_mfn()
    2543  err_ptr += batch;  in do_remap_mfn()
|
/linux-4.1.27/kernel/ |
D | Makefile | 183 -batch -x509 -config x509.genkey \
|
/linux-4.1.27/drivers/staging/android/ion/ |
D | ion_heap.c | 314 heap->shrinker.batch = 0; in ion_heap_init_shrinker()
|
/linux-4.1.27/Documentation/scheduler/ |
D | sched-bwc.txt | 54 "silos" in a batch fashion. This greatly reduces global accounting pressure
|
D | sched-design-CFS.txt | 125 batch jobs.
|
/linux-4.1.27/Documentation/ |
D | dynamic-debug-howto.txt |
     107  If your query set is big, you can batch them too:
     109  ~# cat query-batch-file > <debugfs>/dynamic_debug/control
|
D | module-signing.txt | 137 openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
|
D | kprobes.txt | 43 There are also register_/unregister_*probes() functions for batch
|
D | kernel-parameters.txt |
    3007  process in one batch.
    3057  batch limiting is disabled.
    3061  batch limiting is re-enabled.
|
D | devices.txt | 67 in "batch mode", so there is likely additional registrations that
|
/linux-4.1.27/arch/arm/lib/ |
D | lib1funcs.S | 171 @ Do comparisons in batch of 4 first.
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
D | service.c |
     405  DECLARE_RS_BATCH(batch);  in ptlrpc_commit_replies()
     407  rs_batch_init(&batch);  in ptlrpc_commit_replies()
     420  rs_batch_add(&batch, rs);  in ptlrpc_commit_replies()
     424  rs_batch_fini(&batch);  in ptlrpc_commit_replies()
|
/linux-4.1.27/Documentation/vm/ |
D | page_migration | 28 a process to a processor on a distant node. A batch scheduler or an
|
/linux-4.1.27/Documentation/crypto/ |
D | async-tx-api.txt | 92 async_<operation> call. Offload engine drivers batch operations to
|
/linux-4.1.27/Documentation/filesystems/ |
D | btrfs.txt | 109 initiate batch trims from userspace).
|
D | f2fs.txt | 209 to be trimmed out in batch mode when FITRIM
|
D | ext4.txt | 277 additional filesystem operations to be batch
|
D | vfs.txt |
     318  appropriate scan batch sizes without having to worry about whether
     319  implementations will cause holdoff problems due to large scan batch
|
D | xfs-delayed-logging-design.txt | 82 the current batch completes. It is now common for a single current CPU core to
|
/linux-4.1.27/arch/x86/kvm/ |
D | mmu.c |
    4617  int batch = 0;  in kvm_zap_obsolete_pages() local
    4643  if (batch >= BATCH_ZAP_PAGES &&  in kvm_zap_obsolete_pages()
    4645  batch = 0;  in kvm_zap_obsolete_pages()
    4651  batch += ret;  in kvm_zap_obsolete_pages()
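kvm_zap_obsolete_pages() counts work done and, once at least a batch of pages has been zapped and someone is waiting, drops its locks and reschedules before continuing. A sketch of that yield-every-batch loop (the stubs replace need_resched()/cond_resched(); the BATCH_ZAP_PAGES value of 10 is assumed from this tree):

    #define BATCH_ZAP_PAGES 10

    static int someone_waiting(void) { return 0; }   /* need_resched() stub */
    static void yield_locks(void) { }                /* drop locks, resched, retake */

    static void zap_all_pages(int total)
    {
        int batch = 0;

        for (int done = 0; done < total; done++) {
            if (batch >= BATCH_ZAP_PAGES && someone_waiting()) {
                yield_locks();
                batch = 0;   /* earn another full batch before yielding again */
            }
            batch += 1;      /* this iteration zapped one page */
        }
    }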
|
/linux-4.1.27/fs/ |
D | super.c | 232 s->s_shrink.batch = 1024; in alloc_super()
|
/linux-4.1.27/drivers/staging/lustre/lustre/obdecho/ |
D | echo_client.c |
    1646  u64 batch, struct obd_trans_info *oti,  in echo_client_prep_commit() argument
    1661  npages = batch >> PAGE_CACHE_SHIFT;  in echo_client_prep_commit()
|
/linux-4.1.27/Documentation/locking/ |
D | ww-mutex-design.txt | 16 there is no way to guarantee that buffers appear in a execbuf/batch in
|
/linux-4.1.27/Documentation/sysctl/ |
D | vm.txt |
     712  The batch value of each per cpu pagelist is also updated as a result. It is
     713  set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
|
/linux-4.1.27/drivers/md/ |
D | raid5.c |
    5729  struct stripe_head *batch[MAX_STRIPE_BATCH], *sh;  in handle_active_stripes() local
    5735  batch[batch_size++] = sh;  in handle_active_stripes()
    5756  handle_stripe(batch[i]);  in handle_active_stripes()
    5762  hash = batch[i]->hash_lock_index;  in handle_active_stripes()
    5763  __release_stripe(conf, batch[i], &temp_inactive_list[hash]);  in handle_active_stripes()
    6593  conf->shrinker.batch = 128;  in setup_conf()
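handle_active_stripes() pulls up to MAX_STRIPE_BATCH stripes off the shared list while holding the configuration lock, then handles the whole batch with the lock dropped — the classic gather-under-lock, work-outside-lock shape. A pthread sketch of the same idea (struct stripe and the globals are invented for the demo):

    #include <pthread.h>

    #define MAX_STRIPE_BATCH 8

    struct stripe { struct stripe *next; };

    static pthread_mutex_t conf_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct stripe *pending;   /* shared work list */

    static void handle_stripe(struct stripe *sh) { (void)sh; /* real work */ }

    static int handle_active_stripes(void)
    {
        struct stripe *batch[MAX_STRIPE_BATCH];
        int n = 0;

        pthread_mutex_lock(&conf_lock);
        while (pending && n < MAX_STRIPE_BATCH) {   /* gather under lock */
            batch[n++] = pending;
            pending = pending->next;
        }
        pthread_mutex_unlock(&conf_lock);

        for (int i = 0; i < n; i++)                 /* work with lock dropped */
            handle_stripe(batch[i]);
        return n;
    }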
|
D | dm-bufio.c | 1668 c->shrinker.batch = 0; in dm_bufio_client_create()
|
/linux-4.1.27/Documentation/RCU/ |
D | checklist.txt | 190 a single non-expedited primitive to cover the entire batch.
|
D | trace.txt | 166 o "b" is the batch limit for this CPU. If more than this number
|
D | RTFP.txt | 1030 Add per-cpu batch counter"
|
/linux-4.1.27/Documentation/networking/ |
D | cs89x0.txt | 89 build - batch file to compile cs89x0.c.
|
/linux-4.1.27/drivers/message/fusion/lsi/ |
D | mpi_history.txt | 230 * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
|
/linux-4.1.27/fs/btrfs/ |
D | tree-log.c |
    2657  int batch = atomic_read(&root->log_batch);  in btrfs_sync_log() local
    2666  if (batch == atomic_read(&root->log_batch))  in btrfs_sync_log()
|
/linux-4.1.27/drivers/md/bcache/ |
D | btree.c | 810 c->shrink.batch = c->btree_pages * 2; in bch_btree_cache_alloc()
|
/linux-4.1.27/drivers/scsi/aic7xxx/ |
D | aic7xxx.seq | 1078 * we can batch the clearing of HADDR with the fixup.
|
D | aic79xx.seq | 376 * order is preserved even if we batch.
|