
Searched refs:batch (Results 1 – 81 of 81) sorted by relevance

/linux-4.1.27/arch/powerpc/mm/
tlb_hash64.c
47 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); in hpte_need_flush() local
54 i = batch->index; in hpte_need_flush()
100 if (!batch->active) { in hpte_need_flush()
116 if (i != 0 && (mm != batch->mm || batch->psize != psize || in hpte_need_flush()
117 batch->ssize != ssize)) { in hpte_need_flush()
118 __flush_tlb_pending(batch); in hpte_need_flush()
122 batch->mm = mm; in hpte_need_flush()
123 batch->psize = psize; in hpte_need_flush()
124 batch->ssize = ssize; in hpte_need_flush()
126 batch->pte[i] = rpte; in hpte_need_flush()
[all …]
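
The hpte_need_flush() hits above show the core of the powerpc TLB batching scheme: invalidations queue up in a per-CPU ppc64_tlb_batch while lazy MMU mode is active, and a pending batch is flushed early if the next entry's mm, page size, or segment size does not match what is already queued (the arch_enter/arch_leave_lazy_mmu_mode() hits under include/asm below bracket that batching window). A minimal single-threaded C sketch of the flush-on-mismatch rule, assuming nothing beyond the snippets here; the struct layout, function names, and MAX_BATCH are illustrative, not the kernel's:

#include <stdio.h>

#define MAX_BATCH 8

struct tlb_batch {
	int active;                    /* set between enter/leave of lazy mode */
	int index;                     /* entries queued so far */
	int psize, ssize;              /* attributes shared by the whole batch */
	unsigned long vpn[MAX_BATCH];
};

static void flush_pending(struct tlb_batch *b)
{
	if (b->index)
		printf("flush %d entries (psize=%d ssize=%d)\n",
		       b->index, b->psize, b->ssize);
	b->index = 0;
}

static void lazy_mmu_enter(struct tlb_batch *b) { b->active = 1; }

static void lazy_mmu_leave(struct tlb_batch *b)
{
	flush_pending(b);              /* as arch_leave_lazy_mmu_mode() does */
	b->active = 0;
}

static void queue_invalidate(struct tlb_batch *b, unsigned long vpn,
			     int psize, int ssize)
{
	if (!b->active) {              /* not batching: flush one entry now */
		printf("flush single vpn=%#lx\n", vpn);
		return;
	}
	/* A batch must be homogeneous, so a mismatch forces an early flush. */
	if (b->index && (b->psize != psize || b->ssize != ssize))
		flush_pending(b);
	b->psize = psize;
	b->ssize = ssize;
	b->vpn[b->index++] = vpn;
	if (b->index == MAX_BATCH)     /* full batch: flush and start over */
		flush_pending(b);
}

int main(void)
{
	struct tlb_batch b = { 0 };

	lazy_mmu_enter(&b);
	queue_invalidate(&b, 0x1000, 4, 1);
	queue_invalidate(&b, 0x2000, 4, 1);
	queue_invalidate(&b, 0x3000, 16, 1);   /* page-size change: early flush */
	lazy_mmu_leave(&b);                    /* drains the remainder */
	return 0;
}
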
hash_native_64.c
645 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); in native_flush_hash_range() local
646 unsigned long psize = batch->psize; in native_flush_hash_range()
647 int ssize = batch->ssize; in native_flush_hash_range()
653 vpn = batch->vpn[i]; in native_flush_hash_range()
654 pte = batch->pte[i]; in native_flush_hash_range()
679 vpn = batch->vpn[i]; in native_flush_hash_range()
680 pte = batch->pte[i]; in native_flush_hash_range()
696 vpn = batch->vpn[i]; in native_flush_hash_range()
697 pte = batch->pte[i]; in native_flush_hash_range()
hugetlbpage.c
461 struct hugepd_freelist *batch = in hugepd_free_rcu_callback() local
465 for (i = 0; i < batch->index; i++) in hugepd_free_rcu_callback()
466 kmem_cache_free(hugepte_cache, batch->ptes[i]); in hugepd_free_rcu_callback()
468 free_page((unsigned long)batch); in hugepd_free_rcu_callback()
hash_utils_64.c
1407 struct ppc64_tlb_batch *batch = in flush_hash_range() local
1411 flush_hash_page(batch->vpn[i], batch->pte[i], in flush_hash_range()
1412 batch->psize, batch->ssize, local); in flush_hash_range()
/linux-4.1.27/arch/powerpc/include/asm/
tlbflush.h
104 extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
110 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); in arch_enter_lazy_mmu_mode() local
112 batch->active = 1; in arch_enter_lazy_mmu_mode()
117 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); in arch_leave_lazy_mmu_mode() local
119 if (batch->index) in arch_leave_lazy_mmu_mode()
120 __flush_tlb_pending(batch); in arch_leave_lazy_mmu_mode()
121 batch->active = 0; in arch_leave_lazy_mmu_mode()
/linux-4.1.27/mm/
memory.c
185 struct mmu_gather_batch *batch; in tlb_next_batch() local
187 batch = tlb->active; in tlb_next_batch()
188 if (batch->next) { in tlb_next_batch()
189 tlb->active = batch->next; in tlb_next_batch()
196 batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0); in tlb_next_batch()
197 if (!batch) in tlb_next_batch()
201 batch->next = NULL; in tlb_next_batch()
202 batch->nr = 0; in tlb_next_batch()
203 batch->max = MAX_GATHER_BATCH; in tlb_next_batch()
205 tlb->active->next = batch; in tlb_next_batch()
[all …]
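
tlb_next_batch() above grows the mmu_gather's list of batch pages on demand: reuse a batch left chained from an earlier flush if there is one, otherwise allocate a fresh page with GFP_NOWAIT and tolerate failure (the caller simply flushes sooner). A small userspace analogue of that chained-batch idea, assuming only what the snippets show; names and the deliberately tiny BATCH_MAX are illustrative:

#include <stdlib.h>
#include <stdio.h>

#define BATCH_MAX 4   /* tiny, to show chaining; the kernel sizes this from PAGE_SIZE */

struct gather_batch {
	struct gather_batch *next;
	int nr;
	void *pages[BATCH_MAX];
};

struct gather {
	struct gather_batch first;   /* embedded, so gathering cannot fail outright */
	struct gather_batch *active;
};

static int next_batch(struct gather *g)
{
	struct gather_batch *b = g->active;

	if (b->next) {               /* reuse a batch kept from a prior cycle */
		g->active = b->next;
		return 1;
	}
	b->next = calloc(1, sizeof(*b));   /* kernel: GFP_NOWAIT | __GFP_NOWARN */
	if (!b->next)
		return 0;                  /* caller must flush now */
	g->active = b->next;
	return 1;
}

static int gather_page(struct gather *g, void *page)
{
	if (g->active->nr == BATCH_MAX && !next_batch(g))
		return 0;    /* out of batch space: flush immediately */
	g->active->pages[g->active->nr++] = page;
	return 1;
}

int main(void)
{
	struct gather g = { .active = &g.first };
	char dummy[10];

	for (int i = 0; i < 10; i++)
		gather_page(&g, &dummy[i]);
	for (struct gather_batch *b = &g.first; b; b = b->next)
		printf("batch of %d page(s)\n", b->nr);
	return 0;
}
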
mm_init.c
159 s32 batch = max_t(s32, nr*2, 32); in mm_compute_batch() local
164 vm_committed_as_batch = max_t(s32, memsized_batch, batch); in mm_compute_batch()
page_alloc.c
1399 int to_drain, batch; in drain_zone_pages() local
1402 batch = READ_ONCE(pcp->batch); in drain_zone_pages()
1403 to_drain = min(pcp->count, batch); in drain_zone_pages()
1601 unsigned long batch = READ_ONCE(pcp->batch); in free_hot_cold_page() local
1602 free_pcppages_bulk(zone, batch, pcp); in free_hot_cold_page()
1603 pcp->count -= batch; in free_hot_cold_page()
1743 pcp->batch, list, in buffered_rmqueue()
3892 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
4273 int batch; in zone_batchsize() local
4281 batch = zone->managed_pages / 1024; in zone_batchsize()
[all …]
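
zone_batchsize() above picks pcp->batch, the number of pages that move between a CPU's private free lists and the buddy allocator in one go; drain_zone_pages() and free_hot_cold_page() then drain in exactly these units. A condensed sketch of the 4.1 heuristic, minus the final round-to-power-of-two step, with constants kept only for illustration:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int zone_batchsize(unsigned long managed_pages)
{
	unsigned long batch = managed_pages / 1024;     /* ~0.1% of the zone */

	if (batch * PAGE_SIZE > 512 * 1024)             /* cap one batch at 512KB */
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;                                     /* leave headroom */
	if (batch < 1)
		batch = 1;
	return (int)batch;    /* the kernel also rounds to a power of two, minus 1 */
}

int main(void)
{
	printf("1GB zone -> batch %d\n", zone_batchsize(262144));
	return 0;
}
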
memcontrol.c
2221 unsigned int batch = max(CHARGE_BATCH, nr_pages); in try_charge() local
2237 !page_counter_try_charge(&memcg->memsw, batch, &counter)) { in try_charge()
2238 if (!page_counter_try_charge(&memcg->memory, batch, &counter)) in try_charge()
2241 page_counter_uncharge(&memcg->memsw, batch); in try_charge()
2248 if (batch > nr_pages) { in try_charge()
2249 batch = nr_pages; in try_charge()
2323 css_get_many(&memcg->css, batch); in try_charge()
2324 if (batch > nr_pages) in try_charge()
2325 refill_stock(memcg, batch - nr_pages); in try_charge()
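
try_charge() above deliberately overcharges: it asks the page counter for a whole CHARGE_BATCH even when fewer pages are needed, falls back to the exact amount under pressure, and parks the surplus in a per-CPU stock via refill_stock() so later charges skip the shared counter. A sketch of that overcharge-and-cache shape, simplified to one counter, no reclaim or retry loop, and illustrative names throughout:

#include <stdio.h>

#define CHARGE_BATCH 32

static long limit = 100, used;     /* the shared, contended counter */
static long stock;                 /* locally cached, pre-charged units */

static int counter_try_charge(long n)
{
	if (used + n > limit)
		return 0;
	used += n;
	return 1;
}

static int try_charge(long nr)
{
	long batch = nr > CHARGE_BATCH ? nr : CHARGE_BATCH;

	if (stock >= nr) {             /* fast path: consume the local stock */
		stock -= nr;
		return 1;
	}
	if (!counter_try_charge(batch)) {
		batch = nr;                /* under pressure: take only what is needed */
		if (!counter_try_charge(nr))
			return 0;
	}
	if (batch > nr)
		stock += batch - nr;       /* refill_stock() in the kernel */
	return 1;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("charge 4 -> %d (used=%ld stock=%ld)\n",
		       try_charge(4), used, stock);
	return 0;
}
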
slab.c
660 static void init_arraycache(struct array_cache *ac, int limit, int batch) in init_arraycache() argument
673 ac->batchcount = batch; in init_arraycache()
871 int batch, gfp_t gfp) in __alloc_alien_cache() argument
877 init_arraycache(&alc->ac, entries, batch); in __alloc_alien_cache()
vmstat.c
1230 pageset->pcp.batch); in zoneinfo_show_print()
vmscan.c
247 long batch_size = shrinker->batch ? shrinker->batch in do_shrink_slab()
/linux-4.1.27/lib/
percpu_counter.c
75 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) in __percpu_counter_add() argument
81 if (count >= batch || count <= -batch) { in __percpu_counter_add()
200 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) in __percpu_counter_compare() argument
206 if (abs(count - rhs) > (batch * num_online_cpus())) { in __percpu_counter_compare()
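
__percpu_counter_add() above touches the shared count only when a CPU's private delta reaches plus or minus batch, and __percpu_counter_compare() trusts the approximate total only when it differs from rhs by more than batch * num_online_cpus(); otherwise it sums precisely. A single-threaded model of both rules, with an illustrative struct and names:

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct pc {
	long total;             /* shared, approximately up to date  */
	long local[NR_CPUS];    /* per-cpu deltas not yet folded in  */
	long batch;
};

static void pc_add(struct pc *c, int cpu, long amount)
{
	long v = c->local[cpu] + amount;

	if (v >= c->batch || v <= -c->batch) {   /* fold into the total */
		c->total += v;
		c->local[cpu] = 0;
	} else {
		c->local[cpu] = v;
	}
}

static int pc_compare(struct pc *c, long rhs)
{
	/* Cheap answer if the approximate total is far enough from rhs. */
	if (labs(c->total - rhs) > c->batch * NR_CPUS)
		return c->total < rhs ? -1 : 1;
	/* Otherwise sum precisely, as __percpu_counter_compare() does. */
	long sum = c->total;
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += c->local[cpu];
	return sum < rhs ? -1 : sum > rhs ? 1 : 0;
}

int main(void)
{
	struct pc c = { .batch = 8 };

	for (int i = 0; i < 30; i++)
		pc_add(&c, i % NR_CPUS, 1);
	printf("approx=%ld compare(30)=%d\n", c.total, pc_compare(&c, 30));
	return 0;
}

The xfs_mount.c hit further down shows the knob in use: XFS drops the batch to 1 as free blocks run low, trading speed for an exact count near ENOSPC.
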
/linux-4.1.27/include/linux/
percpu_counter.h
42 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
44 int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
125 __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch) in __percpu_counter_compare() argument
139 __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch) in __percpu_counter_add() argument
shrinker.h
56 long batch; /* reclaim batch size, 0 = default */ member
mmzone.h
255 int batch; /* chunk size for buddy add/remove */ member
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/
console.c
1141 lstcon_batch_t *batch; in lstcon_testrpc_condition() local
1149 batch = test->tes_batch; in lstcon_testrpc_condition()
1150 LASSERT(batch != NULL); in lstcon_testrpc_condition()
1160 hash = batch->bat_cli_hash; in lstcon_testrpc_condition()
1161 head = &batch->bat_cli_list; in lstcon_testrpc_condition()
1166 hash = batch->bat_srv_hash; in lstcon_testrpc_condition()
1167 head = &batch->bat_srv_list; in lstcon_testrpc_condition()
1233 lstcon_verify_batch(const char *name, lstcon_batch_t **batch) in lstcon_verify_batch() argument
1237 rc = lstcon_batch_find(name, batch); in lstcon_verify_batch()
1243 if ((*batch)->bat_state != LST_BATCH_IDLE) { in lstcon_verify_batch()
[all …]
conrpc.c
659 lstcon_batch_t *batch; in lstcon_batrpc_prep() local
682 batch = (lstcon_batch_t *)tsb; in lstcon_batrpc_prep()
683 brq->bar_arg = batch->bat_arg; in lstcon_batrpc_prep()
framework.c
666 sfw_batch_t *batch; in sfw_destroy_session() local
672 batch = list_entry(sn->sn_batches.next, in sfw_destroy_session()
674 list_del_init(&batch->bat_list); in sfw_destroy_session()
675 sfw_destroy_batch(batch); in sfw_destroy_session()
/linux-4.1.27/arch/s390/mm/
pgtable.c
1136 struct mmu_table_batch *batch; in tlb_remove_table_rcu() local
1139 batch = container_of(head, struct mmu_table_batch, rcu); in tlb_remove_table_rcu()
1141 for (i = 0; i < batch->nr; i++) in tlb_remove_table_rcu()
1142 __tlb_remove_table(batch->tables[i]); in tlb_remove_table_rcu()
1144 free_page((unsigned long)batch); in tlb_remove_table_rcu()
1149 struct mmu_table_batch **batch = &tlb->batch; in tlb_table_flush() local
1151 if (*batch) { in tlb_table_flush()
1152 call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); in tlb_table_flush()
1153 *batch = NULL; in tlb_table_flush()
1159 struct mmu_table_batch **batch = &tlb->batch; in tlb_remove_table() local
[all …]
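
The s390 hits above batch page-table pointers into an mmu_table_batch and hand the whole page to call_rcu_sched(); tlb_remove_table_rcu() later frees every table in the batch and then the batch page itself. The sketch below keeps only that batching shape: it is single-threaded, the "grace period" is immediate, allocation-failure handling is omitted, and every name is illustrative rather than the kernel's.

#include <stdlib.h>
#include <stdio.h>

#define BATCH_NR 16

struct table_batch {
	int nr;
	void *tables[BATCH_NR];
};

static struct table_batch *pending;

/* Frees each queued table, then the batch itself (tlb_remove_table_rcu()). */
static void batch_free_callback(struct table_batch *b)
{
	for (int i = 0; i < b->nr; i++)
		free(b->tables[i]);
	free(b);
}

/* Stand-in for call_rcu_sched(): here the grace period is immediate. */
static void table_flush(void)
{
	if (pending) {
		batch_free_callback(pending);
		pending = NULL;
	}
}

/* Queue one table; open a new batch when none exists or the current is full. */
static void remove_table(void *table)
{
	if (pending && pending->nr == BATCH_NR)
		table_flush();
	if (!pending)
		pending = calloc(1, sizeof(*pending));   /* failure handling omitted */
	pending->tables[pending->nr++] = table;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		remove_table(malloc(64));
	table_flush();
	puts("all tables freed in one batched callback");
	return 0;
}
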
/linux-4.1.27/fs/xfs/
xfs_icache.c
530 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; in xfs_inode_ag_walk() local
538 (void **)batch, first_index, in xfs_inode_ag_walk()
543 (void **) batch, first_index, in xfs_inode_ag_walk()
556 struct xfs_inode *ip = batch[i]; in xfs_inode_ag_walk()
559 batch[i] = NULL; in xfs_inode_ag_walk()
584 if (!batch[i]) in xfs_inode_ag_walk()
586 error = execute(batch[i], flags, args); in xfs_inode_ag_walk()
587 IRELE(batch[i]); in xfs_inode_ag_walk()
1051 struct xfs_inode *batch[XFS_LOOKUP_BATCH]; in xfs_reclaim_inodes_ag() local
1057 (void **)batch, first_index, in xfs_reclaim_inodes_ag()
[all …]
xfs_mount.c
1136 s32 batch; in xfs_mod_fdblocks() local
1172 batch = 1; in xfs_mod_fdblocks()
1174 batch = XFS_FDBLOCKS_BATCH; in xfs_mod_fdblocks()
1176 __percpu_counter_add(&mp->m_fdblocks, delta, batch); in xfs_mod_fdblocks()
xfs_qm.c
81 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH]; in xfs_qm_dquot_walk() local
86 nr_found = radix_tree_gang_lookup(tree, (void **)batch, in xfs_qm_dquot_walk()
94 struct xfs_dquot *dqp = batch[i]; in xfs_qm_dquot_walk()
98 error = execute(batch[i], data); in xfs_qm_dquot_walk()
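
Both XFS walkers above (xfs_inode_ag_walk() and xfs_qm_dquot_walk(), plus the reclaim path) pull up to a fixed lookup batch of object pointers out of a radix tree per gang lookup, process the batch, and resume from just past the last index seen, so the tree is only probed briefly per batch. A sketch of that walk, with a plain sorted array standing in for the radix tree and all names illustrative:

#include <stdio.h>

#define LOOKUP_BATCH 4

struct obj { unsigned long index; };

static struct obj store[] = {
	{ 2 }, { 3 }, { 7 }, { 9 }, { 12 }, { 15 }, { 21 },
};
#define NSTORE (sizeof(store) / sizeof(store[0]))

/* Gang lookup: up to max objects with index >= first. */
static int gang_lookup(struct obj **batch, unsigned long first, int max)
{
	int nr = 0;

	for (unsigned i = 0; i < NSTORE && nr < max; i++)
		if (store[i].index >= first)
			batch[nr++] = &store[i];
	return nr;
}

int main(void)
{
	struct obj *batch[LOOKUP_BATCH];
	unsigned long first_index = 0;
	int nr_found;

	do {
		nr_found = gang_lookup(batch, first_index, LOOKUP_BATCH);
		for (int i = 0; i < nr_found; i++) {
			printf("visit index %lu\n", batch[i]->index);
			/* resume after the last object we saw */
			first_index = batch[i]->index + 1;
		}
	} while (nr_found == LOOKUP_BATCH);  /* a short batch means we are done */
	return 0;
}
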
/linux-4.1.27/arch/s390/include/asm/
tlb.h
33 struct mmu_table_batch *batch; member
59 tlb->batch = NULL; in tlb_gather_mmu()
/linux-4.1.27/drivers/xen/
grant-table.c
754 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count) in gnttab_batch_map() argument
758 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count)) in gnttab_batch_map()
760 for (op = batch; op < batch + count; op++) in gnttab_batch_map()
767 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count) in gnttab_batch_copy() argument
771 if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count)) in gnttab_batch_copy()
773 for (op = batch; op < batch + count; op++) in gnttab_batch_copy()
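
gnttab_batch_map() and gnttab_batch_copy() above push an entire array of grant operations through one hypercall, then walk the per-op status words and retry just the entries the hypervisor asked to have repeated. The sketch below models that submit-then-retry shape against a fake backend that fails each op once; the statuses, names, and backend behaviour are all illustrative:

#include <stdio.h>

enum { ST_OK = 0, ST_AGAIN = 1 };

struct op { int arg; int status; int tries; };

/* Fake batched backend: every op succeeds on its second attempt. */
static void submit_batch(struct op *batch, unsigned count)
{
	for (unsigned i = 0; i < count; i++) {
		batch[i].tries++;
		batch[i].status = (batch[i].tries >= 2) ? ST_OK : ST_AGAIN;
	}
}

static void batch_with_retry(struct op *batch, unsigned count)
{
	submit_batch(batch, count);
	for (struct op *op = batch; op < batch + count; op++)
		while (op->status == ST_AGAIN)   /* the kernel retries with backoff */
			submit_batch(op, 1);
}

int main(void)
{
	struct op ops[3] = { { .arg = 1 }, { .arg = 2 }, { .arg = 3 } };

	batch_with_retry(ops, 3);
	for (int i = 0; i < 3; i++)
		printf("op %d: status %d after %d tries\n",
		       ops[i].arg, ops[i].status, ops[i].tries);
	return 0;
}
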
/linux-4.1.27/drivers/gpu/drm/i915/
i915_gem_render_state.c
92 u32 s = rodata->batch[i]; in render_state_setup()
99 rodata->batch[i + 1] != 0) in render_state_setup()
i915_gem_render_state.h
31 const u32 *batch; member
intel_renderstate.h
37 .batch = gen ## _g ## _null_state_batch, \
/linux-4.1.27/include/xen/
grant_table.h
206 void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
207 void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
/linux-4.1.27/Documentation/networking/timestamping/
txtimestamp.c
184 int batch = 0; in __recv_errmsg_cmsg() local
223 batch++; in __recv_errmsg_cmsg()
227 if (batch > 1) in __recv_errmsg_cmsg()
228 fprintf(stderr, "batched %d timestamps\n", batch); in __recv_errmsg_cmsg()
/linux-4.1.27/arch/powerpc/platforms/pseries/
lpar.c
524 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); in pSeries_lpar_flush_hash_range() local
534 psize = batch->psize; in pSeries_lpar_flush_hash_range()
535 ssize = batch->ssize; in pSeries_lpar_flush_hash_range()
538 vpn = batch->vpn[i]; in pSeries_lpar_flush_hash_range()
539 pte = batch->pte[i]; in pSeries_lpar_flush_hash_range()
/linux-4.1.27/virt/kvm/
Kconfig
28 # Toggle to switch between direct notification and batch job
/linux-4.1.27/arch/arm/include/asm/
tlb.h
68 struct mmu_table_batch *batch; member
164 tlb->batch = NULL; in tlb_gather_mmu()
/linux-4.1.27/tools/vm/
page-types.c
596 unsigned long batch; in walk_pfn() local
601 batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH); in walk_pfn()
602 pages = kpageflags_read(buf, index, batch); in walk_pfn()
618 unsigned long batch; in walk_vma() local
624 batch = min_t(unsigned long, count, PAGEMAP_BATCH); in walk_vma()
625 pages = pagemap_read(buf, index, batch); in walk_vma()
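
walk_pfn() and walk_vma() above consume arbitrarily large ranges through a fixed-size buffer by reading min(remaining, BATCH) entries per iteration and stopping on a short read. The same idiom in isolation; read_range() is a stand-in for kpageflags_read()/pagemap_read() and everything else is illustrative:

#include <stdio.h>

#define BATCH 8

/* Pretend the backing store holds indexes [0, limit). */
static long read_range(long *buf, long index, long count, long limit)
{
	long n;

	for (n = 0; n < count && index + n < limit; n++)
		buf[n] = index + n;
	return n;
}

static void walk(long index, long count, long limit)
{
	long buf[BATCH];

	while (count > 0) {
		long batch = count < BATCH ? count : BATCH;   /* min_t() */
		long got = read_range(buf, index, batch, limit);

		if (got <= 0)
			break;                 /* short read: nothing left */
		printf("processed %ld items starting at %ld\n", got, index);
		index += got;
		count -= got;
	}
}

int main(void)
{
	walk(0, 30, 20);   /* caller asks for 30; the store ends at 20 */
	return 0;
}
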
/linux-4.1.27/arch/powerpc/kernel/
process.c
769 struct ppc64_tlb_batch *batch; in __switch_to() local
884 batch = this_cpu_ptr(&ppc64_tlb_batch); in __switch_to()
885 if (batch->active) { in __switch_to()
887 if (batch->index) in __switch_to()
888 __flush_tlb_pending(batch); in __switch_to()
889 batch->active = 0; in __switch_to()
907 batch = this_cpu_ptr(&ppc64_tlb_batch); in __switch_to()
908 batch->active = 1; in __switch_to()
/linux-4.1.27/include/asm-generic/
tlb.h
95 struct mmu_table_batch *batch; member
/linux-4.1.27/block/
Kconfig.iosched
21 a new point in the service tree and doing a batch of IO from there
/linux-4.1.27/drivers/target/iscsi/
iscsi_target_erl1.c
1073 int batch = 0; in iscsit_handle_ooo_cmdsn() local
1081 batch = 1; in iscsit_handle_ooo_cmdsn()
1086 batch = 1; in iscsit_handle_ooo_cmdsn()
1094 ooo_cmdsn->batch_count = (batch) ? in iscsit_handle_ooo_cmdsn()
/linux-4.1.27/Documentation/trace/
events-kmem.txt
60 When pages are freed in batch, mm_page_free_batched is also triggered.
62 freed in batch with a page list. Significant amounts of activity here could
/linux-4.1.27/Documentation/cgroups/
freezer-subsystem.txt
1 The cgroup freezer is useful to batch job management systems which start
6 be started/stopped by the batch job management system. It also provides
cpuacct.txt
49 due to the batch processing nature of percpu_counter.
cpusets.txt
253 This enables batch managers monitoring jobs running in dedicated
264 This mechanism provides a very economical way for the batch manager
266 batch manager or other user code to decide what to do about it and
278 the system load imposed by a batch scheduler monitoring this
283 counter, a batch scheduler can detect memory pressure with a
288 the batch scheduler can obtain the key information, memory
/linux-4.1.27/Documentation/ABI/testing/
sysfs-fs-f2fs
82 Controls the trimming rate in batch mode.
/linux-4.1.27/drivers/block/xen-blkback/
blkback.c
763 unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST); in xen_blkbk_unmap() local
765 invcount = xen_blkbk_unmap_prepare(blkif, pages, batch, in xen_blkbk_unmap()
772 pages += batch; in xen_blkbk_unmap()
773 num -= batch; in xen_blkbk_unmap()
/linux-4.1.27/Documentation/block/
deadline-iosched.txt
39 maximum number of requests per batch.
biodoc.txt
983 The generic i/o scheduler algorithm attempts to sort/merge/batch requests for
1011 iii. Plugging the queue to batch requests in anticipation of opportunities for
/linux-4.1.27/arch/x86/xen/
mmu.c
2502 int batch = min(REMAP_BATCH_SIZE, nr); in do_remap_mfn() local
2503 int batch_left = batch; in do_remap_mfn()
2504 range = (unsigned long)batch << PAGE_SHIFT; in do_remap_mfn()
2540 nr -= batch; in do_remap_mfn()
2543 err_ptr += batch; in do_remap_mfn()
/linux-4.1.27/kernel/
Makefile
183 -batch -x509 -config x509.genkey \
/linux-4.1.27/drivers/staging/android/ion/
ion_heap.c
314 heap->shrinker.batch = 0; in ion_heap_init_shrinker()
/linux-4.1.27/Documentation/scheduler/
sched-bwc.txt
54 "silos" in a batch fashion. This greatly reduces global accounting pressure
sched-design-CFS.txt
125 batch jobs.
/linux-4.1.27/Documentation/
dynamic-debug-howto.txt
107 If your query set is big, you can batch them too:
109 ~# cat query-batch-file > <debugfs>/dynamic_debug/control
module-signing.txt
137 openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
kprobes.txt
43 There are also register_/unregister_*probes() functions for batch
kernel-parameters.txt
3007 process in one batch.
3057 batch limiting is disabled.
3061 batch limiting is re-enabled.
devices.txt
67 in "batch mode", so there are likely additional registrations that
/linux-4.1.27/arch/arm/lib/
lib1funcs.S
171 @ Do comparisons in batch of 4 first.
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
service.c
405 DECLARE_RS_BATCH(batch); in ptlrpc_commit_replies()
407 rs_batch_init(&batch); in ptlrpc_commit_replies()
420 rs_batch_add(&batch, rs); in ptlrpc_commit_replies()
424 rs_batch_fini(&batch); in ptlrpc_commit_replies()
/linux-4.1.27/Documentation/vm/
page_migration
28 a process to a processor on a distant node. A batch scheduler or an
/linux-4.1.27/Documentation/crypto/
async-tx-api.txt
92 async_<operation> call. Offload engine drivers batch operations to
/linux-4.1.27/Documentation/filesystems/
btrfs.txt
109 initiate batch trims from userspace).
f2fs.txt
209 to be trimmed out in batch mode when FITRIM
ext4.txt
277 additional filesystem operations to be batch
vfs.txt
318 appropriate scan batch sizes without having to worry about whether
319 implementations will cause holdoff problems due to large scan batch
xfs-delayed-logging-design.txt
82 the current batch completes. It is now common for a single current CPU core to
/linux-4.1.27/arch/x86/kvm/
mmu.c
4617 int batch = 0; in kvm_zap_obsolete_pages() local
4643 if (batch >= BATCH_ZAP_PAGES && in kvm_zap_obsolete_pages()
4645 batch = 0; in kvm_zap_obsolete_pages()
4651 batch += ret; in kvm_zap_obsolete_pages()
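
kvm_zap_obsolete_pages() above counts zapped pages into batch and, once BATCH_ZAP_PAGES of progress has accumulated and someone else is waiting, yields and resets the counter, bounding how long the lock is monopolised. The distilled shape of that loop; contention() stands in for need_resched()/lock waiters, and every name and constant is illustrative:

#include <stdio.h>

#define BATCH_ZAP 10

static int contention(int item)
{
	return item % 7 == 0;          /* pretend a waiter shows up sometimes */
}

int main(void)
{
	int batch = 0;

	for (int item = 1; item <= 35; item++) {
		if (batch >= BATCH_ZAP && contention(item)) {
			printf("yield after %d zaps\n", batch);
			batch = 0;         /* kernel: drop and retake mmu_lock */
		}
		batch++;               /* one page zapped */
	}
	printf("final partial batch: %d\n", batch);
	return 0;
}
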
/linux-4.1.27/fs/
super.c
232 s->s_shrink.batch = 1024; in alloc_super()
/linux-4.1.27/drivers/staging/lustre/lustre/obdecho/
echo_client.c
1646 u64 batch, struct obd_trans_info *oti, in echo_client_prep_commit() argument
1661 npages = batch >> PAGE_CACHE_SHIFT; in echo_client_prep_commit()
/linux-4.1.27/Documentation/locking/
ww-mutex-design.txt
16 there is no way to guarantee that buffers appear in an execbuf/batch in
/linux-4.1.27/Documentation/sysctl/
vm.txt
712 The batch value of each per cpu pagelist is also updated as a result. It is
713 set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
/linux-4.1.27/drivers/md/
raid5.c
5729 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
5735 batch[batch_size++] = sh; in handle_active_stripes()
5756 handle_stripe(batch[i]); in handle_active_stripes()
5762 hash = batch[i]->hash_lock_index; in handle_active_stripes()
5763 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); in handle_active_stripes()
6593 conf->shrinker.batch = 128; in setup_conf()
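
handle_active_stripes() above amortises locking: while holding the list lock it moves up to MAX_STRIPE_BATCH stripes into a local array, then runs the expensive handle_stripe() work with the lock dropped, retaking it only to release the batch. A compact pthreads analogue of grab-a-batch-under-the-lock, work-outside-it; the names, queue, and workload are illustrative:

#include <pthread.h>
#include <stdio.h>

#define MAX_BATCH 8
#define NITEMS 20

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[NITEMS];
static int head, tail = NITEMS;      /* pre-filled queue of work items */

static int grab_batch(int *batch)
{
	int n = 0;

	pthread_mutex_lock(&lock);       /* lock held only while moving pointers */
	while (head < tail && n < MAX_BATCH)
		batch[n++] = queue[head++];
	pthread_mutex_unlock(&lock);
	return n;
}

int main(void)
{
	int batch[MAX_BATCH], n;

	for (int i = 0; i < NITEMS; i++)
		queue[i] = i;
	while ((n = grab_batch(batch)) > 0) {
		/* heavy per-item work happens with the lock dropped */
		printf("handling batch of %d (first item %d)\n", n, batch[0]);
	}
	return 0;
}
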
dm-bufio.c
1668 c->shrinker.batch = 0; in dm_bufio_client_create()
/linux-4.1.27/Documentation/RCU/
checklist.txt
190 a single non-expedited primitive to cover the entire batch.
trace.txt
166 o "b" is the batch limit for this CPU. If more than this number
RTFP.txt
1030 Add per-cpu batch counter"
/linux-4.1.27/Documentation/networking/
cs89x0.txt
89 build - batch file to compile cs89x0.c.
/linux-4.1.27/drivers/message/fusion/lsi/
mpi_history.txt
230 * 06-22-00 01.00.03 Removed batch controls from LAN_0 page and updated the
/linux-4.1.27/fs/btrfs/
tree-log.c
2657 int batch = atomic_read(&root->log_batch); in btrfs_sync_log() local
2666 if (batch == atomic_read(&root->log_batch)) in btrfs_sync_log()
/linux-4.1.27/drivers/md/bcache/
btree.c
810 c->shrink.batch = c->btree_pages * 2; in bch_btree_cache_alloc()
/linux-4.1.27/drivers/scsi/aic7xxx/
aic7xxx.seq
1078 * we can batch the clearing of HADDR with the fixup.
aic79xx.seq
376 * order is preserved even if we batch.