mcidx              40 arch/x86/xen/multicalls.c 	unsigned mcidx, argidx, cbidx;
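The hit at multicalls.c line 40 is the declaration of the three batch counters inside the per-CPU multicall buffer: mcidx counts queued multicall_entry slots, argidx counts bytes of argument space consumed, and cbidx counts completion callbacks waiting for the next flush. A minimal sketch of the enclosing structure, reconstructed from these hits and mainline context (the MC_DEBUG fields and the array sizes vary by kernel version):

	struct mc_buffer {
		unsigned mcidx, argidx, cbidx;	/* entries, arg bytes, callbacks queued */
		struct multicall_entry entries[MC_BATCH];	/* batch handed to the hypervisor */
	#if MC_DEBUG
		struct multicall_entry debug[MC_BATCH];	/* pre-issue snapshot for diagnostics */
		void *caller[MC_BATCH];			/* who queued each entry */
	#endif
		unsigned char args[MC_ARGS];		/* argument space, carved out via argidx */
		struct callback {
			void (*fn)(void *);
			void *data;
		} callbacks[MC_CBS];			/* run after a successful flush */
	};
	static DEFINE_PER_CPU(struct mc_buffer, mc_buffer);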
mcidx              70 arch/x86/xen/multicalls.c 	trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);
mcidx              74 arch/x86/xen/multicalls.c 	       b->mcidx * sizeof(struct multicall_entry));
mcidx              77 arch/x86/xen/multicalls.c 	switch (b->mcidx) {
mcidx              95 arch/x86/xen/multicalls.c 		if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
mcidx              97 arch/x86/xen/multicalls.c 		for (i = 0; i < b->mcidx; i++)
mcidx             104 arch/x86/xen/multicalls.c 		       ret, b->mcidx, smp_processor_id());
mcidx             105 arch/x86/xen/multicalls.c 		for (i = 0; i < b->mcidx; i++) {
mcidx             125 arch/x86/xen/multicalls.c 	b->mcidx = 0;
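The hits at lines 70 through 125 all fall inside xen_mc_flush(), which drains the batch: it traces the three counters, snapshots the entries under MC_DEBUG, dispatches the whole batch through the HYPERVISOR_multicall hypercall (with a fast path for zero or one queued entry), counts per-entry failures, and finally resets mcidx so the buffer is empty again. A sketch of that control flow, reconstructed from the hits above rather than copied verbatim; the singleton fast path and helper names follow current mainline and differ in older kernels:

	void xen_mc_flush(void)
	{
		struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
		struct multicall_entry *mc;
		int ret = 0;
		int i;

		trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

	#if MC_DEBUG
		memcpy(b->debug, b->entries,
		       b->mcidx * sizeof(struct multicall_entry));
	#endif

		switch (b->mcidx) {
		case 0:
			/* nothing queued; nothing to do */
			break;
		case 1:
			/* singleton: bypass the multicall machinery and
			   issue the one hypercall directly */
			mc = &b->entries[0];
			mc->result = xen_single_call(mc->op, mc->args[0],
						     mc->args[1], mc->args[2],
						     mc->args[3], mc->args[4]);
			ret = mc->result < 0;
			break;
		default:
			if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
				BUG();
			for (i = 0; i < b->mcidx; i++)
				if (b->entries[i].result < 0)
					ret++;
		}

		if (WARN_ON(ret)) {
			pr_err("%d of %d multicall(s) failed: cpu %d\n",
			       ret, b->mcidx, smp_processor_id());
			for (i = 0; i < b->mcidx; i++) {
				if (b->entries[i].result < 0) {
					/* dump the failed entry's op, args and
					   result (plus caller under MC_DEBUG) */
				}
			}
		}

		b->mcidx = 0;	/* batch consumed: reset entry count ... */
		b->argidx = 0;	/* ... and the argument-space cursor */

		/* run and clear the queued completion callbacks */
		for (i = 0; i < b->cbidx; i++)
			b->callbacks[i].fn(b->callbacks[i].data);
		b->cbidx = 0;
	}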
mcidx             149 arch/x86/xen/multicalls.c 	if (unlikely(b->mcidx == MC_BATCH ||
mcidx             151 arch/x86/xen/multicalls.c 		trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
mcidx             157 arch/x86/xen/multicalls.c 	ret.mc = &b->entries[b->mcidx];
mcidx             159 arch/x86/xen/multicalls.c 	b->caller[b->mcidx] = __builtin_return_address(0);
mcidx             161 arch/x86/xen/multicalls.c 	b->mcidx++;
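The hits at lines 149 through 161 are the reservation path in __xen_mc_entry(): a new entry is queued only after making sure there is room, so mcidx doubles as both the fill level and the index of the next free slot. A sketch under the same hedge as above (the roundup of the argument cursor follows mainline):

	struct multicall_space __xen_mc_entry(size_t args)
	{
		struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
		struct multicall_space ret;
		unsigned argidx = roundup(b->argidx, sizeof(u64));

		/* flush first if either the entry array or the argument
		   space would overflow, recording which limit was hit */
		if (unlikely(b->mcidx == MC_BATCH ||
			     (argidx + args) >= MC_ARGS)) {
			trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ?
						  XEN_MC_FL_BATCH : XEN_MC_FL_ARGS);
			xen_mc_flush();
			argidx = roundup(b->argidx, sizeof(u64));
		}

		ret.mc = &b->entries[b->mcidx];		/* next free slot */
	#if MC_DEBUG
		b->caller[b->mcidx] = __builtin_return_address(0);
	#endif
		b->mcidx++;				/* slot now belongs to the caller */
		ret.args = &b->args[argidx];
		b->argidx = argidx + args;

		BUG_ON(b->argidx >= MC_ARGS);
		return ret;
	}

Callers fill in ret.mc->op and ret.mc->args plus the reserved argument space; the batch is then issued as a whole on the next xen_mc_flush().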
mcidx             177 arch/x86/xen/multicalls.c 	if (unlikely(b->mcidx == 0 ||
mcidx             178 arch/x86/xen/multicalls.c 		     b->entries[b->mcidx - 1].op != op)) {
mcidx             188 arch/x86/xen/multicalls.c 	ret.mc = &b->entries[b->mcidx - 1];
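The last two multicalls.c hits (lines 177-188) sit in xen_mc_extend_args(), which grows the argument area of the most recently queued entry instead of adding a new one; it only succeeds if the batch is non-empty and the tail entry at mcidx - 1 carries the op the caller expects. A sketch, again reconstructed rather than verbatim:

	struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
	{
		struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
		struct multicall_space ret = { NULL, NULL };

		/* only the tail entry can be extended, and only if it is
		   the hypercall the caller thinks it is */
		if (unlikely(b->mcidx == 0 ||
			     b->entries[b->mcidx - 1].op != op)) {
			trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP);
			goto out;
		}

		if (unlikely((b->argidx + size) >= MC_ARGS)) {
			trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE);
			goto out;
		}

		ret.mc = &b->entries[b->mcidx - 1];	/* tail entry */
		ret.args = &b->args[b->argidx];
		b->argidx += size;

		trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK);
	out:
		return ret;
	}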
mcidx              99 include/trace/events/xen.h 	    TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
mcidx             100 include/trace/events/xen.h 	    TP_ARGS(mcidx, argidx, cbidx),
mcidx             102 include/trace/events/xen.h 		    __field(unsigned, mcidx)
mcidx             106 include/trace/events/xen.h 	    TP_fast_assign(__entry->mcidx = mcidx;
mcidx             110 include/trace/events/xen.h 		      __entry->mcidx, __entry->argidx, __entry->cbidx)
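The include/trace/events/xen.h hits belong to the xen_mc_flush tracepoint that the flush path fires with the three counters. Assembled from the hit lines; the __field entries for argidx/cbidx and the TP_printk format string are filled in from mainline and may differ slightly by version:

	TRACE_EVENT(xen_mc_flush,
		    TP_PROTO(unsigned mcidx, unsigned argidx, unsigned cbidx),
		    TP_ARGS(mcidx, argidx, cbidx),
		    TP_STRUCT__entry(
			    __field(unsigned, mcidx)
			    __field(unsigned, argidx)
			    __field(unsigned, cbidx)
			    ),
		    TP_fast_assign(__entry->mcidx = mcidx;
				   __entry->argidx = argidx;
				   __entry->cbidx = cbidx),
		    TP_printk("flushing %u hypercalls, %u arg bytes, %u callbacks",
			      __entry->mcidx, __entry->argidx, __entry->cbidx)
	    );

Enabling events/xen/xen_mc_flush under tracefs (e.g. /sys/kernel/debug/tracing) logs the batch depth at every flush, which is useful for judging how effectively multicall batching is amortizing hypercall overhead.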