call               12 arch/arc/kernel/sys.c #define __SYSCALL(nr, call) [nr] = (call),
call              363 arch/arm/kernel/sys_oabi-compat.c asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third,
call              366 arch/arm/kernel/sys_oabi-compat.c 	switch (call & 0xffff) {
call              377 arch/arm/kernel/sys_oabi-compat.c 		return sys_ipc(call, first, second, third, ptr, fifth);
call              440 arch/arm/kernel/sys_oabi-compat.c asmlinkage long sys_oabi_socketcall(int call, unsigned long __user *args)
call              444 arch/arm/kernel/sys_oabi-compat.c 	switch (call) {
call              463 arch/arm/kernel/sys_oabi-compat.c 		r = sys_socketcall(call, args);
call               48 arch/c6x/kernel/sys_c6x.c #define __SYSCALL(nr, call) [nr] = (call),
call               41 arch/csky/kernel/ftrace.c 			     uint16_t *call, bool nolr)
call               45 arch/csky/kernel/ftrace.c 	call[0]	= nolr ? NOP : PUSH_LR;
call               50 arch/csky/kernel/ftrace.c 		call[1] = MOVIH_LINK;
call               51 arch/csky/kernel/ftrace.c 		call[2] = callee >> 16;
call               52 arch/csky/kernel/ftrace.c 		call[3] = ORI_LINK;
call               53 arch/csky/kernel/ftrace.c 		call[4] = callee & 0xffff;
call               54 arch/csky/kernel/ftrace.c 		call[5] = JSR_LINK;
call               55 arch/csky/kernel/ftrace.c 		call[6] = 0;
call               59 arch/csky/kernel/ftrace.c 		call[1] = BSR_LINK |
call               61 arch/csky/kernel/ftrace.c 		call[2] = (uint16_t)((unsigned long) offset & 0xffff);
call               62 arch/csky/kernel/ftrace.c 		call[3] = call[5] = NOP32_HI;
call               63 arch/csky/kernel/ftrace.c 		call[4] = call[6] = NOP32_LO;
call               92 arch/csky/kernel/ftrace.c 	uint16_t call[7];
call               97 arch/csky/kernel/ftrace.c 	make_jbsr(target, hook, call, nolr);
call               99 arch/csky/kernel/ftrace.c 	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
call                8 arch/csky/kernel/syscall_table.c #define __SYSCALL(nr, call)[nr] = (call),
call                7 arch/h8300/kernel/syscalls.c #define __SYSCALL(nr, call) [nr] = (call),
call               15 arch/hexagon/kernel/syscalltab.c #define __SYSCALL(nr, call) [nr] = (call),
call               96 arch/ia64/include/asm/asmmacro.h 	br.call.sptk.many b7=2f;;			\
call               13 arch/ia64/kernel/minstate.h (pUStk) br.call.spnt rp=account_sys_enter		\
call             1824 arch/ia64/kernel/ptrace.c do_regset_call(void (*call)(struct unw_frame_info *, void *),
call             1836 arch/ia64/kernel/ptrace.c 		unw_init_running(call, &info);
call             1841 arch/ia64/kernel/ptrace.c 		(*call)(&ufi, &info);
call             20598 arch/m68k/ifpsp060/src/fpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
call             20809 arch/m68k/ifpsp060/src/fpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
call             7499 arch/m68k/ifpsp060/src/pfpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
call             7710 arch/m68k/ifpsp060/src/pfpsp.S #     if exp now equals one, then it overflowed so call ovf_res.
call              259 arch/mips/sgi-ip27/ip27-irq.c 	int resched, call;
call              265 arch/mips/sgi-ip27/ip27-irq.c 	call = CPU_CALL_A_IRQ + slice;
call              266 arch/mips/sgi-ip27/ip27-irq.c 	set_bit(call, mask);
call              267 arch/mips/sgi-ip27/ip27-irq.c 	LOCAL_HUB_CLR_INTR(call);
call               10 arch/nds32/kernel/syscall_table.c #define __SYSCALL(nr, call) [nr] = (call),
call               13 arch/nios2/kernel/syscall_table.c #define __SYSCALL(nr, call) [nr] = (call),
call               20 arch/openrisc/kernel/sys_call_table.c #define __SYSCALL(nr, call) [nr] = (call),
call               46 arch/riscv/include/asm/ftrace.h #define make_call(caller, callee, call)					\
call               48 arch/riscv/include/asm/ftrace.h 	call[0] = to_auipc_insn((unsigned int)((unsigned long)callee -	\
call               50 arch/riscv/include/asm/ftrace.h 	call[1] = to_jalr_insn((unsigned int)((unsigned long)callee -	\
call               47 arch/riscv/kernel/ftrace.c 	unsigned int call[2];
call               51 arch/riscv/kernel/ftrace.c 	make_call(hook_pos, target, call);
call               54 arch/riscv/kernel/ftrace.c 	ret = probe_kernel_write((void *)hook_pos, enable ? call : nops,
call               79 arch/riscv/kernel/ftrace.c 	unsigned int call[2];
call               82 arch/riscv/kernel/ftrace.c 	make_call(rec->ip, addr, call);
call               83 arch/riscv/kernel/ftrace.c 	ret = ftrace_check_current_call(rec->ip, call);
call              113 arch/riscv/kernel/ftrace.c 	unsigned int call[2];
call              116 arch/riscv/kernel/ftrace.c 	make_call(rec->ip, old_addr, call);
call              117 arch/riscv/kernel/ftrace.c 	ret = ftrace_check_current_call(rec->ip, call);
call              153 arch/riscv/kernel/ftrace.c 	unsigned int call[2];
call              157 arch/riscv/kernel/ftrace.c 	make_call(&ftrace_graph_call, &ftrace_stub, call);
call              166 arch/riscv/kernel/ftrace.c 						call);
call              182 arch/riscv/kernel/ftrace.c 	unsigned int call[2];
call              185 arch/riscv/kernel/ftrace.c 	make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
call              192 arch/riscv/kernel/ftrace.c 					call);
call               14 arch/riscv/kernel/syscall_table.c #define __SYSCALL(nr, call)	[nr] = (call),
call               61 arch/s390/kernel/compat_linux.c COMPAT_SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, compat_ulong_t, second,
call               64 arch/s390/kernel/compat_linux.c 	if (call >> 16)		/* hack for backward compatibility */
call               66 arch/s390/kernel/compat_linux.c 	return compat_ksys_ipc(call, first, second, third, ptr, third);
call               65 arch/s390/kernel/sys_s390.c SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second,
call               68 arch/s390/kernel/sys_s390.c 	if (call >> 16)
call               78 arch/s390/kernel/sys_s390.c 	return ksys_ipc(call, first, second, third, ptr, third);
call               29 arch/sparc/include/asm/ttable.h 	call	routine;				\
call               39 arch/sparc/include/asm/ttable.h 	call	routine;				\
call               48 arch/sparc/include/asm/ttable.h 	call	routine;				\
call               68 arch/sparc/include/asm/ttable.h 	call	routine;				\
call               79 arch/sparc/include/asm/ttable.h 	call	routine;				\
call               89 arch/sparc/include/asm/ttable.h 	call	routine;				\
call              136 arch/sparc/include/asm/ttable.h 1:	call	trace_hardirqs_off;			\
call              139 arch/sparc/include/asm/ttable.h 	call	routine;				\
call              152 arch/sparc/include/asm/ttable.h 	call	routine;				\
call              164 arch/sparc/include/asm/ttable.h 	call	routine;				\
call               17 arch/sparc/kernel/ftrace.c 	u32 call;
call               21 arch/sparc/kernel/ftrace.c 	call = 0x40000000 | ((u32)off >> 2);
call               23 arch/sparc/kernel/ftrace.c 	return call;
call              334 arch/sparc/kernel/sys_sparc_64.c SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second,
call              343 arch/sparc/kernel/sys_sparc_64.c 	if (call <= SEMTIMEDOP) {
call              344 arch/sparc/kernel/sys_sparc_64.c 		switch (call) {
call              368 arch/sparc/kernel/sys_sparc_64.c 	if (call <= MSGCTL) {
call              369 arch/sparc/kernel/sys_sparc_64.c 		switch (call) {
call              389 arch/sparc/kernel/sys_sparc_64.c 	if (call <= SHMCTL) {
call              390 arch/sparc/kernel/sys_sparc_64.c 		switch (call) {
call               33 arch/sparc/kernel/systbls.h asmlinkage long sys_sparc_ipc(unsigned int call, int first,
call              278 arch/um/kernel/process.c 	exitcall_t *call;
call              280 arch/um/kernel/process.c 	call = &__uml_exitcall_end;
call              281 arch/um/kernel/process.c 	while (--call >= &__uml_exitcall_begin)
call              282 arch/um/kernel/process.c 		(*call)();
call               30 arch/unicore32/kernel/sys.c #define __SYSCALL(nr, call)	[nr] = (call),
call              339 arch/x86/entry/calling.h 	call stackleak_erase
call              348 arch/x86/entry/calling.h 	call stackleak_erase
call              361 arch/x86/entry/calling.h 	call enter_from_user_mode
call              207 arch/x86/include/asm/efi.h 	efi_status_t (*call)(unsigned long, ...);
call              230 arch/x86/include/asm/efi.h 	__efi_early()->call(efi_table_attr(protocol, f, instance),	\
call              234 arch/x86/include/asm/efi.h 	__efi_early()->call(efi_table_attr(efi_boot_services, f,	\
call              238 arch/x86/include/asm/efi.h 	__efi_early()->call((unsigned long)f, __VA_ARGS__);
call              241 arch/x86/include/asm/efi.h 	__efi_early()->call(efi_table_attr(efi_runtime_services, f,	\
call              177 arch/x86/include/asm/irqflags.h #  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
call              178 arch/x86/include/asm/irqflags.h #  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
call              185 arch/x86/include/asm/irqflags.h #    define LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
call              189 arch/x86/include/asm/irqflags.h 	call lockdep_sys_exit_thunk; \
call              197 arch/x86/include/asm/irqflags.h 	call lockdep_sys_exit;			\
call               50 arch/x86/include/asm/nospec-branch.h 	call	772f;				\
call               56 arch/x86/include/asm/nospec-branch.h 	call	774f;				\
call               86 arch/x86/include/asm/nospec-branch.h 	call	.Ldo_rop_\@
call              105 arch/x86/include/asm/nospec-branch.h 	call	.Ldo_retpoline_jmp_\@
call              127 arch/x86/include/asm/nospec-branch.h 	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
call              129 arch/x86/include/asm/nospec-branch.h 		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
call              131 arch/x86/include/asm/nospec-branch.h 	call	*\reg
call              869 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_disable);	\
call              876 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_IRQ_irq_enable);		\
call              899 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_CPU_swapgs);		\
call              912 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);	    \
call              923 arch/x86/include/asm/paravirt.h 		  call PARA_INDIRECT(pv_ops+PV_MMU_read_cr2);		\
call              201 arch/x86/include/asm/xen/hypercall.h xen_single_call(unsigned int call,
call              209 arch/x86/include/asm/xen/hypercall.h 	if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
call              214 arch/x86/include/asm/xen/hypercall.h 		     : [thunk_target] "a" (&hypercall_page[call])
call              237 arch/x86/include/asm/xen/hypercall.h privcmd_call(unsigned int call,
call              245 arch/x86/include/asm/xen/hypercall.h 	res = xen_single_call(call, a1, a2, a3, a4, a5);
call              600 arch/x86/kernel/apm_32.c 	struct apm_bios_call	*call = _call;
call              611 arch/x86/kernel/apm_32.c 	apm_bios_call_asm(call->func, call->ebx, call->ecx,
call              612 arch/x86/kernel/apm_32.c 			  &call->eax, &call->ebx, &call->ecx, &call->edx,
call              613 arch/x86/kernel/apm_32.c 			  &call->esi);
call              620 arch/x86/kernel/apm_32.c 	return call->eax & 0xff;
call              624 arch/x86/kernel/apm_32.c static int on_cpu0(long (*fn)(void *), struct apm_bios_call *call)
call              631 arch/x86/kernel/apm_32.c 		ret = fn(call);
call              635 arch/x86/kernel/apm_32.c 		ret = work_on_cpu(0, fn, call);
call              640 arch/x86/kernel/apm_32.c 		call->err = ret;
call              642 arch/x86/kernel/apm_32.c 		call->err = (call->eax >> 8) & 0xff;
call              653 arch/x86/kernel/apm_32.c static int apm_bios_call(struct apm_bios_call *call)
call              655 arch/x86/kernel/apm_32.c 	return on_cpu0(__apm_bios_call, call);
call              678 arch/x86/kernel/apm_32.c 	struct apm_bios_call	*call = _call;
call              689 arch/x86/kernel/apm_32.c 	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
call              690 arch/x86/kernel/apm_32.c 					 &call->eax);
call              716 arch/x86/kernel/apm_32.c 	struct apm_bios_call call;
call              719 arch/x86/kernel/apm_32.c 	call.func = func;
call              720 arch/x86/kernel/apm_32.c 	call.ebx = ebx_in;
call              721 arch/x86/kernel/apm_32.c 	call.ecx = ecx_in;
call              723 arch/x86/kernel/apm_32.c 	ret = on_cpu0(__apm_bios_call_simple, &call);
call              724 arch/x86/kernel/apm_32.c 	*eax = call.eax;
call              725 arch/x86/kernel/apm_32.c 	*err = call.err;
call              774 arch/x86/kernel/apm_32.c 	struct apm_bios_call call;
call              776 arch/x86/kernel/apm_32.c 	call.func = APM_FUNC_GET_EVENT;
call              777 arch/x86/kernel/apm_32.c 	call.ebx = call.ecx = 0;
call              779 arch/x86/kernel/apm_32.c 	if (apm_bios_call(&call))
call              780 arch/x86/kernel/apm_32.c 		return call.err;
call              782 arch/x86/kernel/apm_32.c 	*event = call.ebx;
call              786 arch/x86/kernel/apm_32.c 		*info = call.ecx;
call             1034 arch/x86/kernel/apm_32.c 	struct apm_bios_call call;
call             1036 arch/x86/kernel/apm_32.c 	call.func = APM_FUNC_GET_STATUS;
call             1037 arch/x86/kernel/apm_32.c 	call.ebx = APM_DEVICE_ALL;
call             1038 arch/x86/kernel/apm_32.c 	call.ecx = 0;
call             1042 arch/x86/kernel/apm_32.c 	if (apm_bios_call(&call)) {
call             1043 arch/x86/kernel/apm_32.c 		if (!call.err)
call             1045 arch/x86/kernel/apm_32.c 		return call.err;
call             1047 arch/x86/kernel/apm_32.c 	*status = call.ebx;
call             1048 arch/x86/kernel/apm_32.c 	*bat = call.ecx;
call             1050 arch/x86/kernel/apm_32.c 		*life = swab16((u16)call.edx);
call             1053 arch/x86/kernel/apm_32.c 		*life = call.edx;
call              414 drivers/cdrom/cdrom.c #define ENSURE(cdo, call, bits)					\
call              416 drivers/cdrom/cdrom.c 	if (cdo->call == NULL)					\
call              104 drivers/firmware/efi/runtime-wrappers.c void efi_call_virt_check_flags(unsigned long flags, const char *call)
call              116 drivers/firmware/efi/runtime-wrappers.c 			   flags, cur_flags, call);
call               49 drivers/misc/cxl/hcalls.c #define _CXL_LOOP_HCALL(call, rc, retbuf, fn, ...)			\
call               56 drivers/misc/cxl/hcalls.c 			rc = call(fn, retbuf, __VA_ARGS__, token);	\
call               98 drivers/net/arcnet/arcdevice.h #define TIME(dev, name, bytes, call)					\
call              103 drivers/net/arcnet/arcdevice.h 		call;							\
call              110 drivers/net/arcnet/arcdevice.h 		call;							\
call             3529 drivers/net/ethernet/netronome/nfp/bpf/jit.c 	[BPF_JMP | BPF_CALL] =		call,
call              782 drivers/scsi/qla2xxx/qla_tmpl.c 	typeof(qla27xx_fwdt_entry_other)(*call);
call              813 drivers/scsi/qla2xxx/qla_tmpl.c typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
call              821 drivers/scsi/qla2xxx/qla_tmpl.c 		return list->call;
call             1317 drivers/virt/vboxguest/vboxguest_core.c 			       struct vbg_ioctl_hgcm_call *call)
call             1323 drivers/virt/vboxguest/vboxguest_core.c 	if (call->hdr.size_in < sizeof(*call))
call             1326 drivers/virt/vboxguest/vboxguest_core.c 	if (call->hdr.size_in != call->hdr.size_out)
call             1329 drivers/virt/vboxguest/vboxguest_core.c 	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
call             1332 drivers/virt/vboxguest/vboxguest_core.c 	client_id = call->client_id;
call             1336 drivers/virt/vboxguest/vboxguest_core.c 	actual_size = sizeof(*call);
call             1338 drivers/virt/vboxguest/vboxguest_core.c 		actual_size += call->parm_count *
call             1341 drivers/virt/vboxguest/vboxguest_core.c 		actual_size += call->parm_count *
call             1343 drivers/virt/vboxguest/vboxguest_core.c 	if (call->hdr.size_in < actual_size) {
call             1345 drivers/virt/vboxguest/vboxguest_core.c 			  call->hdr.size_in, actual_size);
call             1348 drivers/virt/vboxguest/vboxguest_core.c 	call->hdr.size_out = actual_size;
call             1353 drivers/virt/vboxguest/vboxguest_core.c 			VBG_IOCTL_HGCM_CALL_PARMS32(call);
call             1355 drivers/virt/vboxguest/vboxguest_core.c 		for (i = 0; i < call->parm_count; i++)
call             1360 drivers/virt/vboxguest/vboxguest_core.c 			VBG_IOCTL_HGCM_CALL_PARMS(call);
call             1362 drivers/virt/vboxguest/vboxguest_core.c 		for (i = 0; i < call->parm_count; i++)
call             1383 drivers/virt/vboxguest/vboxguest_core.c 				      call->function, call->timeout_ms,
call             1384 drivers/virt/vboxguest/vboxguest_core.c 				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
call             1385 drivers/virt/vboxguest/vboxguest_core.c 				      call->parm_count, &call->hdr.rc);
call             1388 drivers/virt/vboxguest/vboxguest_core.c 				    call->function, call->timeout_ms,
call             1389 drivers/virt/vboxguest/vboxguest_core.c 				    VBG_IOCTL_HGCM_CALL_PARMS(call),
call             1390 drivers/virt/vboxguest/vboxguest_core.c 				    call->parm_count, &call->hdr.rc);
call             1394 drivers/virt/vboxguest/vboxguest_core.c 		call->hdr.rc = VERR_OUT_OF_RANGE;
call              328 drivers/virt/vboxguest/vboxguest_utils.c static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call,
call              345 drivers/virt/vboxguest/vboxguest_utils.c 	dst_pg_lst = (void *)call + *off_extra;
call              379 drivers/virt/vboxguest/vboxguest_utils.c 	struct vmmdev_hgcm_call *call, u32 client_id, u32 function,
call              384 drivers/virt/vboxguest/vboxguest_utils.c 		VMMDEV_HGCM_CALL_PARMS(call);
call              385 drivers/virt/vboxguest/vboxguest_utils.c 	u32 i, off_extra = (uintptr_t)(dst_parm + parm_count) - (uintptr_t)call;
call              388 drivers/virt/vboxguest/vboxguest_utils.c 	call->header.flags = 0;
call              389 drivers/virt/vboxguest/vboxguest_utils.c 	call->header.result = VINF_SUCCESS;
call              390 drivers/virt/vboxguest/vboxguest_utils.c 	call->client_id = client_id;
call              391 drivers/virt/vboxguest/vboxguest_utils.c 	call->function = function;
call              392 drivers/virt/vboxguest/vboxguest_utils.c 	call->parm_count = parm_count;
call              404 drivers/virt/vboxguest/vboxguest_utils.c 			hgcm_call_init_linaddr(call, dst_parm, bounce_bufs[i],
call              413 drivers/virt/vboxguest/vboxguest_utils.c 			hgcm_call_init_linaddr(call, dst_parm, buf,
call              430 drivers/virt/vboxguest/vboxguest_utils.c static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
call              440 drivers/virt/vboxguest/vboxguest_utils.c 	gdev->cancel_req->phys_req_to_cancel = virt_to_phys(call);
call              445 drivers/virt/vboxguest/vboxguest_utils.c 		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
call              446 drivers/virt/vboxguest/vboxguest_utils.c 		call->header.header.request_type = VMMDEVREQ_HGCM_CANCEL;
call              448 drivers/virt/vboxguest/vboxguest_utils.c 		rc = vbg_req_perform(gdev, call);
call              454 drivers/virt/vboxguest/vboxguest_utils.c 		call->header.flags |= VMMDEV_HGCM_REQ_CANCELLED;
call              468 drivers/virt/vboxguest/vboxguest_utils.c static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
call              476 drivers/virt/vboxguest/vboxguest_utils.c 	rc = vbg_req_perform(gdev, call);
call              483 drivers/virt/vboxguest/vboxguest_utils.c 		call->header.result = rc;
call              498 drivers/virt/vboxguest/vboxguest_utils.c 					hgcm_req_done(gdev, &call->header),
call              511 drivers/virt/vboxguest/vboxguest_utils.c 	cancel_rc = hgcm_cancel_call(gdev, call);
call              525 drivers/virt/vboxguest/vboxguest_utils.c 				     hgcm_req_done(gdev, &call->header),
call              550 drivers/virt/vboxguest/vboxguest_utils.c 	const struct vmmdev_hgcm_call *call,
call              555 drivers/virt/vboxguest/vboxguest_utils.c 		VMMDEV_HGCM_CALL_PARMS(call);
call              605 drivers/virt/vboxguest/vboxguest_utils.c 	struct vmmdev_hgcm_call *call;
call              623 drivers/virt/vboxguest/vboxguest_utils.c 	call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
call              624 drivers/virt/vboxguest/vboxguest_utils.c 	if (!call) {
call              629 drivers/virt/vboxguest/vboxguest_utils.c 	hgcm_call_init_call(call, client_id, function, parms, parm_count,
call              632 drivers/virt/vboxguest/vboxguest_utils.c 	ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
call              634 drivers/virt/vboxguest/vboxguest_utils.c 		*vbox_status = call->header.result;
call              635 drivers/virt/vboxguest/vboxguest_utils.c 		ret = hgcm_call_copy_back_result(call, parms, parm_count,
call              640 drivers/virt/vboxguest/vboxguest_utils.c 		vbg_req_free(call, size);
call             1313 drivers/vme/vme.c 	void (*call)(int, int, void *);
call             1316 drivers/vme/vme.c 	call = bridge->irq[level - 1].callback[statid].func;
call             1318 drivers/vme/vme.c 	if (call)
call             1319 drivers/vme/vme.c 		call(level, statid, priv_data);
call              117 fs/afs/cmservice.c bool afs_cm_incoming_call(struct afs_call *call)
call              119 fs/afs/cmservice.c 	_enter("{%u, CB.OP %u}", call->service_id, call->operation_ID);
call              121 fs/afs/cmservice.c 	call->epoch = rxrpc_kernel_get_epoch(call->net->socket, call->rxcall);
call              123 fs/afs/cmservice.c 	switch (call->operation_ID) {
call              125 fs/afs/cmservice.c 		call->type = &afs_SRXCBCallBack;
call              128 fs/afs/cmservice.c 		call->type = &afs_SRXCBInitCallBackState;
call              131 fs/afs/cmservice.c 		call->type = &afs_SRXCBInitCallBackState3;
call              134 fs/afs/cmservice.c 		call->type = &afs_SRXCBProbe;
call              137 fs/afs/cmservice.c 		call->type = &afs_SRXCBProbeUuid;
call              140 fs/afs/cmservice.c 		call->type = &afs_SRXCBTellMeAboutYourself;
call              143 fs/afs/cmservice.c 		if (call->service_id != YFS_CM_SERVICE)
call              145 fs/afs/cmservice.c 		call->type = &afs_SRXYFSCB_CallBack;
call              155 fs/afs/cmservice.c static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
call              161 fs/afs/cmservice.c 		if (server->cm_epoch == call->epoch)
call              173 fs/afs/cmservice.c 		server->cm_epoch = call->epoch;
call              174 fs/afs/cmservice.c 		server->probe.cm_epoch = call->epoch;
call              179 fs/afs/cmservice.c 	    call->epoch != server->probe.cm_epoch &&
call              186 fs/afs/cmservice.c 	if (!server->probe.cm_probed || call->epoch == server->cm_epoch)
call              199 fs/afs/cmservice.c static int afs_find_cm_server_by_peer(struct afs_call *call)
call              204 fs/afs/cmservice.c 	rxrpc_kernel_get_peer(call->net->socket, call->rxcall, &srx);
call              206 fs/afs/cmservice.c 	server = afs_find_server(call->net, &srx);
call              208 fs/afs/cmservice.c 		trace_afs_cm_no_server(call, &srx);
call              212 fs/afs/cmservice.c 	call->server = server;
call              213 fs/afs/cmservice.c 	return afs_record_cm_probe(call, server);
call              220 fs/afs/cmservice.c static int afs_find_cm_server_by_uuid(struct afs_call *call,
call              226 fs/afs/cmservice.c 	server = afs_find_server_by_uuid(call->net, call->request);
call              229 fs/afs/cmservice.c 		trace_afs_cm_no_server_u(call, call->request);
call              233 fs/afs/cmservice.c 	call->server = server;
call              234 fs/afs/cmservice.c 	return afs_record_cm_probe(call, server);
call              240 fs/afs/cmservice.c static void afs_cm_destructor(struct afs_call *call)
call              242 fs/afs/cmservice.c 	kfree(call->buffer);
call              243 fs/afs/cmservice.c 	call->buffer = NULL;
call              249 fs/afs/cmservice.c static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
call              252 fs/afs/cmservice.c 	rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
call              254 fs/afs/cmservice.c 	afs_set_call_complete(call, error, 0);
call              262 fs/afs/cmservice.c 	struct afs_call *call = container_of(work, struct afs_call, work);
call              270 fs/afs/cmservice.c 	if (call->server) {
call              271 fs/afs/cmservice.c 		trace_afs_server(call->server, atomic_read(&call->server->usage),
call              273 fs/afs/cmservice.c 		afs_break_callbacks(call->server, call->count, call->request);
call              276 fs/afs/cmservice.c 	afs_send_empty_reply(call);
call              277 fs/afs/cmservice.c 	afs_put_call(call);
call              284 fs/afs/cmservice.c static int afs_deliver_cb_callback(struct afs_call *call)
call              290 fs/afs/cmservice.c 	_enter("{%u}", call->unmarshall);
call              292 fs/afs/cmservice.c 	switch (call->unmarshall) {
call              294 fs/afs/cmservice.c 		afs_extract_to_tmp(call);
call              295 fs/afs/cmservice.c 		call->unmarshall++;
call              301 fs/afs/cmservice.c 		ret = afs_extract_data(call, true);
call              305 fs/afs/cmservice.c 		call->count = ntohl(call->tmp);
call              306 fs/afs/cmservice.c 		_debug("FID count: %u", call->count);
call              307 fs/afs/cmservice.c 		if (call->count > AFSCBMAX)
call              308 fs/afs/cmservice.c 			return afs_protocol_error(call, -EBADMSG,
call              311 fs/afs/cmservice.c 		call->buffer = kmalloc(array3_size(call->count, 3, 4),
call              313 fs/afs/cmservice.c 		if (!call->buffer)
call              315 fs/afs/cmservice.c 		afs_extract_to_buf(call, call->count * 3 * 4);
call              316 fs/afs/cmservice.c 		call->unmarshall++;
call              321 fs/afs/cmservice.c 		ret = afs_extract_data(call, true);
call              326 fs/afs/cmservice.c 		call->request = kcalloc(call->count,
call              329 fs/afs/cmservice.c 		if (!call->request)
call              332 fs/afs/cmservice.c 		cb = call->request;
call              333 fs/afs/cmservice.c 		bp = call->buffer;
call              334 fs/afs/cmservice.c 		for (loop = call->count; loop > 0; loop--, cb++) {
call              340 fs/afs/cmservice.c 		afs_extract_to_tmp(call);
call              341 fs/afs/cmservice.c 		call->unmarshall++;
call              347 fs/afs/cmservice.c 		ret = afs_extract_data(call, true);
call              351 fs/afs/cmservice.c 		call->count2 = ntohl(call->tmp);
call              352 fs/afs/cmservice.c 		_debug("CB count: %u", call->count2);
call              353 fs/afs/cmservice.c 		if (call->count2 != call->count && call->count2 != 0)
call              354 fs/afs/cmservice.c 			return afs_protocol_error(call, -EBADMSG,
call              356 fs/afs/cmservice.c 		call->_iter = &call->iter;
call              357 fs/afs/cmservice.c 		iov_iter_discard(&call->iter, READ, call->count2 * 3 * 4);
call              358 fs/afs/cmservice.c 		call->unmarshall++;
call              363 fs/afs/cmservice.c 		       iov_iter_count(&call->iter), call->count2 * 3 * 4);
call              365 fs/afs/cmservice.c 		ret = afs_extract_data(call, false);
call              369 fs/afs/cmservice.c 		call->unmarshall++;
call              374 fs/afs/cmservice.c 	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
call              375 fs/afs/cmservice.c 		return afs_io_error(call, afs_io_error_cm_reply);
call              379 fs/afs/cmservice.c 	return afs_find_cm_server_by_peer(call);
call              387 fs/afs/cmservice.c 	struct afs_call *call = container_of(work, struct afs_call, work);
call              389 fs/afs/cmservice.c 	_enter("{%p}", call->server);
call              391 fs/afs/cmservice.c 	if (call->server)
call              392 fs/afs/cmservice.c 		afs_init_callback_state(call->server);
call              393 fs/afs/cmservice.c 	afs_send_empty_reply(call);
call              394 fs/afs/cmservice.c 	afs_put_call(call);
call              401 fs/afs/cmservice.c static int afs_deliver_cb_init_call_back_state(struct afs_call *call)
call              407 fs/afs/cmservice.c 	afs_extract_discard(call, 0);
call              408 fs/afs/cmservice.c 	ret = afs_extract_data(call, false);
call              414 fs/afs/cmservice.c 	return afs_find_cm_server_by_peer(call);
call              420 fs/afs/cmservice.c static int afs_deliver_cb_init_call_back_state3(struct afs_call *call)
call              429 fs/afs/cmservice.c 	_enter("{%u}", call->unmarshall);
call              431 fs/afs/cmservice.c 	switch (call->unmarshall) {
call              433 fs/afs/cmservice.c 		call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
call              434 fs/afs/cmservice.c 		if (!call->buffer)
call              436 fs/afs/cmservice.c 		afs_extract_to_buf(call, 11 * sizeof(__be32));
call              437 fs/afs/cmservice.c 		call->unmarshall++;
call              442 fs/afs/cmservice.c 		ret = afs_extract_data(call, false);
call              450 fs/afs/cmservice.c 		call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
call              451 fs/afs/cmservice.c 		if (!call->request)
call              454 fs/afs/cmservice.c 		b = call->buffer;
call              455 fs/afs/cmservice.c 		r = call->request;
call              465 fs/afs/cmservice.c 		call->unmarshall++;
call              471 fs/afs/cmservice.c 	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
call              472 fs/afs/cmservice.c 		return afs_io_error(call, afs_io_error_cm_reply);
call              476 fs/afs/cmservice.c 	return afs_find_cm_server_by_uuid(call, call->request);
call              484 fs/afs/cmservice.c 	struct afs_call *call = container_of(work, struct afs_call, work);
call              487 fs/afs/cmservice.c 	afs_send_empty_reply(call);
call              488 fs/afs/cmservice.c 	afs_put_call(call);
call              495 fs/afs/cmservice.c static int afs_deliver_cb_probe(struct afs_call *call)
call              501 fs/afs/cmservice.c 	afs_extract_discard(call, 0);
call              502 fs/afs/cmservice.c 	ret = afs_extract_data(call, false);
call              506 fs/afs/cmservice.c 	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
call              507 fs/afs/cmservice.c 		return afs_io_error(call, afs_io_error_cm_reply);
call              508 fs/afs/cmservice.c 	return afs_find_cm_server_by_peer(call);
call              516 fs/afs/cmservice.c 	struct afs_call *call = container_of(work, struct afs_call, work);
call              517 fs/afs/cmservice.c 	struct afs_uuid *r = call->request;
call              521 fs/afs/cmservice.c 	if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
call              522 fs/afs/cmservice.c 		afs_send_empty_reply(call);
call              524 fs/afs/cmservice.c 		afs_abort_service_call(call, 1, 1, "K-1");
call              526 fs/afs/cmservice.c 	afs_put_call(call);
call              533 fs/afs/cmservice.c static int afs_deliver_cb_probe_uuid(struct afs_call *call)
call              540 fs/afs/cmservice.c 	_enter("{%u}", call->unmarshall);
call              542 fs/afs/cmservice.c 	switch (call->unmarshall) {
call              544 fs/afs/cmservice.c 		call->buffer = kmalloc_array(11, sizeof(__be32), GFP_KERNEL);
call              545 fs/afs/cmservice.c 		if (!call->buffer)
call              547 fs/afs/cmservice.c 		afs_extract_to_buf(call, 11 * sizeof(__be32));
call              548 fs/afs/cmservice.c 		call->unmarshall++;
call              553 fs/afs/cmservice.c 		ret = afs_extract_data(call, false);
call              561 fs/afs/cmservice.c 		call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
call              562 fs/afs/cmservice.c 		if (!call->request)
call              565 fs/afs/cmservice.c 		b = call->buffer;
call              566 fs/afs/cmservice.c 		r = call->request;
call              576 fs/afs/cmservice.c 		call->unmarshall++;
call              582 fs/afs/cmservice.c 	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
call              583 fs/afs/cmservice.c 		return afs_io_error(call, afs_io_error_cm_reply);
call              584 fs/afs/cmservice.c 	return afs_find_cm_server_by_uuid(call, call->request);
call              592 fs/afs/cmservice.c 	struct afs_call *call = container_of(work, struct afs_call, work);
call              613 fs/afs/cmservice.c 	reply.ia.uuid[0] = call->net->uuid.time_low;
call              614 fs/afs/cmservice.c 	reply.ia.uuid[1] = htonl(ntohs(call->net->uuid.time_mid));
call              615 fs/afs/cmservice.c 	reply.ia.uuid[2] = htonl(ntohs(call->net->uuid.time_hi_and_version));
call              616 fs/afs/cmservice.c 	reply.ia.uuid[3] = htonl((s8) call->net->uuid.clock_seq_hi_and_reserved);
call              617 fs/afs/cmservice.c 	reply.ia.uuid[4] = htonl((s8) call->net->uuid.clock_seq_low);
call              619 fs/afs/cmservice.c 		reply.ia.uuid[loop + 5] = htonl((s8) call->net->uuid.node[loop]);
call              623 fs/afs/cmservice.c 	afs_send_simple_reply(call, &reply, sizeof(reply));
call              624 fs/afs/cmservice.c 	afs_put_call(call);
call              631 fs/afs/cmservice.c static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call)
call              637 fs/afs/cmservice.c 	afs_extract_discard(call, 0);
call              638 fs/afs/cmservice.c 	ret = afs_extract_data(call, false);
call              642 fs/afs/cmservice.c 	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
call              643 fs/afs/cmservice.c 		return afs_io_error(call, afs_io_error_cm_reply);
call              644 fs/afs/cmservice.c 	return afs_find_cm_server_by_peer(call);
call              650 fs/afs/cmservice.c static int afs_deliver_yfs_cb_callback(struct afs_call *call)
call              657 fs/afs/cmservice.c 	_enter("{%u}", call->unmarshall);
call              659 fs/afs/cmservice.c 	switch (call->unmarshall) {
call              661 fs/afs/cmservice.c 		afs_extract_to_tmp(call);
call              662 fs/afs/cmservice.c 		call->unmarshall++;
call              668 fs/afs/cmservice.c 		ret = afs_extract_data(call, true);
call              672 fs/afs/cmservice.c 		call->count = ntohl(call->tmp);
call              673 fs/afs/cmservice.c 		_debug("FID count: %u", call->count);
call              674 fs/afs/cmservice.c 		if (call->count > YFSCBMAX)
call              675 fs/afs/cmservice.c 			return afs_protocol_error(call, -EBADMSG,
call              678 fs/afs/cmservice.c 		size = array_size(call->count, sizeof(struct yfs_xdr_YFSFid));
call              679 fs/afs/cmservice.c 		call->buffer = kmalloc(size, GFP_KERNEL);
call              680 fs/afs/cmservice.c 		if (!call->buffer)
call              682 fs/afs/cmservice.c 		afs_extract_to_buf(call, size);
call              683 fs/afs/cmservice.c 		call->unmarshall++;
call              688 fs/afs/cmservice.c 		ret = afs_extract_data(call, false);
call              693 fs/afs/cmservice.c 		call->request = kcalloc(call->count,
call              696 fs/afs/cmservice.c 		if (!call->request)
call              699 fs/afs/cmservice.c 		cb = call->request;
call              700 fs/afs/cmservice.c 		bp = call->buffer;
call              701 fs/afs/cmservice.c 		for (loop = call->count; loop > 0; loop--, cb++) {
call              709 fs/afs/cmservice.c 		afs_extract_to_tmp(call);
call              710 fs/afs/cmservice.c 		call->unmarshall++;
call              716 fs/afs/cmservice.c 	if (!afs_check_call_state(call, AFS_CALL_SV_REPLYING))
call              717 fs/afs/cmservice.c 		return afs_io_error(call, afs_io_error_cm_reply);
call              722 fs/afs/cmservice.c 	return afs_find_cm_server_by_peer(call);
call               71 fs/afs/flock.c void afs_lock_op_done(struct afs_call *call)
call               73 fs/afs/flock.c 	struct afs_vnode *vnode = call->lvnode;
call               75 fs/afs/flock.c 	if (call->error == 0) {
call               78 fs/afs/flock.c 		vnode->locked_at = call->reply_time;
call               29 fs/afs/fs_probe.c void afs_fileserver_probe_result(struct afs_call *call)
call               31 fs/afs/fs_probe.c 	struct afs_addr_list *alist = call->alist;
call               32 fs/afs/fs_probe.c 	struct afs_server *server = call->server;
call               33 fs/afs/fs_probe.c 	unsigned int server_index = call->server_index;
call               34 fs/afs/fs_probe.c 	unsigned int index = call->addr_ix;
call               37 fs/afs/fs_probe.c 	int ret = call->error;
call               49 fs/afs/fs_probe.c 			server->probe.abort_code = call->abort_code;
call               56 fs/afs/fs_probe.c 		afs_io_error(call, afs_io_error_fs_probe_fail);
call               75 fs/afs/fs_probe.c 		afs_io_error(call, afs_io_error_fs_probe_fail);
call               83 fs/afs/fs_probe.c 	if (call->service_id == YFS_FS_SERVICE) {
call               86 fs/afs/fs_probe.c 		alist->addrs[index].srx_service = call->service_id;
call               91 fs/afs/fs_probe.c 			alist->addrs[index].srx_service = call->service_id;
call               95 fs/afs/fs_probe.c 	rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
call              132 fs/afs/fs_probe.c 	struct afs_call *call;
call              148 fs/afs/fs_probe.c 		call = afs_fs_get_capabilities(net, server, &ac, key, server_index);
call              149 fs/afs/fs_probe.c 		if (!IS_ERR(call)) {
call              150 fs/afs/fs_probe.c 			afs_put_call(call);
call              153 fs/afs/fs_probe.c 			afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
call               18 fs/afs/fsclient.c static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
call               20 fs/afs/fsclient.c 	call->cbi = afs_get_cb_interest(cbi);
call               60 fs/afs/fsclient.c 				     struct afs_call *call,
call               65 fs/afs/fsclient.c 	bool inline_error = (call->operation_ID == afs_FS_InlineBulkStatus);
call              136 fs/afs/fsclient.c 	ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
call              140 fs/afs/fsclient.c static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
call              142 fs/afs/fsclient.c 	return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
call              146 fs/afs/fsclient.c 				   struct afs_call *call,
call              153 fs/afs/fsclient.c 	cb->expires_at	= xdr_decode_expiry(call, ntohl(*bp++));
call              246 fs/afs/fsclient.c static int afs_deliver_fs_fetch_status_vnode(struct afs_call *call)
call              251 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call              256 fs/afs/fsclient.c 	bp = call->buffer;
call              257 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call              260 fs/afs/fsclient.c 	xdr_decode_AFSCallBack(&bp, call, call->out_scb);
call              261 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call              284 fs/afs/fsclient.c 	struct afs_call *call;
call              294 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus_vnode,
call              296 fs/afs/fsclient.c 	if (!call) {
call              301 fs/afs/fsclient.c 	call->key = fc->key;
call              302 fs/afs/fsclient.c 	call->out_scb = scb;
call              303 fs/afs/fsclient.c 	call->out_volsync = volsync;
call              306 fs/afs/fsclient.c 	bp = call->request;
call              312 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call              313 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call              315 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call              316 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              317 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              323 fs/afs/fsclient.c static int afs_deliver_fs_fetch_data(struct afs_call *call)
call              325 fs/afs/fsclient.c 	struct afs_read *req = call->read_request;
call              331 fs/afs/fsclient.c 	       call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
call              333 fs/afs/fsclient.c 	switch (call->unmarshall) {
call              338 fs/afs/fsclient.c 		call->unmarshall++;
call              339 fs/afs/fsclient.c 		if (call->operation_ID == FSFETCHDATA64) {
call              340 fs/afs/fsclient.c 			afs_extract_to_tmp64(call);
call              342 fs/afs/fsclient.c 			call->tmp_u = htonl(0);
call              343 fs/afs/fsclient.c 			afs_extract_to_tmp(call);
call              350 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call              354 fs/afs/fsclient.c 		req->actual_len = be64_to_cpu(call->tmp64);
call              360 fs/afs/fsclient.c 		call->unmarshall++;
call              368 fs/afs/fsclient.c 		call->bvec[0].bv_len = size;
call              369 fs/afs/fsclient.c 		call->bvec[0].bv_offset = req->offset;
call              370 fs/afs/fsclient.c 		call->bvec[0].bv_page = req->pages[req->index];
call              371 fs/afs/fsclient.c 		iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
call              378 fs/afs/fsclient.c 		       iov_iter_count(&call->iter), req->remain);
call              380 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call              383 fs/afs/fsclient.c 		req->remain -= call->bvec[0].bv_len;
call              384 fs/afs/fsclient.c 		req->offset += call->bvec[0].bv_len;
call              398 fs/afs/fsclient.c 		afs_extract_discard(call, req->actual_len - req->len);
call              399 fs/afs/fsclient.c 		call->unmarshall = 3;
call              404 fs/afs/fsclient.c 		       iov_iter_count(&call->iter), req->actual_len - req->len);
call              406 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call              411 fs/afs/fsclient.c 		call->unmarshall = 4;
call              412 fs/afs/fsclient.c 		afs_extract_to_buf(call, (21 + 3 + 6) * 4);
call              417 fs/afs/fsclient.c 		ret = afs_extract_data(call, false);
call              421 fs/afs/fsclient.c 		bp = call->buffer;
call              422 fs/afs/fsclient.c 		ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call              425 fs/afs/fsclient.c 		xdr_decode_AFSCallBack(&bp, call, call->out_scb);
call              426 fs/afs/fsclient.c 		xdr_decode_AFSVolSync(&bp, call->out_volsync);
call              428 fs/afs/fsclient.c 		req->data_version = call->out_scb->status.data_version;
call              429 fs/afs/fsclient.c 		req->file_size = call->out_scb->status.size;
call              431 fs/afs/fsclient.c 		call->unmarshall++;
call              452 fs/afs/fsclient.c static void afs_fetch_data_destructor(struct afs_call *call)
call              454 fs/afs/fsclient.c 	struct afs_read *req = call->read_request;
call              457 fs/afs/fsclient.c 	afs_flat_call_destructor(call);
call              485 fs/afs/fsclient.c 	struct afs_call *call;
call              491 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSFetchData64, 32, (21 + 3 + 6) * 4);
call              492 fs/afs/fsclient.c 	if (!call)
call              495 fs/afs/fsclient.c 	call->key = fc->key;
call              496 fs/afs/fsclient.c 	call->out_scb = scb;
call              497 fs/afs/fsclient.c 	call->out_volsync = NULL;
call              498 fs/afs/fsclient.c 	call->read_request = req;
call              501 fs/afs/fsclient.c 	bp = call->request;
call              512 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call              513 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call              514 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call              515 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              516 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              527 fs/afs/fsclient.c 	struct afs_call *call;
call              541 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSFetchData, 24, (21 + 3 + 6) * 4);
call              542 fs/afs/fsclient.c 	if (!call)
call              545 fs/afs/fsclient.c 	call->key = fc->key;
call              546 fs/afs/fsclient.c 	call->out_scb = scb;
call              547 fs/afs/fsclient.c 	call->out_volsync = NULL;
call              548 fs/afs/fsclient.c 	call->read_request = req;
call              551 fs/afs/fsclient.c 	bp = call->request;
call              560 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call              561 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call              562 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call              563 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              564 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              570 fs/afs/fsclient.c static int afs_deliver_fs_create_vnode(struct afs_call *call)
call              575 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call              580 fs/afs/fsclient.c 	bp = call->buffer;
call              581 fs/afs/fsclient.c 	xdr_decode_AFSFid(&bp, call->out_fid);
call              582 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call              585 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
call              588 fs/afs/fsclient.c 	xdr_decode_AFSCallBack(&bp, call, call->out_scb);
call              589 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call              623 fs/afs/fsclient.c 	struct afs_call *call;
call              643 fs/afs/fsclient.c 	call = afs_alloc_flat_call(
call              646 fs/afs/fsclient.c 	if (!call)
call              649 fs/afs/fsclient.c 	call->key = fc->key;
call              650 fs/afs/fsclient.c 	call->out_dir_scb = dvnode_scb;
call              651 fs/afs/fsclient.c 	call->out_fid = newfid;
call              652 fs/afs/fsclient.c 	call->out_scb = new_scb;
call              655 fs/afs/fsclient.c 	bp = call->request;
call              674 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call              675 fs/afs/fsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call              676 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call              677 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              678 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              685 fs/afs/fsclient.c static int afs_deliver_fs_dir_status_and_vol(struct afs_call *call)
call              690 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call              695 fs/afs/fsclient.c 	bp = call->buffer;
call              696 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
call              699 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call              729 fs/afs/fsclient.c 	struct afs_call *call;
call              743 fs/afs/fsclient.c 	call = afs_alloc_flat_call(
call              746 fs/afs/fsclient.c 	if (!call)
call              749 fs/afs/fsclient.c 	call->key = fc->key;
call              750 fs/afs/fsclient.c 	call->out_dir_scb = dvnode_scb;
call              753 fs/afs/fsclient.c 	bp = call->request;
call              766 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call              767 fs/afs/fsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call              768 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call              769 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              770 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              776 fs/afs/fsclient.c static int afs_deliver_fs_link(struct afs_call *call)
call              781 fs/afs/fsclient.c 	_enter("{%u}", call->unmarshall);
call              783 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call              788 fs/afs/fsclient.c 	bp = call->buffer;
call              789 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call              792 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
call              795 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call              820 fs/afs/fsclient.c 	struct afs_call *call;
call              834 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSLink, reqsz, (21 + 21 + 6) * 4);
call              835 fs/afs/fsclient.c 	if (!call)
call              838 fs/afs/fsclient.c 	call->key = fc->key;
call              839 fs/afs/fsclient.c 	call->out_dir_scb = dvnode_scb;
call              840 fs/afs/fsclient.c 	call->out_scb = vnode_scb;
call              843 fs/afs/fsclient.c 	bp = call->request;
call              859 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call              860 fs/afs/fsclient.c 	trace_afs_make_fs_call1(call, &vnode->fid, name);
call              861 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call              862 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              863 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              869 fs/afs/fsclient.c static int afs_deliver_fs_symlink(struct afs_call *call)
call              874 fs/afs/fsclient.c 	_enter("{%u}", call->unmarshall);
call              876 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call              881 fs/afs/fsclient.c 	bp = call->buffer;
call              882 fs/afs/fsclient.c 	xdr_decode_AFSFid(&bp, call->out_fid);
call              883 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call              886 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
call              889 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call              916 fs/afs/fsclient.c 	struct afs_call *call;
call              935 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSSymlink, reqsz,
call              937 fs/afs/fsclient.c 	if (!call)
call              940 fs/afs/fsclient.c 	call->key = fc->key;
call              941 fs/afs/fsclient.c 	call->out_dir_scb = dvnode_scb;
call              942 fs/afs/fsclient.c 	call->out_fid = newfid;
call              943 fs/afs/fsclient.c 	call->out_scb = new_scb;
call              946 fs/afs/fsclient.c 	bp = call->request;
call              972 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call              973 fs/afs/fsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call              974 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call              975 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              976 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              982 fs/afs/fsclient.c static int afs_deliver_fs_rename(struct afs_call *call)
call              987 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call              994 fs/afs/fsclient.c 	bp = call->buffer;
call              995 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_dir_scb);
call              998 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call             1001 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             1028 fs/afs/fsclient.c 	struct afs_call *call;
call             1052 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSRename, reqsz, (21 + 21 + 6) * 4);
call             1053 fs/afs/fsclient.c 	if (!call)
call             1056 fs/afs/fsclient.c 	call->key = fc->key;
call             1057 fs/afs/fsclient.c 	call->out_dir_scb = orig_dvnode_scb;
call             1058 fs/afs/fsclient.c 	call->out_scb = new_dvnode_scb;
call             1061 fs/afs/fsclient.c 	bp = call->request;
call             1085 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1086 fs/afs/fsclient.c 	trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
call             1087 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1088 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1089 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1095 fs/afs/fsclient.c static int afs_deliver_fs_store_data(struct afs_call *call)
call             1102 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call             1107 fs/afs/fsclient.c 	bp = call->buffer;
call             1108 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call             1111 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             1145 fs/afs/fsclient.c 	struct afs_call *call;
call             1152 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSStoreData64,
call             1155 fs/afs/fsclient.c 	if (!call)
call             1158 fs/afs/fsclient.c 	call->key = fc->key;
call             1159 fs/afs/fsclient.c 	call->mapping = mapping;
call             1160 fs/afs/fsclient.c 	call->first = first;
call             1161 fs/afs/fsclient.c 	call->last = last;
call             1162 fs/afs/fsclient.c 	call->first_offset = offset;
call             1163 fs/afs/fsclient.c 	call->last_to = to;
call             1164 fs/afs/fsclient.c 	call->send_pages = true;
call             1165 fs/afs/fsclient.c 	call->out_scb = scb;
call             1168 fs/afs/fsclient.c 	bp = call->request;
call             1188 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1189 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1190 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1191 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1203 fs/afs/fsclient.c 	struct afs_call *call;
call             1232 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSStoreData,
call             1235 fs/afs/fsclient.c 	if (!call)
call             1238 fs/afs/fsclient.c 	call->key = fc->key;
call             1239 fs/afs/fsclient.c 	call->mapping = mapping;
call             1240 fs/afs/fsclient.c 	call->first = first;
call             1241 fs/afs/fsclient.c 	call->last = last;
call             1242 fs/afs/fsclient.c 	call->first_offset = offset;
call             1243 fs/afs/fsclient.c 	call->last_to = to;
call             1244 fs/afs/fsclient.c 	call->send_pages = true;
call             1245 fs/afs/fsclient.c 	call->out_scb = scb;
call             1248 fs/afs/fsclient.c 	bp = call->request;
call             1265 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1266 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1267 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1268 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1269 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1275 fs/afs/fsclient.c static int afs_deliver_fs_store_status(struct afs_call *call)
call             1282 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call             1287 fs/afs/fsclient.c 	bp = call->buffer;
call             1288 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call             1291 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             1329 fs/afs/fsclient.c 	struct afs_call *call;
call             1338 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSStoreData64_as_Status,
call             1341 fs/afs/fsclient.c 	if (!call)
call             1344 fs/afs/fsclient.c 	call->key = fc->key;
call             1345 fs/afs/fsclient.c 	call->out_scb = scb;
call             1348 fs/afs/fsclient.c 	bp = call->request;
call             1363 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1364 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1365 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1366 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1367 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1378 fs/afs/fsclient.c 	struct afs_call *call;
call             1389 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSStoreData_as_Status,
call             1392 fs/afs/fsclient.c 	if (!call)
call             1395 fs/afs/fsclient.c 	call->key = fc->key;
call             1396 fs/afs/fsclient.c 	call->out_scb = scb;
call             1399 fs/afs/fsclient.c 	bp = call->request;
call             1411 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1412 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1413 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1414 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1415 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1426 fs/afs/fsclient.c 	struct afs_call *call;
call             1439 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSStoreStatus,
call             1442 fs/afs/fsclient.c 	if (!call)
call             1445 fs/afs/fsclient.c 	call->key = fc->key;
call             1446 fs/afs/fsclient.c 	call->out_scb = scb;
call             1449 fs/afs/fsclient.c 	bp = call->request;
call             1457 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1458 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1459 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1460 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1461 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1467 fs/afs/fsclient.c static int afs_deliver_fs_get_volume_status(struct afs_call *call)
call             1474 fs/afs/fsclient.c 	_enter("{%u}", call->unmarshall);
call             1476 fs/afs/fsclient.c 	switch (call->unmarshall) {
call             1478 fs/afs/fsclient.c 		call->unmarshall++;
call             1479 fs/afs/fsclient.c 		afs_extract_to_buf(call, 12 * 4);
call             1485 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             1489 fs/afs/fsclient.c 		bp = call->buffer;
call             1490 fs/afs/fsclient.c 		xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus);
call             1491 fs/afs/fsclient.c 		call->unmarshall++;
call             1492 fs/afs/fsclient.c 		afs_extract_to_tmp(call);
call             1497 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             1501 fs/afs/fsclient.c 		call->count = ntohl(call->tmp);
call             1502 fs/afs/fsclient.c 		_debug("volname length: %u", call->count);
call             1503 fs/afs/fsclient.c 		if (call->count >= AFSNAMEMAX)
call             1504 fs/afs/fsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1506 fs/afs/fsclient.c 		size = (call->count + 3) & ~3; /* It's padded */
call             1507 fs/afs/fsclient.c 		afs_extract_to_buf(call, size);
call             1508 fs/afs/fsclient.c 		call->unmarshall++;
call             1514 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             1518 fs/afs/fsclient.c 		p = call->buffer;
call             1519 fs/afs/fsclient.c 		p[call->count] = 0;
call             1521 fs/afs/fsclient.c 		afs_extract_to_tmp(call);
call             1522 fs/afs/fsclient.c 		call->unmarshall++;
call             1527 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             1531 fs/afs/fsclient.c 		call->count = ntohl(call->tmp);
call             1532 fs/afs/fsclient.c 		_debug("offline msg length: %u", call->count);
call             1533 fs/afs/fsclient.c 		if (call->count >= AFSNAMEMAX)
call             1534 fs/afs/fsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1536 fs/afs/fsclient.c 		size = (call->count + 3) & ~3; /* It's padded */
call             1537 fs/afs/fsclient.c 		afs_extract_to_buf(call, size);
call             1538 fs/afs/fsclient.c 		call->unmarshall++;
call             1544 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             1548 fs/afs/fsclient.c 		p = call->buffer;
call             1549 fs/afs/fsclient.c 		p[call->count] = 0;
call             1552 fs/afs/fsclient.c 		afs_extract_to_tmp(call);
call             1553 fs/afs/fsclient.c 		call->unmarshall++;
call             1558 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             1562 fs/afs/fsclient.c 		call->count = ntohl(call->tmp);
call             1563 fs/afs/fsclient.c 		_debug("motd length: %u", call->count);
call             1564 fs/afs/fsclient.c 		if (call->count >= AFSNAMEMAX)
call             1565 fs/afs/fsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1567 fs/afs/fsclient.c 		size = (call->count + 3) & ~3; /* It's padded */
call             1568 fs/afs/fsclient.c 		afs_extract_to_buf(call, size);
call             1569 fs/afs/fsclient.c 		call->unmarshall++;
call             1575 fs/afs/fsclient.c 		ret = afs_extract_data(call, false);
call             1579 fs/afs/fsclient.c 		p = call->buffer;
call             1580 fs/afs/fsclient.c 		p[call->count] = 0;
call             1583 fs/afs/fsclient.c 		call->unmarshall++;
call             1610 fs/afs/fsclient.c 	struct afs_call *call;
call             1619 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSGetVolumeStatus, 2 * 4,
call             1621 fs/afs/fsclient.c 	if (!call)
call             1624 fs/afs/fsclient.c 	call->key = fc->key;
call             1625 fs/afs/fsclient.c 	call->out_volstatus = vs;
call             1628 fs/afs/fsclient.c 	bp = call->request;
call             1632 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1633 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1634 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1635 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1636 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1642 fs/afs/fsclient.c static int afs_deliver_fs_xxxx_lock(struct afs_call *call)
call             1647 fs/afs/fsclient.c 	_enter("{%u}", call->unmarshall);
call             1649 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call             1654 fs/afs/fsclient.c 	bp = call->buffer;
call             1655 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             1700 fs/afs/fsclient.c 	struct afs_call *call;
call             1709 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSSetLock, 5 * 4, 6 * 4);
call             1710 fs/afs/fsclient.c 	if (!call)
call             1713 fs/afs/fsclient.c 	call->key = fc->key;
call             1714 fs/afs/fsclient.c 	call->lvnode = vnode;
call             1715 fs/afs/fsclient.c 	call->out_scb = scb;
call             1718 fs/afs/fsclient.c 	bp = call->request;
call             1725 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1726 fs/afs/fsclient.c 	trace_afs_make_fs_calli(call, &vnode->fid, type);
call             1727 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1728 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1729 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1738 fs/afs/fsclient.c 	struct afs_call *call;
call             1747 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSExtendLock, 4 * 4, 6 * 4);
call             1748 fs/afs/fsclient.c 	if (!call)
call             1751 fs/afs/fsclient.c 	call->key = fc->key;
call             1752 fs/afs/fsclient.c 	call->lvnode = vnode;
call             1753 fs/afs/fsclient.c 	call->out_scb = scb;
call             1756 fs/afs/fsclient.c 	bp = call->request;
call             1762 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1763 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1764 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1765 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1766 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1775 fs/afs/fsclient.c 	struct afs_call *call;
call             1784 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSReleaseLock, 4 * 4, 6 * 4);
call             1785 fs/afs/fsclient.c 	if (!call)
call             1788 fs/afs/fsclient.c 	call->key = fc->key;
call             1789 fs/afs/fsclient.c 	call->lvnode = vnode;
call             1790 fs/afs/fsclient.c 	call->out_scb = scb;
call             1793 fs/afs/fsclient.c 	bp = call->request;
call             1799 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1800 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1801 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             1802 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1803 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1809 fs/afs/fsclient.c static int afs_deliver_fs_give_up_all_callbacks(struct afs_call *call)
call             1811 fs/afs/fsclient.c 	return afs_transfer_reply(call);
call             1832 fs/afs/fsclient.c 	struct afs_call *call;
call             1837 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSGiveUpAllCallBacks, 1 * 4, 0);
call             1838 fs/afs/fsclient.c 	if (!call)
call             1841 fs/afs/fsclient.c 	call->key = key;
call             1844 fs/afs/fsclient.c 	bp = call->request;
call             1848 fs/afs/fsclient.c 	afs_make_call(ac, call, GFP_NOFS);
call             1849 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, ac);
call             1855 fs/afs/fsclient.c static int afs_deliver_fs_get_capabilities(struct afs_call *call)
call             1860 fs/afs/fsclient.c 	_enter("{%u,%zu}", call->unmarshall, iov_iter_count(&call->iter));
call             1862 fs/afs/fsclient.c 	switch (call->unmarshall) {
call             1864 fs/afs/fsclient.c 		afs_extract_to_tmp(call);
call             1865 fs/afs/fsclient.c 		call->unmarshall++;
call             1870 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             1874 fs/afs/fsclient.c 		count = ntohl(call->tmp);
call             1876 fs/afs/fsclient.c 		call->count = count;
call             1877 fs/afs/fsclient.c 		call->count2 = count;
call             1878 fs/afs/fsclient.c 		afs_extract_discard(call, count * sizeof(__be32));
call             1879 fs/afs/fsclient.c 		call->unmarshall++;
call             1884 fs/afs/fsclient.c 		ret = afs_extract_data(call, false);
call             1890 fs/afs/fsclient.c 		call->unmarshall++;
call             1919 fs/afs/fsclient.c 	struct afs_call *call;
call             1924 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSGetCapabilities, 1 * 4, 16 * 4);
call             1925 fs/afs/fsclient.c 	if (!call)
call             1928 fs/afs/fsclient.c 	call->key = key;
call             1929 fs/afs/fsclient.c 	call->server = afs_get_server(server, afs_server_trace_get_caps);
call             1930 fs/afs/fsclient.c 	call->server_index = server_index;
call             1931 fs/afs/fsclient.c 	call->upgrade = true;
call             1932 fs/afs/fsclient.c 	call->async = true;
call             1933 fs/afs/fsclient.c 	call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
call             1936 fs/afs/fsclient.c 	bp = call->request;
call             1940 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, NULL);
call             1941 fs/afs/fsclient.c 	afs_make_call(ac, call, GFP_NOFS);
call             1942 fs/afs/fsclient.c 	return call;
call             1948 fs/afs/fsclient.c static int afs_deliver_fs_fetch_status(struct afs_call *call)
call             1953 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call             1958 fs/afs/fsclient.c 	bp = call->buffer;
call             1959 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call             1962 fs/afs/fsclient.c 	xdr_decode_AFSCallBack(&bp, call, call->out_scb);
call             1963 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             1988 fs/afs/fsclient.c 	struct afs_call *call;
call             1997 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSFetchStatus, 16, (21 + 3 + 6) * 4);
call             1998 fs/afs/fsclient.c 	if (!call) {
call             2003 fs/afs/fsclient.c 	call->key = fc->key;
call             2004 fs/afs/fsclient.c 	call->out_fid = fid;
call             2005 fs/afs/fsclient.c 	call->out_scb = scb;
call             2006 fs/afs/fsclient.c 	call->out_volsync = volsync;
call             2009 fs/afs/fsclient.c 	bp = call->request;
call             2015 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             2016 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, fid);
call             2017 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             2018 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             2019 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             2025 fs/afs/fsclient.c static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call             2032 fs/afs/fsclient.c 	_enter("{%u}", call->unmarshall);
call             2034 fs/afs/fsclient.c 	switch (call->unmarshall) {
call             2036 fs/afs/fsclient.c 		afs_extract_to_tmp(call);
call             2037 fs/afs/fsclient.c 		call->unmarshall++;
call             2043 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             2047 fs/afs/fsclient.c 		tmp = ntohl(call->tmp);
call             2048 fs/afs/fsclient.c 		_debug("status count: %u/%u", tmp, call->count2);
call             2049 fs/afs/fsclient.c 		if (tmp != call->count2)
call             2050 fs/afs/fsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             2053 fs/afs/fsclient.c 		call->count = 0;
call             2054 fs/afs/fsclient.c 		call->unmarshall++;
call             2056 fs/afs/fsclient.c 		afs_extract_to_buf(call, 21 * sizeof(__be32));
call             2060 fs/afs/fsclient.c 		_debug("extract status array %u", call->count);
call             2061 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             2065 fs/afs/fsclient.c 		bp = call->buffer;
call             2066 fs/afs/fsclient.c 		scb = &call->out_scb[call->count];
call             2067 fs/afs/fsclient.c 		ret = xdr_decode_AFSFetchStatus(&bp, call, scb);
call             2071 fs/afs/fsclient.c 		call->count++;
call             2072 fs/afs/fsclient.c 		if (call->count < call->count2)
call             2075 fs/afs/fsclient.c 		call->count = 0;
call             2076 fs/afs/fsclient.c 		call->unmarshall++;
call             2077 fs/afs/fsclient.c 		afs_extract_to_tmp(call);
call             2083 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             2087 fs/afs/fsclient.c 		tmp = ntohl(call->tmp);
call             2089 fs/afs/fsclient.c 		if (tmp != call->count2)
call             2090 fs/afs/fsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             2092 fs/afs/fsclient.c 		call->count = 0;
call             2093 fs/afs/fsclient.c 		call->unmarshall++;
call             2095 fs/afs/fsclient.c 		afs_extract_to_buf(call, 3 * sizeof(__be32));
call             2100 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             2105 fs/afs/fsclient.c 		bp = call->buffer;
call             2106 fs/afs/fsclient.c 		scb = &call->out_scb[call->count];
call             2107 fs/afs/fsclient.c 		xdr_decode_AFSCallBack(&bp, call, scb);
call             2108 fs/afs/fsclient.c 		call->count++;
call             2109 fs/afs/fsclient.c 		if (call->count < call->count2)
call             2112 fs/afs/fsclient.c 		afs_extract_to_buf(call, 6 * sizeof(__be32));
call             2113 fs/afs/fsclient.c 		call->unmarshall++;
call             2117 fs/afs/fsclient.c 		ret = afs_extract_data(call, false);
call             2121 fs/afs/fsclient.c 		bp = call->buffer;
call             2122 fs/afs/fsclient.c 		xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             2124 fs/afs/fsclient.c 		call->unmarshall++;
call             2154 fs/afs/fsclient.c 	struct afs_call *call;
call             2165 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSInlineBulkStatus,
call             2168 fs/afs/fsclient.c 	if (!call) {
call             2173 fs/afs/fsclient.c 	call->key = fc->key;
call             2174 fs/afs/fsclient.c 	call->out_scb = statuses;
call             2175 fs/afs/fsclient.c 	call->out_volsync = volsync;
call             2176 fs/afs/fsclient.c 	call->count2 = nr_fids;
call             2179 fs/afs/fsclient.c 	bp = call->request;
call             2188 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             2189 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &fids[0]);
call             2190 fs/afs/fsclient.c 	afs_set_fc_call(call, fc);
call             2191 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             2192 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             2198 fs/afs/fsclient.c static int afs_deliver_fs_fetch_acl(struct afs_call *call)
call             2205 fs/afs/fsclient.c 	_enter("{%u}", call->unmarshall);
call             2207 fs/afs/fsclient.c 	switch (call->unmarshall) {
call             2209 fs/afs/fsclient.c 		afs_extract_to_tmp(call);
call             2210 fs/afs/fsclient.c 		call->unmarshall++;
call             2215 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             2219 fs/afs/fsclient.c 		size = call->count2 = ntohl(call->tmp);
call             2225 fs/afs/fsclient.c 		call->ret_acl = acl;
call             2226 fs/afs/fsclient.c 		acl->size = call->count2;
call             2227 fs/afs/fsclient.c 		afs_extract_begin(call, acl->data, size);
call             2228 fs/afs/fsclient.c 		call->unmarshall++;
call             2233 fs/afs/fsclient.c 		ret = afs_extract_data(call, true);
call             2237 fs/afs/fsclient.c 		afs_extract_to_buf(call, (21 + 6) * 4);
call             2238 fs/afs/fsclient.c 		call->unmarshall++;
call             2243 fs/afs/fsclient.c 		ret = afs_extract_data(call, false);
call             2247 fs/afs/fsclient.c 		bp = call->buffer;
call             2248 fs/afs/fsclient.c 		ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call             2251 fs/afs/fsclient.c 		xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             2253 fs/afs/fsclient.c 		call->unmarshall++;
call             2263 fs/afs/fsclient.c static void afs_destroy_fs_fetch_acl(struct afs_call *call)
call             2265 fs/afs/fsclient.c 	kfree(call->ret_acl);
call             2266 fs/afs/fsclient.c 	afs_flat_call_destructor(call);
call             2286 fs/afs/fsclient.c 	struct afs_call *call;
call             2293 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSFetchACL, 16, (21 + 6) * 4);
call             2294 fs/afs/fsclient.c 	if (!call) {
call             2299 fs/afs/fsclient.c 	call->key = fc->key;
call             2300 fs/afs/fsclient.c 	call->ret_acl = NULL;
call             2301 fs/afs/fsclient.c 	call->out_scb = scb;
call             2302 fs/afs/fsclient.c 	call->out_volsync = NULL;
call             2305 fs/afs/fsclient.c 	bp = call->request;
call             2311 fs/afs/fsclient.c 	afs_use_fs_server(call, fc->cbi);
call             2312 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             2313 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_KERNEL);
call             2314 fs/afs/fsclient.c 	return (struct afs_acl *)afs_wait_for_call_to_complete(call, &fc->ac);
call             2321 fs/afs/fsclient.c static int afs_deliver_fs_file_status_and_vol(struct afs_call *call)
call             2326 fs/afs/fsclient.c 	ret = afs_transfer_reply(call);
call             2330 fs/afs/fsclient.c 	bp = call->buffer;
call             2331 fs/afs/fsclient.c 	ret = xdr_decode_AFSFetchStatus(&bp, call, call->out_scb);
call             2334 fs/afs/fsclient.c 	xdr_decode_AFSVolSync(&bp, call->out_volsync);
call             2357 fs/afs/fsclient.c 	struct afs_call *call;
call             2366 fs/afs/fsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSStoreACL,
call             2368 fs/afs/fsclient.c 	if (!call) {
call             2373 fs/afs/fsclient.c 	call->key = fc->key;
call             2374 fs/afs/fsclient.c 	call->out_scb = scb;
call             2375 fs/afs/fsclient.c 	call->out_volsync = NULL;
call             2378 fs/afs/fsclient.c 	bp = call->request;
call             2388 fs/afs/fsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             2389 fs/afs/fsclient.c 	afs_make_call(&fc->ac, call, GFP_KERNEL);
call             2390 fs/afs/fsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
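
The GetVolumeStatus delivery path above repeatedly rounds string lengths with "(call->count + 3) & ~3" because XDR pads every counted string to a 4-byte boundary on the wire. Below is a minimal userspace sketch of that padding rule and of pulling a counted, padded string out of a flat reply buffer; xdr_padded_len() and xdr_pull_string() are illustrative names, not kernel helpers, and the oversize handling is simplified (the code above rejects counts of AFSNAMEMAX or more instead of truncating).

#include <arpa/inet.h>	/* ntohl */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Round a byte count up to the 4-byte XDR boundary, as in
 * "size = (call->count + 3) & ~3" above. */
static size_t xdr_padded_len(size_t count)
{
	return (count + 3) & ~(size_t)3;
}

/* Pull a counted, padded string: a be32 length followed by the bytes
 * plus padding.  Returns the number of buffer bytes consumed. */
static size_t xdr_pull_string(const unsigned char *buf, char *out, size_t outmax)
{
	uint32_t count;

	memcpy(&count, buf, 4);
	count = ntohl(count);
	if (count >= outmax)
		count = outmax - 1;	/* the kernel code rejects oversize instead */
	memcpy(out, buf + 4, count);
	out[count] = 0;			/* mirrors "p[call->count] = 0" above */
	return 4 + xdr_padded_len(count);
}

int main(void)
{
	/* "vol1" (4 bytes, no padding) then "abc" (3 bytes + 1 pad byte) */
	static const unsigned char wire[] = {
		0, 0, 0, 4, 'v', 'o', 'l', '1',
		0, 0, 0, 3, 'a', 'b', 'c', 0,
	};
	char name[16], msg[16];
	size_t off = 0;

	off += xdr_pull_string(wire + off, name, sizeof(name));
	off += xdr_pull_string(wire + off, msg, sizeof(msg));
	printf("%s / %s (consumed %zu of %zu bytes)\n",
	       name, msg, off, sizeof(wire));
	return 0;
}
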
call              185 fs/afs/internal.h 	int (*deliver)(struct afs_call *call);
call              188 fs/afs/internal.h 	void (*destructor)(struct afs_call *call);
call              194 fs/afs/internal.h 	void (*done)(struct afs_call *call);
call             1129 fs/afs/internal.h static inline void afs_set_fc_call(struct afs_call *call, struct afs_fs_cursor *fc)
call             1131 fs/afs/internal.h 	call->intr = fc->flags & AFS_FS_CURSOR_INTR;
call             1132 fs/afs/internal.h 	fc->type = call->type;
call             1135 fs/afs/internal.h static inline void afs_extract_begin(struct afs_call *call, void *buf, size_t size)
call             1137 fs/afs/internal.h 	call->kvec[0].iov_base = buf;
call             1138 fs/afs/internal.h 	call->kvec[0].iov_len = size;
call             1139 fs/afs/internal.h 	iov_iter_kvec(&call->iter, READ, call->kvec, 1, size);
call             1142 fs/afs/internal.h static inline void afs_extract_to_tmp(struct afs_call *call)
call             1144 fs/afs/internal.h 	afs_extract_begin(call, &call->tmp, sizeof(call->tmp));
call             1147 fs/afs/internal.h static inline void afs_extract_to_tmp64(struct afs_call *call)
call             1149 fs/afs/internal.h 	afs_extract_begin(call, &call->tmp64, sizeof(call->tmp64));
call             1152 fs/afs/internal.h static inline void afs_extract_discard(struct afs_call *call, size_t size)
call             1154 fs/afs/internal.h 	iov_iter_discard(&call->iter, READ, size);
call             1157 fs/afs/internal.h static inline void afs_extract_to_buf(struct afs_call *call, size_t size)
call             1159 fs/afs/internal.h 	afs_extract_begin(call, call->buffer, size);
call             1162 fs/afs/internal.h static inline int afs_transfer_reply(struct afs_call *call)
call             1164 fs/afs/internal.h 	return afs_extract_data(call, false);
call             1167 fs/afs/internal.h static inline bool afs_check_call_state(struct afs_call *call,
call             1170 fs/afs/internal.h 	return READ_ONCE(call->state) == state;
call             1173 fs/afs/internal.h static inline bool afs_set_call_state(struct afs_call *call,
call             1179 fs/afs/internal.h 	spin_lock_bh(&call->state_lock);
call             1180 fs/afs/internal.h 	if (call->state == from) {
call             1181 fs/afs/internal.h 		call->state = to;
call             1182 fs/afs/internal.h 		trace_afs_call_state(call, from, to, 0, 0);
call             1185 fs/afs/internal.h 	spin_unlock_bh(&call->state_lock);
call             1189 fs/afs/internal.h static inline void afs_set_call_complete(struct afs_call *call,
call             1195 fs/afs/internal.h 	spin_lock_bh(&call->state_lock);
call             1196 fs/afs/internal.h 	state = call->state;
call             1198 fs/afs/internal.h 		call->abort_code = remote_abort;
call             1199 fs/afs/internal.h 		call->error = error;
call             1200 fs/afs/internal.h 		call->state = AFS_CALL_COMPLETE;
call             1201 fs/afs/internal.h 		trace_afs_call_state(call, state, AFS_CALL_COMPLETE,
call             1205 fs/afs/internal.h 	spin_unlock_bh(&call->state_lock);
call             1207 fs/afs/internal.h 		trace_afs_call_done(call);
call             1213 fs/afs/internal.h 		if (call->drop_ref)
call             1214 fs/afs/internal.h 			afs_put_call(call);
call             1430 fs/afs/internal.h static inline int afs_io_error(struct afs_call *call, enum afs_io_error where)
call             1432 fs/afs/internal.h 	trace_afs_io_error(call->debug_id, -EIO, where);
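
afs_set_call_state() above only advances a call when it is still in the expected "from" state, so a racing completion cannot be undone by a stale transition. Below is a rough userspace model of that compare-and-transition idea; it substitutes a C11 compare-exchange for the spinlocked check in internal.h, and the enum values and the try_transition() name are invented for the sketch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum call_state {
	CALL_REQUESTING,
	CALL_AWAIT_REPLY,
	CALL_COMPLETE,
};

struct fake_call {
	_Atomic enum call_state state;
};

/* Move the call from @from to @to only if nothing else changed it first;
 * afs_set_call_state() does the equivalent under call->state_lock. */
static bool try_transition(struct fake_call *call,
			   enum call_state from, enum call_state to)
{
	return atomic_compare_exchange_strong(&call->state, &from, to);
}

int main(void)
{
	struct fake_call call = { .state = CALL_REQUESTING };

	printf("requesting -> await_reply: %d\n",
	       try_transition(&call, CALL_REQUESTING, CALL_AWAIT_REPLY));
	printf("requesting -> complete:    %d\n",	/* stale 'from', must fail */
	       try_transition(&call, CALL_REQUESTING, CALL_COMPLETE));
	printf("await_reply -> complete:   %d\n",
	       try_transition(&call, CALL_AWAIT_REPLY, CALL_COMPLETE));
	return 0;
}
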
call              140 fs/afs/rxrpc.c 	struct afs_call *call;
call              143 fs/afs/rxrpc.c 	call = kzalloc(sizeof(*call), gfp);
call              144 fs/afs/rxrpc.c 	if (!call)
call              147 fs/afs/rxrpc.c 	call->type = type;
call              148 fs/afs/rxrpc.c 	call->net = net;
call              149 fs/afs/rxrpc.c 	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
call              150 fs/afs/rxrpc.c 	atomic_set(&call->usage, 1);
call              151 fs/afs/rxrpc.c 	INIT_WORK(&call->async_work, afs_process_async_call);
call              152 fs/afs/rxrpc.c 	init_waitqueue_head(&call->waitq);
call              153 fs/afs/rxrpc.c 	spin_lock_init(&call->state_lock);
call              154 fs/afs/rxrpc.c 	call->_iter = &call->iter;
call              157 fs/afs/rxrpc.c 	trace_afs_call(call, afs_call_trace_alloc, 1, o,
call              159 fs/afs/rxrpc.c 	return call;
call              165 fs/afs/rxrpc.c void afs_put_call(struct afs_call *call)
call              167 fs/afs/rxrpc.c 	struct afs_net *net = call->net;
call              168 fs/afs/rxrpc.c 	int n = atomic_dec_return(&call->usage);
call              171 fs/afs/rxrpc.c 	trace_afs_call(call, afs_call_trace_put, n, o,
call              176 fs/afs/rxrpc.c 		ASSERT(!work_pending(&call->async_work));
call              177 fs/afs/rxrpc.c 		ASSERT(call->type->name != NULL);
call              179 fs/afs/rxrpc.c 		if (call->rxcall) {
call              180 fs/afs/rxrpc.c 			rxrpc_kernel_end_call(net->socket, call->rxcall);
call              181 fs/afs/rxrpc.c 			call->rxcall = NULL;
call              183 fs/afs/rxrpc.c 		if (call->type->destructor)
call              184 fs/afs/rxrpc.c 			call->type->destructor(call);
call              186 fs/afs/rxrpc.c 		afs_put_server(call->net, call->server, afs_server_trace_put_call);
call              187 fs/afs/rxrpc.c 		afs_put_cb_interest(call->net, call->cbi);
call              188 fs/afs/rxrpc.c 		afs_put_addrlist(call->alist);
call              189 fs/afs/rxrpc.c 		kfree(call->request);
call              191 fs/afs/rxrpc.c 		trace_afs_call(call, afs_call_trace_free, 0, o,
call              193 fs/afs/rxrpc.c 		kfree(call);
call              201 fs/afs/rxrpc.c static struct afs_call *afs_get_call(struct afs_call *call,
call              204 fs/afs/rxrpc.c 	int u = atomic_inc_return(&call->usage);
call              206 fs/afs/rxrpc.c 	trace_afs_call(call, why, u,
call              207 fs/afs/rxrpc.c 		       atomic_read(&call->net->nr_outstanding_calls),
call              209 fs/afs/rxrpc.c 	return call;
call              215 fs/afs/rxrpc.c static void afs_queue_call_work(struct afs_call *call)
call              217 fs/afs/rxrpc.c 	if (call->type->work) {
call              218 fs/afs/rxrpc.c 		INIT_WORK(&call->work, call->type->work);
call              220 fs/afs/rxrpc.c 		afs_get_call(call, afs_call_trace_work);
call              221 fs/afs/rxrpc.c 		if (!queue_work(afs_wq, &call->work))
call              222 fs/afs/rxrpc.c 			afs_put_call(call);
call              233 fs/afs/rxrpc.c 	struct afs_call *call;
call              235 fs/afs/rxrpc.c 	call = afs_alloc_call(net, type, GFP_NOFS);
call              236 fs/afs/rxrpc.c 	if (!call)
call              240 fs/afs/rxrpc.c 		call->request_size = request_size;
call              241 fs/afs/rxrpc.c 		call->request = kmalloc(request_size, GFP_NOFS);
call              242 fs/afs/rxrpc.c 		if (!call->request)
call              247 fs/afs/rxrpc.c 		call->reply_max = reply_max;
call              248 fs/afs/rxrpc.c 		call->buffer = kmalloc(reply_max, GFP_NOFS);
call              249 fs/afs/rxrpc.c 		if (!call->buffer)
call              253 fs/afs/rxrpc.c 	afs_extract_to_buf(call, call->reply_max);
call              254 fs/afs/rxrpc.c 	call->operation_ID = type->op;
call              255 fs/afs/rxrpc.c 	init_waitqueue_head(&call->waitq);
call              256 fs/afs/rxrpc.c 	return call;
call              259 fs/afs/rxrpc.c 	afs_put_call(call);
call              267 fs/afs/rxrpc.c void afs_flat_call_destructor(struct afs_call *call)
call              271 fs/afs/rxrpc.c 	kfree(call->request);
call              272 fs/afs/rxrpc.c 	call->request = NULL;
call              273 fs/afs/rxrpc.c 	kfree(call->buffer);
call              274 fs/afs/rxrpc.c 	call->buffer = NULL;
call              282 fs/afs/rxrpc.c static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
call              290 fs/afs/rxrpc.c 	n = find_get_pages_contig(call->mapping, first, nr, pages);
call              297 fs/afs/rxrpc.c 			to = call->last_to;
call              317 fs/afs/rxrpc.c 	struct afs_call *call = (struct afs_call *)call_user_ID;
call              319 fs/afs/rxrpc.c 	afs_set_call_state(call, AFS_CALL_CL_REQUESTING, AFS_CALL_CL_AWAIT_REPLY);
call              325 fs/afs/rxrpc.c static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
call              329 fs/afs/rxrpc.c 	pgoff_t first = call->first, last = call->last;
call              332 fs/afs/rxrpc.c 	offset = call->first_offset;
call              333 fs/afs/rxrpc.c 	call->first_offset = 0;
call              336 fs/afs/rxrpc.c 		afs_load_bvec(call, msg, bv, first, last, offset);
call              337 fs/afs/rxrpc.c 		trace_afs_send_pages(call, msg, first, last, offset);
call              343 fs/afs/rxrpc.c 		ret = rxrpc_kernel_send_data(call->net->socket, call->rxcall, msg,
call              353 fs/afs/rxrpc.c 	trace_afs_sent_pages(call, call->first, last, first, ret);
call              361 fs/afs/rxrpc.c void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
call              372 fs/afs/rxrpc.c 	ASSERT(call->type != NULL);
call              373 fs/afs/rxrpc.c 	ASSERT(call->type->name != NULL);
call              376 fs/afs/rxrpc.c 	       call, call->type->name, key_serial(call->key),
call              377 fs/afs/rxrpc.c 	       atomic_read(&call->net->nr_outstanding_calls));
call              379 fs/afs/rxrpc.c 	call->addr_ix = ac->index;
call              380 fs/afs/rxrpc.c 	call->alist = afs_get_addrlist(ac->alist);
call              386 fs/afs/rxrpc.c 	tx_total_len = call->request_size;
call              387 fs/afs/rxrpc.c 	if (call->send_pages) {
call              388 fs/afs/rxrpc.c 		if (call->last == call->first) {
call              389 fs/afs/rxrpc.c 			tx_total_len += call->last_to - call->first_offset;
call              395 fs/afs/rxrpc.c 			tx_total_len += PAGE_SIZE - call->first_offset;
call              396 fs/afs/rxrpc.c 			tx_total_len += call->last_to;
call              397 fs/afs/rxrpc.c 			tx_total_len += (call->last - call->first - 1) * PAGE_SIZE;
call              404 fs/afs/rxrpc.c 	if (call->async) {
call              405 fs/afs/rxrpc.c 		afs_get_call(call, afs_call_trace_get);
call              406 fs/afs/rxrpc.c 		call->drop_ref = true;
call              410 fs/afs/rxrpc.c 	rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
call              411 fs/afs/rxrpc.c 					 (unsigned long)call,
call              413 fs/afs/rxrpc.c 					 (call->async ?
call              416 fs/afs/rxrpc.c 					 call->upgrade,
call              417 fs/afs/rxrpc.c 					 (call->intr ? RXRPC_PREINTERRUPTIBLE :
call              419 fs/afs/rxrpc.c 					 call->debug_id);
call              422 fs/afs/rxrpc.c 		call->error = ret;
call              426 fs/afs/rxrpc.c 	call->rxcall = rxcall;
call              428 fs/afs/rxrpc.c 	if (call->max_lifespan)
call              429 fs/afs/rxrpc.c 		rxrpc_kernel_set_max_life(call->net->socket, rxcall,
call              430 fs/afs/rxrpc.c 					  call->max_lifespan);
call              433 fs/afs/rxrpc.c 	iov[0].iov_base	= call->request;
call              434 fs/afs/rxrpc.c 	iov[0].iov_len	= call->request_size;
call              438 fs/afs/rxrpc.c 	iov_iter_kvec(&msg.msg_iter, WRITE, iov, 1, call->request_size);
call              441 fs/afs/rxrpc.c 	msg.msg_flags		= MSG_WAITALL | (call->send_pages ? MSG_MORE : 0);
call              443 fs/afs/rxrpc.c 	ret = rxrpc_kernel_send_data(call->net->socket, rxcall,
call              444 fs/afs/rxrpc.c 				     &msg, call->request_size,
call              449 fs/afs/rxrpc.c 	if (call->send_pages) {
call              450 fs/afs/rxrpc.c 		ret = afs_send_pages(call, &msg);
call              465 fs/afs/rxrpc.c 		rxrpc_kernel_abort_call(call->net->socket, rxcall,
call              469 fs/afs/rxrpc.c 		rxrpc_kernel_recv_data(call->net->socket, rxcall,
call              471 fs/afs/rxrpc.c 				       &call->abort_code, &call->service_id);
call              472 fs/afs/rxrpc.c 		ac->abort_code = call->abort_code;
call              475 fs/afs/rxrpc.c 	call->error = ret;
call              476 fs/afs/rxrpc.c 	trace_afs_call_done(call);
call              478 fs/afs/rxrpc.c 	if (call->type->done)
call              479 fs/afs/rxrpc.c 		call->type->done(call);
call              485 fs/afs/rxrpc.c 	if (call->rxcall) {
call              486 fs/afs/rxrpc.c 		rxrpc_kernel_end_call(call->net->socket, call->rxcall);
call              487 fs/afs/rxrpc.c 		call->rxcall = NULL;
call              489 fs/afs/rxrpc.c 	if (call->async) {
call              490 fs/afs/rxrpc.c 		if (cancel_work_sync(&call->async_work))
call              491 fs/afs/rxrpc.c 			afs_put_call(call);
call              492 fs/afs/rxrpc.c 		afs_put_call(call);
call              496 fs/afs/rxrpc.c 	call->state = AFS_CALL_COMPLETE;
call              503 fs/afs/rxrpc.c static void afs_deliver_to_call(struct afs_call *call)
call              509 fs/afs/rxrpc.c 	_enter("%s", call->type->name);
call              511 fs/afs/rxrpc.c 	while (state = READ_ONCE(call->state),
call              518 fs/afs/rxrpc.c 			iov_iter_kvec(&call->iter, READ, NULL, 0, 0);
call              519 fs/afs/rxrpc.c 			ret = rxrpc_kernel_recv_data(call->net->socket,
call              520 fs/afs/rxrpc.c 						     call->rxcall, &call->iter,
call              522 fs/afs/rxrpc.c 						     &call->service_id);
call              523 fs/afs/rxrpc.c 			trace_afs_receive_data(call, &call->iter, false, ret);
call              535 fs/afs/rxrpc.c 		if (!call->have_reply_time &&
call              536 fs/afs/rxrpc.c 		    rxrpc_kernel_get_reply_time(call->net->socket,
call              537 fs/afs/rxrpc.c 						call->rxcall,
call              538 fs/afs/rxrpc.c 						&call->reply_time))
call              539 fs/afs/rxrpc.c 			call->have_reply_time = true;
call              541 fs/afs/rxrpc.c 		ret = call->type->deliver(call);
call              542 fs/afs/rxrpc.c 		state = READ_ONCE(call->state);
call              545 fs/afs/rxrpc.c 			afs_queue_call_work(call);
call              547 fs/afs/rxrpc.c 				if (call->cbi)
call              549 fs/afs/rxrpc.c 						&call->cbi->server->flags);
call              562 fs/afs/rxrpc.c 			rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
call              567 fs/afs/rxrpc.c 			       call->debug_id, state);
call              575 fs/afs/rxrpc.c 			rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
call              580 fs/afs/rxrpc.c 			rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
call              587 fs/afs/rxrpc.c 	if (call->type->done)
call              588 fs/afs/rxrpc.c 		call->type->done(call);
call              596 fs/afs/rxrpc.c 	afs_set_call_complete(call, ret, remote_abort);
call              604 fs/afs/rxrpc.c long afs_wait_for_call_to_complete(struct afs_call *call,
call              614 fs/afs/rxrpc.c 	ret = call->error;
call              618 fs/afs/rxrpc.c 	add_wait_queue(&call->waitq, &myself);
call              623 fs/afs/rxrpc.c 		if (!afs_check_call_state(call, AFS_CALL_COMPLETE) &&
call              624 fs/afs/rxrpc.c 		    call->need_attention) {
call              625 fs/afs/rxrpc.c 			call->need_attention = false;
call              627 fs/afs/rxrpc.c 			afs_deliver_to_call(call);
call              631 fs/afs/rxrpc.c 		if (afs_check_call_state(call, AFS_CALL_COMPLETE))
call              634 fs/afs/rxrpc.c 		if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
call              643 fs/afs/rxrpc.c 	remove_wait_queue(&call->waitq, &myself);
call              646 fs/afs/rxrpc.c 	if (!afs_check_call_state(call, AFS_CALL_COMPLETE)) {
call              648 fs/afs/rxrpc.c 			afs_set_call_complete(call, call->error, call->abort_code);
call              652 fs/afs/rxrpc.c 			if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
call              654 fs/afs/rxrpc.c 				afs_set_call_complete(call, -EINTR, 0);
call              658 fs/afs/rxrpc.c 	spin_lock_bh(&call->state_lock);
call              659 fs/afs/rxrpc.c 	ac->abort_code = call->abort_code;
call              660 fs/afs/rxrpc.c 	ac->error = call->error;
call              661 fs/afs/rxrpc.c 	spin_unlock_bh(&call->state_lock);
call              666 fs/afs/rxrpc.c 		ret = call->ret0;
call              667 fs/afs/rxrpc.c 		call->ret0 = 0;
call              677 fs/afs/rxrpc.c 	afs_put_call(call);
call              688 fs/afs/rxrpc.c 	struct afs_call *call = (struct afs_call *)call_user_ID;
call              690 fs/afs/rxrpc.c 	call->need_attention = true;
call              691 fs/afs/rxrpc.c 	wake_up(&call->waitq);
call              700 fs/afs/rxrpc.c 	struct afs_call *call = (struct afs_call *)call_user_ID;
call              703 fs/afs/rxrpc.c 	trace_afs_notify_call(rxcall, call);
call              704 fs/afs/rxrpc.c 	call->need_attention = true;
call              706 fs/afs/rxrpc.c 	u = atomic_fetch_add_unless(&call->usage, 1, 0);
call              708 fs/afs/rxrpc.c 		trace_afs_call(call, afs_call_trace_wake, u + 1,
call              709 fs/afs/rxrpc.c 			       atomic_read(&call->net->nr_outstanding_calls),
call              712 fs/afs/rxrpc.c 		if (!queue_work(afs_async_calls, &call->async_work))
call              713 fs/afs/rxrpc.c 			afs_put_call(call);
call              723 fs/afs/rxrpc.c 	struct afs_call *call = container_of(work, struct afs_call, async_work);
call              727 fs/afs/rxrpc.c 	if (call->state < AFS_CALL_COMPLETE && call->need_attention) {
call              728 fs/afs/rxrpc.c 		call->need_attention = false;
call              729 fs/afs/rxrpc.c 		afs_deliver_to_call(call);
call              732 fs/afs/rxrpc.c 	afs_put_call(call);
call              738 fs/afs/rxrpc.c 	struct afs_call *call = (struct afs_call *)user_call_ID;
call              740 fs/afs/rxrpc.c 	call->rxcall = rxcall;
call              750 fs/afs/rxrpc.c 	struct afs_call *call = net->spare_incoming_call;
call              753 fs/afs/rxrpc.c 		if (!call) {
call              754 fs/afs/rxrpc.c 			call = afs_alloc_call(net, &afs_RXCMxxxx, GFP_KERNEL);
call              755 fs/afs/rxrpc.c 			if (!call)
call              758 fs/afs/rxrpc.c 			call->drop_ref = true;
call              759 fs/afs/rxrpc.c 			call->async = true;
call              760 fs/afs/rxrpc.c 			call->state = AFS_CALL_SV_AWAIT_OP_ID;
call              761 fs/afs/rxrpc.c 			init_waitqueue_head(&call->waitq);
call              762 fs/afs/rxrpc.c 			afs_extract_to_tmp(call);
call              768 fs/afs/rxrpc.c 					       (unsigned long)call,
call              770 fs/afs/rxrpc.c 					       call->debug_id) < 0)
call              772 fs/afs/rxrpc.c 		call = NULL;
call              774 fs/afs/rxrpc.c 	net->spare_incoming_call = call;
call              783 fs/afs/rxrpc.c 	struct afs_call *call = (struct afs_call *)user_call_ID;
call              785 fs/afs/rxrpc.c 	call->rxcall = NULL;
call              786 fs/afs/rxrpc.c 	afs_put_call(call);
call              804 fs/afs/rxrpc.c static int afs_deliver_cm_op_id(struct afs_call *call)
call              808 fs/afs/rxrpc.c 	_enter("{%zu}", iov_iter_count(call->_iter));
call              811 fs/afs/rxrpc.c 	ret = afs_extract_data(call, true);
call              815 fs/afs/rxrpc.c 	call->operation_ID = ntohl(call->tmp);
call              816 fs/afs/rxrpc.c 	afs_set_call_state(call, AFS_CALL_SV_AWAIT_OP_ID, AFS_CALL_SV_AWAIT_REQUEST);
call              820 fs/afs/rxrpc.c 	if (!afs_cm_incoming_call(call))
call              823 fs/afs/rxrpc.c 	trace_afs_cb_call(call);
call              827 fs/afs/rxrpc.c 	return call->type->deliver(call);
call              838 fs/afs/rxrpc.c 	struct afs_call *call = (struct afs_call *)call_user_ID;
call              840 fs/afs/rxrpc.c 	afs_set_call_state(call, AFS_CALL_SV_REPLYING, AFS_CALL_SV_AWAIT_ACK);
call              846 fs/afs/rxrpc.c void afs_send_empty_reply(struct afs_call *call)
call              848 fs/afs/rxrpc.c 	struct afs_net *net = call->net;
call              853 fs/afs/rxrpc.c 	rxrpc_kernel_set_tx_length(net->socket, call->rxcall, 0);
call              862 fs/afs/rxrpc.c 	switch (rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, 0,
call              870 fs/afs/rxrpc.c 		rxrpc_kernel_abort_call(net->socket, call->rxcall,
call              882 fs/afs/rxrpc.c void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
call              884 fs/afs/rxrpc.c 	struct afs_net *net = call->net;
call              891 fs/afs/rxrpc.c 	rxrpc_kernel_set_tx_length(net->socket, call->rxcall, len);
call              902 fs/afs/rxrpc.c 	n = rxrpc_kernel_send_data(net->socket, call->rxcall, &msg, len,
call              912 fs/afs/rxrpc.c 		rxrpc_kernel_abort_call(net->socket, call->rxcall,
call              921 fs/afs/rxrpc.c int afs_extract_data(struct afs_call *call, bool want_more)
call              923 fs/afs/rxrpc.c 	struct afs_net *net = call->net;
call              924 fs/afs/rxrpc.c 	struct iov_iter *iter = call->_iter;
call              929 fs/afs/rxrpc.c 	_enter("{%s,%zu},%d", call->type->name, iov_iter_count(iter), want_more);
call              931 fs/afs/rxrpc.c 	ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
call              933 fs/afs/rxrpc.c 				     &call->service_id);
call              937 fs/afs/rxrpc.c 	state = READ_ONCE(call->state);
call              941 fs/afs/rxrpc.c 			afs_set_call_state(call, state, AFS_CALL_CL_PROC_REPLY);
call              944 fs/afs/rxrpc.c 			afs_set_call_state(call, state, AFS_CALL_SV_REPLYING);
call              947 fs/afs/rxrpc.c 			kdebug("prem complete %d", call->error);
call              948 fs/afs/rxrpc.c 			return afs_io_error(call, afs_io_error_extract);
call              955 fs/afs/rxrpc.c 	afs_set_call_complete(call, ret, remote_abort);
call              962 fs/afs/rxrpc.c noinline int afs_protocol_error(struct afs_call *call, int error,
call              965 fs/afs/rxrpc.c 	trace_afs_protocol_error(call, error, cause);
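
The rxrpc.c entries above trace the afs_call lifetime: afs_alloc_call() starts call->usage at 1, afs_get_call() bumps it, afs_put_call() tears the call down when the count reaches zero, and the async wake-up path uses atomic_fetch_add_unless(..., 1, 0) so it never revives a call that is already being destroyed. A standalone C11 approximation of that refcounting pattern is sketched below; the obj_* names are invented and the teardown is reduced to a free().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int usage;
};

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_store(&o->usage, 1);	/* caller holds the first ref */
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->usage, 1);
}

/* Take a reference only if the object is still live, like
 * atomic_fetch_add_unless(&call->usage, 1, 0) in the wake-up path. */
static bool obj_get_unless_zero(struct obj *o)
{
	int u = atomic_load(&o->usage);

	while (u != 0)
		if (atomic_compare_exchange_weak(&o->usage, &u, u + 1))
			return true;
	return false;
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->usage, 1) == 1) {
		/* last reference gone: release resources, as afs_put_call() does */
		printf("freeing object\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = obj_alloc();

	obj_get(o);				/* e.g. handed to a work item */
	printf("tryget on live object: %d\n", obj_get_unless_zero(o));
	obj_put(o);
	obj_put(o);
	obj_put(o);				/* count hits zero, object freed */
	return 0;
}
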
call               29 fs/afs/vl_probe.c void afs_vlserver_probe_result(struct afs_call *call)
call               31 fs/afs/vl_probe.c 	struct afs_addr_list *alist = call->alist;
call               32 fs/afs/vl_probe.c 	struct afs_vlserver *server = call->vlserver;
call               33 fs/afs/vl_probe.c 	unsigned int server_index = call->server_index;
call               35 fs/afs/vl_probe.c 	unsigned int index = call->addr_ix;
call               37 fs/afs/vl_probe.c 	int ret = call->error;
call               39 fs/afs/vl_probe.c 	_enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code);
call               49 fs/afs/vl_probe.c 			server->probe.abort_code = call->abort_code;
call               56 fs/afs/vl_probe.c 		afs_io_error(call, afs_io_error_vl_probe_fail);
call               75 fs/afs/vl_probe.c 		afs_io_error(call, afs_io_error_vl_probe_fail);
call               83 fs/afs/vl_probe.c 	if (call->service_id == YFS_VL_SERVICE) {
call               86 fs/afs/vl_probe.c 		alist->addrs[index].srx_service = call->service_id;
call               91 fs/afs/vl_probe.c 			alist->addrs[index].srx_service = call->service_id;
call               95 fs/afs/vl_probe.c 	rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
call              132 fs/afs/vl_probe.c 	struct afs_call *call;
call              147 fs/afs/vl_probe.c 		call = afs_vl_get_capabilities(net, &ac, key, server,
call              149 fs/afs/vl_probe.c 		if (!IS_ERR(call)) {
call              150 fs/afs/vl_probe.c 			afs_put_call(call);
call              153 fs/afs/vl_probe.c 			afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code);
call               17 fs/afs/vlclient.c static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
call               27 fs/afs/vlclient.c 	ret = afs_transfer_reply(call);
call               32 fs/afs/vlclient.c 	uvldb = call->buffer;
call               33 fs/afs/vlclient.c 	entry = call->ret_vldb;
call              108 fs/afs/vlclient.c static void afs_destroy_vl_get_entry_by_name_u(struct afs_call *call)
call              110 fs/afs/vlclient.c 	kfree(call->ret_vldb);
call              111 fs/afs/vlclient.c 	afs_flat_call_destructor(call);
call              133 fs/afs/vlclient.c 	struct afs_call *call;
call              147 fs/afs/vlclient.c 	call = afs_alloc_flat_call(net, &afs_RXVLGetEntryByNameU, reqsz,
call              149 fs/afs/vlclient.c 	if (!call) {
call              154 fs/afs/vlclient.c 	call->key = vc->key;
call              155 fs/afs/vlclient.c 	call->ret_vldb = entry;
call              156 fs/afs/vlclient.c 	call->max_lifespan = AFS_VL_MAX_LIFESPAN;
call              159 fs/afs/vlclient.c 	bp = call->request;
call              166 fs/afs/vlclient.c 	trace_afs_make_vl_call(call);
call              167 fs/afs/vlclient.c 	afs_make_call(&vc->ac, call, GFP_KERNEL);
call              168 fs/afs/vlclient.c 	return (struct afs_vldb_entry *)afs_wait_for_call_to_complete(call, &vc->ac);
call              180 fs/afs/vlclient.c static int afs_deliver_vl_get_addrs_u(struct afs_call *call)
call              188 fs/afs/vlclient.c 	       call->unmarshall, iov_iter_count(call->_iter), call->count);
call              190 fs/afs/vlclient.c 	switch (call->unmarshall) {
call              192 fs/afs/vlclient.c 		afs_extract_to_buf(call,
call              194 fs/afs/vlclient.c 		call->unmarshall++;
call              200 fs/afs/vlclient.c 		ret = afs_extract_data(call, true);
call              204 fs/afs/vlclient.c 		bp = call->buffer + sizeof(struct afs_uuid__xdr);
call              214 fs/afs/vlclient.c 		call->ret_alist = alist;
call              215 fs/afs/vlclient.c 		call->count = count;
call              216 fs/afs/vlclient.c 		call->count2 = nentries;
call              217 fs/afs/vlclient.c 		call->unmarshall++;
call              220 fs/afs/vlclient.c 		count = min(call->count, 4U);
call              221 fs/afs/vlclient.c 		afs_extract_to_buf(call, count * sizeof(__be32));
call              225 fs/afs/vlclient.c 		ret = afs_extract_data(call, call->count > 4);
call              229 fs/afs/vlclient.c 		alist = call->ret_alist;
call              230 fs/afs/vlclient.c 		bp = call->buffer;
call              231 fs/afs/vlclient.c 		count = min(call->count, 4U);
call              233 fs/afs/vlclient.c 			if (alist->nr_addrs < call->count2)
call              236 fs/afs/vlclient.c 		call->count -= count;
call              237 fs/afs/vlclient.c 		if (call->count > 0)
call              239 fs/afs/vlclient.c 		call->unmarshall++;
call              247 fs/afs/vlclient.c static void afs_vl_get_addrs_u_destructor(struct afs_call *call)
call              249 fs/afs/vlclient.c 	afs_put_addrlist(call->ret_alist);
call              250 fs/afs/vlclient.c 	return afs_flat_call_destructor(call);
call              272 fs/afs/vlclient.c 	struct afs_call *call;
call              279 fs/afs/vlclient.c 	call = afs_alloc_flat_call(net, &afs_RXVLGetAddrsU,
call              282 fs/afs/vlclient.c 	if (!call)
call              285 fs/afs/vlclient.c 	call->key = vc->key;
call              286 fs/afs/vlclient.c 	call->ret_alist = NULL;
call              287 fs/afs/vlclient.c 	call->max_lifespan = AFS_VL_MAX_LIFESPAN;
call              290 fs/afs/vlclient.c 	bp = call->request;
call              305 fs/afs/vlclient.c 	trace_afs_make_vl_call(call);
call              306 fs/afs/vlclient.c 	afs_make_call(&vc->ac, call, GFP_KERNEL);
call              307 fs/afs/vlclient.c 	return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
call              313 fs/afs/vlclient.c static int afs_deliver_vl_get_capabilities(struct afs_call *call)
call              319 fs/afs/vlclient.c 	       call->unmarshall, iov_iter_count(call->_iter), call->count);
call              321 fs/afs/vlclient.c 	switch (call->unmarshall) {
call              323 fs/afs/vlclient.c 		afs_extract_to_tmp(call);
call              324 fs/afs/vlclient.c 		call->unmarshall++;
call              328 fs/afs/vlclient.c 		ret = afs_extract_data(call, true);
call              332 fs/afs/vlclient.c 		count = ntohl(call->tmp);
call              333 fs/afs/vlclient.c 		call->count = count;
call              334 fs/afs/vlclient.c 		call->count2 = count;
call              336 fs/afs/vlclient.c 		call->unmarshall++;
call              337 fs/afs/vlclient.c 		afs_extract_discard(call, count * sizeof(__be32));
call              341 fs/afs/vlclient.c 		ret = afs_extract_data(call, false);
call              347 fs/afs/vlclient.c 		call->unmarshall++;
call              355 fs/afs/vlclient.c static void afs_destroy_vl_get_capabilities(struct afs_call *call)
call              357 fs/afs/vlclient.c 	afs_put_vlserver(call->net, call->vlserver);
call              358 fs/afs/vlclient.c 	afs_flat_call_destructor(call);
call              385 fs/afs/vlclient.c 	struct afs_call *call;
call              390 fs/afs/vlclient.c 	call = afs_alloc_flat_call(net, &afs_RXVLGetCapabilities, 1 * 4, 16 * 4);
call              391 fs/afs/vlclient.c 	if (!call)
call              394 fs/afs/vlclient.c 	call->key = key;
call              395 fs/afs/vlclient.c 	call->vlserver = afs_get_vlserver(server);
call              396 fs/afs/vlclient.c 	call->server_index = server_index;
call              397 fs/afs/vlclient.c 	call->upgrade = true;
call              398 fs/afs/vlclient.c 	call->async = true;
call              399 fs/afs/vlclient.c 	call->max_lifespan = AFS_PROBE_MAX_LIFESPAN;
call              402 fs/afs/vlclient.c 	bp = call->request;
call              406 fs/afs/vlclient.c 	trace_afs_make_vl_call(call);
call              407 fs/afs/vlclient.c 	afs_make_call(ac, call, GFP_KERNEL);
call              408 fs/afs/vlclient.c 	return call;
call              420 fs/afs/vlclient.c static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call)
call              428 fs/afs/vlclient.c 	       call->unmarshall, iov_iter_count(call->_iter), call->count2);
call              430 fs/afs/vlclient.c 	switch (call->unmarshall) {
call              432 fs/afs/vlclient.c 		afs_extract_to_buf(call, sizeof(uuid_t) + 3 * sizeof(__be32));
call              433 fs/afs/vlclient.c 		call->unmarshall = 1;
call              440 fs/afs/vlclient.c 		ret = afs_extract_data(call, true);
call              444 fs/afs/vlclient.c 		bp = call->buffer + sizeof(uuid_t);
call              446 fs/afs/vlclient.c 		call->count	= ntohl(*bp++);
call              447 fs/afs/vlclient.c 		call->count2	= ntohl(*bp); /* Type or next count */
call              449 fs/afs/vlclient.c 		if (call->count > YFS_MAXENDPOINTS)
call              450 fs/afs/vlclient.c 			return afs_protocol_error(call, -EBADMSG,
call              453 fs/afs/vlclient.c 		alist = afs_alloc_addrlist(call->count, FS_SERVICE, AFS_FS_PORT);
call              457 fs/afs/vlclient.c 		call->ret_alist = alist;
call              459 fs/afs/vlclient.c 		if (call->count == 0)
call              463 fs/afs/vlclient.c 		switch (call->count2) {
call              471 fs/afs/vlclient.c 			return afs_protocol_error(call, -EBADMSG,
call              476 fs/afs/vlclient.c 		afs_extract_to_buf(call, size);
call              477 fs/afs/vlclient.c 		call->unmarshall = 2;
call              481 fs/afs/vlclient.c 		ret = afs_extract_data(call, true);
call              485 fs/afs/vlclient.c 		alist = call->ret_alist;
call              486 fs/afs/vlclient.c 		bp = call->buffer;
call              487 fs/afs/vlclient.c 		switch (call->count2) {
call              490 fs/afs/vlclient.c 				return afs_protocol_error(call, -EBADMSG,
call              497 fs/afs/vlclient.c 				return afs_protocol_error(call, -EBADMSG,
call              503 fs/afs/vlclient.c 			return afs_protocol_error(call, -EBADMSG,
call              510 fs/afs/vlclient.c 		call->count2 = ntohl(*bp++);
call              512 fs/afs/vlclient.c 		call->count--;
call              513 fs/afs/vlclient.c 		if (call->count > 0)
call              518 fs/afs/vlclient.c 		call->count = call->count2;
call              519 fs/afs/vlclient.c 		if (!call->count)
call              521 fs/afs/vlclient.c 		if (call->count > YFS_MAXENDPOINTS)
call              522 fs/afs/vlclient.c 			return afs_protocol_error(call, -EBADMSG,
call              525 fs/afs/vlclient.c 		afs_extract_to_buf(call, 1 * sizeof(__be32));
call              526 fs/afs/vlclient.c 		call->unmarshall = 3;
call              534 fs/afs/vlclient.c 		ret = afs_extract_data(call, true);
call              538 fs/afs/vlclient.c 		bp = call->buffer;
call              541 fs/afs/vlclient.c 		call->count2 = ntohl(*bp++);
call              542 fs/afs/vlclient.c 		switch (call->count2) {
call              550 fs/afs/vlclient.c 			return afs_protocol_error(call, -EBADMSG,
call              554 fs/afs/vlclient.c 		if (call->count > 1)
call              556 fs/afs/vlclient.c 		afs_extract_to_buf(call, size);
call              557 fs/afs/vlclient.c 		call->unmarshall = 4;
call              561 fs/afs/vlclient.c 		ret = afs_extract_data(call, true);
call              565 fs/afs/vlclient.c 		bp = call->buffer;
call              566 fs/afs/vlclient.c 		switch (call->count2) {
call              569 fs/afs/vlclient.c 				return afs_protocol_error(call, -EBADMSG,
call              575 fs/afs/vlclient.c 				return afs_protocol_error(call, -EBADMSG,
call              580 fs/afs/vlclient.c 			return afs_protocol_error(call, -EBADMSG,
call              587 fs/afs/vlclient.c 		call->count--;
call              588 fs/afs/vlclient.c 		if (call->count > 0)
call              592 fs/afs/vlclient.c 		afs_extract_discard(call, 0);
call              593 fs/afs/vlclient.c 		call->unmarshall = 5;
call              597 fs/afs/vlclient.c 		ret = afs_extract_data(call, false);
call              600 fs/afs/vlclient.c 		call->unmarshall = 6;
call              627 fs/afs/vlclient.c 	struct afs_call *call;
call              633 fs/afs/vlclient.c 	call = afs_alloc_flat_call(net, &afs_YFSVLGetEndpoints,
call              636 fs/afs/vlclient.c 	if (!call)
call              639 fs/afs/vlclient.c 	call->key = vc->key;
call              640 fs/afs/vlclient.c 	call->ret_alist = NULL;
call              641 fs/afs/vlclient.c 	call->max_lifespan = AFS_VL_MAX_LIFESPAN;
call              644 fs/afs/vlclient.c 	bp = call->request;
call              649 fs/afs/vlclient.c 	trace_afs_make_vl_call(call);
call              650 fs/afs/vlclient.c 	afs_make_call(&vc->ac, call, GFP_KERNEL);
call              651 fs/afs/vlclient.c 	return (struct afs_addr_list *)afs_wait_for_call_to_complete(call, &vc->ac);
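
Each deliver function above is a resumable state machine: call->unmarshall records the current phase, afs_extract_to_tmp()/afs_extract_to_buf() say how many bytes that phase needs, and afs_extract_data() simply returns when the data has not all arrived, to be retried on the next notification. The toy parser below shows the same shape in plain C, fed a reply in two fragments so it has to stop and resume; every name in it is invented and the error handling is reduced to a single bounds check.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Phase 0: extract a be32 payload length.  Phase 1: extract that many
 * bytes of payload.  Mirrors the call->unmarshall idiom above. */
struct parser {
	int		unmarshall;
	uint32_t	tmp;
	unsigned char	buf[64];
	size_t		want;		/* bytes the current phase needs */
	size_t		have;		/* bytes collected so far for it */
};

/* Copy as much as is available into the phase buffer; 1 = phase satisfied. */
static int extract(struct parser *p, const unsigned char **data, size_t *len)
{
	size_t n = p->want - p->have;

	if (n > *len)
		n = *len;
	memcpy(p->buf + p->have, *data, n);
	p->have += n;
	*data += n;
	*len -= n;
	return p->have == p->want;
}

/* Returns 1 when the whole reply is parsed, 0 if more data is needed,
 * -1 on a bogus count. */
static int deliver(struct parser *p, const unsigned char *data, size_t len)
{
	switch (p->unmarshall) {
	case 0:
		p->want = 4;
		p->have = 0;
		p->unmarshall++;
		/* Fall through */
	case 1:
		if (!extract(p, &data, &len))
			return 0;
		memcpy(&p->tmp, p->buf, 4);
		p->want = ntohl(p->tmp);
		if (p->want > sizeof(p->buf))
			return -1;	/* real code calls afs_protocol_error() */
		p->have = 0;
		p->unmarshall++;
		/* Fall through */
	case 2:
		if (!extract(p, &data, &len))
			return 0;
		printf("payload: %.*s\n", (int)p->want, p->buf);
		p->unmarshall++;
		/* Fall through */
	case 3:
		break;
	}
	return 1;
}

int main(void)
{
	static const unsigned char wire[] = { 0, 0, 0, 5, 'h', 'e', 'l', 'l', 'o' };
	struct parser p = { 0 };

	/* Deliver the reply in two fragments, as the transport might. */
	printf("first fragment complete?  %d\n", deliver(&p, wire, 6));
	printf("second fragment complete? %d\n", deliver(&p, wire + 6, 3));
	return 0;
}
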
call               20 fs/afs/yfsclient.c static inline void afs_use_fs_server(struct afs_call *call, struct afs_cb_interest *cbi)
call               22 fs/afs/yfsclient.c 	call->cbi = afs_get_cb_interest(cbi);
call              147 fs/afs/yfsclient.c static void yfs_check_req(struct afs_call *call, __be32 *bp)
call              149 fs/afs/yfsclient.c 	size_t len = (void *)bp - call->request;
call              151 fs/afs/yfsclient.c 	if (len > call->request_size)
call              153 fs/afs/yfsclient.c 		       call->type->name, len, call->request_size);
call              154 fs/afs/yfsclient.c 	else if (len < call->request_size)
call              156 fs/afs/yfsclient.c 			   call->type->name, len, call->request_size);
call              183 fs/afs/yfsclient.c 				     struct afs_call *call,
call              232 fs/afs/yfsclient.c 	ret = afs_protocol_error(call, -EBADMSG, afs_eproto_bad_status);
call              240 fs/afs/yfsclient.c 				   struct afs_call *call,
call              247 fs/afs/yfsclient.c 	cb_expiry = call->reply_time;
call              340 fs/afs/yfsclient.c static int yfs_deliver_fs_status_cb_and_volsync(struct afs_call *call)
call              345 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call              350 fs/afs/yfsclient.c 	bp = call->buffer;
call              351 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call              354 fs/afs/yfsclient.c 	xdr_decode_YFSCallBack(&bp, call, call->out_scb);
call              355 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call              365 fs/afs/yfsclient.c static int yfs_deliver_status_and_volsync(struct afs_call *call)
call              370 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call              374 fs/afs/yfsclient.c 	bp = call->buffer;
call              375 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call              378 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call              401 fs/afs/yfsclient.c 	struct afs_call *call;
call              408 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus_vnode,
call              414 fs/afs/yfsclient.c 	if (!call) {
call              419 fs/afs/yfsclient.c 	call->key = fc->key;
call              420 fs/afs/yfsclient.c 	call->out_scb = scb;
call              421 fs/afs/yfsclient.c 	call->out_volsync = volsync;
call              424 fs/afs/yfsclient.c 	bp = call->request;
call              428 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call              430 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call              431 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call              432 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call              433 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              434 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              440 fs/afs/yfsclient.c static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
call              442 fs/afs/yfsclient.c 	struct afs_read *req = call->read_request;
call              448 fs/afs/yfsclient.c 	       call->unmarshall, iov_iter_count(&call->iter), req->actual_len);
call              450 fs/afs/yfsclient.c 	switch (call->unmarshall) {
call              455 fs/afs/yfsclient.c 		afs_extract_to_tmp64(call);
call              456 fs/afs/yfsclient.c 		call->unmarshall++;
call              462 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call              466 fs/afs/yfsclient.c 		req->actual_len = be64_to_cpu(call->tmp64);
call              472 fs/afs/yfsclient.c 		call->unmarshall++;
call              480 fs/afs/yfsclient.c 		call->bvec[0].bv_len = size;
call              481 fs/afs/yfsclient.c 		call->bvec[0].bv_offset = req->offset;
call              482 fs/afs/yfsclient.c 		call->bvec[0].bv_page = req->pages[req->index];
call              483 fs/afs/yfsclient.c 		iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
call              490 fs/afs/yfsclient.c 		       iov_iter_count(&call->iter), req->remain);
call              492 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call              495 fs/afs/yfsclient.c 		req->remain -= call->bvec[0].bv_len;
call              496 fs/afs/yfsclient.c 		req->offset += call->bvec[0].bv_len;
call              510 fs/afs/yfsclient.c 		afs_extract_discard(call, req->actual_len - req->len);
call              511 fs/afs/yfsclient.c 		call->unmarshall = 3;
call              516 fs/afs/yfsclient.c 		       iov_iter_count(&call->iter), req->actual_len - req->len);
call              518 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call              523 fs/afs/yfsclient.c 		call->unmarshall = 4;
call              524 fs/afs/yfsclient.c 		afs_extract_to_buf(call,
call              532 fs/afs/yfsclient.c 		ret = afs_extract_data(call, false);
call              536 fs/afs/yfsclient.c 		bp = call->buffer;
call              537 fs/afs/yfsclient.c 		ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call              540 fs/afs/yfsclient.c 		xdr_decode_YFSCallBack(&bp, call, call->out_scb);
call              541 fs/afs/yfsclient.c 		xdr_decode_YFSVolSync(&bp, call->out_volsync);
call              543 fs/afs/yfsclient.c 		req->data_version = call->out_scb->status.data_version;
call              544 fs/afs/yfsclient.c 		req->file_size = call->out_scb->status.size;
call              546 fs/afs/yfsclient.c 		call->unmarshall++;
call              568 fs/afs/yfsclient.c static void yfs_fetch_data_destructor(struct afs_call *call)
call              570 fs/afs/yfsclient.c 	afs_put_read(call->read_request);
call              571 fs/afs/yfsclient.c 	afs_flat_call_destructor(call);
call              591 fs/afs/yfsclient.c 	struct afs_call *call;
call              599 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSFetchData64,
call              606 fs/afs/yfsclient.c 	if (!call)
call              609 fs/afs/yfsclient.c 	call->key = fc->key;
call              610 fs/afs/yfsclient.c 	call->out_scb = scb;
call              611 fs/afs/yfsclient.c 	call->out_volsync = NULL;
call              612 fs/afs/yfsclient.c 	call->read_request = req;
call              615 fs/afs/yfsclient.c 	bp = call->request;
call              621 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call              624 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call              625 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call              626 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call              627 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              628 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              634 fs/afs/yfsclient.c static int yfs_deliver_fs_create_vnode(struct afs_call *call)
call              639 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call              641 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call              646 fs/afs/yfsclient.c 	bp = call->buffer;
call              647 fs/afs/yfsclient.c 	xdr_decode_YFSFid(&bp, call->out_fid);
call              648 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call              651 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
call              654 fs/afs/yfsclient.c 	xdr_decode_YFSCallBack(&bp, call, call->out_scb);
call              655 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call              682 fs/afs/yfsclient.c 	struct afs_call *call;
call              702 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &afs_RXFSCreateFile, reqsz, rplsz);
call              703 fs/afs/yfsclient.c 	if (!call)
call              706 fs/afs/yfsclient.c 	call->key = fc->key;
call              707 fs/afs/yfsclient.c 	call->out_dir_scb = dvnode_scb;
call              708 fs/afs/yfsclient.c 	call->out_fid = newfid;
call              709 fs/afs/yfsclient.c 	call->out_scb = new_scb;
call              712 fs/afs/yfsclient.c 	bp = call->request;
call              719 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call              721 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call              722 fs/afs/yfsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call              723 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call              724 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              725 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              746 fs/afs/yfsclient.c 	struct afs_call *call;
call              765 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXFSMakeDir, reqsz, rplsz);
call              766 fs/afs/yfsclient.c 	if (!call)
call              769 fs/afs/yfsclient.c 	call->key = fc->key;
call              770 fs/afs/yfsclient.c 	call->out_dir_scb = dvnode_scb;
call              771 fs/afs/yfsclient.c 	call->out_fid = newfid;
call              772 fs/afs/yfsclient.c 	call->out_scb = new_scb;
call              775 fs/afs/yfsclient.c 	bp = call->request;
call              781 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call              783 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call              784 fs/afs/yfsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call              785 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call              786 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              787 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              793 fs/afs/yfsclient.c static int yfs_deliver_fs_remove_file2(struct afs_call *call)
call              799 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call              801 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call              805 fs/afs/yfsclient.c 	bp = call->buffer;
call              806 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
call              811 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call              816 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call              838 fs/afs/yfsclient.c 	struct afs_call *call;
call              847 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSRemoveFile2,
call              856 fs/afs/yfsclient.c 	if (!call)
call              859 fs/afs/yfsclient.c 	call->key = fc->key;
call              860 fs/afs/yfsclient.c 	call->out_dir_scb = dvnode_scb;
call              861 fs/afs/yfsclient.c 	call->out_scb = vnode_scb;
call              864 fs/afs/yfsclient.c 	bp = call->request;
call              869 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call              871 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call              872 fs/afs/yfsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call              873 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call              874 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              875 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              881 fs/afs/yfsclient.c static int yfs_deliver_fs_remove(struct afs_call *call)
call              886 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call              888 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call              892 fs/afs/yfsclient.c 	bp = call->buffer;
call              893 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
call              897 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call              926 fs/afs/yfsclient.c 	struct afs_call *call;
call              934 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(
call              942 fs/afs/yfsclient.c 	if (!call)
call              945 fs/afs/yfsclient.c 	call->key = fc->key;
call              946 fs/afs/yfsclient.c 	call->out_dir_scb = dvnode_scb;
call              949 fs/afs/yfsclient.c 	bp = call->request;
call              954 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call              956 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call              957 fs/afs/yfsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call              958 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call              959 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call              960 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call              966 fs/afs/yfsclient.c static int yfs_deliver_fs_link(struct afs_call *call)
call              971 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call              973 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call              977 fs/afs/yfsclient.c 	bp = call->buffer;
call              978 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call              981 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
call              984 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call             1008 fs/afs/yfsclient.c 	struct afs_call *call;
call             1016 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSLink,
call             1025 fs/afs/yfsclient.c 	if (!call)
call             1028 fs/afs/yfsclient.c 	call->key = fc->key;
call             1029 fs/afs/yfsclient.c 	call->out_dir_scb = dvnode_scb;
call             1030 fs/afs/yfsclient.c 	call->out_scb = vnode_scb;
call             1033 fs/afs/yfsclient.c 	bp = call->request;
call             1039 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1041 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1042 fs/afs/yfsclient.c 	trace_afs_make_fs_call1(call, &vnode->fid, name);
call             1043 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1044 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1045 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1051 fs/afs/yfsclient.c static int yfs_deliver_fs_symlink(struct afs_call *call)
call             1056 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call             1058 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call             1063 fs/afs/yfsclient.c 	bp = call->buffer;
call             1064 fs/afs/yfsclient.c 	xdr_decode_YFSFid(&bp, call->out_fid);
call             1065 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call             1068 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
call             1071 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call             1098 fs/afs/yfsclient.c 	struct afs_call *call;
call             1107 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSSymlink,
call             1118 fs/afs/yfsclient.c 	if (!call)
call             1121 fs/afs/yfsclient.c 	call->key = fc->key;
call             1122 fs/afs/yfsclient.c 	call->out_dir_scb = dvnode_scb;
call             1123 fs/afs/yfsclient.c 	call->out_fid = newfid;
call             1124 fs/afs/yfsclient.c 	call->out_scb = vnode_scb;
call             1127 fs/afs/yfsclient.c 	bp = call->request;
call             1134 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1136 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1137 fs/afs/yfsclient.c 	trace_afs_make_fs_call1(call, &dvnode->fid, name);
call             1138 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1139 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1140 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1146 fs/afs/yfsclient.c static int yfs_deliver_fs_rename(struct afs_call *call)
call             1151 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call             1153 fs/afs/yfsclient.c 	ret = afs_transfer_reply(call);
call             1157 fs/afs/yfsclient.c 	bp = call->buffer;
call             1158 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_dir_scb);
call             1161 fs/afs/yfsclient.c 	ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call             1165 fs/afs/yfsclient.c 	xdr_decode_YFSVolSync(&bp, call->out_volsync);
call             1191 fs/afs/yfsclient.c 	struct afs_call *call;
call             1200 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSRename,
call             1210 fs/afs/yfsclient.c 	if (!call)
call             1213 fs/afs/yfsclient.c 	call->key = fc->key;
call             1214 fs/afs/yfsclient.c 	call->out_dir_scb = orig_dvnode_scb;
call             1215 fs/afs/yfsclient.c 	call->out_scb = new_dvnode_scb;
call             1218 fs/afs/yfsclient.c 	bp = call->request;
call             1225 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1227 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1228 fs/afs/yfsclient.c 	trace_afs_make_fs_call2(call, &orig_dvnode->fid, orig_name, new_name);
call             1229 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1230 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1231 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1253 fs/afs/yfsclient.c 	struct afs_call *call;
call             1275 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64,
call             1283 fs/afs/yfsclient.c 	if (!call)
call             1286 fs/afs/yfsclient.c 	call->key = fc->key;
call             1287 fs/afs/yfsclient.c 	call->mapping = mapping;
call             1288 fs/afs/yfsclient.c 	call->first = first;
call             1289 fs/afs/yfsclient.c 	call->last = last;
call             1290 fs/afs/yfsclient.c 	call->first_offset = offset;
call             1291 fs/afs/yfsclient.c 	call->last_to = to;
call             1292 fs/afs/yfsclient.c 	call->send_pages = true;
call             1293 fs/afs/yfsclient.c 	call->out_scb = scb;
call             1296 fs/afs/yfsclient.c 	bp = call->request;
call             1304 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1306 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1307 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1308 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1309 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1310 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1338 fs/afs/yfsclient.c 	struct afs_call *call;
call             1345 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreData64_as_Status,
call             1352 fs/afs/yfsclient.c 	if (!call)
call             1355 fs/afs/yfsclient.c 	call->key = fc->key;
call             1356 fs/afs/yfsclient.c 	call->out_scb = scb;
call             1359 fs/afs/yfsclient.c 	bp = call->request;
call             1367 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1369 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1370 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1371 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1372 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1373 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1384 fs/afs/yfsclient.c 	struct afs_call *call;
call             1394 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
call             1400 fs/afs/yfsclient.c 	if (!call)
call             1403 fs/afs/yfsclient.c 	call->key = fc->key;
call             1404 fs/afs/yfsclient.c 	call->out_scb = scb;
call             1407 fs/afs/yfsclient.c 	bp = call->request;
call             1412 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1414 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1415 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1416 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1417 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1418 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1424 fs/afs/yfsclient.c static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
call             1431 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call             1433 fs/afs/yfsclient.c 	switch (call->unmarshall) {
call             1435 fs/afs/yfsclient.c 		call->unmarshall++;
call             1436 fs/afs/yfsclient.c 		afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus));
call             1442 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1446 fs/afs/yfsclient.c 		bp = call->buffer;
call             1447 fs/afs/yfsclient.c 		xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus);
call             1448 fs/afs/yfsclient.c 		call->unmarshall++;
call             1449 fs/afs/yfsclient.c 		afs_extract_to_tmp(call);
call             1454 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1458 fs/afs/yfsclient.c 		call->count = ntohl(call->tmp);
call             1459 fs/afs/yfsclient.c 		_debug("volname length: %u", call->count);
call             1460 fs/afs/yfsclient.c 		if (call->count >= AFSNAMEMAX)
call             1461 fs/afs/yfsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1463 fs/afs/yfsclient.c 		size = (call->count + 3) & ~3; /* It's padded */
call             1464 fs/afs/yfsclient.c 		afs_extract_to_buf(call, size);
call             1465 fs/afs/yfsclient.c 		call->unmarshall++;
call             1471 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1475 fs/afs/yfsclient.c 		p = call->buffer;
call             1476 fs/afs/yfsclient.c 		p[call->count] = 0;
call             1478 fs/afs/yfsclient.c 		afs_extract_to_tmp(call);
call             1479 fs/afs/yfsclient.c 		call->unmarshall++;
call             1484 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1488 fs/afs/yfsclient.c 		call->count = ntohl(call->tmp);
call             1489 fs/afs/yfsclient.c 		_debug("offline msg length: %u", call->count);
call             1490 fs/afs/yfsclient.c 		if (call->count >= AFSNAMEMAX)
call             1491 fs/afs/yfsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1493 fs/afs/yfsclient.c 		size = (call->count + 3) & ~3; /* It's padded */
call             1494 fs/afs/yfsclient.c 		afs_extract_to_buf(call, size);
call             1495 fs/afs/yfsclient.c 		call->unmarshall++;
call             1501 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1505 fs/afs/yfsclient.c 		p = call->buffer;
call             1506 fs/afs/yfsclient.c 		p[call->count] = 0;
call             1509 fs/afs/yfsclient.c 		afs_extract_to_tmp(call);
call             1510 fs/afs/yfsclient.c 		call->unmarshall++;
call             1515 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1519 fs/afs/yfsclient.c 		call->count = ntohl(call->tmp);
call             1520 fs/afs/yfsclient.c 		_debug("motd length: %u", call->count);
call             1521 fs/afs/yfsclient.c 		if (call->count >= AFSNAMEMAX)
call             1522 fs/afs/yfsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1524 fs/afs/yfsclient.c 		size = (call->count + 3) & ~3; /* It's padded */
call             1525 fs/afs/yfsclient.c 		afs_extract_to_buf(call, size);
call             1526 fs/afs/yfsclient.c 		call->unmarshall++;
call             1532 fs/afs/yfsclient.c 		ret = afs_extract_data(call, false);
call             1536 fs/afs/yfsclient.c 		p = call->buffer;
call             1537 fs/afs/yfsclient.c 		p[call->count] = 0;
call             1540 fs/afs/yfsclient.c 		call->unmarshall++;
call             1568 fs/afs/yfsclient.c 	struct afs_call *call;
call             1574 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSGetVolumeStatus,
call             1581 fs/afs/yfsclient.c 	if (!call)
call             1584 fs/afs/yfsclient.c 	call->key = fc->key;
call             1585 fs/afs/yfsclient.c 	call->out_volstatus = vs;
call             1588 fs/afs/yfsclient.c 	bp = call->request;
call             1592 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1594 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1595 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1596 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1597 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1598 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1640 fs/afs/yfsclient.c 	struct afs_call *call;
call             1646 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSSetLock,
call             1652 fs/afs/yfsclient.c 	if (!call)
call             1655 fs/afs/yfsclient.c 	call->key = fc->key;
call             1656 fs/afs/yfsclient.c 	call->lvnode = vnode;
call             1657 fs/afs/yfsclient.c 	call->out_scb = scb;
call             1660 fs/afs/yfsclient.c 	bp = call->request;
call             1665 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1667 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1668 fs/afs/yfsclient.c 	trace_afs_make_fs_calli(call, &vnode->fid, type);
call             1669 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1670 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1671 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1680 fs/afs/yfsclient.c 	struct afs_call *call;
call             1686 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSExtendLock,
call             1691 fs/afs/yfsclient.c 	if (!call)
call             1694 fs/afs/yfsclient.c 	call->key = fc->key;
call             1695 fs/afs/yfsclient.c 	call->lvnode = vnode;
call             1696 fs/afs/yfsclient.c 	call->out_scb = scb;
call             1699 fs/afs/yfsclient.c 	bp = call->request;
call             1703 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1705 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1706 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1707 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1708 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1709 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1718 fs/afs/yfsclient.c 	struct afs_call *call;
call             1724 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSReleaseLock,
call             1729 fs/afs/yfsclient.c 	if (!call)
call             1732 fs/afs/yfsclient.c 	call->key = fc->key;
call             1733 fs/afs/yfsclient.c 	call->lvnode = vnode;
call             1734 fs/afs/yfsclient.c 	call->out_scb = scb;
call             1737 fs/afs/yfsclient.c 	bp = call->request;
call             1741 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1743 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1744 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             1745 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1746 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1747 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1769 fs/afs/yfsclient.c 	struct afs_call *call;
call             1775 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSFetchStatus,
call             1781 fs/afs/yfsclient.c 	if (!call) {
call             1786 fs/afs/yfsclient.c 	call->key = fc->key;
call             1787 fs/afs/yfsclient.c 	call->out_scb = scb;
call             1788 fs/afs/yfsclient.c 	call->out_volsync = volsync;
call             1791 fs/afs/yfsclient.c 	bp = call->request;
call             1795 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1797 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1798 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, fid);
call             1799 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1800 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1801 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1807 fs/afs/yfsclient.c static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
call             1814 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call             1816 fs/afs/yfsclient.c 	switch (call->unmarshall) {
call             1818 fs/afs/yfsclient.c 		afs_extract_to_tmp(call);
call             1819 fs/afs/yfsclient.c 		call->unmarshall++;
call             1825 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1829 fs/afs/yfsclient.c 		tmp = ntohl(call->tmp);
call             1830 fs/afs/yfsclient.c 		_debug("status count: %u/%u", tmp, call->count2);
call             1831 fs/afs/yfsclient.c 		if (tmp != call->count2)
call             1832 fs/afs/yfsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1835 fs/afs/yfsclient.c 		call->count = 0;
call             1836 fs/afs/yfsclient.c 		call->unmarshall++;
call             1838 fs/afs/yfsclient.c 		afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus));
call             1842 fs/afs/yfsclient.c 		_debug("extract status array %u", call->count);
call             1843 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1847 fs/afs/yfsclient.c 		bp = call->buffer;
call             1848 fs/afs/yfsclient.c 		scb = &call->out_scb[call->count];
call             1849 fs/afs/yfsclient.c 		ret = xdr_decode_YFSFetchStatus(&bp, call, scb);
call             1853 fs/afs/yfsclient.c 		call->count++;
call             1854 fs/afs/yfsclient.c 		if (call->count < call->count2)
call             1857 fs/afs/yfsclient.c 		call->count = 0;
call             1858 fs/afs/yfsclient.c 		call->unmarshall++;
call             1859 fs/afs/yfsclient.c 		afs_extract_to_tmp(call);
call             1865 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1869 fs/afs/yfsclient.c 		tmp = ntohl(call->tmp);
call             1871 fs/afs/yfsclient.c 		if (tmp != call->count2)
call             1872 fs/afs/yfsclient.c 			return afs_protocol_error(call, -EBADMSG,
call             1874 fs/afs/yfsclient.c 		call->count = 0;
call             1875 fs/afs/yfsclient.c 		call->unmarshall++;
call             1877 fs/afs/yfsclient.c 		afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack));
call             1882 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             1887 fs/afs/yfsclient.c 		bp = call->buffer;
call             1888 fs/afs/yfsclient.c 		scb = &call->out_scb[call->count];
call             1889 fs/afs/yfsclient.c 		xdr_decode_YFSCallBack(&bp, call, scb);
call             1890 fs/afs/yfsclient.c 		call->count++;
call             1891 fs/afs/yfsclient.c 		if (call->count < call->count2)
call             1894 fs/afs/yfsclient.c 		afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
call             1895 fs/afs/yfsclient.c 		call->unmarshall++;
call             1899 fs/afs/yfsclient.c 		ret = afs_extract_data(call, false);
call             1903 fs/afs/yfsclient.c 		bp = call->buffer;
call             1904 fs/afs/yfsclient.c 		xdr_decode_YFSVolSync(&bp, call->out_volsync);
call             1906 fs/afs/yfsclient.c 		call->unmarshall++;
call             1937 fs/afs/yfsclient.c 	struct afs_call *call;
call             1944 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSInlineBulkStatus,
call             1950 fs/afs/yfsclient.c 	if (!call) {
call             1955 fs/afs/yfsclient.c 	call->key = fc->key;
call             1956 fs/afs/yfsclient.c 	call->out_scb = statuses;
call             1957 fs/afs/yfsclient.c 	call->out_volsync = volsync;
call             1958 fs/afs/yfsclient.c 	call->count2 = nr_fids;
call             1961 fs/afs/yfsclient.c 	bp = call->request;
call             1967 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             1969 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             1970 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &fids[0]);
call             1971 fs/afs/yfsclient.c 	afs_set_fc_call(call, fc);
call             1972 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_NOFS);
call             1973 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
call             1979 fs/afs/yfsclient.c static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
call             1981 fs/afs/yfsclient.c 	struct yfs_acl *yacl = call->out_yacl;
call             1987 fs/afs/yfsclient.c 	_enter("{%u}", call->unmarshall);
call             1989 fs/afs/yfsclient.c 	switch (call->unmarshall) {
call             1991 fs/afs/yfsclient.c 		afs_extract_to_tmp(call);
call             1992 fs/afs/yfsclient.c 		call->unmarshall++;
call             1997 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             2001 fs/afs/yfsclient.c 		size = call->count2 = ntohl(call->tmp);
call             2009 fs/afs/yfsclient.c 			acl->size = call->count2;
call             2010 fs/afs/yfsclient.c 			afs_extract_begin(call, acl->data, size);
call             2012 fs/afs/yfsclient.c 			afs_extract_discard(call, size);
call             2014 fs/afs/yfsclient.c 		call->unmarshall++;
call             2019 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             2023 fs/afs/yfsclient.c 		afs_extract_to_tmp(call);
call             2024 fs/afs/yfsclient.c 		call->unmarshall++;
call             2029 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             2033 fs/afs/yfsclient.c 		size = call->count2 = ntohl(call->tmp);
call             2041 fs/afs/yfsclient.c 			acl->size = call->count2;
call             2042 fs/afs/yfsclient.c 			afs_extract_begin(call, acl->data, size);
call             2044 fs/afs/yfsclient.c 			afs_extract_discard(call, size);
call             2046 fs/afs/yfsclient.c 		call->unmarshall++;
call             2051 fs/afs/yfsclient.c 		ret = afs_extract_data(call, true);
call             2055 fs/afs/yfsclient.c 		afs_extract_to_buf(call,
call             2059 fs/afs/yfsclient.c 		call->unmarshall++;
call             2064 fs/afs/yfsclient.c 		ret = afs_extract_data(call, false);
call             2068 fs/afs/yfsclient.c 		bp = call->buffer;
call             2071 fs/afs/yfsclient.c 		ret = xdr_decode_YFSFetchStatus(&bp, call, call->out_scb);
call             2074 fs/afs/yfsclient.c 		xdr_decode_YFSVolSync(&bp, call->out_volsync);
call             2076 fs/afs/yfsclient.c 		call->unmarshall++;
call             2114 fs/afs/yfsclient.c 	struct afs_call *call;
call             2121 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSFetchOpaqueACL,
call             2127 fs/afs/yfsclient.c 	if (!call) {
call             2132 fs/afs/yfsclient.c 	call->key = fc->key;
call             2133 fs/afs/yfsclient.c 	call->out_yacl = yacl;
call             2134 fs/afs/yfsclient.c 	call->out_scb = scb;
call             2135 fs/afs/yfsclient.c 	call->out_volsync = NULL;
call             2138 fs/afs/yfsclient.c 	bp = call->request;
call             2142 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             2144 fs/afs/yfsclient.c 	afs_use_fs_server(call, fc->cbi);
call             2145 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             2146 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_KERNEL);
call             2147 fs/afs/yfsclient.c 	return (struct yfs_acl *)afs_wait_for_call_to_complete(call, &fc->ac);
call             2167 fs/afs/yfsclient.c 	struct afs_call *call;
call             2176 fs/afs/yfsclient.c 	call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
call             2182 fs/afs/yfsclient.c 	if (!call) {
call             2187 fs/afs/yfsclient.c 	call->key = fc->key;
call             2188 fs/afs/yfsclient.c 	call->out_scb = scb;
call             2189 fs/afs/yfsclient.c 	call->out_volsync = NULL;
call             2192 fs/afs/yfsclient.c 	bp = call->request;
call             2200 fs/afs/yfsclient.c 	yfs_check_req(call, bp);
call             2202 fs/afs/yfsclient.c 	trace_afs_make_fs_call(call, &vnode->fid);
call             2203 fs/afs/yfsclient.c 	afs_make_call(&fc->ac, call, GFP_KERNEL);
call             2204 fs/afs/yfsclient.c 	return afs_wait_for_call_to_complete(call, &fc->ac);
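Nearly every yfs_fs_*() wrapper listed above follows the same arc: afs_alloc_flat_call(), stash the output pointers (out_scb, out_volsync, ...) on the call, marshal the request into call->request, check the marshalled length against request_size with yfs_check_req(), then afs_use_fs_server(), afs_make_call() and wait. The recurring "(call->count + 3) & ~3" in the delivery code is XDR's 4-byte padding of opaque data. The sketch below is a hedged, userspace-only rendering of those two ideas; demo_req, demo_check_req and the opcode value are invented, not the kernel's marshalling code.

/* Sketch of a fixed-size request buffer with a marshalled-length check and
 * XDR-style 4-byte padding.  demo_req/demo_check_req are invented names. */
#include <arpa/inet.h>	/* htonl */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* XDR pads opaque data to a multiple of 4 bytes. */
static size_t xdr_pad4(size_t len)
{
	return (len + 3) & ~(size_t)3;
}

struct demo_req {
	void	*request;	/* preallocated, exactly request_size bytes */
	size_t	 request_size;
};

/* Append a 32-bit value in network byte order. */
static uint32_t *emit_u32(uint32_t *bp, uint32_t v)
{
	*bp++ = htonl(v);
	return bp;
}

/* Append an XDR opaque: length word, data, then zero padding to 4 bytes. */
static uint32_t *emit_opaque(uint32_t *bp, const void *data, size_t len)
{
	size_t padded = xdr_pad4(len);

	bp = emit_u32(bp, (uint32_t)len);
	memcpy(bp, data, len);
	memset((char *)bp + len, 0, padded - len);
	return bp + padded / 4;
}

/* Analogue of the length check: complain if the amount actually marshalled
 * does not match the size that was allocated up front. */
static void demo_check_req(const struct demo_req *req, const void *bp)
{
	size_t len = (const char *)bp - (const char *)req->request;

	if (len > req->request_size)
		fprintf(stderr, "overflowed request by %zu bytes\n",
			len - req->request_size);
	else if (len < req->request_size)
		fprintf(stderr, "%zu bytes of request unused\n",
			req->request_size - len);
}

int main(void)
{
	const char name[] = "example";	/* 7 bytes -> padded to 8 on the wire */
	struct demo_req req;
	uint32_t *bp;

	req.request_size = 4 /* opcode */ + 4 + xdr_pad4(sizeof(name) - 1);
	req.request = calloc(1, req.request_size);
	if (!req.request)
		return 1;

	bp = req.request;
	bp = emit_u32(bp, 64001);		/* made-up operation number */
	bp = emit_opaque(bp, name, sizeof(name) - 1);
	demo_check_req(&req, bp);		/* should print nothing */

	printf("marshalled %zu bytes\n", (char *)bp - (char *)req.request);
	free(req.request);
	return 0;
}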
call              160 fs/lockd/clntproc.c 	struct nlm_rqst		*call;
call              164 fs/lockd/clntproc.c 	call = nlm_alloc_call(host);
call              165 fs/lockd/clntproc.c 	if (call == NULL)
call              174 fs/lockd/clntproc.c 		nlmclnt_release_call(call);
call              178 fs/lockd/clntproc.c 	nlmclnt_setlockargs(call, fl);
call              179 fs/lockd/clntproc.c 	call->a_callback_data = data;
call              183 fs/lockd/clntproc.c 			call->a_args.block = IS_SETLKW(cmd) ? 1 : 0;
call              184 fs/lockd/clntproc.c 			status = nlmclnt_lock(call, fl);
call              186 fs/lockd/clntproc.c 			status = nlmclnt_unlock(call, fl);
call              188 fs/lockd/clntproc.c 		status = nlmclnt_test(call, fl);
call              204 fs/lockd/clntproc.c 	struct nlm_rqst	*call;
call              207 fs/lockd/clntproc.c 		call = kzalloc(sizeof(*call), GFP_KERNEL);
call              208 fs/lockd/clntproc.c 		if (call != NULL) {
call              209 fs/lockd/clntproc.c 			refcount_set(&call->a_count, 1);
call              210 fs/lockd/clntproc.c 			locks_init_lock(&call->a_args.lock.fl);
call              211 fs/lockd/clntproc.c 			locks_init_lock(&call->a_res.lock.fl);
call              212 fs/lockd/clntproc.c 			call->a_host = nlm_get_host(host);
call              213 fs/lockd/clntproc.c 			return call;
call              223 fs/lockd/clntproc.c void nlmclnt_release_call(struct nlm_rqst *call)
call              225 fs/lockd/clntproc.c 	const struct nlmclnt_operations *nlmclnt_ops = call->a_host->h_nlmclnt_ops;
call              227 fs/lockd/clntproc.c 	if (!refcount_dec_and_test(&call->a_count))
call              230 fs/lockd/clntproc.c 		nlmclnt_ops->nlmclnt_release_call(call->a_callback_data);
call              231 fs/lockd/clntproc.c 	nlmclnt_release_host(call->a_host);
call              232 fs/lockd/clntproc.c 	nlmclnt_release_lockargs(call);
call              233 fs/lockd/clntproc.c 	kfree(call);
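The fs/lockd/clntproc.c entries show the lifetime rules for the NLM request object: nlm_alloc_call() zero-allocates it, sets a_count to 1 and takes a host reference, and nlmclnt_release_call() drops a_count, only tearing down the callback data, host and lock arguments on the final put. A rough userspace analogue of that "alloc at refcount 1, free on last put" pattern, using C11 atomics; demo_rqst and friends are invented names, not the lockd API.

/* Userspace sketch of a refcounted request object. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_rqst {
	atomic_int	a_count;
	void		*a_callback_data;	/* optional per-caller data */
};

static struct demo_rqst *demo_alloc_call(void)
{
	struct demo_rqst *call = calloc(1, sizeof(*call));

	if (call)
		atomic_init(&call->a_count, 1);	/* caller holds the first ref */
	return call;
}

static void demo_get_call(struct demo_rqst *call)
{
	atomic_fetch_add_explicit(&call->a_count, 1, memory_order_relaxed);
}

static void demo_release_call(struct demo_rqst *call)
{
	/* Only the thread that drops the count to zero tears it down. */
	if (atomic_fetch_sub_explicit(&call->a_count, 1,
				      memory_order_acq_rel) != 1)
		return;
	free(call->a_callback_data);
	free(call);
}

int main(void)
{
	struct demo_rqst *call = demo_alloc_call();

	if (!call)
		return 1;
	demo_get_call(call);		/* e.g. handed to an async completion */
	demo_release_call(call);	/* original owner's put */
	demo_release_call(call);	/* async side's put frees it */
	puts("done");
	return 0;
}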
call              293 fs/lockd/svc4proc.c 	struct nlm_rqst	*call;
call              302 fs/lockd/svc4proc.c 	call = nlm_alloc_call(host);
call              304 fs/lockd/svc4proc.c 	if (call == NULL)
call              307 fs/lockd/svc4proc.c 	stat = func(rqstp, &call->a_res);
call              309 fs/lockd/svc4proc.c 		nlmsvc_release_call(call);
call              313 fs/lockd/svc4proc.c 	call->a_flags = RPC_TASK_ASYNC;
call              314 fs/lockd/svc4proc.c 	if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0)
call               47 fs/lockd/svclock.c static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock);
call               48 fs/lockd/svclock.c static void nlmsvc_freegrantargs(struct nlm_rqst *call);
call              221 fs/lockd/svclock.c 	struct nlm_rqst		*call = NULL;
call              223 fs/lockd/svclock.c 	call = nlm_alloc_call(host);
call              224 fs/lockd/svclock.c 	if (call == NULL)
call              235 fs/lockd/svclock.c 	if (!nlmsvc_setgrantargs(call, lock))
call              239 fs/lockd/svclock.c 	call->a_args.lock.fl.fl_flags |= FL_SLEEP;
call              240 fs/lockd/svclock.c 	call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations;
call              241 fs/lockd/svclock.c 	nlmclnt_next_cookie(&call->a_args.cookie);
call              255 fs/lockd/svclock.c 	block->b_call = call;
call              256 fs/lockd/svclock.c 	call->a_flags   = RPC_TASK_ASYNC;
call              257 fs/lockd/svclock.c 	call->a_block = block;
call              264 fs/lockd/svclock.c 	nlmsvc_release_call(call);
call              426 fs/lockd/svclock.c static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock)
call              428 fs/lockd/svclock.c 	locks_copy_lock(&call->a_args.lock.fl, &lock->fl);
call              429 fs/lockd/svclock.c 	memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh));
call              430 fs/lockd/svclock.c 	call->a_args.lock.caller = utsname()->nodename;
call              431 fs/lockd/svclock.c 	call->a_args.lock.oh.len = lock->oh.len;
call              434 fs/lockd/svclock.c 	call->a_args.lock.oh.data = call->a_owner;
call              435 fs/lockd/svclock.c 	call->a_args.lock.svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
call              441 fs/lockd/svclock.c 		call->a_args.lock.oh.data = (u8 *) data;
call              444 fs/lockd/svclock.c 	memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len);
call              448 fs/lockd/svclock.c static void nlmsvc_freegrantargs(struct nlm_rqst *call)
call              450 fs/lockd/svclock.c 	if (call->a_args.lock.oh.data != call->a_owner)
call              451 fs/lockd/svclock.c 		kfree(call->a_args.lock.oh.data);
call              453 fs/lockd/svclock.c 	locks_release_private(&call->a_args.lock.fl);
call              889 fs/lockd/svclock.c 	struct nlm_rqst		*call = data;
call              890 fs/lockd/svclock.c 	struct nlm_block	*block = call->a_block;
call              929 fs/lockd/svclock.c 	struct nlm_rqst		*call = data;
call              930 fs/lockd/svclock.c 	nlmsvc_release_block(call->a_block);
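The fs/lockd/svclock.c entries around nlmsvc_setgrantargs()/nlmsvc_freegrantargs() show a small-buffer optimisation: the lock owner handle is copied into the inline a_owner storage when it fits, otherwise into a separately allocated buffer, and the free path only kfree()s when the pointer differs from the inline buffer. A userspace sketch of that idea follows; the names and the inline capacity are invented.

/* Sketch of "use an inline buffer when the data fits, heap otherwise,
 * and free only what was heap-allocated". */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define INLINE_OWNER_LEN 32		/* arbitrary inline capacity */

struct demo_args {
	unsigned char	 a_owner[INLINE_OWNER_LEN];	/* inline storage */
	unsigned char	*oh_data;			/* points at owner bytes */
	size_t		 oh_len;
};

static int demo_set_owner(struct demo_args *args, const void *oh, size_t len)
{
	if (len <= sizeof(args->a_owner)) {
		args->oh_data = args->a_owner;
	} else {
		args->oh_data = malloc(len);
		if (!args->oh_data)
			return -1;
	}
	memcpy(args->oh_data, oh, len);
	args->oh_len = len;
	return 0;
}

static void demo_free_owner(struct demo_args *args)
{
	/* Only free if we fell back to the heap. */
	if (args->oh_data != args->a_owner)
		free(args->oh_data);
	args->oh_data = NULL;
	args->oh_len = 0;
}

int main(void)
{
	struct demo_args args = { 0 };
	char big[100];

	memset(big, 'x', sizeof(big));
	if (demo_set_owner(&args, "short", 5) == 0) {
		printf("short owner inline: %s\n",
		       args.oh_data == args.a_owner ? "yes" : "no");
		demo_free_owner(&args);
	}
	if (demo_set_owner(&args, big, sizeof(big)) == 0) {
		printf("big owner inline: %s\n",
		       args.oh_data == args.a_owner ? "yes" : "no");
		demo_free_owner(&args);
	}
	return 0;
}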
call              306 fs/lockd/svcproc.c void nlmsvc_release_call(struct nlm_rqst *call)
call              308 fs/lockd/svcproc.c 	if (!refcount_dec_and_test(&call->a_count))
call              310 fs/lockd/svcproc.c 	nlmsvc_release_host(call->a_host);
call              311 fs/lockd/svcproc.c 	kfree(call);
call              334 fs/lockd/svcproc.c 	struct nlm_rqst	*call;
call              343 fs/lockd/svcproc.c 	call = nlm_alloc_call(host);
call              345 fs/lockd/svcproc.c 	if (call == NULL)
call              348 fs/lockd/svcproc.c 	stat = func(rqstp, &call->a_res);
call              350 fs/lockd/svcproc.c 		nlmsvc_release_call(call);
call              354 fs/lockd/svcproc.c 	call->a_flags = RPC_TASK_ASYNC;
call              355 fs/lockd/svcproc.c 	if (nlm_async_reply(call, proc, &nlmsvc_callback_ops) < 0)
call              767 fs/nfsd/nfs4callback.c #define PROC(proc, call, argtype, restype)				\
call              769 fs/nfsd/nfs4callback.c 	.p_proc    = NFSPROC4_CB_##call,				\
call              774 fs/nfsd/nfs4callback.c 	.p_statidx = NFSPROC4_CB_##call,				\
call              358 fs/reiserfs/bitmap.c 	PROC_INFO_INC(s, scan_bitmap.call);
call              215 fs/reiserfs/procfs.c 		   SFPF(call),
call              484 fs/reiserfs/reiserfs.h 		stat_cnt_t call;
call              915 include/linux/compat.h asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
call               18 include/linux/delayed_call.h static inline void set_delayed_call(struct delayed_call *call,
call               21 include/linux/delayed_call.h 	call->fn = fn;
call               22 include/linux/delayed_call.h 	call->arg = arg;
call               25 include/linux/delayed_call.h static inline void do_delayed_call(struct delayed_call *call)
call               27 include/linux/delayed_call.h 	if (call->fn)
call               28 include/linux/delayed_call.h 		call->fn(call->arg);
call               31 include/linux/delayed_call.h static inline void clear_delayed_call(struct delayed_call *call)
call               33 include/linux/delayed_call.h 	call->fn = NULL;
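include/linux/delayed_call.h is small enough that its whole contract is visible in the entries above: a function pointer plus argument that the caller arms with set_delayed_call(), fires with do_delayed_call(), or disarms with clear_delayed_call(). The userspace rendering below keeps the same three operations; only demo_release() and main() are additions for the example.

/* Userspace rendering of the delayed_call idea: stash a cleanup callback
 * and its argument now, run it later, or disarm it first. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct delayed_call {
	void	(*fn)(void *);
	void	*arg;
};

static inline void set_delayed_call(struct delayed_call *call,
				    void (*fn)(void *), void *arg)
{
	call->fn = fn;
	call->arg = arg;
}

static inline void do_delayed_call(struct delayed_call *call)
{
	if (call->fn)
		call->fn(call->arg);
}

static inline void clear_delayed_call(struct delayed_call *call)
{
	call->fn = NULL;
}

/* Example callback for the demo below. */
static void demo_release(void *arg)
{
	printf("releasing %s\n", (const char *)arg);
	free(arg);
}

int main(void)
{
	struct delayed_call done = { NULL, NULL };
	char *buf = strdup("a temporary buffer");

	if (!buf)
		return 1;
	set_delayed_call(&done, demo_release, buf);
	/* ... use buf ... */
	do_delayed_call(&done);		/* runs demo_release(buf) */
	clear_delayed_call(&done);	/* disarm */
	do_delayed_call(&done);		/* now does nothing */
	return 0;
}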
call             1630 include/linux/efi.h extern void efi_call_virt_check_flags(unsigned long flags, const char *call);
call               11 include/linux/netfilter/nfnetlink.h 	int (*call)(struct net *net, struct sock *nl, struct sk_buff *skb,
call             1182 include/linux/syscalls.h asmlinkage long sys_socketcall(int call, unsigned long __user *args);
call             1209 include/linux/syscalls.h asmlinkage long sys_ipc(unsigned int call, int first, unsigned long second,
call             1271 include/linux/syscalls.h int ksys_ipc(unsigned int call, int first, unsigned long second,
call             1273 include/linux/syscalls.h int compat_ksys_ipc(u32 call, int first, int second,
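The include/linux/syscalls.h prototypes above (sys_socketcall, sys_ipc and their ksys/compat variants) are multiplexers: a single syscall whose first argument selects the real operation and whose remaining arguments are interpreted per sub-call. A hedged userspace sketch of that dispatch shape, with invented sub-call numbers and handlers rather than the kernel's:

/* Sketch of a socketcall/ipc-style multiplexer: one entry point, first
 * argument picks the sub-operation.  The sub-calls are invented. */
#include <errno.h>
#include <stdio.h>

enum { DEMO_OPEN = 1, DEMO_SEND = 2, DEMO_CLOSE = 3 };

static long demo_open(long a, long b)  { printf("open(%ld,%ld)\n", a, b); return 7; }
static long demo_send(long a, long b)  { printf("send(%ld,%ld)\n", a, b); return b; }
static long demo_close(long a, long b) { (void)b; printf("close(%ld)\n", a); return 0; }

static long demo_multiplex(unsigned int call, long first, long second)
{
	switch (call) {
	case DEMO_OPEN:
		return demo_open(first, second);
	case DEMO_SEND:
		return demo_send(first, second);
	case DEMO_CLOSE:
		return demo_close(first, second);
	default:
		return -EINVAL;	/* unknown sub-call */
	}
}

int main(void)
{
	long fd = demo_multiplex(DEMO_OPEN, 0, 0);

	demo_multiplex(DEMO_SEND, fd, 128);
	demo_multiplex(DEMO_CLOSE, fd, 0);
	printf("bad call -> %ld\n", demo_multiplex(99, 0, 0));
	return 0;
}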
call              289 include/linux/trace_events.h static inline bool bpf_prog_array_valid(struct trace_event_call *call)
call              308 include/linux/trace_events.h 	return !!READ_ONCE(call->prog_array);
call              313 include/linux/trace_events.h trace_event_name(struct trace_event_call *call)
call              315 include/linux/trace_events.h 	if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
call              316 include/linux/trace_events.h 		return call->tp ? call->tp->name : NULL;
call              318 include/linux/trace_events.h 		return call->name;
call              477 include/linux/trace_events.h unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
call              489 include/linux/trace_events.h static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
call              541 include/linux/trace_events.h extern int trace_event_raw_init(struct trace_event_call *call);
call              542 include/linux/trace_events.h extern int trace_define_field(struct trace_event_call *call, const char *type,
call              545 include/linux/trace_events.h extern int trace_add_event_call(struct trace_event_call *call);
call              546 include/linux/trace_events.h extern int trace_remove_event_call(struct trace_event_call *call);
call              547 include/linux/trace_events.h extern int trace_event_get_offsets(struct trace_event_call *call);
call              633 include/linux/trace_events.h 			       struct trace_event_call *call, u64 count,
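The include/linux/trace_events.h entries show trace_event_name() resolving an event's name from one of two places depending on TRACE_EVENT_FL_TRACEPOINT (the attached tracepoint's name versus the call's own name), alongside bpf_prog_array_valid() reading call->prog_array with READ_ONCE(). A compact userspace analogue of the flag-discriminated lookup; the flag, struct and names below are stand-ins, not the tracing structures.

/* Sketch of a flag-discriminated name lookup, loosely modelled on
 * trace_event_name(). */
#include <stdio.h>

#define DEMO_FL_TRACEPOINT	0x1

struct demo_tracepoint {
	const char *name;
};

struct demo_event {
	unsigned int flags;
	union {
		const char		*name;	/* used when the flag is clear */
		struct demo_tracepoint	*tp;	/* used when the flag is set */
	};
};

static const char *demo_event_name(const struct demo_event *ev)
{
	if (ev->flags & DEMO_FL_TRACEPOINT)
		return ev->tp ? ev->tp->name : NULL;
	return ev->name;
}

int main(void)
{
	struct demo_tracepoint tp = { .name = "sched_switch" };
	struct demo_event a = { .flags = DEMO_FL_TRACEPOINT, .tp = &tp };
	struct demo_event b = { .flags = 0, .name = "my_event" };

	printf("%s / %s\n", demo_event_name(&a), demo_event_name(&b));
	return 0;
}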
call              165 include/net/ax25.h 	ax25_address		call;
call               59 include/trace/bpf_probe.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
call               61 include/trace/bpf_probe.h __bpf_trace_##call(void *__data, proto)					\
call               72 include/trace/bpf_probe.h #define __DEFINE_EVENT(template, call, proto, args, size)		\
call               73 include/trace/bpf_probe.h static inline void bpf_test_probe_##call(void)				\
call               75 include/trace/bpf_probe.h 	check_trace_callback_type_##call(__bpf_trace_##template);	\
call               79 include/trace/bpf_probe.h __bpf_trace_tp_map_##call = {						\
call               80 include/trace/bpf_probe.h 	.tp		= &__tracepoint_##call,				\
call               89 include/trace/bpf_probe.h #define DEFINE_EVENT_WRITABLE(template, call, proto, args, size)	\
call               90 include/trace/bpf_probe.h static inline void bpf_test_buffer_##call(void)				\
call               99 include/trace/bpf_probe.h __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
call              102 include/trace/bpf_probe.h #define DEFINE_EVENT(template, call, proto, args)			\
call              103 include/trace/bpf_probe.h 	__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)
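The include/trace/bpf_probe.h entries (like the nfsd PROC() macro earlier in the list) rely on token pasting: DECLARE_EVENT_CLASS() generates a __bpf_trace_##call handler once, and DEFINE_EVENT() stamps out per-event glue that refers back to it. A small userspace sketch of the same trick; the DEMO_* macros and event names are invented.

/* Sketch of DECLARE/DEFINE-style token pasting: a class macro generates a
 * handler body once, an event macro stamps out named wrappers reusing it. */
#include <stdio.h>

#define DEMO_DECLARE_CLASS(class, proto, body)				\
	static void demo_class_##class proto body

#define DEMO_DEFINE_EVENT(class, event, proto, args)			\
	static void demo_trace_##event proto				\
	{								\
		demo_class_##class args;				\
	}

/* One class implementing the common body... */
DEMO_DECLARE_CLASS(io_event, (const char *op, int bytes),
{
	printf("io: %s %d bytes\n", op, bytes);
})

/* ...and two events instantiated from it. */
DEMO_DEFINE_EVENT(io_event, block_read,  (int bytes), ("read", bytes))
DEMO_DEFINE_EVENT(io_event, block_write, (int bytes), ("write", bytes))

int main(void)
{
	demo_trace_block_read(512);
	demo_trace_block_write(4096);
	return 0;
}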
call              464 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, struct iov_iter *iter,
call              467 include/trace/events/afs.h 	    TP_ARGS(call, iter, want_more, ret),
call              471 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              479 include/trace/events/afs.h 		    __entry->call	= call->debug_id;
call              480 include/trace/events/afs.h 		    __entry->state	= call->state;
call              481 include/trace/events/afs.h 		    __entry->unmarshall	= call->unmarshall;
call              488 include/trace/events/afs.h 		      __entry->call,
call              497 include/trace/events/afs.h 	    TP_PROTO(struct rxrpc_call *rxcall, struct afs_call *call),
call              499 include/trace/events/afs.h 	    TP_ARGS(rxcall, call),
call              502 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              508 include/trace/events/afs.h 		    __entry->call	= call->debug_id;
call              509 include/trace/events/afs.h 		    __entry->state	= call->state;
call              510 include/trace/events/afs.h 		    __entry->unmarshall	= call->unmarshall;
call              514 include/trace/events/afs.h 		      __entry->call,
call              519 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call),
call              521 include/trace/events/afs.h 	    TP_ARGS(call),
call              524 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              530 include/trace/events/afs.h 		    __entry->call	= call->debug_id;
call              531 include/trace/events/afs.h 		    __entry->name	= call->type->name;
call              532 include/trace/events/afs.h 		    __entry->op		= call->operation_ID;
call              536 include/trace/events/afs.h 		      __entry->call,
call              542 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, enum afs_call_trace op,
call              545 include/trace/events/afs.h 	    TP_ARGS(call, op, usage, outstanding, where),
call              548 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              556 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              564 include/trace/events/afs.h 		      __entry->call,
call              572 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, const struct afs_fid *fid),
call              574 include/trace/events/afs.h 	    TP_ARGS(call, fid),
call              577 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              583 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              584 include/trace/events/afs.h 		    __entry->op = call->operation_ID;
call              595 include/trace/events/afs.h 		      __entry->call,
call              603 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
call              606 include/trace/events/afs.h 	    TP_ARGS(call, fid, i),
call              609 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              616 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              618 include/trace/events/afs.h 		    __entry->op = call->operation_ID;
call              629 include/trace/events/afs.h 		      __entry->call,
call              638 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
call              641 include/trace/events/afs.h 	    TP_ARGS(call, fid, name),
call              644 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              653 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              654 include/trace/events/afs.h 		    __entry->op = call->operation_ID;
call              667 include/trace/events/afs.h 		      __entry->call,
call              676 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, const struct afs_fid *fid,
call              679 include/trace/events/afs.h 	    TP_ARGS(call, fid, name, name2),
call              682 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              694 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              695 include/trace/events/afs.h 		    __entry->op = call->operation_ID;
call              710 include/trace/events/afs.h 		      __entry->call,
call              720 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call),
call              722 include/trace/events/afs.h 	    TP_ARGS(call),
call              725 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              730 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              731 include/trace/events/afs.h 		    __entry->op = call->operation_ID;
call              735 include/trace/events/afs.h 		      __entry->call,
call              740 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call),
call              742 include/trace/events/afs.h 	    TP_ARGS(call),
call              745 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              752 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              753 include/trace/events/afs.h 		    __entry->rx_call = call->rxcall;
call              754 include/trace/events/afs.h 		    __entry->ret = call->error;
call              755 include/trace/events/afs.h 		    __entry->abort_code = call->abort_code;
call              759 include/trace/events/afs.h 		      __entry->call,
call              766 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, struct msghdr *msg,
call              769 include/trace/events/afs.h 	    TP_ARGS(call, msg, first, last, offset),
call              772 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              782 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              792 include/trace/events/afs.h 		      __entry->call,
call              799 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, pgoff_t first, pgoff_t last,
call              802 include/trace/events/afs.h 	    TP_ARGS(call, first, last, cursor, ret),
call              805 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              813 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              821 include/trace/events/afs.h 		      __entry->call,
call              887 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call,
call              892 include/trace/events/afs.h 	    TP_ARGS(call, from, to, ret, remote_abort),
call              895 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call              903 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call              911 include/trace/events/afs.h 		      __entry->call,
call              991 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, int error, enum afs_eproto_cause cause),
call              993 include/trace/events/afs.h 	    TP_ARGS(call, error, cause),
call              996 include/trace/events/afs.h 		    __field(unsigned int,		call		)
call             1002 include/trace/events/afs.h 		    __entry->call = call ? call->debug_id : 0;
call             1008 include/trace/events/afs.h 		      __entry->call, __entry->error,
call             1013 include/trace/events/afs.h 	    TP_PROTO(unsigned int call, int error, enum afs_io_error where),
call             1015 include/trace/events/afs.h 	    TP_ARGS(call, error, where),
call             1018 include/trace/events/afs.h 		    __field(unsigned int,	call		)
call             1024 include/trace/events/afs.h 		    __entry->call = call;
call             1030 include/trace/events/afs.h 		      __entry->call, __entry->error,
call             1058 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
call             1060 include/trace/events/afs.h 	    TP_ARGS(call, srx),
call             1063 include/trace/events/afs.h 		    __field(unsigned int,			call	)
call             1069 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call             1070 include/trace/events/afs.h 		    __entry->op_id = call->operation_ID;
call             1075 include/trace/events/afs.h 		      __entry->call, __entry->op_id, &__entry->srx.transport)
call             1079 include/trace/events/afs.h 	    TP_PROTO(struct afs_call *call, const uuid_t *uuid),
call             1081 include/trace/events/afs.h 	    TP_ARGS(call, uuid),
call             1084 include/trace/events/afs.h 		    __field(unsigned int,			call	)
call             1090 include/trace/events/afs.h 		    __entry->call = call->debug_id;
call             1091 include/trace/events/afs.h 		    __entry->op_id = call->operation_ID;
call             1096 include/trace/events/afs.h 		      __entry->call, __entry->op_id, &__entry->uuid)
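The include/trace/events/afs.h entries follow the usual TRACE_EVENT layout: TP_PROTO/TP_ARGS give the tracepoint signature, __field() declares what gets stored, the assignment block samples fields (mostly call->debug_id, call->operation_ID and friends) into __entry at the trace site, and the format string prints them later. Stripped of the macro machinery, each event is roughly a record struct plus a fill step and a format step; a hedged plain-C rendering of that idea, with invented struct and function names rather than the tracing API:

/* Plain-C rendering of what one TRACE_EVENT boils down to: a fixed record,
 * a fast-assign step at the trace site, and a formatter run later. */
#include <stdio.h>

/* Stand-in for the object being traced. */
struct demo_call {
	unsigned int	debug_id;
	unsigned int	operation_ID;
	unsigned int	state;
};

/* What the __field() declarations amount to: the stored record. */
struct demo_trace_entry {
	unsigned int	call;
	unsigned int	op;
	unsigned int	state;
};

/* Assign step: copy only what the record needs, at the trace site. */
static void demo_assign(struct demo_trace_entry *entry,
			const struct demo_call *call)
{
	entry->call  = call->debug_id;
	entry->op    = call->operation_ID;
	entry->state = call->state;
}

/* Format step: print the stored record when it is read out. */
static void demo_print(const struct demo_trace_entry *entry)
{
	printf("c=%08x op=%u s=%u\n", entry->call, entry->op, entry->state);
}

int main(void)
{
	struct demo_call call = { .debug_id = 0x1234, .operation_ID = 137,
				  .state = 2 };
	struct demo_trace_entry entry;

	demo_assign(&entry, &call);	/* at the trace site */
	demo_print(&entry);		/* later, from the buffer */
	return 0;
}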
call               29 include/trace/events/bpf_test_run.h #define BPF_TEST_RUN_DEFINE_EVENT(template, call, proto, args, size)	\
call               30 include/trace/events/bpf_test_run.h 	DEFINE_EVENT_WRITABLE(template, call, PARAMS(proto),		\
call               34 include/trace/events/bpf_test_run.h #define BPF_TEST_RUN_DEFINE_EVENT(template, call, proto, args, size)	\
call               35 include/trace/events/bpf_test_run.h 	DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args))
call               85 include/trace/events/nbd.h #define NBD_DEFINE_EVENT(template, call, proto, args, size)		\
call               86 include/trace/events/nbd.h 	DEFINE_EVENT_WRITABLE(template, call, PARAMS(proto),		\
call               90 include/trace/events/nbd.h #define NBD_DEFINE_EVENT(template, call, proto, args, size)		\
call               91 include/trace/events/nbd.h 	DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args))
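Both the bpf_test_run.h and nbd.h entries define a file-local wrapper macro twice, once forwarding to DEFINE_EVENT_WRITABLE and once to plain DEFINE_EVENT, evidently selected by a preprocessor conditional not shown in this listing, so the event declarations themselves stay identical either way. A compact sketch of that compile-time fallback; HAVE_DEMO_FANCY and the DEMO_* macros are invented for illustration.

/* Sketch of a compile-time fallback: forward to an enhanced macro when the
 * build provides it, otherwise to the basic one. */
#include <stdio.h>

#define DEMO_BASIC(name, msg) \
	static void name(void) { printf("%s\n", msg); }

#ifdef HAVE_DEMO_FANCY
#define DEMO_FANCY(name, msg) \
	static void name(void) { printf("[fancy] %s\n", msg); }
#define DEMO_EVENT(name, msg)	DEMO_FANCY(name, msg)
#else
#define DEMO_EVENT(name, msg)	DEMO_BASIC(name, msg)	/* fallback */
#endif

/* The declaration site is identical either way. */
DEMO_EVENT(demo_hello, "hello")

int main(void)
{
	demo_hello();
	return 0;
}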
call              615 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              623 include/trace/events/rxrpc.h 		    __entry->call = call_debug_id;
call              631 include/trace/events/rxrpc.h 		      __entry->call,
call              744 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call),
call              746 include/trace/events/rxrpc.h 	    TP_ARGS(call),
call              749 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              756 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call              757 include/trace/events/rxrpc.h 		    __entry->compl = call->completion;
call              758 include/trace/events/rxrpc.h 		    __entry->error = call->error;
call              759 include/trace/events/rxrpc.h 		    __entry->abort_code = call->abort_code;
call              763 include/trace/events/rxrpc.h 		      __entry->call,
call              770 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_transmit_trace why),
call              772 include/trace/events/rxrpc.h 	    TP_ARGS(call, why),
call              775 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              783 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call              785 include/trace/events/rxrpc.h 		    __entry->tx_hard_ack = call->tx_hard_ack;
call              786 include/trace/events/rxrpc.h 		    __entry->tx_top = call->tx_top;
call              787 include/trace/events/rxrpc.h 		    __entry->tx_winsize = call->tx_winsize;
call              791 include/trace/events/rxrpc.h 		      __entry->call,
call              799 include/trace/events/rxrpc.h 	    TP_PROTO(unsigned int call, rxrpc_seq_t seq,
call              802 include/trace/events/rxrpc.h 	    TP_ARGS(call, seq, serial, flags, anno),
call              805 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              813 include/trace/events/rxrpc.h 		    __entry->call = call;
call              821 include/trace/events/rxrpc.h 		      __entry->call,
call              829 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call,
call              833 include/trace/events/rxrpc.h 	    TP_ARGS(call, serial, ack_serial, first, prev, reason, n_acks),
call              836 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              846 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call              856 include/trace/events/rxrpc.h 		      __entry->call,
call              866 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
call              869 include/trace/events/rxrpc.h 	    TP_ARGS(call, serial, abort_code),
call              872 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              878 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call              884 include/trace/events/rxrpc.h 		      __entry->call,
call              890 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
call              893 include/trace/events/rxrpc.h 	    TP_ARGS(call, serial, rwind, wake),
call              896 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              903 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call              910 include/trace/events/rxrpc.h 		      __entry->call,
call              923 include/trace/events/rxrpc.h 		    __field(unsigned int,			call	)
call              929 include/trace/events/rxrpc.h 		    __entry->call = call_id;
call              935 include/trace/events/rxrpc.h 		      __entry->call,
call              949 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
call              952 include/trace/events/rxrpc.h 	    TP_ARGS(call, seq, serial, flags, retrans, lose),
call              955 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call              966 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call              967 include/trace/events/rxrpc.h 		    __entry->cid = call->cid;
call              968 include/trace/events/rxrpc.h 		    __entry->call_id = call->call_id;
call              977 include/trace/events/rxrpc.h 		      __entry->call,
call              988 include/trace/events/rxrpc.h 	    TP_PROTO(unsigned int call, rxrpc_serial_t serial,
call              992 include/trace/events/rxrpc.h 	    TP_ARGS(call, serial, ack_first, ack_serial, reason, n_acks),
call              995 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1004 include/trace/events/rxrpc.h 		    __entry->call = call;
call             1013 include/trace/events/rxrpc.h 		      __entry->call,
call             1022 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_receive_trace why,
call             1025 include/trace/events/rxrpc.h 	    TP_ARGS(call, why, serial, seq),
call             1028 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1037 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1041 include/trace/events/rxrpc.h 		    __entry->hard_ack = call->rx_hard_ack;
call             1042 include/trace/events/rxrpc.h 		    __entry->top = call->rx_top;
call             1046 include/trace/events/rxrpc.h 		      __entry->call,
call             1055 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
call             1059 include/trace/events/rxrpc.h 	    TP_ARGS(call, why, seq, offset, len, ret),
call             1062 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1071 include/trace/events/rxrpc.h 		    __entry->call = call ? call->debug_id : 0;
call             1080 include/trace/events/rxrpc.h 		      __entry->call,
call             1089 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_tx_trace why,
call             1092 include/trace/events/rxrpc.h 	    TP_ARGS(call, why, send_serial),
call             1095 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1101 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1107 include/trace/events/rxrpc.h 		      __entry->call,
call             1113 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
call             1117 include/trace/events/rxrpc.h 	    TP_ARGS(call, why, send_serial, resp_serial, rtt, rto),
call             1120 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1129 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1138 include/trace/events/rxrpc.h 		      __entry->call,
call             1147 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
call             1150 include/trace/events/rxrpc.h 	    TP_ARGS(call, why, now),
call             1153 include/trace/events/rxrpc.h 		    __field(unsigned int,			call		)
call             1167 include/trace/events/rxrpc.h 		    __entry->call		= call->debug_id;
call             1170 include/trace/events/rxrpc.h 		    __entry->ack_at		= call->ack_at;
call             1171 include/trace/events/rxrpc.h 		    __entry->ack_lost_at	= call->ack_lost_at;
call             1172 include/trace/events/rxrpc.h 		    __entry->resend_at		= call->resend_at;
call             1173 include/trace/events/rxrpc.h 		    __entry->expect_rx_by	= call->expect_rx_by;
call             1174 include/trace/events/rxrpc.h 		    __entry->expect_req_by	= call->expect_req_by;
call             1175 include/trace/events/rxrpc.h 		    __entry->expect_term_by	= call->expect_term_by;
call             1176 include/trace/events/rxrpc.h 		    __entry->timer		= call->timer.expires;
call             1180 include/trace/events/rxrpc.h 		      __entry->call,
call             1214 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_propose_ack_trace why,
call             1218 include/trace/events/rxrpc.h 	    TP_ARGS(call, why, ack_reason, serial, immediate, background,
call             1222 include/trace/events/rxrpc.h 		    __field(unsigned int,			call		)
call             1232 include/trace/events/rxrpc.h 		    __entry->call	= call->debug_id;
call             1242 include/trace/events/rxrpc.h 		      __entry->call,
call             1252 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq, u8 annotation,
call             1255 include/trace/events/rxrpc.h 	    TP_ARGS(call, seq, annotation, expiry),
call             1258 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1265 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1272 include/trace/events/rxrpc.h 		      __entry->call,
call             1279 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, struct rxrpc_ack_summary *summary,
call             1282 include/trace/events/rxrpc.h 	    TP_ARGS(call, summary, ack_serial, change),
call             1285 include/trace/events/rxrpc.h 		    __field(unsigned int,			call		)
call             1295 include/trace/events/rxrpc.h 		    __entry->call	= call->debug_id;
call             1297 include/trace/events/rxrpc.h 		    __entry->hard_ack	= call->tx_hard_ack;
call             1298 include/trace/events/rxrpc.h 		    __entry->top	= call->tx_top;
call             1299 include/trace/events/rxrpc.h 		    __entry->lowest_nak	= call->acks_lowest_nak;
call             1305 include/trace/events/rxrpc.h 		      __entry->call,
call             1324 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call),
call             1326 include/trace/events/rxrpc.h 	    TP_ARGS(call),
call             1329 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1334 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1335 include/trace/events/rxrpc.h 		    __entry->abort_code = call->abort_code;
call             1339 include/trace/events/rxrpc.h 		      __entry->call,
call             1344 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call),
call             1346 include/trace/events/rxrpc.h 	    TP_ARGS(call),
call             1349 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1354 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1355 include/trace/events/rxrpc.h 		    __entry->abort_code = call->abort_code;
call             1359 include/trace/events/rxrpc.h 		      __entry->call,
call             1364 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
call             1367 include/trace/events/rxrpc.h 	    TP_ARGS(call, serial, why),
call             1370 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1376 include/trace/events/rxrpc.h 		    __entry->call = call ? call->debug_id : 0;
call             1382 include/trace/events/rxrpc.h 		      __entry->call,
call             1388 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call),
call             1390 include/trace/events/rxrpc.h 	    TP_ARGS(call),
call             1393 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1400 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1401 include/trace/events/rxrpc.h 		    __entry->user_call_ID = call->user_call_ID;
call             1402 include/trace/events/rxrpc.h 		    __entry->cid = call->cid;
call             1403 include/trace/events/rxrpc.h 		    __entry->call_id = call->call_id;
call             1407 include/trace/events/rxrpc.h 		      __entry->call,
call             1414 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call, int ix),
call             1416 include/trace/events/rxrpc.h 	    TP_ARGS(call, ix),
call             1419 include/trace/events/rxrpc.h 		    __field(unsigned int,		call		)
call             1425 include/trace/events/rxrpc.h 		    __entry->call = call->debug_id;
call             1427 include/trace/events/rxrpc.h 		    memcpy(__entry->anno, call->rxtx_annotations, 64);
call             1431 include/trace/events/rxrpc.h 		      __entry->call,
call             1493 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_call *call),
call             1495 include/trace/events/rxrpc.h 	    TP_ARGS(call),
call             1508 include/trace/events/rxrpc.h 		    __entry->debug_id = call->debug_id;
call             1509 include/trace/events/rxrpc.h 		    __entry->cid = call->cid;
call             1510 include/trace/events/rxrpc.h 		    __entry->call_id = call->call_id;
call             1511 include/trace/events/rxrpc.h 		    __entry->call_serial = call->rx_serial;
call             1512 include/trace/events/rxrpc.h 		    __entry->conn_serial = call->conn->hi_serial;
call             1513 include/trace/events/rxrpc.h 		    __entry->tx_seq = call->tx_hard_ack;
call             1514 include/trace/events/rxrpc.h 		    __entry->rx_seq = call->ackr_seen;
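
Most rxrpc tracepoints above store call->debug_id directly, but a few (the recvmsg and rx_abort-style entries) guard against a NULL call with call ? call->debug_id : 0. A trivial illustration of that null-safe capture, with a cut-down stand-in for struct rxrpc_call.

#include <stdio.h>
#include <stddef.h>

struct rxrpc_call { unsigned int debug_id; };	/* stand-in only */

/* 0 serves as the "no call" marker, matching the ?: pattern above. */
static unsigned int call_debug_id(const struct rxrpc_call *call)
{
	return call ? call->debug_id : 0;
}

int main(void)
{
	struct rxrpc_call c = { .debug_id = 7 };

	printf("%u %u\n", call_debug_id(&c), call_debug_id(NULL));
	return 0;
}
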
call               46 include/trace/events/sunrpc.h DEFINE_RPC_STATUS_EVENT(call);
call               31 include/trace/perf.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
call               33 include/trace/perf.h perf_trace_##call(void *__data, proto)					\
call               36 include/trace/perf.h 	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
call               37 include/trace/perf.h 	struct trace_event_raw_##call *entry;				\
call               46 include/trace/perf.h 	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
call               79 include/trace/perf.h #define DEFINE_EVENT(template, call, proto, args)			\
call               80 include/trace/perf.h static inline void perf_test_probe_##call(void)				\
call               82 include/trace/perf.h 	check_trace_callback_type_##call(perf_trace_##template);	\
call              203 include/trace/trace_events.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
call              204 include/trace/trace_events.h 	struct trace_event_data_offsets_##call {			\
call              344 include/trace/trace_events.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
call              346 include/trace/trace_events.h trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
call              351 include/trace/trace_events.h 	struct trace_event_raw_##call *field;				\
call              364 include/trace/trace_events.h static struct trace_event_functions trace_event_type_funcs_##call = {	\
call              365 include/trace/trace_events.h 	.trace			= trace_raw_output_##call,		\
call              369 include/trace/trace_events.h #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
call              371 include/trace/trace_events.h trace_raw_output_##call(struct trace_iterator *iter, int flags,		\
call              380 include/trace/trace_events.h 	if (entry->type != event_##call.event.type) {			\
call              388 include/trace/trace_events.h 	return trace_output_call(iter, #call, print);			\
call              390 include/trace/trace_events.h static struct trace_event_functions trace_event_type_funcs_##call = {	\
call              391 include/trace/trace_events.h 	.trace			= trace_raw_output_##call,		\
call              448 include/trace/trace_events.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
call              450 include/trace/trace_events.h trace_event_define_fields_##call(struct trace_event_call *event_call)	\
call              452 include/trace/trace_events.h 	struct trace_event_raw_##call field;				\
call              527 include/trace/trace_events.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
call              528 include/trace/trace_events.h static inline notrace int trace_event_get_offsets_##call(		\
call              529 include/trace/trace_events.h 	struct trace_event_data_offsets_##call *__data_offsets, proto)	\
call              533 include/trace/trace_events.h 	struct trace_event_raw_##call __maybe_unused *entry;		\
call              641 include/trace/trace_events.h #define _TRACE_PERF_PROTO(call, proto)					\
call              643 include/trace/trace_events.h 	perf_trace_##call(void *__data, proto);
call              645 include/trace/trace_events.h #define _TRACE_PERF_INIT(call)						\
call              646 include/trace/trace_events.h 	.perf_probe		= perf_trace_##call,
call              649 include/trace/trace_events.h #define _TRACE_PERF_PROTO(call, proto)
call              650 include/trace/trace_events.h #define _TRACE_PERF_INIT(call)
call              696 include/trace/trace_events.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
call              699 include/trace/trace_events.h trace_event_raw_event_##call(void *__data, proto)			\
call              702 include/trace/trace_events.h 	struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
call              704 include/trace/trace_events.h 	struct trace_event_raw_##call *entry;				\
call              710 include/trace/trace_events.h 	__data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
call              731 include/trace/trace_events.h #define DEFINE_EVENT(template, call, proto, args)			\
call              732 include/trace/trace_events.h static inline void ftrace_test_probe_##call(void)			\
call              734 include/trace/trace_events.h 	check_trace_callback_type_##call(trace_event_raw_event_##template); \
call              759 include/trace/trace_events.h #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
call              760 include/trace/trace_events.h _TRACE_PERF_PROTO(call, PARAMS(proto));					\
call              761 include/trace/trace_events.h static char print_fmt_##call[] = print;					\
call              762 include/trace/trace_events.h static struct trace_event_class __used __refdata event_class_##call = { \
call              764 include/trace/trace_events.h 	.define_fields		= trace_event_define_fields_##call,	\
call              765 include/trace/trace_events.h 	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
call              767 include/trace/trace_events.h 	.probe			= trace_event_raw_event_##call,		\
call              769 include/trace/trace_events.h 	_TRACE_PERF_INIT(call)						\
call              773 include/trace/trace_events.h #define DEFINE_EVENT(template, call, proto, args)			\
call              775 include/trace/trace_events.h static struct trace_event_call __used event_##call = {			\
call              778 include/trace/trace_events.h 		.tp			= &__tracepoint_##call,		\
call              785 include/trace/trace_events.h __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
call              788 include/trace/trace_events.h #define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
call              790 include/trace/trace_events.h static char print_fmt_##call[] = print;					\
call              792 include/trace/trace_events.h static struct trace_event_call __used event_##call = {			\
call              795 include/trace/trace_events.h 		.tp			= &__tracepoint_##call,		\
call              797 include/trace/trace_events.h 	.event.funcs		= &trace_event_type_funcs_##call,	\
call              798 include/trace/trace_events.h 	.print_fmt		= print_fmt_##call,			\
call              802 include/trace/trace_events.h __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
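
The include/trace/trace_events.h entries show the same DECLARE_EVENT_CLASS/DEFINE_EVENT descriptions being expanded several times with different macro bodies, so one event description yields offset structs, output functions, field definitions, probes and the final trace_event_call. A compact X-macro demonstration of that multi-pass idea; it is a simplification in that the kernel re-includes the header and #undef/#defines the same macro names between passes, whereas this sketch passes different generator macros to one list.

#include <stdio.h>

/* One description, written once. */
#define DEMO_EVENTS(X) \
	X(sched_switch) \
	X(sched_wakeup)

/* Pass 1: generate an id enum. */
#define AS_ENUM(name) EVENT_##name,
enum demo_event_id { DEMO_EVENTS(AS_ENUM) DEMO_EVENT_MAX };

/* Pass 2: generate a name table from the same description. */
#define AS_NAME(name) [EVENT_##name] = #name,
static const char *demo_event_names[] = { DEMO_EVENTS(AS_NAME) };

/* Pass 3: generate a stub "probe" per event. */
#define AS_PROBE(name) \
	static void trace_event_raw_event_##name(void) \
	{ printf("probe for %s\n", #name); }
DEMO_EVENTS(AS_PROBE)

int main(void)
{
	for (int i = 0; i < DEMO_EVENT_MAX; i++)
		printf("%d -> %s\n", i, demo_event_names[i]);
	trace_event_raw_event_sched_switch();
	trace_event_raw_event_sched_wakeup();
	return 0;
}
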
call               44 include/xen/arm/hypercall.h long privcmd_call(unsigned call, unsigned long a1,
call               20 ipc/syscall.c  int ksys_ipc(unsigned int call, int first, unsigned long second,
call               25 ipc/syscall.c  	version = call >> 16; /* hack for backward compatibility */
call               26 ipc/syscall.c  	call &= 0xffff;
call               28 ipc/syscall.c  	switch (call) {
call              110 ipc/syscall.c  SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
call              113 ipc/syscall.c  	return ksys_ipc(call, first, second, third, ptr, fifth);
call              130 ipc/syscall.c  int compat_ksys_ipc(u32 call, int first, int second,
call              136 ipc/syscall.c  	version = call >> 16; /* hack for backward compatibility */
call              137 ipc/syscall.c  	call &= 0xffff;
call              139 ipc/syscall.c  	switch (call) {
call              205 ipc/syscall.c  COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
call              208 ipc/syscall.c  	return compat_ksys_ipc(call, first, second, third, ptr, fifth);
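
The ipc/syscall.c entries show ksys_ipc() splitting its multiplexer argument: the high 16 bits carry a compatibility version, the low 16 bits select the IPC operation. A standalone sketch of that decode step; only a handful of the real operation codes are shown.

#include <stdio.h>

/* A few of the classic sys_ipc() multiplexer operations. */
enum { SEMOP = 1, SEMGET = 2, MSGSND = 11, SHMAT = 21 };

static const char *ipc_decode(unsigned int call)
{
	unsigned int version = call >> 16;	/* backward-compat hack */

	call &= 0xffff;				/* low 16 bits select the op */
	(void)version;				/* only a few ops consult it */

	switch (call) {
	case SEMOP:	return "semop";
	case SEMGET:	return "semget";
	case MSGSND:	return "msgsnd";
	case SHMAT:	return "shmat";
	default:	return "unknown";
	}
}

int main(void)
{
	/* version 1 in the high half, SHMAT in the low half */
	printf("%s\n", ipc_decode((1u << 16) | SHMAT));
	return 0;
}
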
call             8706 kernel/events/core.c 			       struct trace_event_call *call, u64 count,
call             8710 kernel/events/core.c 	if (bpf_prog_array_valid(call)) {
call             8712 kernel/events/core.c 		if (!trace_call_bpf(call, raw_data) || hlist_empty(head)) {
call             8717 kernel/events/core.c 	perf_tp_event(call->event.type, count, raw_data, size, regs, head,
call             2897 kernel/printk/printk.c 	initcall_t call;
call             2910 kernel/printk/printk.c 		call = initcall_from_entry(ce);
call             2911 kernel/printk/printk.c 		trace_initcall_start(call);
call             2912 kernel/printk/printk.c 		ret = call();
call             2913 kernel/printk/printk.c 		trace_initcall_finish(call, ret);
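
The printk lines walk a table of initcalls, bracketing each invocation with trace_initcall_start()/trace_initcall_finish(). A user-space analogue of that function-pointer loop; the trace_* helpers are replaced by prints and the initcalls are invented.

#include <stdio.h>

typedef int (*initcall_t)(void);

static int init_a(void) { return 0; }
static int init_b(void) { return -1; }

/* Stand-ins for trace_initcall_start()/trace_initcall_finish(). */
static void trace_start(int idx)           { printf("initcall %d start\n", idx); }
static void trace_finish(int idx, int ret) { printf("initcall %d -> %d\n", idx, ret); }

int main(void)
{
	initcall_t table[] = { init_a, init_b };

	for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		initcall_t call = table[i];
		int ret;

		trace_start(i);
		ret = call();		/* invoke through the function pointer */
		trace_finish(i, ret);
	}
	return 0;
}
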
call              304 kernel/rcu/rcutorture.c 	call_rcu_func_t call;
call              452 kernel/rcu/rcutorture.c 	.call		= call_rcu,
call              498 kernel/rcu/rcutorture.c 	.call		= call_rcu_busted,
call              589 kernel/rcu/rcutorture.c 	.call		= srcu_torture_call,
call              621 kernel/rcu/rcutorture.c 	.call		= srcu_torture_call,
call              640 kernel/rcu/rcutorture.c 	.call		= srcu_torture_call,
call              676 kernel/rcu/rcutorture.c 	.call		= call_rcu_tasks,
call             1339 kernel/rcu/rcutorture.c 	if (cur_ops->call) {
call             1343 kernel/rcu/rcutorture.c 			cur_ops->call(rhp, rcu_torture_timer_cb);
call             1652 kernel/rcu/rcutorture.c 	cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
call             1780 kernel/rcu/rcutorture.c 	if  (cur_ops->call && cur_ops->sync && cur_ops->cb_barrier) {
call             1790 kernel/rcu/rcutorture.c 		cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
call             1850 kernel/rcu/rcutorture.c 	if (!cur_ops->call)
call             1892 kernel/rcu/rcutorture.c 		cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
call             2032 kernel/rcu/rcutorture.c 		cur_ops->call(&rcu, rcu_torture_barrier_cbf);
call             2087 kernel/rcu/rcutorture.c 	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
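
The rcutorture entries keep the flavor-specific callback-posting primitive in an ops member named .call, and every caller above checks it for NULL because some flavors do not provide one. A small sketch of that optional-callback ops-table pattern; all names here are invented, only the shape matches.

#include <stdio.h>
#include <stddef.h>

struct cb { int posted; };
typedef void (*call_func_t)(struct cb *cb);

/* Hypothetical torture-ops stand-in: only some flavors supply .call. */
struct torture_ops {
	const char *name;
	call_func_t call;
};

static void post_cb(struct cb *cb) { cb->posted = 1; }

static void maybe_post(const struct torture_ops *ops, struct cb *cb)
{
	if (!ops->call) {		/* mirrors the cur_ops->call checks */
		printf("%s: no call primitive, skipping\n", ops->name);
		return;
	}
	ops->call(cb);
	printf("%s: callback posted\n", ops->name);
}

int main(void)
{
	struct torture_ops with    = { .name = "rcu",    .call = post_cb };
	struct torture_ops without = { .name = "busted", .call = NULL };
	struct cb cb = { 0 };

	maybe_post(&with, &cb);
	maybe_post(&without, &cb);
	return 0;
}
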
call              379 kernel/smp.c   		goto call;
call              386 kernel/smp.c   			goto call;
call              391 kernel/smp.c   call:
call               79 kernel/trace/bpf_trace.c unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
call              114 kernel/trace/bpf_trace.c 	ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);
call              325 kernel/trace/trace.c int call_filter_check_discard(struct trace_event_call *call, void *rec,
call              329 kernel/trace/trace.c 	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
call              330 kernel/trace/trace.c 	    !filter_match_preds(call->filter, rec)) {
call             2765 kernel/trace/trace.c 	struct trace_event_call *call = &event_function;
call             2778 kernel/trace/trace.c 	if (!call_filter_check_discard(call, entry, buffer, event)) {
call             2808 kernel/trace/trace.c 	struct trace_event_call *call = &event_kernel_stack;
call             2867 kernel/trace/trace.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call             2941 kernel/trace/trace.c 	struct trace_event_call *call = &event_user_stack;
call             2975 kernel/trace/trace.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call             3107 kernel/trace/trace.c 	struct trace_event_call *call = &event_bprint;
call             3148 kernel/trace/trace.c 	if (!call_filter_check_discard(call, entry, buffer, event)) {
call             3169 kernel/trace/trace.c 	struct trace_event_call *call = &event_print;
call             3204 kernel/trace/trace.c 	if (!call_filter_check_discard(call, entry, buffer, event)) {
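
The kernel/trace/trace.c sites share one shape: fill in an entry, then commit it only if call_filter_check_discard() reports that the per-event filter did not reject it. A reduced stand-alone sketch of that filter-then-commit shape; the filter is shrunk to a predicate on one field and the commit is just a print.

#include <stdio.h>
#include <stdbool.h>

struct entry { unsigned long ip; };

/* Stand-in for filter_match_preds(): keep only even instruction pointers. */
static bool filter_match(const struct entry *rec)
{
	return (rec->ip & 1) == 0;
}

/* Returns true when the record should be dropped, roughly the role
 * call_filter_check_discard() plays for a filtered event. */
static bool check_discard(const struct entry *rec)
{
	return !filter_match(rec);
}

static void commit(const struct entry *rec)
{
	printf("committed ip=%lu\n", rec->ip);
}

int main(void)
{
	struct entry a = { .ip = 4 }, b = { .ip = 5 };

	if (!check_discard(&a))	/* same "if (!...discard...) commit" shape */
		commit(&a);
	if (!check_discard(&b))
		commit(&b);
	return 0;
}
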
call             1172 kernel/trace/trace.h bool ftrace_event_is_function(struct trace_event_call *call);
call             1370 kernel/trace/trace.h extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
call             1573 kernel/trace/trace.h 			       struct trace_event_call *call,
call             1579 kernel/trace/trace.h trace_find_event_field(struct trace_event_call *call, char *name);
call             1921 kernel/trace/trace.h #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
call             1923 kernel/trace/trace.h 	__aligned(4) event_##call;
call             1925 kernel/trace/trace.h #define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)	\
call             1926 kernel/trace/trace.h 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
call             1929 kernel/trace/trace.h #define FTRACE_ENTRY_PACKED(call, struct_name, id, tstruct, print, filter) \
call             1930 kernel/trace/trace.h 	FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
call             1936 kernel/trace/trace.h int perf_ftrace_event_register(struct trace_event_call *call,
call               33 kernel/trace/trace_branch.c 	struct trace_event_call *call = &event_branch;
call               85 kernel/trace/trace_branch.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call              490 kernel/trace/trace_event_perf.c int perf_ftrace_event_register(struct trace_event_call *call,
call               88 kernel/trace/trace_events.c trace_find_event_field(struct trace_event_call *call, char *name)
call               93 kernel/trace/trace_events.c 	head = trace_get_fields(call);
call              132 kernel/trace/trace_events.c int trace_define_field(struct trace_event_call *call, const char *type,
call              138 kernel/trace/trace_events.c 	if (WARN_ON(!call->class))
call              141 kernel/trace/trace_events.c 	head = trace_get_fields(call);
call              188 kernel/trace/trace_events.c static void trace_destroy_fields(struct trace_event_call *call)
call              193 kernel/trace/trace_events.c 	head = trace_get_fields(call);
call              204 kernel/trace/trace_events.c int trace_event_get_offsets(struct trace_event_call *call)
call              209 kernel/trace/trace_events.c 	head = trace_get_fields(call);
call              218 kernel/trace/trace_events.c int trace_event_raw_init(struct trace_event_call *call)
call              222 kernel/trace/trace_events.c 	id = register_trace_event(&call->event);
call              280 kernel/trace/trace_events.c int trace_event_reg(struct trace_event_call *call,
call              285 kernel/trace/trace_events.c 	WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
call              288 kernel/trace/trace_events.c 		return tracepoint_probe_register(call->tp,
call              289 kernel/trace/trace_events.c 						 call->class->probe,
call              292 kernel/trace/trace_events.c 		tracepoint_probe_unregister(call->tp,
call              293 kernel/trace/trace_events.c 					    call->class->probe,
call              299 kernel/trace/trace_events.c 		return tracepoint_probe_register(call->tp,
call              300 kernel/trace/trace_events.c 						 call->class->perf_probe,
call              301 kernel/trace/trace_events.c 						 call);
call              303 kernel/trace/trace_events.c 		tracepoint_probe_unregister(call->tp,
call              304 kernel/trace/trace_events.c 					    call->class->perf_probe,
call              305 kernel/trace/trace_events.c 					    call);
call              365 kernel/trace/trace_events.c 	struct trace_event_call *call = file->event_call;
call              406 kernel/trace/trace_events.c 			call->class->reg(call, TRACE_REG_UNREGISTER, file);
call              450 kernel/trace/trace_events.c 			ret = call->class->reg(call, TRACE_REG_REGISTER, file);
call              457 kernel/trace/trace_events.c 					"%s\n", trace_event_name(call));
call              736 kernel/trace/trace_events.c 	struct trace_event_call *call;
call              743 kernel/trace/trace_events.c 		call = file->event_call;
call              744 kernel/trace/trace_events.c 		name = trace_event_name(call);
call              746 kernel/trace/trace_events.c 		if (!name || !call->class || !call->class->reg)
call              749 kernel/trace/trace_events.c 		if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
call              754 kernel/trace/trace_events.c 		    strcmp(match, call->class->system) != 0)
call              757 kernel/trace/trace_events.c 		if (sub && strcmp(sub, call->class->system) != 0)
call              900 kernel/trace/trace_events.c 	struct trace_event_call *call;
call              906 kernel/trace/trace_events.c 		call = file->event_call;
call              911 kernel/trace/trace_events.c 		if (call->class && call->class->reg &&
call              912 kernel/trace/trace_events.c 		    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
call              972 kernel/trace/trace_events.c 	struct trace_event_call *call = file->event_call;
call              974 kernel/trace/trace_events.c 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
call              975 kernel/trace/trace_events.c 		seq_printf(m, "%s:", call->class->system);
call              976 kernel/trace/trace_events.c 	seq_printf(m, "%s\n", trace_event_name(call));
call             1098 kernel/trace/trace_events.c 	struct trace_event_call *call;
call             1107 kernel/trace/trace_events.c 		call = file->event_call;
call             1108 kernel/trace/trace_events.c 		if (!trace_event_name(call) || !call->class || !call->class->reg)
call             1111 kernel/trace/trace_events.c 		if (system && strcmp(call->class->system, system->name) != 0)
call             1185 kernel/trace/trace_events.c 	struct trace_event_call *call = event_file_data(m->private);
call             1187 kernel/trace/trace_events.c 	struct list_head *head = trace_get_fields(call);
call             1217 kernel/trace/trace_events.c 	struct trace_event_call *call = event_file_data(m->private);
call             1223 kernel/trace/trace_events.c 		seq_printf(m, "name: %s\n", trace_event_name(call));
call             1224 kernel/trace/trace_events.c 		seq_printf(m, "ID: %d\n", call->event.type);
call             1234 kernel/trace/trace_events.c 			   call->print_fmt);
call             1951 kernel/trace/trace_events.c 	struct trace_event_call *call = file->event_call;
call             1962 kernel/trace/trace_events.c 	if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
call             1963 kernel/trace/trace_events.c 		d_events = event_subsystem_dir(tr, call->class->system, file, parent);
call             1969 kernel/trace/trace_events.c 	name = trace_event_name(call);
call             1976 kernel/trace/trace_events.c 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
call             1981 kernel/trace/trace_events.c 	if (call->event.type && call->class->reg)
call             1983 kernel/trace/trace_events.c 				  (void *)(long)call->event.type,
call             1991 kernel/trace/trace_events.c 	head = trace_get_fields(call);
call             1993 kernel/trace/trace_events.c 		ret = call->class->define_fields(call);
call             2005 kernel/trace/trace_events.c 	if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) {
call             2017 kernel/trace/trace_events.c 	trace_create_file("format", 0444, file->dir, call,
call             2023 kernel/trace/trace_events.c static void remove_event_from_tracers(struct trace_event_call *call)
call             2029 kernel/trace/trace_events.c 		if (file->event_call != call)
call             2043 kernel/trace/trace_events.c static void event_remove(struct trace_event_call *call)
call             2049 kernel/trace/trace_events.c 		if (file->event_call != call)
call             2065 kernel/trace/trace_events.c 	if (call->event.funcs)
call             2066 kernel/trace/trace_events.c 		__unregister_trace_event(&call->event);
call             2067 kernel/trace/trace_events.c 	remove_event_from_tracers(call);
call             2068 kernel/trace/trace_events.c 	list_del(&call->list);
call             2071 kernel/trace/trace_events.c static int event_init(struct trace_event_call *call)
call             2076 kernel/trace/trace_events.c 	name = trace_event_name(call);
call             2080 kernel/trace/trace_events.c 	if (call->class->raw_init) {
call             2081 kernel/trace/trace_events.c 		ret = call->class->raw_init(call);
call             2090 kernel/trace/trace_events.c __register_event(struct trace_event_call *call, struct module *mod)
call             2094 kernel/trace/trace_events.c 	ret = event_init(call);
call             2098 kernel/trace/trace_events.c 	list_add(&call->list, &ftrace_events);
call             2099 kernel/trace/trace_events.c 	call->mod = mod;
call             2126 kernel/trace/trace_events.c static void update_event_printk(struct trace_event_call *call,
call             2133 kernel/trace/trace_events.c 	for (ptr = call->print_fmt; *ptr; ptr++) {
call             2204 kernel/trace/trace_events.c 	struct trace_event_call *call, *p;
call             2211 kernel/trace/trace_events.c 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
call             2213 kernel/trace/trace_events.c 		if (!last_system || call->class->system != last_system) {
call             2216 kernel/trace/trace_events.c 			last_system = call->class->system;
call             2230 kernel/trace/trace_events.c 			if (call->class->system == map[i]->system) {
call             2236 kernel/trace/trace_events.c 				update_event_printk(call, map[i]);
call             2244 kernel/trace/trace_events.c trace_create_new_event(struct trace_event_call *call,
call             2253 kernel/trace/trace_events.c 	file->event_call = call;
call             2265 kernel/trace/trace_events.c __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
call             2269 kernel/trace/trace_events.c 	file = trace_create_new_event(call, tr);
call             2282 kernel/trace/trace_events.c __trace_early_add_new_event(struct trace_event_call *call,
call             2287 kernel/trace/trace_events.c 	file = trace_create_new_event(call, tr);
call             2295 kernel/trace/trace_events.c static void __add_event_to_tracers(struct trace_event_call *call);
call             2298 kernel/trace/trace_events.c int trace_add_event_call(struct trace_event_call *call)
call             2305 kernel/trace/trace_events.c 	ret = __register_event(call, NULL);
call             2307 kernel/trace/trace_events.c 		__add_event_to_tracers(call);
call             2317 kernel/trace/trace_events.c static void __trace_remove_event_call(struct trace_event_call *call)
call             2319 kernel/trace/trace_events.c 	event_remove(call);
call             2320 kernel/trace/trace_events.c 	trace_destroy_fields(call);
call             2321 kernel/trace/trace_events.c 	free_event_filter(call->filter);
call             2322 kernel/trace/trace_events.c 	call->filter = NULL;
call             2325 kernel/trace/trace_events.c static int probe_remove_event_call(struct trace_event_call *call)
call             2331 kernel/trace/trace_events.c 	if (call->perf_refcount)
call             2335 kernel/trace/trace_events.c 		if (file->event_call != call)
call             2353 kernel/trace/trace_events.c 	__trace_remove_event_call(call);
call             2359 kernel/trace/trace_events.c int trace_remove_event_call(struct trace_event_call *call)
call             2367 kernel/trace/trace_events.c 	ret = probe_remove_event_call(call);
call             2383 kernel/trace/trace_events.c 	struct trace_event_call **call, **start, **end;
call             2398 kernel/trace/trace_events.c 	for_each_event(call, start, end) {
call             2399 kernel/trace/trace_events.c 		__register_event(*call, mod);
call             2400 kernel/trace/trace_events.c 		__add_event_to_tracers(*call);
call             2406 kernel/trace/trace_events.c 	struct trace_event_call *call, *p;
call             2409 kernel/trace/trace_events.c 	list_for_each_entry_safe(call, p, &ftrace_events, list) {
call             2410 kernel/trace/trace_events.c 		if (call->mod == mod)
call             2411 kernel/trace/trace_events.c 			__trace_remove_event_call(call);
call             2457 kernel/trace/trace_events.c 	struct trace_event_call *call;
call             2460 kernel/trace/trace_events.c 	list_for_each_entry(call, &ftrace_events, list) {
call             2461 kernel/trace/trace_events.c 		ret = __trace_add_new_event(call, tr);
call             2464 kernel/trace/trace_events.c 				trace_event_name(call));
call             2473 kernel/trace/trace_events.c 	struct trace_event_call *call;
call             2478 kernel/trace/trace_events.c 		call = file->event_call;
call             2479 kernel/trace/trace_events.c 		name = trace_event_name(call);
call             2481 kernel/trace/trace_events.c 		if (!name || !call->class)
call             2485 kernel/trace/trace_events.c 		    strcmp(system, call->class->system) == 0)
call             2859 kernel/trace/trace_events.c 	struct trace_event_call *call;
call             2862 kernel/trace/trace_events.c 	list_for_each_entry(call, &ftrace_events, list) {
call             2864 kernel/trace/trace_events.c 		if (WARN_ON_ONCE(call->mod))
call             2867 kernel/trace/trace_events.c 		ret = __trace_early_add_new_event(call, tr);
call             2870 kernel/trace/trace_events.c 				trace_event_name(call));
call             2884 kernel/trace/trace_events.c static void __add_event_to_tracers(struct trace_event_call *call)
call             2889 kernel/trace/trace_events.c 		__trace_add_new_event(call, tr);
call             3081 kernel/trace/trace_events.c 	struct trace_event_call **iter, *call;
call             3089 kernel/trace/trace_events.c 		call = *iter;
call             3090 kernel/trace/trace_events.c 		ret = event_init(call);
call             3092 kernel/trace/trace_events.c 			list_add(&call->list, &ftrace_events);
call             3245 kernel/trace/trace_events.c 	struct trace_event_call *call;
call             3258 kernel/trace/trace_events.c 		call = file->event_call;
call             3261 kernel/trace/trace_events.c 		if (!call->class || !call->class->probe)
call             3271 kernel/trace/trace_events.c 		if (call->class->system &&
call             3272 kernel/trace/trace_events.c 		    strcmp(call->class->system, "syscalls") == 0)
call             3276 kernel/trace/trace_events.c 		pr_info("Testing event %s: ", trace_event_name(call));
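
The trace_events.c entries around trace_event_reg() (lines ~280-305 above) show a small dispatcher: given a register or unregister command, it attaches or detaches the event's probe on its tracepoint. A hedged sketch of that command-dispatch shape with invented command names and stub attach/detach helpers in place of tracepoint_probe_register()/unregister().

#include <stdio.h>

enum reg_cmd { REG_REGISTER, REG_UNREGISTER };	/* stand-in command names */

typedef void (*probe_t)(void *data);

struct event_call {
	const char *name;
	probe_t probe;		/* plays the role of call->class->probe */
	int registered;
};

/* Stand-ins for tracepoint_probe_register()/unregister(). */
static int attach(struct event_call *call)  { call->registered = 1; return 0; }
static void detach(struct event_call *call) { call->registered = 0; }

static int event_reg(struct event_call *call, enum reg_cmd cmd)
{
	switch (cmd) {
	case REG_REGISTER:
		return attach(call);
	case REG_UNREGISTER:
		detach(call);
		return 0;
	}
	return -1;
}

static void demo_probe(void *data) { (void)data; }

int main(void)
{
	struct event_call call = { .name = "demo", .probe = demo_probe };

	event_reg(&call, REG_REGISTER);
	printf("%s registered=%d\n", call.name, call.registered);
	event_reg(&call, REG_UNREGISTER);
	printf("%s registered=%d\n", call.name, call.registered);
	return 0;
}
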
call             1156 kernel/trace/trace_events_filter.c 	struct trace_event_call *call = data;
call             1188 kernel/trace/trace_events_filter.c 	field = trace_find_event_field(call, field_name);
call             1225 kernel/trace/trace_events_filter.c 	if (ftrace_event_is_function(call)) {
call             1505 kernel/trace/trace_events_filter.c static int process_preds(struct trace_event_call *call,
call             1535 kernel/trace/trace_events_filter.c 			       parse_pred, call, pe);
call             1733 kernel/trace/trace_events_filter.c 			 struct trace_event_call *call,
call             1748 kernel/trace/trace_events_filter.c 	err = process_preds(call, filter_string, *filterp, pe);
call             1757 kernel/trace/trace_events_filter.c 			struct trace_event_call *call,
call             1761 kernel/trace/trace_events_filter.c 	return create_filter(tr, call, filter_str, set_str, filterp);
call             1799 kernel/trace/trace_events_filter.c 	struct trace_event_call *call = file->event_call;
call             1819 kernel/trace/trace_events_filter.c 	err = create_filter(file->tr, call, filter_string, true, &filter);
call             2061 kernel/trace/trace_events_filter.c 	struct trace_event_call *call;
call             2065 kernel/trace/trace_events_filter.c 	call = event->tp_event;
call             2068 kernel/trace/trace_events_filter.c 	if (!call)
call             2075 kernel/trace/trace_events_filter.c 	err = create_filter(NULL, call, filter_str, false, &filter);
call             2079 kernel/trace/trace_events_filter.c 	if (ftrace_event_is_function(call))
call             2085 kernel/trace/trace_events_filter.c 	if (err || ftrace_event_is_function(call))
call              409 kernel/trace/trace_events_hist.c 	struct trace_event_call			call;
call              607 kernel/trace/trace_events_hist.c 	struct trace_event_call *call;
call              615 kernel/trace/trace_events_hist.c 		call = file->event_call;
call              617 kernel/trace/trace_events_hist.c 		system = call->class->system;
call              619 kernel/trace/trace_events_hist.c 			name = trace_event_name(call);
call              646 kernel/trace/trace_events_hist.c static int synth_event_define_fields(struct trace_event_call *call)
call              650 kernel/trace/trace_events_hist.c 	struct synth_event *event = call->data;
call              661 kernel/trace/trace_events_hist.c 		ret = trace_define_field(call, type, name, offset, size,
call              850 kernel/trace/trace_events_hist.c 	se = container_of(event, struct synth_event, call.event);
call              972 kernel/trace/trace_events_hist.c static void free_synth_event_print_fmt(struct trace_event_call *call)
call              974 kernel/trace/trace_events_hist.c 	if (call) {
call              975 kernel/trace/trace_events_hist.c 		kfree(call->print_fmt);
call              976 kernel/trace/trace_events_hist.c 		call->print_fmt = NULL;
call             1010 kernel/trace/trace_events_hist.c static int set_synth_event_print_fmt(struct trace_event_call *call)
call             1012 kernel/trace/trace_events_hist.c 	struct synth_event *event = call->data;
call             1025 kernel/trace/trace_events_hist.c 	call->print_fmt = print_fmt;
call             1188 kernel/trace/trace_events_hist.c 	struct trace_event_call *call = &event->call;
call             1191 kernel/trace/trace_events_hist.c 	event->call.class = &event->class;
call             1205 kernel/trace/trace_events_hist.c 	INIT_LIST_HEAD(&call->class->fields);
call             1206 kernel/trace/trace_events_hist.c 	call->event.funcs = &synth_event_funcs;
call             1207 kernel/trace/trace_events_hist.c 	call->class->define_fields = synth_event_define_fields;
call             1209 kernel/trace/trace_events_hist.c 	ret = register_trace_event(&call->event);
call             1214 kernel/trace/trace_events_hist.c 	call->flags = TRACE_EVENT_FL_TRACEPOINT;
call             1215 kernel/trace/trace_events_hist.c 	call->class->reg = trace_event_reg;
call             1216 kernel/trace/trace_events_hist.c 	call->class->probe = trace_event_raw_event_synth;
call             1217 kernel/trace/trace_events_hist.c 	call->data = event;
call             1218 kernel/trace/trace_events_hist.c 	call->tp = event->tp;
call             1220 kernel/trace/trace_events_hist.c 	ret = trace_add_event_call(call);
call             1223 kernel/trace/trace_events_hist.c 			trace_event_name(call));
call             1227 kernel/trace/trace_events_hist.c 	ret = set_synth_event_print_fmt(call);
call             1229 kernel/trace/trace_events_hist.c 		trace_remove_event_call(call);
call             1235 kernel/trace/trace_events_hist.c 	unregister_trace_event(&call->event);
call             1241 kernel/trace/trace_events_hist.c 	struct trace_event_call *call = &event->call;
call             1244 kernel/trace/trace_events_hist.c 	ret = trace_remove_event_call(call);
call             1263 kernel/trace/trace_events_hist.c 	free_synth_event_print_fmt(&event->call);
call             2780 kernel/trace/trace_events_hist.c 	struct trace_event_call *call;
call             2783 kernel/trace/trace_events_hist.c 		call = hist_data->event_file->event_call;
call             2785 kernel/trace/trace_events_hist.c 		if (strcmp(system, call->class->system) != 0)
call             2788 kernel/trace/trace_events_hist.c 		if (strcmp(event_name, trace_event_name(call)) != 0)
call             3442 kernel/trace/trace_events_hist.c 		struct trace_event_call *call;
call             3447 kernel/trace/trace_events_hist.c 		call = file->event_call;
call             3449 kernel/trace/trace_events_hist.c 		if (strcmp(subsys_name, call->class->system) != 0)
call             3452 kernel/trace/trace_events_hist.c 		if (strcmp(event_name, trace_event_name(call)) != 0)
call             3626 kernel/trace/trace_events_hist.c 		struct trace_event_call *call;
call             3631 kernel/trace/trace_events_hist.c 		call = file->event_call;
call             3633 kernel/trace/trace_events_hist.c 		if (strcmp(subsys_name, call->class->system) != 0)
call             3636 kernel/trace/trace_events_hist.c 		if (strcmp(event_name, trace_event_name(call)) != 0)
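
The synthetic-event entries (trace_events_hist.c around lines 1188-1263 above) register the trace_event_call embedded in the synth_event in stages and unwind in reverse order when a later stage fails. A small sketch of that register-then-unwind ordering; the step names are placeholders, only the goto-based error unwinding mirrors the real function.

#include <stdio.h>

static int step_ok(const char *what)   { printf("did %s\n", what); return 0; }
static int step_fail(const char *what) { printf("FAILED %s\n", what); return -1; }
static void undo(const char *what)     { printf("undid %s\n", what); }

/* Each later failure unwinds everything that already succeeded,
 * most recent first. */
static int register_demo_event(int break_at_print_fmt)
{
	int ret;

	ret = step_ok("register output event");
	if (ret)
		return ret;

	ret = step_ok("add event call");
	if (ret)
		goto err_unregister;

	ret = break_at_print_fmt ? step_fail("set print_fmt")
				 : step_ok("set print_fmt");
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	undo("add event call");
err_unregister:
	undo("register output event");
	return ret;
}

int main(void)
{
	register_demo_event(1);
	return 0;
}
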
call               18 kernel/trace/trace_export.c static int ftrace_event_register(struct trace_event_call *call,
call              171 kernel/trace/trace_export.c #define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
call              174 kernel/trace/trace_export.c struct trace_event_class __refdata event_class_ftrace_##call = {	\
call              176 kernel/trace/trace_export.c 	.define_fields		= ftrace_define_fields_##call,		\
call              177 kernel/trace/trace_export.c 	.fields			= LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
call              181 kernel/trace/trace_export.c struct trace_event_call __used event_##call = {				\
call              182 kernel/trace/trace_export.c 	.class			= &event_class_ftrace_##call,		\
call              184 kernel/trace/trace_export.c 		.name			= #call,			\
call              191 kernel/trace/trace_export.c __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
call              194 kernel/trace/trace_export.c #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print, filter)	\
call              195 kernel/trace/trace_export.c 	FTRACE_ENTRY_REG(call, struct_name, etype,			\
call              198 kernel/trace/trace_export.c bool ftrace_event_is_function(struct trace_event_call *call)
call              200 kernel/trace/trace_export.c 	return call == &event_function;
call              102 kernel/trace/trace_functions_graph.c 	struct trace_event_call *call = &event_funcgraph_entry;
call              113 kernel/trace/trace_functions_graph.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call              222 kernel/trace/trace_functions_graph.c 	struct trace_event_call *call = &event_funcgraph_exit;
call              233 kernel/trace/trace_functions_graph.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call              634 kernel/trace/trace_functions_graph.c 	struct ftrace_graph_ent *call;
call              640 kernel/trace/trace_functions_graph.c 	call = &entry->graph_ent;
call              653 kernel/trace/trace_functions_graph.c 		cpu_data->depth = call->depth - 1;
call              656 kernel/trace/trace_functions_graph.c 		if (call->depth < FTRACE_RETFUNC_DEPTH &&
call              657 kernel/trace/trace_functions_graph.c 		    !WARN_ON_ONCE(call->depth < 0))
call              658 kernel/trace/trace_functions_graph.c 			cpu_data->enter_funcs[call->depth] = 0;
call              665 kernel/trace/trace_functions_graph.c 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
call              668 kernel/trace/trace_functions_graph.c 	trace_seq_printf(s, "%ps();\n", (void *)call->func);
call              681 kernel/trace/trace_functions_graph.c 	struct ftrace_graph_ent *call = &entry->graph_ent;
call              691 kernel/trace/trace_functions_graph.c 		cpu_data->depth = call->depth;
call              694 kernel/trace/trace_functions_graph.c 		if (call->depth < FTRACE_RETFUNC_DEPTH &&
call              695 kernel/trace/trace_functions_graph.c 		    !WARN_ON_ONCE(call->depth < 0))
call              696 kernel/trace/trace_functions_graph.c 			cpu_data->enter_funcs[call->depth] = call->func;
call              703 kernel/trace/trace_functions_graph.c 	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
call              706 kernel/trace/trace_functions_graph.c 	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
call              869 kernel/trace/trace_functions_graph.c 	struct ftrace_graph_ent *call = &field->graph_ent;
call              874 kernel/trace/trace_functions_graph.c 	if (check_irq_entry(iter, flags, call->func, call->depth))
call              877 kernel/trace/trace_functions_graph.c 	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
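
The function-graph printer entries indent each record by call->depth * TRACE_GRAPH_INDENT spaces before printing either "func();" for a leaf or "func() {" for a nested call. A tiny rendering of that indentation loop; the func member is simplified to a string here, where the tracer prints a resolved address with %ps.

#include <stdio.h>

#define TRACE_GRAPH_INDENT 2	/* spaces per nesting level, as in the tracer */

struct ftrace_graph_ent { const char *func; int depth; };	/* simplified */

static void print_entry(const struct ftrace_graph_ent *call, int leaf)
{
	for (int i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		putchar(' ');
	printf(leaf ? "%s();\n" : "%s() {\n", call->func);
}

int main(void)
{
	struct ftrace_graph_ent outer = { "do_sys_open", 0 };
	struct ftrace_graph_ent inner = { "getname",     1 };

	print_entry(&outer, 0);
	print_entry(&inner, 1);
	printf("}\n");
	return 0;
}
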
call              106 kernel/trace/trace_hwlat.c 	struct trace_event_call *call = &event_hwlat;
call              128 kernel/trace/trace_hwlat.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call              208 kernel/trace/trace_kprobe.c trace_kprobe_primary_from_call(struct trace_event_call *call)
call              212 kernel/trace/trace_kprobe.c 	tp = trace_probe_primary_from_call(call);
call              219 kernel/trace/trace_kprobe.c bool trace_kprobe_on_func_entry(struct trace_event_call *call)
call              221 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
call              228 kernel/trace/trace_kprobe.c bool trace_kprobe_error_injectable(struct trace_event_call *call)
call              230 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
call              351 kernel/trace/trace_kprobe.c static int enable_trace_kprobe(struct trace_event_call *call,
call              359 kernel/trace/trace_kprobe.c 	tp = trace_probe_primary_from_call(call);
call              402 kernel/trace/trace_kprobe.c static int disable_trace_kprobe(struct trace_event_call *call,
call              407 kernel/trace/trace_kprobe.c 	tp = trace_probe_primary_from_call(call);
call             1184 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
call             1186 kernel/trace/trace_kprobe.c 	WARN_ON(call != trace_file->event_call);
call             1198 kernel/trace/trace_kprobe.c 						call->event.type,
call             1232 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
call             1234 kernel/trace/trace_kprobe.c 	WARN_ON(call != trace_file->event_call);
call             1246 kernel/trace/trace_kprobe.c 						call->event.type,
call             1376 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
call             1382 kernel/trace/trace_kprobe.c 	if (bpf_prog_array_valid(call)) {
call             1386 kernel/trace/trace_kprobe.c 		ret = trace_call_bpf(call, regs);
call             1399 kernel/trace/trace_kprobe.c 	head = this_cpu_ptr(call->perf_events);
call             1415 kernel/trace/trace_kprobe.c 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
call             1426 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
call             1432 kernel/trace/trace_kprobe.c 	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
call             1435 kernel/trace/trace_kprobe.c 	head = this_cpu_ptr(call->perf_events);
call             1451 kernel/trace/trace_kprobe.c 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
call             1562 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
call             1565 kernel/trace/trace_kprobe.c 		call->event.funcs = &kretprobe_funcs;
call             1566 kernel/trace/trace_kprobe.c 		call->class->define_fields = kretprobe_event_define_fields;
call             1568 kernel/trace/trace_kprobe.c 		call->event.funcs = &kprobe_funcs;
call             1569 kernel/trace/trace_kprobe.c 		call->class->define_fields = kprobe_event_define_fields;
call             1572 kernel/trace/trace_kprobe.c 	call->flags = TRACE_EVENT_FL_KPROBE;
call             1573 kernel/trace/trace_kprobe.c 	call->class->reg = kprobe_register;
call              299 kernel/trace/trace_mmiotrace.c 	struct trace_event_call *call = &event_mmiotrace_rw;
call              314 kernel/trace/trace_mmiotrace.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call              329 kernel/trace/trace_mmiotrace.c 	struct trace_event_call *call = &event_mmiotrace_map;
call              344 kernel/trace/trace_mmiotrace.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call              905 kernel/trace/trace_probe.c 	struct trace_event_call *call = trace_probe_event_call(tp);
call              917 kernel/trace/trace_probe.c 	call->print_fmt = print_fmt;
call              950 kernel/trace/trace_probe.c 	kfree(tpe->call.name);
call              951 kernel/trace/trace_probe.c 	kfree(tpe->call.print_fmt);
call              991 kernel/trace/trace_probe.c 	struct trace_event_call *call;
call             1011 kernel/trace/trace_probe.c 	call = trace_probe_event_call(tp);
call             1012 kernel/trace/trace_probe.c 	call->class = &tp->event->class;
call             1013 kernel/trace/trace_probe.c 	call->name = kstrdup(event, GFP_KERNEL);
call             1014 kernel/trace/trace_probe.c 	if (!call->name) {
call             1034 kernel/trace/trace_probe.c 	struct trace_event_call *call = trace_probe_event_call(tp);
call             1037 kernel/trace/trace_probe.c 	ret = register_trace_event(&call->event);
call             1041 kernel/trace/trace_probe.c 	ret = trace_add_event_call(call);
call             1043 kernel/trace/trace_probe.c 		unregister_trace_event(&call->event);
call              201 kernel/trace/trace_probe.h bool trace_kprobe_on_func_entry(struct trace_event_call *call);
call              202 kernel/trace/trace_probe.h bool trace_kprobe_error_injectable(struct trace_event_call *call);
call              204 kernel/trace/trace_probe.h static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
call              209 kernel/trace/trace_probe.h static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
call              236 kernel/trace/trace_probe.h 	struct trace_event_call		call;
call              280 kernel/trace/trace_probe.h 	return trace_event_name(&tp->event->call);
call              285 kernel/trace/trace_probe.h 	return tp->event->call.class->system;
call              291 kernel/trace/trace_probe.h 	return &tp->event->call;
call              297 kernel/trace/trace_probe.h 	return container_of(event_call, struct trace_probe_event, call);
call              301 kernel/trace/trace_probe.h trace_probe_primary_from_call(struct trace_event_call *call)
call              303 kernel/trace/trace_probe.h 	struct trace_probe_event *tpe = trace_probe_event_from_call(call);
call              323 kernel/trace/trace_probe.h 	return trace_remove_event_call(&tp->event->call);
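
The trace_probe.h entries recover the enclosing trace_probe_event from the trace_event_call embedded in it with container_of(). A self-contained demonstration of that embedded-member recovery; the structures are reduced to a bare minimum and are not the real kernel definitions.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct trace_event_call { const char *name; };

/* The event call is embedded, not pointed to, so no back-pointer is needed. */
struct trace_probe_event {
	int refcount;
	struct trace_event_call call;
};

static struct trace_probe_event *
event_from_call(struct trace_event_call *call)
{
	return container_of(call, struct trace_probe_event, call);
}

int main(void)
{
	struct trace_probe_event tpe = { .refcount = 1, .call = { "p:myprobe" } };
	struct trace_event_call *call = &tpe.call;

	printf("recovered refcount=%d name=%s\n",
	       event_from_call(call)->refcount, call->name);
	return 0;
}
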
call              380 kernel/trace/trace_sched_wakeup.c 	struct trace_event_call *call = &event_context_switch;
call              398 kernel/trace/trace_sched_wakeup.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call              408 kernel/trace/trace_sched_wakeup.c 	struct trace_event_call *call = &event_wakeup;
call              426 kernel/trace/trace_sched_wakeup.c 	if (!call_filter_check_discard(call, entry, buffer, event))
call               23 kernel/trace/trace_syscalls.c syscall_get_enter_fields(struct trace_event_call *call)
call               25 kernel/trace/trace_syscalls.c 	struct syscall_metadata *entry = call->data;
call              235 kernel/trace/trace_syscalls.c static int __init set_syscall_print_fmt(struct trace_event_call *call)
call              239 kernel/trace/trace_syscalls.c 	struct syscall_metadata *entry = call->data;
call              241 kernel/trace/trace_syscalls.c 	if (entry->enter_event != call) {
call              242 kernel/trace/trace_syscalls.c 		call->print_fmt = "\"0x%lx\", REC->ret";
call              255 kernel/trace/trace_syscalls.c 	call->print_fmt = print_fmt;
call              260 kernel/trace/trace_syscalls.c static void __init free_syscall_print_fmt(struct trace_event_call *call)
call              262 kernel/trace/trace_syscalls.c 	struct syscall_metadata *entry = call->data;
call              264 kernel/trace/trace_syscalls.c 	if (entry->enter_event == call)
call              265 kernel/trace/trace_syscalls.c 		kfree(call->print_fmt);
call              268 kernel/trace/trace_syscalls.c static int __init syscall_enter_define_fields(struct trace_event_call *call)
call              271 kernel/trace/trace_syscalls.c 	struct syscall_metadata *meta = call->data;
call              276 kernel/trace/trace_syscalls.c 	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
call              282 kernel/trace/trace_syscalls.c 		ret = trace_define_field(call, meta->types[i],
call              292 kernel/trace/trace_syscalls.c static int __init syscall_exit_define_fields(struct trace_event_call *call)
call              297 kernel/trace/trace_syscalls.c 	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
call              302 kernel/trace/trace_syscalls.c 	ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
call              405 kernel/trace/trace_syscalls.c 				   struct trace_event_call *call)
call              411 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              426 kernel/trace/trace_syscalls.c 				      struct trace_event_call *call)
call              431 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              443 kernel/trace/trace_syscalls.c 				  struct trace_event_call *call)
call              449 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              464 kernel/trace/trace_syscalls.c 				     struct trace_event_call *call)
call              469 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              480 kernel/trace/trace_syscalls.c static int __init init_syscall_trace(struct trace_event_call *call)
call              485 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              488 kernel/trace/trace_syscalls.c 				((struct syscall_metadata *)call->data)->name);
call              492 kernel/trace/trace_syscalls.c 	if (set_syscall_print_fmt(call) < 0)
call              495 kernel/trace/trace_syscalls.c 	id = trace_event_raw_init(call);
call              498 kernel/trace/trace_syscalls.c 		free_syscall_print_fmt(call);
call              565 kernel/trace/trace_syscalls.c static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
call              580 kernel/trace/trace_syscalls.c 	return trace_call_bpf(call, &param);
call              634 kernel/trace/trace_syscalls.c static int perf_sysenter_enable(struct trace_event_call *call)
call              639 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              654 kernel/trace/trace_syscalls.c static void perf_sysenter_disable(struct trace_event_call *call)
call              658 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              668 kernel/trace/trace_syscalls.c static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
call              680 kernel/trace/trace_syscalls.c 	return trace_call_bpf(call, &param);
call              730 kernel/trace/trace_syscalls.c static int perf_sysexit_enable(struct trace_event_call *call)
call              735 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
call              750 kernel/trace/trace_syscalls.c static void perf_sysexit_disable(struct trace_event_call *call)
call              754 kernel/trace/trace_syscalls.c 	num = ((struct syscall_metadata *)call->data)->syscall_nr;
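Almost every trace_syscalls.c helper listed above casts call->data back to its struct syscall_metadata. Below is a hedged userspace sketch of that private-data idiom; event_call and syscall_meta are invented stand-ins for the kernel types.

```c
/*
 * Userspace sketch of the private-data idiom: the generic event object
 * carries an untyped ->data pointer and each handler casts it back to the
 * metadata it registered.
 */
#include <stdio.h>

struct event_call {
	const char *name;
	void *data;			/* per-event private metadata */
};

struct syscall_meta {
	int syscall_nr;
	int nb_args;
};

static int event_syscall_nr(const struct event_call *call)
{
	/* The cast every trace_syscalls.c helper above repeats. */
	return ((struct syscall_meta *)call->data)->syscall_nr;
}

int main(void)
{
	struct syscall_meta meta = { .syscall_nr = 42, .nb_args = 3 };
	struct event_call call = { .name = "sys_enter_openat", .data = &meta };

	printf("%s -> nr %d\n", call.name, event_syscall_nr(&call));
	return 0;
}
```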
call              323 kernel/trace/trace_uprobe.c trace_uprobe_primary_from_call(struct trace_event_call *call)
call              327 kernel/trace/trace_uprobe.c 	tp = trace_probe_primary_from_call(call);
call              938 kernel/trace/trace_uprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
call              940 kernel/trace/trace_uprobe.c 	WARN_ON(call != trace_file->event_call);
call              951 kernel/trace/trace_uprobe.c 						call->event.type, size, 0, 0);
call             1076 kernel/trace/trace_uprobe.c static int probe_event_enable(struct trace_event_call *call,
call             1084 kernel/trace/trace_uprobe.c 	tp = trace_probe_primary_from_call(call);
call             1137 kernel/trace/trace_uprobe.c static void probe_event_disable(struct trace_event_call *call,
call             1142 kernel/trace/trace_uprobe.c 	tp = trace_probe_primary_from_call(call);
call             1257 kernel/trace/trace_uprobe.c static int uprobe_perf_close(struct trace_event_call *call,
call             1264 kernel/trace/trace_uprobe.c 	tp = trace_probe_primary_from_call(call);
call             1282 kernel/trace/trace_uprobe.c static int uprobe_perf_open(struct trace_event_call *call,
call             1289 kernel/trace/trace_uprobe.c 	tp = trace_probe_primary_from_call(call);
call             1300 kernel/trace/trace_uprobe.c 			uprobe_perf_close(call, event);
call             1329 kernel/trace/trace_uprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
call             1336 kernel/trace/trace_uprobe.c 	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
call             1347 kernel/trace/trace_uprobe.c 	head = this_cpu_ptr(call->perf_events);
call             1372 kernel/trace/trace_uprobe.c 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
call             1533 kernel/trace/trace_uprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
call             1535 kernel/trace/trace_uprobe.c 	call->event.funcs = &uprobe_funcs;
call             1536 kernel/trace/trace_uprobe.c 	call->class->define_fields = uprobe_event_define_fields;
call             1538 kernel/trace/trace_uprobe.c 	call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
call             1539 kernel/trace/trace_uprobe.c 	call->class->reg = trace_uprobe_register;
call             1051 net/ax25/af_ax25.c 	ax25_address call;
call             1069 net/ax25/af_ax25.c 		call = user->call;
call             1075 net/ax25/af_ax25.c 		call = addr->fsa_ax25.sax25_call;
call             1086 net/ax25/af_ax25.c 	ax25->source_addr = call;
call              408 net/ax25/ax25_route.c 		ax25->source_addr = user->call;
call               80 net/ax25/ax25_uid.c 			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0) {
call              109 net/ax25/ax25_uid.c 		ax25_uid->call = sax->sax25_call;
call              124 net/ax25/ax25_uid.c 			if (ax25cmp(&sax->sax25_call, &ax25_uid->call) == 0)
call              176 net/ax25/ax25_uid.c 			ax2asc(buf, &pt->call));
call              719 net/compat.c   COMPAT_SYSCALL_DEFINE2(socketcall, int, call, u32 __user *, args)
call              726 net/compat.c   	if (call < SYS_SOCKET || call > SYS_SENDMMSG)
call              728 net/compat.c   	len = nas[call];
call              742 net/compat.c   	switch (call) {
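The net/compat.c entries show socketcall-style multiplexing: the call number is range-checked, nas[call] supplies a per-operation argument length, and a switch dispatches to the real handler. Here is a small userspace sketch of that shape, with invented operation names and handlers:

```c
/*
 * Userspace sketch of a socketcall-style multiplexer: one entry point takes
 * an operation number plus an argument block, range-checks the number,
 * looks up how many argument words that operation expects, and dispatches
 * in a switch.  OP_*, nargs and the do_*() handlers are invented.
 */
#include <stdio.h>

enum { OP_FIRST = 1, OP_OPEN = 1, OP_CLOSE = 2, OP_LAST = 2 };

static const unsigned char nargs[] = { 0, 2, 1 };	/* words per operation */

static int do_open(long path, long flags)
{
	printf("open(%ld, %ld)\n", path, flags);
	return 0;
}

static int do_close(long fd)
{
	printf("close(%ld)\n", fd);
	return 0;
}

static int multiplexed_call(int call, const long *args)
{
	long a[6] = { 0 };
	unsigned int i, len;

	if (call < OP_FIRST || call > OP_LAST)
		return -1;		/* -EINVAL in the kernel */

	len = nargs[call];		/* argument words this operation takes */
	for (i = 0; i < len; i++)
		a[i] = args[i];		/* copy_from_user() in the kernel */

	switch (call) {
	case OP_OPEN:	return do_open(a[0], a[1]);
	case OP_CLOSE:	return do_close(a[0]);
	default:	return -1;
	}
}

int main(void)
{
	long args[2] = { 3, 7 };

	return multiplexed_call(OP_OPEN, args);
}
```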
call             1964 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_none,
call             1968 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_create,
call             1973 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_destroy,
call             1978 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_flush,
call             1983 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_rename,
call             1988 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_swap,
call             1993 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_dump,
call             1998 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_dump,
call             2003 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_uadd,
call             2008 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_udel,
call             2013 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_utest,
call             2018 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_header,
call             2023 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_type,
call             2028 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_protocol,
call             2033 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_byname,
call             2038 net/netfilter/ipset/ip_set_core.c 		.call		= ip_set_byindex,
call             3515 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
call             3518 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_GET] 		= { .call = ctnetlink_get_conntrack,
call             3521 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_DELETE]  	= { .call = ctnetlink_del_conntrack,
call             3524 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_GET_CTRZERO] 	= { .call = ctnetlink_get_conntrack,
call             3527 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_GET_STATS_CPU]	= { .call = ctnetlink_stat_ct_cpu },
call             3528 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_GET_STATS]	= { .call = ctnetlink_stat_ct },
call             3529 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_GET_DYING]	= { .call = ctnetlink_get_ct_dying },
call             3530 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_CT_GET_UNCONFIRMED]	= { .call = ctnetlink_get_ct_unconfirmed },
call             3534 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
call             3537 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
call             3540 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
call             3543 net/netfilter/nf_conntrack_netlink.c 	[IPCTNL_MSG_EXP_GET_STATS_CPU]	= { .call = ctnetlink_stat_exp_cpu },
call              228 net/netfilter/nfnetlink.c 			else if (nc->call)
call              229 net/netfilter/nfnetlink.c 				err = nc->call(net, net->nfnl, skb, nlh,
call              380 net/netfilter/nfnetlink_acct.c 	[NFNL_MSG_ACCT_NEW]		= { .call = nfnl_acct_new,
call              383 net/netfilter/nfnetlink_acct.c 	[NFNL_MSG_ACCT_GET] 		= { .call = nfnl_acct_get,
call              386 net/netfilter/nfnetlink_acct.c 	[NFNL_MSG_ACCT_GET_CTRZERO] 	= { .call = nfnl_acct_get,
call              389 net/netfilter/nfnetlink_acct.c 	[NFNL_MSG_ACCT_DEL]		= { .call = nfnl_acct_del,
call              751 net/netfilter/nfnetlink_cthelper.c 	[NFNL_MSG_CTHELPER_NEW]		= { .call = nfnl_cthelper_new,
call              754 net/netfilter/nfnetlink_cthelper.c 	[NFNL_MSG_CTHELPER_GET]		= { .call = nfnl_cthelper_get,
call              757 net/netfilter/nfnetlink_cthelper.c 	[NFNL_MSG_CTHELPER_DEL]		= { .call = nfnl_cthelper_del,
call              548 net/netfilter/nfnetlink_cttimeout.c 	[IPCTNL_MSG_TIMEOUT_NEW]	= { .call = cttimeout_new_timeout,
call              551 net/netfilter/nfnetlink_cttimeout.c 	[IPCTNL_MSG_TIMEOUT_GET]	= { .call = cttimeout_get_timeout,
call              554 net/netfilter/nfnetlink_cttimeout.c 	[IPCTNL_MSG_TIMEOUT_DELETE]	= { .call = cttimeout_del_timeout,
call              557 net/netfilter/nfnetlink_cttimeout.c 	[IPCTNL_MSG_TIMEOUT_DEFAULT_SET]= { .call = cttimeout_default_set,
call              560 net/netfilter/nfnetlink_cttimeout.c 	[IPCTNL_MSG_TIMEOUT_DEFAULT_GET]= { .call = cttimeout_default_get,
call              996 net/netfilter/nfnetlink_log.c 	[NFULNL_MSG_PACKET]	= { .call = nfulnl_recv_unsupp,
call              998 net/netfilter/nfnetlink_log.c 	[NFULNL_MSG_CONFIG]	= { .call = nfulnl_recv_config,
call              379 net/netfilter/nfnetlink_osf.c 		.call		= nfnl_osf_add_callback,
call              384 net/netfilter/nfnetlink_osf.c 		.call		= nfnl_osf_remove_callback,
call             1387 net/netfilter/nfnetlink_queue.c 	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
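The netfilter entries above all register handlers through callback tables whose .call member is indexed by message type, and nfnetlink.c invokes nc->call(...) only after checking that it is set. A small userspace sketch of that table-of-callbacks pattern, with invented struct and handler names:

```c
/*
 * Userspace sketch of the table-of-callbacks pattern: each message type
 * indexes an entry whose .call member is the handler, and the dispatcher
 * checks the pointer before invoking it.
 */
#include <stdio.h>

struct msg;				/* opaque message */

struct nl_callback {
	int (*call)(const struct msg *m);	/* handler for this type */
};

enum { MSG_NEW, MSG_GET, MSG_DEL, MSG_MAX };

static int handle_new(const struct msg *m) { (void)m; puts("new"); return 0; }
static int handle_get(const struct msg *m) { (void)m; puts("get"); return 0; }

static const struct nl_callback cb_table[MSG_MAX] = {
	[MSG_NEW] = { .call = handle_new },
	[MSG_GET] = { .call = handle_get },
	/* MSG_DEL deliberately has no handler registered. */
};

static int dispatch(int type, const struct msg *m)
{
	if (type < 0 || type >= MSG_MAX || !cb_table[type].call)
		return -1;		/* no handler for this message type */
	return cb_table[type].call(m);	/* the nc->call(...) step above */
}

int main(void)
{
	return dispatch(MSG_GET, NULL);
}
```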
call              584 net/netrom/af_netrom.c 			nr->user_addr   = user->call;
call              658 net/netrom/af_netrom.c 			nr->user_addr   = user->call;
call              220 net/rose/af_rose.c static struct sock *rose_find_listener(rose_address *addr, ax25_address *call)
call              229 net/rose/af_rose.c 		    !ax25cmp(&rose->source_call, call) &&
call              664 net/rose/af_rose.c 		rose->source_call = user->call;
call              772 net/rose/af_rose.c 		rose->source_call = user->call;
call              293 net/rxrpc/af_rxrpc.c 	struct rxrpc_call *call;
call              322 net/rxrpc/af_rxrpc.c 	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp, debug_id);
call              324 net/rxrpc/af_rxrpc.c 	if (!IS_ERR(call)) {
call              325 net/rxrpc/af_rxrpc.c 		call->notify_rx = notify_rx;
call              326 net/rxrpc/af_rxrpc.c 		mutex_unlock(&call->user_mutex);
call              330 net/rxrpc/af_rxrpc.c 	_leave(" = %p", call);
call              331 net/rxrpc/af_rxrpc.c 	return call;
call              351 net/rxrpc/af_rxrpc.c void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
call              353 net/rxrpc/af_rxrpc.c 	_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
call              355 net/rxrpc/af_rxrpc.c 	mutex_lock(&call->user_mutex);
call              356 net/rxrpc/af_rxrpc.c 	rxrpc_release_call(rxrpc_sk(sock->sk), call);
call              359 net/rxrpc/af_rxrpc.c 	if (call->notify_rx) {
call              360 net/rxrpc/af_rxrpc.c 		spin_lock_bh(&call->notify_lock);
call              361 net/rxrpc/af_rxrpc.c 		call->notify_rx = rxrpc_dummy_notify_rx;
call              362 net/rxrpc/af_rxrpc.c 		spin_unlock_bh(&call->notify_lock);
call              365 net/rxrpc/af_rxrpc.c 	mutex_unlock(&call->user_mutex);
call              366 net/rxrpc/af_rxrpc.c 	rxrpc_put_call(call, rxrpc_call_put_kernel);
call              379 net/rxrpc/af_rxrpc.c 			     const struct rxrpc_call *call)
call              381 net/rxrpc/af_rxrpc.c 	return call->state != RXRPC_CALL_COMPLETE;
call              393 net/rxrpc/af_rxrpc.c u32 rxrpc_kernel_get_epoch(struct socket *sock, struct rxrpc_call *call)
call              395 net/rxrpc/af_rxrpc.c 	return call->conn->proto.epoch;
call              428 net/rxrpc/af_rxrpc.c void rxrpc_kernel_set_max_life(struct socket *sock, struct rxrpc_call *call,
call              433 net/rxrpc/af_rxrpc.c 	mutex_lock(&call->user_mutex);
call              437 net/rxrpc/af_rxrpc.c 	WRITE_ONCE(call->expect_term_by, hard_timeout);
call              438 net/rxrpc/af_rxrpc.c 	rxrpc_reduce_call_timer(call, hard_timeout, now, rxrpc_timer_set_for_hard);
call              440 net/rxrpc/af_rxrpc.c 	mutex_unlock(&call->user_mutex);
call              426 net/rxrpc/ar-internal.h 		struct rxrpc_call __rcu	*call;		/* Active call */
call              731 net/rxrpc/ar-internal.h 	struct rxrpc_call_params call;
call              766 net/rxrpc/ar-internal.h static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
call              771 net/rxrpc/ar-internal.h 	trace_rxrpc_timer(call, why, now);
call              772 net/rxrpc/ar-internal.h 	timer_reduce(&call->timer, expire_at);
call              802 net/rxrpc/ar-internal.h static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
call              804 net/rxrpc/ar-internal.h 	return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
call              807 net/rxrpc/ar-internal.h static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
call              809 net/rxrpc/ar-internal.h 	return !rxrpc_is_service_call(call);
call              815 net/rxrpc/ar-internal.h static inline bool __rxrpc_set_call_completion(struct rxrpc_call *call,
call              820 net/rxrpc/ar-internal.h 	if (call->state < RXRPC_CALL_COMPLETE) {
call              821 net/rxrpc/ar-internal.h 		call->abort_code = abort_code;
call              822 net/rxrpc/ar-internal.h 		call->error = error;
call              823 net/rxrpc/ar-internal.h 		call->completion = compl,
call              824 net/rxrpc/ar-internal.h 		call->state = RXRPC_CALL_COMPLETE;
call              825 net/rxrpc/ar-internal.h 		trace_rxrpc_call_complete(call);
call              826 net/rxrpc/ar-internal.h 		wake_up(&call->waitq);
call              832 net/rxrpc/ar-internal.h static inline bool rxrpc_set_call_completion(struct rxrpc_call *call,
call              839 net/rxrpc/ar-internal.h 	write_lock_bh(&call->state_lock);
call              840 net/rxrpc/ar-internal.h 	ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
call              841 net/rxrpc/ar-internal.h 	write_unlock_bh(&call->state_lock);
call              848 net/rxrpc/ar-internal.h static inline bool __rxrpc_call_completed(struct rxrpc_call *call)
call              850 net/rxrpc/ar-internal.h 	return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
call              853 net/rxrpc/ar-internal.h static inline bool rxrpc_call_completed(struct rxrpc_call *call)
call              857 net/rxrpc/ar-internal.h 	write_lock_bh(&call->state_lock);
call              858 net/rxrpc/ar-internal.h 	ret = __rxrpc_call_completed(call);
call              859 net/rxrpc/ar-internal.h 	write_unlock_bh(&call->state_lock);
call              866 net/rxrpc/ar-internal.h static inline bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
call              870 net/rxrpc/ar-internal.h 	trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
call              872 net/rxrpc/ar-internal.h 	return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
call              876 net/rxrpc/ar-internal.h static inline bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
call              881 net/rxrpc/ar-internal.h 	write_lock_bh(&call->state_lock);
call              882 net/rxrpc/ar-internal.h 	ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
call              883 net/rxrpc/ar-internal.h 	write_unlock_bh(&call->state_lock);
call              890 net/rxrpc/ar-internal.h static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
call              898 net/rxrpc/ar-internal.h 	trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
call              899 net/rxrpc/ar-internal.h 	return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
call              902 net/rxrpc/ar-internal.h #define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
call              903 net/rxrpc/ar-internal.h 	__rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
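ar-internal.h pairs an unlocked __rxrpc_set_call_completion() (the caller holds state_lock) with wrappers that take the lock around it, and the abort helpers are layered on the same base. The sketch below reproduces that locked/unlocked pairing with a pthread mutex and simplified stand-in types; it illustrates the pattern only, not the rxrpc code.

```c
/*
 * Userspace sketch of the locked/unlocked helper pairing: the
 * double-underscore variant assumes the caller already holds the state
 * lock, and the plain variant is a thin wrapper that takes the lock, calls
 * it and releases the lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum call_state { CALL_ACTIVE, CALL_COMPLETE };

struct call {
	pthread_mutex_t	state_lock;
	enum call_state	state;
	int		error;
};

/* Caller must hold call->state_lock. */
static bool __set_call_completion(struct call *call, int error)
{
	if (call->state != CALL_COMPLETE) {
		call->error = error;
		call->state = CALL_COMPLETE;
		return true;		/* we performed the transition */
	}
	return false;			/* already complete, nothing to do */
}

static bool set_call_completion(struct call *call, int error)
{
	bool ret;

	pthread_mutex_lock(&call->state_lock);
	ret = __set_call_completion(call, error);
	pthread_mutex_unlock(&call->state_lock);
	return ret;
}

int main(void)
{
	struct call c = { PTHREAD_MUTEX_INITIALIZER, CALL_ACTIVE, 0 };
	bool first = set_call_completion(&c, -5);
	bool second = set_call_completion(&c, -6);

	/* Only the first completion attempt wins: prints "1 0". */
	printf("%d %d\n", first, second);
	return 0;
}
```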
call               37 net/rxrpc/call_accept.c 	struct rxrpc_call *call;
call               94 net/rxrpc/call_accept.c 	call = rxrpc_alloc_call(rx, gfp, debug_id);
call               95 net/rxrpc/call_accept.c 	if (!call)
call               97 net/rxrpc/call_accept.c 	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
call               98 net/rxrpc/call_accept.c 	call->state = RXRPC_CALL_SERVER_PREALLOC;
call              100 net/rxrpc/call_accept.c 	trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
call              101 net/rxrpc/call_accept.c 			 atomic_read(&call->usage),
call              123 net/rxrpc/call_accept.c 		call->user_call_ID = user_call_ID;
call              124 net/rxrpc/call_accept.c 		call->notify_rx = notify_rx;
call              125 net/rxrpc/call_accept.c 		rxrpc_get_call(call, rxrpc_call_got_kernel);
call              126 net/rxrpc/call_accept.c 		user_attach_call(call, user_call_ID);
call              127 net/rxrpc/call_accept.c 		rxrpc_get_call(call, rxrpc_call_got_userid);
call              128 net/rxrpc/call_accept.c 		rb_link_node(&call->sock_node, parent, pp);
call              129 net/rxrpc/call_accept.c 		rb_insert_color(&call->sock_node, &rx->calls);
call              130 net/rxrpc/call_accept.c 		set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
call              133 net/rxrpc/call_accept.c 	list_add(&call->sock_link, &rx->sock_calls);
call              137 net/rxrpc/call_accept.c 	rxnet = call->rxnet;
call              139 net/rxrpc/call_accept.c 	list_add_tail(&call->link, &rxnet->calls);
call              142 net/rxrpc/call_accept.c 	b->call_backlog[call_head] = call;
call              144 net/rxrpc/call_accept.c 	_leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
call              149 net/rxrpc/call_accept.c 	rxrpc_cleanup_call(call);
call              226 net/rxrpc/call_accept.c 		struct rxrpc_call *call = b->call_backlog[tail];
call              227 net/rxrpc/call_accept.c 		rcu_assign_pointer(call->socket, rx);
call              229 net/rxrpc/call_accept.c 			_debug("discard %lx", call->user_call_ID);
call              230 net/rxrpc/call_accept.c 			rx->discard_new_call(call, call->user_call_ID);
call              231 net/rxrpc/call_accept.c 			rxrpc_put_call(call, rxrpc_call_put_kernel);
call              233 net/rxrpc/call_accept.c 		rxrpc_call_completed(call);
call              234 net/rxrpc/call_accept.c 		rxrpc_release_call(rx, call);
call              235 net/rxrpc/call_accept.c 		rxrpc_put_call(call, rxrpc_call_put);
call              246 net/rxrpc/call_accept.c static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
call              251 net/rxrpc/call_accept.c 	if (call->peer->rtt_count < 3 ||
call              252 net/rxrpc/call_accept.c 	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
call              253 net/rxrpc/call_accept.c 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
call              271 net/rxrpc/call_accept.c 	struct rxrpc_call *call;
call              321 net/rxrpc/call_accept.c 	call = b->call_backlog[call_tail];
call              326 net/rxrpc/call_accept.c 	rxrpc_see_call(call);
call              327 net/rxrpc/call_accept.c 	call->conn = conn;
call              328 net/rxrpc/call_accept.c 	call->security = conn->security;
call              329 net/rxrpc/call_accept.c 	call->peer = rxrpc_get_peer(conn->params.peer);
call              330 net/rxrpc/call_accept.c 	call->cong_cwnd = call->peer->cong_cwnd;
call              331 net/rxrpc/call_accept.c 	return call;
call              357 net/rxrpc/call_accept.c 	struct rxrpc_call *call = NULL;
call              382 net/rxrpc/call_accept.c 	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
call              384 net/rxrpc/call_accept.c 	if (!call) {
call              389 net/rxrpc/call_accept.c 	trace_rxrpc_receive(call, rxrpc_receive_incoming,
call              393 net/rxrpc/call_accept.c 	rxrpc_incoming_call(rx, call, skb);
call              394 net/rxrpc/call_accept.c 	conn = call->conn;
call              397 net/rxrpc/call_accept.c 		rx->notify_new_call(&rx->sk, call, call->user_call_ID);
call              405 net/rxrpc/call_accept.c 		set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
call              406 net/rxrpc/call_accept.c 		rxrpc_queue_conn(call->conn);
call              410 net/rxrpc/call_accept.c 		write_lock(&call->state_lock);
call              411 net/rxrpc/call_accept.c 		if (call->state < RXRPC_CALL_COMPLETE) {
call              413 net/rxrpc/call_accept.c 				call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
call              415 net/rxrpc/call_accept.c 				call->state = RXRPC_CALL_SERVER_ACCEPTING;
call              417 net/rxrpc/call_accept.c 		write_unlock(&call->state_lock);
call              421 net/rxrpc/call_accept.c 		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
call              425 net/rxrpc/call_accept.c 		rxrpc_abort_call("CON", call, sp->hdr.seq,
call              434 net/rxrpc/call_accept.c 	rxrpc_send_ping(call, skb);
call              436 net/rxrpc/call_accept.c 	if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
call              437 net/rxrpc/call_accept.c 		rxrpc_notify_socket(call);
call              444 net/rxrpc/call_accept.c 	rxrpc_put_call(call, rxrpc_call_put);
call              446 net/rxrpc/call_accept.c 	_leave(" = %p{%d}", call, call->debug_id);
call              447 net/rxrpc/call_accept.c 	return call;
call              464 net/rxrpc/call_accept.c 	__acquires(call->user_mutex)
call              466 net/rxrpc/call_accept.c 	struct rxrpc_call *call;
call              488 net/rxrpc/call_accept.c 		call = rb_entry(parent, struct rxrpc_call, sock_node);
call              490 net/rxrpc/call_accept.c 		if (user_call_ID < call->user_call_ID)
call              492 net/rxrpc/call_accept.c 		else if (user_call_ID > call->user_call_ID)
call              501 net/rxrpc/call_accept.c 	call = list_entry(rx->to_be_accepted.next,
call              510 net/rxrpc/call_accept.c 	if (mutex_lock_interruptible(&call->user_mutex) < 0) {
call              517 net/rxrpc/call_accept.c 	list_del_init(&call->accept_link);
call              519 net/rxrpc/call_accept.c 	rxrpc_see_call(call);
call              526 net/rxrpc/call_accept.c 		call = rb_entry(parent, struct rxrpc_call, sock_node);
call              528 net/rxrpc/call_accept.c 		if (user_call_ID < call->user_call_ID)
call              530 net/rxrpc/call_accept.c 		else if (user_call_ID > call->user_call_ID)
call              536 net/rxrpc/call_accept.c 	write_lock_bh(&call->state_lock);
call              537 net/rxrpc/call_accept.c 	switch (call->state) {
call              539 net/rxrpc/call_accept.c 		call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
call              542 net/rxrpc/call_accept.c 		ret = call->error;
call              549 net/rxrpc/call_accept.c 	call->notify_rx = notify_rx;
call              550 net/rxrpc/call_accept.c 	call->user_call_ID = user_call_ID;
call              551 net/rxrpc/call_accept.c 	rxrpc_get_call(call, rxrpc_call_got_userid);
call              552 net/rxrpc/call_accept.c 	rb_link_node(&call->sock_node, parent, pp);
call              553 net/rxrpc/call_accept.c 	rb_insert_color(&call->sock_node, &rx->calls);
call              554 net/rxrpc/call_accept.c 	if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
call              557 net/rxrpc/call_accept.c 	write_unlock_bh(&call->state_lock);
call              559 net/rxrpc/call_accept.c 	rxrpc_notify_socket(call);
call              562 net/rxrpc/call_accept.c 	_leave(" = %p{%d}", call, call->debug_id);
call              563 net/rxrpc/call_accept.c 	return call;
call              566 net/rxrpc/call_accept.c 	_debug("release %p", call);
call              567 net/rxrpc/call_accept.c 	write_unlock_bh(&call->state_lock);
call              569 net/rxrpc/call_accept.c 	rxrpc_release_call(rx, call);
call              570 net/rxrpc/call_accept.c 	rxrpc_put_call(call, rxrpc_call_put);
call              589 net/rxrpc/call_accept.c 	struct rxrpc_call *call;
call              607 net/rxrpc/call_accept.c 	call = list_entry(rx->to_be_accepted.next,
call              609 net/rxrpc/call_accept.c 	list_del_init(&call->accept_link);
call              611 net/rxrpc/call_accept.c 	rxrpc_see_call(call);
call              613 net/rxrpc/call_accept.c 	write_lock_bh(&call->state_lock);
call              614 net/rxrpc/call_accept.c 	switch (call->state) {
call              616 net/rxrpc/call_accept.c 		__rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED);
call              620 net/rxrpc/call_accept.c 		ret = call->error;
call              627 net/rxrpc/call_accept.c 	write_unlock_bh(&call->state_lock);
call              630 net/rxrpc/call_accept.c 		rxrpc_send_abort_packet(call);
call              631 net/rxrpc/call_accept.c 		rxrpc_release_call(rx, call);
call              632 net/rxrpc/call_accept.c 		rxrpc_put_call(call, rxrpc_call_put);
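call_accept.c preallocates service calls in process context and parks them in a backlog ring (call_backlog[call_head] on the way in, call_backlog[call_tail] on the way out). A simplified, single-threaded userspace sketch of such a ring follows; size, names and the missing memory barriers are deliberate simplifications.

```c
/*
 * Single-threaded userspace sketch of a preallocated backlog ring: objects
 * are set up ahead of time and parked in the ring, and the receive path
 * later takes one from the tail.
 */
#include <stdio.h>

#define BACKLOG_SIZE 4			/* must be a power of two */

struct call { int id; };

struct backlog {
	unsigned int head, tail;
	struct call *ring[BACKLOG_SIZE];
};

static int backlog_prealloc(struct backlog *b, struct call *call)
{
	unsigned int head = b->head;

	if (head - b->tail >= BACKLOG_SIZE)
		return -1;		/* ring full, nothing to do */
	b->ring[head & (BACKLOG_SIZE - 1)] = call;
	b->head = head + 1;		/* publish only after the slot is filled */
	return 0;
}

static struct call *backlog_take(struct backlog *b)
{
	unsigned int tail = b->tail;

	if (tail == b->head)
		return NULL;		/* nothing preallocated */
	b->tail = tail + 1;
	return b->ring[tail & (BACKLOG_SIZE - 1)];
}

int main(void)
{
	static struct backlog b;
	struct call c1 = { 1 }, c2 = { 2 };

	backlog_prealloc(&b, &c1);
	backlog_prealloc(&b, &c2);
	printf("took call %d\n", backlog_take(&b)->id);	/* oldest first: 1 */
	return 0;
}
```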
call               23 net/rxrpc/call_event.c static void rxrpc_propose_ping(struct rxrpc_call *call,
call               28 net/rxrpc/call_event.c 		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
call               29 net/rxrpc/call_event.c 			rxrpc_queue_call(call);
call               34 net/rxrpc/call_event.c 		if (time_before(ping_at, call->ping_at)) {
call               35 net/rxrpc/call_event.c 			WRITE_ONCE(call->ping_at, ping_at);
call               36 net/rxrpc/call_event.c 			rxrpc_reduce_call_timer(call, ping_at, now,
call               45 net/rxrpc/call_event.c static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
call               57 net/rxrpc/call_event.c 		rxrpc_propose_ping(call, immediate, background);
call               66 net/rxrpc/call_event.c 	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
call               67 net/rxrpc/call_event.c 	if (ack_reason == call->ackr_reason) {
call               70 net/rxrpc/call_event.c 			call->ackr_serial = serial;
call               74 net/rxrpc/call_event.c 	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
call               75 net/rxrpc/call_event.c 		call->ackr_reason = ack_reason;
call               76 net/rxrpc/call_event.c 		call->ackr_serial = serial;
call              104 net/rxrpc/call_event.c 	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
call              107 net/rxrpc/call_event.c 		_debug("immediate ACK %lx", call->events);
call              108 net/rxrpc/call_event.c 		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
call              110 net/rxrpc/call_event.c 			rxrpc_queue_call(call);
call              114 net/rxrpc/call_event.c 		if (call->peer->srtt_us != 0)
call              115 net/rxrpc/call_event.c 			ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
call              119 net/rxrpc/call_event.c 		ack_at += READ_ONCE(call->tx_backoff);
call              121 net/rxrpc/call_event.c 		if (time_before(ack_at, call->ack_at)) {
call              122 net/rxrpc/call_event.c 			WRITE_ONCE(call->ack_at, ack_at);
call              123 net/rxrpc/call_event.c 			rxrpc_reduce_call_timer(call, ack_at, now,
call              129 net/rxrpc/call_event.c 	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
call              136 net/rxrpc/call_event.c void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
call              140 net/rxrpc/call_event.c 	spin_lock_bh(&call->lock);
call              141 net/rxrpc/call_event.c 	__rxrpc_propose_ACK(call, ack_reason, serial,
call              143 net/rxrpc/call_event.c 	spin_unlock_bh(&call->lock);
call              149 net/rxrpc/call_event.c static void rxrpc_congestion_timeout(struct rxrpc_call *call)
call              151 net/rxrpc/call_event.c 	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
call              157 net/rxrpc/call_event.c static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
call              166 net/rxrpc/call_event.c 	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
call              168 net/rxrpc/call_event.c 	rto_j = call->peer->rto_j;
call              173 net/rxrpc/call_event.c 	spin_lock_bh(&call->lock);
call              175 net/rxrpc/call_event.c 	cursor = call->tx_hard_ack;
call              176 net/rxrpc/call_event.c 	top = call->tx_top;
call              185 net/rxrpc/call_event.c 	trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
call              189 net/rxrpc/call_event.c 		annotation = call->rxtx_annotations[ix];
call              195 net/rxrpc/call_event.c 		skb = call->rxtx_buffer[ix];
call              209 net/rxrpc/call_event.c 		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
call              211 net/rxrpc/call_event.c 		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
call              217 net/rxrpc/call_event.c 	WRITE_ONCE(call->resend_at, resend_at);
call              220 net/rxrpc/call_event.c 		rxrpc_congestion_timeout(call);
call              227 net/rxrpc/call_event.c 		rxrpc_reduce_call_timer(call, resend_at, now_j,
call              229 net/rxrpc/call_event.c 		spin_unlock_bh(&call->lock);
call              230 net/rxrpc/call_event.c 		ack_ts = ktime_sub(now, call->acks_latest_ts);
call              231 net/rxrpc/call_event.c 		if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
call              233 net/rxrpc/call_event.c 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
call              235 net/rxrpc/call_event.c 		rxrpc_send_ack_packet(call, true, NULL);
call              246 net/rxrpc/call_event.c 		annotation = call->rxtx_annotations[ix];
call              251 net/rxrpc/call_event.c 		skb = call->rxtx_buffer[ix];
call              253 net/rxrpc/call_event.c 		spin_unlock_bh(&call->lock);
call              255 net/rxrpc/call_event.c 		if (rxrpc_send_data_packet(call, skb, true) < 0) {
call              260 net/rxrpc/call_event.c 		if (rxrpc_is_client_call(call))
call              261 net/rxrpc/call_event.c 			rxrpc_expose_client_call(call);
call              264 net/rxrpc/call_event.c 		spin_lock_bh(&call->lock);
call              271 net/rxrpc/call_event.c 		if (after(seq, call->tx_hard_ack)) {
call              272 net/rxrpc/call_event.c 			annotation = call->rxtx_annotations[ix];
call              280 net/rxrpc/call_event.c 			call->rxtx_annotations[ix] = annotation;
call              283 net/rxrpc/call_event.c 		if (after(call->tx_hard_ack, seq))
call              284 net/rxrpc/call_event.c 			seq = call->tx_hard_ack;
call              288 net/rxrpc/call_event.c 	spin_unlock_bh(&call->lock);
call              298 net/rxrpc/call_event.c 	struct rxrpc_call *call =
call              304 net/rxrpc/call_event.c 	rxrpc_see_call(call);
call              308 net/rxrpc/call_event.c 	       call->debug_id, rxrpc_call_states[call->state], call->events);
call              316 net/rxrpc/call_event.c 	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
call              317 net/rxrpc/call_event.c 		rxrpc_send_abort_packet(call);
call              321 net/rxrpc/call_event.c 	if (call->state == RXRPC_CALL_COMPLETE) {
call              322 net/rxrpc/call_event.c 		del_timer_sync(&call->timer);
call              323 net/rxrpc/call_event.c 		rxrpc_notify_socket(call);
call              329 net/rxrpc/call_event.c 	t = READ_ONCE(call->expect_rx_by);
call              331 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
call              332 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
call              335 net/rxrpc/call_event.c 	t = READ_ONCE(call->expect_req_by);
call              336 net/rxrpc/call_event.c 	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
call              338 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
call              339 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
call              342 net/rxrpc/call_event.c 	t = READ_ONCE(call->expect_term_by);
call              344 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
call              345 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
call              348 net/rxrpc/call_event.c 	t = READ_ONCE(call->ack_at);
call              350 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
call              351 net/rxrpc/call_event.c 		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
call              352 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_ACK, &call->events);
call              355 net/rxrpc/call_event.c 	t = READ_ONCE(call->ack_lost_at);
call              357 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
call              358 net/rxrpc/call_event.c 		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
call              359 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
call              362 net/rxrpc/call_event.c 	t = READ_ONCE(call->keepalive_at);
call              364 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
call              365 net/rxrpc/call_event.c 		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
call              366 net/rxrpc/call_event.c 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
call              368 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_PING, &call->events);
call              371 net/rxrpc/call_event.c 	t = READ_ONCE(call->ping_at);
call              373 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
call              374 net/rxrpc/call_event.c 		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
call              375 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_PING, &call->events);
call              378 net/rxrpc/call_event.c 	t = READ_ONCE(call->resend_at);
call              380 net/rxrpc/call_event.c 		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
call              381 net/rxrpc/call_event.c 		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
call              382 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
call              386 net/rxrpc/call_event.c 	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
call              387 net/rxrpc/call_event.c 		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
call              388 net/rxrpc/call_event.c 		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
call              389 net/rxrpc/call_event.c 			trace_rxrpc_call_reset(call);
call              390 net/rxrpc/call_event.c 			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET);
call              392 net/rxrpc/call_event.c 			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
call              394 net/rxrpc/call_event.c 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
call              399 net/rxrpc/call_event.c 	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
call              400 net/rxrpc/call_event.c 		call->acks_lost_top = call->tx_top;
call              401 net/rxrpc/call_event.c 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
call              403 net/rxrpc/call_event.c 		send_ack = &call->acks_lost_ping;
call              406 net/rxrpc/call_event.c 	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
call              408 net/rxrpc/call_event.c 		if (call->ackr_reason) {
call              409 net/rxrpc/call_event.c 			rxrpc_send_ack_packet(call, false, send_ack);
call              414 net/rxrpc/call_event.c 	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
call              415 net/rxrpc/call_event.c 		rxrpc_send_ack_packet(call, true, NULL);
call              419 net/rxrpc/call_event.c 	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
call              420 net/rxrpc/call_event.c 		rxrpc_resend(call, now);
call              425 net/rxrpc/call_event.c 	next = call->expect_rx_by;
call              429 net/rxrpc/call_event.c 	set(call->expect_req_by);
call              430 net/rxrpc/call_event.c 	set(call->expect_term_by);
call              431 net/rxrpc/call_event.c 	set(call->ack_at);
call              432 net/rxrpc/call_event.c 	set(call->ack_lost_at);
call              433 net/rxrpc/call_event.c 	set(call->resend_at);
call              434 net/rxrpc/call_event.c 	set(call->keepalive_at);
call              435 net/rxrpc/call_event.c 	set(call->ping_at);
call              441 net/rxrpc/call_event.c 	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
call              444 net/rxrpc/call_event.c 	if (call->events && call->state < RXRPC_CALL_COMPLETE)
call              448 net/rxrpc/call_event.c 	rxrpc_put_call(call, rxrpc_call_put);
call              454 net/rxrpc/call_event.c 	__rxrpc_queue_call(call);
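At the end of the call event processor above, the earliest remaining deadline (expect_rx_by, ack_at, resend_at, keepalive_at, ping_at, ...) is selected and the call timer is reduced to it; the set() calls appear to implement exactly that minimum. A small sketch of the re-arm step, assuming jiffies-style wrapping comparisons:

```c
/*
 * Sketch of a timer re-arm step: pick the earliest of several per-purpose
 * deadlines using wrapping tick comparisons.  earliest_deadline() and the
 * demo values are invented; the real code walks individual fields rather
 * than an array.
 */
#include <stdio.h>

/* time_before() semantics for wrapping unsigned tick counters. */
static int time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

static unsigned long earliest_deadline(const unsigned long *t, int n)
{
	unsigned long next = t[0];

	for (int i = 1; i < n; i++)
		if (time_before(t[i], next))	/* what each set() above does */
			next = t[i];
	return next;
}

int main(void)
{
	unsigned long deadlines[] = { 900, 450, 1200, 700, 450 };
	unsigned long next = earliest_deadline(deadlines, 5);

	printf("re-arm timer for tick %lu\n", next);	/* 450 */
	return 0;
}
```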
call               46 net/rxrpc/call_object.c 	struct rxrpc_call *call = from_timer(call, t, timer);
call               48 net/rxrpc/call_object.c 	_enter("%d", call->debug_id);
call               50 net/rxrpc/call_object.c 	if (call->state < RXRPC_CALL_COMPLETE) {
call               51 net/rxrpc/call_object.c 		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
call               52 net/rxrpc/call_object.c 		rxrpc_queue_call(call);
call               65 net/rxrpc/call_object.c 	struct rxrpc_call *call;
call               74 net/rxrpc/call_object.c 		call = rb_entry(p, struct rxrpc_call, sock_node);
call               76 net/rxrpc/call_object.c 		if (user_call_ID < call->user_call_ID)
call               78 net/rxrpc/call_object.c 		else if (user_call_ID > call->user_call_ID)
call               89 net/rxrpc/call_object.c 	rxrpc_get_call(call, rxrpc_call_got);
call               91 net/rxrpc/call_object.c 	_leave(" = %p [%d]", call, atomic_read(&call->usage));
call               92 net/rxrpc/call_object.c 	return call;
call              101 net/rxrpc/call_object.c 	struct rxrpc_call *call;
call              104 net/rxrpc/call_object.c 	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
call              105 net/rxrpc/call_object.c 	if (!call)
call              108 net/rxrpc/call_object.c 	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
call              111 net/rxrpc/call_object.c 	if (!call->rxtx_buffer)
call              114 net/rxrpc/call_object.c 	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
call              115 net/rxrpc/call_object.c 	if (!call->rxtx_annotations)
call              118 net/rxrpc/call_object.c 	mutex_init(&call->user_mutex);
call              124 net/rxrpc/call_object.c 		lockdep_set_class(&call->user_mutex,
call              127 net/rxrpc/call_object.c 	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
call              128 net/rxrpc/call_object.c 	INIT_WORK(&call->processor, &rxrpc_process_call);
call              129 net/rxrpc/call_object.c 	INIT_LIST_HEAD(&call->link);
call              130 net/rxrpc/call_object.c 	INIT_LIST_HEAD(&call->chan_wait_link);
call              131 net/rxrpc/call_object.c 	INIT_LIST_HEAD(&call->accept_link);
call              132 net/rxrpc/call_object.c 	INIT_LIST_HEAD(&call->recvmsg_link);
call              133 net/rxrpc/call_object.c 	INIT_LIST_HEAD(&call->sock_link);
call              134 net/rxrpc/call_object.c 	init_waitqueue_head(&call->waitq);
call              135 net/rxrpc/call_object.c 	spin_lock_init(&call->lock);
call              136 net/rxrpc/call_object.c 	spin_lock_init(&call->notify_lock);
call              137 net/rxrpc/call_object.c 	spin_lock_init(&call->input_lock);
call              138 net/rxrpc/call_object.c 	rwlock_init(&call->state_lock);
call              139 net/rxrpc/call_object.c 	atomic_set(&call->usage, 1);
call              140 net/rxrpc/call_object.c 	call->debug_id = debug_id;
call              141 net/rxrpc/call_object.c 	call->tx_total_len = -1;
call              142 net/rxrpc/call_object.c 	call->next_rx_timo = 20 * HZ;
call              143 net/rxrpc/call_object.c 	call->next_req_timo = 1 * HZ;
call              145 net/rxrpc/call_object.c 	memset(&call->sock_node, 0xed, sizeof(call->sock_node));
call              148 net/rxrpc/call_object.c 	call->rx_winsize = rxrpc_rx_window_size;
call              149 net/rxrpc/call_object.c 	call->tx_winsize = 16;
call              150 net/rxrpc/call_object.c 	call->rx_expect_next = 1;
call              152 net/rxrpc/call_object.c 	call->cong_cwnd = 2;
call              153 net/rxrpc/call_object.c 	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;
call              155 net/rxrpc/call_object.c 	call->rxnet = rxnet;
call              157 net/rxrpc/call_object.c 	return call;
call              160 net/rxrpc/call_object.c 	kfree(call->rxtx_buffer);
call              162 net/rxrpc/call_object.c 	kmem_cache_free(rxrpc_call_jar, call);
call              174 net/rxrpc/call_object.c 	struct rxrpc_call *call;
call              179 net/rxrpc/call_object.c 	call = rxrpc_alloc_call(rx, gfp, debug_id);
call              180 net/rxrpc/call_object.c 	if (!call)
call              182 net/rxrpc/call_object.c 	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
call              183 net/rxrpc/call_object.c 	call->service_id = srx->srx_service;
call              184 net/rxrpc/call_object.c 	call->tx_phase = true;
call              186 net/rxrpc/call_object.c 	call->acks_latest_ts = now;
call              187 net/rxrpc/call_object.c 	call->cong_tstamp = now;
call              189 net/rxrpc/call_object.c 	_leave(" = %p", call);
call              190 net/rxrpc/call_object.c 	return call;
call              196 net/rxrpc/call_object.c static void rxrpc_start_call_timer(struct rxrpc_call *call)
call              201 net/rxrpc/call_object.c 	call->ack_at = j;
call              202 net/rxrpc/call_object.c 	call->ack_lost_at = j;
call              203 net/rxrpc/call_object.c 	call->resend_at = j;
call              204 net/rxrpc/call_object.c 	call->ping_at = j;
call              205 net/rxrpc/call_object.c 	call->expect_rx_by = j;
call              206 net/rxrpc/call_object.c 	call->expect_req_by = j;
call              207 net/rxrpc/call_object.c 	call->expect_term_by = j;
call              208 net/rxrpc/call_object.c 	call->timer.expires = now;
call              223 net/rxrpc/call_object.c 	__acquires(&call->user_mutex)
call              225 net/rxrpc/call_object.c 	struct rxrpc_call *call, *xcall;
call              233 net/rxrpc/call_object.c 	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
call              234 net/rxrpc/call_object.c 	if (IS_ERR(call)) {
call              236 net/rxrpc/call_object.c 		_leave(" = %ld", PTR_ERR(call));
call              237 net/rxrpc/call_object.c 		return call;
call              240 net/rxrpc/call_object.c 	call->interruptibility = p->interruptibility;
call              241 net/rxrpc/call_object.c 	call->tx_total_len = p->tx_total_len;
call              242 net/rxrpc/call_object.c 	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
call              243 net/rxrpc/call_object.c 			 atomic_read(&call->usage),
call              249 net/rxrpc/call_object.c 	mutex_lock(&call->user_mutex);
call              268 net/rxrpc/call_object.c 	rcu_assign_pointer(call->socket, rx);
call              269 net/rxrpc/call_object.c 	call->user_call_ID = p->user_call_ID;
call              270 net/rxrpc/call_object.c 	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
call              271 net/rxrpc/call_object.c 	rxrpc_get_call(call, rxrpc_call_got_userid);
call              272 net/rxrpc/call_object.c 	rb_link_node(&call->sock_node, parent, pp);
call              273 net/rxrpc/call_object.c 	rb_insert_color(&call->sock_node, &rx->calls);
call              274 net/rxrpc/call_object.c 	list_add(&call->sock_link, &rx->sock_calls);
call              278 net/rxrpc/call_object.c 	rxnet = call->rxnet;
call              280 net/rxrpc/call_object.c 	list_add_tail(&call->link, &rxnet->calls);
call              289 net/rxrpc/call_object.c 	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
call              293 net/rxrpc/call_object.c 	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
call              294 net/rxrpc/call_object.c 			 atomic_read(&call->usage), here, NULL);
call              296 net/rxrpc/call_object.c 	rxrpc_start_call_timer(call);
call              298 net/rxrpc/call_object.c 	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);
call              300 net/rxrpc/call_object.c 	_leave(" = %p [new]", call);
call              301 net/rxrpc/call_object.c 	return call;
call              314 net/rxrpc/call_object.c 	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
call              316 net/rxrpc/call_object.c 	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
call              317 net/rxrpc/call_object.c 			 atomic_read(&call->usage), here, ERR_PTR(ret));
call              318 net/rxrpc/call_object.c 	rxrpc_release_call(rx, call);
call              319 net/rxrpc/call_object.c 	mutex_unlock(&call->user_mutex);
call              320 net/rxrpc/call_object.c 	rxrpc_put_call(call, rxrpc_call_put);
call              330 net/rxrpc/call_object.c 			 struct rxrpc_call *call,
call              333 net/rxrpc/call_object.c 	struct rxrpc_connection *conn = call->conn;
call              337 net/rxrpc/call_object.c 	_enter(",%d", call->conn->debug_id);
call              339 net/rxrpc/call_object.c 	rcu_assign_pointer(call->socket, rx);
call              340 net/rxrpc/call_object.c 	call->call_id		= sp->hdr.callNumber;
call              341 net/rxrpc/call_object.c 	call->service_id	= sp->hdr.serviceId;
call              342 net/rxrpc/call_object.c 	call->cid		= sp->hdr.cid;
call              343 net/rxrpc/call_object.c 	call->state		= RXRPC_CALL_SERVER_ACCEPTING;
call              345 net/rxrpc/call_object.c 		call->state	= RXRPC_CALL_SERVER_SECURING;
call              346 net/rxrpc/call_object.c 	call->cong_tstamp	= skb->tstamp;
call              355 net/rxrpc/call_object.c 	conn->channels[chan].call_counter = call->call_id;
call              356 net/rxrpc/call_object.c 	conn->channels[chan].call_id = call->call_id;
call              357 net/rxrpc/call_object.c 	rcu_assign_pointer(conn->channels[chan].call, call);
call              360 net/rxrpc/call_object.c 	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
call              363 net/rxrpc/call_object.c 	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
call              365 net/rxrpc/call_object.c 	rxrpc_start_call_timer(call);
call              372 net/rxrpc/call_object.c bool rxrpc_queue_call(struct rxrpc_call *call)
call              375 net/rxrpc/call_object.c 	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
call              378 net/rxrpc/call_object.c 	if (rxrpc_queue_work(&call->processor))
call              379 net/rxrpc/call_object.c 		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
call              382 net/rxrpc/call_object.c 		rxrpc_put_call(call, rxrpc_call_put_noqueue);
call              389 net/rxrpc/call_object.c bool __rxrpc_queue_call(struct rxrpc_call *call)
call              392 net/rxrpc/call_object.c 	int n = atomic_read(&call->usage);
call              394 net/rxrpc/call_object.c 	if (rxrpc_queue_work(&call->processor))
call              395 net/rxrpc/call_object.c 		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
call              398 net/rxrpc/call_object.c 		rxrpc_put_call(call, rxrpc_call_put_noqueue);
call              405 net/rxrpc/call_object.c void rxrpc_see_call(struct rxrpc_call *call)
call              408 net/rxrpc/call_object.c 	if (call) {
call              409 net/rxrpc/call_object.c 		int n = atomic_read(&call->usage);
call              411 net/rxrpc/call_object.c 		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
call              419 net/rxrpc/call_object.c void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
call              422 net/rxrpc/call_object.c 	int n = atomic_inc_return(&call->usage);
call              424 net/rxrpc/call_object.c 	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
call              430 net/rxrpc/call_object.c static void rxrpc_cleanup_ring(struct rxrpc_call *call)
call              435 net/rxrpc/call_object.c 		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
call              436 net/rxrpc/call_object.c 		call->rxtx_buffer[i] = NULL;
call              443 net/rxrpc/call_object.c void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
call              446 net/rxrpc/call_object.c 	struct rxrpc_connection *conn = call->conn;
call              449 net/rxrpc/call_object.c 	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
call              451 net/rxrpc/call_object.c 	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
call              452 net/rxrpc/call_object.c 			 atomic_read(&call->usage),
call              453 net/rxrpc/call_object.c 			 here, (const void *)call->flags);
call              455 net/rxrpc/call_object.c 	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
call              457 net/rxrpc/call_object.c 	spin_lock_bh(&call->lock);
call              458 net/rxrpc/call_object.c 	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
call              460 net/rxrpc/call_object.c 	spin_unlock_bh(&call->lock);
call              462 net/rxrpc/call_object.c 	del_timer_sync(&call->timer);
call              467 net/rxrpc/call_object.c 	if (!list_empty(&call->recvmsg_link)) {
call              469 net/rxrpc/call_object.c 		       call, call->events, call->flags);
call              470 net/rxrpc/call_object.c 		list_del(&call->recvmsg_link);
call              475 net/rxrpc/call_object.c 	call->recvmsg_link.next = NULL;
call              476 net/rxrpc/call_object.c 	call->recvmsg_link.prev = NULL;
call              480 net/rxrpc/call_object.c 		rxrpc_put_call(call, rxrpc_call_put);
call              484 net/rxrpc/call_object.c 	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
call              485 net/rxrpc/call_object.c 		rb_erase(&call->sock_node, &rx->calls);
call              486 net/rxrpc/call_object.c 		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
call              487 net/rxrpc/call_object.c 		rxrpc_put_call(call, rxrpc_call_put_userid);
call              490 net/rxrpc/call_object.c 	list_del(&call->sock_link);
call              493 net/rxrpc/call_object.c 	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
call              495 net/rxrpc/call_object.c 	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
call              496 net/rxrpc/call_object.c 		rxrpc_disconnect_call(call);
call              497 net/rxrpc/call_object.c 	if (call->security)
call              498 net/rxrpc/call_object.c 		call->security->free_call_crypto(call);
call              500 net/rxrpc/call_object.c 	rxrpc_cleanup_ring(call);
call              509 net/rxrpc/call_object.c 	struct rxrpc_call *call;
call              514 net/rxrpc/call_object.c 		call = list_entry(rx->to_be_accepted.next,
call              516 net/rxrpc/call_object.c 		list_del(&call->accept_link);
call              517 net/rxrpc/call_object.c 		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
call              518 net/rxrpc/call_object.c 		rxrpc_put_call(call, rxrpc_call_put);
call              522 net/rxrpc/call_object.c 		call = list_entry(rx->sock_calls.next,
call              524 net/rxrpc/call_object.c 		rxrpc_get_call(call, rxrpc_call_got);
call              525 net/rxrpc/call_object.c 		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
call              526 net/rxrpc/call_object.c 		rxrpc_send_abort_packet(call);
call              527 net/rxrpc/call_object.c 		rxrpc_release_call(rx, call);
call              528 net/rxrpc/call_object.c 		rxrpc_put_call(call, rxrpc_call_put);
call              537 net/rxrpc/call_object.c void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
call              539 net/rxrpc/call_object.c 	struct rxrpc_net *rxnet = call->rxnet;
call              541 net/rxrpc/call_object.c 	unsigned int debug_id = call->debug_id;
call              544 net/rxrpc/call_object.c 	ASSERT(call != NULL);
call              546 net/rxrpc/call_object.c 	n = atomic_dec_return(&call->usage);
call              550 net/rxrpc/call_object.c 		_debug("call %d dead", call->debug_id);
call              551 net/rxrpc/call_object.c 		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
call              553 net/rxrpc/call_object.c 		if (!list_empty(&call->link)) {
call              555 net/rxrpc/call_object.c 			list_del_init(&call->link);
call              559 net/rxrpc/call_object.c 		rxrpc_cleanup_call(call);
call              568 net/rxrpc/call_object.c 	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
call              569 net/rxrpc/call_object.c 	struct rxrpc_net *rxnet = call->rxnet;
call              571 net/rxrpc/call_object.c 	rxrpc_put_connection(call->conn);
call              572 net/rxrpc/call_object.c 	rxrpc_put_peer(call->peer);
call              573 net/rxrpc/call_object.c 	kfree(call->rxtx_buffer);
call              574 net/rxrpc/call_object.c 	kfree(call->rxtx_annotations);
call              575 net/rxrpc/call_object.c 	kmem_cache_free(rxrpc_call_jar, call);
call              585 net/rxrpc/call_object.c 	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
call              588 net/rxrpc/call_object.c 		INIT_WORK(&call->processor, rxrpc_destroy_call);
call              589 net/rxrpc/call_object.c 		if (!rxrpc_queue_work(&call->processor))
call              592 net/rxrpc/call_object.c 		rxrpc_destroy_call(&call->processor);
call              599 net/rxrpc/call_object.c void rxrpc_cleanup_call(struct rxrpc_call *call)
call              601 net/rxrpc/call_object.c 	_net("DESTROY CALL %d", call->debug_id);
call              603 net/rxrpc/call_object.c 	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
call              605 net/rxrpc/call_object.c 	del_timer_sync(&call->timer);
call              607 net/rxrpc/call_object.c 	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
call              608 net/rxrpc/call_object.c 	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
call              610 net/rxrpc/call_object.c 	rxrpc_cleanup_ring(call);
call              611 net/rxrpc/call_object.c 	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);
call              613 net/rxrpc/call_object.c 	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
call              623 net/rxrpc/call_object.c 	struct rxrpc_call *call;
call              631 net/rxrpc/call_object.c 			call = list_entry(rxnet->calls.next,
call              633 net/rxrpc/call_object.c 			_debug("Zapping call %p", call);
call              635 net/rxrpc/call_object.c 			rxrpc_see_call(call);
call              636 net/rxrpc/call_object.c 			list_del_init(&call->link);
call              639 net/rxrpc/call_object.c 			       call, atomic_read(&call->usage),
call              640 net/rxrpc/call_object.c 			       rxrpc_call_states[call->state],
call              641 net/rxrpc/call_object.c 			       call->flags, call->events);
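rxrpc_queue_call() above only queues work if it can still take a reference, via atomic_fetch_add_unless(&call->usage, 1, 0): add one unless the count is already zero. The C11 sketch below shows that get-unless-zero idea with a hand-rolled CAS loop; fetch_add_unless() here is a local helper, not the kernel API.

```c
/*
 * C11 userspace sketch of "take a reference unless the count already hit
 * zero", built from a compare-and-swap loop.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add 1 to *v unless it currently equals 'unless'; return the old value. */
static int fetch_add_unless(atomic_int *v, int unless)
{
	int old = atomic_load(v);

	while (old != unless) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			break;		/* 'old' holds the pre-increment value */
		/* on failure 'old' was reloaded; loop and retry */
	}
	return old;
}

int main(void)
{
	atomic_int usage = 1;

	/* usage was non-zero, so the caller may queue the object. */
	bool queued = fetch_add_unless(&usage, 0) != 0;
	printf("queued=%d usage=%d\n", queued, atomic_load(&usage));

	/* A count of zero means the object is dying: do not queue it. */
	atomic_store(&usage, 0);
	printf("queued=%d\n", fetch_add_unless(&usage, 0) != 0);
	return 0;
}
```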
call              276 net/rxrpc/conn_client.c 				 struct rxrpc_call *call,
call              287 net/rxrpc/conn_client.c 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
call              293 net/rxrpc/conn_client.c 	call->cong_cwnd = cp->peer->cong_cwnd;
call              294 net/rxrpc/conn_client.c 	if (call->cong_cwnd >= call->cong_ssthresh)
call              295 net/rxrpc/conn_client.c 		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call              297 net/rxrpc/conn_client.c 		call->cong_mode = RXRPC_CALL_SLOW_START;
call              352 net/rxrpc/conn_client.c 	list_add(&call->chan_wait_link, &candidate->waiting_calls);
call              355 net/rxrpc/conn_client.c 		call->conn = candidate;
call              356 net/rxrpc/conn_client.c 		call->security = candidate->security;
call              357 net/rxrpc/conn_client.c 		call->security_ix = candidate->security_ix;
call              358 net/rxrpc/conn_client.c 		call->service_id = candidate->service_id;
call              407 net/rxrpc/conn_client.c 	call->conn = candidate;
call              408 net/rxrpc/conn_client.c 	call->security = candidate->security;
call              409 net/rxrpc/conn_client.c 	call->security_ix = candidate->security_ix;
call              410 net/rxrpc/conn_client.c 	call->service_id = candidate->service_id;
call              430 net/rxrpc/conn_client.c 	call->conn = conn;
call              431 net/rxrpc/conn_client.c 	call->security = conn->security;
call              432 net/rxrpc/conn_client.c 	call->security_ix = conn->security_ix;
call              433 net/rxrpc/conn_client.c 	call->service_id = conn->service_id;
call              434 net/rxrpc/conn_client.c 	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
call              539 net/rxrpc/conn_client.c 	rcu_assign_pointer(chan->call, NULL);
call              552 net/rxrpc/conn_client.c 	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
call              563 net/rxrpc/conn_client.c 	write_lock_bh(&call->state_lock);
call              564 net/rxrpc/conn_client.c 	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
call              565 net/rxrpc/conn_client.c 	write_unlock_bh(&call->state_lock);
call              567 net/rxrpc/conn_client.c 	rxrpc_see_call(call);
call              568 net/rxrpc/conn_client.c 	list_del_init(&call->chan_wait_link);
call              570 net/rxrpc/conn_client.c 	call->peer	= rxrpc_get_peer(conn->params.peer);
call              571 net/rxrpc/conn_client.c 	call->cid	= conn->proto.cid | channel;
call              572 net/rxrpc/conn_client.c 	call->call_id	= call_id;
call              574 net/rxrpc/conn_client.c 	trace_rxrpc_connect_call(call);
call              576 net/rxrpc/conn_client.c 	     call->cid, call->call_id, call->debug_id, conn->debug_id);
call              590 net/rxrpc/conn_client.c 	chan->call_debug_id = call->debug_id;
call              591 net/rxrpc/conn_client.c 	rcu_assign_pointer(chan->call, call);
call              592 net/rxrpc/conn_client.c 	wake_up(&call->waitq);
call              642 net/rxrpc/conn_client.c static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
call              646 net/rxrpc/conn_client.c 	_enter("%d", call->debug_id);
call              648 net/rxrpc/conn_client.c 	if (!call->call_id) {
call              656 net/rxrpc/conn_client.c 		add_wait_queue_exclusive(&call->waitq, &myself);
call              658 net/rxrpc/conn_client.c 			switch (call->interruptibility) {
call              668 net/rxrpc/conn_client.c 			if (call->call_id)
call              670 net/rxrpc/conn_client.c 			if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
call              671 net/rxrpc/conn_client.c 			     call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
call              678 net/rxrpc/conn_client.c 		remove_wait_queue(&call->waitq, &myself);
call              695 net/rxrpc/conn_client.c 		       struct rxrpc_call *call,
call              703 net/rxrpc/conn_client.c 	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
call              708 net/rxrpc/conn_client.c 	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
call              712 net/rxrpc/conn_client.c 	rxrpc_animate_client_conn(rxnet, call->conn);
call              713 net/rxrpc/conn_client.c 	rxrpc_activate_channels(call->conn);
call              715 net/rxrpc/conn_client.c 	ret = rxrpc_wait_for_channel(call, gfp);
call              717 net/rxrpc/conn_client.c 		trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
call              718 net/rxrpc/conn_client.c 		rxrpc_disconnect_client_call(call);
call              722 net/rxrpc/conn_client.c 	spin_lock_bh(&call->conn->params.peer->lock);
call              723 net/rxrpc/conn_client.c 	hlist_add_head_rcu(&call->error_link,
call              724 net/rxrpc/conn_client.c 			   &call->conn->params.peer->error_targets);
call              725 net/rxrpc/conn_client.c 	spin_unlock_bh(&call->conn->params.peer->lock);
call              752 net/rxrpc/conn_client.c void rxrpc_expose_client_call(struct rxrpc_call *call)
call              754 net/rxrpc/conn_client.c 	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
call              755 net/rxrpc/conn_client.c 	struct rxrpc_connection *conn = call->conn;
call              758 net/rxrpc/conn_client.c 	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
call              786 net/rxrpc/conn_client.c void rxrpc_disconnect_client_call(struct rxrpc_call *call)
call              788 net/rxrpc/conn_client.c 	struct rxrpc_connection *conn = call->conn;
call              795 net/rxrpc/conn_client.c 	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
call              797 net/rxrpc/conn_client.c 	cid = call->cid;
call              808 net/rxrpc/conn_client.c 	if (!list_empty(&call->chan_wait_link)) {
call              810 net/rxrpc/conn_client.c 		ASSERTCMP(call->call_id, ==, 0);
call              811 net/rxrpc/conn_client.c 		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
call              812 net/rxrpc/conn_client.c 		list_del_init(&call->chan_wait_link);
call              827 net/rxrpc/conn_client.c 	if (rcu_access_pointer(chan->call) != call) {
call              841 net/rxrpc/conn_client.c 	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
call              842 net/rxrpc/conn_client.c 		_debug("exposed %u,%u", call->call_id, call->abort_code);
call              843 net/rxrpc/conn_client.c 		__rxrpc_disconnect_call(conn, call);
call              858 net/rxrpc/conn_client.c 	if (call->completion == RXRPC_CALL_SUCCEEDED &&
call              859 net/rxrpc/conn_client.c 	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
call              155 net/rxrpc/conn_event.c 	struct rxrpc_call *call;
call              163 net/rxrpc/conn_event.c 		call = rcu_dereference_protected(
call              164 net/rxrpc/conn_event.c 			conn->channels[i].call,
call              166 net/rxrpc/conn_event.c 		if (call) {
call              168 net/rxrpc/conn_event.c 				trace_rxrpc_abort(call->debug_id,
call              169 net/rxrpc/conn_event.c 						  "CON", call->cid,
call              170 net/rxrpc/conn_event.c 						  call->call_id, 0,
call              174 net/rxrpc/conn_event.c 				trace_rxrpc_rx_abort(call, serial,
call              176 net/rxrpc/conn_event.c 			if (rxrpc_set_call_completion(call, compl,
call              179 net/rxrpc/conn_event.c 				rxrpc_notify_socket(call);
call              267 net/rxrpc/conn_event.c static void rxrpc_call_is_secure(struct rxrpc_call *call)
call              269 net/rxrpc/conn_event.c 	_enter("%p", call);
call              270 net/rxrpc/conn_event.c 	if (call) {
call              271 net/rxrpc/conn_event.c 		write_lock_bh(&call->state_lock);
call              272 net/rxrpc/conn_event.c 		if (call->state == RXRPC_CALL_SERVER_SECURING) {
call              273 net/rxrpc/conn_event.c 			call->state = RXRPC_CALL_SERVER_ACCEPTING;
call              274 net/rxrpc/conn_event.c 			rxrpc_notify_socket(call);
call              276 net/rxrpc/conn_event.c 		write_unlock_bh(&call->state_lock);
call              352 net/rxrpc/conn_event.c 						conn->channels[loop].call,
call              167 net/rxrpc/conn_object.c 			     struct rxrpc_call *call)
call              170 net/rxrpc/conn_object.c 		&conn->channels[call->cid & RXRPC_CHANNELMASK];
call              172 net/rxrpc/conn_object.c 	_enter("%d,%x", conn->debug_id, call->cid);
call              174 net/rxrpc/conn_object.c 	if (rcu_access_pointer(chan->call) == call) {
call              178 net/rxrpc/conn_object.c 		trace_rxrpc_disconnect_call(call);
call              179 net/rxrpc/conn_object.c 		switch (call->completion) {
call              181 net/rxrpc/conn_object.c 			chan->last_seq = call->rx_hard_ack;
call              185 net/rxrpc/conn_object.c 			chan->last_abort = call->abort_code;
call              199 net/rxrpc/conn_object.c 		rcu_assign_pointer(chan->call, NULL);
call              209 net/rxrpc/conn_object.c void rxrpc_disconnect_call(struct rxrpc_call *call)
call              211 net/rxrpc/conn_object.c 	struct rxrpc_connection *conn = call->conn;
call              213 net/rxrpc/conn_object.c 	call->peer->cong_cwnd = call->cong_cwnd;
call              216 net/rxrpc/conn_object.c 	hlist_del_rcu(&call->error_link);
call              219 net/rxrpc/conn_object.c 	if (rxrpc_is_client_call(call))
call              220 net/rxrpc/conn_object.c 		return rxrpc_disconnect_client_call(call);
call              223 net/rxrpc/conn_object.c 	__rxrpc_disconnect_call(conn, call);
call              226 net/rxrpc/conn_object.c 	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
call              237 net/rxrpc/conn_object.c 	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
call              238 net/rxrpc/conn_object.c 	       !rcu_access_pointer(conn->channels[1].call) &&
call              239 net/rxrpc/conn_object.c 	       !rcu_access_pointer(conn->channels[2].call) &&
call              240 net/rxrpc/conn_object.c 	       !rcu_access_pointer(conn->channels[3].call));
call               27 net/rxrpc/input.c 			      struct rxrpc_call *call, rxrpc_seq_t seq)
call               29 net/rxrpc/input.c 	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
call               30 net/rxrpc/input.c 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
call               31 net/rxrpc/input.c 		rxrpc_queue_call(call);
call               38 net/rxrpc/input.c static void rxrpc_congestion_management(struct rxrpc_call *call,
call               44 net/rxrpc/input.c 	unsigned int cumulative_acks = call->cong_cumul_acks;
call               45 net/rxrpc/input.c 	unsigned int cwnd = call->cong_cwnd;
call               49 net/rxrpc/input.c 		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;
call               51 net/rxrpc/input.c 	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
call               53 net/rxrpc/input.c 		call->cong_ssthresh = max_t(unsigned int,
call               56 net/rxrpc/input.c 		if (cwnd >= call->cong_ssthresh &&
call               57 net/rxrpc/input.c 		    call->cong_mode == RXRPC_CALL_SLOW_START) {
call               58 net/rxrpc/input.c 			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call               59 net/rxrpc/input.c 			call->cong_tstamp = skb->tstamp;
call               69 net/rxrpc/input.c 	summary->mode = call->cong_mode;
call               70 net/rxrpc/input.c 	summary->cwnd = call->cong_cwnd;
call               71 net/rxrpc/input.c 	summary->ssthresh = call->cong_ssthresh;
call               73 net/rxrpc/input.c 	summary->dup_acks = call->cong_dup_acks;
call               75 net/rxrpc/input.c 	switch (call->cong_mode) {
call               81 net/rxrpc/input.c 		if (cwnd >= call->cong_ssthresh) {
call               82 net/rxrpc/input.c 			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call               83 net/rxrpc/input.c 			call->cong_tstamp = skb->tstamp;
call               94 net/rxrpc/input.c 		if (call->peer->rtt_count == 0)
call               97 net/rxrpc/input.c 				 ktime_add_us(call->cong_tstamp,
call               98 net/rxrpc/input.c 					      call->peer->srtt_us >> 3)))
call              101 net/rxrpc/input.c 		call->cong_tstamp = skb->tstamp;
call              112 net/rxrpc/input.c 			call->cong_dup_acks = 1;
call              113 net/rxrpc/input.c 			if (call->cong_extra > 1)
call              114 net/rxrpc/input.c 				call->cong_extra = 1;
call              118 net/rxrpc/input.c 		call->cong_dup_acks++;
call              119 net/rxrpc/input.c 		if (call->cong_dup_acks < 3)
call              123 net/rxrpc/input.c 		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
call              124 net/rxrpc/input.c 		call->cong_ssthresh = max_t(unsigned int,
call              126 net/rxrpc/input.c 		cwnd = call->cong_ssthresh + 3;
call              127 net/rxrpc/input.c 		call->cong_extra = 0;
call              128 net/rxrpc/input.c 		call->cong_dup_acks = 0;
call              136 net/rxrpc/input.c 			call->cong_dup_acks++;
call              137 net/rxrpc/input.c 			if (call->cong_dup_acks == 2) {
call              139 net/rxrpc/input.c 				call->cong_dup_acks = 0;
call              144 net/rxrpc/input.c 			cwnd = call->cong_ssthresh;
call              157 net/rxrpc/input.c 	call->cong_dup_acks = 0;
call              158 net/rxrpc/input.c 	call->cong_extra = 0;
call              159 net/rxrpc/input.c 	call->cong_tstamp = skb->tstamp;
call              160 net/rxrpc/input.c 	if (cwnd < call->cong_ssthresh)
call              161 net/rxrpc/input.c 		call->cong_mode = RXRPC_CALL_SLOW_START;
call              163 net/rxrpc/input.c 		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
call              169 net/rxrpc/input.c 	call->cong_cwnd = cwnd;
call              170 net/rxrpc/input.c 	call->cong_cumul_acks = cumulative_acks;
call              171 net/rxrpc/input.c 	trace_rxrpc_congest(call, summary, acked_serial, change);
call              172 net/rxrpc/input.c 	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
call              173 net/rxrpc/input.c 		rxrpc_queue_call(call);
call              178 net/rxrpc/input.c 	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
call              179 net/rxrpc/input.c 	call->cong_dup_acks = 0;
call              186 net/rxrpc/input.c 	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
call              188 net/rxrpc/input.c 	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
call              189 net/rxrpc/input.c 		call->cong_extra++;
call              190 net/rxrpc/input.c 		wake_up(&call->waitq);
call              198 net/rxrpc/input.c static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
call              206 net/rxrpc/input.c 	if (call->acks_lowest_nak == call->tx_hard_ack) {
call              207 net/rxrpc/input.c 		call->acks_lowest_nak = to;
call              208 net/rxrpc/input.c 	} else if (before_eq(call->acks_lowest_nak, to)) {
call              210 net/rxrpc/input.c 		call->acks_lowest_nak = to;
call              213 net/rxrpc/input.c 	spin_lock(&call->lock);
call              215 net/rxrpc/input.c 	while (before(call->tx_hard_ack, to)) {
call              216 net/rxrpc/input.c 		call->tx_hard_ack++;
call              217 net/rxrpc/input.c 		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
call              218 net/rxrpc/input.c 		skb = call->rxtx_buffer[ix];
call              219 net/rxrpc/input.c 		annotation = call->rxtx_annotations[ix];
call              221 net/rxrpc/input.c 		call->rxtx_buffer[ix] = NULL;
call              222 net/rxrpc/input.c 		call->rxtx_annotations[ix] = 0;
call              227 net/rxrpc/input.c 			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
call              234 net/rxrpc/input.c 	spin_unlock(&call->lock);
call              236 net/rxrpc/input.c 	trace_rxrpc_transmit(call, (rot_last ?
call              239 net/rxrpc/input.c 	wake_up(&call->waitq);
call              257 net/rxrpc/input.c static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
call              262 net/rxrpc/input.c 	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
call              264 net/rxrpc/input.c 	write_lock(&call->state_lock);
call              266 net/rxrpc/input.c 	state = call->state;
call              271 net/rxrpc/input.c 			call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
call              273 net/rxrpc/input.c 			call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
call              277 net/rxrpc/input.c 		__rxrpc_call_completed(call);
call              278 net/rxrpc/input.c 		rxrpc_notify_socket(call);
call              279 net/rxrpc/input.c 		state = call->state;
call              286 net/rxrpc/input.c 	write_unlock(&call->state_lock);
call              288 net/rxrpc/input.c 		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
call              290 net/rxrpc/input.c 		trace_rxrpc_transmit(call, rxrpc_transmit_end);
call              295 net/rxrpc/input.c 	write_unlock(&call->state_lock);
call              296 net/rxrpc/input.c 	kdebug("end_tx %s", rxrpc_call_states[call->state]);
call              297 net/rxrpc/input.c 	rxrpc_proto_abort(abort_why, call, call->tx_top);
call              304 net/rxrpc/input.c static bool rxrpc_receiving_reply(struct rxrpc_call *call)
call              308 net/rxrpc/input.c 	rxrpc_seq_t top = READ_ONCE(call->tx_top);
call              310 net/rxrpc/input.c 	if (call->ackr_reason) {
call              311 net/rxrpc/input.c 		spin_lock_bh(&call->lock);
call              312 net/rxrpc/input.c 		call->ackr_reason = 0;
call              313 net/rxrpc/input.c 		spin_unlock_bh(&call->lock);
call              316 net/rxrpc/input.c 		WRITE_ONCE(call->resend_at, timo);
call              317 net/rxrpc/input.c 		WRITE_ONCE(call->ack_at, timo);
call              318 net/rxrpc/input.c 		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
call              321 net/rxrpc/input.c 	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
call              322 net/rxrpc/input.c 		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
call              323 net/rxrpc/input.c 			rxrpc_proto_abort("TXL", call, top);
call              327 net/rxrpc/input.c 	if (!rxrpc_end_tx_phase(call, true, "ETD"))
call              329 net/rxrpc/input.c 	call->tx_phase = false;
call              391 net/rxrpc/input.c static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
call              403 net/rxrpc/input.c 		call->nr_jumbo_bad++;
call              412 net/rxrpc/input.c static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
call              423 net/rxrpc/input.c 	       call->rx_hard_ack, call->rx_top, skb->len, seq0);
call              428 net/rxrpc/input.c 	state = READ_ONCE(call->state);
call              434 net/rxrpc/input.c 	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
call              435 net/rxrpc/input.c 		unsigned long timo = READ_ONCE(call->next_req_timo);
call              441 net/rxrpc/input.c 			WRITE_ONCE(call->expect_req_by, expect_req_by);
call              442 net/rxrpc/input.c 			rxrpc_reduce_call_timer(call, expect_req_by, now,
call              447 net/rxrpc/input.c 	spin_lock(&call->input_lock);
call              454 net/rxrpc/input.c 	    !rxrpc_receiving_reply(call))
call              457 net/rxrpc/input.c 	call->ackr_prev_seq = seq0;
call              458 net/rxrpc/input.c 	hard_ack = READ_ONCE(call->rx_hard_ack);
call              462 net/rxrpc/input.c 		if (call->nr_jumbo_bad > 3) {
call              481 net/rxrpc/input.c 			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
call              482 net/rxrpc/input.c 			    seq != call->rx_top) {
call              483 net/rxrpc/input.c 				rxrpc_proto_abort("LSN", call, seq);
call              487 net/rxrpc/input.c 			if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
call              488 net/rxrpc/input.c 			    after_eq(seq, call->rx_top)) {
call              489 net/rxrpc/input.c 				rxrpc_proto_abort("LSA", call, seq);
call              501 net/rxrpc/input.c 		trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
call              509 net/rxrpc/input.c 		if (call->rxtx_buffer[ix]) {
call              510 net/rxrpc/input.c 			rxrpc_input_dup_data(call, seq, nr_subpackets > 1,
call              520 net/rxrpc/input.c 		if (after(seq, hard_ack + call->rx_winsize)) {
call              525 net/rxrpc/input.c 					call->nr_jumbo_bad++;
call              548 net/rxrpc/input.c 		call->rxtx_annotations[ix] = annotation;
call              550 net/rxrpc/input.c 		call->rxtx_buffer[ix] = skb;
call              551 net/rxrpc/input.c 		if (after(seq, call->rx_top)) {
call              552 net/rxrpc/input.c 			smp_store_release(&call->rx_top, seq);
call              553 net/rxrpc/input.c 		} else if (before(seq, call->rx_top)) {
call              572 net/rxrpc/input.c 			set_bit(RXRPC_CALL_RX_LAST, &call->flags);
call              577 net/rxrpc/input.c 			trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
call              579 net/rxrpc/input.c 			trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
call              582 net/rxrpc/input.c 		if (after_eq(seq, call->rx_expect_next)) {
call              583 net/rxrpc/input.c 			if (after(seq, call->rx_expect_next)) {
call              584 net/rxrpc/input.c 				_net("OOS %u > %u", seq, call->rx_expect_next);
call              588 net/rxrpc/input.c 			call->rx_expect_next = seq + 1;
call              594 net/rxrpc/input.c 		rxrpc_propose_ACK(call, ack, ack_serial,
call              598 net/rxrpc/input.c 		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
call              602 net/rxrpc/input.c 	trace_rxrpc_notify_socket(call->debug_id, serial);
call              603 net/rxrpc/input.c 	rxrpc_notify_socket(call);
call              606 net/rxrpc/input.c 	spin_unlock(&call->input_lock);
call              614 net/rxrpc/input.c static void rxrpc_input_requested_ack(struct rxrpc_call *call,
call              625 net/rxrpc/input.c 		skb = call->rxtx_buffer[ix];
call              640 net/rxrpc/input.c 	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
call              651 net/rxrpc/input.c static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
call              656 net/rxrpc/input.c 	spin_lock_bh(&call->lock);
call              658 net/rxrpc/input.c 	bottom = call->tx_hard_ack + 1;
call              659 net/rxrpc/input.c 	top = call->acks_lost_top;
call              663 net/rxrpc/input.c 			u8 annotation = call->rxtx_annotations[ix];
call              670 net/rxrpc/input.c 			call->rxtx_annotations[ix] = annotation;
call              675 net/rxrpc/input.c 	spin_unlock_bh(&call->lock);
call              677 net/rxrpc/input.c 	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
call              678 net/rxrpc/input.c 		rxrpc_queue_call(call);
call              684 net/rxrpc/input.c static void rxrpc_input_ping_response(struct rxrpc_call *call,
call              692 net/rxrpc/input.c 	ping_time = call->ping_time;
call              694 net/rxrpc/input.c 	ping_serial = READ_ONCE(call->ping_serial);
call              696 net/rxrpc/input.c 	if (orig_serial == call->acks_lost_ping)
call              697 net/rxrpc/input.c 		rxrpc_input_check_for_lost_ack(call);
call              700 net/rxrpc/input.c 	    !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
call              705 net/rxrpc/input.c 	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
call              712 net/rxrpc/input.c static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
call              726 net/rxrpc/input.c 	if (call->tx_winsize != rwind) {
call              729 net/rxrpc/input.c 		if (rwind > call->tx_winsize)
call              731 net/rxrpc/input.c 		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial,
call              733 net/rxrpc/input.c 		call->tx_winsize = rwind;
call              736 net/rxrpc/input.c 	if (call->cong_ssthresh > rwind)
call              737 net/rxrpc/input.c 		call->cong_ssthresh = rwind;
call              741 net/rxrpc/input.c 	peer = call->peer;
call              751 net/rxrpc/input.c 		wake_up(&call->waitq);
call              763 net/rxrpc/input.c static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
call              772 net/rxrpc/input.c 		annotation = call->rxtx_annotations[ix];
call              781 net/rxrpc/input.c 			call->rxtx_annotations[ix] =
call              786 net/rxrpc/input.c 			    call->acks_lowest_nak != seq) {
call              787 net/rxrpc/input.c 				call->acks_lowest_nak = seq;
call              796 net/rxrpc/input.c 			call->rxtx_annotations[ix] =
call              800 net/rxrpc/input.c 			return rxrpc_proto_abort("SFT", call, 0);
call              809 net/rxrpc/input.c static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
call              812 net/rxrpc/input.c 	rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);
call              820 net/rxrpc/input.c 	if (after_eq(prev_pkt, call->ackr_prev_seq))
call              824 net/rxrpc/input.c 	if (after_eq(prev_pkt, base + call->tx_winsize))
call              839 net/rxrpc/input.c static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
call              857 net/rxrpc/input.c 		return rxrpc_proto_abort("XAK", call, 0);
call              869 net/rxrpc/input.c 	trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
call              874 net/rxrpc/input.c 		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
call              877 net/rxrpc/input.c 		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
call              882 net/rxrpc/input.c 		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
call              886 net/rxrpc/input.c 		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
call              892 net/rxrpc/input.c 	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
call              893 net/rxrpc/input.c 		trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
call              894 net/rxrpc/input.c 					   first_soft_ack, call->ackr_first_seq,
call              895 net/rxrpc/input.c 					   prev_pkt, call->ackr_prev_seq);
call              903 net/rxrpc/input.c 		return rxrpc_proto_abort("XAI", call, 0);
call              905 net/rxrpc/input.c 	spin_lock(&call->input_lock);
call              908 net/rxrpc/input.c 	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
call              909 net/rxrpc/input.c 		trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
call              910 net/rxrpc/input.c 					   first_soft_ack, call->ackr_first_seq,
call              911 net/rxrpc/input.c 					   prev_pkt, call->ackr_prev_seq);
call              914 net/rxrpc/input.c 	call->acks_latest_ts = skb->tstamp;
call              916 net/rxrpc/input.c 	call->ackr_first_seq = first_soft_ack;
call              917 net/rxrpc/input.c 	call->ackr_prev_seq = prev_pkt;
call              921 net/rxrpc/input.c 		rxrpc_input_ackinfo(call, skb, &buf.info);
call              924 net/rxrpc/input.c 		rxrpc_proto_abort("AK0", call, 0);
call              929 net/rxrpc/input.c 	switch (READ_ONCE(call->state)) {
call              939 net/rxrpc/input.c 	if (before(hard_ack, call->tx_hard_ack) ||
call              940 net/rxrpc/input.c 	    after(hard_ack, call->tx_top)) {
call              941 net/rxrpc/input.c 		rxrpc_proto_abort("AKW", call, 0);
call              944 net/rxrpc/input.c 	if (nr_acks > call->tx_top - hard_ack) {
call              945 net/rxrpc/input.c 		rxrpc_proto_abort("AKN", call, 0);
call              949 net/rxrpc/input.c 	if (after(hard_ack, call->tx_hard_ack)) {
call              950 net/rxrpc/input.c 		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
call              951 net/rxrpc/input.c 			rxrpc_end_tx_phase(call, false, "ETA");
call              958 net/rxrpc/input.c 			rxrpc_proto_abort("XSA", call, 0);
call              961 net/rxrpc/input.c 		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
call              965 net/rxrpc/input.c 	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
call              967 net/rxrpc/input.c 	    summary.nr_acks == call->tx_top - hard_ack &&
call              968 net/rxrpc/input.c 	    rxrpc_is_client_call(call))
call              969 net/rxrpc/input.c 		rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
call              973 net/rxrpc/input.c 	rxrpc_congestion_management(call, skb, &summary, acked_serial);
call              975 net/rxrpc/input.c 	spin_unlock(&call->input_lock);
call              981 net/rxrpc/input.c static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
call              988 net/rxrpc/input.c 	spin_lock(&call->input_lock);
call              990 net/rxrpc/input.c 	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
call              991 net/rxrpc/input.c 		rxrpc_end_tx_phase(call, false, "ETL");
call              993 net/rxrpc/input.c 	spin_unlock(&call->input_lock);
call              999 net/rxrpc/input.c static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
call             1012 net/rxrpc/input.c 	trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);
call             1016 net/rxrpc/input.c 	if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
call             1018 net/rxrpc/input.c 		rxrpc_notify_socket(call);
call             1024 net/rxrpc/input.c static void rxrpc_input_call_packet(struct rxrpc_call *call,
call             1030 net/rxrpc/input.c 	_enter("%p,%p", call, skb);
call             1032 net/rxrpc/input.c 	timo = READ_ONCE(call->next_rx_timo);
call             1037 net/rxrpc/input.c 		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
call             1038 net/rxrpc/input.c 		rxrpc_reduce_call_timer(call, expect_rx_by, now,
call             1044 net/rxrpc/input.c 		rxrpc_input_data(call, skb);
call             1048 net/rxrpc/input.c 		rxrpc_input_ack(call, skb);
call             1061 net/rxrpc/input.c 		rxrpc_input_abort(call, skb);
call             1065 net/rxrpc/input.c 		rxrpc_input_ackall(call, skb);
call             1085 net/rxrpc/input.c 					  struct rxrpc_call *call)
call             1087 net/rxrpc/input.c 	switch (READ_ONCE(call->state)) {
call             1089 net/rxrpc/input.c 		rxrpc_call_completed(call);
call             1094 net/rxrpc/input.c 		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
call             1095 net/rxrpc/input.c 			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
call             1096 net/rxrpc/input.c 			rxrpc_queue_call(call);
call             1098 net/rxrpc/input.c 		trace_rxrpc_improper_term(call);
call             1103 net/rxrpc/input.c 	__rxrpc_disconnect_call(conn, call);
call             1105 net/rxrpc/input.c 	rxrpc_notify_socket(call);
call             1199 net/rxrpc/input.c 	struct rxrpc_call *call = NULL;
call             1360 net/rxrpc/input.c 			if (chan->call ||
call             1383 net/rxrpc/input.c 		call = rcu_dereference(chan->call);
call             1388 net/rxrpc/input.c 			if (call)
call             1389 net/rxrpc/input.c 				rxrpc_input_implicit_end_call(rx, conn, call);
call             1390 net/rxrpc/input.c 			call = NULL;
call             1393 net/rxrpc/input.c 		if (call) {
call             1394 net/rxrpc/input.c 			if (sp->hdr.serviceId != call->service_id)
call             1395 net/rxrpc/input.c 				call->service_id = sp->hdr.serviceId;
call             1396 net/rxrpc/input.c 			if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
call             1397 net/rxrpc/input.c 				call->rx_serial = sp->hdr.serial;
call             1398 net/rxrpc/input.c 			if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
call             1399 net/rxrpc/input.c 				set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
call             1403 net/rxrpc/input.c 	if (!call || atomic_read(&call->usage) == 0) {
call             1409 net/rxrpc/input.c 		call = rxrpc_new_incoming_call(local, rx, skb);
call             1410 net/rxrpc/input.c 		if (!call)
call             1417 net/rxrpc/input.c 	rxrpc_input_call_packet(call, skb);
call               21 net/rxrpc/insecure.c static int none_secure_packet(struct rxrpc_call *call,
call               29 net/rxrpc/insecure.c static int none_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
call               36 net/rxrpc/insecure.c static void none_free_call_crypto(struct rxrpc_call *call)
call               40 net/rxrpc/insecure.c static void none_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
call               36 net/rxrpc/output.c static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
call               39 net/rxrpc/output.c 		u16 tx_backoff = READ_ONCE(call->tx_backoff);
call               42 net/rxrpc/output.c 			WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
call               44 net/rxrpc/output.c 		WRITE_ONCE(call->tx_backoff, 0);
call               56 net/rxrpc/output.c static void rxrpc_set_keepalive(struct rxrpc_call *call)
call               58 net/rxrpc/output.c 	unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
call               61 net/rxrpc/output.c 	WRITE_ONCE(call->keepalive_at, keepalive_at);
call               62 net/rxrpc/output.c 	rxrpc_reduce_call_timer(call, keepalive_at, now,
call               70 net/rxrpc/output.c 				 struct rxrpc_call *call,
call               83 net/rxrpc/output.c 	serial = call->ackr_serial;
call               84 net/rxrpc/output.c 	hard_ack = READ_ONCE(call->rx_hard_ack);
call               85 net/rxrpc/output.c 	top = smp_load_acquire(&call->rx_top);
call               92 net/rxrpc/output.c 	pkt->ack.previousPacket	= htonl(call->ackr_prev_seq);
call              104 net/rxrpc/output.c 			if (call->rxtx_buffer[ix])
call              114 net/rxrpc/output.c 	jmax = (call->nr_jumbo_bad > 3) ? 1 : rxrpc_rx_jumbo_max;
call              117 net/rxrpc/output.c 	pkt->ackinfo.rwind	= htonl(call->rx_winsize);
call              129 net/rxrpc/output.c int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
call              142 net/rxrpc/output.c 	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
call              149 net/rxrpc/output.c 	conn = call->conn;
call              151 net/rxrpc/output.c 	msg.msg_name	= &call->peer->srx.transport;
call              152 net/rxrpc/output.c 	msg.msg_namelen	= call->peer->srx.transport_len;
call              158 net/rxrpc/output.c 	pkt->whdr.cid		= htonl(call->cid);
call              159 net/rxrpc/output.c 	pkt->whdr.callNumber	= htonl(call->call_id);
call              164 net/rxrpc/output.c 	pkt->whdr.securityIndex	= call->security_ix;
call              166 net/rxrpc/output.c 	pkt->whdr.serviceId	= htons(call->service_id);
call              168 net/rxrpc/output.c 	spin_lock_bh(&call->lock);
call              172 net/rxrpc/output.c 		reason = call->ackr_reason;
call              173 net/rxrpc/output.c 		if (!call->ackr_reason) {
call              174 net/rxrpc/output.c 			spin_unlock_bh(&call->lock);
call              178 net/rxrpc/output.c 		call->ackr_reason = 0;
call              180 net/rxrpc/output.c 	n = rxrpc_fill_out_ack(conn, call, pkt, &hard_ack, &top, reason);
call              182 net/rxrpc/output.c 	spin_unlock_bh(&call->lock);
call              192 net/rxrpc/output.c 	trace_rxrpc_tx_ack(call->debug_id, serial,
call              200 net/rxrpc/output.c 		call->ping_serial = serial;
call              207 net/rxrpc/output.c 		call->ping_time = ktime_get_real();
call              208 net/rxrpc/output.c 		set_bit(RXRPC_CALL_PINGING, &call->flags);
call              209 net/rxrpc/output.c 		trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_ping, serial);
call              215 net/rxrpc/output.c 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
call              218 net/rxrpc/output.c 		trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
call              220 net/rxrpc/output.c 	rxrpc_tx_backoff(call, ret);
call              222 net/rxrpc/output.c 	if (call->state < RXRPC_CALL_COMPLETE) {
call              225 net/rxrpc/output.c 				clear_bit(RXRPC_CALL_PINGING, &call->flags);
call              226 net/rxrpc/output.c 			rxrpc_propose_ACK(call, pkt->ack.reason,
call              231 net/rxrpc/output.c 			spin_lock_bh(&call->lock);
call              232 net/rxrpc/output.c 			if (after(hard_ack, call->ackr_consumed))
call              233 net/rxrpc/output.c 				call->ackr_consumed = hard_ack;
call              234 net/rxrpc/output.c 			if (after(top, call->ackr_seen))
call              235 net/rxrpc/output.c 				call->ackr_seen = top;
call              236 net/rxrpc/output.c 			spin_unlock_bh(&call->lock);
call              239 net/rxrpc/output.c 		rxrpc_set_keepalive(call);
call              250 net/rxrpc/output.c int rxrpc_send_abort_packet(struct rxrpc_call *call)
call              265 net/rxrpc/output.c 	if (rxrpc_is_client_call(call) &&
call              266 net/rxrpc/output.c 	    test_bit(RXRPC_CALL_TX_LAST, &call->flags))
call              269 net/rxrpc/output.c 	if (test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
call              272 net/rxrpc/output.c 	conn = call->conn;
call              274 net/rxrpc/output.c 	msg.msg_name	= &call->peer->srx.transport;
call              275 net/rxrpc/output.c 	msg.msg_namelen	= call->peer->srx.transport_len;
call              281 net/rxrpc/output.c 	pkt.whdr.cid		= htonl(call->cid);
call              282 net/rxrpc/output.c 	pkt.whdr.callNumber	= htonl(call->call_id);
call              287 net/rxrpc/output.c 	pkt.whdr.securityIndex	= call->security_ix;
call              289 net/rxrpc/output.c 	pkt.whdr.serviceId	= htons(call->service_id);
call              290 net/rxrpc/output.c 	pkt.abort_code		= htonl(call->abort_code);
call              302 net/rxrpc/output.c 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
call              305 net/rxrpc/output.c 		trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
call              307 net/rxrpc/output.c 	rxrpc_tx_backoff(call, ret);
call              314 net/rxrpc/output.c int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
call              317 net/rxrpc/output.c 	struct rxrpc_connection *conn = call->conn;
call              332 net/rxrpc/output.c 	whdr.cid	= htonl(call->cid);
call              333 net/rxrpc/output.c 	whdr.callNumber	= htonl(call->call_id);
call              339 net/rxrpc/output.c 	whdr.securityIndex = call->security_ix;
call              341 net/rxrpc/output.c 	whdr.serviceId	= htons(call->service_id);
call              353 net/rxrpc/output.c 	msg.msg_name = &call->peer->srx.transport;
call              354 net/rxrpc/output.c 	msg.msg_namelen = call->peer->srx.transport_len;
call              369 net/rxrpc/output.c 	    (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
call              371 net/rxrpc/output.c 	     call->cong_mode == RXRPC_CALL_SLOW_START ||
call              372 net/rxrpc/output.c 	     (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
call              373 net/rxrpc/output.c 	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
call              381 net/rxrpc/output.c 			trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
call              387 net/rxrpc/output.c 	trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
call              392 net/rxrpc/output.c 	if (iov[1].iov_len >= call->peer->maxdata)
call              412 net/rxrpc/output.c 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
call              415 net/rxrpc/output.c 		trace_rxrpc_tx_packet(call->debug_id, &whdr,
call              417 net/rxrpc/output.c 	rxrpc_tx_backoff(call, ret);
call              424 net/rxrpc/output.c 			call->peer->rtt_last_req = skb->tstamp;
call              425 net/rxrpc/output.c 			trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
call              426 net/rxrpc/output.c 			if (call->peer->rtt_count > 1) {
call              429 net/rxrpc/output.c 				ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
call              431 net/rxrpc/output.c 				WRITE_ONCE(call->ack_lost_at, ack_lost_at);
call              432 net/rxrpc/output.c 				rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
call              439 net/rxrpc/output.c 				      &call->flags)) {
call              442 net/rxrpc/output.c 			expect_rx_by = nowj + call->next_rx_timo;
call              443 net/rxrpc/output.c 			WRITE_ONCE(call->expect_rx_by, expect_rx_by);
call              444 net/rxrpc/output.c 			rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
call              448 net/rxrpc/output.c 		rxrpc_set_keepalive(call);
call              455 net/rxrpc/output.c 		if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
call              456 net/rxrpc/output.c 			rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
call              460 net/rxrpc/output.c 	_leave(" = %d [%u]", ret, call->peer->maxdata);
call              495 net/rxrpc/output.c 		trace_rxrpc_tx_fail(call->debug_id, serial, ret,
call              498 net/rxrpc/output.c 		trace_rxrpc_tx_packet(call->debug_id, &whdr,
call              500 net/rxrpc/output.c 	rxrpc_tx_backoff(call, ret);
call              288 net/rxrpc/peer_event.c 	struct rxrpc_call *call;
call              290 net/rxrpc/peer_event.c 	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
call              291 net/rxrpc/peer_event.c 		rxrpc_see_call(call);
call              292 net/rxrpc/peer_event.c 		if (call->state < RXRPC_CALL_COMPLETE &&
call              293 net/rxrpc/peer_event.c 		    rxrpc_set_call_completion(call, compl, 0, -error))
call              294 net/rxrpc/peer_event.c 			rxrpc_notify_socket(call);
call              492 net/rxrpc/peer_object.c void rxrpc_kernel_get_peer(struct socket *sock, struct rxrpc_call *call,
call              495 net/rxrpc/peer_object.c 	*_srx = call->peer->srx;
call              506 net/rxrpc/peer_object.c u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call)
call              508 net/rxrpc/peer_object.c 	return call->peer->srtt_us >> 3;
call               60 net/rxrpc/proc.c 	struct rxrpc_call *call;
call               75 net/rxrpc/proc.c 	call = list_entry(v, struct rxrpc_call, link);
call               77 net/rxrpc/proc.c 	rx = rcu_dereference(call->socket);
call               88 net/rxrpc/proc.c 	peer = call->peer;
call               94 net/rxrpc/proc.c 	if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
call               95 net/rxrpc/proc.c 		timeout = READ_ONCE(call->expect_rx_by);
call               99 net/rxrpc/proc.c 	tx_hard_ack = READ_ONCE(call->tx_hard_ack);
call              100 net/rxrpc/proc.c 	rx_hard_ack = READ_ONCE(call->rx_hard_ack);
call              106 net/rxrpc/proc.c 		   call->service_id,
call              107 net/rxrpc/proc.c 		   call->cid,
call              108 net/rxrpc/proc.c 		   call->call_id,
call              109 net/rxrpc/proc.c 		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
call              110 net/rxrpc/proc.c 		   atomic_read(&call->usage),
call              111 net/rxrpc/proc.c 		   rxrpc_call_states[call->state],
call              112 net/rxrpc/proc.c 		   call->abort_code,
call              113 net/rxrpc/proc.c 		   call->user_call_ID,
call              114 net/rxrpc/proc.c 		   tx_hard_ack, READ_ONCE(call->tx_top) - tx_hard_ack,
call              115 net/rxrpc/proc.c 		   rx_hard_ack, READ_ONCE(call->rx_top) - rx_hard_ack,
call              116 net/rxrpc/proc.c 		   call->rx_serial,
call               23 net/rxrpc/recvmsg.c void rxrpc_notify_socket(struct rxrpc_call *call)
call               28 net/rxrpc/recvmsg.c 	_enter("%d", call->debug_id);
call               30 net/rxrpc/recvmsg.c 	if (!list_empty(&call->recvmsg_link))
call               35 net/rxrpc/recvmsg.c 	rx = rcu_dereference(call->socket);
call               38 net/rxrpc/recvmsg.c 		if (call->notify_rx) {
call               39 net/rxrpc/recvmsg.c 			spin_lock_bh(&call->notify_lock);
call               40 net/rxrpc/recvmsg.c 			call->notify_rx(sk, call, call->user_call_ID);
call               41 net/rxrpc/recvmsg.c 			spin_unlock_bh(&call->notify_lock);
call               44 net/rxrpc/recvmsg.c 			if (list_empty(&call->recvmsg_link)) {
call               45 net/rxrpc/recvmsg.c 				rxrpc_get_call(call, rxrpc_call_got);
call               46 net/rxrpc/recvmsg.c 				list_add_tail(&call->recvmsg_link, &rx->recvmsg_q);
call               64 net/rxrpc/recvmsg.c static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
call               69 net/rxrpc/recvmsg.c 	switch (call->completion) {
call               72 net/rxrpc/recvmsg.c 		if (rxrpc_is_service_call(call))
call               76 net/rxrpc/recvmsg.c 		tmp = call->abort_code;
call               80 net/rxrpc/recvmsg.c 		tmp = call->abort_code;
call               84 net/rxrpc/recvmsg.c 		tmp = -call->error;
call               88 net/rxrpc/recvmsg.c 		tmp = -call->error;
call               92 net/rxrpc/recvmsg.c 		pr_err("Invalid terminal call state %u\n", call->state);
call               97 net/rxrpc/recvmsg.c 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_terminal, call->rx_hard_ack,
call               98 net/rxrpc/recvmsg.c 			    call->rx_pkt_offset, call->rx_pkt_len, ret);
call              110 net/rxrpc/recvmsg.c 				  struct rxrpc_call *call,
call              120 net/rxrpc/recvmsg.c 		list_del_init(&call->recvmsg_link);
call              123 net/rxrpc/recvmsg.c 		rxrpc_get_call(call, rxrpc_call_got);
call              125 net/rxrpc/recvmsg.c 		list_add_tail(&call->accept_link, &rx->to_be_accepted);
call              129 net/rxrpc/recvmsg.c 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret);
call              136 net/rxrpc/recvmsg.c static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
call              138 net/rxrpc/recvmsg.c 	_enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
call              140 net/rxrpc/recvmsg.c 	trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
call              141 net/rxrpc/recvmsg.c 	ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
call              143 net/rxrpc/recvmsg.c 	if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
call              144 net/rxrpc/recvmsg.c 		rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, serial, false, true,
call              149 net/rxrpc/recvmsg.c 	write_lock_bh(&call->state_lock);
call              151 net/rxrpc/recvmsg.c 	switch (call->state) {
call              153 net/rxrpc/recvmsg.c 		__rxrpc_call_completed(call);
call              154 net/rxrpc/recvmsg.c 		write_unlock_bh(&call->state_lock);
call              158 net/rxrpc/recvmsg.c 		call->tx_phase = true;
call              159 net/rxrpc/recvmsg.c 		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
call              160 net/rxrpc/recvmsg.c 		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
call              161 net/rxrpc/recvmsg.c 		write_unlock_bh(&call->state_lock);
call              162 net/rxrpc/recvmsg.c 		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial, false, true,
call              166 net/rxrpc/recvmsg.c 		write_unlock_bh(&call->state_lock);
call              174 net/rxrpc/recvmsg.c static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
call              184 net/rxrpc/recvmsg.c 	_enter("%d", call->debug_id);
call              186 net/rxrpc/recvmsg.c 	hard_ack = call->rx_hard_ack;
call              187 net/rxrpc/recvmsg.c 	top = smp_load_acquire(&call->rx_top);
call              192 net/rxrpc/recvmsg.c 	skb = call->rxtx_buffer[ix];
call              196 net/rxrpc/recvmsg.c 	subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
call              203 net/rxrpc/recvmsg.c 	call->rxtx_buffer[ix] = NULL;
call              204 net/rxrpc/recvmsg.c 	call->rxtx_annotations[ix] = 0;
call              206 net/rxrpc/recvmsg.c 	smp_store_release(&call->rx_hard_ack, hard_ack);
call              210 net/rxrpc/recvmsg.c 	trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack);
call              212 net/rxrpc/recvmsg.c 		rxrpc_end_rx_phase(call, serial);
call              215 net/rxrpc/recvmsg.c 		if (after_eq(hard_ack, call->ackr_consumed + 2) ||
call              216 net/rxrpc/recvmsg.c 		    after_eq(top, call->ackr_seen + 2) ||
call              217 net/rxrpc/recvmsg.c 		    (hard_ack == top && after(hard_ack, call->ackr_consumed)))
call              218 net/rxrpc/recvmsg.c 			rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
call              221 net/rxrpc/recvmsg.c 		if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
call              222 net/rxrpc/recvmsg.c 			rxrpc_send_ack_packet(call, false, NULL);
call              232 net/rxrpc/recvmsg.c static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
call              254 net/rxrpc/recvmsg.c 	return call->security->verify_packet(call, skb, offset, len,
call              268 net/rxrpc/recvmsg.c static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
call              290 net/rxrpc/recvmsg.c 		ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
call              299 net/rxrpc/recvmsg.c 	call->security->locate_data(call, skb, _offset, _len);
call              308 net/rxrpc/recvmsg.c static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
call              321 net/rxrpc/recvmsg.c 	if (test_and_clear_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags) &&
call              322 net/rxrpc/recvmsg.c 	    call->ackr_reason)
call              323 net/rxrpc/recvmsg.c 		rxrpc_send_ack_packet(call, false, NULL);
call              325 net/rxrpc/recvmsg.c 	rx_pkt_offset = call->rx_pkt_offset;
call              326 net/rxrpc/recvmsg.c 	rx_pkt_len = call->rx_pkt_len;
call              327 net/rxrpc/recvmsg.c 	rx_pkt_last = call->rx_pkt_last;
call              329 net/rxrpc/recvmsg.c 	if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
call              330 net/rxrpc/recvmsg.c 		seq = call->rx_hard_ack;
call              336 net/rxrpc/recvmsg.c 	hard_ack = call->rx_hard_ack;
call              339 net/rxrpc/recvmsg.c 	while (top = smp_load_acquire(&call->rx_top),
call              343 net/rxrpc/recvmsg.c 		skb = call->rxtx_buffer[ix];
call              345 net/rxrpc/recvmsg.c 			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_hole, seq,
call              355 net/rxrpc/recvmsg.c 			serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET;
call              356 net/rxrpc/recvmsg.c 			trace_rxrpc_receive(call, rxrpc_receive_front,
call              364 net/rxrpc/recvmsg.c 			ret2 = rxrpc_locate_data(call, skb,
call              365 net/rxrpc/recvmsg.c 						 &call->rxtx_annotations[ix],
call              368 net/rxrpc/recvmsg.c 			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
call              375 net/rxrpc/recvmsg.c 			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_cont, seq,
call              399 net/rxrpc/recvmsg.c 			trace_rxrpc_recvmsg(call, rxrpc_recvmsg_full, seq,
call              408 net/rxrpc/recvmsg.c 			rxrpc_rotate_rx_window(call);
call              413 net/rxrpc/recvmsg.c 			ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
call              423 net/rxrpc/recvmsg.c 		call->rx_pkt_offset = rx_pkt_offset;
call              424 net/rxrpc/recvmsg.c 		call->rx_pkt_len = rx_pkt_len;
call              425 net/rxrpc/recvmsg.c 		call->rx_pkt_last = rx_pkt_last;
call              428 net/rxrpc/recvmsg.c 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
call              431 net/rxrpc/recvmsg.c 		set_bit(RXRPC_CALL_RX_UNDERRUN, &call->flags);
call              443 net/rxrpc/recvmsg.c 	struct rxrpc_call *call;
call              473 net/rxrpc/recvmsg.c 			call = NULL;
call              502 net/rxrpc/recvmsg.c 	call = list_entry(l, struct rxrpc_call, recvmsg_link);
call              504 net/rxrpc/recvmsg.c 		list_del_init(&call->recvmsg_link);
call              506 net/rxrpc/recvmsg.c 		rxrpc_get_call(call, rxrpc_call_got);
call              509 net/rxrpc/recvmsg.c 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0, 0, 0, 0);
call              514 net/rxrpc/recvmsg.c 	if (!mutex_trylock(&call->user_mutex)) {
call              519 net/rxrpc/recvmsg.c 		if (mutex_lock_interruptible(&call->user_mutex) < 0)
call              525 net/rxrpc/recvmsg.c 	if (test_bit(RXRPC_CALL_RELEASED, &call->flags))
call              528 net/rxrpc/recvmsg.c 	if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
call              530 net/rxrpc/recvmsg.c 			unsigned int id32 = call->user_call_ID;
call              535 net/rxrpc/recvmsg.c 			unsigned long idl = call->user_call_ID;
call              546 net/rxrpc/recvmsg.c 		size_t len = sizeof(call->peer->srx);
call              548 net/rxrpc/recvmsg.c 		memcpy(msg->msg_name, &call->peer->srx, len);
call              549 net/rxrpc/recvmsg.c 		srx->srx_service = call->service_id;
call              553 net/rxrpc/recvmsg.c 	switch (READ_ONCE(call->state)) {
call              555 net/rxrpc/recvmsg.c 		ret = rxrpc_recvmsg_new_call(rx, call, msg, flags);
call              560 net/rxrpc/recvmsg.c 		ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
call              565 net/rxrpc/recvmsg.c 		if (after(call->rx_top, call->rx_hard_ack) &&
call              566 net/rxrpc/recvmsg.c 		    call->rxtx_buffer[(call->rx_hard_ack + 1) & RXRPC_RXTX_BUFF_MASK])
call              567 net/rxrpc/recvmsg.c 			rxrpc_notify_socket(call);
call              577 net/rxrpc/recvmsg.c 	if (call->state == RXRPC_CALL_COMPLETE) {
call              578 net/rxrpc/recvmsg.c 		ret = rxrpc_recvmsg_term(call, msg);
call              582 net/rxrpc/recvmsg.c 			rxrpc_release_call(rx, call);
call              594 net/rxrpc/recvmsg.c 	mutex_unlock(&call->user_mutex);
call              595 net/rxrpc/recvmsg.c 	rxrpc_put_call(call, rxrpc_call_put);
call              596 net/rxrpc/recvmsg.c 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
call              602 net/rxrpc/recvmsg.c 		list_add(&call->recvmsg_link, &rx->recvmsg_q);
call              604 net/rxrpc/recvmsg.c 		trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0, 0, 0, 0);
call              606 net/rxrpc/recvmsg.c 		rxrpc_put_call(call, rxrpc_call_put);
call              611 net/rxrpc/recvmsg.c 	trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
call              618 net/rxrpc/recvmsg.c 	call = NULL;
call              641 net/rxrpc/recvmsg.c int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
call              649 net/rxrpc/recvmsg.c 	       call->debug_id, rxrpc_call_states[call->state],
call              652 net/rxrpc/recvmsg.c 	ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING);
call              654 net/rxrpc/recvmsg.c 	mutex_lock(&call->user_mutex);
call              656 net/rxrpc/recvmsg.c 	switch (READ_ONCE(call->state)) {
call              660 net/rxrpc/recvmsg.c 		ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
call              694 net/rxrpc/recvmsg.c 	switch (call->ackr_reason) {
call              702 net/rxrpc/recvmsg.c 		rxrpc_send_ack_packet(call, false, NULL);
call              706 net/rxrpc/recvmsg.c 		*_service = call->service_id;
call              707 net/rxrpc/recvmsg.c 	mutex_unlock(&call->user_mutex);
call              712 net/rxrpc/recvmsg.c 	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
call              716 net/rxrpc/recvmsg.c 	trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
call              720 net/rxrpc/recvmsg.c 	*_abort = call->abort_code;
call              721 net/rxrpc/recvmsg.c 	ret = call->error;
call              722 net/rxrpc/recvmsg.c 	if (call->completion == RXRPC_CALL_SUCCEEDED) {
call              740 net/rxrpc/recvmsg.c bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
call              747 net/rxrpc/recvmsg.c 	mutex_lock(&call->user_mutex);
call              749 net/rxrpc/recvmsg.c 	if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
call              752 net/rxrpc/recvmsg.c 	hard_ack = call->rx_hard_ack;
call              757 net/rxrpc/recvmsg.c 	top = smp_load_acquire(&call->rx_top);
call              761 net/rxrpc/recvmsg.c 	skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
call              769 net/rxrpc/recvmsg.c 	mutex_unlock(&call->user_mutex);
call              148 net/rxrpc/rtt.c void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
call              152 net/rxrpc/rtt.c 	struct rxrpc_peer *peer = call->peer;
call              165 net/rxrpc/rtt.c 	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial,
call              150 net/rxrpc/rxkad.c static struct skcipher_request *rxkad_get_call_crypto(struct rxrpc_call *call)
call              152 net/rxrpc/rxkad.c 	struct crypto_skcipher *tfm = &call->conn->cipher->base;
call              153 net/rxrpc/rxkad.c 	struct skcipher_request	*cipher_req = call->cipher_req;
call              159 net/rxrpc/rxkad.c 		call->cipher_req = cipher_req;
call              168 net/rxrpc/rxkad.c static void rxkad_free_call_crypto(struct rxrpc_call *call)
call              170 net/rxrpc/rxkad.c 	if (call->cipher_req)
call              171 net/rxrpc/rxkad.c 		skcipher_request_free(call->cipher_req);
call              172 net/rxrpc/rxkad.c 	call->cipher_req = NULL;
call              178 net/rxrpc/rxkad.c static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
call              192 net/rxrpc/rxkad.c 	check = sp->hdr.seq ^ call->call_id;
call              202 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
call              215 net/rxrpc/rxkad.c static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
call              234 net/rxrpc/rxkad.c 	check = sp->hdr.seq ^ call->call_id;
call              241 net/rxrpc/rxkad.c 	token = call->conn->params.key->payload.data[0];
call              245 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
call              255 net/rxrpc/rxkad.c 	len = data_size + call->conn->size_align - 1;
call              256 net/rxrpc/rxkad.c 	len &= ~(call->conn->size_align - 1);
call              276 net/rxrpc/rxkad.c static int rxkad_secure_packet(struct rxrpc_call *call,
call              291 net/rxrpc/rxkad.c 	       call->debug_id, key_serial(call->conn->params.key),
call              294 net/rxrpc/rxkad.c 	if (!call->conn->cipher)
call              297 net/rxrpc/rxkad.c 	ret = key_validate(call->conn->params.key);
call              301 net/rxrpc/rxkad.c 	req = rxkad_get_call_crypto(call);
call              306 net/rxrpc/rxkad.c 	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
call              309 net/rxrpc/rxkad.c 	x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
call              311 net/rxrpc/rxkad.c 	call->crypto_buf[0] = htonl(call->call_id);
call              312 net/rxrpc/rxkad.c 	call->crypto_buf[1] = htonl(x);
call              314 net/rxrpc/rxkad.c 	sg_init_one(&sg, call->crypto_buf, 8);
call              315 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
call              321 net/rxrpc/rxkad.c 	y = ntohl(call->crypto_buf[1]);
call              327 net/rxrpc/rxkad.c 	switch (call->conn->params.security_level) {
call              332 net/rxrpc/rxkad.c 		ret = rxkad_secure_packet_auth(call, skb, data_size, sechdr,
call              336 net/rxrpc/rxkad.c 		ret = rxkad_secure_packet_encrypt(call, skb, data_size,
call              351 net/rxrpc/rxkad.c static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
call              367 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
call              383 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
call              391 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
call              402 net/rxrpc/rxkad.c 	check ^= seq ^ call->call_id;
call              405 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C",
call              411 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
call              421 net/rxrpc/rxkad.c 		rxrpc_send_abort_packet(call);
call              428 net/rxrpc/rxkad.c static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
call              445 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
call              472 net/rxrpc/rxkad.c 	token = call->conn->params.key->payload.data[0];
call              475 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
call              485 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
call              496 net/rxrpc/rxkad.c 	check ^= seq ^ call->call_id;
call              499 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C",
call              505 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
call              515 net/rxrpc/rxkad.c 		rxrpc_send_abort_packet(call);
call              527 net/rxrpc/rxkad.c static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
call              539 net/rxrpc/rxkad.c 	       call->debug_id, key_serial(call->conn->params.key), seq);
call              541 net/rxrpc/rxkad.c 	if (!call->conn->cipher)
call              544 net/rxrpc/rxkad.c 	req = rxkad_get_call_crypto(call);
call              549 net/rxrpc/rxkad.c 	memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
call              552 net/rxrpc/rxkad.c 	x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
call              554 net/rxrpc/rxkad.c 	call->crypto_buf[0] = htonl(call->call_id);
call              555 net/rxrpc/rxkad.c 	call->crypto_buf[1] = htonl(x);
call              557 net/rxrpc/rxkad.c 	sg_init_one(&sg, call->crypto_buf, 8);
call              558 net/rxrpc/rxkad.c 	skcipher_request_set_sync_tfm(req, call->conn->cipher);
call              564 net/rxrpc/rxkad.c 	y = ntohl(call->crypto_buf[1]);
call              570 net/rxrpc/rxkad.c 		aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
call              575 net/rxrpc/rxkad.c 	switch (call->conn->params.security_level) {
call              579 net/rxrpc/rxkad.c 		return rxkad_verify_packet_1(call, skb, offset, len, seq, req);
call              581 net/rxrpc/rxkad.c 		return rxkad_verify_packet_2(call, skb, offset, len, seq, req);
call              588 net/rxrpc/rxkad.c 		rxrpc_send_abort_packet(call);
call              595 net/rxrpc/rxkad.c static void rxkad_locate_data_1(struct rxrpc_call *call, struct sk_buff *skb,
call              609 net/rxrpc/rxkad.c static void rxkad_locate_data_2(struct rxrpc_call *call, struct sk_buff *skb,
call              623 net/rxrpc/rxkad.c static void rxkad_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
call              626 net/rxrpc/rxkad.c 	switch (call->conn->params.security_level) {
call              628 net/rxrpc/rxkad.c 		rxkad_locate_data_1(call, skb, _offset, _len);
call              631 net/rxrpc/rxkad.c 		rxkad_locate_data_2(call, skb, _offset, _len);
call             1174 net/rxrpc/rxkad.c 		struct rxrpc_call *call;
call             1187 net/rxrpc/rxkad.c 			call = rcu_dereference_protected(
call             1188 net/rxrpc/rxkad.c 				conn->channels[i].call,
call             1190 net/rxrpc/rxkad.c 			if (call && call->state < RXRPC_CALL_COMPLETE)
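
The net/rxrpc/rxkad.c entries index the packet secure/verify paths, where a check word is derived from seq ^ call_id and the encrypted payload length is rounded up to the connection's power-of-two size_align. A hedged user-space sketch of just that arithmetic follows; round_up_pow2 and the sample values are illustrative, not kernel code.

/* Sketch of the two bits of arithmetic visible in the rxkad entries:
 * - a check word built from (sequence XOR call ID)
 * - a payload length rounded up to a power-of-two alignment
 * Names and values are illustrative only. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round len up to the next multiple of align; align must be a power of two. */
static uint32_t round_up_pow2(uint32_t len, uint32_t align)
{
        assert(align && (align & (align - 1)) == 0);
        return (len + align - 1) & ~(align - 1);
}

int main(void)
{
        uint32_t seq = 5, call_id = 0x1234;
        uint16_t check = (uint16_t)(seq ^ call_id);     /* sender side */
        uint32_t data_size = 100, size_align = 8;

        printf("check word: 0x%04x\n", check);
        printf("padded len: %u\n",
               (unsigned)round_up_pow2(data_size, size_align)); /* 104 */

        /* receiver side: xoring the same values back cancels to zero */
        assert((uint16_t)(check ^ seq ^ call_id) == 0);
        return 0;
}
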
call               23 net/rxrpc/sendmsg.c static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win)
call               26 net/rxrpc/sendmsg.c 		min_t(unsigned int, call->tx_winsize,
call               27 net/rxrpc/sendmsg.c 		      call->cong_cwnd + call->cong_extra);
call               28 net/rxrpc/sendmsg.c 	rxrpc_seq_t tx_win = READ_ONCE(call->tx_hard_ack);
call               32 net/rxrpc/sendmsg.c 	return call->tx_top - tx_win < win_size;
call               39 net/rxrpc/sendmsg.c 					 struct rxrpc_call *call,
call               44 net/rxrpc/sendmsg.c 		if (rxrpc_check_tx_space(call, NULL))
call               47 net/rxrpc/sendmsg.c 		if (call->state >= RXRPC_CALL_COMPLETE)
call               48 net/rxrpc/sendmsg.c 			return call->error;
call               53 net/rxrpc/sendmsg.c 		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
call               54 net/rxrpc/sendmsg.c 		mutex_unlock(&call->user_mutex);
call               56 net/rxrpc/sendmsg.c 		if (mutex_lock_interruptible(&call->user_mutex) < 0)
call               66 net/rxrpc/sendmsg.c 					    struct rxrpc_call *call)
call               71 net/rxrpc/sendmsg.c 	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
call               77 net/rxrpc/sendmsg.c 	tx_start = READ_ONCE(call->tx_hard_ack);
call               82 net/rxrpc/sendmsg.c 		tx_win = READ_ONCE(call->tx_hard_ack);
call               83 net/rxrpc/sendmsg.c 		if (rxrpc_check_tx_space(call, &tx_win))
call               86 net/rxrpc/sendmsg.c 		if (call->state >= RXRPC_CALL_COMPLETE)
call               87 net/rxrpc/sendmsg.c 			return call->error;
call               98 net/rxrpc/sendmsg.c 		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
call              107 net/rxrpc/sendmsg.c 					    struct rxrpc_call *call,
call              112 net/rxrpc/sendmsg.c 		if (rxrpc_check_tx_space(call, NULL))
call              115 net/rxrpc/sendmsg.c 		if (call->state >= RXRPC_CALL_COMPLETE)
call              116 net/rxrpc/sendmsg.c 			return call->error;
call              118 net/rxrpc/sendmsg.c 		trace_rxrpc_transmit(call, rxrpc_transmit_wait);
call              128 net/rxrpc/sendmsg.c 				    struct rxrpc_call *call,
call              136 net/rxrpc/sendmsg.c 	       call->tx_hard_ack, call->tx_top, call->tx_winsize);
call              138 net/rxrpc/sendmsg.c 	add_wait_queue(&call->waitq, &myself);
call              140 net/rxrpc/sendmsg.c 	switch (call->interruptibility) {
call              143 net/rxrpc/sendmsg.c 			ret = rxrpc_wait_for_tx_window_waitall(rx, call);
call              145 net/rxrpc/sendmsg.c 			ret = rxrpc_wait_for_tx_window_intr(rx, call, timeo);
call              150 net/rxrpc/sendmsg.c 		ret = rxrpc_wait_for_tx_window_nonintr(rx, call, timeo);
call              154 net/rxrpc/sendmsg.c 	remove_wait_queue(&call->waitq, &myself);
call              163 net/rxrpc/sendmsg.c static inline void rxrpc_instant_resend(struct rxrpc_call *call, int ix)
call              165 net/rxrpc/sendmsg.c 	spin_lock_bh(&call->lock);
call              167 net/rxrpc/sendmsg.c 	if (call->state < RXRPC_CALL_COMPLETE) {
call              168 net/rxrpc/sendmsg.c 		call->rxtx_annotations[ix] =
call              169 net/rxrpc/sendmsg.c 			(call->rxtx_annotations[ix] & RXRPC_TX_ANNO_LAST) |
call              171 net/rxrpc/sendmsg.c 		if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
call              172 net/rxrpc/sendmsg.c 			rxrpc_queue_call(call);
call              175 net/rxrpc/sendmsg.c 	spin_unlock_bh(&call->lock);
call              182 net/rxrpc/sendmsg.c static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
call              186 net/rxrpc/sendmsg.c 		notify_end_tx(&rx->sk, call, call->user_call_ID);
call              194 net/rxrpc/sendmsg.c static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
call              206 net/rxrpc/sendmsg.c 	ASSERTCMP(seq, ==, call->tx_top + 1);
call              218 net/rxrpc/sendmsg.c 	call->rxtx_annotations[ix] = annotation;
call              220 net/rxrpc/sendmsg.c 	call->rxtx_buffer[ix] = skb;
call              221 net/rxrpc/sendmsg.c 	call->tx_top = seq;
call              223 net/rxrpc/sendmsg.c 		trace_rxrpc_transmit(call, rxrpc_transmit_queue_last);
call              225 net/rxrpc/sendmsg.c 		trace_rxrpc_transmit(call, rxrpc_transmit_queue);
call              227 net/rxrpc/sendmsg.c 	if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
call              229 net/rxrpc/sendmsg.c 		write_lock_bh(&call->state_lock);
call              230 net/rxrpc/sendmsg.c 		switch (call->state) {
call              232 net/rxrpc/sendmsg.c 			call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
call              233 net/rxrpc/sendmsg.c 			rxrpc_notify_end_tx(rx, call, notify_end_tx);
call              236 net/rxrpc/sendmsg.c 			call->state = RXRPC_CALL_SERVER_SEND_REPLY;
call              238 net/rxrpc/sendmsg.c 			WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
call              239 net/rxrpc/sendmsg.c 			if (call->ackr_reason == RXRPC_ACK_DELAY)
call              240 net/rxrpc/sendmsg.c 				call->ackr_reason = 0;
call              241 net/rxrpc/sendmsg.c 			trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
call              246 net/rxrpc/sendmsg.c 			call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
call              247 net/rxrpc/sendmsg.c 			rxrpc_notify_end_tx(rx, call, notify_end_tx);
call              252 net/rxrpc/sendmsg.c 		write_unlock_bh(&call->state_lock);
call              255 net/rxrpc/sendmsg.c 	if (seq == 1 && rxrpc_is_client_call(call))
call              256 net/rxrpc/sendmsg.c 		rxrpc_expose_client_call(call);
call              258 net/rxrpc/sendmsg.c 	ret = rxrpc_send_data_packet(call, skb, false);
call              264 net/rxrpc/sendmsg.c 			rxrpc_set_call_completion(call,
call              267 net/rxrpc/sendmsg.c 			rxrpc_notify_socket(call);
call              271 net/rxrpc/sendmsg.c 		rxrpc_instant_resend(call, ix);
call              274 net/rxrpc/sendmsg.c 		unsigned long resend_at = now + call->peer->rto_j;
call              276 net/rxrpc/sendmsg.c 		WRITE_ONCE(call->resend_at, resend_at);
call              277 net/rxrpc/sendmsg.c 		rxrpc_reduce_call_timer(call, resend_at, now,
call              293 net/rxrpc/sendmsg.c 			   struct rxrpc_call *call,
call              314 net/rxrpc/sendmsg.c 	if (call->tx_total_len != -1) {
call              315 net/rxrpc/sendmsg.c 		if (len > call->tx_total_len)
call              317 net/rxrpc/sendmsg.c 		if (!more && len != call->tx_total_len)
call              321 net/rxrpc/sendmsg.c 	skb = call->tx_pending;
call              322 net/rxrpc/sendmsg.c 	call->tx_pending = NULL;
call              328 net/rxrpc/sendmsg.c 		if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
call              329 net/rxrpc/sendmsg.c 			rxrpc_send_ack_packet(call, false, NULL);
call              336 net/rxrpc/sendmsg.c 			if (!rxrpc_check_tx_space(call, NULL)) {
call              340 net/rxrpc/sendmsg.c 				ret = rxrpc_wait_for_tx_window(rx, call,
call              348 net/rxrpc/sendmsg.c 			max -= call->conn->security_size;
call              349 net/rxrpc/sendmsg.c 			max &= ~(call->conn->size_align - 1UL);
call              355 net/rxrpc/sendmsg.c 			space = chunk + call->conn->size_align;
call              356 net/rxrpc/sendmsg.c 			space &= ~(call->conn->size_align - 1UL);
call              358 net/rxrpc/sendmsg.c 			size = space + call->conn->security_size;
call              376 net/rxrpc/sendmsg.c 			_debug("HS: %u", call->conn->security_size);
call              377 net/rxrpc/sendmsg.c 			skb_reserve(skb, call->conn->security_size);
call              378 net/rxrpc/sendmsg.c 			skb->len += call->conn->security_size;
call              413 net/rxrpc/sendmsg.c 			if (call->tx_total_len != -1)
call              414 net/rxrpc/sendmsg.c 				call->tx_total_len -= copy;
call              419 net/rxrpc/sendmsg.c 		if (call->state == RXRPC_CALL_COMPLETE)
call              425 net/rxrpc/sendmsg.c 			struct rxrpc_connection *conn = call->conn;
call              439 net/rxrpc/sendmsg.c 			seq = call->tx_top + 1;
call              447 net/rxrpc/sendmsg.c 			else if (call->tx_top - call->tx_hard_ack <
call              448 net/rxrpc/sendmsg.c 				 call->tx_winsize)
call              451 net/rxrpc/sendmsg.c 			ret = call->security->secure_packet(
call              452 net/rxrpc/sendmsg.c 				call, skb, skb->mark, skb->head);
call              456 net/rxrpc/sendmsg.c 			ret = rxrpc_queue_packet(rx, call, skb,
call              467 net/rxrpc/sendmsg.c 	call->tx_pending = skb;
call              473 net/rxrpc/sendmsg.c 	_leave(" = %d", call->error);
call              474 net/rxrpc/sendmsg.c 	return call->error;
call              514 net/rxrpc/sendmsg.c 				p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
call              518 net/rxrpc/sendmsg.c 				p->call.user_call_ID = *(unsigned long *)
call              556 net/rxrpc/sendmsg.c 			if (p->call.tx_total_len != -1 || len != sizeof(__s64))
call              558 net/rxrpc/sendmsg.c 			p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
call              559 net/rxrpc/sendmsg.c 			if (p->call.tx_total_len < 0)
call              566 net/rxrpc/sendmsg.c 			memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
call              567 net/rxrpc/sendmsg.c 			p->call.nr_timeouts = len / 4;
call              568 net/rxrpc/sendmsg.c 			if (p->call.timeouts.hard > INT_MAX / HZ)
call              570 net/rxrpc/sendmsg.c 			if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
call              572 net/rxrpc/sendmsg.c 			if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
call              583 net/rxrpc/sendmsg.c 	if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
call              598 net/rxrpc/sendmsg.c 	__acquires(&call->user_mutex)
call              601 net/rxrpc/sendmsg.c 	struct rxrpc_call *call;
call              624 net/rxrpc/sendmsg.c 	call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL,
call              629 net/rxrpc/sendmsg.c 	_leave(" = %p\n", call);
call              630 net/rxrpc/sendmsg.c 	return call;
call              640 net/rxrpc/sendmsg.c 	__releases(&call->user_mutex)
call              643 net/rxrpc/sendmsg.c 	struct rxrpc_call *call;
call              648 net/rxrpc/sendmsg.c 		.call.tx_total_len	= -1,
call              649 net/rxrpc/sendmsg.c 		.call.user_call_ID	= 0,
call              650 net/rxrpc/sendmsg.c 		.call.nr_timeouts	= 0,
call              651 net/rxrpc/sendmsg.c 		.call.interruptibility	= RXRPC_INTERRUPTIBLE,
call              668 net/rxrpc/sendmsg.c 		call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
call              670 net/rxrpc/sendmsg.c 		if (IS_ERR(call))
call              671 net/rxrpc/sendmsg.c 			return PTR_ERR(call);
call              676 net/rxrpc/sendmsg.c 	call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
call              677 net/rxrpc/sendmsg.c 	if (!call) {
call              681 net/rxrpc/sendmsg.c 		call = rxrpc_new_client_call_for_sendmsg(rx, msg, &p);
call              683 net/rxrpc/sendmsg.c 		if (IS_ERR(call))
call              684 net/rxrpc/sendmsg.c 			return PTR_ERR(call);
call              687 net/rxrpc/sendmsg.c 		switch (READ_ONCE(call->state)) {
call              693 net/rxrpc/sendmsg.c 			rxrpc_put_call(call, rxrpc_call_put);
call              700 net/rxrpc/sendmsg.c 		ret = mutex_lock_interruptible(&call->user_mutex);
call              707 net/rxrpc/sendmsg.c 		if (p.call.tx_total_len != -1) {
call              709 net/rxrpc/sendmsg.c 			if (call->tx_total_len != -1 ||
call              710 net/rxrpc/sendmsg.c 			    call->tx_pending ||
call              711 net/rxrpc/sendmsg.c 			    call->tx_top != 0)
call              713 net/rxrpc/sendmsg.c 			call->tx_total_len = p.call.tx_total_len;
call              717 net/rxrpc/sendmsg.c 	switch (p.call.nr_timeouts) {
call              719 net/rxrpc/sendmsg.c 		j = msecs_to_jiffies(p.call.timeouts.normal);
call              720 net/rxrpc/sendmsg.c 		if (p.call.timeouts.normal > 0 && j == 0)
call              722 net/rxrpc/sendmsg.c 		WRITE_ONCE(call->next_rx_timo, j);
call              725 net/rxrpc/sendmsg.c 		j = msecs_to_jiffies(p.call.timeouts.idle);
call              726 net/rxrpc/sendmsg.c 		if (p.call.timeouts.idle > 0 && j == 0)
call              728 net/rxrpc/sendmsg.c 		WRITE_ONCE(call->next_req_timo, j);
call              731 net/rxrpc/sendmsg.c 		if (p.call.timeouts.hard > 0) {
call              732 net/rxrpc/sendmsg.c 			j = msecs_to_jiffies(p.call.timeouts.hard);
call              735 net/rxrpc/sendmsg.c 			WRITE_ONCE(call->expect_term_by, j);
call              736 net/rxrpc/sendmsg.c 			rxrpc_reduce_call_timer(call, j, now,
call              742 net/rxrpc/sendmsg.c 	state = READ_ONCE(call->state);
call              744 net/rxrpc/sendmsg.c 	       call->debug_id, call->user_call_ID, state, call->conn);
call              751 net/rxrpc/sendmsg.c 		if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
call              752 net/rxrpc/sendmsg.c 			ret = rxrpc_send_abort_packet(call);
call              755 net/rxrpc/sendmsg.c 	} else if (rxrpc_is_client_call(call) &&
call              759 net/rxrpc/sendmsg.c 	} else if (rxrpc_is_service_call(call) &&
call              765 net/rxrpc/sendmsg.c 		ret = rxrpc_send_data(rx, call, msg, len, NULL);
call              769 net/rxrpc/sendmsg.c 	mutex_unlock(&call->user_mutex);
call              771 net/rxrpc/sendmsg.c 	rxrpc_put_call(call, rxrpc_call_put);
call              793 net/rxrpc/sendmsg.c int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
call              799 net/rxrpc/sendmsg.c 	_enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
call              804 net/rxrpc/sendmsg.c 	mutex_lock(&call->user_mutex);
call              807 net/rxrpc/sendmsg.c 	       call->debug_id, call->user_call_ID, call->state, call->conn);
call              809 net/rxrpc/sendmsg.c 	switch (READ_ONCE(call->state)) {
call              813 net/rxrpc/sendmsg.c 		ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
call              817 net/rxrpc/sendmsg.c 		read_lock_bh(&call->state_lock);
call              818 net/rxrpc/sendmsg.c 		ret = call->error;
call              819 net/rxrpc/sendmsg.c 		read_unlock_bh(&call->state_lock);
call              823 net/rxrpc/sendmsg.c 		trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
call              828 net/rxrpc/sendmsg.c 	mutex_unlock(&call->user_mutex);
call              845 net/rxrpc/sendmsg.c bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
call              850 net/rxrpc/sendmsg.c 	_enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
call              852 net/rxrpc/sendmsg.c 	mutex_lock(&call->user_mutex);
call              854 net/rxrpc/sendmsg.c 	aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
call              856 net/rxrpc/sendmsg.c 		rxrpc_send_abort_packet(call);
call              858 net/rxrpc/sendmsg.c 	mutex_unlock(&call->user_mutex);
call              875 net/rxrpc/sendmsg.c void rxrpc_kernel_set_tx_length(struct socket *sock, struct rxrpc_call *call,
call              878 net/rxrpc/sendmsg.c 	WARN_ON(call->tx_total_len != -1);
call              879 net/rxrpc/sendmsg.c 	call->tx_total_len = tx_total_len;
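
The net/rxrpc/sendmsg.c entries above index rxrpc_check_tx_space(), which compares tx_top - tx_hard_ack against the smaller of the advertised window and the congestion window; because the sequence numbers are unsigned, the subtraction stays correct across wrap. A hedged sketch of that window test, with illustrative names and demo values:

/* Sketch: wrap-safe transmit-window check on unsigned sequence numbers,
 * mirroring the comparison indexed above.  Names are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t seq_t;

static uint32_t min_u32(uint32_t a, uint32_t b)
{
        return a < b ? a : b;
}

/* True if another packet may be queued: the distance from the last
 * hard-acked sequence to the top of the queue is inside the window. */
static bool check_tx_space(seq_t tx_top, seq_t tx_hard_ack,
                           uint32_t tx_winsize, uint32_t cong_cwnd,
                           uint32_t cong_extra)
{
        uint32_t win_size = min_u32(tx_winsize, cong_cwnd + cong_extra);

        return tx_top - tx_hard_ack < win_size;   /* unsigned, wrap-safe */
}

int main(void)
{
        /* Works even when the sequence counter has wrapped past zero:
         * 4 packets are in flight here. */
        seq_t hard_ack = 0xfffffffe, top = 0x00000002;

        printf("space: %d\n", check_tx_space(top, hard_ack, 8, 10, 0)); /* 1 */
        printf("space: %d\n", check_tx_space(top, hard_ack, 4, 10, 0)); /* 0 */
        return 0;
}
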
call             2818 net/socket.c   SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
call             2825 net/socket.c   	if (call < 1 || call > SYS_SENDMMSG)
call             2827 net/socket.c   	call = array_index_nospec(call, SYS_SENDMMSG + 1);
call             2829 net/socket.c   	len = nargs[call];
call             2837 net/socket.c   	err = audit_socketcall(nargs[call] / sizeof(unsigned long), a);
call             2844 net/socket.c   	switch (call) {
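
The net/socket.c socketcall() entries show the classic multiplexed entry point: validate the call number, look up the per-call argument count in nargs[], then switch to the real handler. (The kernel additionally clamps the index with array_index_nospec(); that part is not reproduced here.) A minimal illustrative demultiplexer along those lines, with made-up call numbers and no real syscall handlers:

/* Sketch of a socketcall-style demultiplexer.  Call numbers, the nargs
 * table and the handler stubs below are illustrative only. */
#include <stdio.h>

enum { DEMO_SOCKET = 1, DEMO_BIND = 2, DEMO_SENDTO = 3, DEMO_MAX = DEMO_SENDTO };

/* Number of unsigned-long arguments each call consumes from 'args'. */
static const unsigned char nargs[DEMO_MAX + 1] = {
        [DEMO_SOCKET] = 3, [DEMO_BIND] = 3, [DEMO_SENDTO] = 6,
};

static long demo_socketcall(int call, const unsigned long *args)
{
        if (call < 1 || call > DEMO_MAX)
                return -22;                     /* -EINVAL */

        (void)args;     /* real handlers would unpack their arguments here */
        printf("call %d uses %u args\n", call, nargs[call]);

        switch (call) {
        case DEMO_SOCKET:
                return 0;       /* would be sys_socket(args[0..2]) */
        case DEMO_BIND:
                return 0;       /* would be sys_bind(args[0..2]) */
        case DEMO_SENDTO:
                return 0;       /* would be sys_sendto(args[0..5]) */
        default:
                return -22;
        }
}

int main(void)
{
        unsigned long args[6] = { 2, 1, 0, 0, 0, 0 };

        return (int)demo_socketcall(DEMO_SOCKET, args);
}
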
call              394 scripts/gcc-plugins/latent_entropy_plugin.c 		gcall *call;
call              400 scripts/gcc-plugins/latent_entropy_plugin.c 		call = as_a_gcall(stmt);
call              401 scripts/gcc-plugins/latent_entropy_plugin.c 		if (!gimple_call_tail_p(call))
call              439 scripts/gcc-plugins/latent_entropy_plugin.c 	gimple assign, call;
call              450 scripts/gcc-plugins/latent_entropy_plugin.c 	call = gimple_build_call(fndecl, 1, integer_zero_node);
call              451 scripts/gcc-plugins/latent_entropy_plugin.c 	gimple_call_set_lhs(call, frame_addr);
call              452 scripts/gcc-plugins/latent_entropy_plugin.c 	gsi_insert_before(&gsi, call, GSI_NEW_STMT);
call              453 scripts/gcc-plugins/latent_entropy_plugin.c 	update_stmt(call);
call              848 sound/core/pcm.c #define pcm_call_notify(pcm, call)					\
call              852 sound/core/pcm.c 			_notify->call(pcm);				\
call              855 sound/core/pcm.c #define pcm_call_notify(pcm, call) do {} while (0)
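
The sound/core/pcm.c pcm_call_notify() entries show a macro that invokes a named callback member on every registered notifier, plus a do-nothing stub when notification support is compiled out. A stand-alone sketch of that macro pattern, using invented demo_* names:

/* Sketch: a notifier macro that walks a list and calls the named member
 * on each entry, with a no-op fallback.  Everything here is illustrative. */
#include <stdio.h>

#define DEMO_NOTIFY 1   /* comment this out to get the no-op stub instead */

struct demo_notify {
        void (*attach)(int id);
        void (*detach)(int id);
        struct demo_notify *next;
};

static struct demo_notify *demo_notify_list;

#ifdef DEMO_NOTIFY
#define demo_call_notify(id, cb)                                        \
        do {                                                            \
                struct demo_notify *_n;                                 \
                for (_n = demo_notify_list; _n; _n = _n->next)          \
                        if (_n->cb)                                     \
                                _n->cb(id);                             \
        } while (0)
#else
#define demo_call_notify(id, cb) do {} while (0)
#endif

static void on_attach(int id) { printf("attached %d\n", id); }

int main(void)
{
        struct demo_notify n = { .attach = on_attach };

        n.next = demo_notify_list;
        demo_notify_list = &n;

        demo_call_notify(42, attach);   /* expands to _n->attach(42) */
        demo_call_notify(42, detach);   /* detach is NULL, skipped */
        return 0;
}
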
call              577 tools/perf/util/callchain.c 		struct callchain_list *call;
call              579 tools/perf/util/callchain.c 		call = zalloc(sizeof(*call));
call              580 tools/perf/util/callchain.c 		if (!call) {
call              584 tools/perf/util/callchain.c 		call->ip = cursor_node->ip;
call              585 tools/perf/util/callchain.c 		call->ms.sym = cursor_node->sym;
call              586 tools/perf/util/callchain.c 		call->ms.map = map__get(cursor_node->map);
call              587 tools/perf/util/callchain.c 		call->srcline = cursor_node->srcline;
call              590 tools/perf/util/callchain.c 			call->branch_count = 1;
call              597 tools/perf/util/callchain.c 				call->brtype_stat.branch_to = true;
call              600 tools/perf/util/callchain.c 					call->predicted_count = 1;
call              603 tools/perf/util/callchain.c 					call->abort_count = 1;
call              605 tools/perf/util/callchain.c 				branch_type_count(&call->brtype_stat,
call              613 tools/perf/util/callchain.c 				call->brtype_stat.branch_to = false;
call              614 tools/perf/util/callchain.c 				call->cycles_count =
call              616 tools/perf/util/callchain.c 				call->iter_count = cursor_node->nr_loop_iter;
call              617 tools/perf/util/callchain.c 				call->iter_cycles = cursor_node->iter_cycles;
call              621 tools/perf/util/callchain.c 		list_add_tail(&call->list, &node->val);
call              641 tools/perf/util/callchain.c 		struct callchain_list *call, *tmp;
call              643 tools/perf/util/callchain.c 		list_for_each_entry_safe(call, tmp, &new->val, list) {
call              644 tools/perf/util/callchain.c 			list_del_init(&call->list);
call              645 tools/perf/util/callchain.c 			map__zput(call->ms.map);
call              646 tools/perf/util/callchain.c 			free(call);
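
The tools/perf/util/callchain.c entries show the usual allocate-fill-append construction of a callchain node list and the later list_for_each_entry_safe() teardown. The sketch below mirrors the same shape with a plain next-pointer list instead of the kernel-style list macros; all names are illustrative.

/* Sketch: build a list of frames, then free it with a "safe" traversal
 * that remembers the next node before freeing the current one - the
 * same reason callchain.c uses list_for_each_entry_safe(). */
#include <stdio.h>
#include <stdlib.h>

struct frame {
        unsigned long ip;
        const char *sym;
        struct frame *next;
};

int main(void)
{
        struct frame *head = NULL, *tail = NULL, *f, *tmp;
        unsigned long ips[] = { 0x401234, 0x401500 };
        const char *syms[] = { "main", "do_work" };

        for (int i = 0; i < 2; i++) {
                f = calloc(1, sizeof(*f));      /* zalloc-style allocation */
                if (!f)
                        break;
                f->ip = ips[i];
                f->sym = syms[i];
                if (tail)
                        tail->next = f;         /* append at the tail */
                else
                        head = f;
                tail = f;
        }

        for (f = head; f; f = tmp) {
                tmp = f->next;                  /* stash before freeing */
                printf("%#lx %s\n", f->ip, f->sym);
                free(f);
        }
        return 0;
}
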
call               85 tools/virtio/ringtest/main.h void call(void);
call              269 tools/virtio/ringtest/ring.c 		call();
call              332 tools/virtio/ringtest/virtio_ring_0_9.c 		call();
call               26 tools/virtio/virtio_test.c 	int call;
call               86 tools/virtio/virtio_test.c 	file.fd = info->call;
call               97 tools/virtio/virtio_test.c 	info->call = eventfd(0, EFD_NONBLOCK);
call              109 tools/virtio/virtio_test.c 	dev->fds[info->idx].fd = info->call;
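
tools/virtio/virtio_test.c wires the "call" notification to an eventfd created with eventfd(0, EFD_NONBLOCK). The following stand-alone program demonstrates that eventfd signalling model in one process; it is a usage sketch, not code from the virtio test itself.

/* Sketch: eventfd as a call-notification channel - one side signals by
 * writing an 8-byte counter, the other drains it with a read.  With
 * EFD_NONBLOCK a read on an empty counter fails with EAGAIN instead of
 * blocking. */
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int callfd = eventfd(0, EFD_NONBLOCK);

        if (callfd < 0) {
                perror("eventfd");
                return 1;
        }

        /* "device" side: each write adds to the eventfd counter */
        val = 1;
        if (write(callfd, &val, sizeof(val)) != sizeof(val))
                perror("write");

        /* "driver" side: a read returns the accumulated count and resets it */
        if (read(callfd, &val, sizeof(val)) == sizeof(val))
                printf("got %llu call notification(s)\n",
                       (unsigned long long)val);

        /* counter is now zero, so this non-blocking read reports EAGAIN */
        if (read(callfd, &val, sizeof(val)) < 0)
                perror("second read");

        close(callfd);
        return 0;
}
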