Identifier cross-reference for "sw", grouped by file (line number: context):

arch/alpha/include/uapi/asm/fpu.h
    93: ieee_swcr_to_fpcr(unsigned long sw)
    96: fp = (sw & IEEE_STATUS_MASK) << 35;
    97: fp |= (sw & IEEE_MAP_DMZ) << 36;
    98: fp |= (sw & IEEE_STATUS_MASK ? FPCR_SUM : 0);
    99: fp |= (~sw & (IEEE_TRAP_ENABLE_INV
    102: fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
    103: fp |= (sw & IEEE_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
    104: fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41;
    111: unsigned long sw;
    112: sw = (fp >> 35) & IEEE_STATUS_MASK;
    113: sw |= (fp >> 36) & IEEE_MAP_DMZ;
    114: sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV
    117: sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE);
    118: sw |= (fp >> 47) & IEEE_MAP_UMZ;
    119: sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO;
    120: return sw;

arch/alpha/kernel/process.c
    292: struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
    303: dest[ 9] = sw->r9;
    304: dest[10] = sw->r10;
    305: dest[11] = sw->r11;
    306: dest[12] = sw->r12;
    307: dest[13] = sw->r13;
    308: dest[14] = sw->r14;
    309: dest[15] = sw->r15;
    345: struct switch_stack *sw = (struct switch_stack *)task_pt_regs(task) - 1;
    346: memcpy(dest, sw->fp, 32 * 8);

arch/alpha/kernel/signal.c
    152: struct switch_stack *sw = (struct switch_stack *)regs - 1;
    157: sw->r26 = (unsigned long) ret_from_sys_call;
    168: err |= __get_user(sw->r9, sc->sc_regs+9);
    169: err |= __get_user(sw->r10, sc->sc_regs+10);
    170: err |= __get_user(sw->r11, sc->sc_regs+11);
    171: err |= __get_user(sw->r12, sc->sc_regs+12);
    172: err |= __get_user(sw->r13, sc->sc_regs+13);
    173: err |= __get_user(sw->r14, sc->sc_regs+14);
    174: err |= __get_user(sw->r15, sc->sc_regs+15);
    193: err |= __get_user(sw->fp[i], sc->sc_fpregs+i);
    194: err |= __get_user(sw->fp[31], &sc->sc_fpcr);
    274: struct switch_stack *sw = (struct switch_stack *)regs - 1;
    291: err |= __put_user(sw->r9 , sc->sc_regs+9);
    292: err |= __put_user(sw->r10 , sc->sc_regs+10);
    293: err |= __put_user(sw->r11 , sc->sc_regs+11);
    294: err |= __put_user(sw->r12 , sc->sc_regs+12);
    295: err |= __put_user(sw->r13 , sc->sc_regs+13);
    296: err |= __put_user(sw->r14 , sc->sc_regs+14);
    297: err |= __put_user(sw->r15 , sc->sc_regs+15);
    316: err |= __put_user(sw->fp[i], sc->sc_fpregs+i);
    318: err |= __put_user(sw->fp[31], &sc->sc_fpcr);

arch/arm/mach-imx/common.h
    57: void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw);
    58: void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw);

arch/arm/mach-imx/gpc.c
    34: void imx_gpc_set_arm_power_up_timing(u32 sw2iso, u32 sw)
    37: (sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PUPSCR);
    40: void imx_gpc_set_arm_power_down_timing(u32 sw2iso, u32 sw)
    43: (sw << GPC_PGC_SW_SHIFT), gpc_base + GPC_PGC_CPU_PDNSCR);

arch/csky/kernel/process.c
    32: struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
    34: return sw->r15;

arch/ia64/include/asm/unwind.h
    69: struct switch_stack *sw;
    119: struct switch_stack *sw);

arch/ia64/kernel/crash.c
    144: current->thread.ksp = (__u64)info->sw - 16;

arch/ia64/kernel/machine_kexec.c
    95: current->thread.ksp = (__u64)info->sw - 16;

arch/ia64/kernel/mca.c
    965: const struct switch_stack *sw,
    1109: memcpy(old_sw, sw, sizeof(*sw));
    1282: ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
    1302: previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
    1660: ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
    1676: previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");

arch/ia64/kernel/process.c
    463: if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
    466: ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),

arch/ia64/kernel/ptrace.c
    252: get_rnat (struct task_struct *task, struct switch_stack *sw,
    263: kbsp = (unsigned long *) sw->ar_bspstore;
    294: rnat0 = sw->ar_rnat;
    301: rnat1 = sw->ar_rnat;
    312: put_rnat (struct task_struct *task, struct switch_stack *sw,
    323: kbsp = (unsigned long *) sw->ar_bspstore;
    372: sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
    379: sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
    539: ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
    547: ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
    559: ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
    572: ret = ia64_poke(child, sw, user_rbs_end, addr, val);
    593: fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
    833: struct switch_stack *sw;
    843: sw = (struct switch_stack *) (child->thread.ksp + 16);
    942: retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
    976: struct switch_stack *sw;
    988: sw = (struct switch_stack *) (child->thread.ksp + 16);
    1078: retval |= __copy_from_user(&sw->f12, &ppr->fr[12],

arch/ia64/kernel/unaligned.c
    310: struct switch_stack *sw = (struct switch_stack *) regs - 1;
    330: r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
    332: on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
    333: addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
    337: if ((unsigned long) rnat_addr >= sw->ar_bspstore)
    338: rnat_addr = &sw->ar_rnat;
    361: ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
    365: ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
    374: ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);
    383: struct switch_stack *sw = (struct switch_stack *) regs - 1;
    403: r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);
    405: on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
    406: addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
    412: if ((unsigned long) rnat_addr >= sw->ar_bspstore)
    413: rnat_addr = &sw->ar_rnat;
    432: ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);
    440: ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
    456: struct switch_stack *sw = (struct switch_stack *) regs - 1;
    478: addr = (unsigned long)sw;
    479: unat = &sw->ar_unat;
    482: unat = &sw->caller_unat;
    485: addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
    522: struct switch_stack *sw = (struct switch_stack *)regs - 1;
    544: addr = (unsigned long)sw;
    583: struct switch_stack *sw = (struct switch_stack *) regs - 1;
    614: addr = FR_IN_SW(regnum) ? (unsigned long)sw
    630: struct switch_stack *sw = (struct switch_stack *) regs - 1;
    652: addr = (unsigned long)sw;
    653: unat = &sw->ar_unat;
    656: unat = &sw->caller_unat;

arch/ia64/kernel/unwind.c
    350: nat_addr = &info->sw->ar_rnat;
    355: addr = &info->sw->r4 + (regnum - 4);
    356: nat_addr = &info->sw->ar_unat;
    366: nat_addr = &info->sw->caller_unat;
    381: nat_addr = &info->sw->ar_rnat;
    425: addr = &info->sw->b1 + (regnum - 1);
    460: addr = &info->sw->f2 + (regnum - 2);
    467: addr = &info->sw->f12 + (regnum - 12);
    471: addr = &info->sw->f16 + (regnum - 16);
    504: addr = &info->sw->ar_bspstore;
    510: addr = &info->sw->ar_bspstore;
    516: addr = &info->sw->ar_pfs;
    522: addr = &info->sw->ar_rnat;
    528: addr = &info->sw->caller_unat;
    534: addr = &info->sw->ar_lc;
    550: addr = &info->sw->ar_fpsr;
    598: addr = &info->sw->pr;
    1794: state->pri_unat_loc = &state->sw->caller_unat;
    1822: s[val] = (unsigned long) state->sw + off;
    2002: struct switch_stack *sw, unsigned long stktop)
    2022: rbstop = sw->ar_bspstore;
    2036: info->sw = sw;
    2038: info->pr = sw->pr;
    2047: info->pr, (unsigned long) info->sw, info->sp);
    2052: unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
    2056: init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
    2057: info->cfm_loc = &sw->ar_pfs;
    2060: info->ip = sw->b0;
    2074: struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
    2077: unw_init_frame_info(info, t, sw);

arch/ia64/oprofile/backtrace.c
    73: struct switch_stack *sw;
    77: sw = (struct switch_stack *)(info+1);
    79: sw = (struct switch_stack *)(((unsigned long)sw + 15) & ~15);
    81: unw_init_frame_info(&bt->frame, current, sw);

arch/m68k/include/asm/elf.h
    97: struct switch_stack *sw = ((struct switch_stack *)regs) - 1; \
    98: pr_reg[5] = sw->d6; \
    99: pr_reg[6] = sw->d7; \
    100: pr_reg[10] = sw->a3; \
    101: pr_reg[11] = sw->a4; \
    102: pr_reg[12] = sw->a5; \
    103: pr_reg[13] = sw->a6; \

arch/m68k/kernel/process.c
    126: struct switch_stack sw;
    145: frame->sw.a3 = usp; /* function */
    146: frame->sw.d7 = arg;
    147: frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
    154: frame->sw.retpc = (unsigned long)ret_from_fork;

arch/m68k/kernel/signal.c
    653: struct switch_stack *sw = (struct switch_stack *)regs - 1;
    685: : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
    732: rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
    753: err |= __get_user(sw->d6, &gregs[6]);
    754: err |= __get_user(sw->d7, &gregs[7]);
    758: err |= __get_user(sw->a3, &gregs[11]);
    759: err |= __get_user(sw->a4, &gregs[12]);
    760: err |= __get_user(sw->a5, &gregs[13]);
    761: err |= __get_user(sw->a6, &gregs[14]);
    785: asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
    810: asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
    823: if (rt_restore_ucontext(regs, sw, &frame->uc))
    850: struct switch_stack *sw = (struct switch_stack *)regs - 1;
    861: err |= __put_user(sw->d6, &gregs[6]);
    862: err |= __put_user(sw->d7, &gregs[7]);
    866: err |= __put_user(sw->a3, &gregs[11]);
    867: err |= __put_user(sw->a4, &gregs[12]);
    868: err |= __put_user(sw->a5, &gregs[13]);
    869: err |= __put_user(sw->a6, &gregs[14]);

arch/mips/alchemy/devboards/db1200.c
    811: unsigned short sw;
    880: sw = bcsr_read(BCSR_SWITCHES);
    881: if (sw & BCSR_SWITCHES_DIP_8) {
    903: sw &= BCSR_SWITCHES_DIP_8 | BCSR_SWITCHES_DIP_7;
    904: if (sw == BCSR_SWITCHES_DIP_8) {

arch/mips/include/asm/asm-eva.h
    114: #define kernel_sw(reg, addr) sw reg, addr

arch/mips/include/asm/asm.h
    136: #define REG_S sw
    159: #define INT_S sw
    196: #define LONG_S sw
    245: #define PTR_S sw

arch/mips/include/asm/asmmacro-32.h
    36: sw \tmp, THREAD_FCR31(\thread)

arch/mips/include/asm/asmmacro.h
    69: sw \reg, TI_PRE_COUNT($28)
    79: sw \reg, TI_PRE_COUNT($28)
    104: sw \tmp, THREAD_FCR31(\thread)
    557: sw $1, THREAD_MSA_CSR(\thread)

arch/mips/include/asm/ftrace.h
    69: safe_store(STR(sw), src, dst, error)

arch/mips/net/ebpf_jit.c
    315, 320, 325, 330, 335, 340: emit_instr_long(ctx, sd, sw,
    1381: emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
    1491: emit_instr(ctx, sw, src, mem_off, dst);

arch/nios2/include/asm/elf.h
    62: struct switch_stack *sw = ((struct switch_stack *)regs) - 1; \
    63: pr_reg[23] = sw->r16; \
    64: pr_reg[24] = sw->r17; \
    65: pr_reg[25] = sw->r18; \
    66: pr_reg[26] = sw->r19; \
    67: pr_reg[27] = sw->r20; \
    68: pr_reg[28] = sw->r21; \
    69: pr_reg[29] = sw->r22; \
    70: pr_reg[30] = sw->r23; \
    71: pr_reg[31] = sw->fp; \
    72: pr_reg[32] = sw->gp; \
    73: pr_reg[33] = sw->ra; \

arch/nios2/kernel/ptrace.c
    28: const struct switch_stack *sw = (struct switch_stack *)regs - 1;
    49: REG_O_RANGE(sw, PTR_R16, PTR_R23);
    74: const struct switch_stack *sw = (struct switch_stack *)regs - 1;
    95: REG_IN_RANGE(sw, PTR_R16, PTR_R23);

arch/nios2/kernel/signal.c
    38: struct switch_stack *sw,
    67: err |= __get_user(sw->r16, &gregs[15]);
    68: err |= __get_user(sw->r17, &gregs[16]);
    69: err |= __get_user(sw->r18, &gregs[17]);
    70: err |= __get_user(sw->r19, &gregs[18]);
    71: err |= __get_user(sw->r20, &gregs[19]);
    72: err |= __get_user(sw->r21, &gregs[20]);
    73: err |= __get_user(sw->r22, &gregs[21]);
    74: err |= __get_user(sw->r23, &gregs[22]);
    76: err |= __get_user(sw->fp, &gregs[24]); /* Verify, should this be
    78: err |= __get_user(sw->gp, &gregs[25]); /* Verify, should this be
    101: asmlinkage int do_rt_sigreturn(struct switch_stack *sw)
    103: struct pt_regs *regs = (struct pt_regs *)(sw + 1);
    117: if (rt_restore_ucontext(regs, sw, &frame->uc, &rval))
    129: struct switch_stack *sw = (struct switch_stack *)regs - 1;
    149: err |= __put_user(sw->r16, &gregs[15]);
    150: err |= __put_user(sw->r17, &gregs[16]);
    151: err |= __put_user(sw->r18, &gregs[17]);
    152: err |= __put_user(sw->r19, &gregs[18]);
    153: err |= __put_user(sw->r20, &gregs[19]);
    154: err |= __put_user(sw->r21, &gregs[20]);
    155: err |= __put_user(sw->r22, &gregs[21]);
    156: err |= __put_user(sw->r23, &gregs[22]);
    158: err |= __put_user(sw->fp, &gregs[24]);
    159: err |= __put_user(sw->gp, &gregs[25]);

arch/parisc/kernel/traps.c
    96: struct { u32 sw[2]; } s;
    110: printbinary(buf, s.sw[0], 32);
    112: printk("%sFPER1: %08x\n", level, s.sw[1]);

arch/parisc/math-emu/driver.c
    71: unsigned int orig_sw, sw;
    96: memcpy(&sw, frcopy, sizeof(sw));
    100: printbinary(sw, 32);

arch/powerpc/sysdev/fsl_rio.c
    450: int paw, aw, sw;
    602: sw = *cell;
    604: sw = of_n_size_cells(np);
    608: range_size = of_read_number(dt_range + aw + paw, sw);

arch/riscv/include/asm/asm.h
    24: #define REG_S __REG_SEL(sd, sw)

arch/sh/boards/mach-se/7724/setup.c
    679: u16 sw = __raw_readw(SW4140); /* select camera, monitor */
    904: if (sw & SW41_B) {
    914: if (sw & SW41_A) {

arch/x86/include/uapi/asm/sigcontext.h
    110: __u32 sw;
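A pattern worth noting before the driver entries below: most of the arch/ references above (alpha, csky, ia64, m68k, nios2) rely on the same stack-layout convention, in which the callee-saved register block (struct switch_stack) is stored on the kernel stack immediately below the trap frame (struct pt_regs), so code holding a pt_regs pointer reaches it by pointer arithmetic. A minimal stand-alone sketch of that idiom follows; the structure members here are illustrative placeholders, not the real per-architecture layouts.

    /* Illustrative stand-ins only; each architecture defines its own layouts. */
    struct pt_regs      { unsigned long r0_r8[9];   /* registers saved at trap entry      */ };
    struct switch_stack { unsigned long r9_r15[7];  /* callee-saved registers saved below */ };

    /* Same idiom as "(struct switch_stack *)regs - 1" in the listings above:
     * stepping back one switch_stack-sized slot from the pt_regs pointer lands
     * on the block that the kernel entry path stored just beneath it. */
    static struct switch_stack *switch_stack_of(struct pt_regs *regs)
    {
            return (struct switch_stack *)regs - 1;
    }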
arch/x86/platform/olpc/olpc-xo1-sci.c
    84: if (!!test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == state)
    128: if (!!test_bit(SW_LID, lid_switch_idev->sw) == !lid_open)

drivers/block/swim3.c
    275: struct swim3 __iomem *sw = fs->swim3;
    277: out_8(&sw->select, RELAX);
    279: out_8(&sw->control_bis, SELECT);
    281: out_8(&sw->control_bic, SELECT);
    282: out_8(&sw->select, sel & CA_MASK);
    287: struct swim3 __iomem *sw = fs->swim3;
    291: out_8(&sw->select, sw->select | LSTRB);
    293: out_8(&sw->select, sw->select & ~LSTRB);
    299: struct swim3 __iomem *sw = fs->swim3;
    304: stat = in_8(&sw->status);
    375: struct swim3 __iomem *sw = fs->swim3;
    378: in_8(&sw->intr); /* clear SEEN_SECTOR bit */
    379: in_8(&sw->error);
    380: out_8(&sw->intr_enable, SEEN_SECTOR);
    381: out_8(&sw->control_bis, DO_ACTION);
    388: struct swim3 __iomem *sw = fs->swim3;
    392: sw->nseek = n;
    395: sw->nseek = -n;
    399: in_8(&sw->error);
    401: out_8(&sw->intr_enable, SEEK_DONE);
    402: out_8(&sw->control_bis, DO_SEEK);
    419: struct swim3 __iomem *sw = fs->swim3;
    441: out_8(&sw->sector, fs->req_sector);
    442: out_8(&sw->nsect, n);
    443: out_8(&sw->gap3, 0);
    457: out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
    458: in_8(&sw->error);
    459: out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
    461: out_8(&sw->control_bis, WRITE_SECTORS);
    462: in_8(&sw->intr);
    465: out_8(&sw->intr_enable, TRANSFER_DONE);
    466: out_8(&sw->control_bis, DO_ACTION);
    542: struct swim3 __iomem *sw = fs->swim3;
    549: out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
    550: out_8(&sw->select, RELAX);
    551: out_8(&sw->intr_enable, 0);
    566: struct swim3 __iomem *sw = fs->swim3;
    573: out_8(&sw->control_bic, DO_SEEK);
    574: out_8(&sw->select, RELAX);
    575: out_8(&sw->intr_enable, 0);
    585: struct swim3 __iomem *sw = fs->swim3;
    593: out_8(&sw->select, RELAX);
    598: out_8(&sw->select, RELAX);
    614: struct swim3 __iomem *sw = fs->swim3;
    627: out_8(&sw->intr_enable, 0);
    628: out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
    629: out_8(&sw->select, RELAX);
    641: struct swim3 __iomem *sw = fs->swim3;
    652: intr = in_8(&sw->intr);
    653: err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
    660: out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
    661: out_8(&sw->select, RELAX);
    662: out_8(&sw->intr_enable, 0);
    665: if (sw->ctrack == 0xff) {
    677: fs->cur_cyl = sw->ctrack;
    678: fs->cur_sector = sw->csect;
    688: if (sw->nseek == 0) {
    689: out_8(&sw->control_bic, DO_SEEK);
    690: out_8(&sw->select, RELAX);
    691: out_8(&sw->intr_enable, 0);
    701: out_8(&sw->intr_enable, 0);
    709: out_8(&sw->intr_enable, 0);
    710: out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
    711: out_8(&sw->select, RELAX);
    911: struct swim3 __iomem *sw = fs->swim3;
    918: out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
    919: out_8(&sw->control_bic, 0xff);
    920: out_8(&sw->mode, 0x95);
    922: out_8(&sw->intr_enable, 0);
    923: out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
    963: out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
    991: struct swim3 __iomem *sw = fs->swim3;
    1000: out_8(&sw->control_bic, 0xff);
    1016: struct swim3 __iomem *sw;
    1023: sw = fs->swim3;
    1025: out_8(&sw->intr_enable, 0);
    1026: out_8(&sw->control_bis, DRIVE_ENABLE);
    1068: struct swim3 __iomem *sw;
    1073: sw = fs->swim3;
    1079: out_8(&sw->intr_enable, 0);
    1080: in_8(&sw->intr);
    1081: in_8(&sw->error);

drivers/dma/mmp_pdma.c
    772: struct mmp_pdma_desc_sw *sw;
    789: list_for_each_entry(sw, &chan->chain_running, node) {
    793: start = sw->desc.dtadr;
    795: start = sw->desc.dsadr;
    797: len = sw->desc.dcmd & DCMD_LENGTH;
    828: if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
    831: if (sw->async_tx.cookie == cookie) {

drivers/edac/cpc925_edac.c
    288: int len, sw, aw;
    296: sw = of_n_size_cells(np);
    304: size = of_read_number(reg, sw);
    305: reg += sw;

drivers/gpu/drm/amd/amdgpu/amdgpu.h
    272: bool sw;

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
    1675: if (!adev->ip_blocks[i].status.sw)
    1700: if (!adev->ip_blocks[i].status.sw)
    1785: adev->ip_blocks[i].status.sw = true;
    2097: if (!adev->ip_blocks[i].status.sw)
    2114: adev->ip_blocks[i].status.sw = false;

drivers/gpu/drm/i915/display/intel_dp.c
    6424: struct edp_power_seq *sw = &intel_dp->pps_delays;
    6428: if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
    6429: hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
    6431: intel_pps_dump_state("sw", sw);

drivers/gpu/drm/i915/display/intel_overlay.c
    540: u32 sw;
    543: sw = ALIGN((offset & 31) + width, 32);
    545: sw = ALIGN((offset & 63) + width, 64);
    547: if (sw == 0)
    550: return (sw - 32) >> 3;

drivers/gpu/drm/nouveau/dispnv50/atom.h
    214: u16 sw;

drivers/gpu/drm/nouveau/dispnv50/ovly507e.c
    50: evo_data(push, asyw->scale.sh << 16 | asyw->scale.sw);

drivers/gpu/drm/nouveau/dispnv50/wndw.c
    281: asyw->scale.sw = asyw->state.src_w >> 16;

drivers/gpu/drm/nouveau/include/nvkm/core/device.h
    177: struct nvkm_sw *sw;
    250: int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);

drivers/gpu/drm/nouveau/include/nvkm/engine/sw.h
    13: bool nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data);

drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
    95, 116: .sw = nv04_sw_new,
    158, 180, 202, 224, 246, 268, 290, 312, 334, 356, 378, 401, 424, 446, 469, 495, 521, 547, 573, 599, 625, 651, 677, 703, 729, 755, 781, 807, 862, 888, 914: .sw = nv10_sw_new,
    836, 945, 977, 1009, 1041, 1073, 1106, 1137, 1172, 1205, 1238, 1270, 1302, 1335: .sw = nv50_sw_new,
    1372, 1408, 1444, 1481, 1518, 1555, 1591, 1626, 1662, 1701, 1740, 1779, 1804, 1842, 1880, 1918, 1956, 1990, 2024, 2059, 2094, 2129, 2154, 2190, 2226, 2262, 2298, 2334, 2370, 2394: .sw = gf100_sw_new,
    2715: _(SW , device->sw , &device->sw->engine);
    3203: _(NVKM_ENGINE_SW , sw);

drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
    421: if (device->sw) {
    422: if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))

drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
    701: if (device->sw) {
    702: if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))

drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
    110: struct nvkm_sw *sw = device->sw;
    126: if (!(engine & mask) && sw)
    127: handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);

drivers/gpu/drm/nouveau/nvkm/engine/sw/base.c
    30: nvkm_sw_mthd(struct nvkm_sw *sw, int chid, int subc, u32 mthd, u32 data)
    36: spin_lock_irqsave(&sw->engine.lock, flags);
    37: list_for_each_entry(chan, &sw->chan, head) {
    41: list_add(&chan->head, &sw->chan);
    45: spin_unlock_irqrestore(&sw->engine.lock, flags);
    61: struct nvkm_sw *sw = nvkm_sw(oclass->engine);
    64: while (sw->func->sclass[c].ctor) {
    66: oclass->engn = &sw->func->sclass[index];
    67: oclass->base = sw->func->sclass[index].base;
    81: struct nvkm_sw *sw = nvkm_sw(oclass->engine);
    82: return sw->func->chan_new(sw, fifoch, oclass, pobject);
    102: struct nvkm_sw *sw;
    104: if (!(sw = *psw = kzalloc(sizeof(*sw), GFP_KERNEL)))
    106: INIT_LIST_HEAD(&sw->chan);
    107: sw->func = func;
    109: return nvkm_engine_ctor(&nvkm_sw, device, index, true, &sw->engine);

drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.c
    76: struct nvkm_sw *sw = chan->sw;
    84: spin_lock_irqsave(&sw->engine.lock, flags);
    86: spin_unlock_irqrestore(&sw->engine.lock, flags);
    96: nvkm_sw_chan_ctor(const struct nvkm_sw_chan_func *func, struct nvkm_sw *sw,
    104: chan->sw = sw;
    106: spin_lock_irqsave(&sw->engine.lock, flags);
    107: list_add(&chan->head, &sw->chan);
    108: spin_unlock_irqrestore(&sw->engine.lock, flags);

drivers/gpu/drm/nouveau/nvkm/engine/sw/chan.h
    13: struct nvkm_sw *sw;

drivers/gpu/drm/nouveau/nvkm/engine/sw/gf100.c
    43: struct nvkm_sw *sw = chan->base.sw;
    44: struct nvkm_device *device = sw->engine.subdev.device;
    105: gf100_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
    109: struct nvkm_disp *disp = sw->engine.subdev.device->disp;
    117: ret = nvkm_sw_chan_ctor(&gf100_sw_chan, sw, fifoch, oclass,

drivers/gpu/drm/nouveau/nvkm/engine/sw/nv04.c
    109: nv04_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
    119: return nvkm_sw_chan_ctor(&nv04_sw_chan, sw, fifo, oclass, &chan->base);

drivers/gpu/drm/nouveau/nvkm/engine/sw/nv10.c
    39: nv10_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifo,
    48: return nvkm_sw_chan_ctor(&nv10_sw_chan, sw, fifo, oclass, chan);

drivers/gpu/drm/nouveau/nvkm/engine/sw/nv50.c
    43: struct nvkm_sw *sw = chan->base.sw;
    44: struct nvkm_device *device = sw->engine.subdev.device;
    100: nv50_sw_chan_new(struct nvkm_sw *sw, struct nvkm_fifo_chan *fifoch,
    103: struct nvkm_disp *disp = sw->engine.subdev.device->disp;
    111: ret = nvkm_sw_chan_ctor(&nv50_sw_chan, sw, fifoch, oclass, &chan->base);

drivers/input/evdev.c
    1166: return evdev_handle_get_val(client, dev, EV_SW, dev->sw,

drivers/input/input.c
    303: !!test_bit(code, dev->sw) != !!value) {
    305: __change_bit(code, dev->sw);
    1512: INPUT_DEV_CAP_ATTR(SW, sw);
    2081: INPUT_CLEANSE_BITMASK(dev, SW, sw);

drivers/input/joystick/sidewinder.c
    199: #define GB(pos,num) sw_get_bits(buf, pos, num, sw->bits)
    285: static int sw_parse(unsigned char *buf, struct sw *sw)
    290: switch (sw->type) {
    297: dev = sw->dev[0];
    319: for (i = 0; i < sw->number; i ++) {
    324: input_report_abs(sw->dev[i], ABS_X, GB(i*15+3,1) - GB(i*15+2,1));
    325: input_report_abs(sw->dev[i], ABS_Y, GB(i*15+0,1) - GB(i*15+1,1));
    328: input_report_key(sw->dev[i], sw_btn[SW_ID_GP][j], !GB(i*15+j+4,1));
    330: input_sync(sw->dev[i]);
    341: dev = sw->dev[0];
    362: dev = sw->dev[0];
    387: dev = sw->dev[0];
    409: static int sw_read(struct sw *sw)
    414: i = sw_read_packet(sw->gameport, buf, sw->length, 0);
    416: if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
    420: " - going to reinitialize.\n", sw->gameport->phys);
    421: sw->fail = SW_FAIL; /* Reinitialize */
    437: if (i == sw->length && !sw_parse(buf, sw)) { /* Parse data */
    439: sw->fail = 0;
    440: sw->ok++;
    442: if (sw->type == SW_ID_3DP && sw->length == 66 /* Many packets OK */
    443: && sw->ok > SW_OK) {
    446: " - enabling optimization again.\n", sw->gameport->phys);
    447: sw->length = 22;
    453: sw->ok = 0;
    454: sw->fail++;
    456: if (sw->type == SW_ID_3DP && sw->length == 22 && sw->fail > SW_BAD) { /* Consecutive bad packets */
    459: " - disabling optimization.\n", sw->gameport->phys);
    460: sw->length = 66;
    463: if (sw->fail < SW_FAIL)
    467: " - reinitializing joystick.\n", sw->gameport->phys);
    469: if (!i && sw->type == SW_ID_3DP) { /* 3D Pro can be in analog mode */
    471: sw_init_digital(sw->gameport);
    475: i = sw_read_packet(sw->gameport, buf, SW_LENGTH, 0); /* Read normal data packet */
    477: sw_read_packet(sw->gameport, buf, SW_LENGTH, i); /* Read ID packet, this initializes the stick */
    479: sw->fail = SW_FAIL;
    486: struct sw *sw = gameport_get_drvdata(gameport);
    488: sw->reads++;
    489: if (sw_read(sw))
    490: sw->bads++;
    495: struct sw *sw = input_get_drvdata(dev);
    497: gameport_start_polling(sw->gameport);
    503: struct sw *sw = input_get_drvdata(dev);
    505: gameport_stop_polling(sw->gameport);
    572: struct sw *sw;
    583: sw = kzalloc(sizeof(struct sw), GFP_KERNEL);
    586: if (!sw || !buf || !idbuf) {
    591: sw->gameport = gameport;
    593: gameport_set_drvdata(gameport, sw);
    636: sw->type = -1;
    650: sw->number = 1;
    651: sw->gameport = gameport;
    652: sw->length = i;
    653: sw->bits = m;
    659: sw->number++; /* fall through */
    663: sw->type = SW_ID_FSP;
    666: sw->number++; /* fall through */
    668: sw->number++; /* fall through */
    670: sw->type = SW_ID_GP;
    674: sw->type = SW_ID_FFW;
    678: sw->type = SW_ID_FFP;
    681: sw->type = SW_ID_PP;
    684: sw->bits = 3; /* fall through */
    686: sw->length = 22; /* fall through */
    688: sw->type = SW_ID_3DP;
    695: } while (k && sw->type == -1);
    697: if (sw->type == -1) {
    717: for (i = 0; i < sw->number; i++) {
    720: snprintf(sw->name, sizeof(sw->name),
    721: "Microsoft SideWinder %s", sw_name[sw->type]);
    722: snprintf(sw->phys[i], sizeof(sw->phys[i]),
    725: sw->dev[i] = input_dev = input_allocate_device();
    731: input_dev->name = sw->name;
    732: input_dev->phys = sw->phys[i];
    735: input_dev->id.product = sw->type;
    739: input_set_drvdata(input_dev, sw);
    746: for (j = 0; (bits = sw_bit[sw->type][j]); j++) {
    749: code = sw_abs[sw->type][j];
    760: for (j = 0; (code = sw_btn[sw->type][j]); j++)
    763: dbg("%s%s [%d-bit id %d data %d]\n", sw->name, comment, m, l, k);
    765: err = input_register_device(sw->dev[i]);
    775: fail4: input_free_device(sw->dev[i]);
    777: input_unregister_device(sw->dev[i]);
    780: kfree(sw);
    786: struct sw *sw = gameport_get_drvdata(gameport);
    789: for (i = 0; i < sw->number; i++)
    790: input_unregister_device(sw->dev[i]);
    793: kfree(sw);

drivers/input/misc/ad714x.c
    216: struct ad714x_button_drv *sw = &ad714x->sw->button[idx];
    218: switch (sw->state) {
    223: input_report_key(sw->input, hw->keycode, 1);
    224: input_sync(sw->input);
    225: sw->state = ACTIVE;
    233: input_report_key(sw->input, hw->keycode, 0);
    234: input_sync(sw->input);
    235: sw->state = IDLE;
    269: struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
    271: sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
    275: sw->highest_stage);
    294: struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
    296: sw->abs_pos = ad714x_cal_abs_pos(ad714x, hw->start_stage, hw->end_stage,
    297: sw->highest_stage, hw->max_coord);
    300: sw->abs_pos);
    315: struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
    317: sw->flt_pos = (sw->flt_pos * (10 - 4) +
    318: sw->abs_pos * 4)/10;
    321: sw->flt_pos);
    341: struct ad714x_slider_drv *sw = &ad714x->sw->slider[idx];
    350: switch (sw->state) {
    353: sw->state = JITTER;
    367: sw->flt_pos = sw->abs_pos;
    368: sw->state = ACTIVE;
    379: input_report_abs(sw->input, ABS_X, sw->flt_pos);
    380: input_report_key(sw->input, BTN_TOUCH, 1);
    386: sw->state = IDLE;
    387: input_report_key(sw->input, BTN_TOUCH, 0);
    391: input_sync(sw->input);
    410: struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
    412: sw->pre_highest_stage = sw->highest_stage;
    413: sw->highest_stage = ad714x_cal_highest_stage(ad714x, hw->start_stage,
    417: sw->highest_stage);
    452: struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
    457: first_before = (sw->highest_stage + stage_num - 1) % stage_num;
    458: highest = sw->highest_stage;
    459: first_after = (sw->highest_stage + stage_num + 1) % stage_num;
    471: sw->abs_pos = ((hw->max_coord / (hw->end_stage - hw->start_stage)) *
    474: if (sw->abs_pos > hw->max_coord)
    475: sw->abs_pos = hw->max_coord;
    476: else if (sw->abs_pos < 0)
    477: sw->abs_pos = 0;
    483: struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
    484: if (((sw->pre_highest_stage == hw->end_stage) &&
    485: (sw->highest_stage == hw->start_stage)) ||
    486: ((sw->pre_highest_stage == hw->start_stage) &&
    487: (sw->highest_stage == hw->end_stage)))
    488: sw->flt_pos = sw->abs_pos;
    490: sw->flt_pos = ((sw->flt_pos * 30) + (sw->abs_pos * 71)) / 100;
    492: if (sw->flt_pos > hw->max_coord)
    493: sw->flt_pos = hw->max_coord;
    513: struct ad714x_wheel_drv *sw = &ad714x->sw->wheel[idx];
    522: switch (sw->state) {
    525: sw->state = JITTER;
    539: sw->flt_pos = sw->abs_pos;
    540: sw->state = ACTIVE;
    551: input_report_abs(sw->input, ABS_WHEEL,
    552: sw->flt_pos);
    553: input_report_key(sw->input, BTN_TOUCH, 1);
    559: sw->state = IDLE;
    560: input_report_key(sw->input, BTN_TOUCH, 0);
    565: input_sync(sw->input);
    597: struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
    599: sw->x_highest_stage = ad714x_cal_highest_stage(ad714x,
    601: sw->y_highest_stage = ad714x_cal_highest_stage(ad714x,
    606: idx, sw->x_highest_stage, sw->y_highest_stage);
    618: struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
    621: for (i = hw->x_start_stage; i < sw->x_highest_stage; i++) {
    627: for (i = sw->x_highest_stage; i < hw->x_end_stage; i++) {
    633: for (i = hw->y_start_stage; i < sw->y_highest_stage; i++) {
    639: for (i = sw->y_highest_stage; i < hw->y_end_stage; i++) {
    657: struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
    659: sw->x_abs_pos = ad714x_cal_abs_pos(ad714x, hw->x_start_stage,
    660: hw->x_end_stage, sw->x_highest_stage, hw->x_max_coord);
    661: sw->y_abs_pos = ad714x_cal_abs_pos(ad714x, hw->y_start_stage,
    662: hw->y_end_stage, sw->y_highest_stage, hw->y_max_coord);
    665: sw->x_abs_pos, sw->y_abs_pos);
    670: struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
    672: sw->x_flt_pos = (sw->x_flt_pos * (10 - 4) +
    673: sw->x_abs_pos * 4)/10;
    674: sw->y_flt_pos = (sw->y_flt_pos * (10 - 4) +
    675: sw->y_abs_pos * 4)/10;
    678: idx, sw->x_flt_pos, sw->y_flt_pos);
    702: struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
    709: if (!sw->left_ep) {
    711: sw->left_ep = 1;
    712: sw->left_ep_val =
    718: LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->left_ep_val))
    719: sw->left_ep = 0;
    726: if (!sw->right_ep) {
    728: sw->right_ep = 1;
    729: sw->right_ep_val =
    735: LEFT_RIGHT_END_POINT_DEAVTIVALION_LEVEL + sw->right_ep_val))
    736: sw->right_ep = 0;
    743: if (!sw->top_ep) {
    745: sw->top_ep = 1;
    746: sw->top_ep_val =
    752: TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL + sw->top_ep_val))
    753: sw->top_ep = 0;
    760: if (!sw->bottom_ep) {
    762: sw->bottom_ep = 1;
    763: sw->bottom_ep_val =
    769: TOP_BOTTOM_END_POINT_DEAVTIVALION_LEVEL + sw->bottom_ep_val))
    770: sw->bottom_ep = 0;
    773: return sw->left_ep || sw->right_ep || sw->top_ep || sw->bottom_ep;
    794: struct ad714x_touchpad_drv *sw = &ad714x->sw->touchpad[idx];
    806: switch (sw->state) {
    809: sw->state = JITTER;
    828: sw->x_flt_pos = sw->x_abs_pos;
    829: sw->y_flt_pos = sw->y_abs_pos;
    830: sw->state = ACTIVE;
    844: input_report_abs(sw->input, ABS_X,
    845: sw->x_flt_pos);
    846: input_report_abs(sw->input, ABS_Y,
    847: sw->y_flt_pos);
    848: input_report_key(sw->input, BTN_TOUCH,
    856: sw->state = IDLE;
    857: input_report_key(sw->input, BTN_TOUCH, 0);
    861: input_sync(sw->input);
    992: ad714x = devm_kzalloc(dev, sizeof(*ad714x) + sizeof(*ad714x->sw) +
    1005: ad714x->sw = drv_mem;
    1006: drv_mem += sizeof(*ad714x->sw);
    1007: ad714x->sw->slider = sd_drv = drv_mem;
    1009: ad714x->sw->wheel = wl_drv = drv_mem;
    1011: ad714x->sw->touchpad = tp_drv = drv_mem;
    1013: ad714x->sw->button = bt_drv = drv_mem;

drivers/input/misc/ad714x.h
    32: struct ad714x_driver_data *sw;

drivers/input/misc/wistron_btns.c
    388: { KE_SW, 0x4a, {.sw = {SW_LID, 1}} }, /* lid close */
    389: { KE_SW, 0x4b, {.sw = {SW_LID, 0}} }, /* lid open */
    589: { KE_SW, 0x4a, {.sw = {SW_LID, 1}} }, /* lid close */
    590: { KE_SW, 0x4b, {.sw = {SW_LID, 0}} }, /* lid open */

drivers/input/sparse-keymap.c
    202: __set_bit(entry->sw.code, dev->swbit);
    249: value = ke->sw.value;
    253: input_report_switch(dev, ke->sw.code, value);

drivers/media/pci/ttpci/av7110_av.c
    353: int sw;
    368: sw = (p[3] & 0x0F);
    369: ret = av7110_set_vidmode(av7110, sw2mode[sw]);
    371: dprintk(2, "playback %dx%d fr=%d\n", hsize, vsize, sw);

drivers/media/platform/exynos-gsc/gsc-core.c
    607: int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw,
    625: if ((sw / tmp_w) > sc_down_max ||
    627: (tmp_w / sw) > var->sc_up_max ||

drivers/media/platform/exynos-gsc/gsc-core.h
    398: int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw,

drivers/media/platform/exynos4-is/fimc-core.c
    192: int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,
    199: return (sw == dw && sh == dh) ? 0 : -EINVAL;
    201: if ((sw >= SCALER_MAX_HRATIO * dw) || (sh >= SCALER_MAX_VRATIO * dh))

drivers/media/platform/exynos4-is/fimc-core.h
    629: int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh,

drivers/mtd/nand/onenand/omap2.c
    71: bool sr, bool sw,
    103: if (sw)

drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
    439: struct aq_hw_atl_utils_fw_rpc_tid_s sw;
    452: sw.tid = 0xFFFFU & (++self->rpc_tid);
    453: sw.len = (u16)rpc_size;
    454: aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
    464: struct aq_hw_atl_utils_fw_rpc_tid_s sw;
    468: sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
    470: self->rpc_tid = sw.tid;
    474: sw.tid == fw.tid,
    484: err = hw_atl_utils_fw_rpc_call(self, sw.len);
    488: } while (sw.tid != fw.tid || 0xFFFFU == fw.len);

drivers/net/ethernet/emulex/benet/be_hw.h
    320: u8 sw; /* dword 1 */
    353: u8 sw; /* dword 1 */

drivers/net/ethernet/intel/ice/ice_common.c
    438: struct ice_switch_info *sw;
    442: sw = hw->switch_info;
    444: if (!sw)
    447: INIT_LIST_HEAD(&sw->vsi_list_map_head);
    458: struct ice_switch_info *sw = hw->switch_info;
    464: list_for_each_entry_safe(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
    482: devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
    483: devm_kfree(ice_hw_to_dev(hw), sw);
    3404: struct ice_switch_info *sw = hw->switch_info;
    3414: list_replace_init(&sw->recp_list[i].filt_rules,
    3415: &sw->recp_list[i].filt_replay_rules);

drivers/net/ethernet/intel/ice/ice_switch.c
    960: struct ice_switch_info *sw = hw->switch_info;
    973: list_add(&v_map->list_entry, &sw->vsi_list_map_head);
    1172: struct ice_switch_info *sw = hw->switch_info;
    1178: rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
    1179: rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
    1332: struct ice_switch_info *sw = hw->switch_info;
    1335: list_head = &sw->recp_list[recp_id].filt_rules;
    1363: struct ice_switch_info *sw = hw->switch_info;
    1367: list_head = &sw->recp_list[recp_id].filt_rules;
    1392: struct ice_switch_info *sw = hw->switch_info;
    1403: rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
    1560: struct ice_switch_info *sw = hw->switch_info;
    1572: rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
    1659: struct ice_switch_info *sw;
    1669: sw = hw->switch_info;
    1670: rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
    1716: rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
    1803: struct ice_switch_info *sw = hw->switch_info;
    1828: rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
    2156: struct ice_switch_info *sw = hw->switch_info;
    2160: list_head = &sw->recp_list[recp_id].filt_rules;
    2420: struct ice_switch_info *sw = hw->switch_info;
    2437: rule_head = &sw->recp_list[recipe_id].filt_rules;
    2438: rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
    2602: struct ice_switch_info *sw = hw->switch_info;
    2611: vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
    2612: vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
    2650: struct ice_switch_info *sw = hw->switch_info;
    2659: rule_lock = &sw->recp_list[lkup].filt_rule_lock;
    2660: rule_head = &sw->recp_list[lkup].filt_rules;
    2778: struct ice_switch_info *sw = hw->switch_info;
    2785: head = &sw->recp_list[i].filt_replay_rules;
    2801: struct ice_switch_info *sw = hw->switch_info;
    2804: if (!sw)
    2808: if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
    2811: l_head = &sw->recp_list[i].filt_replay_rules;

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
    175: struct mlx5e_sw_stats *s = &priv->stats.sw;
    203: data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,

drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
    168: data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
    174: struct mlx5e_sw_stats *s = &priv->stats.sw;

drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
    316: struct mlx5e_sw_stats sw;

drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
    142: memcpy(&priv->stats.sw, &s, sizeof(s));
    148: struct mlx5e_sw_stats *sstats = &priv->stats.sw;

drivers/net/ethernet/micrel/ksz884x.c
    980: struct ksz_sw_desc sw;
    1598: desc->sw.ctrl.tx.hw_owned = 1;
    1599: if (desc->sw.buf_size != desc->sw.buf.data) {
    1600: desc->sw.buf_size = desc->sw.buf.data;
    1601: desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
    1603: desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
    1612: (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
    1622: desc->sw.buf.rx.buf_size = len;
    1632: (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
    1642: desc->sw.buf.tx.buf_size = len;
    2259: struct ksz_switch *sw = hw->ksz_switch;
    2263: sw->port_cfg[port].rx_rate[prio] =
    2264: sw->port_cfg[port].tx_rate[prio] = 0;
    2580: struct ksz_switch *sw = hw->ksz_switch;
    2586: sw->p_802_1p[0] = 0;
    2587: sw->p_802_1p[1] = 0;
    2588: sw->p_802_1p[2] = 1;
    2589: sw->p_802_1p[3] = 1;
    2590: sw->p_802_1p[4] = 2;
    2591: sw->p_802_1p[5] = 2;
    2592: sw->p_802_1p[6] = 3;
    2593: sw->p_802_1p[7] = 3;
    2600: sw->diffserv[tos] = 0;
    2609: sw->port_cfg[port].port_prio = 0;
    2610: sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
    2642: struct ksz_switch *sw = hw->ksz_switch;
    2647: &sw->vlan_table[entry].vid,
    2648: &sw->vlan_table[entry].fid,
    2649: &sw->vlan_table[entry].member);
    2653: port_get_def_vid(hw, port,
&sw->port_cfg[port].vid); sw 2654 drivers/net/ethernet/micrel/ksz884x.c sw->port_cfg[port].member = PORT_MASK; sw 3841 drivers/net/ethernet/micrel/ksz884x.c previous->sw.buf.rx.end_of_ring = 1; sw 3842 drivers/net/ethernet/micrel/ksz884x.c previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data); sw 3978 drivers/net/ethernet/micrel/ksz884x.c hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1; sw 4004 drivers/net/ethernet/micrel/ksz884x.c cur->sw.buf.tx.last_seg = 1; sw 4008 drivers/net/ethernet/micrel/ksz884x.c cur->sw.buf.tx.intr = 1; sw 4014 drivers/net/ethernet/micrel/ksz884x.c cur->sw.buf.tx.dest_port = hw->dst_ports; sw 4711 drivers/net/ethernet/micrel/ksz884x.c (desc)->sw.buf.tx.csum_gen_tcp = 1; sw 4712 drivers/net/ethernet/micrel/ksz884x.c (desc)->sw.buf.tx.csum_gen_udp = 1; sw 5322 drivers/net/ethernet/micrel/ksz884x.c struct ksz_switch *sw = hw->ksz_switch; sw 5325 drivers/net/ethernet/micrel/ksz884x.c if (!sw->member) { sw 5330 drivers/net/ethernet/micrel/ksz884x.c if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state) sw 5331 drivers/net/ethernet/micrel/ksz884x.c member = HOST_MASK | sw->member; sw 5334 drivers/net/ethernet/micrel/ksz884x.c if (member != sw->port_cfg[port].member) sw 5698 drivers/net/ethernet/micrel/ksz884x.c struct ksz_switch *sw = hw->ksz_switch; sw 5703 drivers/net/ethernet/micrel/ksz884x.c if (sw->member & port) { sw 5704 drivers/net/ethernet/micrel/ksz884x.c sw->member &= ~port; sw 6895 drivers/net/ethernet/micrel/ksz884x.c struct ksz_switch *sw = hw->ksz_switch; sw 6900 drivers/net/ethernet/micrel/ksz884x.c sw->other_addr[5] = (u8) data[0]; sw 6901 drivers/net/ethernet/micrel/ksz884x.c sw->other_addr[4] = (u8)(data[0] >> 8); sw 6902 drivers/net/ethernet/micrel/ksz884x.c sw->other_addr[3] = (u8) data[1]; sw 6903 drivers/net/ethernet/micrel/ksz884x.c sw->other_addr[2] = (u8)(data[1] >> 8); sw 6904 drivers/net/ethernet/micrel/ksz884x.c sw->other_addr[1] = (u8) data[2]; sw 6905 drivers/net/ethernet/micrel/ksz884x.c sw->other_addr[0] = (u8)(data[2] >> 8); sw 6930 drivers/net/ethernet/micrel/ksz884x.c struct ksz_switch *sw = NULL; sw 7010 drivers/net/ethernet/micrel/ksz884x.c sw = hw->ksz_switch; sw 7039 drivers/net/ethernet/micrel/ksz884x.c memcpy(sw->other_addr, hw->override_addr, ETH_ALEN); sw 7092 drivers/net/ethernet/micrel/ksz884x.c memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN); sw 7093 drivers/net/ethernet/micrel/ksz884x.c if (ether_addr_equal(sw->other_addr, hw->override_addr)) sw 321 drivers/net/ethernet/netronome/nfp/bpf/jit.c u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both, sw 350 drivers/net/ethernet/netronome/nfp/bpf/jit.c FIELD_PREP(OP_SHF_SW, sw) | sw 55 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) sw 57 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->type = fw->type; sw 58 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size; sw 59 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->addr = ((u64)fw->addr_hi << 32) | le32_to_cpu(fw->addr_lo); sw 60 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->size = ((u64)fw->size_hi << 32) | le32_to_cpu(fw->size_lo); sw 64 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->target = NFP_RTSYM_TARGET_LMEM; sw 67 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->target = NFP_RTSYM_TARGET_EMU_CACHE; sw 70 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->target = fw->target; sw 75 
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->domain = nfp_meid(fw->island, fw->menum); sw 77 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->domain = fw->island; sw 79 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c sw->domain = -1; sw 156 drivers/net/wimax/i2400m/op-rfkill.c enum i2400m_rf_switch_status hw, sw; sw 159 drivers/net/wimax/i2400m/op-rfkill.c sw = le32_to_cpu(rfss->sw_rf_switch); sw 163 drivers/net/wimax/i2400m/op-rfkill.c i2400m, rfss, hw, sw); sw 172 drivers/net/wimax/i2400m/op-rfkill.c switch (sw) { sw 180 drivers/net/wimax/i2400m/op-rfkill.c dev_err(dev, "HW BUG? Unknown RF SW state 0x%x\n", sw); sw 195 drivers/net/wimax/i2400m/op-rfkill.c i2400m, rfss, hw, sw); sw 1460 drivers/net/wireless/intel/iwlegacy/3945-mac.c il->isr_stats.sw++; sw 4464 drivers/net/wireless/intel/iwlegacy/4965-mac.c il->isr_stats.sw++; sw 1008 drivers/net/wireless/intel/iwlegacy/common.h u32 sw; sw 662 drivers/net/wireless/intel/iwlegacy/debug.c il->isr_stats.sw); sw 663 drivers/net/wireless/intel/iwlegacy/debug.c if (il->isr_stats.sw || il->isr_stats.hw) { sw 279 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; sw 321 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; sw 335 drivers/net/wireless/intel/iwlwifi/mvm/nvm.c return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, sw 124 drivers/net/wireless/intel/iwlwifi/pcie/internal.h u32 sw; sw 1881 drivers/net/wireless/intel/iwlwifi/pcie/rx.c isr_stats->sw++; sw 2169 drivers/net/wireless/intel/iwlwifi/pcie/rx.c isr_stats->sw++; sw 2204 drivers/net/wireless/intel/iwlwifi/pcie/rx.c isr_stats->sw++; sw 2233 drivers/net/wireless/intel/iwlwifi/pcie/rx.c isr_stats->sw++; sw 2659 drivers/net/wireless/intel/iwlwifi/pcie/trans.c isr_stats->sw); sw 2660 drivers/net/wireless/intel/iwlwifi/pcie/trans.c if (isr_stats->sw || isr_stats->hw) { sw 42 drivers/platform/x86/intel-vbtn.c { KE_SW, 0xCA, { .sw = { SW_DOCK, 1 } } }, /* Docked */ sw 43 drivers/platform/x86/intel-vbtn.c { KE_SW, 0xCB, { .sw = { SW_DOCK, 0 } } }, /* Undocked */ sw 44 drivers/platform/x86/intel-vbtn.c { KE_SW, 0xCC, { .sw = { SW_TABLET_MODE, 1 } } }, /* Tablet */ sw 45 drivers/platform/x86/intel-vbtn.c { KE_SW, 0xCD, { .sw = { SW_TABLET_MODE, 0 } } }, /* Laptop */ sw 174 drivers/power/supply/lp8727_charger.c static inline void lp8727_ctrl_switch(struct lp8727_chg *pchg, u8 sw) sw 176 drivers/power/supply/lp8727_charger.c lp8727_write_byte(pchg, LP8727_SWCTRL, sw); sw 28 drivers/s390/char/ctrlchar.c void schedule_sysrq_work(struct sysrq_work *sw) sw 30 drivers/s390/char/ctrlchar.c INIT_WORK(&sw->work, ctrlchar_handle_sysrq); sw 31 drivers/s390/char/ctrlchar.c schedule_work(&sw->work); sw 31 drivers/s390/char/ctrlchar.h void schedule_sysrq_work(struct sysrq_work *sw); sw 1808 drivers/s390/net/ctcm_fsms.c header->sw.th_last_seq = wch->th_seq_num; sw 639 drivers/s390/net/ctcm_main.c header->sw.th_last_seq = ch->th_seq_num; sw 131 drivers/s390/net/ctcm_mpc.c __u32 ct, sw, rm, dup; sw 139 drivers/s390/net/ctcm_mpc.c sw = 0; sw 146 drivers/s390/net/ctcm_mpc.c if (sw == 0) { sw 153 drivers/s390/net/ctcm_mpc.c if ((sw == 4) || (sw == 12)) sw 155 drivers/s390/net/ctcm_mpc.c if (sw == 8) sw 163 drivers/s390/net/ctcm_mpc.c basc[sw] = *ptr; sw 165 drivers/s390/net/ctcm_mpc.c basc[sw] = '.'; sw 167 drivers/s390/net/ctcm_mpc.c basc[sw+1] = '\0'; sw 168 drivers/s390/net/ctcm_mpc.c sw++; sw 170 drivers/s390/net/ctcm_mpc.c if (sw != 16) sw 186 
drivers/s390/net/ctcm_mpc.c sw = 0; sw 190 drivers/s390/net/ctcm_mpc.c if (sw != 0) { sw 191 drivers/s390/net/ctcm_mpc.c for ( ; rm > 0; rm--, sw++) { sw 192 drivers/s390/net/ctcm_mpc.c if ((sw == 4) || (sw == 12)) sw 194 drivers/s390/net/ctcm_mpc.c if (sw == 8) sw 670 drivers/s390/net/ctcm_mpc.c header->sw.th_last_seq = ch->th_seq_num; sw 107 drivers/s390/net/ctcm_mpc.h struct th_addon sw; sw 531 drivers/scsi/libsas/sas_discover.c static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw) sw 538 drivers/scsi/libsas/sas_discover.c queue_work(ha->disco_q, &sw->work); sw 542 drivers/scsi/libsas/sas_discover.c struct sas_work *sw, sw 549 drivers/scsi/libsas/sas_discover.c sas_chain_work(ha, sw); sw 13 drivers/scsi/libsas/sas_event.c int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) sw 23 drivers/scsi/libsas/sas_event.c if (list_empty(&sw->drain_node)) sw 24 drivers/scsi/libsas/sas_event.c list_add_tail(&sw->drain_node, &ha->defer_q); sw 26 drivers/scsi/libsas/sas_event.c rc = queue_work(ha->event_q, &sw->work); sw 47 drivers/scsi/libsas/sas_event.c struct sas_work *sw, *_sw; sw 60 drivers/scsi/libsas/sas_event.c list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { sw 61 drivers/scsi/libsas/sas_event.c list_del_init(&sw->drain_node); sw 62 drivers/scsi/libsas/sas_event.c ret = sas_queue_work(ha, sw); sw 64 drivers/scsi/libsas/sas_event.c sas_free_event(to_asd_sas_event(&sw->work)); sw 69 drivers/scsi/libsas/sas_internal.h int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); sw 41 drivers/staging/vt6656/int.h u8 sw[2]; sw 28 drivers/thunderbolt/cap.c struct tb_switch *sw = port->sw; sw 36 drivers/thunderbolt/cap.c if (tb_switch_is_lr(sw)) sw 38 drivers/thunderbolt/cap.c else if (tb_switch_is_er(sw)) sw 43 drivers/thunderbolt/cap.c ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1); sw 52 drivers/thunderbolt/cap.c return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); sw 63 drivers/thunderbolt/cap.c if (tb_switch_is_lr(port->sw)) { sw 116 drivers/thunderbolt/cap.c static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) sw 118 drivers/thunderbolt/cap.c int offset = sw->config.first_cap_offset; sw 124 drivers/thunderbolt/cap.c ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1); sw 147 drivers/thunderbolt/cap.c int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec) sw 152 drivers/thunderbolt/cap.c offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE); sw 159 drivers/thunderbolt/cap.c ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2); sw 55 drivers/thunderbolt/dma_port.c struct tb_switch *sw; sw 168 drivers/thunderbolt/dma_port.c static int dma_find_port(struct tb_switch *sw) sw 181 drivers/thunderbolt/dma_port.c ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i], sw 201 drivers/thunderbolt/dma_port.c struct tb_dma_port *dma_port_alloc(struct tb_switch *sw) sw 206 drivers/thunderbolt/dma_port.c port = dma_find_port(sw); sw 220 drivers/thunderbolt/dma_port.c dma->sw = sw; sw 243 drivers/thunderbolt/dma_port.c struct tb_switch *sw = dma->sw; sw 249 drivers/thunderbolt/dma_port.c ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port, sw 281 drivers/thunderbolt/dma_port.c struct tb_switch *sw = dma->sw; sw 285 drivers/thunderbolt/dma_port.c ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port, sw 294 drivers/thunderbolt/dma_port.c ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port, sw 305 drivers/thunderbolt/dma_port.c struct tb_switch *sw = dma->sw; 
sw 322 drivers/thunderbolt/dma_port.c return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port, sw 329 drivers/thunderbolt/dma_port.c struct tb_switch *sw = dma->sw; sw 336 drivers/thunderbolt/dma_port.c ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port, sw 483 drivers/thunderbolt/dma_port.c struct tb_switch *sw = dma->sw; sw 487 drivers/thunderbolt/dma_port.c ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port, sw 21 drivers/thunderbolt/dma_port.h struct tb_dma_port *dma_port_alloc(struct tb_switch *sw); sw 584 drivers/thunderbolt/domain.c int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw) sw 592 drivers/thunderbolt/domain.c parent_sw = tb_to_switch(sw->dev.parent); sw 596 drivers/thunderbolt/domain.c return tb->cm_ops->approve_switch(tb, sw); sw 610 drivers/thunderbolt/domain.c int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw) sw 619 drivers/thunderbolt/domain.c parent_sw = tb_to_switch(sw->dev.parent); sw 623 drivers/thunderbolt/domain.c ret = tb->cm_ops->add_switch_key(tb, sw); sw 627 drivers/thunderbolt/domain.c return tb->cm_ops->approve_switch(tb, sw); sw 642 drivers/thunderbolt/domain.c int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw) sw 656 drivers/thunderbolt/domain.c parent_sw = tb_to_switch(sw->dev.parent); sw 661 drivers/thunderbolt/domain.c ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response); sw 669 drivers/thunderbolt/domain.c ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE); sw 696 drivers/thunderbolt/domain.c return tb->cm_ops->approve_switch(tb, sw); sw 17 drivers/thunderbolt/eeprom.c static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl) sw 19 drivers/thunderbolt/eeprom.c return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1); sw 25 drivers/thunderbolt/eeprom.c static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl) sw 27 drivers/thunderbolt/eeprom.c return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1); sw 41 drivers/thunderbolt/eeprom.c static int tb_eeprom_active(struct tb_switch *sw, bool enable) sw 44 drivers/thunderbolt/eeprom.c int res = tb_eeprom_ctl_read(sw, &ctl); sw 49 drivers/thunderbolt/eeprom.c res = tb_eeprom_ctl_write(sw, &ctl); sw 53 drivers/thunderbolt/eeprom.c return tb_eeprom_ctl_write(sw, &ctl); sw 56 drivers/thunderbolt/eeprom.c res = tb_eeprom_ctl_write(sw, &ctl); sw 60 drivers/thunderbolt/eeprom.c return tb_eeprom_ctl_write(sw, &ctl); sw 70 drivers/thunderbolt/eeprom.c static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl, sw 75 drivers/thunderbolt/eeprom.c res = tb_eeprom_ctl_write(sw, ctl); sw 80 drivers/thunderbolt/eeprom.c res = tb_eeprom_ctl_write(sw, ctl); sw 84 drivers/thunderbolt/eeprom.c res = tb_eeprom_ctl_read(sw, ctl); sw 89 drivers/thunderbolt/eeprom.c return tb_eeprom_ctl_write(sw, ctl); sw 95 drivers/thunderbolt/eeprom.c static int tb_eeprom_out(struct tb_switch *sw, u8 val) sw 99 drivers/thunderbolt/eeprom.c int res = tb_eeprom_ctl_read(sw, &ctl); sw 104 drivers/thunderbolt/eeprom.c res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT); sw 115 drivers/thunderbolt/eeprom.c static int tb_eeprom_in(struct tb_switch *sw, u8 *val) sw 119 drivers/thunderbolt/eeprom.c int res = tb_eeprom_ctl_read(sw, &ctl); sw 125 drivers/thunderbolt/eeprom.c res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN); sw 136 drivers/thunderbolt/eeprom.c static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val, sw 140 
drivers/thunderbolt/eeprom.c res = tb_eeprom_active(sw, true); sw 143 drivers/thunderbolt/eeprom.c res = tb_eeprom_out(sw, 3); sw 146 drivers/thunderbolt/eeprom.c res = tb_eeprom_out(sw, offset >> 8); sw 149 drivers/thunderbolt/eeprom.c res = tb_eeprom_out(sw, offset); sw 153 drivers/thunderbolt/eeprom.c res = tb_eeprom_in(sw, val + i); sw 157 drivers/thunderbolt/eeprom.c return tb_eeprom_active(sw, false); sw 244 drivers/thunderbolt/eeprom.c static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) sw 248 drivers/thunderbolt/eeprom.c if (!sw->cap_plug_events) { sw 249 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n"); sw 252 drivers/thunderbolt/eeprom.c res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events, sw 258 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "no NVM\n"); sw 263 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n", sw 277 drivers/thunderbolt/eeprom.c int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid) sw 282 drivers/thunderbolt/eeprom.c int res = tb_eeprom_get_drom_offset(sw, &drom_offset); sw 290 drivers/thunderbolt/eeprom.c res = tb_eeprom_read_n(sw, drom_offset, data, 9); sw 296 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n", sw 305 drivers/thunderbolt/eeprom.c static int tb_drom_parse_entry_generic(struct tb_switch *sw, sw 314 drivers/thunderbolt/eeprom.c sw->vendor_name = kstrndup(entry->data, sw 316 drivers/thunderbolt/eeprom.c if (!sw->vendor_name) sw 321 drivers/thunderbolt/eeprom.c sw->device_name = kstrndup(entry->data, sw 323 drivers/thunderbolt/eeprom.c if (!sw->device_name) sw 331 drivers/thunderbolt/eeprom.c static int tb_drom_parse_entry_port(struct tb_switch *sw, sw 342 drivers/thunderbolt/eeprom.c if (header->index > sw->config.max_port_number) { sw 343 drivers/thunderbolt/eeprom.c dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n"); sw 347 drivers/thunderbolt/eeprom.c port = &sw->ports[header->index]; sw 360 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, sw 368 drivers/thunderbolt/eeprom.c &port->sw->ports[entry->dual_link_port_nr]; sw 378 drivers/thunderbolt/eeprom.c static int tb_drom_parse_entries(struct tb_switch *sw) sw 380 drivers/thunderbolt/eeprom.c struct tb_drom_header *header = (void *) sw->drom; sw 386 drivers/thunderbolt/eeprom.c struct tb_drom_entry_header *entry = (void *) (sw->drom + pos); sw 389 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "drom buffer overrun, aborting\n"); sw 395 drivers/thunderbolt/eeprom.c res = tb_drom_parse_entry_generic(sw, entry); sw 398 drivers/thunderbolt/eeprom.c res = tb_drom_parse_entry_port(sw, entry); sw 412 drivers/thunderbolt/eeprom.c static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size) sw 414 drivers/thunderbolt/eeprom.c struct device *dev = &sw->tb->nhi->pdev->dev; sw 421 drivers/thunderbolt/eeprom.c sw->drom = kmalloc(len, GFP_KERNEL); sw 422 drivers/thunderbolt/eeprom.c if (!sw->drom) sw 425 drivers/thunderbolt/eeprom.c res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom, sw 430 drivers/thunderbolt/eeprom.c *size = ((struct tb_drom_header *)sw->drom)->data_len + sw 438 drivers/thunderbolt/eeprom.c kfree(sw->drom); sw 439 drivers/thunderbolt/eeprom.c sw->drom = NULL; sw 443 drivers/thunderbolt/eeprom.c static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size) sw 448 drivers/thunderbolt/eeprom.c if (!sw->dma_port) sw 451 drivers/thunderbolt/eeprom.c ret = tb_sw_read(sw, &drom_offset, TB_CFG_SWITCH, sw 452 
drivers/thunderbolt/eeprom.c sw->cap_plug_events + 12, 1); sw 459 drivers/thunderbolt/eeprom.c ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size, sw 466 drivers/thunderbolt/eeprom.c sw->drom = kzalloc(*size, GFP_KERNEL); sw 467 drivers/thunderbolt/eeprom.c if (!sw->drom) sw 470 drivers/thunderbolt/eeprom.c ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size); sw 478 drivers/thunderbolt/eeprom.c tb_drom_read_uid_only(sw, &sw->uid); sw 482 drivers/thunderbolt/eeprom.c kfree(sw->drom); sw 483 drivers/thunderbolt/eeprom.c sw->drom = NULL; sw 490 drivers/thunderbolt/eeprom.c int tb_drom_read(struct tb_switch *sw) sw 497 drivers/thunderbolt/eeprom.c if (sw->drom) sw 500 drivers/thunderbolt/eeprom.c if (tb_route(sw) == 0) { sw 505 drivers/thunderbolt/eeprom.c if (tb_drom_copy_efi(sw, &size) == 0) sw 509 drivers/thunderbolt/eeprom.c if (tb_drom_copy_nvm(sw, &size) == 0) sw 516 drivers/thunderbolt/eeprom.c tb_drom_read_uid_only(sw, &sw->uid); sw 518 drivers/thunderbolt/eeprom.c sw->ports[1].link_nr = 0; sw 519 drivers/thunderbolt/eeprom.c sw->ports[2].link_nr = 1; sw 520 drivers/thunderbolt/eeprom.c sw->ports[1].dual_link_port = &sw->ports[2]; sw 521 drivers/thunderbolt/eeprom.c sw->ports[2].dual_link_port = &sw->ports[1]; sw 523 drivers/thunderbolt/eeprom.c sw->ports[3].link_nr = 0; sw 524 drivers/thunderbolt/eeprom.c sw->ports[4].link_nr = 1; sw 525 drivers/thunderbolt/eeprom.c sw->ports[3].dual_link_port = &sw->ports[4]; sw 526 drivers/thunderbolt/eeprom.c sw->ports[4].dual_link_port = &sw->ports[3]; sw 531 drivers/thunderbolt/eeprom.c res = tb_eeprom_get_drom_offset(sw, &drom_offset); sw 535 drivers/thunderbolt/eeprom.c res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2); sw 540 drivers/thunderbolt/eeprom.c tb_sw_dbg(sw, "reading drom (length: %#x)\n", size); sw 542 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "drom too small, aborting\n"); sw 546 drivers/thunderbolt/eeprom.c sw->drom = kzalloc(size, GFP_KERNEL); sw 547 drivers/thunderbolt/eeprom.c if (!sw->drom) sw 549 drivers/thunderbolt/eeprom.c res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size); sw 554 drivers/thunderbolt/eeprom.c header = (void *) sw->drom; sw 557 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "drom size mismatch, aborting\n"); sw 563 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, sw 568 drivers/thunderbolt/eeprom.c if (!sw->uid) sw 569 drivers/thunderbolt/eeprom.c sw->uid = header->uid; sw 570 drivers/thunderbolt/eeprom.c sw->vendor = header->vendor_id; sw 571 drivers/thunderbolt/eeprom.c sw->device = header->model_id; sw 573 drivers/thunderbolt/eeprom.c crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len); sw 575 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, sw 581 drivers/thunderbolt/eeprom.c tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n", sw 584 drivers/thunderbolt/eeprom.c return tb_drom_parse_entries(sw); sw 586 drivers/thunderbolt/eeprom.c kfree(sw->drom); sw 587 drivers/thunderbolt/eeprom.c sw->drom = NULL; sw 360 drivers/thunderbolt/icm.c struct icm_fr_pkg_get_topology_response *switches, *sw; sw 377 drivers/thunderbolt/icm.c sw = &switches[0]; sw 378 drivers/thunderbolt/icm.c index = icm_fr_get_switch_index(sw->ports[link]); sw 384 drivers/thunderbolt/icm.c sw = &switches[index]; sw 388 drivers/thunderbolt/icm.c if (!(sw->first_data & ICM_SWITCH_USED)) { sw 393 drivers/thunderbolt/icm.c for (j = 0; j < ARRAY_SIZE(sw->ports); j++) { sw 394 drivers/thunderbolt/icm.c index = icm_fr_get_switch_index(sw->ports[j]); sw 395 drivers/thunderbolt/icm.c if (index > 
sw->switch_index) { sw 396 drivers/thunderbolt/icm.c sw = &switches[index]; sw 402 drivers/thunderbolt/icm.c *route = get_route(sw->route_hi, sw->route_lo); sw 436 drivers/thunderbolt/icm.c static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw) sw 443 drivers/thunderbolt/icm.c memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); sw 445 drivers/thunderbolt/icm.c request.connection_id = sw->connection_id; sw 446 drivers/thunderbolt/icm.c request.connection_key = sw->connection_key; sw 463 drivers/thunderbolt/icm.c static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw) sw 470 drivers/thunderbolt/icm.c memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); sw 472 drivers/thunderbolt/icm.c request.connection_id = sw->connection_id; sw 473 drivers/thunderbolt/icm.c request.connection_key = sw->connection_key; sw 474 drivers/thunderbolt/icm.c memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); sw 490 drivers/thunderbolt/icm.c static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, sw 498 drivers/thunderbolt/icm.c memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); sw 500 drivers/thunderbolt/icm.c request.connection_id = sw->connection_id; sw 501 drivers/thunderbolt/icm.c request.connection_key = sw->connection_key; sw 573 drivers/thunderbolt/icm.c struct tb_switch *sw; sw 578 drivers/thunderbolt/icm.c sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); sw 579 drivers/thunderbolt/icm.c if (IS_ERR(sw)) sw 582 drivers/thunderbolt/icm.c sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL); sw 583 drivers/thunderbolt/icm.c if (!sw->uuid) { sw 584 drivers/thunderbolt/icm.c tb_sw_warn(sw, "cannot allocate memory for switch\n"); sw 585 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 588 drivers/thunderbolt/icm.c sw->connection_id = connection_id; sw 589 drivers/thunderbolt/icm.c sw->connection_key = connection_key; sw 590 drivers/thunderbolt/icm.c sw->link = link; sw 591 drivers/thunderbolt/icm.c sw->depth = depth; sw 592 drivers/thunderbolt/icm.c sw->authorized = authorized; sw 593 drivers/thunderbolt/icm.c sw->security_level = security_level; sw 594 drivers/thunderbolt/icm.c sw->boot = boot; sw 595 drivers/thunderbolt/icm.c init_completion(&sw->rpm_complete); sw 599 drivers/thunderbolt/icm.c sw->rpm = !!(vss->flags & INTEL_VSS_FLAGS_RTD3); sw 602 drivers/thunderbolt/icm.c tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); sw 603 drivers/thunderbolt/icm.c tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); sw 605 drivers/thunderbolt/icm.c ret = tb_switch_add(sw); sw 607 drivers/thunderbolt/icm.c tb_port_at(tb_route(sw), parent_sw)->remote = NULL; sw 608 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 609 drivers/thunderbolt/icm.c sw = ERR_PTR(ret); sw 616 drivers/thunderbolt/icm.c return sw; sw 619 drivers/thunderbolt/icm.c static void update_switch(struct tb_switch *parent_sw, struct tb_switch *sw, sw 624 drivers/thunderbolt/icm.c tb_port_at(tb_route(sw), parent_sw)->remote = NULL; sw 626 drivers/thunderbolt/icm.c tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); sw 629 drivers/thunderbolt/icm.c sw->config.route_hi = upper_32_bits(route); sw 630 drivers/thunderbolt/icm.c sw->config.route_lo = lower_32_bits(route); sw 631 drivers/thunderbolt/icm.c sw->connection_id = connection_id; sw 632 drivers/thunderbolt/icm.c sw->connection_key = connection_key; sw 633 drivers/thunderbolt/icm.c sw->link = link; sw 634 drivers/thunderbolt/icm.c sw->depth = depth; sw 635 drivers/thunderbolt/icm.c sw->boot = 
boot; sw 638 drivers/thunderbolt/icm.c sw->is_unplugged = false; sw 641 drivers/thunderbolt/icm.c complete(&sw->rpm_complete); sw 644 drivers/thunderbolt/icm.c static void remove_switch(struct tb_switch *sw) sw 648 drivers/thunderbolt/icm.c parent_sw = tb_to_switch(sw->dev.parent); sw 649 drivers/thunderbolt/icm.c tb_port_at(tb_route(sw), parent_sw)->remote = NULL; sw 650 drivers/thunderbolt/icm.c tb_switch_remove(sw); sw 653 drivers/thunderbolt/icm.c static void add_xdomain(struct tb_switch *sw, u64 route, sw 659 drivers/thunderbolt/icm.c pm_runtime_get_sync(&sw->dev); sw 661 drivers/thunderbolt/icm.c xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); sw 668 drivers/thunderbolt/icm.c tb_port_at(route, sw)->xdomain = xd; sw 673 drivers/thunderbolt/icm.c pm_runtime_mark_last_busy(&sw->dev); sw 674 drivers/thunderbolt/icm.c pm_runtime_put_autosuspend(&sw->dev); sw 686 drivers/thunderbolt/icm.c struct tb_switch *sw; sw 688 drivers/thunderbolt/icm.c sw = tb_to_switch(xd->dev.parent); sw 689 drivers/thunderbolt/icm.c tb_port_at(xd->route, sw)->xdomain = NULL; sw 699 drivers/thunderbolt/icm.c struct tb_switch *sw, *parent_sw; sw 724 drivers/thunderbolt/icm.c sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); sw 725 drivers/thunderbolt/icm.c if (sw) { sw 728 drivers/thunderbolt/icm.c parent_sw = tb_to_switch(sw->dev.parent); sw 729 drivers/thunderbolt/icm.c sw_phy_port = tb_phy_port_from_link(sw->link); sw 740 drivers/thunderbolt/icm.c if (sw->depth == depth && sw_phy_port == phy_port && sw 741 drivers/thunderbolt/icm.c !!sw->authorized == authorized) { sw 746 drivers/thunderbolt/icm.c if (sw->link != link) { sw 751 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 755 drivers/thunderbolt/icm.c route = tb_route(sw); sw 758 drivers/thunderbolt/icm.c update_switch(parent_sw, sw, route, pkg->connection_id, sw 760 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 769 drivers/thunderbolt/icm.c remove_switch(sw); sw 770 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 779 drivers/thunderbolt/icm.c sw = tb_switch_find_by_link_depth(tb, link, depth); sw 780 drivers/thunderbolt/icm.c if (!sw) { sw 785 drivers/thunderbolt/icm.c sw = tb_switch_find_by_link_depth(tb, dual_link, depth); sw 787 drivers/thunderbolt/icm.c if (sw) { sw 788 drivers/thunderbolt/icm.c remove_switch(sw); sw 789 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 827 drivers/thunderbolt/icm.c struct tb_switch *sw; sw 839 drivers/thunderbolt/icm.c sw = tb_switch_find_by_link_depth(tb, link, depth); sw 840 drivers/thunderbolt/icm.c if (!sw) { sw 846 drivers/thunderbolt/icm.c remove_switch(sw); sw 847 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 856 drivers/thunderbolt/icm.c struct tb_switch *sw; sw 918 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); sw 919 drivers/thunderbolt/icm.c if (sw) { sw 920 drivers/thunderbolt/icm.c remove_switch(sw); sw 921 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 924 drivers/thunderbolt/icm.c sw = tb_switch_find_by_link_depth(tb, link, depth); sw 925 drivers/thunderbolt/icm.c if (!sw) { sw 931 drivers/thunderbolt/icm.c add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, sw 933 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 987 drivers/thunderbolt/icm.c static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw) sw 994 drivers/thunderbolt/icm.c memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); sw 996 drivers/thunderbolt/icm.c request.route_lo = sw->config.route_lo; sw 997 drivers/thunderbolt/icm.c request.route_hi = sw->config.route_hi; 
sw 998 drivers/thunderbolt/icm.c request.connection_id = sw->connection_id; sw 1014 drivers/thunderbolt/icm.c static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw) sw 1021 drivers/thunderbolt/icm.c memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); sw 1023 drivers/thunderbolt/icm.c request.route_lo = sw->config.route_lo; sw 1024 drivers/thunderbolt/icm.c request.route_hi = sw->config.route_hi; sw 1025 drivers/thunderbolt/icm.c request.connection_id = sw->connection_id; sw 1026 drivers/thunderbolt/icm.c memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE); sw 1042 drivers/thunderbolt/icm.c static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, sw 1050 drivers/thunderbolt/icm.c memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid)); sw 1052 drivers/thunderbolt/icm.c request.route_lo = sw->config.route_lo; sw 1053 drivers/thunderbolt/icm.c request.route_hi = sw->config.route_hi; sw 1054 drivers/thunderbolt/icm.c request.connection_id = sw->connection_id; sw 1146 drivers/thunderbolt/icm.c struct tb_switch *sw, *parent_sw; sw 1173 drivers/thunderbolt/icm.c sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid); sw 1174 drivers/thunderbolt/icm.c if (sw) { sw 1176 drivers/thunderbolt/icm.c if (tb_route(sw) == route && !!sw->authorized == authorized) { sw 1177 drivers/thunderbolt/icm.c parent_sw = tb_to_switch(sw->dev.parent); sw 1178 drivers/thunderbolt/icm.c update_switch(parent_sw, sw, route, pkg->connection_id, sw 1180 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 1184 drivers/thunderbolt/icm.c remove_switch(sw); sw 1185 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 1189 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); sw 1190 drivers/thunderbolt/icm.c if (sw) { sw 1191 drivers/thunderbolt/icm.c remove_switch(sw); sw 1192 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 1208 drivers/thunderbolt/icm.c sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, sw 1211 drivers/thunderbolt/icm.c if (!IS_ERR(sw) && force_rtd3) sw 1212 drivers/thunderbolt/icm.c sw->rpm = true; sw 1228 drivers/thunderbolt/icm.c struct tb_switch *sw; sw 1233 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); sw 1234 drivers/thunderbolt/icm.c if (!sw) { sw 1239 drivers/thunderbolt/icm.c remove_switch(sw); sw 1240 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 1249 drivers/thunderbolt/icm.c struct tb_switch *sw; sw 1281 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); sw 1282 drivers/thunderbolt/icm.c if (sw) { sw 1283 drivers/thunderbolt/icm.c remove_switch(sw); sw 1284 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 1287 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, get_parent_route(route)); sw 1288 drivers/thunderbolt/icm.c if (!sw) { sw 1293 drivers/thunderbolt/icm.c add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); sw 1294 drivers/thunderbolt/icm.c tb_switch_put(sw); sw 1894 drivers/thunderbolt/icm.c static void icm_unplug_children(struct tb_switch *sw) sw 1898 drivers/thunderbolt/icm.c if (tb_route(sw)) sw 1899 drivers/thunderbolt/icm.c sw->is_unplugged = true; sw 1901 drivers/thunderbolt/icm.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 1902 drivers/thunderbolt/icm.c struct tb_port *port = &sw->ports[i]; sw 1907 drivers/thunderbolt/icm.c icm_unplug_children(port->remote->sw); sw 1913 drivers/thunderbolt/icm.c struct tb_switch *sw = tb_to_switch(dev); sw 1915 drivers/thunderbolt/icm.c if (sw) sw 1916 drivers/thunderbolt/icm.c complete(&sw->rpm_complete); sw 1920 
drivers/thunderbolt/icm.c static void remove_unplugged_switch(struct tb_switch *sw) sw 1922 drivers/thunderbolt/icm.c pm_runtime_get_sync(sw->dev.parent); sw 1929 drivers/thunderbolt/icm.c complete_rpm(&sw->dev, NULL); sw 1930 drivers/thunderbolt/icm.c bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); sw 1931 drivers/thunderbolt/icm.c tb_switch_remove(sw); sw 1933 drivers/thunderbolt/icm.c pm_runtime_mark_last_busy(sw->dev.parent); sw 1934 drivers/thunderbolt/icm.c pm_runtime_put_autosuspend(sw->dev.parent); sw 1937 drivers/thunderbolt/icm.c static void icm_free_unplugged_children(struct tb_switch *sw) sw 1941 drivers/thunderbolt/icm.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 1942 drivers/thunderbolt/icm.c struct tb_port *port = &sw->ports[i]; sw 1948 drivers/thunderbolt/icm.c if (port->remote->sw->is_unplugged) { sw 1949 drivers/thunderbolt/icm.c remove_unplugged_switch(port->remote->sw); sw 1952 drivers/thunderbolt/icm.c icm_free_unplugged_children(port->remote->sw); sw 2005 drivers/thunderbolt/icm.c static int icm_runtime_suspend_switch(struct tb_switch *sw) sw 2007 drivers/thunderbolt/icm.c if (tb_route(sw)) sw 2008 drivers/thunderbolt/icm.c reinit_completion(&sw->rpm_complete); sw 2012 drivers/thunderbolt/icm.c static int icm_runtime_resume_switch(struct tb_switch *sw) sw 2014 drivers/thunderbolt/icm.c if (tb_route(sw)) { sw 2015 drivers/thunderbolt/icm.c if (!wait_for_completion_timeout(&sw->rpm_complete, sw 2017 drivers/thunderbolt/icm.c dev_dbg(&sw->dev, "runtime resuming timed out\n"); sw 16 drivers/thunderbolt/lc.c int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid) sw 18 drivers/thunderbolt/lc.c if (!sw->cap_lc) sw 20 drivers/thunderbolt/lc.c return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4); sw 23 drivers/thunderbolt/lc.c static int read_lc_desc(struct tb_switch *sw, u32 *desc) sw 25 drivers/thunderbolt/lc.c if (!sw->cap_lc) sw 27 drivers/thunderbolt/lc.c return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1); sw 32 drivers/thunderbolt/lc.c struct tb_switch *sw = port->sw; sw 36 drivers/thunderbolt/lc.c ret = read_lc_desc(sw, &desc); sw 45 drivers/thunderbolt/lc.c return sw->cap_lc + start + phys * size; sw 51 drivers/thunderbolt/lc.c struct tb_switch *sw = port->sw; sw 55 drivers/thunderbolt/lc.c if (sw->generation < 2) sw 62 drivers/thunderbolt/lc.c ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); sw 82 drivers/thunderbolt/lc.c return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); sw 92 drivers/thunderbolt/lc.c int tb_lc_configure_link(struct tb_switch *sw) sw 97 drivers/thunderbolt/lc.c if (!sw->config.enabled || !tb_route(sw)) sw 100 drivers/thunderbolt/lc.c up = tb_upstream_port(sw); sw 101 drivers/thunderbolt/lc.c down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent)); sw 123 drivers/thunderbolt/lc.c void tb_lc_unconfigure_link(struct tb_switch *sw) sw 127 drivers/thunderbolt/lc.c if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw)) sw 130 drivers/thunderbolt/lc.c up = tb_upstream_port(sw); sw 131 drivers/thunderbolt/lc.c down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent)); sw 144 drivers/thunderbolt/lc.c int tb_lc_set_sleep(struct tb_switch *sw) sw 149 drivers/thunderbolt/lc.c if (sw->generation < 2) sw 152 drivers/thunderbolt/lc.c ret = read_lc_desc(sw, &desc); sw 163 drivers/thunderbolt/lc.c unsigned int offset = sw->cap_lc + start + i * size; sw 166 drivers/thunderbolt/lc.c ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, sw 172 
drivers/thunderbolt/lc.c ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, sw 39 drivers/thunderbolt/path.c struct tb_switch *sw; sw 46 drivers/thunderbolt/path.c sw = port->sw; sw 57 drivers/thunderbolt/path.c out_port = &sw->ports[hop.out_port]; sw 107 drivers/thunderbolt/path.c struct tb_switch *sw; sw 130 drivers/thunderbolt/path.c sw = p->sw; sw 142 drivers/thunderbolt/path.c out_port = &sw->ports[hop.out_port]; sw 156 drivers/thunderbolt/path.c path->tb = src->sw->tb; sw 172 drivers/thunderbolt/path.c sw = p->sw; sw 183 drivers/thunderbolt/path.c out_port = &sw->ports[hop.out_port]; sw 245 drivers/thunderbolt/path.c num_hops = abs(tb_route_length(tb_route(src->sw)) - sw 246 drivers/thunderbolt/path.c tb_route_length(tb_route(dst->sw))) + 1; sw 418 drivers/thunderbolt/path.c path->name, tb_route(path->hops[0].in_port->sw), sw 420 drivers/thunderbolt/path.c tb_route(path->hops[path->path_length - 1].out_port->sw), sw 446 drivers/thunderbolt/path.c path->name, tb_route(path->hops[0].in_port->sw), sw 448 drivers/thunderbolt/path.c tb_route(path->hops[path->path_length - 1].out_port->sw), sw 531 drivers/thunderbolt/path.c if (path->hops[i].in_port->sw->is_unplugged) sw 533 drivers/thunderbolt/path.c if (path->hops[i].out_port->sw->is_unplugged) sw 46 drivers/thunderbolt/switch.c static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw) sw 51 drivers/thunderbolt/switch.c if (uuid_equal(&st->uuid, sw->uuid)) sw 58 drivers/thunderbolt/switch.c static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status) sw 63 drivers/thunderbolt/switch.c st = __nvm_get_auth_status(sw); sw 69 drivers/thunderbolt/switch.c static void nvm_set_auth_status(const struct tb_switch *sw, u32 status) sw 73 drivers/thunderbolt/switch.c if (WARN_ON(!sw->uuid)) sw 77 drivers/thunderbolt/switch.c st = __nvm_get_auth_status(sw); sw 84 drivers/thunderbolt/switch.c memcpy(&st->uuid, sw->uuid, sizeof(st->uuid)); sw 94 drivers/thunderbolt/switch.c static void nvm_clear_auth_status(const struct tb_switch *sw) sw 99 drivers/thunderbolt/switch.c st = __nvm_get_auth_status(sw); sw 107 drivers/thunderbolt/switch.c static int nvm_validate_and_write(struct tb_switch *sw) sw 110 drivers/thunderbolt/switch.c const u8 *buf = sw->nvm->buf; sw 117 drivers/thunderbolt/switch.c image_size = sw->nvm->buf_data_size; sw 141 drivers/thunderbolt/switch.c if (!sw->safe_mode) { sw 149 drivers/thunderbolt/switch.c if (device_id != sw->config.device_id) sw 152 drivers/thunderbolt/switch.c if (sw->generation < 3) { sw 154 drivers/thunderbolt/switch.c ret = dma_port_flash_write(sw->dma_port, sw 166 drivers/thunderbolt/switch.c return dma_port_flash_write(sw->dma_port, 0, buf, image_size); sw 169 drivers/thunderbolt/switch.c static int nvm_authenticate_host(struct tb_switch *sw) sw 178 drivers/thunderbolt/switch.c if (!sw->safe_mode) { sw 181 drivers/thunderbolt/switch.c ret = tb_domain_disconnect_all_paths(sw->tb); sw 188 drivers/thunderbolt/switch.c ret = dma_port_flash_update_auth(sw->dma_port); sw 196 drivers/thunderbolt/switch.c tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n"); sw 197 drivers/thunderbolt/switch.c if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0) sw 198 drivers/thunderbolt/switch.c nvm_set_auth_status(sw, status); sw 205 drivers/thunderbolt/switch.c dma_port_power_cycle(sw->dma_port); sw 209 drivers/thunderbolt/switch.c static int nvm_authenticate_device(struct tb_switch *sw) sw 213 drivers/thunderbolt/switch.c ret = dma_port_flash_update_auth(sw->dma_port); sw 234 
drivers/thunderbolt/switch.c ret = dma_port_flash_update_auth_status(sw->dma_port, &status); sw 239 drivers/thunderbolt/switch.c tb_sw_warn(sw, "failed to authenticate NVM\n"); sw 240 drivers/thunderbolt/switch.c nvm_set_auth_status(sw, status); sw 243 drivers/thunderbolt/switch.c tb_sw_info(sw, "power cycling the switch now\n"); sw 244 drivers/thunderbolt/switch.c dma_port_power_cycle(sw->dma_port); sw 257 drivers/thunderbolt/switch.c struct tb_switch *sw = priv; sw 260 drivers/thunderbolt/switch.c pm_runtime_get_sync(&sw->dev); sw 262 drivers/thunderbolt/switch.c if (!mutex_trylock(&sw->tb->lock)) { sw 267 drivers/thunderbolt/switch.c ret = dma_port_flash_read(sw->dma_port, offset, val, bytes); sw 268 drivers/thunderbolt/switch.c mutex_unlock(&sw->tb->lock); sw 271 drivers/thunderbolt/switch.c pm_runtime_mark_last_busy(&sw->dev); sw 272 drivers/thunderbolt/switch.c pm_runtime_put_autosuspend(&sw->dev); sw 286 drivers/thunderbolt/switch.c struct tb_switch *sw = priv; sw 289 drivers/thunderbolt/switch.c if (!mutex_trylock(&sw->tb->lock)) sw 298 drivers/thunderbolt/switch.c if (!sw->nvm->buf) { sw 299 drivers/thunderbolt/switch.c sw->nvm->buf = vmalloc(NVM_MAX_SIZE); sw 300 drivers/thunderbolt/switch.c if (!sw->nvm->buf) { sw 306 drivers/thunderbolt/switch.c sw->nvm->buf_data_size = offset + bytes; sw 307 drivers/thunderbolt/switch.c memcpy(sw->nvm->buf + offset, val, bytes); sw 310 drivers/thunderbolt/switch.c mutex_unlock(&sw->tb->lock); sw 315 drivers/thunderbolt/switch.c static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, sw 337 drivers/thunderbolt/switch.c config.dev = &sw->dev; sw 339 drivers/thunderbolt/switch.c config.priv = sw; sw 344 drivers/thunderbolt/switch.c static int tb_switch_nvm_add(struct tb_switch *sw) sw 351 drivers/thunderbolt/switch.c if (!sw->dma_port) sw 365 drivers/thunderbolt/switch.c if (!sw->safe_mode) { sw 368 drivers/thunderbolt/switch.c ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val, sw 373 drivers/thunderbolt/switch.c hdr_size = sw->generation < 3 ? 
SZ_8K : SZ_16K; sw 377 drivers/thunderbolt/switch.c ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val, sw 385 drivers/thunderbolt/switch.c nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true); sw 393 drivers/thunderbolt/switch.c if (!sw->no_nvm_upgrade) { sw 394 drivers/thunderbolt/switch.c nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false); sw 402 drivers/thunderbolt/switch.c sw->nvm = nvm; sw 415 drivers/thunderbolt/switch.c static void tb_switch_nvm_remove(struct tb_switch *sw) sw 419 drivers/thunderbolt/switch.c nvm = sw->nvm; sw 420 drivers/thunderbolt/switch.c sw->nvm = NULL; sw 427 drivers/thunderbolt/switch.c nvm_clear_auth_status(sw); sw 579 drivers/thunderbolt/switch.c if (credits == 0 || port->sw->is_unplugged) sw 645 drivers/thunderbolt/switch.c tb_dbg(port->sw->tb, " Port %d: not implemented\n", sw 666 drivers/thunderbolt/switch.c tb_dump_port(port->sw->tb, &port->config); sw 773 drivers/thunderbolt/switch.c if (prev->sw == end->sw) { sw 779 drivers/thunderbolt/switch.c if (start->sw->config.depth < end->sw->config.depth) { sw 781 drivers/thunderbolt/switch.c prev->remote->sw->config.depth > prev->sw->config.depth) sw 784 drivers/thunderbolt/switch.c next = tb_port_at(tb_route(end->sw), prev->sw); sw 789 drivers/thunderbolt/switch.c next = tb_upstream_port(prev->sw); sw 963 drivers/thunderbolt/switch.c static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw) sw 966 drivers/thunderbolt/switch.c sw->vendor_id, sw->device_id, sw->revision, sw 967 drivers/thunderbolt/switch.c sw->thunderbolt_version); sw 968 drivers/thunderbolt/switch.c tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number); sw 972 drivers/thunderbolt/switch.c sw->upstream_port_number, sw->depth, sw 973 drivers/thunderbolt/switch.c (((u64) sw->route_hi) << 32) | sw->route_lo, sw 974 drivers/thunderbolt/switch.c sw->enabled, sw->plug_events_delay); sw 976 drivers/thunderbolt/switch.c sw->__unknown1, sw->__unknown4); sw 1010 drivers/thunderbolt/switch.c static int tb_plug_events_active(struct tb_switch *sw, bool active) sw 1015 drivers/thunderbolt/switch.c if (!sw->config.enabled) sw 1018 drivers/thunderbolt/switch.c sw->config.plug_events_delay = 0xff; sw 1019 drivers/thunderbolt/switch.c res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1); sw 1023 drivers/thunderbolt/switch.c res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); sw 1029 drivers/thunderbolt/switch.c switch (sw->config.device_id) { sw 1040 drivers/thunderbolt/switch.c return tb_sw_write(sw, &data, TB_CFG_SWITCH, sw 1041 drivers/thunderbolt/switch.c sw->cap_plug_events + 1, 1); sw 1048 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1050 drivers/thunderbolt/switch.c return sprintf(buf, "%u\n", sw->authorized); sw 1053 drivers/thunderbolt/switch.c static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) sw 1057 drivers/thunderbolt/switch.c if (!mutex_trylock(&sw->tb->lock)) sw 1060 drivers/thunderbolt/switch.c if (sw->authorized) sw 1066 drivers/thunderbolt/switch.c if (sw->key) sw 1067 drivers/thunderbolt/switch.c ret = tb_domain_approve_switch_key(sw->tb, sw); sw 1069 drivers/thunderbolt/switch.c ret = tb_domain_approve_switch(sw->tb, sw); sw 1074 drivers/thunderbolt/switch.c if (sw->key) sw 1075 drivers/thunderbolt/switch.c ret = tb_domain_challenge_switch_key(sw->tb, sw); sw 1083 drivers/thunderbolt/switch.c sw->authorized = val; sw 1085 drivers/thunderbolt/switch.c kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); sw 1089 
drivers/thunderbolt/switch.c mutex_unlock(&sw->tb->lock); sw 1097 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1107 drivers/thunderbolt/switch.c pm_runtime_get_sync(&sw->dev); sw 1108 drivers/thunderbolt/switch.c ret = tb_switch_set_authorized(sw, val); sw 1109 drivers/thunderbolt/switch.c pm_runtime_mark_last_busy(&sw->dev); sw 1110 drivers/thunderbolt/switch.c pm_runtime_put_autosuspend(&sw->dev); sw 1119 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1121 drivers/thunderbolt/switch.c return sprintf(buf, "%u\n", sw->boot); sw 1128 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1130 drivers/thunderbolt/switch.c return sprintf(buf, "%#x\n", sw->device); sw 1137 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1139 drivers/thunderbolt/switch.c return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : ""); sw 1146 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1149 drivers/thunderbolt/switch.c if (!mutex_trylock(&sw->tb->lock)) sw 1152 drivers/thunderbolt/switch.c if (sw->key) sw 1153 drivers/thunderbolt/switch.c ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); sw 1157 drivers/thunderbolt/switch.c mutex_unlock(&sw->tb->lock); sw 1164 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1174 drivers/thunderbolt/switch.c if (!mutex_trylock(&sw->tb->lock)) sw 1177 drivers/thunderbolt/switch.c if (sw->authorized) { sw 1180 drivers/thunderbolt/switch.c kfree(sw->key); sw 1182 drivers/thunderbolt/switch.c sw->key = NULL; sw 1184 drivers/thunderbolt/switch.c sw->key = kmemdup(key, sizeof(key), GFP_KERNEL); sw 1185 drivers/thunderbolt/switch.c if (!sw->key) sw 1190 drivers/thunderbolt/switch.c mutex_unlock(&sw->tb->lock); sw 1195 drivers/thunderbolt/switch.c static void nvm_authenticate_start(struct tb_switch *sw) sw 1205 drivers/thunderbolt/switch.c root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); sw 1210 drivers/thunderbolt/switch.c static void nvm_authenticate_complete(struct tb_switch *sw) sw 1214 drivers/thunderbolt/switch.c root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); sw 1222 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1225 drivers/thunderbolt/switch.c nvm_get_auth_status(sw, &status); sw 1232 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1236 drivers/thunderbolt/switch.c pm_runtime_get_sync(&sw->dev); sw 1238 drivers/thunderbolt/switch.c if (!mutex_trylock(&sw->tb->lock)) { sw 1244 drivers/thunderbolt/switch.c if (!sw->nvm) { sw 1254 drivers/thunderbolt/switch.c nvm_clear_auth_status(sw); sw 1257 drivers/thunderbolt/switch.c if (!sw->nvm->buf) { sw 1262 drivers/thunderbolt/switch.c ret = nvm_validate_and_write(sw); sw 1266 drivers/thunderbolt/switch.c sw->nvm->authenticating = true; sw 1268 drivers/thunderbolt/switch.c if (!tb_route(sw)) { sw 1273 drivers/thunderbolt/switch.c nvm_authenticate_start(sw); sw 1274 drivers/thunderbolt/switch.c ret = nvm_authenticate_host(sw); sw 1276 drivers/thunderbolt/switch.c ret = nvm_authenticate_device(sw); sw 1281 drivers/thunderbolt/switch.c mutex_unlock(&sw->tb->lock); sw 1283 drivers/thunderbolt/switch.c pm_runtime_mark_last_busy(&sw->dev); sw 1284 drivers/thunderbolt/switch.c pm_runtime_put_autosuspend(&sw->dev); sw 1295 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1298 drivers/thunderbolt/switch.c if (!mutex_trylock(&sw->tb->lock)) sw 1301 drivers/thunderbolt/switch.c if 
(sw->safe_mode) sw 1303 drivers/thunderbolt/switch.c else if (!sw->nvm) sw 1306 drivers/thunderbolt/switch.c ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); sw 1308 drivers/thunderbolt/switch.c mutex_unlock(&sw->tb->lock); sw 1317 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1319 drivers/thunderbolt/switch.c return sprintf(buf, "%#x\n", sw->vendor); sw 1326 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1328 drivers/thunderbolt/switch.c return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : ""); sw 1335 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1337 drivers/thunderbolt/switch.c return sprintf(buf, "%pUb\n", sw->uuid); sw 1359 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1362 drivers/thunderbolt/switch.c if (!sw->device) sw 1365 drivers/thunderbolt/switch.c if (!sw->device_name) sw 1368 drivers/thunderbolt/switch.c if (!sw->vendor) sw 1371 drivers/thunderbolt/switch.c if (!sw->vendor_name) sw 1374 drivers/thunderbolt/switch.c if (tb_route(sw) && sw 1375 drivers/thunderbolt/switch.c sw->tb->security_level == TB_SECURITY_SECURE && sw 1376 drivers/thunderbolt/switch.c sw->security_level == TB_SECURITY_SECURE) sw 1380 drivers/thunderbolt/switch.c if (sw->dma_port && !sw->no_nvm_upgrade) sw 1384 drivers/thunderbolt/switch.c if (sw->dma_port) sw 1388 drivers/thunderbolt/switch.c if (tb_route(sw)) sw 1393 drivers/thunderbolt/switch.c return sw->safe_mode ? 0 : attr->mode; sw 1408 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1411 drivers/thunderbolt/switch.c dma_port_free(sw->dma_port); sw 1413 drivers/thunderbolt/switch.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 1414 drivers/thunderbolt/switch.c if (!sw->ports[i].disabled) { sw 1415 drivers/thunderbolt/switch.c ida_destroy(&sw->ports[i].in_hopids); sw 1416 drivers/thunderbolt/switch.c ida_destroy(&sw->ports[i].out_hopids); sw 1420 drivers/thunderbolt/switch.c kfree(sw->uuid); sw 1421 drivers/thunderbolt/switch.c kfree(sw->device_name); sw 1422 drivers/thunderbolt/switch.c kfree(sw->vendor_name); sw 1423 drivers/thunderbolt/switch.c kfree(sw->ports); sw 1424 drivers/thunderbolt/switch.c kfree(sw->drom); sw 1425 drivers/thunderbolt/switch.c kfree(sw->key); sw 1426 drivers/thunderbolt/switch.c kfree(sw); sw 1435 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1436 drivers/thunderbolt/switch.c const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; sw 1439 drivers/thunderbolt/switch.c return cm_ops->runtime_suspend_switch(sw); sw 1446 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 1447 drivers/thunderbolt/switch.c const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; sw 1450 drivers/thunderbolt/switch.c return cm_ops->runtime_resume_switch(sw); sw 1465 drivers/thunderbolt/switch.c static int tb_switch_get_generation(struct tb_switch *sw) sw 1467 drivers/thunderbolt/switch.c switch (sw->config.device_id) { sw 1500 drivers/thunderbolt/switch.c tb_sw_warn(sw, "unsupported switch device id %#x\n", sw 1501 drivers/thunderbolt/switch.c sw->config.device_id); sw 1523 drivers/thunderbolt/switch.c struct tb_switch *sw; sw 1536 drivers/thunderbolt/switch.c sw = kzalloc(sizeof(*sw), GFP_KERNEL); sw 1537 drivers/thunderbolt/switch.c if (!sw) sw 1540 drivers/thunderbolt/switch.c sw->tb = tb; sw 1541 drivers/thunderbolt/switch.c ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); sw 1546 drivers/thunderbolt/switch.c 
tb_dump_switch(tb, &sw->config); sw 1549 drivers/thunderbolt/switch.c sw->config.upstream_port_number = upstream_port; sw 1550 drivers/thunderbolt/switch.c sw->config.depth = depth; sw 1551 drivers/thunderbolt/switch.c sw->config.route_hi = upper_32_bits(route); sw 1552 drivers/thunderbolt/switch.c sw->config.route_lo = lower_32_bits(route); sw 1553 drivers/thunderbolt/switch.c sw->config.enabled = 0; sw 1556 drivers/thunderbolt/switch.c sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), sw 1558 drivers/thunderbolt/switch.c if (!sw->ports) { sw 1563 drivers/thunderbolt/switch.c for (i = 0; i <= sw->config.max_port_number; i++) { sw 1565 drivers/thunderbolt/switch.c sw->ports[i].sw = sw; sw 1566 drivers/thunderbolt/switch.c sw->ports[i].port = i; sw 1569 drivers/thunderbolt/switch.c sw->generation = tb_switch_get_generation(sw); sw 1571 drivers/thunderbolt/switch.c ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); sw 1573 drivers/thunderbolt/switch.c tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); sw 1576 drivers/thunderbolt/switch.c sw->cap_plug_events = ret; sw 1578 drivers/thunderbolt/switch.c ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); sw 1580 drivers/thunderbolt/switch.c sw->cap_lc = ret; sw 1584 drivers/thunderbolt/switch.c sw->authorized = true; sw 1586 drivers/thunderbolt/switch.c device_initialize(&sw->dev); sw 1587 drivers/thunderbolt/switch.c sw->dev.parent = parent; sw 1588 drivers/thunderbolt/switch.c sw->dev.bus = &tb_bus_type; sw 1589 drivers/thunderbolt/switch.c sw->dev.type = &tb_switch_type; sw 1590 drivers/thunderbolt/switch.c sw->dev.groups = switch_groups; sw 1591 drivers/thunderbolt/switch.c dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); sw 1593 drivers/thunderbolt/switch.c return sw; sw 1596 drivers/thunderbolt/switch.c kfree(sw->ports); sw 1597 drivers/thunderbolt/switch.c kfree(sw); sw 1619 drivers/thunderbolt/switch.c struct tb_switch *sw; sw 1621 drivers/thunderbolt/switch.c sw = kzalloc(sizeof(*sw), GFP_KERNEL); sw 1622 drivers/thunderbolt/switch.c if (!sw) sw 1625 drivers/thunderbolt/switch.c sw->tb = tb; sw 1626 drivers/thunderbolt/switch.c sw->config.depth = tb_route_length(route); sw 1627 drivers/thunderbolt/switch.c sw->config.route_hi = upper_32_bits(route); sw 1628 drivers/thunderbolt/switch.c sw->config.route_lo = lower_32_bits(route); sw 1629 drivers/thunderbolt/switch.c sw->safe_mode = true; sw 1631 drivers/thunderbolt/switch.c device_initialize(&sw->dev); sw 1632 drivers/thunderbolt/switch.c sw->dev.parent = parent; sw 1633 drivers/thunderbolt/switch.c sw->dev.bus = &tb_bus_type; sw 1634 drivers/thunderbolt/switch.c sw->dev.type = &tb_switch_type; sw 1635 drivers/thunderbolt/switch.c sw->dev.groups = switch_groups; sw 1636 drivers/thunderbolt/switch.c dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw)); sw 1638 drivers/thunderbolt/switch.c return sw; sw 1651 drivers/thunderbolt/switch.c int tb_switch_configure(struct tb_switch *sw) sw 1653 drivers/thunderbolt/switch.c struct tb *tb = sw->tb; sw 1657 drivers/thunderbolt/switch.c route = tb_route(sw); sw 1659 drivers/thunderbolt/switch.c route, tb_route_length(route), sw->config.upstream_port_number); sw 1661 drivers/thunderbolt/switch.c if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) sw 1662 drivers/thunderbolt/switch.c tb_sw_warn(sw, "unknown switch vendor id %#x\n", sw 1663 drivers/thunderbolt/switch.c sw->config.vendor_id); sw 1665 drivers/thunderbolt/switch.c sw->config.enabled = 1; sw 1668 
drivers/thunderbolt/switch.c ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3); sw 1672 drivers/thunderbolt/switch.c ret = tb_lc_configure_link(sw); sw 1676 drivers/thunderbolt/switch.c return tb_plug_events_active(sw, true); sw 1679 drivers/thunderbolt/switch.c static int tb_switch_set_uuid(struct tb_switch *sw) sw 1684 drivers/thunderbolt/switch.c if (sw->uuid) sw 1691 drivers/thunderbolt/switch.c ret = tb_lc_read_uuid(sw, uuid); sw 1699 drivers/thunderbolt/switch.c uuid[0] = sw->uid & 0xffffffff; sw 1700 drivers/thunderbolt/switch.c uuid[1] = (sw->uid >> 32) & 0xffffffff; sw 1705 drivers/thunderbolt/switch.c sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL); sw 1706 drivers/thunderbolt/switch.c if (!sw->uuid) sw 1711 drivers/thunderbolt/switch.c static int tb_switch_add_dma_port(struct tb_switch *sw) sw 1716 drivers/thunderbolt/switch.c switch (sw->generation) { sw 1719 drivers/thunderbolt/switch.c if (tb_route(sw)) sw 1724 drivers/thunderbolt/switch.c ret = tb_switch_set_uuid(sw); sw 1734 drivers/thunderbolt/switch.c if (!sw->safe_mode) sw 1740 drivers/thunderbolt/switch.c if (!tb_route(sw) && sw->config.enabled) sw 1743 drivers/thunderbolt/switch.c sw->dma_port = dma_port_alloc(sw); sw 1744 drivers/thunderbolt/switch.c if (!sw->dma_port) sw 1747 drivers/thunderbolt/switch.c if (sw->no_nvm_upgrade) sw 1756 drivers/thunderbolt/switch.c nvm_get_auth_status(sw, &status); sw 1758 drivers/thunderbolt/switch.c if (!tb_route(sw)) sw 1759 drivers/thunderbolt/switch.c nvm_authenticate_complete(sw); sw 1768 drivers/thunderbolt/switch.c ret = dma_port_flash_update_auth_status(sw->dma_port, &status); sw 1773 drivers/thunderbolt/switch.c if (!tb_route(sw)) sw 1774 drivers/thunderbolt/switch.c nvm_authenticate_complete(sw); sw 1777 drivers/thunderbolt/switch.c tb_sw_info(sw, "switch flash authentication failed\n"); sw 1778 drivers/thunderbolt/switch.c nvm_set_auth_status(sw, status); sw 1781 drivers/thunderbolt/switch.c tb_sw_info(sw, "power cycling the switch now\n"); sw 1782 drivers/thunderbolt/switch.c dma_port_power_cycle(sw->dma_port); sw 1803 drivers/thunderbolt/switch.c int tb_switch_add(struct tb_switch *sw) sw 1814 drivers/thunderbolt/switch.c ret = tb_switch_add_dma_port(sw); sw 1818 drivers/thunderbolt/switch.c if (!sw->safe_mode) { sw 1820 drivers/thunderbolt/switch.c ret = tb_drom_read(sw); sw 1822 drivers/thunderbolt/switch.c tb_sw_warn(sw, "tb_eeprom_read_rom failed\n"); sw 1825 drivers/thunderbolt/switch.c tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); sw 1827 drivers/thunderbolt/switch.c ret = tb_switch_set_uuid(sw); sw 1831 drivers/thunderbolt/switch.c for (i = 0; i <= sw->config.max_port_number; i++) { sw 1832 drivers/thunderbolt/switch.c if (sw->ports[i].disabled) { sw 1833 drivers/thunderbolt/switch.c tb_port_dbg(&sw->ports[i], "disabled by eeprom\n"); sw 1836 drivers/thunderbolt/switch.c ret = tb_init_port(&sw->ports[i]); sw 1842 drivers/thunderbolt/switch.c ret = device_add(&sw->dev); sw 1846 drivers/thunderbolt/switch.c if (tb_route(sw)) { sw 1847 drivers/thunderbolt/switch.c dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n", sw 1848 drivers/thunderbolt/switch.c sw->vendor, sw->device); sw 1849 drivers/thunderbolt/switch.c if (sw->vendor_name && sw->device_name) sw 1850 drivers/thunderbolt/switch.c dev_info(&sw->dev, "%s %s\n", sw->vendor_name, sw 1851 drivers/thunderbolt/switch.c sw->device_name); sw 1854 drivers/thunderbolt/switch.c ret = tb_switch_nvm_add(sw); sw 1856 drivers/thunderbolt/switch.c device_del(&sw->dev); sw 1860 
drivers/thunderbolt/switch.c pm_runtime_set_active(&sw->dev); sw 1861 drivers/thunderbolt/switch.c if (sw->rpm) { sw 1862 drivers/thunderbolt/switch.c pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); sw 1863 drivers/thunderbolt/switch.c pm_runtime_use_autosuspend(&sw->dev); sw 1864 drivers/thunderbolt/switch.c pm_runtime_mark_last_busy(&sw->dev); sw 1865 drivers/thunderbolt/switch.c pm_runtime_enable(&sw->dev); sw 1866 drivers/thunderbolt/switch.c pm_request_autosuspend(&sw->dev); sw 1880 drivers/thunderbolt/switch.c void tb_switch_remove(struct tb_switch *sw) sw 1884 drivers/thunderbolt/switch.c if (sw->rpm) { sw 1885 drivers/thunderbolt/switch.c pm_runtime_get_sync(&sw->dev); sw 1886 drivers/thunderbolt/switch.c pm_runtime_disable(&sw->dev); sw 1890 drivers/thunderbolt/switch.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 1891 drivers/thunderbolt/switch.c if (tb_port_has_remote(&sw->ports[i])) { sw 1892 drivers/thunderbolt/switch.c tb_switch_remove(sw->ports[i].remote->sw); sw 1893 drivers/thunderbolt/switch.c sw->ports[i].remote = NULL; sw 1894 drivers/thunderbolt/switch.c } else if (sw->ports[i].xdomain) { sw 1895 drivers/thunderbolt/switch.c tb_xdomain_remove(sw->ports[i].xdomain); sw 1896 drivers/thunderbolt/switch.c sw->ports[i].xdomain = NULL; sw 1900 drivers/thunderbolt/switch.c if (!sw->is_unplugged) sw 1901 drivers/thunderbolt/switch.c tb_plug_events_active(sw, false); sw 1902 drivers/thunderbolt/switch.c tb_lc_unconfigure_link(sw); sw 1904 drivers/thunderbolt/switch.c tb_switch_nvm_remove(sw); sw 1906 drivers/thunderbolt/switch.c if (tb_route(sw)) sw 1907 drivers/thunderbolt/switch.c dev_info(&sw->dev, "device disconnected\n"); sw 1908 drivers/thunderbolt/switch.c device_unregister(&sw->dev); sw 1914 drivers/thunderbolt/switch.c void tb_sw_set_unplugged(struct tb_switch *sw) sw 1917 drivers/thunderbolt/switch.c if (sw == sw->tb->root_switch) { sw 1918 drivers/thunderbolt/switch.c tb_sw_WARN(sw, "cannot unplug root switch\n"); sw 1921 drivers/thunderbolt/switch.c if (sw->is_unplugged) { sw 1922 drivers/thunderbolt/switch.c tb_sw_WARN(sw, "is_unplugged already set\n"); sw 1925 drivers/thunderbolt/switch.c sw->is_unplugged = true; sw 1926 drivers/thunderbolt/switch.c for (i = 0; i <= sw->config.max_port_number; i++) { sw 1927 drivers/thunderbolt/switch.c if (tb_port_has_remote(&sw->ports[i])) sw 1928 drivers/thunderbolt/switch.c tb_sw_set_unplugged(sw->ports[i].remote->sw); sw 1929 drivers/thunderbolt/switch.c else if (sw->ports[i].xdomain) sw 1930 drivers/thunderbolt/switch.c sw->ports[i].xdomain->is_unplugged = true; sw 1934 drivers/thunderbolt/switch.c int tb_switch_resume(struct tb_switch *sw) sw 1937 drivers/thunderbolt/switch.c tb_sw_dbg(sw, "resuming switch\n"); sw 1943 drivers/thunderbolt/switch.c if (tb_route(sw)) { sw 1951 drivers/thunderbolt/switch.c err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw)); sw 1953 drivers/thunderbolt/switch.c tb_sw_info(sw, "switch not present anymore\n"); sw 1957 drivers/thunderbolt/switch.c err = tb_drom_read_uid_only(sw, &uid); sw 1959 drivers/thunderbolt/switch.c tb_sw_warn(sw, "uid read failed\n"); sw 1962 drivers/thunderbolt/switch.c if (sw->uid != uid) { sw 1963 drivers/thunderbolt/switch.c tb_sw_info(sw, sw 1965 drivers/thunderbolt/switch.c sw->uid, uid); sw 1971 drivers/thunderbolt/switch.c err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3); sw 1975 drivers/thunderbolt/switch.c err = tb_lc_configure_link(sw); sw 1979 drivers/thunderbolt/switch.c err = tb_plug_events_active(sw, 
true); sw 1984 drivers/thunderbolt/switch.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 1985 drivers/thunderbolt/switch.c struct tb_port *port = &sw->ports[i]; sw 1994 drivers/thunderbolt/switch.c tb_sw_set_unplugged(port->remote->sw); sw 1998 drivers/thunderbolt/switch.c if (tb_switch_resume(port->remote->sw)) { sw 2001 drivers/thunderbolt/switch.c tb_sw_set_unplugged(port->remote->sw); sw 2008 drivers/thunderbolt/switch.c void tb_switch_suspend(struct tb_switch *sw) sw 2011 drivers/thunderbolt/switch.c err = tb_plug_events_active(sw, false); sw 2015 drivers/thunderbolt/switch.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 2016 drivers/thunderbolt/switch.c if (tb_port_has_remote(&sw->ports[i])) sw 2017 drivers/thunderbolt/switch.c tb_switch_suspend(sw->ports[i].remote->sw); sw 2020 drivers/thunderbolt/switch.c tb_lc_set_sleep(sw); sw 2033 drivers/thunderbolt/switch.c struct tb_switch *sw = tb_to_switch(dev); sw 2036 drivers/thunderbolt/switch.c if (!sw) sw 2038 drivers/thunderbolt/switch.c if (sw->tb != lookup->tb) sw 2042 drivers/thunderbolt/switch.c return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid)); sw 2045 drivers/thunderbolt/switch.c return sw->config.route_lo == lower_32_bits(lookup->route) && sw 2046 drivers/thunderbolt/switch.c sw->config.route_hi == upper_32_bits(lookup->route); sw 2051 drivers/thunderbolt/switch.c return !sw->depth; sw 2053 drivers/thunderbolt/switch.c return sw->link == lookup->link && sw->depth == lookup->depth; sw 59 drivers/thunderbolt/tb.c static void tb_discover_tunnels(struct tb_switch *sw) sw 61 drivers/thunderbolt/tb.c struct tb *tb = sw->tb; sw 66 drivers/thunderbolt/tb.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 69 drivers/thunderbolt/tb.c port = &sw->ports[i]; sw 87 drivers/thunderbolt/tb.c struct tb_switch *parent = tunnel->dst_port->sw; sw 89 drivers/thunderbolt/tb.c while (parent != tunnel->src_port->sw) { sw 98 drivers/thunderbolt/tb.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 99 drivers/thunderbolt/tb.c if (tb_port_has_remote(&sw->ports[i])) sw 100 drivers/thunderbolt/tb.c tb_discover_tunnels(sw->ports[i].remote->sw); sw 106 drivers/thunderbolt/tb.c struct tb_switch *sw = port->sw; sw 107 drivers/thunderbolt/tb.c struct tb *tb = sw->tb; sw 118 drivers/thunderbolt/tb.c xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, sw 121 drivers/thunderbolt/tb.c tb_port_at(route, sw)->xdomain = xd; sw 131 drivers/thunderbolt/tb.c static void tb_scan_switch(struct tb_switch *sw) sw 134 drivers/thunderbolt/tb.c for (i = 1; i <= sw->config.max_port_number; i++) sw 135 drivers/thunderbolt/tb.c tb_scan_port(&sw->ports[i]); sw 143 drivers/thunderbolt/tb.c struct tb_cm *tcm = tb_priv(port->sw->tb); sw 145 drivers/thunderbolt/tb.c struct tb_switch *sw; sw 153 drivers/thunderbolt/tb.c tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port, sw 171 drivers/thunderbolt/tb.c sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, sw 173 drivers/thunderbolt/tb.c if (IS_ERR(sw)) { sw 179 drivers/thunderbolt/tb.c if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL) sw 184 drivers/thunderbolt/tb.c if (tb_switch_configure(sw)) { sw 185 drivers/thunderbolt/tb.c tb_switch_put(sw); sw 204 drivers/thunderbolt/tb.c dev_set_uevent_suppress(&sw->dev, true); sw 206 drivers/thunderbolt/tb.c if (tb_switch_add(sw)) { sw 207 drivers/thunderbolt/tb.c tb_switch_put(sw); sw 212 drivers/thunderbolt/tb.c upstream_port = tb_upstream_port(sw); sw 220 drivers/thunderbolt/tb.c tb_scan_switch(sw); sw 264 
drivers/thunderbolt/tb.c static void tb_free_unplugged_children(struct tb_switch *sw) sw 267 drivers/thunderbolt/tb.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 268 drivers/thunderbolt/tb.c struct tb_port *port = &sw->ports[i]; sw 273 drivers/thunderbolt/tb.c if (port->remote->sw->is_unplugged) { sw 274 drivers/thunderbolt/tb.c tb_switch_remove(port->remote->sw); sw 279 drivers/thunderbolt/tb.c tb_free_unplugged_children(port->remote->sw); sw 289 drivers/thunderbolt/tb.c static struct tb_port *tb_find_port(struct tb_switch *sw, sw 293 drivers/thunderbolt/tb.c for (i = 1; i <= sw->config.max_port_number; i++) sw 294 drivers/thunderbolt/tb.c if (sw->ports[i].config.type == type) sw 295 drivers/thunderbolt/tb.c return &sw->ports[i]; sw 304 drivers/thunderbolt/tb.c static struct tb_port *tb_find_unused_port(struct tb_switch *sw, sw 309 drivers/thunderbolt/tb.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 310 drivers/thunderbolt/tb.c if (tb_is_upstream_port(&sw->ports[i])) sw 312 drivers/thunderbolt/tb.c if (sw->ports[i].config.type != type) sw 314 drivers/thunderbolt/tb.c if (!sw->ports[i].cap_adap) sw 316 drivers/thunderbolt/tb.c if (tb_port_is_enabled(&sw->ports[i])) sw 318 drivers/thunderbolt/tb.c return &sw->ports[i]; sw 323 drivers/thunderbolt/tb.c static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, sw 331 drivers/thunderbolt/tb.c if (!tb_route(sw)) { sw 339 drivers/thunderbolt/tb.c if (tb_switch_is_cr(sw)) sw 341 drivers/thunderbolt/tb.c else if (tb_switch_is_fr(sw)) sw 347 drivers/thunderbolt/tb.c if (WARN_ON(index > sw->config.max_port_number)) sw 349 drivers/thunderbolt/tb.c if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index]))) sw 351 drivers/thunderbolt/tb.c if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index]))) sw 354 drivers/thunderbolt/tb.c return &sw->ports[index]; sw 358 drivers/thunderbolt/tb.c return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); sw 364 drivers/thunderbolt/tb.c struct tb_switch *sw = out->sw; sw 372 drivers/thunderbolt/tb.c sw = tb_to_switch(sw->dev.parent); sw 373 drivers/thunderbolt/tb.c if (!sw) sw 375 drivers/thunderbolt/tb.c in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN); sw 399 drivers/thunderbolt/tb.c static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) sw 406 drivers/thunderbolt/tb.c up = tb_find_port(sw, TB_TYPE_PCIE_UP); sw 414 drivers/thunderbolt/tb.c parent_sw = tb_to_switch(sw->dev.parent); sw 415 drivers/thunderbolt/tb.c port = tb_port_at(tb_route(sw), parent_sw); sw 440 drivers/thunderbolt/tb.c struct tb_switch *sw; sw 442 drivers/thunderbolt/tb.c sw = tb_to_switch(xd->dev.parent); sw 443 drivers/thunderbolt/tb.c dst_port = tb_port_at(xd->route, sw); sw 471 drivers/thunderbolt/tb.c struct tb_switch *sw; sw 473 drivers/thunderbolt/tb.c sw = tb_to_switch(xd->dev.parent); sw 474 drivers/thunderbolt/tb.c dst_port = tb_port_at(xd->route, sw); sw 506 drivers/thunderbolt/tb.c struct tb_switch *sw; sw 512 drivers/thunderbolt/tb.c sw = tb_switch_find_by_route(tb, ev->route); sw 513 drivers/thunderbolt/tb.c if (!sw) { sw 519 drivers/thunderbolt/tb.c if (ev->port > sw->config.max_port_number) { sw 525 drivers/thunderbolt/tb.c port = &sw->ports[ev->port]; sw 534 drivers/thunderbolt/tb.c tb_sw_set_unplugged(port->remote->sw); sw 536 drivers/thunderbolt/tb.c tb_switch_remove(port->remote->sw); sw 576 drivers/thunderbolt/tb.c tb_switch_put(sw); sw 633 drivers/thunderbolt/tb.c struct tb_switch *sw = tb_to_switch(dev); sw 640 drivers/thunderbolt/tb.c if (sw->boot) sw 641 drivers/thunderbolt/tb.c sw->authorized = 1; sw 
735 drivers/thunderbolt/tb.c static int tb_free_unplugged_xdomains(struct tb_switch *sw) sw 739 drivers/thunderbolt/tb.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 740 drivers/thunderbolt/tb.c struct tb_port *port = &sw->ports[i]; sw 749 drivers/thunderbolt/tb.c ret += tb_free_unplugged_xdomains(port->remote->sw); sw 138 drivers/thunderbolt/tb.h struct tb_switch *sw; sw 278 drivers/thunderbolt/tb.h int (*runtime_suspend_switch)(struct tb_switch *sw); sw 279 drivers/thunderbolt/tb.h int (*runtime_resume_switch)(struct tb_switch *sw); sw 284 drivers/thunderbolt/tb.h int (*approve_switch)(struct tb *tb, struct tb_switch *sw); sw 285 drivers/thunderbolt/tb.h int (*add_switch_key)(struct tb *tb, struct tb_switch *sw); sw 286 drivers/thunderbolt/tb.h int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw, sw 312 drivers/thunderbolt/tb.h static inline struct tb_port *tb_upstream_port(struct tb_switch *sw) sw 314 drivers/thunderbolt/tb.h return &sw->ports[sw->config.upstream_port_number]; sw 326 drivers/thunderbolt/tb.h const struct tb_port *upstream_port = tb_upstream_port(port->sw); sw 330 drivers/thunderbolt/tb.h static inline u64 tb_route(const struct tb_switch *sw) sw 332 drivers/thunderbolt/tb.h return ((u64) sw->config.route_hi) << 32 | sw->config.route_lo; sw 335 drivers/thunderbolt/tb.h static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw) sw 339 drivers/thunderbolt/tb.h port = route >> (sw->config.depth * 8); sw 340 drivers/thunderbolt/tb.h if (WARN_ON(port > sw->config.max_port_number)) sw 342 drivers/thunderbolt/tb.h return &sw->ports[port]; sw 388 drivers/thunderbolt/tb.h static inline int tb_sw_read(struct tb_switch *sw, void *buffer, sw 391 drivers/thunderbolt/tb.h if (sw->is_unplugged) sw 393 drivers/thunderbolt/tb.h return tb_cfg_read(sw->tb->ctl, sw 395 drivers/thunderbolt/tb.h tb_route(sw), sw 402 drivers/thunderbolt/tb.h static inline int tb_sw_write(struct tb_switch *sw, void *buffer, sw 405 drivers/thunderbolt/tb.h if (sw->is_unplugged) sw 407 drivers/thunderbolt/tb.h return tb_cfg_write(sw->tb->ctl, sw 409 drivers/thunderbolt/tb.h tb_route(sw), sw 419 drivers/thunderbolt/tb.h if (port->sw->is_unplugged) sw 421 drivers/thunderbolt/tb.h return tb_cfg_read(port->sw->tb->ctl, sw 423 drivers/thunderbolt/tb.h tb_route(port->sw), sw 433 drivers/thunderbolt/tb.h if (port->sw->is_unplugged) sw 435 drivers/thunderbolt/tb.h return tb_cfg_write(port->sw->tb->ctl, sw 437 drivers/thunderbolt/tb.h tb_route(port->sw), sw 450 drivers/thunderbolt/tb.h #define __TB_SW_PRINT(level, sw, fmt, arg...) \ sw 452 drivers/thunderbolt/tb.h const struct tb_switch *__sw = (sw); \ sw 456 drivers/thunderbolt/tb.h #define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg) sw 457 drivers/thunderbolt/tb.h #define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg) sw 458 drivers/thunderbolt/tb.h #define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg) sw 459 drivers/thunderbolt/tb.h #define tb_sw_dbg(sw, fmt, arg...) 
__TB_SW_PRINT(tb_dbg, sw, fmt, ##arg) sw 464 drivers/thunderbolt/tb.h level(__port->sw->tb, "%llx:%x: " fmt, \ sw 465 drivers/thunderbolt/tb.h tb_route(__port->sw), __port->port, ## arg); \ sw 497 drivers/thunderbolt/tb.h int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw); sw 498 drivers/thunderbolt/tb.h int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw); sw 499 drivers/thunderbolt/tb.h int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw); sw 521 drivers/thunderbolt/tb.h int tb_switch_configure(struct tb_switch *sw); sw 522 drivers/thunderbolt/tb.h int tb_switch_add(struct tb_switch *sw); sw 523 drivers/thunderbolt/tb.h void tb_switch_remove(struct tb_switch *sw); sw 524 drivers/thunderbolt/tb.h void tb_switch_suspend(struct tb_switch *sw); sw 525 drivers/thunderbolt/tb.h int tb_switch_resume(struct tb_switch *sw); sw 527 drivers/thunderbolt/tb.h void tb_sw_set_unplugged(struct tb_switch *sw); sw 533 drivers/thunderbolt/tb.h static inline struct tb_switch *tb_switch_get(struct tb_switch *sw) sw 535 drivers/thunderbolt/tb.h if (sw) sw 536 drivers/thunderbolt/tb.h get_device(&sw->dev); sw 537 drivers/thunderbolt/tb.h return sw; sw 540 drivers/thunderbolt/tb.h static inline void tb_switch_put(struct tb_switch *sw) sw 542 drivers/thunderbolt/tb.h put_device(&sw->dev); sw 557 drivers/thunderbolt/tb.h static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) sw 559 drivers/thunderbolt/tb.h return tb_to_switch(sw->dev.parent); sw 562 drivers/thunderbolt/tb.h static inline bool tb_switch_is_lr(const struct tb_switch *sw) sw 564 drivers/thunderbolt/tb.h return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; sw 567 drivers/thunderbolt/tb.h static inline bool tb_switch_is_er(const struct tb_switch *sw) sw 569 drivers/thunderbolt/tb.h return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; sw 572 drivers/thunderbolt/tb.h static inline bool tb_switch_is_cr(const struct tb_switch *sw) sw 574 drivers/thunderbolt/tb.h switch (sw->config.device_id) { sw 583 drivers/thunderbolt/tb.h static inline bool tb_switch_is_fr(const struct tb_switch *sw) sw 585 drivers/thunderbolt/tb.h switch (sw->config.device_id) { sw 605 drivers/thunderbolt/tb.h int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); sw 630 drivers/thunderbolt/tb.h int tb_drom_read(struct tb_switch *sw); sw 631 drivers/thunderbolt/tb.h int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); sw 633 drivers/thunderbolt/tb.h int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); sw 634 drivers/thunderbolt/tb.h int tb_lc_configure_link(struct tb_switch *sw); sw 635 drivers/thunderbolt/tb.h void tb_lc_unconfigure_link(struct tb_switch *sw); sw 636 drivers/thunderbolt/tb.h int tb_lc_set_sleep(struct tb_switch *sw); sw 652 drivers/thunderbolt/tb.h return tb_route(port->sw) sw 653 drivers/thunderbolt/tb.h | ((u64) port->port << (port->sw->config.depth * 8)); sw 39 drivers/thunderbolt/tunnel.c tb_route(__tunnel->src_port->sw), \ sw 41 drivers/thunderbolt/tunnel.c tb_route(__tunnel->dst_port->sw), \ sw 239 drivers/thunderbolt/tunnel.c if (in->sw->generation < 2 || out->sw->generation < 2) sw 1404 drivers/thunderbolt/xdomain.c static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw, sw 1409 drivers/thunderbolt/xdomain.c for (i = 1; i <= sw->config.max_port_number; i++) { sw 1410 drivers/thunderbolt/xdomain.c struct tb_port *port = &sw->ports[i]; sw 1429 drivers/thunderbolt/xdomain.c xd = switch_find_xdomain(port->remote->sw, lookup); sw 250 
drivers/usb/class/cdc-wdm.c goto sw; /* halt is cleared in work */ sw 305 drivers/usb/class/cdc-wdm.c sw: sw 42 drivers/usb/roles/class.c int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role) sw 46 drivers/usb/roles/class.c if (IS_ERR_OR_NULL(sw)) sw 49 drivers/usb/roles/class.c mutex_lock(&sw->lock); sw 51 drivers/usb/roles/class.c ret = sw->set(sw->dev.parent, role); sw 53 drivers/usb/roles/class.c sw->role = role; sw 55 drivers/usb/roles/class.c mutex_unlock(&sw->lock); sw 68 drivers/usb/roles/class.c enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) sw 72 drivers/usb/roles/class.c if (IS_ERR_OR_NULL(sw)) sw 75 drivers/usb/roles/class.c mutex_lock(&sw->lock); sw 77 drivers/usb/roles/class.c if (sw->get) sw 78 drivers/usb/roles/class.c role = sw->get(sw->dev.parent); sw 80 drivers/usb/roles/class.c role = sw->role; sw 82 drivers/usb/roles/class.c mutex_unlock(&sw->lock); sw 127 drivers/usb/roles/class.c struct usb_role_switch *sw; sw 129 drivers/usb/roles/class.c sw = usb_role_switch_is_parent(dev_fwnode(dev)); sw 130 drivers/usb/roles/class.c if (!sw) sw 131 drivers/usb/roles/class.c sw = device_connection_find_match(dev, "usb-role-switch", NULL, sw 134 drivers/usb/roles/class.c if (!IS_ERR_OR_NULL(sw)) sw 135 drivers/usb/roles/class.c WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); sw 137 drivers/usb/roles/class.c return sw; sw 150 drivers/usb/roles/class.c struct usb_role_switch *sw; sw 152 drivers/usb/roles/class.c sw = usb_role_switch_is_parent(fwnode); sw 153 drivers/usb/roles/class.c if (!sw) sw 154 drivers/usb/roles/class.c sw = fwnode_connection_find_match(fwnode, "usb-role-switch", sw 156 drivers/usb/roles/class.c if (!IS_ERR_OR_NULL(sw)) sw 157 drivers/usb/roles/class.c WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); sw 159 drivers/usb/roles/class.c return sw; sw 169 drivers/usb/roles/class.c void usb_role_switch_put(struct usb_role_switch *sw) sw 171 drivers/usb/roles/class.c if (!IS_ERR_OR_NULL(sw)) { sw 172 drivers/usb/roles/class.c module_put(sw->dev.parent->driver->owner); sw 173 drivers/usb/roles/class.c put_device(&sw->dev); sw 182 drivers/usb/roles/class.c struct usb_role_switch *sw = to_role_switch(dev); sw 184 drivers/usb/roles/class.c if (sw->allow_userspace_control) sw 199 drivers/usb/roles/class.c struct usb_role_switch *sw = to_role_switch(dev); sw 200 drivers/usb/roles/class.c enum usb_role role = usb_role_switch_get_role(sw); sw 208 drivers/usb/roles/class.c struct usb_role_switch *sw = to_role_switch(dev); sw 221 drivers/usb/roles/class.c ret = usb_role_switch_set_role(sw, ret); sw 258 drivers/usb/roles/class.c struct usb_role_switch *sw = to_role_switch(dev); sw 260 drivers/usb/roles/class.c kfree(sw); sw 288 drivers/usb/roles/class.c struct usb_role_switch *sw; sw 294 drivers/usb/roles/class.c sw = kzalloc(sizeof(*sw), GFP_KERNEL); sw 295 drivers/usb/roles/class.c if (!sw) sw 298 drivers/usb/roles/class.c mutex_init(&sw->lock); sw 300 drivers/usb/roles/class.c sw->allow_userspace_control = desc->allow_userspace_control; sw 301 drivers/usb/roles/class.c sw->usb2_port = desc->usb2_port; sw 302 drivers/usb/roles/class.c sw->usb3_port = desc->usb3_port; sw 303 drivers/usb/roles/class.c sw->udc = desc->udc; sw 304 drivers/usb/roles/class.c sw->set = desc->set; sw 305 drivers/usb/roles/class.c sw->get = desc->get; sw 307 drivers/usb/roles/class.c sw->dev.parent = parent; sw 308 drivers/usb/roles/class.c sw->dev.fwnode = desc->fwnode; sw 309 drivers/usb/roles/class.c sw->dev.class = role_class; sw 310 
drivers/usb/roles/class.c sw->dev.type = &usb_role_dev_type; sw 311 drivers/usb/roles/class.c dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent)); sw 313 drivers/usb/roles/class.c ret = device_register(&sw->dev); sw 315 drivers/usb/roles/class.c put_device(&sw->dev); sw 321 drivers/usb/roles/class.c return sw; sw 331 drivers/usb/roles/class.c void usb_role_switch_unregister(struct usb_role_switch *sw) sw 333 drivers/usb/roles/class.c if (!IS_ERR_OR_NULL(sw)) sw 334 drivers/usb/roles/class.c device_unregister(&sw->dev); sw 52 drivers/usb/typec/class.c struct typec_switch *sw; sw 1279 drivers/usb/typec/class.c typec_switch_put(port->sw); sw 1449 drivers/usb/typec/class.c if (port->sw) { sw 1450 drivers/usb/typec/class.c ret = port->sw->set(port->sw, orientation); sw 1593 drivers/usb/typec/class.c port->sw = typec_switch_get(&port->dev); sw 1594 drivers/usb/typec/class.c if (IS_ERR(port->sw)) { sw 1595 drivers/usb/typec/class.c ret = PTR_ERR(port->sw); sw 72 drivers/usb/typec/mux.c struct typec_switch *sw; sw 74 drivers/usb/typec/mux.c sw = device_connection_find_match(dev, "orientation-switch", NULL, sw 76 drivers/usb/typec/mux.c if (!IS_ERR_OR_NULL(sw)) sw 77 drivers/usb/typec/mux.c WARN_ON(!try_module_get(sw->dev.parent->driver->owner)); sw 79 drivers/usb/typec/mux.c return sw; sw 89 drivers/usb/typec/mux.c void typec_switch_put(struct typec_switch *sw) sw 91 drivers/usb/typec/mux.c if (!IS_ERR_OR_NULL(sw)) { sw 92 drivers/usb/typec/mux.c module_put(sw->dev.parent->driver->owner); sw 93 drivers/usb/typec/mux.c put_device(&sw->dev); sw 122 drivers/usb/typec/mux.c struct typec_switch *sw; sw 128 drivers/usb/typec/mux.c sw = kzalloc(sizeof(*sw), GFP_KERNEL); sw 129 drivers/usb/typec/mux.c if (!sw) sw 132 drivers/usb/typec/mux.c sw->set = desc->set; sw 134 drivers/usb/typec/mux.c device_initialize(&sw->dev); sw 135 drivers/usb/typec/mux.c sw->dev.parent = parent; sw 136 drivers/usb/typec/mux.c sw->dev.fwnode = desc->fwnode; sw 137 drivers/usb/typec/mux.c sw->dev.class = &typec_mux_class; sw 138 drivers/usb/typec/mux.c sw->dev.type = &typec_switch_dev_type; sw 139 drivers/usb/typec/mux.c sw->dev.driver_data = desc->drvdata; sw 140 drivers/usb/typec/mux.c dev_set_name(&sw->dev, "%s-switch", dev_name(parent)); sw 142 drivers/usb/typec/mux.c ret = device_add(&sw->dev); sw 145 drivers/usb/typec/mux.c put_device(&sw->dev); sw 149 drivers/usb/typec/mux.c return sw; sw 159 drivers/usb/typec/mux.c void typec_switch_unregister(struct typec_switch *sw) sw 161 drivers/usb/typec/mux.c if (!IS_ERR_OR_NULL(sw)) sw 162 drivers/usb/typec/mux.c device_unregister(&sw->dev); sw 166 drivers/usb/typec/mux.c void typec_switch_set_drvdata(struct typec_switch *sw, void *data) sw 168 drivers/usb/typec/mux.c dev_set_drvdata(&sw->dev, data); sw 172 drivers/usb/typec/mux.c void *typec_switch_get_drvdata(struct typec_switch *sw) sw 174 drivers/usb/typec/mux.c return dev_get_drvdata(&sw->dev); sw 26 drivers/usb/typec/mux/pi3usb30532.c struct typec_switch *sw; sw 48 drivers/usb/typec/mux/pi3usb30532.c static int pi3usb30532_sw_set(struct typec_switch *sw, sw 51 drivers/usb/typec/mux/pi3usb30532.c struct pi3usb30532 *pi = typec_switch_get_drvdata(sw); sw 139 drivers/usb/typec/mux/pi3usb30532.c pi->sw = typec_switch_register(dev, &sw_desc); sw 140 drivers/usb/typec/mux/pi3usb30532.c if (IS_ERR(pi->sw)) { sw 142 drivers/usb/typec/mux/pi3usb30532.c PTR_ERR(pi->sw)); sw 143 drivers/usb/typec/mux/pi3usb30532.c return PTR_ERR(pi->sw); sw 152 drivers/usb/typec/mux/pi3usb30532.c typec_switch_unregister(pi->sw); sw 167 
drivers/usb/typec/mux/pi3usb30532.c typec_switch_unregister(pi->sw); sw 8 drivers/video/fbdev/mb862xx/mb862xxfb.h unsigned short sw; sw 329 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { sw 333 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c (l1_cfg->sw << 11) / l1_cfg->dw)); sw 336 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c } else if ((l1_cfg->sw <= l1_cfg->dw) && sw 341 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c (l1_cfg->sw << 11) / l1_cfg->dw)); sw 343 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c pack(l1_cfg->sw >> 1, l1_cfg->sh)); sw 368 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c pack(l1_cfg->sh, l1_cfg->sw)); sw 525 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c par->l1_cfg.sw = 720; sw 531 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c stride = par->l1_cfg.sw * (fbi->var.bits_per_pixel / 8); sw 108 include/linux/console.h int do_take_over_console(const struct consw *sw, int first, int last, int deflt); sw 109 include/linux/console.h void give_up_console(const struct consw *sw); sw 177 include/linux/input.h unsigned long sw[BITS_TO_LONGS(SW_CNT)]; sw 40 include/linux/input/sparse-keymap.h } sw; sw 208 include/linux/rfkill.h void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw); sw 273 include/linux/rfkill.h static inline void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) sw 64 include/linux/usb/role.h static inline enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw) sw 80 include/linux/usb/role.h static inline void usb_role_switch_put(struct usb_role_switch *sw) { } sw 198 include/linux/usb/typec.h struct typec_switch *sw; sw 13 include/linux/usb/typec_mux.h typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw, sw 23 include/linux/usb/typec_mux.h void typec_switch_put(struct typec_switch *sw); sw 27 include/linux/usb/typec_mux.h void typec_switch_unregister(struct typec_switch *sw); sw 29 include/linux/usb/typec_mux.h void typec_switch_set_drvdata(struct typec_switch *sw, void *data); sw 30 include/linux/usb/typec_mux.h void *typec_switch_get_drvdata(struct typec_switch *sw); sw 323 include/net/tls.h struct tls_sw_context_rx sw; sw 216 include/scsi/libsas.h static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *)) sw 218 include/scsi/libsas.h INIT_WORK(&sw->work, fn); sw 219 include/scsi/libsas.h INIT_LIST_HEAD(&sw->drain_node); sw 606 net/rfkill/core.c void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) sw 621 net/rfkill/core.c __rfkill_set_sw_state(rfkill, sw); sw 632 net/rfkill/core.c if (swprev != sw || hwprev != hw) sw 262 net/rfkill/input.c handle->dev->sw)); sw 869 net/tls/tls_device.c ctx->sw.decrypted |= is_decrypted; sw 1175 sound/pci/azt3328.c const struct snd_kcontrol_new *sw; sw 1195 sound/pci/azt3328.c sw = snd_azf3328_mixer_controls; sw 1197 sound/pci/azt3328.c ++idx, ++sw) { sw 1198 sound/pci/azt3328.c if ((err = snd_ctl_add(chip->card, snd_ctl_new1(sw, chip))) < 0) sw 2656 sound/pci/cmipci.c struct snd_kcontrol_new *sw; sw 2683 sound/pci/cmipci.c sw = snd_cmipci_mixer_switches; sw 2684 sound/pci/cmipci.c for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_mixer_switches); idx++, sw++) { sw 2685 sound/pci/cmipci.c err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm)); sw 2696 sound/pci/cmipci.c sw = snd_cmipci_8738_mixer_switches; sw 2697 sound/pci/cmipci.c for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_8738_mixer_switches); idx++, sw++) { sw 2698 sound/pci/cmipci.c err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm)); sw 2714 sound/pci/cmipci.c sw = 
snd_cmipci_old_mixer_switches; sw 2715 sound/pci/cmipci.c for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_old_mixer_switches); idx++, sw++) { sw 2716 sound/pci/cmipci.c err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm)); sw 2723 sound/pci/cmipci.c sw = snd_cmipci_extra_mixer_switches; sw 2724 sound/pci/cmipci.c for (idx = 0; idx < ARRAY_SIZE(snd_cmipci_extra_mixer_switches); idx++, sw++) { sw 2725 sound/pci/cmipci.c err = snd_ctl_add(cm->card, snd_ctl_new1(sw, cm)); sw 1443 sound/pci/emu10k1/emufx.c #define _A_SWITCH(icode, ptr, dst, src, sw) \ sw 1444 sound/pci/emu10k1/emufx.c A_OP((icode), ptr, iMACINT0, dst, A_C_00000000, src, sw); sw 1445 sound/pci/emu10k1/emufx.c #define A_SWITCH(icode, ptr, dst, src, sw) \ sw 1446 sound/pci/emu10k1/emufx.c _A_SWITCH(icode, ptr, A_GPR(dst), A_GPR(src), A_GPR(sw)) sw 1814 sound/pci/emu10k1/emufx.c #define _SWITCH(icode, ptr, dst, src, sw) \ sw 1815 sound/pci/emu10k1/emufx.c OP((icode), ptr, iMACINT0, dst, C_00000000, src, sw); sw 1816 sound/pci/emu10k1/emufx.c #define SWITCH(icode, ptr, dst, src, sw) \ sw 1817 sound/pci/emu10k1/emufx.c _SWITCH(icode, ptr, GPR(dst), GPR(src), GPR(sw)) sw 1818 sound/pci/emu10k1/emufx.c #define SWITCH_IN(icode, ptr, dst, src, sw) \ sw 1819 sound/pci/emu10k1/emufx.c _SWITCH(icode, ptr, GPR(dst), EXTIN(src), GPR(sw)) sw 1649 sound/pci/emu10k1/emumixer.c unsigned int reg, val, sw; sw 1652 sound/pci/emu10k1/emumixer.c sw = ucontrol->value.integer.value[0]; sw 1654 sound/pci/emu10k1/emumixer.c sw = !sw; sw 1660 sound/pci/emu10k1/emumixer.c val = sw ? A_IOCFG_GPOUT0 : 0; sw 1669 sound/pci/emu10k1/emumixer.c val = sw ? HCFG_GPOUT0 : 0; sw 3755 sound/pci/hda/hda_generic.c int vol, sw; sw 3757 sound/pci/hda/hda_generic.c vol = sw = 0; sw 3772 sound/pci/hda/hda_generic.c if (!sw) sw 3773 sound/pci/hda/hda_generic.c sw = path->ctls[NID_PATH_MUTE_CTL]; sw 3774 sound/pci/hda/hda_generic.c else if (sw != path->ctls[NID_PATH_MUTE_CTL]) { sw 3776 sound/pci/hda/hda_generic.c if (!same_amp_caps(codec, sw, sw 3785 sound/pci/hda/hda_generic.c err = create_single_cap_vol_ctl(codec, n, vol, sw, sw 3788 sound/pci/hda/hda_generic.c err = create_bind_cap_vol_ctl(codec, n, vol, sw); sw 752 sound/pci/ice1712/quartet.c QTET_CONTROL("Analog In 1/2 to Monitor 1/2", sw, IN12_MON12), sw 753 sound/pci/ice1712/quartet.c QTET_CONTROL("Analog In 1/2 to Monitor 3/4", sw, IN12_MON34), sw 754 sound/pci/ice1712/quartet.c QTET_CONTROL("Analog In 3/4 to Monitor 1/2", sw, IN34_MON12), sw 755 sound/pci/ice1712/quartet.c QTET_CONTROL("Analog In 3/4 to Monitor 3/4", sw, IN34_MON34), sw 756 sound/pci/ice1712/quartet.c QTET_CONTROL("Output 1/2 to Monitor 3/4", sw, OUT12_MON34), sw 757 sound/pci/ice1712/quartet.c QTET_CONTROL("Output 3/4 to Monitor 1/2", sw, OUT34_MON12), sw 944 sound/ppc/tumbler.c struct snd_kcontrol *sw) sw 950 sound/ppc/tumbler.c &sw->id);
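
The drivers/thunderbolt/tb.h entries above (tb_route, tb_port_at and tb_downstream_route) together show how a Thunderbolt route string is encoded: config.route_hi/route_lo pack into one 64-bit route, and each hop consumes one byte of that route, indexed by the switch's depth. The following is a minimal standalone sketch of that arithmetic, assuming nothing beyond what those listed lines show; struct sw_model and the model_* helpers are invented user-space stand-ins for struct tb_switch and the tb.h inlines, included only so the example compiles and runs on its own. It is not kernel code.

/*
 * Standalone sketch (not kernel code) of the route-string arithmetic
 * visible in the drivers/thunderbolt/tb.h entries listed above:
 *   - tb_route()            packs route_hi/route_lo into a 64-bit route
 *   - tb_port_at()          extracts the port for a hop at a given depth
 *   - tb_downstream_route() appends a port number at the current depth
 * struct sw_model is a simplified, hypothetical stand-in for the kernel's
 * struct tb_switch, kept only to make the example self-contained.
 */
#include <stdint.h>
#include <stdio.h>

struct sw_model {
	uint32_t route_hi;	/* stand-in for sw->config.route_hi */
	uint32_t route_lo;	/* stand-in for sw->config.route_lo */
	int depth;		/* hops from the root switch */
	int max_port_number;
};

/* Same packing as tb_route(): high and low halves form one 64-bit route. */
static uint64_t model_route(const struct sw_model *sw)
{
	return ((uint64_t)sw->route_hi << 32) | sw->route_lo;
}

/*
 * Same extraction as tb_port_at(): one byte of the route per depth level.
 * The kernel relies on a u8 local for the truncation; uint8_t mirrors that.
 * Returns -1 where the kernel would WARN and return NULL.
 */
static int model_port_at(uint64_t route, const struct sw_model *sw)
{
	uint8_t port = route >> (sw->depth * 8);

	return port > sw->max_port_number ? -1 : port;
}

/* Same idea as tb_downstream_route(): extend the route by one more hop. */
static uint64_t model_downstream_route(const struct sw_model *sw, int port)
{
	return model_route(sw) | ((uint64_t)port << (sw->depth * 8));
}

int main(void)
{
	/* Hypothetical root switch (depth 0) with a device behind port 3. */
	struct sw_model root = { .route_hi = 0, .route_lo = 0,
				 .depth = 0, .max_port_number = 12 };
	uint64_t route = model_downstream_route(&root, 3);
	struct sw_model child = { .route_hi = (uint32_t)(route >> 32),
				  .route_lo = (uint32_t)route,
				  .depth = 1, .max_port_number = 12 };

	printf("route to child: %#llx\n",
	       (unsigned long long)model_route(&child));
	printf("root port leading to child: %d\n",
	       model_port_at(route, &root));
	return 0;
}

Running the sketch prints route 0x3 and port 3, matching the listed inlines: the root's downstream port number occupies the lowest byte of the route, and each deeper switch reads its own byte at depth * 8.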