sh 8 arch/alpha/math-emu/sfp-util.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 9 arch/alpha/math-emu/sfp-util.h ((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al))) sh 11 arch/alpha/math-emu/sfp-util.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 12 arch/alpha/math-emu/sfp-util.h ((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl))) sh 547 arch/mips/alchemy/common/clock.c int sh = c->shift + 2; sh 554 arch/mips/alchemy/common/clock.c v &= ~(0xff << sh); sh 555 arch/mips/alchemy/common/clock.c v |= div << sh; sh 667 arch/mips/alchemy/common/clock.c int sh = c->shift + 2; sh 679 arch/mips/alchemy/common/clock.c v &= ~(0xff << sh); sh 680 arch/mips/alchemy/common/clock.c v |= (div & 0xff) << sh; sh 691 arch/mips/alchemy/common/clock.c int sh = c->shift + 2; sh 695 arch/mips/alchemy/common/clock.c t = parent_rate / (((v >> sh) & 0xff) + 1); sh 272 arch/mips/boot/elf2ecoff.c Elf32_Shdr *sh; sh 335 arch/mips/boot/elf2ecoff.c sh = (Elf32_Shdr *) saveRead(infile, ex.e_shoff, sh 339 arch/mips/boot/elf2ecoff.c convert_elf_shdrs(sh, ex.e_shnum); sh 117 arch/mips/include/asm/asm-eva.h #define kernel_sh(reg, addr) sh reg, addr sh 218 arch/mips/include/asm/uasm.h # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh) sh 220 arch/mips/include/asm/uasm.h # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh) sh 221 arch/mips/include/asm/uasm.h # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh) sh 222 arch/mips/include/asm/uasm.h # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh) sh 223 arch/mips/include/asm/uasm.h # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh) sh 234 arch/mips/include/asm/uasm.h # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh) sh 236 arch/mips/include/asm/uasm.h # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh) sh 237 arch/mips/include/asm/uasm.h # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh) sh 238 arch/mips/include/asm/uasm.h # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) sh 239 arch/mips/include/asm/uasm.h # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) sh 1378 arch/mips/net/ebpf_jit.c emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst); sh 1488 arch/mips/net/ebpf_jit.c emit_instr(ctx, sh, src, mem_off, dst); sh 218 arch/mips/pci/pci-ar2315.c u32 sh = (where & 3) * 8; sh 241 arch/mips/pci/pci-ar2315.c value = (value & ~(mask << sh)) | *ptr << sh; sh 247 arch/mips/pci/pci-ar2315.c *ptr = (value >> sh) & mask; sh 112 arch/nds32/include/asm/sfp-machine.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 116 arch/nds32/include/asm/sfp-machine.h (sh) = (ah) + (bh) + (__x < (al)); \ sh 120 arch/nds32/include/asm/sfp-machine.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 124 arch/nds32/include/asm/sfp-machine.h (sh) = (ah) - (bh) - (__x > (al)); \ sh 216 arch/powerpc/include/asm/sfp-machine.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 220 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ sh 223 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) : "r" (ah), "%r" (al), "rI" (bl));\ sh 226 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) \ sh 239 arch/powerpc/include/asm/sfp-machine.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 243 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ sh 246 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) : "r" (bh), "rI" (al), "r" (bl));\ sh 
249 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ sh 252 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) : "r" (ah), "rI" (al), "r" (bl));\ sh 255 arch/powerpc/include/asm/sfp-machine.h : "=r" (sh), "=&r" (sl) \ sh 1171 arch/powerpc/lib/sstep.c unsigned int mb, me, sh; sh 1459 arch/powerpc/lib/sstep.c sh = rb | ((instr & 2) << 4); sh 1460 arch/powerpc/lib/sstep.c val = ROTATE(val, sh); sh 1469 arch/powerpc/lib/sstep.c val &= MASK64(mb, 63 - sh); sh 1472 arch/powerpc/lib/sstep.c imm = MASK64(mb, 63 - sh); sh 1479 arch/powerpc/lib/sstep.c sh = regs->gpr[rb] & 0x3f; sh 1480 arch/powerpc/lib/sstep.c val = ROTATE(val, sh); sh 1548 arch/powerpc/lib/sstep.c for (sh = 0; sh < 8; ++sh) { sh 1549 arch/powerpc/lib/sstep.c if (instr & (0x80000 >> sh)) sh 1562 arch/powerpc/lib/sstep.c for (sh = 0; sh < 8; ++sh) { sh 1563 arch/powerpc/lib/sstep.c if (instr & (0x80000 >> sh)) sh 1877 arch/powerpc/lib/sstep.c sh = regs->gpr[rb] & 0x3f; sh 1878 arch/powerpc/lib/sstep.c if (sh < 32) sh 1879 arch/powerpc/lib/sstep.c op->val = (regs->gpr[rd] << sh) & 0xffffffffUL; sh 1885 arch/powerpc/lib/sstep.c sh = regs->gpr[rb] & 0x3f; sh 1886 arch/powerpc/lib/sstep.c if (sh < 32) sh 1887 arch/powerpc/lib/sstep.c op->val = (regs->gpr[rd] & 0xffffffffUL) >> sh; sh 1894 arch/powerpc/lib/sstep.c sh = regs->gpr[rb] & 0x3f; sh 1896 arch/powerpc/lib/sstep.c op->val = ival >> (sh < 32 ? sh : 31); sh 1898 arch/powerpc/lib/sstep.c if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) sh 1907 arch/powerpc/lib/sstep.c sh = rb; sh 1909 arch/powerpc/lib/sstep.c op->val = ival >> sh; sh 1911 arch/powerpc/lib/sstep.c if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) sh 1920 arch/powerpc/lib/sstep.c sh = regs->gpr[rb] & 0x7f; sh 1921 arch/powerpc/lib/sstep.c if (sh < 64) sh 1922 arch/powerpc/lib/sstep.c op->val = regs->gpr[rd] << sh; sh 1928 arch/powerpc/lib/sstep.c sh = regs->gpr[rb] & 0x7f; sh 1929 arch/powerpc/lib/sstep.c if (sh < 64) sh 1930 arch/powerpc/lib/sstep.c op->val = regs->gpr[rd] >> sh; sh 1937 arch/powerpc/lib/sstep.c sh = regs->gpr[rb] & 0x7f; sh 1939 arch/powerpc/lib/sstep.c op->val = ival >> (sh < 64 ? 
sh : 63); sh 1941 arch/powerpc/lib/sstep.c if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) sh 1951 arch/powerpc/lib/sstep.c sh = rb | ((instr & 2) << 4); sh 1953 arch/powerpc/lib/sstep.c op->val = ival >> sh; sh 1955 arch/powerpc/lib/sstep.c if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) sh 1967 arch/powerpc/lib/sstep.c sh = rb | ((instr & 2) << 4); sh 1969 arch/powerpc/lib/sstep.c if (sh) sh 1970 arch/powerpc/lib/sstep.c op->val = ROTATE(val, sh) & MASK64(0, 63 - sh); sh 135 arch/powerpc/perf/power5+-pmu.c int pmc, byte, unit, sh; sh 143 arch/powerpc/perf/power5+-pmu.c sh = (pmc - 1) * 2; sh 144 arch/powerpc/perf/power5+-pmu.c mask |= 2 << sh; sh 145 arch/powerpc/perf/power5+-pmu.c value |= 1 << sh; sh 168 arch/powerpc/perf/power5+-pmu.c sh = grsel_shift[bit]; sh 169 arch/powerpc/perf/power5+-pmu.c mask |= (unsigned long)fmask << sh; sh 171 arch/powerpc/perf/power5+-pmu.c << sh; sh 139 arch/powerpc/perf/power5-pmu.c int pmc, byte, unit, sh; sh 148 arch/powerpc/perf/power5-pmu.c sh = (pmc - 1) * 2; sh 149 arch/powerpc/perf/power5-pmu.c mask |= 2 << sh; sh 150 arch/powerpc/perf/power5-pmu.c value |= 1 << sh; sh 175 arch/powerpc/perf/power5-pmu.c sh = grsel_shift[bit]; sh 176 arch/powerpc/perf/power5-pmu.c mask |= (unsigned long)fmask << sh; sh 178 arch/powerpc/perf/power5-pmu.c << sh; sh 269 arch/powerpc/perf/power6-pmu.c int pmc, byte, sh, subunit; sh 276 arch/powerpc/perf/power6-pmu.c sh = (pmc - 1) * 2; sh 277 arch/powerpc/perf/power6-pmu.c mask |= 2 << sh; sh 278 arch/powerpc/perf/power6-pmu.c value |= 1 << sh; sh 282 arch/powerpc/perf/power6-pmu.c sh = byte * 4 + (16 - PM_UNIT_SH); sh 283 arch/powerpc/perf/power6-pmu.c mask |= PM_UNIT_MSKS << sh; sh 284 arch/powerpc/perf/power6-pmu.c value |= (unsigned long)(event & PM_UNIT_MSKS) << sh; sh 84 arch/powerpc/perf/power7-pmu.c int pmc, sh, unit; sh 91 arch/powerpc/perf/power7-pmu.c sh = (pmc - 1) * 2; sh 92 arch/powerpc/perf/power7-pmu.c mask |= 2 << sh; sh 93 arch/powerpc/perf/power7-pmu.c value |= 1 << sh; sh 193 arch/powerpc/perf/ppc970-pmu.c int pmc, byte, unit, sh, spcsel; sh 201 arch/powerpc/perf/ppc970-pmu.c sh = (pmc - 1) * 2; sh 202 arch/powerpc/perf/ppc970-pmu.c mask |= 2 << sh; sh 203 arch/powerpc/perf/ppc970-pmu.c value |= 1 << sh; sh 2753 arch/powerpc/xmon/ppc-opc.c #define XTLB(op, xop, sh) (X ((op), (xop)) | ((((unsigned long)(sh)) & 0x1f) << 11)) sh 6 arch/sh/math-emu/sfp-util.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 10 arch/sh/math-emu/sfp-util.h (sh) = (ah) + (bh) + (__x < (al)); \ sh 14 arch/sh/math-emu/sfp-util.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 18 arch/sh/math-emu/sfp-util.h (sh) = (ah) - (bh) - (__x > (al)); \ sh 7 arch/sparc/math-emu/sfp-util_32.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 10 arch/sparc/math-emu/sfp-util_32.h : "=r" (sh), \ sh 17 arch/sparc/math-emu/sfp-util_32.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 20 arch/sparc/math-emu/sfp-util_32.h : "=r" (sh), \ sh 15 arch/sparc/math-emu/sfp-util_64.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 21 arch/sparc/math-emu/sfp-util_64.h : "=r" (sh), \ sh 29 arch/sparc/math-emu/sfp-util_64.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 35 arch/sparc/math-emu/sfp-util_64.h : "=r" (sh), \ sh 66 arch/sparc/vdso/vdso2c.h ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) + sh 68 arch/sparc/vdso/vdso2c.h if (GET_BE(&sh->sh_type) == SHT_SYMTAB) sh 69 arch/sparc/vdso/vdso2c.h symtab_hdr = sh; sh 73 arch/x86/entry/vdso/vdso2c.h ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) + sh 75 arch/x86/entry/vdso/vdso2c.h if 
(GET_LE(&sh->sh_type) == SHT_SYMTAB) sh 76 arch/x86/entry/vdso/vdso2c.h symtab_hdr = sh; sh 78 arch/x86/entry/vdso/vdso2c.h if (!strcmp(secstrings + GET_LE(&sh->sh_name), sh 80 arch/x86/entry/vdso/vdso2c.h alt_sec = sh; sh 487 crypto/twofish_common.c ctx->s[3][i] = mds[3][q1[(b) ^ sd] ^ sh] sh 495 crypto/twofish_common.c ctx->s[3][i] = mds[3][q1[q1[(a) ^ sd] ^ sh] ^ sl]; sh 503 crypto/twofish_common.c ctx->s[3][i] = mds[3][q1[q1[q0[(b) ^ sd] ^ sh] ^ sl] ^ sp]; sh 579 crypto/twofish_common.c u8 sa = 0, sb = 0, sc = 0, sd = 0, se = 0, sf = 0, sg = 0, sh = 0; sh 604 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 8, 0x00, 0x2D, 0x01, 0x2D); /* 01 A4 02 A4 */ sh 605 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 9, 0x2D, 0xA4, 0x44, 0x8A); /* A4 56 A1 55 */ sh 606 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 10, 0x8A, 0xD5, 0xBF, 0xD1); /* 55 82 FC 87 */ sh 607 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 11, 0xD1, 0x7F, 0x3D, 0x99); /* 87 F3 C1 5A */ sh 608 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 12, 0x99, 0x46, 0x66, 0x96); /* 5A 1E 47 58 */ sh 609 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 13, 0x96, 0x3C, 0x5B, 0xED); /* 58 C6 AE DB */ sh 610 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 14, 0xED, 0x37, 0x4F, 0xE0); /* DB 68 3D 9E */ sh 611 crypto/twofish_common.c CALC_S (se, sf, sg, sh, 15, 0xE0, 0xD0, 0x8C, 0x17); /* 9E E5 19 03 */ sh 454 drivers/bluetooth/bluecard_cs.c struct hci_sco_hdr *sh; sh 472 drivers/bluetooth/bluecard_cs.c sh = hci_sco_hdr(info->rx_skb); sh 474 drivers/bluetooth/bluecard_cs.c info->rx_count = sh->dlen; sh 294 drivers/bluetooth/bt3c_cs.c struct hci_sco_hdr *sh; sh 312 drivers/bluetooth/bt3c_cs.c sh = hci_sco_hdr(info->rx_skb); sh 314 drivers/bluetooth/bt3c_cs.c info->rx_count = sh->dlen; sh 1146 drivers/gpu/drm/amd/amdgpu/amdgpu.h #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) sh 623 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c uint32_t offset, se, sh, cu, wave, simd, data[32]; sh 631 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c sh = (*pos & GENMASK_ULL(22, 15)) >> 15; sh 638 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c amdgpu_gfx_select_se_sh(adev, se, sh, cu); sh 695 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data; sh 703 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c sh = (*pos & GENMASK_ULL(27, 20)) >> 20; sh 716 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c amdgpu_gfx_select_se_sh(adev, se, sh, cu); sh 146 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c unsigned se, sh, cu; sh 157 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu); sh 163 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c if (se < max_se && sh < max_sh && cu < 16) { sh 164 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu); sh 165 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c mask[se * max_sh + sh] |= 1u << cu; sh 168 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c se, sh, cu); sh 333 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) sh 102 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c int i, se, sh, cu = 0; sh 110 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) sh 111 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]); sh 215 drivers/gpu/drm/nouveau/dispnv50/atom.h 
u16 sh; sh 50 drivers/gpu/drm/nouveau/dispnv50/ovly507e.c evo_data(push, asyw->scale.sh << 16 | asyw->scale.sw); sh 282 drivers/gpu/drm/nouveau/dispnv50/wndw.c asyw->scale.sh = asyw->state.src_h >> 16; sh 470 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c u32 reg, sh, gpio_val; sh 479 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c sh = (func.line & 0x7) << 2; sh 481 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c if (gpio_val & (8 << sh)) sh 486 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgt215.c ram_mask(fuc, gpio[reg], (0x3 << sh), ((val | 0x2) << sh)); sh 196 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c u32 reg, sh, gpio_val; sh 205 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c sh = (func.line & 0x7) << 2; sh 208 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c if (gpio_val & (8 << sh)) sh 213 drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramnv50.c ram_mask(hwsq, gpio[reg], (0x3 << sh), ((val | 0x2) << sh)); sh 137 drivers/gpu/drm/radeon/cik.c static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); sh 6539 drivers/gpu/drm/radeon/cik.c static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh) sh 6544 drivers/gpu/drm/radeon/cik.c cik_select_se_sh(rdev, se, sh); sh 125 drivers/gpu/drm/radeon/si.c static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); sh 5299 drivers/gpu/drm/radeon/si.c static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh) sh 5304 drivers/gpu/drm/radeon/si.c si_select_se_sh(rdev, se, sh); sh 3444 drivers/infiniband/hw/qib/qib_iba7322.c int lsb, reg, sh; sh 3509 drivers/infiniband/hw/qib/qib_iba7322.c sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) * sh 3512 drivers/infiniband/hw/qib/qib_iba7322.c redirect[reg] |= ((u64) msixnum) << sh; sh 310 drivers/md/raid5-cache.c struct stripe_head *sh, int disks) sh 314 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) { sh 315 drivers/md/raid5-cache.c if (sh->dev[i].written) { sh 316 drivers/md/raid5-cache.c set_bit(R5_UPTODATE, &sh->dev[i].flags); sh 317 drivers/md/raid5-cache.c r5c_return_dev_pending_writes(conf, &sh->dev[i]); sh 318 drivers/md/raid5-cache.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, sh 320 drivers/md/raid5-cache.c !test_bit(STRIPE_DEGRADED, &sh->state), sh 451 drivers/md/raid5-cache.c void r5c_make_stripe_write_out(struct stripe_head *sh) sh 453 drivers/md/raid5-cache.c struct r5conf *conf = sh->raid_conf; sh 458 drivers/md/raid5-cache.c WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 459 drivers/md/raid5-cache.c clear_bit(STRIPE_R5C_CACHING, &sh->state); sh 461 drivers/md/raid5-cache.c if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 465 drivers/md/raid5-cache.c static void r5c_handle_data_cached(struct stripe_head *sh) sh 469 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) sh 470 drivers/md/raid5-cache.c if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { sh 471 drivers/md/raid5-cache.c set_bit(R5_InJournal, &sh->dev[i].flags); sh 472 drivers/md/raid5-cache.c clear_bit(R5_LOCKED, &sh->dev[i].flags); sh 474 drivers/md/raid5-cache.c clear_bit(STRIPE_LOG_TRAPPED, &sh->state); sh 481 drivers/md/raid5-cache.c static void r5c_handle_parity_cached(struct stripe_head *sh) sh 485 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) sh 486 drivers/md/raid5-cache.c if (test_bit(R5_InJournal, &sh->dev[i].flags)) sh 487 drivers/md/raid5-cache.c set_bit(R5_Wantwrite, &sh->dev[i].flags); sh 494 drivers/md/raid5-cache.c static void r5c_finish_cache_stripe(struct stripe_head *sh) sh 496 
drivers/md/raid5-cache.c struct r5l_log *log = sh->raid_conf->log; sh 499 drivers/md/raid5-cache.c BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 506 drivers/md/raid5-cache.c set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); sh 507 drivers/md/raid5-cache.c } else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) { sh 508 drivers/md/raid5-cache.c r5c_handle_data_cached(sh); sh 510 drivers/md/raid5-cache.c r5c_handle_parity_cached(sh); sh 511 drivers/md/raid5-cache.c set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); sh 517 drivers/md/raid5-cache.c struct stripe_head *sh, *next; sh 519 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { sh 520 drivers/md/raid5-cache.c list_del_init(&sh->log_list); sh 522 drivers/md/raid5-cache.c r5c_finish_cache_stripe(sh); sh 524 drivers/md/raid5-cache.c set_bit(STRIPE_HANDLE, &sh->state); sh 525 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 904 drivers/md/raid5-cache.c static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, sh 924 drivers/md/raid5-cache.c if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state)) sh 927 drivers/md/raid5-cache.c for (i = 0; i < sh->disks; i++) { sh 928 drivers/md/raid5-cache.c if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || sh 929 drivers/md/raid5-cache.c test_bit(R5_InJournal, &sh->dev[i].flags)) sh 931 drivers/md/raid5-cache.c if (i == sh->pd_idx || i == sh->qd_idx) sh 933 drivers/md/raid5-cache.c if (test_bit(R5_WantFUA, &sh->dev[i].flags) && sh 943 drivers/md/raid5-cache.c raid5_compute_blocknr(sh, i, 0), sh 944 drivers/md/raid5-cache.c sh->dev[i].log_checksum, 0, false); sh 945 drivers/md/raid5-cache.c r5l_append_payload_page(log, sh->dev[i].page); sh 950 drivers/md/raid5-cache.c sh->sector, sh->dev[sh->pd_idx].log_checksum, sh 951 drivers/md/raid5-cache.c sh->dev[sh->qd_idx].log_checksum, true); sh 952 drivers/md/raid5-cache.c r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); sh 953 drivers/md/raid5-cache.c r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); sh 956 drivers/md/raid5-cache.c sh->sector, sh->dev[sh->pd_idx].log_checksum, sh 958 drivers/md/raid5-cache.c r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); sh 962 drivers/md/raid5-cache.c list_add_tail(&sh->log_list, &io->stripe_list); sh 964 drivers/md/raid5-cache.c sh->log_io = io; sh 969 drivers/md/raid5-cache.c if (sh->log_start == MaxSector) { sh 970 drivers/md/raid5-cache.c BUG_ON(!list_empty(&sh->r5c)); sh 971 drivers/md/raid5-cache.c sh->log_start = io->log_start; sh 973 drivers/md/raid5-cache.c list_add_tail(&sh->r5c, sh 983 drivers/md/raid5-cache.c struct stripe_head *sh) sh 986 drivers/md/raid5-cache.c list_add_tail(&sh->log_list, &log->no_space_stripes); sh 994 drivers/md/raid5-cache.c int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) sh 996 drivers/md/raid5-cache.c struct r5conf *conf = sh->raid_conf; sh 1007 drivers/md/raid5-cache.c if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || sh 1008 drivers/md/raid5-cache.c test_bit(STRIPE_SYNCING, &sh->state)) { sh 1010 drivers/md/raid5-cache.c clear_bit(STRIPE_LOG_TRAPPED, &sh->state); sh 1014 drivers/md/raid5-cache.c WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 1016 drivers/md/raid5-cache.c for (i = 0; i < sh->disks; i++) { sh 1019 drivers/md/raid5-cache.c if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || sh 1020 drivers/md/raid5-cache.c test_bit(R5_InJournal, &sh->dev[i].flags)) sh 1025 drivers/md/raid5-cache.c if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) sh 1027 
drivers/md/raid5-cache.c addr = kmap_atomic(sh->dev[i].page); sh 1028 drivers/md/raid5-cache.c sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, sh 1032 drivers/md/raid5-cache.c parity_pages = 1 + !!(sh->qd_idx >= 0); sh 1035 drivers/md/raid5-cache.c set_bit(STRIPE_LOG_TRAPPED, &sh->state); sh 1040 drivers/md/raid5-cache.c clear_bit(STRIPE_DELAYED, &sh->state); sh 1041 drivers/md/raid5-cache.c atomic_inc(&sh->count); sh 1049 drivers/md/raid5-cache.c r5l_add_no_space_stripe(log, sh); sh 1052 drivers/md/raid5-cache.c ret = r5l_log_stripe(log, sh, data_pages, parity_pages); sh 1055 drivers/md/raid5-cache.c list_add_tail(&sh->log_list, sh 1066 drivers/md/raid5-cache.c sh->log_start == MaxSector) { sh 1067 drivers/md/raid5-cache.c r5l_add_no_space_stripe(log, sh); sh 1071 drivers/md/raid5-cache.c if (sh->log_start == log->last_checkpoint) sh 1074 drivers/md/raid5-cache.c r5l_add_no_space_stripe(log, sh); sh 1076 drivers/md/raid5-cache.c ret = r5l_log_stripe(log, sh, data_pages, parity_pages); sh 1079 drivers/md/raid5-cache.c list_add_tail(&sh->log_list, sh 1136 drivers/md/raid5-cache.c struct stripe_head *sh; sh 1140 drivers/md/raid5-cache.c sh = list_first_entry(&log->no_space_stripes, sh 1142 drivers/md/raid5-cache.c list_del_init(&sh->log_list); sh 1143 drivers/md/raid5-cache.c set_bit(STRIPE_HANDLE, &sh->state); sh 1144 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 1156 drivers/md/raid5-cache.c struct stripe_head *sh; sh 1170 drivers/md/raid5-cache.c sh = list_first_entry(&conf->log->stripe_in_journal_list, sh 1172 drivers/md/raid5-cache.c new_cp = sh->log_start; sh 1187 drivers/md/raid5-cache.c struct stripe_head *sh; sh 1192 drivers/md/raid5-cache.c sh = list_first_entry(&log->no_mem_stripes, sh 1194 drivers/md/raid5-cache.c list_del_init(&sh->log_list); sh 1195 drivers/md/raid5-cache.c set_bit(STRIPE_HANDLE, &sh->state); sh 1196 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 1246 drivers/md/raid5-cache.c void r5l_stripe_write_finished(struct stripe_head *sh) sh 1250 drivers/md/raid5-cache.c io = sh->log_io; sh 1251 drivers/md/raid5-cache.c sh->log_io = NULL; sh 1366 drivers/md/raid5-cache.c static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh) sh 1368 drivers/md/raid5-cache.c BUG_ON(list_empty(&sh->lru)); sh 1369 drivers/md/raid5-cache.c BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 1370 drivers/md/raid5-cache.c BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); sh 1376 drivers/md/raid5-cache.c BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); sh 1379 drivers/md/raid5-cache.c list_del_init(&sh->lru); sh 1380 drivers/md/raid5-cache.c atomic_inc(&sh->count); sh 1382 drivers/md/raid5-cache.c set_bit(STRIPE_HANDLE, &sh->state); sh 1384 drivers/md/raid5-cache.c r5c_make_stripe_write_out(sh); sh 1386 drivers/md/raid5-cache.c if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) sh 1390 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 1402 drivers/md/raid5-cache.c struct stripe_head *sh, *next; sh 1409 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) { sh 1410 drivers/md/raid5-cache.c r5c_flush_stripe(conf, sh); sh 1416 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, sh 1418 drivers/md/raid5-cache.c r5c_flush_stripe(conf, sh); sh 1427 drivers/md/raid5-cache.c struct stripe_head *sh; sh 1472 drivers/md/raid5-cache.c list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) { sh 1481 drivers/md/raid5-cache.c if (!list_empty(&sh->lru) && sh 1482 drivers/md/raid5-cache.c 
!test_bit(STRIPE_HANDLE, &sh->state) && sh 1483 drivers/md/raid5-cache.c atomic_read(&sh->count) == 0) { sh 1484 drivers/md/raid5-cache.c r5c_flush_stripe(conf, sh); sh 1813 drivers/md/raid5-cache.c struct stripe_head *sh, sh 1824 drivers/md/raid5-cache.c &dd_idx, sh); sh 1825 drivers/md/raid5-cache.c r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset); sh 1826 drivers/md/raid5-cache.c sh->dev[dd_idx].log_checksum = sh 1830 drivers/md/raid5-cache.c set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags); sh 1831 drivers/md/raid5-cache.c set_bit(STRIPE_R5C_CACHING, &sh->state); sh 1835 drivers/md/raid5-cache.c struct stripe_head *sh, sh 1844 drivers/md/raid5-cache.c r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset); sh 1845 drivers/md/raid5-cache.c sh->dev[sh->pd_idx].log_checksum = sh 1847 drivers/md/raid5-cache.c set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags); sh 1849 drivers/md/raid5-cache.c if (sh->qd_idx >= 0) { sh 1851 drivers/md/raid5-cache.c log, ctx, sh->dev[sh->qd_idx].page, sh 1853 drivers/md/raid5-cache.c sh->dev[sh->qd_idx].log_checksum = sh 1855 drivers/md/raid5-cache.c set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags); sh 1857 drivers/md/raid5-cache.c clear_bit(STRIPE_R5C_CACHING, &sh->state); sh 1860 drivers/md/raid5-cache.c static void r5l_recovery_reset_stripe(struct stripe_head *sh) sh 1864 drivers/md/raid5-cache.c sh->state = 0; sh 1865 drivers/md/raid5-cache.c sh->log_start = MaxSector; sh 1866 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) sh 1867 drivers/md/raid5-cache.c sh->dev[i].flags = 0; sh 1872 drivers/md/raid5-cache.c struct stripe_head *sh, sh 1879 drivers/md/raid5-cache.c for (disk_index = 0; disk_index < sh->disks; disk_index++) { sh 1880 drivers/md/raid5-cache.c if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) sh 1882 drivers/md/raid5-cache.c if (disk_index == sh->qd_idx || disk_index == sh->pd_idx) sh 1895 drivers/md/raid5-cache.c for (disk_index = 0; disk_index < sh->disks; disk_index++) { sh 1896 drivers/md/raid5-cache.c if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) sh 1905 drivers/md/raid5-cache.c sync_page_io(rdev, sh->sector, PAGE_SIZE, sh 1906 drivers/md/raid5-cache.c sh->dev[disk_index].page, REQ_OP_WRITE, 0, sh 1915 drivers/md/raid5-cache.c sync_page_io(rrdev, sh->sector, PAGE_SIZE, sh 1916 drivers/md/raid5-cache.c sh->dev[disk_index].page, REQ_OP_WRITE, 0, sh 1925 drivers/md/raid5-cache.c r5l_recovery_reset_stripe(sh); sh 1934 drivers/md/raid5-cache.c struct stripe_head *sh; sh 1936 drivers/md/raid5-cache.c sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0); sh 1937 drivers/md/raid5-cache.c if (!sh) sh 1940 drivers/md/raid5-cache.c r5l_recovery_reset_stripe(sh); sh 1942 drivers/md/raid5-cache.c return sh; sh 1948 drivers/md/raid5-cache.c struct stripe_head *sh; sh 1950 drivers/md/raid5-cache.c list_for_each_entry(sh, list, lru) sh 1951 drivers/md/raid5-cache.c if (sh->sector == sect) sh 1952 drivers/md/raid5-cache.c return sh; sh 1960 drivers/md/raid5-cache.c struct stripe_head *sh, *next; sh 1962 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, cached_stripe_list, lru) { sh 1963 drivers/md/raid5-cache.c r5l_recovery_reset_stripe(sh); sh 1964 drivers/md/raid5-cache.c list_del_init(&sh->lru); sh 1965 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 1973 drivers/md/raid5-cache.c struct stripe_head *sh, *next; sh 1975 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, cached_stripe_list, lru) sh 1976 drivers/md/raid5-cache.c if (!test_bit(STRIPE_R5C_CACHING, 
&sh->state)) { sh 1977 drivers/md/raid5-cache.c r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx); sh 1978 drivers/md/raid5-cache.c list_del_init(&sh->lru); sh 1979 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 2090 drivers/md/raid5-cache.c struct stripe_head *sh; sh 2120 drivers/md/raid5-cache.c sh = r5c_recovery_lookup_stripe(cached_stripe_list, sh 2122 drivers/md/raid5-cache.c if (sh) { sh 2123 drivers/md/raid5-cache.c WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 2124 drivers/md/raid5-cache.c r5l_recovery_reset_stripe(sh); sh 2125 drivers/md/raid5-cache.c list_del_init(&sh->lru); sh 2126 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 2142 drivers/md/raid5-cache.c sh = r5c_recovery_lookup_stripe(cached_stripe_list, sh 2145 drivers/md/raid5-cache.c if (!sh) { sh 2146 drivers/md/raid5-cache.c sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1); sh 2151 drivers/md/raid5-cache.c if (!sh) { sh 2154 drivers/md/raid5-cache.c sh = r5c_recovery_alloc_stripe( sh 2157 drivers/md/raid5-cache.c if (!sh) { sh 2172 drivers/md/raid5-cache.c sh = r5c_recovery_alloc_stripe( sh 2175 drivers/md/raid5-cache.c if (!sh) { sh 2180 drivers/md/raid5-cache.c list_add_tail(&sh->lru, cached_stripe_list); sh 2184 drivers/md/raid5-cache.c if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && sh 2185 drivers/md/raid5-cache.c test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { sh 2186 drivers/md/raid5-cache.c r5l_recovery_replay_one_stripe(conf, sh, ctx); sh 2187 drivers/md/raid5-cache.c list_move_tail(&sh->lru, cached_stripe_list); sh 2189 drivers/md/raid5-cache.c r5l_recovery_load_data(log, sh, ctx, payload, sh 2192 drivers/md/raid5-cache.c r5l_recovery_load_parity(log, sh, ctx, payload, sh 2213 drivers/md/raid5-cache.c struct stripe_head *sh) sh 2218 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) { sh 2219 drivers/md/raid5-cache.c dev = sh->dev + i; sh 2246 drivers/md/raid5-cache.c struct stripe_head *sh; sh 2275 drivers/md/raid5-cache.c list_for_each_entry(sh, &ctx->cached_list, lru) { sh 2276 drivers/md/raid5-cache.c WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 2277 drivers/md/raid5-cache.c r5c_recovery_load_one_stripe(log, sh); sh 2357 drivers/md/raid5-cache.c struct stripe_head *sh; sh 2371 drivers/md/raid5-cache.c list_for_each_entry(sh, &ctx->cached_list, lru) { sh 2377 drivers/md/raid5-cache.c WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 2384 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) { sh 2385 drivers/md/raid5-cache.c struct r5dev *dev = &sh->dev[i]; sh 2395 drivers/md/raid5-cache.c raid5_compute_blocknr(sh, i, 0)); sh 2415 drivers/md/raid5-cache.c sh->log_start = ctx->pos; sh 2416 drivers/md/raid5-cache.c list_add_tail(&sh->r5c, &log->stripe_in_journal_list); sh 2420 drivers/md/raid5-cache.c next_checkpoint = sh->log_start; sh 2432 drivers/md/raid5-cache.c struct stripe_head *sh, *next; sh 2439 drivers/md/raid5-cache.c list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { sh 2440 drivers/md/raid5-cache.c r5c_make_stripe_write_out(sh); sh 2441 drivers/md/raid5-cache.c set_bit(STRIPE_HANDLE, &sh->state); sh 2442 drivers/md/raid5-cache.c list_del_init(&sh->lru); sh 2443 drivers/md/raid5-cache.c raid5_release_stripe(sh); sh 2635 drivers/md/raid5-cache.c struct stripe_head *sh, sh 2650 drivers/md/raid5-cache.c if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { sh 2668 drivers/md/raid5-cache.c set_bit(STRIPE_R5C_CACHING, &sh->state); sh 2679 drivers/md/raid5-cache.c if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) { sh 2680 
drivers/md/raid5-cache.c r5c_make_stripe_write_out(sh); sh 2685 drivers/md/raid5-cache.c dev = &sh->dev[i]; sh 2689 drivers/md/raid5-cache.c r5c_make_stripe_write_out(sh); sh 2695 drivers/md/raid5-cache.c if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && sh 2696 drivers/md/raid5-cache.c !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { sh 2697 drivers/md/raid5-cache.c tree_index = r5c_tree_index(conf, sh->sector); sh 2718 drivers/md/raid5-cache.c r5c_make_stripe_write_out(sh); sh 2728 drivers/md/raid5-cache.c set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state); sh 2733 drivers/md/raid5-cache.c dev = &sh->dev[i]; sh 2749 drivers/md/raid5-cache.c set_bit(STRIPE_LOG_TRAPPED, &sh->state); sh 2758 drivers/md/raid5-cache.c void r5c_release_extra_page(struct stripe_head *sh) sh 2760 drivers/md/raid5-cache.c struct r5conf *conf = sh->raid_conf; sh 2765 drivers/md/raid5-cache.c sh->dev[0].orig_page == conf->disks[0].extra_page; sh 2767 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) sh 2768 drivers/md/raid5-cache.c if (sh->dev[i].page != sh->dev[i].orig_page) { sh 2769 drivers/md/raid5-cache.c struct page *p = sh->dev[i].orig_page; sh 2771 drivers/md/raid5-cache.c sh->dev[i].orig_page = sh->dev[i].page; sh 2772 drivers/md/raid5-cache.c clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); sh 2784 drivers/md/raid5-cache.c void r5c_use_extra_page(struct stripe_head *sh) sh 2786 drivers/md/raid5-cache.c struct r5conf *conf = sh->raid_conf; sh 2790 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) { sh 2791 drivers/md/raid5-cache.c dev = &sh->dev[i]; sh 2803 drivers/md/raid5-cache.c struct stripe_head *sh, sh 2813 drivers/md/raid5-cache.c if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)) sh 2816 drivers/md/raid5-cache.c WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); sh 2817 drivers/md/raid5-cache.c clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); sh 2822 drivers/md/raid5-cache.c for (i = sh->disks; i--; ) { sh 2823 drivers/md/raid5-cache.c clear_bit(R5_InJournal, &sh->dev[i].flags); sh 2824 drivers/md/raid5-cache.c if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) sh 2834 drivers/md/raid5-cache.c if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) sh 2842 drivers/md/raid5-cache.c list_del_init(&sh->r5c); sh 2844 drivers/md/raid5-cache.c sh->log_start = MaxSector; sh 2850 drivers/md/raid5-cache.c if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) || sh 2851 drivers/md/raid5-cache.c test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { sh 2852 drivers/md/raid5-cache.c tree_index = r5c_tree_index(conf, sh->sector); sh 2869 drivers/md/raid5-cache.c if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) { sh 2875 drivers/md/raid5-cache.c if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { sh 2881 drivers/md/raid5-cache.c r5l_append_flush_payload(log, sh->sector); sh 2883 drivers/md/raid5-cache.c if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) sh 2884 drivers/md/raid5-cache.c set_bit(STRIPE_HANDLE, &sh->state); sh 2887 drivers/md/raid5-cache.c int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) sh 2889 drivers/md/raid5-cache.c struct r5conf *conf = sh->raid_conf; sh 2897 drivers/md/raid5-cache.c for (i = 0; i < sh->disks; i++) { sh 2900 drivers/md/raid5-cache.c if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) sh 2902 drivers/md/raid5-cache.c addr = kmap_atomic(sh->dev[i].page); sh 2903 drivers/md/raid5-cache.c sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, sh 2914 drivers/md/raid5-cache.c clear_bit(STRIPE_DELAYED, &sh->state); sh 2915 
drivers/md/raid5-cache.c atomic_inc(&sh->count); sh 2922 drivers/md/raid5-cache.c sh->log_start == MaxSector) sh 2923 drivers/md/raid5-cache.c r5l_add_no_space_stripe(log, sh); sh 2925 drivers/md/raid5-cache.c if (sh->log_start == log->last_checkpoint) sh 2928 drivers/md/raid5-cache.c r5l_add_no_space_stripe(log, sh); sh 2930 drivers/md/raid5-cache.c ret = r5l_log_stripe(log, sh, pages, 0); sh 2933 drivers/md/raid5-cache.c list_add_tail(&sh->log_list, &log->no_mem_stripes); sh 10 drivers/md/raid5-log.h extern void r5l_stripe_write_finished(struct stripe_head *sh); sh 16 drivers/md/raid5-log.h r5c_try_caching_write(struct r5conf *conf, struct stripe_head *sh, sh 19 drivers/md/raid5-log.h r5c_finish_stripe_write_out(struct r5conf *conf, struct stripe_head *sh, sh 21 drivers/md/raid5-log.h extern void r5c_release_extra_page(struct stripe_head *sh); sh 22 drivers/md/raid5-log.h extern void r5c_use_extra_page(struct stripe_head *sh); sh 25 drivers/md/raid5-log.h struct stripe_head *sh, int disks); sh 26 drivers/md/raid5-log.h extern int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh); sh 27 drivers/md/raid5-log.h extern void r5c_make_stripe_write_out(struct stripe_head *sh); sh 38 drivers/md/raid5-log.h ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu, sh 42 drivers/md/raid5-log.h extern int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh); sh 44 drivers/md/raid5-log.h extern void ppl_stripe_write_finished(struct stripe_head *sh); sh 60 drivers/md/raid5-log.h static inline int log_stripe(struct stripe_head *sh, struct stripe_head_state *s) sh 62 drivers/md/raid5-log.h struct r5conf *conf = sh->raid_conf; sh 65 drivers/md/raid5-log.h if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { sh 69 drivers/md/raid5-log.h return r5l_write_stripe(conf->log, sh); sh 70 drivers/md/raid5-log.h } else if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) { sh 72 drivers/md/raid5-log.h return r5c_cache_data(conf->log, sh); sh 75 drivers/md/raid5-log.h return ppl_write_stripe(conf, sh); sh 81 drivers/md/raid5-log.h static inline void log_stripe_write_finished(struct stripe_head *sh) sh 83 drivers/md/raid5-log.h struct r5conf *conf = sh->raid_conf; sh 86 drivers/md/raid5-log.h r5l_stripe_write_finished(sh); sh 88 drivers/md/raid5-log.h ppl_stripe_write_finished(sh); sh 158 drivers/md/raid5-ppl.c ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu, sh 161 drivers/md/raid5-ppl.c int disks = sh->disks; sh 163 drivers/md/raid5-ppl.c int count = 0, pd_idx = sh->pd_idx, i; sh 166 drivers/md/raid5-ppl.c pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); sh 174 drivers/md/raid5-ppl.c if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { sh 180 drivers/md/raid5-ppl.c srcs[count++] = sh->dev[pd_idx].page; sh 181 drivers/md/raid5-ppl.c } else if (sh->reconstruct_state == reconstruct_state_drain_run) { sh 184 drivers/md/raid5-ppl.c struct r5dev *dev = &sh->dev[i]; sh 193 drivers/md/raid5-ppl.c NULL, sh, (void *) (srcs + sh->disks + 2)); sh 196 drivers/md/raid5-ppl.c tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE, sh 199 drivers/md/raid5-ppl.c tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE, sh 233 drivers/md/raid5-ppl.c struct stripe_head *sh) sh 266 drivers/md/raid5-ppl.c static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh) sh 274 drivers/md/raid5-ppl.c struct r5conf *conf = sh->raid_conf; sh 276 drivers/md/raid5-ppl.c pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector); 
sh 288 drivers/md/raid5-ppl.c io = ppl_new_iounit(log, sh); sh 298 drivers/md/raid5-ppl.c for (i = 0; i < sh->disks; i++) { sh 299 drivers/md/raid5-ppl.c struct r5dev *dev = &sh->dev[i]; sh 301 drivers/md/raid5-ppl.c if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) { sh 327 drivers/md/raid5-ppl.c if ((sh->sector == sh_last->sector + STRIPE_SECTORS) && sh 338 drivers/md/raid5-ppl.c e->parity_disk = cpu_to_le32(sh->pd_idx); sh 345 drivers/md/raid5-ppl.c if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) { sh 349 drivers/md/raid5-ppl.c page_address(sh->ppl_page), sh 353 drivers/md/raid5-ppl.c list_add_tail(&sh->log_list, &io->stripe_list); sh 355 drivers/md/raid5-ppl.c sh->ppl_io = io; sh 360 drivers/md/raid5-ppl.c int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh) sh 363 drivers/md/raid5-ppl.c struct ppl_io_unit *io = sh->ppl_io; sh 366 drivers/md/raid5-ppl.c if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page || sh 367 drivers/md/raid5-ppl.c !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || sh 368 drivers/md/raid5-ppl.c !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) { sh 369 drivers/md/raid5-ppl.c clear_bit(STRIPE_LOG_TRAPPED, &sh->state); sh 373 drivers/md/raid5-ppl.c log = &ppl_conf->child_logs[sh->pd_idx]; sh 382 drivers/md/raid5-ppl.c set_bit(STRIPE_LOG_TRAPPED, &sh->state); sh 383 drivers/md/raid5-ppl.c clear_bit(STRIPE_DELAYED, &sh->state); sh 384 drivers/md/raid5-ppl.c atomic_inc(&sh->count); sh 386 drivers/md/raid5-ppl.c if (ppl_log_stripe(log, sh)) { sh 388 drivers/md/raid5-ppl.c list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes); sh 402 drivers/md/raid5-ppl.c struct stripe_head *sh, *next; sh 409 drivers/md/raid5-ppl.c list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { sh 410 drivers/md/raid5-ppl.c list_del_init(&sh->log_list); sh 412 drivers/md/raid5-ppl.c set_bit(STRIPE_HANDLE, &sh->state); sh 413 drivers/md/raid5-ppl.c raid5_release_stripe(sh); sh 435 drivers/md/raid5-ppl.c struct stripe_head *sh; sh 482 drivers/md/raid5-ppl.c list_for_each_entry(sh, &io->stripe_list, log_list) { sh 483 drivers/md/raid5-ppl.c for (i = 0; i < sh->disks; i++) { sh 484 drivers/md/raid5-ppl.c struct r5dev *dev = &sh->dev[i]; sh 493 drivers/md/raid5-ppl.c if (test_bit(STRIPE_FULL_WRITE, &sh->state)) sh 496 drivers/md/raid5-ppl.c if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { sh 505 drivers/md/raid5-ppl.c bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); sh 572 drivers/md/raid5-ppl.c struct stripe_head *sh; sh 574 drivers/md/raid5-ppl.c sh = list_first_entry(&ppl_conf->no_mem_stripes, sh 576 drivers/md/raid5-ppl.c list_del_init(&sh->log_list); sh 577 drivers/md/raid5-ppl.c set_bit(STRIPE_HANDLE, &sh->state); sh 578 drivers/md/raid5-ppl.c raid5_release_stripe(sh); sh 701 drivers/md/raid5-ppl.c void ppl_stripe_write_finished(struct stripe_head *sh) sh 705 drivers/md/raid5-ppl.c io = sh->ppl_io; sh 706 drivers/md/raid5-ppl.c sh->ppl_io = NULL; sh 856 drivers/md/raid5-ppl.c struct stripe_head sh; sh 944 drivers/md/raid5-ppl.c 0, &disk, &sh); sh 945 drivers/md/raid5-ppl.c BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk)); sh 946 drivers/md/raid5-ppl.c parity_rdev = conf->disks[sh.pd_idx].rdev; sh 112 drivers/md/raid5.c static inline int raid6_d0(struct stripe_head *sh) sh 114 drivers/md/raid5.c if (sh->ddf_layout) sh 118 drivers/md/raid5.c if (sh->qd_idx == sh->disks - 1) sh 121 drivers/md/raid5.c return sh->qd_idx + 1; sh 134 drivers/md/raid5.c static int raid6_idx_to_slot(int idx, struct stripe_head *sh, sh 139 drivers/md/raid5.c if 
(sh->ddf_layout) sh 141 drivers/md/raid5.c if (idx == sh->pd_idx) sh 143 drivers/md/raid5.c if (idx == sh->qd_idx) sh 145 drivers/md/raid5.c if (!sh->ddf_layout) sh 152 drivers/md/raid5.c static int stripe_operations_active(struct stripe_head *sh) sh 154 drivers/md/raid5.c return sh->check_state || sh->reconstruct_state || sh 155 drivers/md/raid5.c test_bit(STRIPE_BIOFILL_RUN, &sh->state) || sh 156 drivers/md/raid5.c test_bit(STRIPE_COMPUTE_RUN, &sh->state); sh 159 drivers/md/raid5.c static bool stripe_is_lowprio(struct stripe_head *sh) sh 161 drivers/md/raid5.c return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || sh 162 drivers/md/raid5.c test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && sh 163 drivers/md/raid5.c !test_bit(STRIPE_R5C_CACHING, &sh->state); sh 166 drivers/md/raid5.c static void raid5_wakeup_stripe_thread(struct stripe_head *sh) sh 168 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 171 drivers/md/raid5.c int i, cpu = sh->cpu; sh 175 drivers/md/raid5.c sh->cpu = cpu; sh 178 drivers/md/raid5.c if (list_empty(&sh->lru)) { sh 181 drivers/md/raid5.c if (stripe_is_lowprio(sh)) sh 182 drivers/md/raid5.c list_add_tail(&sh->lru, &group->loprio_list); sh 184 drivers/md/raid5.c list_add_tail(&sh->lru, &group->handle_list); sh 186 drivers/md/raid5.c sh->group = group; sh 194 drivers/md/raid5.c group = conf->worker_groups + cpu_to_group(sh->cpu); sh 198 drivers/md/raid5.c queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); sh 205 drivers/md/raid5.c queue_work_on(sh->cpu, raid5_wq, sh 212 drivers/md/raid5.c static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, sh 218 drivers/md/raid5.c BUG_ON(!list_empty(&sh->lru)); sh 222 drivers/md/raid5.c for (i = sh->disks; i--; ) sh 223 drivers/md/raid5.c if (test_bit(R5_InJournal, &sh->dev[i].flags)) sh 232 drivers/md/raid5.c if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || sh 234 drivers/md/raid5.c !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { sh 235 drivers/md/raid5.c if (test_bit(STRIPE_R5C_CACHING, &sh->state)) sh 236 drivers/md/raid5.c r5c_make_stripe_write_out(sh); sh 237 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 240 drivers/md/raid5.c if (test_bit(STRIPE_HANDLE, &sh->state)) { sh 241 drivers/md/raid5.c if (test_bit(STRIPE_DELAYED, &sh->state) && sh 242 drivers/md/raid5.c !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 243 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->delayed_list); sh 244 drivers/md/raid5.c else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && sh 245 drivers/md/raid5.c sh->bm_seq - conf->seq_write > 0) sh 246 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->bitmap_list); sh 248 drivers/md/raid5.c clear_bit(STRIPE_DELAYED, &sh->state); sh 249 drivers/md/raid5.c clear_bit(STRIPE_BIT_DELAY, &sh->state); sh 251 drivers/md/raid5.c if (stripe_is_lowprio(sh)) sh 252 drivers/md/raid5.c list_add_tail(&sh->lru, sh 255 drivers/md/raid5.c list_add_tail(&sh->lru, sh 258 drivers/md/raid5.c raid5_wakeup_stripe_thread(sh); sh 264 drivers/md/raid5.c BUG_ON(stripe_operations_active(sh)); sh 265 drivers/md/raid5.c if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 270 drivers/md/raid5.c if (!test_bit(STRIPE_EXPANDING, &sh->state)) { sh 272 drivers/md/raid5.c list_add_tail(&sh->lru, temp_inactive_list); sh 274 drivers/md/raid5.c WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); sh 276 drivers/md/raid5.c list_add_tail(&sh->lru, temp_inactive_list); sh 279 drivers/md/raid5.c if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) sh 281 
drivers/md/raid5.c if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) sh 283 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); sh 291 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); sh 297 drivers/md/raid5.c static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, sh 300 drivers/md/raid5.c if (atomic_dec_and_test(&sh->count)) sh 301 drivers/md/raid5.c do_release_stripe(conf, sh, temp_inactive_list); sh 357 drivers/md/raid5.c struct stripe_head *sh, *t; sh 363 drivers/md/raid5.c llist_for_each_entry_safe(sh, t, head, release_list) { sh 368 drivers/md/raid5.c clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); sh 374 drivers/md/raid5.c hash = sh->hash_lock_index; sh 375 drivers/md/raid5.c __release_stripe(conf, sh, &temp_inactive_list[hash]); sh 382 drivers/md/raid5.c void raid5_release_stripe(struct stripe_head *sh) sh 384 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 392 drivers/md/raid5.c if (atomic_add_unless(&sh->count, -1, 1)) sh 396 drivers/md/raid5.c test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) sh 398 drivers/md/raid5.c wakeup = llist_add(&sh->release_list, &conf->released_stripes); sh 404 drivers/md/raid5.c if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { sh 406 drivers/md/raid5.c hash = sh->hash_lock_index; sh 407 drivers/md/raid5.c do_release_stripe(conf, sh, &list); sh 413 drivers/md/raid5.c static inline void remove_hash(struct stripe_head *sh) sh 416 drivers/md/raid5.c (unsigned long long)sh->sector); sh 418 drivers/md/raid5.c hlist_del_init(&sh->hash); sh 421 drivers/md/raid5.c static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) sh 423 drivers/md/raid5.c struct hlist_head *hp = stripe_hash(conf, sh->sector); sh 426 drivers/md/raid5.c (unsigned long long)sh->sector); sh 428 drivers/md/raid5.c hlist_add_head(&sh->hash, hp); sh 434 drivers/md/raid5.c struct stripe_head *sh = NULL; sh 440 drivers/md/raid5.c sh = list_entry(first, struct stripe_head, lru); sh 442 drivers/md/raid5.c remove_hash(sh); sh 444 drivers/md/raid5.c BUG_ON(hash != sh->hash_lock_index); sh 448 drivers/md/raid5.c return sh; sh 451 drivers/md/raid5.c static void shrink_buffers(struct stripe_head *sh) sh 455 drivers/md/raid5.c int num = sh->raid_conf->pool_size; sh 458 drivers/md/raid5.c WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); sh 459 drivers/md/raid5.c p = sh->dev[i].page; sh 462 drivers/md/raid5.c sh->dev[i].page = NULL; sh 467 drivers/md/raid5.c static int grow_buffers(struct stripe_head *sh, gfp_t gfp) sh 470 drivers/md/raid5.c int num = sh->raid_conf->pool_size; sh 478 drivers/md/raid5.c sh->dev[i].page = page; sh 479 drivers/md/raid5.c sh->dev[i].orig_page = page; sh 486 drivers/md/raid5.c struct stripe_head *sh); sh 488 drivers/md/raid5.c static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) sh 490 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 493 drivers/md/raid5.c BUG_ON(atomic_read(&sh->count) != 0); sh 494 drivers/md/raid5.c BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); sh 495 drivers/md/raid5.c BUG_ON(stripe_operations_active(sh)); sh 496 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 502 drivers/md/raid5.c sh->generation = conf->generation - previous; sh 503 drivers/md/raid5.c sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; sh 504 drivers/md/raid5.c sh->sector = sector; sh 505 drivers/md/raid5.c stripe_set_idx(sector, conf, previous, sh); sh 506 drivers/md/raid5.c sh->state = 0; sh 508 drivers/md/raid5.c for (i = sh->disks; i--; ) { sh 509 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 514 drivers/md/raid5.c (unsigned long long)sh->sector, i, dev->toread, sh 520 drivers/md/raid5.c dev->sector = raid5_compute_blocknr(sh, i, previous); sh 524 drivers/md/raid5.c sh->overwrite_disks = 0; sh 525 drivers/md/raid5.c insert_hash(conf, sh); sh 526 drivers/md/raid5.c sh->cpu = smp_processor_id(); sh 527 drivers/md/raid5.c set_bit(STRIPE_BATCH_READY, &sh->state); sh 533 drivers/md/raid5.c struct stripe_head *sh; sh 536 drivers/md/raid5.c hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) sh 537 drivers/md/raid5.c if (sh->sector == sector && sh->generation == generation) sh 538 drivers/md/raid5.c return sh; sh 629 drivers/md/raid5.c struct stripe_head *sh; sh 641 drivers/md/raid5.c sh = __find_stripe(conf, sector, conf->generation - previous); sh 642 drivers/md/raid5.c if (!sh) { sh 644 drivers/md/raid5.c sh = get_free_stripe(conf, hash); sh 645 drivers/md/raid5.c if (!sh && !test_bit(R5_DID_ALLOC, sh 650 drivers/md/raid5.c if (noblock && sh == NULL) sh 654 drivers/md/raid5.c if (!sh) { sh 669 drivers/md/raid5.c init_stripe(sh, sector, previous); sh 670 drivers/md/raid5.c atomic_inc(&sh->count); sh 672 drivers/md/raid5.c } else if (!atomic_inc_not_zero(&sh->count)) { sh 674 drivers/md/raid5.c if (!atomic_read(&sh->count)) { sh 675 drivers/md/raid5.c if (!test_bit(STRIPE_HANDLE, &sh->state)) sh 677 drivers/md/raid5.c BUG_ON(list_empty(&sh->lru) && sh 678 drivers/md/raid5.c !test_bit(STRIPE_EXPANDING, &sh->state)); sh 682 drivers/md/raid5.c list_del_init(&sh->lru); sh 685 drivers/md/raid5.c if (sh->group) { sh 686 drivers/md/raid5.c sh->group->stripes_cnt--; sh 687 drivers/md/raid5.c sh->group = NULL; sh 690 drivers/md/raid5.c atomic_inc(&sh->count); sh 693 drivers/md/raid5.c } while (sh == NULL); sh 696 drivers/md/raid5.c return sh; sh 699 drivers/md/raid5.c static bool is_full_stripe_write(struct stripe_head *sh) sh 701 drivers/md/raid5.c BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); sh 702 drivers/md/raid5.c return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); sh 727 drivers/md/raid5.c static bool stripe_can_batch(struct stripe_head *sh) sh 729 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 733 drivers/md/raid5.c return test_bit(STRIPE_BATCH_READY, &sh->state) && sh 734 drivers/md/raid5.c !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && sh 735 drivers/md/raid5.c is_full_stripe_write(sh); sh 739 drivers/md/raid5.c static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) sh 748 drivers/md/raid5.c tmp_sec = sh->sector; sh 751 drivers/md/raid5.c head_sector = sh->sector - STRIPE_SECTORS; sh 784 drivers/md/raid5.c lock_two_stripes(head, sh); sh 786 drivers/md/raid5.c if (!stripe_can_batch(head) || !stripe_can_batch(sh)) sh 789 drivers/md/raid5.c if (sh->batch_head) sh 793 drivers/md/raid5.c while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) sh 795 drivers/md/raid5.c if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || sh 796 drivers/md/raid5.c bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) sh 813 drivers/md/raid5.c sh->batch_head = head->batch_head; sh 819 drivers/md/raid5.c list_add(&sh->batch_list, &head->batch_list); sh 823 
drivers/md/raid5.c sh->batch_head = head->batch_head; sh 825 drivers/md/raid5.c list_add_tail(&sh->batch_list, &head->batch_list); sh 829 drivers/md/raid5.c if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 834 drivers/md/raid5.c if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { sh 835 drivers/md/raid5.c int seq = sh->bm_seq; sh 836 drivers/md/raid5.c if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && sh 837 drivers/md/raid5.c sh->batch_head->bm_seq > seq) sh 838 drivers/md/raid5.c seq = sh->batch_head->bm_seq; sh 839 drivers/md/raid5.c set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); sh 840 drivers/md/raid5.c sh->batch_head->bm_seq = seq; sh 843 drivers/md/raid5.c atomic_inc(&sh->count); sh 845 drivers/md/raid5.c unlock_two_stripes(head, sh); sh 853 drivers/md/raid5.c static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) sh 863 drivers/md/raid5.c if (sh->generation == conf->generation - 1) sh 979 drivers/md/raid5.c static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) sh 981 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 982 drivers/md/raid5.c int i, disks = sh->disks; sh 983 drivers/md/raid5.c struct stripe_head *head_sh = sh; sh 989 drivers/md/raid5.c if (log_stripe(sh, s) == 0) sh 1000 drivers/md/raid5.c sh = head_sh; sh 1001 drivers/md/raid5.c if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { sh 1003 drivers/md/raid5.c if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) sh 1005 drivers/md/raid5.c if (test_bit(R5_Discard, &sh->dev[i].flags)) sh 1007 drivers/md/raid5.c } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) sh 1010 drivers/md/raid5.c &sh->dev[i].flags)) { sh 1015 drivers/md/raid5.c if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) sh 1019 drivers/md/raid5.c bi = &sh->dev[i].req; sh 1020 drivers/md/raid5.c rbi = &sh->dev[i].rreq; /* For writing to replacement */ sh 1060 drivers/md/raid5.c int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, sh 1094 drivers/md/raid5.c set_bit(STRIPE_IO_STARTED, &sh->state); sh 1101 drivers/md/raid5.c bi->bi_private = sh; sh 1104 drivers/md/raid5.c __func__, (unsigned long long)sh->sector, sh 1106 drivers/md/raid5.c atomic_inc(&sh->count); sh 1107 drivers/md/raid5.c if (sh != head_sh) sh 1109 drivers/md/raid5.c if (use_new_offset(conf, sh)) sh 1110 drivers/md/raid5.c bi->bi_iter.bi_sector = (sh->sector sh 1113 drivers/md/raid5.c bi->bi_iter.bi_sector = (sh->sector sh 1118 drivers/md/raid5.c if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) sh 1119 drivers/md/raid5.c WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); sh 1122 drivers/md/raid5.c test_bit(R5_InJournal, &sh->dev[i].flags)) sh 1128 drivers/md/raid5.c sh->dev[i].vec.bv_page = sh->dev[i].orig_page; sh 1130 drivers/md/raid5.c sh->dev[i].vec.bv_page = sh->dev[i].page; sh 1135 drivers/md/raid5.c bi->bi_write_hint = sh->dev[i].write_hint; sh 1137 drivers/md/raid5.c sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET; sh 1145 drivers/md/raid5.c set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); sh 1150 drivers/md/raid5.c sh->dev[i].sector); sh 1161 drivers/md/raid5.c set_bit(STRIPE_IO_STARTED, &sh->state); sh 1167 drivers/md/raid5.c rbi->bi_private = sh; sh 1171 drivers/md/raid5.c __func__, (unsigned long long)sh->sector, sh 1173 drivers/md/raid5.c atomic_inc(&sh->count); sh 1174 drivers/md/raid5.c if (sh != head_sh) sh 1176 drivers/md/raid5.c if (use_new_offset(conf, sh)) sh 1177 drivers/md/raid5.c rbi->bi_iter.bi_sector = (sh->sector sh 1180 drivers/md/raid5.c rbi->bi_iter.bi_sector = 
(sh->sector sh 1182 drivers/md/raid5.c if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) sh 1183 drivers/md/raid5.c WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); sh 1184 drivers/md/raid5.c sh->dev[i].rvec.bv_page = sh->dev[i].page; sh 1189 drivers/md/raid5.c rbi->bi_write_hint = sh->dev[i].write_hint; sh 1190 drivers/md/raid5.c sh->dev[i].write_hint = RWF_WRITE_LIFE_NOT_SET; sh 1200 drivers/md/raid5.c sh->dev[i].sector); sh 1208 drivers/md/raid5.c set_bit(STRIPE_DEGRADED, &sh->state); sh 1210 drivers/md/raid5.c bi->bi_opf, i, (unsigned long long)sh->sector); sh 1211 drivers/md/raid5.c clear_bit(R5_LOCKED, &sh->dev[i].flags); sh 1212 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 1217 drivers/md/raid5.c sh = list_first_entry(&sh->batch_list, struct stripe_head, sh 1219 drivers/md/raid5.c if (sh != head_sh) sh 1230 drivers/md/raid5.c struct stripe_head *sh, int no_skipcopy) sh 1268 drivers/md/raid5.c if (sh->raid_conf->skip_copy && sh 1293 drivers/md/raid5.c struct stripe_head *sh = stripe_head_ref; sh 1297 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1300 drivers/md/raid5.c for (i = sh->disks; i--; ) { sh 1301 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 1322 drivers/md/raid5.c clear_bit(STRIPE_BIOFILL_RUN, &sh->state); sh 1324 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 1325 drivers/md/raid5.c raid5_release_stripe(sh); sh 1328 drivers/md/raid5.c static void ops_run_biofill(struct stripe_head *sh) sh 1334 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 1336 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1338 drivers/md/raid5.c for (i = sh->disks; i--; ) { sh 1339 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 1342 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 1345 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 1349 drivers/md/raid5.c dev->sector, tx, sh, 0); sh 1355 drivers/md/raid5.c atomic_inc(&sh->count); sh 1356 drivers/md/raid5.c init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); sh 1360 drivers/md/raid5.c static void mark_target_uptodate(struct stripe_head *sh, int target) sh 1367 drivers/md/raid5.c tgt = &sh->dev[target]; sh 1375 drivers/md/raid5.c struct stripe_head *sh = stripe_head_ref; sh 1378 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1381 drivers/md/raid5.c mark_target_uptodate(sh, sh->ops.target); sh 1382 drivers/md/raid5.c mark_target_uptodate(sh, sh->ops.target2); sh 1384 drivers/md/raid5.c clear_bit(STRIPE_COMPUTE_RUN, &sh->state); sh 1385 drivers/md/raid5.c if (sh->check_state == check_state_compute_run) sh 1386 drivers/md/raid5.c sh->check_state = check_state_compute_result; sh 1387 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 1388 drivers/md/raid5.c raid5_release_stripe(sh); sh 1398 drivers/md/raid5.c static addr_conv_t *to_addr_conv(struct stripe_head *sh, sh 1401 drivers/md/raid5.c return (void *) (to_addr_page(percpu, i) + sh->disks + 2); sh 1405 drivers/md/raid5.c ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) sh 1407 drivers/md/raid5.c int disks = sh->disks; sh 1409 drivers/md/raid5.c int target = sh->ops.target; sh 1410 drivers/md/raid5.c struct r5dev *tgt = &sh->dev[target]; sh 1417 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 1420 drivers/md/raid5.c __func__, (unsigned long long)sh->sector, target); sh 1425 drivers/md/raid5.c xor_srcs[count++] = sh->dev[i].page; sh 1427 drivers/md/raid5.c atomic_inc(&sh->count); sh 1430 drivers/md/raid5.c ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); sh 1449 
drivers/md/raid5.c struct stripe_head *sh, sh 1452 drivers/md/raid5.c int disks = sh->disks; sh 1453 drivers/md/raid5.c int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); sh 1454 drivers/md/raid5.c int d0_idx = raid6_d0(sh); sh 1464 drivers/md/raid5.c int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); sh 1465 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 1467 drivers/md/raid5.c if (i == sh->qd_idx || i == sh->pd_idx || sh 1476 drivers/md/raid5.c srcs[slot] = sh->dev[i].orig_page; sh 1478 drivers/md/raid5.c srcs[slot] = sh->dev[i].page; sh 1487 drivers/md/raid5.c ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) sh 1489 drivers/md/raid5.c int disks = sh->disks; sh 1492 drivers/md/raid5.c int qd_idx = sh->qd_idx; sh 1500 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 1501 drivers/md/raid5.c if (sh->ops.target < 0) sh 1502 drivers/md/raid5.c target = sh->ops.target2; sh 1503 drivers/md/raid5.c else if (sh->ops.target2 < 0) sh 1504 drivers/md/raid5.c target = sh->ops.target; sh 1510 drivers/md/raid5.c __func__, (unsigned long long)sh->sector, target); sh 1512 drivers/md/raid5.c tgt = &sh->dev[target]; sh 1516 drivers/md/raid5.c atomic_inc(&sh->count); sh 1519 drivers/md/raid5.c count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); sh 1523 drivers/md/raid5.c ops_complete_compute, sh, sh 1524 drivers/md/raid5.c to_addr_conv(sh, percpu, 0)); sh 1532 drivers/md/raid5.c blocks[count++] = sh->dev[i].page; sh 1536 drivers/md/raid5.c NULL, ops_complete_compute, sh, sh 1537 drivers/md/raid5.c to_addr_conv(sh, percpu, 0)); sh 1545 drivers/md/raid5.c ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) sh 1547 drivers/md/raid5.c int i, count, disks = sh->disks; sh 1548 drivers/md/raid5.c int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; sh 1549 drivers/md/raid5.c int d0_idx = raid6_d0(sh); sh 1551 drivers/md/raid5.c int target = sh->ops.target; sh 1552 drivers/md/raid5.c int target2 = sh->ops.target2; sh 1553 drivers/md/raid5.c struct r5dev *tgt = &sh->dev[target]; sh 1554 drivers/md/raid5.c struct r5dev *tgt2 = &sh->dev[target2]; sh 1559 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 1561 drivers/md/raid5.c __func__, (unsigned long long)sh->sector, target, target2); sh 1574 drivers/md/raid5.c int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); sh 1576 drivers/md/raid5.c blocks[slot] = sh->dev[i].page; sh 1589 drivers/md/raid5.c __func__, (unsigned long long)sh->sector, faila, failb); sh 1591 drivers/md/raid5.c atomic_inc(&sh->count); sh 1598 drivers/md/raid5.c ops_complete_compute, sh, sh 1599 drivers/md/raid5.c to_addr_conv(sh, percpu, 0)); sh 1605 drivers/md/raid5.c int qd_idx = sh->qd_idx; sh 1617 drivers/md/raid5.c blocks[count++] = sh->dev[i].page; sh 1619 drivers/md/raid5.c dest = sh->dev[data_target].page; sh 1623 drivers/md/raid5.c to_addr_conv(sh, percpu, 0)); sh 1627 drivers/md/raid5.c count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); sh 1629 drivers/md/raid5.c ops_complete_compute, sh, sh 1630 drivers/md/raid5.c to_addr_conv(sh, percpu, 0)); sh 1636 drivers/md/raid5.c ops_complete_compute, sh, sh 1637 drivers/md/raid5.c to_addr_conv(sh, percpu, 0)); sh 1654 drivers/md/raid5.c struct stripe_head *sh = stripe_head_ref; sh 1657 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1659 drivers/md/raid5.c if (r5c_is_writeback(sh->raid_conf->log)) sh 1664 drivers/md/raid5.c r5c_release_extra_page(sh); sh 1668 drivers/md/raid5.c ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, sh 1671 drivers/md/raid5.c int disks = sh->disks; sh 1673 drivers/md/raid5.c int count = 0, pd_idx = sh->pd_idx, i; sh 1677 drivers/md/raid5.c struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; sh 1679 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 1681 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1684 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 1693 drivers/md/raid5.c ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); sh 1700 drivers/md/raid5.c ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, sh 1708 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1710 drivers/md/raid5.c count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); sh 1713 drivers/md/raid5.c ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); sh 1720 drivers/md/raid5.c ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) sh 1722 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 1723 drivers/md/raid5.c int disks = sh->disks; sh 1725 drivers/md/raid5.c struct stripe_head *head_sh = sh; sh 1728 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1734 drivers/md/raid5.c sh = head_sh; sh 1739 drivers/md/raid5.c dev = &sh->dev[i]; sh 1745 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 1748 drivers/md/raid5.c sh->overwrite_disks = 0; sh 1751 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 1764 drivers/md/raid5.c dev->sector, tx, sh, sh 1777 drivers/md/raid5.c sh = list_first_entry(&sh->batch_list, sh 1780 drivers/md/raid5.c if (sh == head_sh) sh 1792 drivers/md/raid5.c struct stripe_head *sh = stripe_head_ref; sh 1793 drivers/md/raid5.c int disks = sh->disks; sh 1794 drivers/md/raid5.c int pd_idx = sh->pd_idx; sh 1795 drivers/md/raid5.c int qd_idx = sh->qd_idx; sh 1800 drivers/md/raid5.c (unsigned long 
long)sh->sector); sh 1803 drivers/md/raid5.c fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); sh 1804 drivers/md/raid5.c sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); sh 1805 drivers/md/raid5.c discard |= test_bit(R5_Discard, &sh->dev[i].flags); sh 1809 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 1814 drivers/md/raid5.c if (test_bit(STRIPE_EXPAND_READY, &sh->state)) sh 1824 drivers/md/raid5.c if (sh->reconstruct_state == reconstruct_state_drain_run) sh 1825 drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_drain_result; sh 1826 drivers/md/raid5.c else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) sh 1827 drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_prexor_drain_result; sh 1829 drivers/md/raid5.c BUG_ON(sh->reconstruct_state != reconstruct_state_run); sh 1830 drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_result; sh 1833 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 1834 drivers/md/raid5.c raid5_release_stripe(sh); sh 1838 drivers/md/raid5.c ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, sh 1841 drivers/md/raid5.c int disks = sh->disks; sh 1844 drivers/md/raid5.c int count, pd_idx = sh->pd_idx, i; sh 1849 drivers/md/raid5.c struct stripe_head *head_sh = sh; sh 1853 drivers/md/raid5.c (unsigned long long)sh->sector); sh 1855 drivers/md/raid5.c for (i = 0; i < sh->disks; i++) { sh 1858 drivers/md/raid5.c if (!test_bit(R5_Discard, &sh->dev[i].flags)) sh 1861 drivers/md/raid5.c if (i >= sh->disks) { sh 1862 drivers/md/raid5.c atomic_inc(&sh->count); sh 1863 drivers/md/raid5.c set_bit(R5_Discard, &sh->dev[pd_idx].flags); sh 1864 drivers/md/raid5.c ops_complete_reconstruct(sh); sh 1875 drivers/md/raid5.c xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; sh 1877 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 1883 drivers/md/raid5.c xor_dest = sh->dev[pd_idx].page; sh 1885 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 1897 drivers/md/raid5.c list_first_entry(&sh->batch_list, sh 1905 drivers/md/raid5.c to_addr_conv(sh, percpu, j)); sh 1909 drivers/md/raid5.c to_addr_conv(sh, percpu, j)); sh 1918 drivers/md/raid5.c sh = list_first_entry(&sh->batch_list, struct stripe_head, sh 1925 drivers/md/raid5.c ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, sh 1931 drivers/md/raid5.c struct stripe_head *head_sh = sh; sh 1936 drivers/md/raid5.c pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); sh 1938 drivers/md/raid5.c for (i = 0; i < sh->disks; i++) { sh 1939 drivers/md/raid5.c if (sh->pd_idx == i || sh->qd_idx == i) sh 1941 drivers/md/raid5.c if (!test_bit(R5_Discard, &sh->dev[i].flags)) sh 1944 drivers/md/raid5.c if (i >= sh->disks) { sh 1945 drivers/md/raid5.c atomic_inc(&sh->count); sh 1946 drivers/md/raid5.c set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); sh 1947 drivers/md/raid5.c set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); sh 1948 drivers/md/raid5.c ops_complete_reconstruct(sh); sh 1955 drivers/md/raid5.c if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { sh 1963 drivers/md/raid5.c count = set_syndrome_sources(blocks, sh, synflags); sh 1965 drivers/md/raid5.c list_first_entry(&sh->batch_list, sh 1971 drivers/md/raid5.c head_sh, to_addr_conv(sh, percpu, j)); sh 1974 drivers/md/raid5.c to_addr_conv(sh, percpu, j)); sh 1978 drivers/md/raid5.c sh = list_first_entry(&sh->batch_list, struct stripe_head, sh 1986 drivers/md/raid5.c struct stripe_head *sh = stripe_head_ref; sh 1989 drivers/md/raid5.c (unsigned 
long long)sh->sector); sh 1991 drivers/md/raid5.c sh->check_state = check_state_check_result; sh 1992 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 1993 drivers/md/raid5.c raid5_release_stripe(sh); sh 1996 drivers/md/raid5.c static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) sh 1998 drivers/md/raid5.c int disks = sh->disks; sh 1999 drivers/md/raid5.c int pd_idx = sh->pd_idx; sh 2000 drivers/md/raid5.c int qd_idx = sh->qd_idx; sh 2009 drivers/md/raid5.c (unsigned long long)sh->sector); sh 2011 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 2013 drivers/md/raid5.c xor_dest = sh->dev[pd_idx].page; sh 2018 drivers/md/raid5.c xor_srcs[count++] = sh->dev[i].page; sh 2022 drivers/md/raid5.c to_addr_conv(sh, percpu, 0)); sh 2024 drivers/md/raid5.c &sh->ops.zero_sum_result, &submit); sh 2026 drivers/md/raid5.c atomic_inc(&sh->count); sh 2027 drivers/md/raid5.c init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); sh 2031 drivers/md/raid5.c static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) sh 2038 drivers/md/raid5.c (unsigned long long)sh->sector, checkp); sh 2040 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 2041 drivers/md/raid5.c count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); sh 2045 drivers/md/raid5.c atomic_inc(&sh->count); sh 2047 drivers/md/raid5.c sh, to_addr_conv(sh, percpu, 0)); sh 2049 drivers/md/raid5.c &sh->ops.zero_sum_result, percpu->spare_page, &submit); sh 2052 drivers/md/raid5.c static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) sh 2054 drivers/md/raid5.c int overlap_clear = 0, i, disks = sh->disks; sh 2056 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 2064 drivers/md/raid5.c ops_run_biofill(sh); sh 2070 drivers/md/raid5.c tx = ops_run_compute5(sh, percpu); sh 2072 drivers/md/raid5.c if (sh->ops.target2 < 0 || sh->ops.target < 0) sh 2073 drivers/md/raid5.c tx = ops_run_compute6_1(sh, percpu); sh 2075 drivers/md/raid5.c tx = ops_run_compute6_2(sh, percpu); sh 2084 drivers/md/raid5.c tx = ops_run_prexor5(sh, percpu, tx); sh 2086 drivers/md/raid5.c tx = ops_run_prexor6(sh, percpu, tx); sh 2090 drivers/md/raid5.c tx = ops_run_partial_parity(sh, percpu, tx); sh 2093 drivers/md/raid5.c tx = ops_run_biodrain(sh, tx); sh 2099 drivers/md/raid5.c ops_run_reconstruct5(sh, percpu, tx); sh 2101 drivers/md/raid5.c ops_run_reconstruct6(sh, percpu, tx); sh 2105 drivers/md/raid5.c if (sh->check_state == check_state_run) sh 2106 drivers/md/raid5.c ops_run_check_p(sh, percpu); sh 2107 drivers/md/raid5.c else if (sh->check_state == check_state_run_q) sh 2108 drivers/md/raid5.c ops_run_check_pq(sh, percpu, 0); sh 2109 drivers/md/raid5.c else if (sh->check_state == check_state_run_pq) sh 2110 drivers/md/raid5.c ops_run_check_pq(sh, percpu, 1); sh 2115 drivers/md/raid5.c if (overlap_clear && !sh->batch_head) sh 2117 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 2119 drivers/md/raid5.c wake_up(&sh->raid_conf->wait_for_overlap); sh 2124 drivers/md/raid5.c static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) sh 2126 drivers/md/raid5.c if (sh->ppl_page) sh 2127 drivers/md/raid5.c __free_page(sh->ppl_page); sh 2128 drivers/md/raid5.c kmem_cache_free(sc, sh); sh 2134 drivers/md/raid5.c struct stripe_head *sh; sh 2137 drivers/md/raid5.c sh = kmem_cache_zalloc(sc, gfp); sh 2138 drivers/md/raid5.c if (sh) { sh 2139 drivers/md/raid5.c spin_lock_init(&sh->stripe_lock); sh 2140 drivers/md/raid5.c spin_lock_init(&sh->batch_lock); sh 2141 
drivers/md/raid5.c INIT_LIST_HEAD(&sh->batch_list); sh 2142 drivers/md/raid5.c INIT_LIST_HEAD(&sh->lru); sh 2143 drivers/md/raid5.c INIT_LIST_HEAD(&sh->r5c); sh 2144 drivers/md/raid5.c INIT_LIST_HEAD(&sh->log_list); sh 2145 drivers/md/raid5.c atomic_set(&sh->count, 1); sh 2146 drivers/md/raid5.c sh->raid_conf = conf; sh 2147 drivers/md/raid5.c sh->log_start = MaxSector; sh 2149 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 2156 drivers/md/raid5.c sh->ppl_page = alloc_page(gfp); sh 2157 drivers/md/raid5.c if (!sh->ppl_page) { sh 2158 drivers/md/raid5.c free_stripe(sc, sh); sh 2159 drivers/md/raid5.c sh = NULL; sh 2163 drivers/md/raid5.c return sh; sh 2167 drivers/md/raid5.c struct stripe_head *sh; sh 2169 drivers/md/raid5.c sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); sh 2170 drivers/md/raid5.c if (!sh) sh 2173 drivers/md/raid5.c if (grow_buffers(sh, gfp)) { sh 2174 drivers/md/raid5.c shrink_buffers(sh); sh 2175 drivers/md/raid5.c free_stripe(conf->slab_cache, sh); sh 2178 drivers/md/raid5.c sh->hash_lock_index = sh 2183 drivers/md/raid5.c raid5_release_stripe(sh); sh 2435 drivers/md/raid5.c struct stripe_head *sh; sh 2439 drivers/md/raid5.c sh = get_free_stripe(conf, hash); sh 2441 drivers/md/raid5.c if (!sh) sh 2443 drivers/md/raid5.c BUG_ON(atomic_read(&sh->count)); sh 2444 drivers/md/raid5.c shrink_buffers(sh); sh 2445 drivers/md/raid5.c free_stripe(conf->slab_cache, sh); sh 2463 drivers/md/raid5.c struct stripe_head *sh = bi->bi_private; sh 2464 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 2465 drivers/md/raid5.c int disks = sh->disks, i; sh 2471 drivers/md/raid5.c if (bi == &sh->dev[i].req) sh 2475 drivers/md/raid5.c (unsigned long long)sh->sector, i, atomic_read(&sh->count), sh 2482 drivers/md/raid5.c if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) sh 2492 drivers/md/raid5.c if (use_new_offset(conf, sh)) sh 2493 drivers/md/raid5.c s = sh->sector + rdev->new_data_offset; sh 2495 drivers/md/raid5.c s = sh->sector + rdev->data_offset; sh 2497 drivers/md/raid5.c set_bit(R5_UPTODATE, &sh->dev[i].flags); sh 2498 drivers/md/raid5.c if (test_bit(R5_ReadError, &sh->dev[i].flags)) { sh 2509 drivers/md/raid5.c clear_bit(R5_ReadError, &sh->dev[i].flags); sh 2510 drivers/md/raid5.c clear_bit(R5_ReWrite, &sh->dev[i].flags); sh 2511 drivers/md/raid5.c } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) sh 2512 drivers/md/raid5.c clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); sh 2514 drivers/md/raid5.c if (test_bit(R5_InJournal, &sh->dev[i].flags)) sh 2519 drivers/md/raid5.c set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); sh 2528 drivers/md/raid5.c clear_bit(R5_UPTODATE, &sh->dev[i].flags); sh 2531 drivers/md/raid5.c if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) sh 2544 drivers/md/raid5.c } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { sh 2565 drivers/md/raid5.c && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) sh 2568 drivers/md/raid5.c if (sh->qd_idx >= 0 && sh->pd_idx == i) sh 2569 drivers/md/raid5.c set_bit(R5_ReadError, &sh->dev[i].flags); sh 2570 drivers/md/raid5.c else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { sh 2571 drivers/md/raid5.c set_bit(R5_ReadError, &sh->dev[i].flags); sh 2572 drivers/md/raid5.c clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); sh 2574 drivers/md/raid5.c set_bit(R5_ReadNoMerge, &sh->dev[i].flags); sh 2576 drivers/md/raid5.c clear_bit(R5_ReadError, &sh->dev[i].flags); sh 2577 drivers/md/raid5.c clear_bit(R5_ReWrite, &sh->dev[i].flags); sh 2581 drivers/md/raid5.c rdev, sh->sector, STRIPE_SECTORS, 0))) sh 2587 
drivers/md/raid5.c clear_bit(R5_LOCKED, &sh->dev[i].flags); sh 2588 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 2589 drivers/md/raid5.c raid5_release_stripe(sh); sh 2594 drivers/md/raid5.c struct stripe_head *sh = bi->bi_private; sh 2595 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 2596 drivers/md/raid5.c int disks = sh->disks, i; sh 2603 drivers/md/raid5.c if (bi == &sh->dev[i].req) { sh 2607 drivers/md/raid5.c if (bi == &sh->dev[i].rreq) { sh 2621 drivers/md/raid5.c (unsigned long long)sh->sector, i, atomic_read(&sh->count), sh 2632 drivers/md/raid5.c else if (is_badblock(rdev, sh->sector, sh 2635 drivers/md/raid5.c set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); sh 2638 drivers/md/raid5.c set_bit(STRIPE_DEGRADED, &sh->state); sh 2640 drivers/md/raid5.c set_bit(R5_WriteError, &sh->dev[i].flags); sh 2644 drivers/md/raid5.c } else if (is_badblock(rdev, sh->sector, sh 2647 drivers/md/raid5.c set_bit(R5_MadeGood, &sh->dev[i].flags); sh 2648 drivers/md/raid5.c if (test_bit(R5_ReadError, &sh->dev[i].flags)) sh 2653 drivers/md/raid5.c set_bit(R5_ReWrite, &sh->dev[i].flags); sh 2658 drivers/md/raid5.c if (sh->batch_head && bi->bi_status && !replacement) sh 2659 drivers/md/raid5.c set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); sh 2662 drivers/md/raid5.c if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) sh 2663 drivers/md/raid5.c clear_bit(R5_LOCKED, &sh->dev[i].flags); sh 2664 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 2665 drivers/md/raid5.c raid5_release_stripe(sh); sh 2667 drivers/md/raid5.c if (sh->batch_head && sh != sh->batch_head) sh 2668 drivers/md/raid5.c raid5_release_stripe(sh->batch_head); sh 2715 drivers/md/raid5.c struct stripe_head *sh) sh 2903 drivers/md/raid5.c if (sh) { sh 2904 drivers/md/raid5.c sh->pd_idx = pd_idx; sh 2905 drivers/md/raid5.c sh->qd_idx = qd_idx; sh 2906 drivers/md/raid5.c sh->ddf_layout = ddf_layout; sh 2915 drivers/md/raid5.c sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) sh 2917 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 2918 drivers/md/raid5.c int raid_disks = sh->disks; sh 2920 drivers/md/raid5.c sector_t new_sector = sh->sector, check; sh 2935 drivers/md/raid5.c if (i == sh->pd_idx) sh 2943 drivers/md/raid5.c if (i > sh->pd_idx) sh 2948 drivers/md/raid5.c if (i < sh->pd_idx) sh 2950 drivers/md/raid5.c i -= (sh->pd_idx + 1); sh 2962 drivers/md/raid5.c if (i == sh->qd_idx) sh 2969 drivers/md/raid5.c if (sh->pd_idx == raid_disks-1) sh 2971 drivers/md/raid5.c else if (i > sh->pd_idx) sh 2976 drivers/md/raid5.c if (sh->pd_idx == raid_disks-1) sh 2980 drivers/md/raid5.c if (i < sh->pd_idx) sh 2982 drivers/md/raid5.c i -= (sh->pd_idx + 2); sh 2992 drivers/md/raid5.c if (sh->pd_idx == 0) sh 2996 drivers/md/raid5.c if (i < sh->pd_idx) sh 2998 drivers/md/raid5.c i -= (sh->pd_idx + 1); sh 3003 drivers/md/raid5.c if (i > sh->pd_idx) sh 3008 drivers/md/raid5.c if (i < sh->pd_idx) sh 3010 drivers/md/raid5.c i -= (sh->pd_idx + 1); sh 3026 drivers/md/raid5.c if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx sh 3027 drivers/md/raid5.c || sh2.qd_idx != sh->qd_idx) { sh 3092 drivers/md/raid5.c schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, sh 3095 drivers/md/raid5.c int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; sh 3096 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 3106 drivers/md/raid5.c r5c_release_extra_page(sh); sh 3109 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 3130 
drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_drain_run; sh 3133 drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_run; sh 3138 drivers/md/raid5.c if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) sh 3141 drivers/md/raid5.c BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || sh 3142 drivers/md/raid5.c test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); sh 3144 drivers/md/raid5.c (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || sh 3145 drivers/md/raid5.c test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); sh 3148 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 3167 drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_prexor_drain_run; sh 3176 drivers/md/raid5.c set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); sh 3177 drivers/md/raid5.c clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); sh 3181 drivers/md/raid5.c int qd_idx = sh->qd_idx; sh 3182 drivers/md/raid5.c struct r5dev *dev = &sh->dev[qd_idx]; sh 3189 drivers/md/raid5.c if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && sh 3191 drivers/md/raid5.c !test_bit(STRIPE_FULL_WRITE, &sh->state) && sh 3192 drivers/md/raid5.c test_bit(R5_Insync, &sh->dev[pd_idx].flags)) sh 3196 drivers/md/raid5.c __func__, (unsigned long long)sh->sector, sh 3205 drivers/md/raid5.c static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, sh 3209 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 3214 drivers/md/raid5.c (unsigned long long)sh->sector); sh 3216 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 3217 drivers/md/raid5.c sh->dev[dd_idx].write_hint = bi->bi_write_hint; sh 3219 drivers/md/raid5.c if (sh->batch_head) sh 3222 drivers/md/raid5.c bip = &sh->dev[dd_idx].towrite; sh 3226 drivers/md/raid5.c bip = &sh->dev[dd_idx].toread; sh 3249 drivers/md/raid5.c for (i = 0; i < sh->disks; i++) { sh 3250 drivers/md/raid5.c if (i != sh->pd_idx && sh 3251 drivers/md/raid5.c (i == dd_idx || sh->dev[i].towrite)) { sh 3252 drivers/md/raid5.c sector = sh->dev[i].sector; sh 3266 drivers/md/raid5.c clear_bit(STRIPE_BATCH_READY, &sh->state); sh 3277 drivers/md/raid5.c sector_t sector = sh->dev[dd_idx].sector; sh 3278 drivers/md/raid5.c for (bi=sh->dev[dd_idx].towrite; sh 3279 drivers/md/raid5.c sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && sh 3281 drivers/md/raid5.c bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { sh 3285 drivers/md/raid5.c if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) sh 3286 drivers/md/raid5.c if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) sh 3287 drivers/md/raid5.c sh->overwrite_disks++; sh 3292 drivers/md/raid5.c (unsigned long long)sh->sector, dd_idx); sh 3307 drivers/md/raid5.c set_bit(STRIPE_BITMAP_PENDING, &sh->state); sh 3308 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 3309 drivers/md/raid5.c md_bitmap_startwrite(conf->mddev->bitmap, sh->sector, sh 3311 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 3312 drivers/md/raid5.c clear_bit(STRIPE_BITMAP_PENDING, &sh->state); sh 3313 drivers/md/raid5.c if (!sh->batch_head) { sh 3314 drivers/md/raid5.c sh->bm_seq = conf->seq_flush+1; sh 3315 drivers/md/raid5.c set_bit(STRIPE_BIT_DELAY, &sh->state); sh 3318 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 3320 drivers/md/raid5.c if (stripe_can_batch(sh)) sh 3321 drivers/md/raid5.c stripe_add_to_batch_list(conf, sh); sh 3325 drivers/md/raid5.c set_bit(R5_Overlap, &sh->dev[dd_idx].flags); sh 3326 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 3333 drivers/md/raid5.c struct stripe_head *sh) sh 3345 
drivers/md/raid5.c &dd_idx, sh); sh 3349 drivers/md/raid5.c handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh 3353 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 3358 drivers/md/raid5.c if (test_bit(R5_ReadError, &sh->dev[i].flags)) { sh 3371 drivers/md/raid5.c sh->sector, sh 3377 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 3379 drivers/md/raid5.c bi = sh->dev[i].towrite; sh 3380 drivers/md/raid5.c sh->dev[i].towrite = NULL; sh 3381 drivers/md/raid5.c sh->overwrite_disks = 0; sh 3382 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 3386 drivers/md/raid5.c log_stripe_write_finished(sh); sh 3388 drivers/md/raid5.c if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) sh 3392 drivers/md/raid5.c sh->dev[i].sector + STRIPE_SECTORS) { sh 3393 drivers/md/raid5.c struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); sh 3400 drivers/md/raid5.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, sh 3404 drivers/md/raid5.c bi = sh->dev[i].written; sh 3405 drivers/md/raid5.c sh->dev[i].written = NULL; sh 3406 drivers/md/raid5.c if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { sh 3407 drivers/md/raid5.c WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); sh 3408 drivers/md/raid5.c sh->dev[i].page = sh->dev[i].orig_page; sh 3413 drivers/md/raid5.c sh->dev[i].sector + STRIPE_SECTORS) { sh 3414 drivers/md/raid5.c struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); sh 3424 drivers/md/raid5.c if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && sh 3426 drivers/md/raid5.c (!test_bit(R5_Insync, &sh->dev[i].flags) || sh 3427 drivers/md/raid5.c test_bit(R5_ReadError, &sh->dev[i].flags))) { sh 3428 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 3429 drivers/md/raid5.c bi = sh->dev[i].toread; sh 3430 drivers/md/raid5.c sh->dev[i].toread = NULL; sh 3431 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 3432 drivers/md/raid5.c if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) sh 3437 drivers/md/raid5.c sh->dev[i].sector + STRIPE_SECTORS) { sh 3439 drivers/md/raid5.c r5_next_bio(bi, sh->dev[i].sector); sh 3446 drivers/md/raid5.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, sh 3451 drivers/md/raid5.c clear_bit(R5_LOCKED, &sh->dev[i].flags); sh 3456 drivers/md/raid5.c if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) sh 3462 drivers/md/raid5.c handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, sh 3468 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 3469 drivers/md/raid5.c clear_bit(STRIPE_SYNCING, &sh->state); sh 3470 drivers/md/raid5.c if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) sh 3491 drivers/md/raid5.c && !rdev_set_badblocks(rdev, sh->sector, sh 3498 drivers/md/raid5.c && !rdev_set_badblocks(rdev, sh->sector, sh 3510 drivers/md/raid5.c static int want_replace(struct stripe_head *sh, int disk_idx) sh 3516 drivers/md/raid5.c rdev = rcu_dereference(sh->raid_conf->disks[disk_idx].replacement); sh 3520 drivers/md/raid5.c && (rdev->recovery_offset <= sh->sector sh 3521 drivers/md/raid5.c || rdev->mddev->recovery_cp <= sh->sector)) sh 3527 drivers/md/raid5.c static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, sh 3530 drivers/md/raid5.c struct r5dev *dev = &sh->dev[disk_idx]; sh 3531 drivers/md/raid5.c struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], sh 3532 drivers/md/raid5.c &sh->dev[s->failed_num[1]] }; sh 3549 drivers/md/raid5.c (s->replacing && want_replace(sh, disk_idx))) sh 3574 drivers/md/raid5.c !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 3602 drivers/md/raid5.c if 
(sh->raid_conf->level != 6 && sh 3603 drivers/md/raid5.c sh->sector < sh->raid_conf->mddev->recovery_cp) sh 3607 drivers/md/raid5.c if (s->failed_num[i] != sh->pd_idx && sh 3608 drivers/md/raid5.c s->failed_num[i] != sh->qd_idx && sh 3623 drivers/md/raid5.c static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, sh 3626 drivers/md/raid5.c struct r5dev *dev = &sh->dev[disk_idx]; sh 3629 drivers/md/raid5.c if (need_this_block(sh, s, disk_idx, disks)) { sh 3635 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 3647 drivers/md/raid5.c ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || sh 3654 drivers/md/raid5.c (unsigned long long)sh->sector, disk_idx); sh 3655 drivers/md/raid5.c set_bit(STRIPE_COMPUTE_RUN, &sh->state); sh 3658 drivers/md/raid5.c sh->ops.target = disk_idx; sh 3659 drivers/md/raid5.c sh->ops.target2 = -1; /* no 2nd target */ sh 3678 drivers/md/raid5.c &sh->dev[other].flags)) sh 3683 drivers/md/raid5.c (unsigned long long)sh->sector, sh 3685 drivers/md/raid5.c set_bit(STRIPE_COMPUTE_RUN, &sh->state); sh 3687 drivers/md/raid5.c set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); sh 3688 drivers/md/raid5.c set_bit(R5_Wantcompute, &sh->dev[other].flags); sh 3689 drivers/md/raid5.c sh->ops.target = disk_idx; sh 3690 drivers/md/raid5.c sh->ops.target2 = other; sh 3709 drivers/md/raid5.c static void handle_stripe_fill(struct stripe_head *sh, sh 3719 drivers/md/raid5.c if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && sh 3720 drivers/md/raid5.c !sh->reconstruct_state) { sh 3730 drivers/md/raid5.c if (test_bit(STRIPE_R5C_CACHING, &sh->state)) sh 3731 drivers/md/raid5.c r5c_make_stripe_write_out(sh); sh 3736 drivers/md/raid5.c if (fetch_block(sh, s, i, disks)) sh 3740 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 3751 drivers/md/raid5.c struct stripe_head *sh, int disks) sh 3756 drivers/md/raid5.c struct stripe_head *head_sh = sh; sh 3760 drivers/md/raid5.c if (sh->dev[i].written) { sh 3761 drivers/md/raid5.c dev = &sh->dev[i]; sh 3787 drivers/md/raid5.c md_bitmap_endwrite(conf->mddev->bitmap, sh->sector, sh 3789 drivers/md/raid5.c !test_bit(STRIPE_DEGRADED, &sh->state), sh 3792 drivers/md/raid5.c sh = list_first_entry(&sh->batch_list, sh 3795 drivers/md/raid5.c if (sh != head_sh) { sh 3796 drivers/md/raid5.c dev = &sh->dev[i]; sh 3800 drivers/md/raid5.c sh = head_sh; sh 3801 drivers/md/raid5.c dev = &sh->dev[i]; sh 3806 drivers/md/raid5.c log_stripe_write_finished(sh); sh 3809 drivers/md/raid5.c test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { sh 3811 drivers/md/raid5.c clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); sh 3812 drivers/md/raid5.c clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); sh 3813 drivers/md/raid5.c if (sh->qd_idx >= 0) { sh 3814 drivers/md/raid5.c clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); sh 3815 drivers/md/raid5.c clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); sh 3818 drivers/md/raid5.c clear_bit(STRIPE_DISCARD, &sh->state); sh 3825 drivers/md/raid5.c hash = sh->hash_lock_index; sh 3827 drivers/md/raid5.c remove_hash(sh); sh 3830 drivers/md/raid5.c sh = list_first_entry(&sh->batch_list, sh 3832 drivers/md/raid5.c if (sh != head_sh) sh 3835 drivers/md/raid5.c sh = head_sh; sh 3837 drivers/md/raid5.c if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) sh 3838 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 3842 drivers/md/raid5.c if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) sh 3866 drivers/md/raid5.c struct stripe_head *sh, sh 3881 drivers/md/raid5.c (recovery_cp < MaxSector && 
sh->sector >= recovery_cp && sh 3889 drivers/md/raid5.c (unsigned long long)sh->sector); sh 3892 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 3894 drivers/md/raid5.c i == sh->pd_idx || i == sh->qd_idx || sh 3906 drivers/md/raid5.c i != sh->pd_idx && i != sh->qd_idx && sh 3918 drivers/md/raid5.c (unsigned long long)sh->sector, sh->state, rmw, rcw); sh 3919 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 3925 drivers/md/raid5.c (unsigned long long)sh->sector, rmw); sh 3927 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 3930 drivers/md/raid5.c !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { sh 3945 drivers/md/raid5.c r5c_use_extra_page(sh); sh 3950 drivers/md/raid5.c set_bit(STRIPE_DELAYED, &sh->state); sh 3957 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 3959 drivers/md/raid5.c i == sh->pd_idx || i == sh->qd_idx || sh 3966 drivers/md/raid5.c &sh->state)) { sh 3973 drivers/md/raid5.c set_bit(STRIPE_DELAYED, &sh->state); sh 3974 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 3984 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 3986 drivers/md/raid5.c i != sh->pd_idx && i != sh->qd_idx && sh 3993 drivers/md/raid5.c &sh->state)) { sh 4001 drivers/md/raid5.c set_bit(STRIPE_DELAYED, &sh->state); sh 4002 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4008 drivers/md/raid5.c (unsigned long long)sh->sector, sh 4009 drivers/md/raid5.c rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); sh 4013 drivers/md/raid5.c !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 4014 drivers/md/raid5.c set_bit(STRIPE_DELAYED, &sh->state); sh 4026 drivers/md/raid5.c if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && sh 4028 drivers/md/raid5.c !test_bit(STRIPE_BIT_DELAY, &sh->state))) sh 4029 drivers/md/raid5.c schedule_reconstruction(sh, s, rcw == 0, 0); sh 4033 drivers/md/raid5.c static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, sh 4038 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 4039 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4041 drivers/md/raid5.c switch (sh->check_state) { sh 4046 drivers/md/raid5.c sh->check_state = check_state_run; sh 4048 drivers/md/raid5.c clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); sh 4052 drivers/md/raid5.c dev = &sh->dev[s->failed_num[0]]; sh 4055 drivers/md/raid5.c sh->check_state = check_state_idle; sh 4057 drivers/md/raid5.c dev = &sh->dev[sh->pd_idx]; sh 4060 drivers/md/raid5.c if (test_bit(STRIPE_INSYNC, &sh->state)) sh 4071 drivers/md/raid5.c clear_bit(STRIPE_DEGRADED, &sh->state); sh 4072 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4077 drivers/md/raid5.c sh->check_state = check_state_idle; sh 4089 drivers/md/raid5.c if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) sh 4093 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4098 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4101 drivers/md/raid5.c (unsigned long long) sh->sector, sh 4102 drivers/md/raid5.c (unsigned long long) sh->sector + sh 4105 drivers/md/raid5.c sh->check_state = check_state_compute_run; sh 4106 drivers/md/raid5.c set_bit(STRIPE_COMPUTE_RUN, &sh->state); sh 4109 drivers/md/raid5.c &sh->dev[sh->pd_idx].flags); sh 4110 drivers/md/raid5.c sh->ops.target = sh->pd_idx; sh 4111 drivers/md/raid5.c sh->ops.target2 = -1; sh 4120 drivers/md/raid5.c __func__, sh->check_state, sh 4121 drivers/md/raid5.c (unsigned long long) sh->sector); sh 4126 drivers/md/raid5.c static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, sh 4130 
drivers/md/raid5.c int pd_idx = sh->pd_idx; sh 4131 drivers/md/raid5.c int qd_idx = sh->qd_idx; sh 4134 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 4135 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4145 drivers/md/raid5.c switch (sh->check_state) { sh 4153 drivers/md/raid5.c sh->check_state = check_state_run; sh 4159 drivers/md/raid5.c if (sh->check_state == check_state_run) sh 4160 drivers/md/raid5.c sh->check_state = check_state_run_pq; sh 4162 drivers/md/raid5.c sh->check_state = check_state_run_q; sh 4166 drivers/md/raid5.c sh->ops.zero_sum_result = 0; sh 4168 drivers/md/raid5.c if (sh->check_state == check_state_run) { sh 4170 drivers/md/raid5.c clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); sh 4173 drivers/md/raid5.c if (sh->check_state >= check_state_run && sh 4174 drivers/md/raid5.c sh->check_state <= check_state_run_pq) { sh 4186 drivers/md/raid5.c sh->check_state = check_state_idle; sh 4189 drivers/md/raid5.c if (test_bit(STRIPE_INSYNC, &sh->state)) sh 4197 drivers/md/raid5.c dev = &sh->dev[s->failed_num[1]]; sh 4203 drivers/md/raid5.c dev = &sh->dev[s->failed_num[0]]; sh 4208 drivers/md/raid5.c if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { sh 4209 drivers/md/raid5.c dev = &sh->dev[pd_idx]; sh 4214 drivers/md/raid5.c if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { sh 4215 drivers/md/raid5.c dev = &sh->dev[qd_idx]; sh 4223 drivers/md/raid5.c dev - (struct r5dev *) &sh->dev)) { sh 4228 drivers/md/raid5.c clear_bit(STRIPE_DEGRADED, &sh->state); sh 4230 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4237 drivers/md/raid5.c sh->check_state = check_state_idle; sh 4243 drivers/md/raid5.c if (sh->ops.zero_sum_result == 0) { sh 4246 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4252 drivers/md/raid5.c sh->check_state = check_state_compute_result; sh 4263 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4266 drivers/md/raid5.c (unsigned long long) sh->sector, sh 4267 drivers/md/raid5.c (unsigned long long) sh->sector + sh 4270 drivers/md/raid5.c int *target = &sh->ops.target; sh 4272 drivers/md/raid5.c sh->ops.target = -1; sh 4273 drivers/md/raid5.c sh->ops.target2 = -1; sh 4274 drivers/md/raid5.c sh->check_state = check_state_compute_run; sh 4275 drivers/md/raid5.c set_bit(STRIPE_COMPUTE_RUN, &sh->state); sh 4277 drivers/md/raid5.c if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { sh 4279 drivers/md/raid5.c &sh->dev[pd_idx].flags); sh 4281 drivers/md/raid5.c target = &sh->ops.target2; sh 4284 drivers/md/raid5.c if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { sh 4286 drivers/md/raid5.c &sh->dev[qd_idx].flags); sh 4297 drivers/md/raid5.c __func__, sh->check_state, sh 4298 drivers/md/raid5.c (unsigned long long) sh->sector); sh 4303 drivers/md/raid5.c static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) sh 4311 drivers/md/raid5.c BUG_ON(sh->batch_head); sh 4312 drivers/md/raid5.c clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); sh 4313 drivers/md/raid5.c for (i = 0; i < sh->disks; i++) sh 4314 drivers/md/raid5.c if (i != sh->pd_idx && i != sh->qd_idx) { sh 4319 drivers/md/raid5.c sector_t bn = raid5_compute_blocknr(sh, i, 1); sh 4339 drivers/md/raid5.c sh->dev[i].page, 0, 0, STRIPE_SIZE, sh 4374 drivers/md/raid5.c static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) sh 4376 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 4377 drivers/md/raid5.c int disks = sh->disks; sh 4384 drivers/md/raid5.c s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && 
!sh->batch_head; sh 4385 drivers/md/raid5.c s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; sh 4398 drivers/md/raid5.c dev = &sh->dev[i]; sh 4409 drivers/md/raid5.c !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) sh 4438 drivers/md/raid5.c rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && sh 4439 drivers/md/raid5.c !is_badblock(rdev, sh->sector, STRIPE_SECTORS, sh 4453 drivers/md/raid5.c is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, sh 4480 drivers/md/raid5.c else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) sh 4550 drivers/md/raid5.c if (test_bit(STRIPE_SYNCING, &sh->state)) { sh 4560 drivers/md/raid5.c sh->sector >= conf->mddev->recovery_cp || sh 4569 drivers/md/raid5.c static int clear_batch_ready(struct stripe_head *sh) sh 4576 drivers/md/raid5.c if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) sh 4577 drivers/md/raid5.c return (sh->batch_head && sh->batch_head != sh); sh 4578 drivers/md/raid5.c spin_lock(&sh->stripe_lock); sh 4579 drivers/md/raid5.c if (!sh->batch_head) { sh 4580 drivers/md/raid5.c spin_unlock(&sh->stripe_lock); sh 4588 drivers/md/raid5.c if (sh->batch_head != sh) { sh 4589 drivers/md/raid5.c spin_unlock(&sh->stripe_lock); sh 4592 drivers/md/raid5.c spin_lock(&sh->batch_lock); sh 4593 drivers/md/raid5.c list_for_each_entry(tmp, &sh->batch_list, batch_list) sh 4595 drivers/md/raid5.c spin_unlock(&sh->batch_lock); sh 4596 drivers/md/raid5.c spin_unlock(&sh->stripe_lock); sh 4608 drivers/md/raid5.c struct stripe_head *sh, *next; sh 4612 drivers/md/raid5.c list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { sh 4614 drivers/md/raid5.c list_del_init(&sh->batch_list); sh 4616 drivers/md/raid5.c WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | sh 4628 drivers/md/raid5.c "stripe state: %lx\n", sh->state); sh 4633 drivers/md/raid5.c set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | sh 4639 drivers/md/raid5.c sh->check_state = head_sh->check_state; sh 4640 drivers/md/raid5.c sh->reconstruct_state = head_sh->reconstruct_state; sh 4641 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 4642 drivers/md/raid5.c sh->batch_head = NULL; sh 4643 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 4644 drivers/md/raid5.c for (i = 0; i < sh->disks; i++) { sh 4645 drivers/md/raid5.c if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) sh 4647 drivers/md/raid5.c sh->dev[i].flags = head_sh->dev[i].flags & sh 4651 drivers/md/raid5.c sh->state & handle_flags) sh 4652 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4653 drivers/md/raid5.c raid5_release_stripe(sh); sh 4668 drivers/md/raid5.c static void handle_stripe(struct stripe_head *sh) sh 4671 drivers/md/raid5.c struct r5conf *conf = sh->raid_conf; sh 4674 drivers/md/raid5.c int disks = sh->disks; sh 4677 drivers/md/raid5.c clear_bit(STRIPE_HANDLE, &sh->state); sh 4678 drivers/md/raid5.c if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { sh 4681 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4685 drivers/md/raid5.c if (clear_batch_ready(sh) ) { sh 4686 drivers/md/raid5.c clear_bit_unlock(STRIPE_ACTIVE, &sh->state); sh 4690 drivers/md/raid5.c if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) sh 4691 drivers/md/raid5.c break_stripe_batch_list(sh, 0); sh 4693 drivers/md/raid5.c if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { sh 4694 drivers/md/raid5.c spin_lock(&sh->stripe_lock); sh 4699 drivers/md/raid5.c if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && sh 4700 drivers/md/raid5.c 
!test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && sh 4701 drivers/md/raid5.c !test_bit(STRIPE_DISCARD, &sh->state) && sh 4702 drivers/md/raid5.c test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { sh 4703 drivers/md/raid5.c set_bit(STRIPE_SYNCING, &sh->state); sh 4704 drivers/md/raid5.c clear_bit(STRIPE_INSYNC, &sh->state); sh 4705 drivers/md/raid5.c clear_bit(STRIPE_REPLACED, &sh->state); sh 4707 drivers/md/raid5.c spin_unlock(&sh->stripe_lock); sh 4709 drivers/md/raid5.c clear_bit(STRIPE_DELAYED, &sh->state); sh 4713 drivers/md/raid5.c (unsigned long long)sh->sector, sh->state, sh 4714 drivers/md/raid5.c atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, sh 4715 drivers/md/raid5.c sh->check_state, sh->reconstruct_state); sh 4717 drivers/md/raid5.c analyse_stripe(sh, &s); sh 4719 drivers/md/raid5.c if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) sh 4724 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4731 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4739 drivers/md/raid5.c if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { sh 4741 drivers/md/raid5.c set_bit(STRIPE_BIOFILL_RUN, &sh->state); sh 4757 drivers/md/raid5.c sh->check_state = 0; sh 4758 drivers/md/raid5.c sh->reconstruct_state = 0; sh 4759 drivers/md/raid5.c break_stripe_batch_list(sh, 0); sh 4761 drivers/md/raid5.c handle_failed_stripe(conf, sh, &s, disks); sh 4763 drivers/md/raid5.c handle_failed_sync(conf, sh, &s); sh 4770 drivers/md/raid5.c if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) sh 4772 drivers/md/raid5.c if (sh->reconstruct_state == reconstruct_state_drain_result || sh 4773 drivers/md/raid5.c sh->reconstruct_state == reconstruct_state_prexor_drain_result) { sh 4774 drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_idle; sh 4779 drivers/md/raid5.c BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && sh 4780 drivers/md/raid5.c !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); sh 4781 drivers/md/raid5.c BUG_ON(sh->qd_idx >= 0 && sh 4782 drivers/md/raid5.c !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && sh 4783 drivers/md/raid5.c !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); sh 4785 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 4787 drivers/md/raid5.c (i == sh->pd_idx || i == sh->qd_idx || sh 4797 drivers/md/raid5.c ((i == sh->pd_idx || i == sh->qd_idx) && sh 4799 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4802 drivers/md/raid5.c if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 4810 drivers/md/raid5.c pdev = &sh->dev[sh->pd_idx]; sh 4811 drivers/md/raid5.c s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) sh 4812 drivers/md/raid5.c || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); sh 4813 drivers/md/raid5.c qdev = &sh->dev[sh->qd_idx]; sh 4814 drivers/md/raid5.c s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) sh 4815 drivers/md/raid5.c || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) sh 4827 drivers/md/raid5.c handle_stripe_clean_event(conf, sh, disks); sh 4830 drivers/md/raid5.c r5c_handle_cached_data_endio(conf, sh, disks); sh 4831 drivers/md/raid5.c log_stripe_write_finished(sh); sh 4842 drivers/md/raid5.c handle_stripe_fill(sh, &s, disks); sh 4849 drivers/md/raid5.c r5c_finish_stripe_write_out(conf, sh, &s); sh 4860 drivers/md/raid5.c if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { sh 4863 drivers/md/raid5.c handle_stripe_dirtying(conf, sh, &s, disks); sh 4869 drivers/md/raid5.c ret = r5c_try_caching_write(conf, sh, &s, sh 4880 drivers/md/raid5.c 
(!test_bit(STRIPE_R5C_CACHING, &sh->state) && sh 4882 drivers/md/raid5.c ret = handle_stripe_dirtying(conf, sh, &s, sh 4895 drivers/md/raid5.c if (sh->check_state || sh 4897 drivers/md/raid5.c !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && sh 4898 drivers/md/raid5.c !test_bit(STRIPE_INSYNC, &sh->state))) { sh 4900 drivers/md/raid5.c handle_parity_checks6(conf, sh, &s, disks); sh 4902 drivers/md/raid5.c handle_parity_checks5(conf, sh, &s, disks); sh 4906 drivers/md/raid5.c && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) sh 4907 drivers/md/raid5.c && !test_bit(STRIPE_REPLACED, &sh->state)) { sh 4910 drivers/md/raid5.c if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { sh 4911 drivers/md/raid5.c WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); sh 4912 drivers/md/raid5.c set_bit(R5_WantReplace, &sh->dev[i].flags); sh 4913 drivers/md/raid5.c set_bit(R5_LOCKED, &sh->dev[i].flags); sh 4917 drivers/md/raid5.c set_bit(STRIPE_INSYNC, &sh->state); sh 4918 drivers/md/raid5.c set_bit(STRIPE_REPLACED, &sh->state); sh 4921 drivers/md/raid5.c !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && sh 4922 drivers/md/raid5.c test_bit(STRIPE_INSYNC, &sh->state)) { sh 4924 drivers/md/raid5.c clear_bit(STRIPE_SYNCING, &sh->state); sh 4925 drivers/md/raid5.c if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) sh 4934 drivers/md/raid5.c struct r5dev *dev = &sh->dev[s.failed_num[i]]; sh 4954 drivers/md/raid5.c if (sh->reconstruct_state == reconstruct_state_result) { sh 4956 drivers/md/raid5.c = raid5_get_active_stripe(conf, sh->sector, 1, 1, 1); sh 4961 drivers/md/raid5.c set_bit(STRIPE_DELAYED, &sh->state); sh 4962 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 4972 drivers/md/raid5.c sh->reconstruct_state = reconstruct_state_idle; sh 4973 drivers/md/raid5.c clear_bit(STRIPE_EXPANDING, &sh->state); sh 4975 drivers/md/raid5.c set_bit(R5_Wantwrite, &sh->dev[i].flags); sh 4976 drivers/md/raid5.c set_bit(R5_LOCKED, &sh->dev[i].flags); sh 4981 drivers/md/raid5.c if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && sh 4982 drivers/md/raid5.c !sh->reconstruct_state) { sh 4984 drivers/md/raid5.c sh->disks = conf->raid_disks; sh 4985 drivers/md/raid5.c stripe_set_idx(sh->sector, conf, 0, sh); sh 4986 drivers/md/raid5.c schedule_reconstruction(sh, &s, 1, 1); sh 4987 drivers/md/raid5.c } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { sh 4988 drivers/md/raid5.c clear_bit(STRIPE_EXPAND_READY, &sh->state); sh 4995 drivers/md/raid5.c !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) sh 4996 drivers/md/raid5.c handle_stripe_expansion(conf, sh); sh 5016 drivers/md/raid5.c struct r5dev *dev = &sh->dev[i]; sh 5020 drivers/md/raid5.c if (!rdev_set_badblocks(rdev, sh->sector, sh 5027 drivers/md/raid5.c rdev_clear_badblocks(rdev, sh->sector, sh 5036 drivers/md/raid5.c rdev_clear_badblocks(rdev, sh->sector, sh 5043 drivers/md/raid5.c raid_run_ops(sh, s.ops_request); sh 5045 drivers/md/raid5.c ops_run_io(sh, &s); sh 5058 drivers/md/raid5.c clear_bit_unlock(STRIPE_ACTIVE, &sh->state); sh 5066 drivers/md/raid5.c struct stripe_head *sh; sh 5067 drivers/md/raid5.c sh = list_entry(l, struct stripe_head, lru); sh 5069 drivers/md/raid5.c clear_bit(STRIPE_DELAYED, &sh->state); sh 5070 drivers/md/raid5.c if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 5072 drivers/md/raid5.c list_add_tail(&sh->lru, &conf->hold_list); sh 5073 drivers/md/raid5.c raid5_wakeup_stripe_thread(sh); sh 5086 drivers/md/raid5.c struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); sh 5088 drivers/md/raid5.c 
list_del_init(&sh->lru); sh 5089 drivers/md/raid5.c atomic_inc(&sh->count); sh 5090 drivers/md/raid5.c hash = sh->hash_lock_index; sh 5091 drivers/md/raid5.c __release_stripe(conf, sh, &temp_inactive_list[hash]); sh 5327 drivers/md/raid5.c struct stripe_head *sh, *tmp; sh 5337 drivers/md/raid5.c sh = NULL; sh 5363 drivers/md/raid5.c sh = list_entry(handle_list->next, typeof(*sh), lru); sh 5367 drivers/md/raid5.c else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { sh 5387 drivers/md/raid5.c sh = tmp; sh 5392 drivers/md/raid5.c if (sh) { sh 5400 drivers/md/raid5.c if (!sh) { sh 5410 drivers/md/raid5.c sh->group = NULL; sh 5412 drivers/md/raid5.c list_del_init(&sh->lru); sh 5413 drivers/md/raid5.c BUG_ON(atomic_inc_return(&sh->count) != 1); sh 5414 drivers/md/raid5.c return sh; sh 5427 drivers/md/raid5.c struct stripe_head *sh; sh 5436 drivers/md/raid5.c sh = list_first_entry(&cb->list, struct stripe_head, lru); sh 5437 drivers/md/raid5.c list_del_init(&sh->lru); sh 5444 drivers/md/raid5.c clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); sh 5449 drivers/md/raid5.c hash = sh->hash_lock_index; sh 5450 drivers/md/raid5.c __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); sh 5463 drivers/md/raid5.c struct stripe_head *sh) sh 5471 drivers/md/raid5.c raid5_release_stripe(sh); sh 5484 drivers/md/raid5.c if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) sh 5485 drivers/md/raid5.c list_add_tail(&sh->lru, &cb->list); sh 5487 drivers/md/raid5.c raid5_release_stripe(sh); sh 5494 drivers/md/raid5.c struct stripe_head *sh; sh 5520 drivers/md/raid5.c sh = raid5_get_active_stripe(conf, logical_sector, 0, 0, 0); sh 5523 drivers/md/raid5.c set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); sh 5524 drivers/md/raid5.c if (test_bit(STRIPE_SYNCING, &sh->state)) { sh 5525 drivers/md/raid5.c raid5_release_stripe(sh); sh 5529 drivers/md/raid5.c clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); sh 5530 drivers/md/raid5.c spin_lock_irq(&sh->stripe_lock); sh 5532 drivers/md/raid5.c if (d == sh->pd_idx || d == sh->qd_idx) sh 5534 drivers/md/raid5.c if (sh->dev[d].towrite || sh->dev[d].toread) { sh 5535 drivers/md/raid5.c set_bit(R5_Overlap, &sh->dev[d].flags); sh 5536 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 5537 drivers/md/raid5.c raid5_release_stripe(sh); sh 5542 drivers/md/raid5.c set_bit(STRIPE_DISCARD, &sh->state); sh 5544 drivers/md/raid5.c sh->overwrite_disks = 0; sh 5546 drivers/md/raid5.c if (d == sh->pd_idx || d == sh->qd_idx) sh 5548 drivers/md/raid5.c sh->dev[d].towrite = bi; sh 5549 drivers/md/raid5.c set_bit(R5_OVERWRITE, &sh->dev[d].flags); sh 5552 drivers/md/raid5.c sh->overwrite_disks++; sh 5554 drivers/md/raid5.c spin_unlock_irq(&sh->stripe_lock); sh 5560 drivers/md/raid5.c sh->sector, sh 5563 drivers/md/raid5.c sh->bm_seq = conf->seq_flush + 1; sh 5564 drivers/md/raid5.c set_bit(STRIPE_BIT_DELAY, &sh->state); sh 5567 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 5568 drivers/md/raid5.c clear_bit(STRIPE_DELAYED, &sh->state); sh 5569 drivers/md/raid5.c if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 5571 drivers/md/raid5.c release_stripe_plug(mddev, sh); sh 5583 drivers/md/raid5.c struct stripe_head *sh; sh 5676 drivers/md/raid5.c sh = raid5_get_active_stripe(conf, new_sector, previous, sh 5678 drivers/md/raid5.c if (sh) { sh 5697 drivers/md/raid5.c raid5_release_stripe(sh); sh 5707 drivers/md/raid5.c raid5_release_stripe(sh); sh 5711 drivers/md/raid5.c if (test_bit(STRIPE_EXPANDING, &sh->state) || sh 5712 drivers/md/raid5.c !add_stripe_bio(sh, bi, 
dd_idx, rw, previous)) { sh 5718 drivers/md/raid5.c raid5_release_stripe(sh); sh 5724 drivers/md/raid5.c set_bit(STRIPE_R5C_PREFLUSH, &sh->state); sh 5729 drivers/md/raid5.c if (!sh->batch_head || sh == sh->batch_head) sh 5730 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 5731 drivers/md/raid5.c clear_bit(STRIPE_DELAYED, &sh->state); sh 5732 drivers/md/raid5.c if ((!sh->batch_head || sh == sh->batch_head) && sh 5734 drivers/md/raid5.c !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) sh 5736 drivers/md/raid5.c release_stripe_plug(mddev, sh); sh 5765 drivers/md/raid5.c struct stripe_head *sh; sh 5916 drivers/md/raid5.c sh = raid5_get_active_stripe(conf, stripe_addr+i, 0, 0, 1); sh 5917 drivers/md/raid5.c set_bit(STRIPE_EXPANDING, &sh->state); sh 5922 drivers/md/raid5.c for (j=sh->disks; j--;) { sh 5924 drivers/md/raid5.c if (j == sh->pd_idx) sh 5927 drivers/md/raid5.c j == sh->qd_idx) sh 5929 drivers/md/raid5.c s = raid5_compute_blocknr(sh, j, 0); sh 5934 drivers/md/raid5.c memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); sh 5935 drivers/md/raid5.c set_bit(R5_Expanded, &sh->dev[j].flags); sh 5936 drivers/md/raid5.c set_bit(R5_UPTODATE, &sh->dev[j].flags); sh 5939 drivers/md/raid5.c set_bit(STRIPE_EXPAND_READY, &sh->state); sh 5940 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 5942 drivers/md/raid5.c list_add(&sh->lru, &stripes); sh 5965 drivers/md/raid5.c sh = raid5_get_active_stripe(conf, first_sector, 1, 0, 1); sh 5966 drivers/md/raid5.c set_bit(STRIPE_EXPAND_SOURCE, &sh->state); sh 5967 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 5968 drivers/md/raid5.c raid5_release_stripe(sh); sh 5975 drivers/md/raid5.c sh = list_entry(stripes.next, struct stripe_head, lru); sh 5976 drivers/md/raid5.c list_del_init(&sh->lru); sh 5977 drivers/md/raid5.c raid5_release_stripe(sh); sh 6026 drivers/md/raid5.c struct stripe_head *sh; sh 6084 drivers/md/raid5.c sh = raid5_get_active_stripe(conf, sector_nr, 0, 1, 0); sh 6085 drivers/md/raid5.c if (sh == NULL) { sh 6086 drivers/md/raid5.c sh = raid5_get_active_stripe(conf, sector_nr, 0, 0, 0); sh 6107 drivers/md/raid5.c set_bit(STRIPE_SYNC_REQUESTED, &sh->state); sh 6108 drivers/md/raid5.c set_bit(STRIPE_HANDLE, &sh->state); sh 6110 drivers/md/raid5.c raid5_release_stripe(sh); sh 6128 drivers/md/raid5.c struct stripe_head *sh; sh 6149 drivers/md/raid5.c sh = raid5_get_active_stripe(conf, sector, 0, 1, 1); sh 6151 drivers/md/raid5.c if (!sh) { sh 6158 drivers/md/raid5.c if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { sh 6159 drivers/md/raid5.c raid5_release_stripe(sh); sh 6165 drivers/md/raid5.c set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); sh 6166 drivers/md/raid5.c handle_stripe(sh); sh 6167 drivers/md/raid5.c raid5_release_stripe(sh); sh 6184 drivers/md/raid5.c struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; sh 6189 drivers/md/raid5.c (sh = __get_priority_stripe(conf, group)) != NULL) sh 6190 drivers/md/raid5.c batch[batch_size++] = sh; sh 757 drivers/md/raid5.h extern sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous); sh 758 drivers/md/raid5.h extern void raid5_release_stripe(struct stripe_head *sh); sh 761 drivers/md/raid5.h struct stripe_head *sh); sh 258 drivers/media/platform/exynos-gsc/gsc-core.c void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh) sh 261 drivers/media/platform/exynos-gsc/gsc-core.c *sh = 4; sh 264 drivers/media/platform/exynos-gsc/gsc-core.c *sh = 3; sh 268 drivers/media/platform/exynos-gsc/gsc-core.c *sh = 2; sh 270 
drivers/media/platform/exynos-gsc/gsc-core.c *sh = 0; sh 272 drivers/media/platform/exynos-gsc/gsc-core.c *sh = 1; sh 607 drivers/media/platform/exynos-gsc/gsc-core.c int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw, sh 626 drivers/media/platform/exynos-gsc/gsc-core.c (sh / tmp_h) > sc_down_max || sh 628 drivers/media/platform/exynos-gsc/gsc-core.c (tmp_h / sh) > var->sc_up_max) sh 394 drivers/media/platform/exynos-gsc/gsc-core.h void gsc_get_prescaler_shfactor(u32 hratio, u32 vratio, u32 *sh); sh 398 drivers/media/platform/exynos-gsc/gsc-core.h int gsc_check_scaler_ratio(struct gsc_variant *var, int sw, int sh, int dw, sh 192 drivers/media/platform/exynos4-is/fimc-core.c int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh, sh 199 drivers/media/platform/exynos4-is/fimc-core.c return (sw == dw && sh == dh) ? 0 : -EINVAL; sh 201 drivers/media/platform/exynos4-is/fimc-core.c if ((sw >= SCALER_MAX_HRATIO * dw) || (sh >= SCALER_MAX_VRATIO * dh)) sh 209 drivers/media/platform/exynos4-is/fimc-core.c u32 sh = 6; sh 214 drivers/media/platform/exynos4-is/fimc-core.c while (sh--) { sh 215 drivers/media/platform/exynos4-is/fimc-core.c u32 tmp = 1 << sh; sh 217 drivers/media/platform/exynos4-is/fimc-core.c *shift = sh, *ratio = tmp; sh 629 drivers/media/platform/exynos4-is/fimc-core.h int fimc_check_scaler_ratio(struct fimc_ctx *ctx, int sw, int sh, sh 126 drivers/media/platform/s3c-camif/camif-core.c unsigned int sh = 6; sh 131 drivers/media/platform/s3c-camif/camif-core.c while (sh--) { sh 132 drivers/media/platform/s3c-camif/camif-core.c unsigned int tmp = 1 << sh; sh 134 drivers/media/platform/s3c-camif/camif-core.c *shift = sh, *ratio = tmp; sh 383 drivers/message/fusion/mptbase.c hd = shost_priv(ioc->sh); sh 1850 drivers/message/fusion/mptbase.c ioc->sh = NULL; sh 678 drivers/message/fusion/mptbase.h struct Scsi_Host *sh; /* Scsi Host pointer */ sh 1310 drivers/message/fusion/mptctl.c if (ioc->sh) { sh 1311 drivers/message/fusion/mptctl.c shost_for_each_device(sdev, ioc->sh) { sh 1422 drivers/message/fusion/mptctl.c if (ioc->sh){ sh 1423 drivers/message/fusion/mptctl.c shost_for_each_device(sdev, ioc->sh) { sh 1853 drivers/message/fusion/mptctl.c if (ioc->sh) { sh 1896 drivers/message/fusion/mptctl.c shost_for_each_device(sdev, ioc->sh) { sh 1943 drivers/message/fusion/mptctl.c if (!ioc->sh) { sh 1958 drivers/message/fusion/mptctl.c if (ioc->sh) { sh 2374 drivers/message/fusion/mptctl.c if (ioc->sh != NULL) sh 2375 drivers/message/fusion/mptctl.c karg.host_no = ioc->sh->host_no; sh 2448 drivers/message/fusion/mptctl.c if (ioc->sh != NULL) { sh 2449 drivers/message/fusion/mptctl.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 2587 drivers/message/fusion/mptctl.c if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL)) sh 2590 drivers/message/fusion/mptctl.c if (ioc->sh->host_no != karg.hdr.host) sh 2673 drivers/message/fusion/mptctl.c hd = shost_priv(ioc->sh); sh 208 drivers/message/fusion/mptfc.c ioc->name, ioc->sh->host_no, sh 222 drivers/message/fusion/mptfc.c ioc->name, ioc->sh->host_no, sh 229 drivers/message/fusion/mptfc.c ioc->name, ioc->sh->host_no, sh 461 drivers/message/fusion/mptfc.c rport = fc_remote_port_add(ioc->sh, channel, &rport_ids); sh 489 drivers/message/fusion/mptfc.c ioc->sh->host_no, sh 982 drivers/message/fusion/mptfc.c struct Scsi_Host *sh; sh 990 drivers/message/fusion/mptfc.c sh = ioc->sh; sh 992 drivers/message/fusion/mptfc.c sn = fc_host_symbolic_name(sh); sh 998 drivers/message/fusion/mptfc.c fc_host_tgtid_bind_type(sh) = 
FC_TGTID_BIND_BY_WWPN; sh 1000 drivers/message/fusion/mptfc.c fc_host_maxframe_size(sh) = pp0->MaxFrameSize; sh 1002 drivers/message/fusion/mptfc.c fc_host_node_name(sh) = sh 1005 drivers/message/fusion/mptfc.c fc_host_port_name(sh) = sh 1008 drivers/message/fusion/mptfc.c fc_host_port_id(sh) = pp0->PortIdentifier; sh 1017 drivers/message/fusion/mptfc.c fc_host_supported_classes(sh) = cos; sh 1029 drivers/message/fusion/mptfc.c fc_host_speed(sh) = speed; sh 1040 drivers/message/fusion/mptfc.c fc_host_supported_speeds(sh) = speed; sh 1047 drivers/message/fusion/mptfc.c fc_host_port_state(sh) = port_state; sh 1058 drivers/message/fusion/mptfc.c fc_host_port_type(sh) = port_type; sh 1060 drivers/message/fusion/mptfc.c fc_host_fabric_name(sh) = sh 1107 drivers/message/fusion/mptfc.c ioc->sh->host_no, sh 1162 drivers/message/fusion/mptfc.c ioc->sh->host_no, sh 1171 drivers/message/fusion/mptfc.c struct Scsi_Host *sh; sh 1223 drivers/message/fusion/mptfc.c sh = scsi_host_alloc(&mptfc_driver_template, sizeof(MPT_SCSI_HOST)); sh 1225 drivers/message/fusion/mptfc.c if (!sh) { sh 1242 drivers/message/fusion/mptfc.c ioc->sh = sh; sh 1244 drivers/message/fusion/mptfc.c sh->io_port = 0; sh 1245 drivers/message/fusion/mptfc.c sh->n_io_port = 0; sh 1246 drivers/message/fusion/mptfc.c sh->irq = 0; sh 1249 drivers/message/fusion/mptfc.c sh->max_cmd_len = 16; sh 1251 drivers/message/fusion/mptfc.c sh->max_id = ioc->pfacts->MaxDevices; sh 1252 drivers/message/fusion/mptfc.c sh->max_lun = max_lun; sh 1256 drivers/message/fusion/mptfc.c sh->unique_id = ioc->id; sh 1278 drivers/message/fusion/mptfc.c if (numSGE < sh->sg_tablesize) { sh 1282 drivers/message/fusion/mptfc.c ioc->name, numSGE, sh->sg_tablesize)); sh 1283 drivers/message/fusion/mptfc.c sh->sg_tablesize = numSGE; sh 1288 drivers/message/fusion/mptfc.c hd = shost_priv(sh); sh 1306 drivers/message/fusion/mptfc.c sh->transportt = mptfc_transport_template; sh 1307 drivers/message/fusion/mptfc.c error = scsi_add_host (sh, &ioc->pcidev->dev); sh 1317 drivers/message/fusion/mptfc.c "mptfc_wq_%d", sh->host_no); sh 1346 drivers/message/fusion/mptfc.c scsi_remove_host(sh); sh 1380 drivers/message/fusion/mptfc.c if (ioc->sh == NULL || sh 1381 drivers/message/fusion/mptfc.c ((hd = shost_priv(ioc->sh)) == NULL)) sh 1510 drivers/message/fusion/mptfc.c fc_remove_host(ioc->sh); sh 1527 drivers/message/fusion/mptfc.c scsi_remove_host(ioc->sh); sh 338 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 597 drivers/message/fusion/mptsas.c shost_for_each_device(sdev, ioc->sh) { sh 972 drivers/message/fusion/mptsas.c shost_for_each_device(sdev, ioc->sh) { sh 1113 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 1160 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 1195 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 1296 drivers/message/fusion/mptsas.c hd = shost_priv(ioc->sh); sh 3342 drivers/message/fusion/mptsas.c mptsas_probe_one_phy(&ioc->sh->shost_gendev, sh 3743 drivers/message/fusion/mptsas.c shost_for_each_device(sdev, ioc->sh) { sh 4004 drivers/message/fusion/mptsas.c sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, sh 4013 drivers/message/fusion/mptsas.c scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, sh 4076 drivers/message/fusion/mptsas.c shost_for_each_device(sdev, ioc->sh) { sh 4536 drivers/message/fusion/mptsas.c scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, sh 4656 drivers/message/fusion/mptsas.c sdev = scsi_device_lookup(ioc->sh, 
MPTSAS_RAID_CHANNEL, sh 5106 drivers/message/fusion/mptsas.c sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0); sh 5127 drivers/message/fusion/mptsas.c struct Scsi_Host *sh; sh 5183 drivers/message/fusion/mptsas.c sh = scsi_host_alloc(&mptsas_driver_template, sizeof(MPT_SCSI_HOST)); sh 5184 drivers/message/fusion/mptsas.c if (!sh) { sh 5196 drivers/message/fusion/mptsas.c ioc->sh = sh; sh 5198 drivers/message/fusion/mptsas.c sh->io_port = 0; sh 5199 drivers/message/fusion/mptsas.c sh->n_io_port = 0; sh 5200 drivers/message/fusion/mptsas.c sh->irq = 0; sh 5203 drivers/message/fusion/mptsas.c sh->max_cmd_len = 16; sh 5204 drivers/message/fusion/mptsas.c sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue); sh 5205 drivers/message/fusion/mptsas.c sh->max_id = -1; sh 5206 drivers/message/fusion/mptsas.c sh->max_lun = max_lun; sh 5207 drivers/message/fusion/mptsas.c sh->transportt = mptsas_transport_template; sh 5211 drivers/message/fusion/mptsas.c sh->unique_id = ioc->id; sh 5239 drivers/message/fusion/mptsas.c if (numSGE < sh->sg_tablesize) { sh 5243 drivers/message/fusion/mptsas.c ioc->name, numSGE, sh->sg_tablesize)); sh 5244 drivers/message/fusion/mptsas.c sh->sg_tablesize = numSGE; sh 5258 drivers/message/fusion/mptsas.c ioc->name, mpt_loadtime_max_sectors, sh->max_sectors)); sh 5259 drivers/message/fusion/mptsas.c sh->max_sectors = mpt_loadtime_max_sectors; sh 5262 drivers/message/fusion/mptsas.c hd = shost_priv(sh); sh 5293 drivers/message/fusion/mptsas.c error = scsi_add_host(sh, &ioc->pcidev->dev); sh 5328 drivers/message/fusion/mptsas.c if (!ioc->sh) { sh 5339 drivers/message/fusion/mptsas.c sas_remove_host(ioc->sh); sh 598 drivers/message/fusion/mptscsih.c hd = shost_priv(ioc->sh); sh 1177 drivers/message/fusion/mptscsih.c struct Scsi_Host *host = ioc->sh; sh 1202 drivers/message/fusion/mptscsih.c ioc->sh = NULL; sh 1232 drivers/message/fusion/mptscsih.c scsi_block_requests(ioc->sh); sh 1251 drivers/message/fusion/mptscsih.c scsi_unblock_requests(ioc->sh); sh 2333 drivers/message/fusion/mptscsih.c max_depth = ioc->sh->can_queue; sh 2353 drivers/message/fusion/mptscsih.c struct Scsi_Host *sh = sdev->host; sh 2357 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(sh); sh 2550 drivers/message/fusion/mptscsih.c if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL) sh 2553 drivers/message/fusion/mptscsih.c hd = shost_priv(ioc->sh); sh 1116 drivers/message/fusion/mptspi.c struct Scsi_Host *shost = ioc->sh; sh 1147 drivers/message/fusion/mptspi.c scsi_scan_target(&ioc->sh->shost_gendev, 1, disk, 0, SCSI_SCAN_RESCAN); sh 1157 drivers/message/fusion/mptspi.c shost_printk(KERN_ERR, ioc->sh, MYIOC_s_FMT sh 1173 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 1270 drivers/message/fusion/mptspi.c shost_for_each_device(sdev, ioc->sh) { sh 1281 drivers/message/fusion/mptspi.c shost_for_each_device(sdev, ioc->sh) sh 1315 drivers/message/fusion/mptspi.c ioc->sh) { sh 1316 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 1332 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); sh 1354 drivers/message/fusion/mptspi.c struct Scsi_Host *sh; sh 1406 drivers/message/fusion/mptspi.c sh = scsi_host_alloc(&mptspi_driver_template, sizeof(MPT_SCSI_HOST)); sh 1408 drivers/message/fusion/mptspi.c if (!sh) { sh 1419 drivers/message/fusion/mptspi.c sh->no_write_same = 1; sh 1425 drivers/message/fusion/mptspi.c ioc->sh = sh; sh 1427 drivers/message/fusion/mptspi.c sh->io_port = 0; 
sh 1428 drivers/message/fusion/mptspi.c sh->n_io_port = 0; sh 1429 drivers/message/fusion/mptspi.c sh->irq = 0; sh 1432 drivers/message/fusion/mptspi.c sh->max_cmd_len = 16; sh 1444 drivers/message/fusion/mptspi.c sh->max_id = ioc->devices_per_bus; sh 1446 drivers/message/fusion/mptspi.c sh->max_lun = MPT_LAST_LUN + 1; sh 1451 drivers/message/fusion/mptspi.c sh->max_channel = 1; sh 1453 drivers/message/fusion/mptspi.c sh->max_channel = 0; sh 1454 drivers/message/fusion/mptspi.c sh->this_id = ioc->pfacts[0].PortSCSIID; sh 1458 drivers/message/fusion/mptspi.c sh->unique_id = ioc->id; sh 1480 drivers/message/fusion/mptspi.c if (numSGE < sh->sg_tablesize) { sh 1484 drivers/message/fusion/mptspi.c ioc->name, numSGE, sh->sg_tablesize)); sh 1485 drivers/message/fusion/mptspi.c sh->sg_tablesize = numSGE; sh 1490 drivers/message/fusion/mptspi.c hd = shost_priv(sh); sh 1519 drivers/message/fusion/mptspi.c sh->transportt = mptspi_transport_template; sh 1521 drivers/message/fusion/mptspi.c error = scsi_add_host (sh, &ioc->pcidev->dev); sh 1536 drivers/message/fusion/mptspi.c scsi_scan_host(sh); sh 1549 drivers/message/fusion/mptspi.c scsi_remove_host(ioc->sh); sh 496 drivers/misc/sgi-gru/gruhandles.h #define GRU_PAGESIZE(sh) ((((sh) > 20 ? (sh) + 2 : (sh)) >> 1) - 6) sh 497 drivers/misc/sgi-gru/gruhandles.h #define GRU_SIZEAVAIL(sh) (1UL << GRU_PAGESIZE(sh)) sh 3027 drivers/net/ethernet/broadcom/bnxt/bnxt.c bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS); sh 3065 drivers/net/ethernet/broadcom/bnxt/bnxt.c if ((sh && i < bp->tx_nr_rings) || sh 3066 drivers/net/ethernet/broadcom/bnxt/bnxt.c (!sh && i >= bp->rx_nr_rings)) { sh 5954 drivers/net/ethernet/broadcom/bnxt/bnxt.c bool sh = false; sh 5960 drivers/net/ethernet/broadcom/bnxt/bnxt.c sh = true; sh 6001 drivers/net/ethernet/broadcom/bnxt/bnxt.c rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); sh 6004 drivers/net/ethernet/broadcom/bnxt/bnxt.c cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings; sh 10342 drivers/net/ethernet/broadcom/bnxt/bnxt.c int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, sh 10353 drivers/net/ethernet/broadcom/bnxt/bnxt.c rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh); sh 10370 drivers/net/ethernet/broadcom/bnxt/bnxt.c cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; sh 10919 drivers/net/ethernet/broadcom/bnxt/bnxt.c bool sh = false; sh 10932 drivers/net/ethernet/broadcom/bnxt/bnxt.c sh = true; sh 10935 drivers/net/ethernet/broadcom/bnxt/bnxt.c sh, tc, bp->tx_nr_rings_xdp); sh 10951 drivers/net/ethernet/broadcom/bnxt/bnxt.c bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : sh 11561 drivers/net/ethernet/broadcom/bnxt/bnxt.c static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) sh 11568 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (sh) sh 11580 drivers/net/ethernet/broadcom/bnxt/bnxt.c rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh); sh 11585 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (sh) sh 11595 drivers/net/ethernet/broadcom/bnxt/bnxt.c if (sh) sh 1997 drivers/net/ethernet/broadcom/bnxt/bnxt.h int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, sh 812 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c bool sh = false; sh 832 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c sh = true; sh 836 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c req_tx_rings = sh ? channel->combined_count : channel->tx_count; sh 837 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c req_rx_rings = sh ? 
channel->combined_count : channel->rx_count; sh 839 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c if (!sh) { sh 845 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp); sh 865 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c if (sh) { sh 879 drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : sh 1263 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct skb_shared_info *sh = skb_shinfo(skb); sh 1266 drivers/net/ethernet/cavium/thunder/nicvf_queues.c unsigned int p_len = sh->gso_size; sh 1274 drivers/net/ethernet/cavium/thunder/nicvf_queues.c for (segment = 0; segment < sh->gso_segs; segment++) { sh 1282 drivers/net/ethernet/cavium/thunder/nicvf_queues.c f_size = skb_frag_size(&sh->frags[f_id]); sh 1301 drivers/net/ethernet/cavium/thunder/nicvf_queues.c return num_edescs + sh->gso_segs; sh 7677 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, recr2, sh, 0x00, 8, 1); sh 1587 drivers/net/ethernet/netronome/nfp/bpf/jit.c SHF_SC_R_SHF, rvalue.sh - 1); sh 1594 drivers/net/ethernet/netronome/nfp/bpf/jit.c dst_b, SHF_SC_R_SHF, rvalue.sh); sh 1524 drivers/net/fddi/skfp/skfddi.c unsigned char s[255], sh[10]; sh 1535 drivers/net/fddi/skfp/skfddi.c sprintf(sh, "%02x ", Data[j + i]); sh 1536 drivers/net/fddi/skfp/skfddi.c strcat(s, sh); sh 990 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct sdpcm_shared *sh) sh 1035 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->flags = le32_to_cpu(sh_le.flags); sh 1036 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->trap_addr = le32_to_cpu(sh_le.trap_addr); sh 1037 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr); sh 1038 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr); sh 1039 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->assert_line = le32_to_cpu(sh_le.assert_line); sh 1040 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->console_addr = le32_to_cpu(sh_le.console_addr); sh 1041 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr); sh 1043 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) { sh 1046 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->flags & SDPCM_SHARED_VERSION_MASK); sh 1060 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct sdpcm_shared sh; sh 1062 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (brcmf_sdio_readshared(bus, &sh) == 0) sh 1063 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c bus->console_addr = sh.console_addr; sh 2935 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct sdpcm_shared *sh) sh 2943 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c addr = sh->console_addr + offsetof(struct rte_console, log_le); sh 2950 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c addr = sh->console_addr + offsetof(struct rte_console, log_le.buf_size); sh 2957 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c addr = sh->console_addr + offsetof(struct rte_console, log_le.idx); sh 2992 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct sdpcm_shared *sh) sh 2997 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if ((sh->flags & SDPCM_SHARED_TRAP) == 0) { sh 3002 
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c error = brcmf_sdiod_ramrw(bus->sdiodev, false, sh->trap_addr, (u8 *)&tr, sh 3017 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.pc), sh->trap_addr, sh 3031 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c le32_to_cpu(tr.pc), sh->trap_addr, sh 3040 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct sdpcm_shared *sh) sh 3046 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if ((sh->flags & SDPCM_SHARED_ASSERT_BUILT) == 0) { sh 3049 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c } else if ((sh->flags & SDPCM_SHARED_ASSERT) == 0) { sh 3055 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (sh->assert_file_addr != 0) { sh 3057 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->assert_file_addr, (u8 *)file, 80); sh 3061 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (sh->assert_exp_addr != 0) { sh 3063 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c sh->assert_exp_addr, (u8 *)expr, 80); sh 3070 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c file, sh->assert_line, expr); sh 3077 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct sdpcm_shared sh; sh 3079 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c error = brcmf_sdio_readshared(bus, &sh); sh 3084 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if ((sh.flags & SDPCM_SHARED_ASSERT_BUILT) == 0) sh 3086 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c else if (sh.flags & SDPCM_SHARED_ASSERT) sh 3089 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (sh.flags & SDPCM_SHARED_TRAP) { sh 3091 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c brcmf_sdio_trap_info(NULL, bus, &sh); sh 3100 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct sdpcm_shared sh; sh 3102 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c error = brcmf_sdio_readshared(bus, &sh); sh 3106 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c error = brcmf_sdio_assert_info(seq, bus, &sh); sh 3110 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c error = brcmf_sdio_trap_info(seq, bus, &sh); sh 3114 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c error = brcmf_sdio_dump_console(seq, bus, &sh); sh 121 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_ucode_wake_override_phyreg_set(pi->sh->physhim); sh 127 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_ucode_wake_override_phyreg_clear(pi->sh->physhim); sh 133 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, MCTL_LOCK_RADIO); sh 144 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0); sh 174 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if ((D11REV_GE(pi->sh->corerev, 24)) || sh 175 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c (D11REV_IS(pi->sh->corerev, 22) sh 190 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if ((D11REV_GE(pi->sh->corerev, 24)) || sh 191 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c (D11REV_IS(pi->sh->corerev, 22) sh 212 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (D11REV_GE(pi->sh->corerev, 24)) { sh 356 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c struct shared_phy *sh; sh 358 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC); 
sh 359 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (sh == NULL) sh 362 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->physhim = shp->physhim; sh 363 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->unit = shp->unit; sh 364 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->corerev = shp->corerev; sh 366 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->vid = shp->vid; sh 367 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->did = shp->did; sh 368 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->chip = shp->chip; sh 369 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->chiprev = shp->chiprev; sh 370 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->chippkg = shp->chippkg; sh 371 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->sromrev = shp->sromrev; sh 372 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->boardtype = shp->boardtype; sh 373 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->boardrev = shp->boardrev; sh 374 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->boardflags = shp->boardflags; sh 375 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->boardflags2 = shp->boardflags2; sh 377 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->fast_timer = PHY_SW_TIMER_FAST; sh 378 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->slow_timer = PHY_SW_TIMER_SLOW; sh 379 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->glacial_timer = PHY_SW_TIMER_GLACIAL; sh 381 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->rssi_mode = RSSI_ANT_MERGE_MAX; sh 383 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c return sh; sh 391 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (!pi->sh->up) { sh 418 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, sh 427 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (D11REV_IS(sh->corerev, 4)) sh 437 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi = sh->phy_head; sh 439 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); sh 449 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh = sh; sh 462 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags); sh 520 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phyrxchain = 0x3; sh 548 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->phycal_timer = wlapi_init_timer(pi->sh->physhim, sh 564 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->next = pi->sh->phy_head; sh 565 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c sh->phy_head = pi; sh 589 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (pi->sh->phy_head == pi) sh 590 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phy_head = pi->next; sh 591 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c else if (pi->sh->phy_head->next == pi) sh 592 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phy_head->next = NULL; sh 718 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (!pi || !pi->sh) sh 721 
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->clk = newstate; sh 728 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (!pi || !pi->sh) sh 731 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->up = newstate; sh 766 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_bw_set(pi->sh->physhim, sh 777 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) sh 783 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi, pi->sh->rx_antdiv); sh 830 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (pi->sh->chip == BCMA_CHIP_ID_BCM43224 && sh 831 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->chiprev == 1) { sh 840 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && sh 841 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c (pi->sh->chiprev == 1) && sh 874 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && sh 875 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c (pi->sh->chiprev == 1) && sh 911 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) && sh 912 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c (pi->sh->chiprev == 1)) { sh 998 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN, sh 1003 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (D11REV_GE(pi->sh->corerev, 11)) sh 1082 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->nphy_perical_last = pi->sh->now - pi->sh->glacial_timer; sh 1093 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_0, NULL_TSSI_W); sh 1094 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_B_TSSI_1, NULL_TSSI_W); sh 1095 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_0, NULL_TSSI_W); sh 1096 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_G_TSSI_1, NULL_TSSI_W); sh 1177 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_CURCHANNEL, m_cur_channel); sh 1352 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 1358 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 1374 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (pi->sh->up) { sh 1383 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 1389 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 1813 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->machwcap = machwcap; sh 1864 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 1868 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 1891 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (!pi->sh->clk) sh 1897 
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_MAX, 63); sh 1898 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_N, sh 1901 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_TARGET, sh 1904 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_TXPWR_CUR, sh 1912 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->physhim, sh 1914 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, offset + 6, sh 1916 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, offset + 14, sh 1920 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_mhf(pi->sh->physhim, MHF2, MHF2_HWPWRCTL, sh 1928 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_OFDM_OFFSET, sh 1962 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 1972 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 2068 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 2072 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 2097 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c } else if (pi->hwpwrctrl && pi->sh->up) { sh 2145 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->rx_antdiv = val; sh 2147 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (!(ISNPHY(pi) && D11REV_IS(pi->sh->corerev, 16))) { sh 2149 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV, sh 2152 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_ANTDIV, 0, sh 2159 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (!pi->sh->clk) sh 2165 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 2180 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 2218 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phy_noise_window[pi->sh->phy_noise_index] = sh 2220 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phy_noise_index = sh 2221 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c MODINC(pi->sh->phy_noise_index, MA_WINDOW_SZ); sh 2245 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP(idx)); sh 2246 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c hi = wlapi_bmac_read_shm(pi->sh->physhim, sh 2286 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX); sh 2289 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP0); sh 2290 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP1); sh 2293 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c lo = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP2); sh 2294 
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c hi = wlapi_bmac_read_shm(pi->sh->physhim, M_PWRIND_MAP3); sh 2299 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c status_1 = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_0); sh 2318 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c jssi_aux = wlapi_bmac_read_shm(pi->sh->physhim, M_JSSI_AUX); sh 2353 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->phynoise_now = pi->sh->now; sh 2375 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_JSSI_0, 0); sh 2376 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP0, 0); sh 2377 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP1, 0); sh 2378 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); sh 2379 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); sh 2384 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 2388 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 2395 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP0, 0); sh 2396 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP1, 0); sh 2397 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0); sh 2398 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0); sh 2418 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 2424 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 2525 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if ((pi->sh->corerev >= 11) sh 2585 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->now++; sh 2596 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if (pi->phynoise_state && (pi->sh->now - pi->phynoise_now) > 5) sh 2600 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c ((pi->sh->now - pi->phycal_txpower) >= pi->sh->fast_timer)) { sh 2603 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->phycal_txpower = pi->sh->now; sh 2614 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c ((pi->sh->now - pi->nphy_perical_last) >= sh 2615 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->glacial_timer)) sh 2624 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c ((pi->sh->now - pi->phy_lastcal) >= sh 2625 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->glacial_timer)) { sh 2648 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phy_noise_window[i] = (s8) (rssi & 0xff); sh 2651 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phy_noise_window[i] = sh 2654 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phy_noise_index = 0; sh 2798 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->hw_phytxchain = txchain; sh 2799 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c 
pi->sh->hw_phyrxchain = rxchain; sh 2800 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phytxchain = txchain; sh 2801 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phyrxchain = rxchain; sh 2802 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain); sh 2809 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->sh->phytxchain = txchain; sh 2814 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c pi->pubpi.phy_corenum = (u8)hweight8(pi->sh->phyrxchain); sh 2821 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c *txchain = pi->sh->phytxchain; sh 2822 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c *rxchain = pi->sh->phyrxchain; sh 2837 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 2839 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c wlapi_enable_mac(pi->sh->physhim); sh 2883 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) && sh 2884 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c (pi->sh->boardflags & BFL_FEM)) { sh 2887 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c txant = wlapi_bmac_get_txant(pi->sh->physhim); sh 172 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_hal.h struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh, sh 533 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h struct shared_phy *sh; sh 138 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_bmac_read_shm((pi)->sh->physhim, M_UCODE_MACSTAT + \ sh 1601 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_switch_macfreq(pi->sh->physhim, enable); sh 1817 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (!(pi->sh->boardflags & BFL_FEM)) { sh 2129 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) { sh 2870 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 2921 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 2952 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 3062 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 3079 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 3138 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 4018 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 4044 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 4062 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 4102 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 4128 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 4144 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 4176 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 4178 
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c pi->phy_lastcal = pi->sh->now; sh 4188 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 4204 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c pi->phy_lastcal = pi->sh->now; sh 4215 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000); sh 4216 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 4257 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlapi_enable_mac(pi->sh->physhim); sh 4355 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) sh 4367 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) sh 4450 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (!(pi->sh->boardrev < 0x1204)) sh 4579 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) sh 4594 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (!(pi->sh->boardflags & BFL_FEM)) { sh 4617 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM_BT) { sh 4627 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (!(pi->sh->boardflags & BFL_FEM)) { sh 4643 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) sh 4658 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_EXTLNA) sh 4664 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_EXTLNA_5GHz) sh 4674 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) { sh 4675 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM_BT) { sh 4676 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardrev < 0x1250) sh 4684 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM_BT) sh 4717 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (!(pi->sh->boardflags & BFL_FEM)) sh 4729 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) { sh 4844 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if ((pi->sh->boardflags & BFL_FEM) sh 5025 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (pi->sh->boardflags & BFL_FEM) sh 5050 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c if (0 == (pi->sh->boardflags & BFL_NOPA)) { sh 14350 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh && (pi->sh->_rifs_phy != rifs)) sh 14351 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->_rifs_phy = rifs; sh 14366 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->boardflags2 & BFL2_TXPWRCTRL_EN) && sh 14367 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c NREV_GE(pi->pubpi.phy_rev, 2) && (pi->sh->sromrev >= 4)) sh 14369 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c else if ((pi->sh->sromrev >= 4) sh 14370 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c && (pi->sh->boardflags2 & BFL2_5G_PWRGAIN)) sh 14380 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->sromrev >= 9) sh 14611 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if 
(pi->sh->boardflags2 & BFL2_SPUR_WAR) sh 14615 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags2 & BFL2_2G_SPUR_WAR) sh 15641 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags & BFL_EXTLNA) { sh 15709 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags & BFL_EXTLNA) { sh 15836 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags & BFL_EXTLNA_5GHz) { sh 16287 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->sromrev >= 8) sh 16288 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c && (pi->sh->boardflags2 & BFL2_IPALVLSHIFT_3P3)) sh 16646 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->hw_phyrxchain != 0x3) && sh 16647 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) { sh 16839 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlc_phy_war_txchain_upd_nphy(pi, pi->sh->hw_phytxchain); sh 16841 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (((pi->sh->boardflags2 & BFL2_APLL_WAR) && sh 16843 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (((pi->sh->boardflags2 & BFL2_GPLL_WAR) || sh 16844 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->boardflags2 & BFL2_GPLL_WAR2)) && sh 16889 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags2 & BFL2_SINGLEANT_CCK) sh 16890 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mhf(pi->sh->physhim, MHF4, sh 16922 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags2 & BFL2_SKWRKFEM_BRD || sh 16923 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->boardtype == 0x8b)) { sh 16980 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mhf(pi->sh->physhim, MHF3, sh 17578 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { sh 17579 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); sh 17594 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) sh 17595 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0); sh 17597 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->sromrev < 4) { sh 17710 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { sh 17711 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); sh 17730 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) sh 17731 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0); sh 17798 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->chip == BCMA_CHIP_ID_BCM47162) sh 17989 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->hw_phytxchain != 3) { sh 18589 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ, tx_comp.a0); sh 18590 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c 
wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 2, tx_comp.b0); sh 18591 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 4, tx_comp.a1); sh 18592 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_write_shm(pi->sh->physhim, M_20IN40_IQ + 6, tx_comp.b1); sh 18856 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_write_shm(pi->sh->physhim, M_CURR_IDX1, 0xFFFF); sh 18857 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_write_shm(pi->sh->physhim, M_CURR_IDX2, 0xFFFF); sh 19161 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) && sh 19162 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) { sh 19222 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c ((pi->sh->chippkg == BCMA_PKG_ID_BCM4717) || sh 19223 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->chippkg == BCMA_PKG_ID_BCM4718))) { sh 19224 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->boardflags & BFL_EXTLNA) && sh 19230 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((!PHY_IPA(pi)) && (pi->sh->chip == BCMA_CHIP_ID_BCM5357)) sh 19250 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c && pi->sh->boardflags2 & BFL2_INTERNDET_TXIQCAL))); sh 19300 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags2 & BFL2_SKWRKFEM_BRD) sh 19340 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_phyclk_fgc(pi->sh->physhim, ON); sh 19345 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF); sh 19347 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_macphyclk_set(pi->sh->physhim, ON); sh 19471 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->phyrxchain != 0x3) sh 19473 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->phyrxchain); sh 19547 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlc_phy_nphy_tkip_rifs_war(pi, pi->sh->_rifs_phy); sh 19563 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_phyclk_fgc(pi->sh->physhim, ON); sh 19570 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF); sh 19643 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->phyrxchain = rxcore_bitmask; sh 19645 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (!pi->sh->clk) sh 19651 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 19711 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_enable_mac(pi->sh->physhim); sh 20062 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->boardflags2 & BFL2_LEGACY) sh 20063 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c || (pi->sh->boardflags2 & BFL2_XTALBUFOUTEN)) sh 20094 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (((pi->sh->sromrev >= 4) sh 20095 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c && !(pi->sh->boardflags2 & BFL2_RXBB_INT_REG_DIS)) sh 20096 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c || ((pi->sh->sromrev < 4))) { sh 20604 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags2 & BFL2_GPLL_WAR) { sh 20611 
drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) || sh 20612 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) { sh 20630 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->boardflags2 & BFL2_GPLL_WAR2) && sh 20645 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->boardflags2 & BFL2_APLL_WAR) { sh 20673 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) || sh 20674 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->chip == BCMA_CHIP_ID_BCM47162)) { sh 20685 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 || sh 20686 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->chip == BCMA_CHIP_ID_BCM43225) && sh 20687 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC) { sh 20795 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 || sh 20796 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->chip == BCMA_CHIP_ID_BCM43225) && sh 20797 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC) sh 21111 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) sh 21112 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c && (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) { sh 21122 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) || sh 21123 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) { sh 21127 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false); sh 21130 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true); sh 21133 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) || sh 21134 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->chip == BCMA_CHIP_ID_BCM43225)) { sh 21150 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (!((pi->sh->chip == BCMA_CHIP_ID_BCM4716) || sh 21151 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (pi->sh->chip == BCMA_CHIP_ID_BCM47162))) sh 21152 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_core_phypll_reset(pi->sh->physhim); sh 21183 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_bw_set(pi->sh->physhim, CHSPEC_BW(chanspec)); sh 21310 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 16)) { sh 21314 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 21323 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 16) && !suspended) sh 21324 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_enable_mac(pi->sh->physhim); sh 22114 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->chip == BCMA_CHIP_ID_BCM5357) { sh 23007 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->rssi_mode == RSSI_ANT_MERGE_MAX) sh 23009 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c else if 
(pi->sh->rssi_mode == RSSI_ANT_MERGE_MIN) sh 23011 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c else if (pi->sh->rssi_mode == RSSI_ANT_MERGE_AVG) sh 24866 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->sh->chip == BCMA_CHIP_ID_BCM47162) { sh 25169 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 25179 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->nphy_papd_last_cal = pi->sh->now; sh 25421 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_enable_mac(pi->sh->physhim); sh 25451 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000); sh 25453 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 25497 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_enable_mac(pi->sh->physhim); sh 25498 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, sh 25500 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 25511 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->nphy_perical_last = pi->sh->now; sh 25529 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c pi->nphy_perical_last = pi->sh->now; sh 25658 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_enable_mac(pi->sh->physhim); sh 27747 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (pi->sh->sromrev < 4) { sh 28138 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) { sh 28139 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK); sh 28146 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) sh 28147 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, 0); sh 28256 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_IQSWAP_WAR, sh 28315 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_mhf(pi->sh->physhim, MHF1, MHF1_IQSWAP_WAR, sh 28470 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_phyclk_fgc(pi->sh->physhim, ON); sh 28527 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_bmac_phyclk_fgc(pi->sh->physhim, OFF); sh 28614 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_suspend_mac_and_wait(pi->sh->physhim); sh 28623 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c wlapi_enable_mac(pi->sh->physhim); sh 2500 drivers/net/wireless/intel/iwlegacy/4965-mac.c int i, sh, ack; sh 2518 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4); sh 2519 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (sh < 0) /* tbw something is wrong with indices */ sh 2520 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh += 0x100; sh 2522 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (agg->frame_count > (64 - sh)) { sh 2528 drivers/net/wireless/intel/iwlegacy/4965-mac.c bitmap = le64_to_cpu(ba_resp->bitmap) >> sh; sh 2656 drivers/net/wireless/intel/iwlegacy/4965-mac.c int i, sh, idx; sh 2724 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = idx - start; sh 2725 
drivers/net/wireless/intel/iwlegacy/4965-mac.c if (sh > 64) { sh 2726 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = (start - idx) + 0xff; sh 2727 drivers/net/wireless/intel/iwlegacy/4965-mac.c bitmap = bitmap << sh; sh 2728 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = 0; sh 2730 drivers/net/wireless/intel/iwlegacy/4965-mac.c } else if (sh < -64) sh 2731 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = 0xff - (start - idx); sh 2732 drivers/net/wireless/intel/iwlegacy/4965-mac.c else if (sh < 0) { sh 2733 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = start - idx; sh 2735 drivers/net/wireless/intel/iwlegacy/4965-mac.c bitmap = bitmap << sh; sh 2736 drivers/net/wireless/intel/iwlegacy/4965-mac.c sh = 0; sh 2738 drivers/net/wireless/intel/iwlegacy/4965-mac.c bitmap |= 1ULL << sh; sh 794 drivers/phy/rockchip/phy-rockchip-inno-usb2.c unsigned int sh = rport->port_cfg->utmi_hstdet.bitend - sh 817 drivers/phy/rockchip/phy-rockchip-inno-usb2.c (((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh); sh 484 drivers/pinctrl/bcm/pinctrl-cygnus-mux.c #define CYGNUS_PIN_GROUP(group_name, off, sh, al) \ sh 491 drivers/pinctrl/bcm/pinctrl-cygnus-mux.c .shift = sh, \ sh 372 drivers/pinctrl/bcm/pinctrl-ns2-mux.c #define NS2_PIN_GROUP(group_name, ba, off, sh, ma, al) \ sh 380 drivers/pinctrl/bcm/pinctrl-ns2-mux.c .shift = sh, \ sh 227 drivers/pinctrl/bcm/pinctrl-nsp-mux.c #define NSP_PIN_GROUP(group_name, ba, sh, ma, al) \ sh 234 drivers/pinctrl/bcm/pinctrl-nsp-mux.c .shift = sh, \ sh 193 drivers/scsi/aha1542.c static int aha1542_test_port(struct Scsi_Host *sh) sh 199 drivers/scsi/aha1542.c if (inb(STATUS(sh->io_port)) == 0xff) sh 205 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ sh 207 drivers/scsi/aha1542.c outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port)); sh 212 drivers/scsi/aha1542.c if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) sh 216 drivers/scsi/aha1542.c if (inb(INTRFLAGS(sh->io_port)) & INTRMASK) sh 222 drivers/scsi/aha1542.c aha1542_outb(sh->io_port, CMD_INQUIRY); sh 225 drivers/scsi/aha1542.c if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0)) sh 227 drivers/scsi/aha1542.c inquiry_result[i] = inb(DATA(sh->io_port)); sh 231 drivers/scsi/aha1542.c if (inb(STATUS(sh->io_port)) & DF) sh 235 drivers/scsi/aha1542.c if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0)) sh 239 drivers/scsi/aha1542.c outb(IRST, CONTROL(sh->io_port)); sh 261 drivers/scsi/aha1542.c struct Scsi_Host *sh = dev_id; sh 262 drivers/scsi/aha1542.c struct aha1542_hostdata *aha1542 = shost_priv(sh); sh 274 drivers/scsi/aha1542.c flag = inb(INTRFLAGS(sh->io_port)); sh 275 drivers/scsi/aha1542.c shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: "); sh 286 drivers/scsi/aha1542.c printk("status %02x\n", inb(STATUS(sh->io_port))); sh 291 drivers/scsi/aha1542.c spin_lock_irqsave(sh->host_lock, flags); sh 293 drivers/scsi/aha1542.c flag = inb(INTRFLAGS(sh->io_port)); sh 307 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 322 drivers/scsi/aha1542.c spin_unlock_irqrestore(sh->host_lock, flags); sh 325 drivers/scsi/aha1542.c shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n"); sh 336 drivers/scsi/aha1542.c shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n", sh 344 drivers/scsi/aha1542.c shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi); sh 350 drivers/scsi/aha1542.c spin_unlock_irqrestore(sh->host_lock, flags); sh 351 drivers/scsi/aha1542.c 
shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); sh 352 drivers/scsi/aha1542.c shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, sh 377 drivers/scsi/aha1542.c shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus, sh 392 drivers/scsi/aha1542.c static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) sh 395 drivers/scsi/aha1542.c struct aha1542_hostdata *aha1542 = shost_priv(sh); sh 418 drivers/scsi/aha1542.c shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d", sh 430 drivers/scsi/aha1542.c acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain, sh 432 drivers/scsi/aha1542.c if (dma_mapping_error(sh->dma_dev, acmd->chain_handle)) sh 439 drivers/scsi/aha1542.c spin_lock_irqsave(sh->host_lock, flags); sh 461 drivers/scsi/aha1542.c shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done); sh 491 drivers/scsi/aha1542.c shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain); sh 510 drivers/scsi/aha1542.c spin_unlock_irqrestore(sh->host_lock, flags); sh 522 drivers/scsi/aha1542.c static void setup_mailboxes(struct Scsi_Host *sh) sh 524 drivers/scsi/aha1542.c struct aha1542_hostdata *aha1542 = shost_priv(sh); sh 534 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ sh 536 drivers/scsi/aha1542.c if (aha1542_out(sh->io_port, mb_cmd, 5)) sh 537 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); sh 538 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 541 drivers/scsi/aha1542.c static int aha1542_getconfig(struct Scsi_Host *sh) sh 545 drivers/scsi/aha1542.c i = inb(STATUS(sh->io_port)); sh 547 drivers/scsi/aha1542.c i = inb(DATA(sh->io_port)); sh 549 drivers/scsi/aha1542.c aha1542_outb(sh->io_port, CMD_RETCONF); sh 550 drivers/scsi/aha1542.c aha1542_in(sh->io_port, inquiry_result, 3, 0); sh 551 drivers/scsi/aha1542.c if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) sh 552 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "error querying board settings\n"); sh 553 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 556 drivers/scsi/aha1542.c sh->dma_channel = 7; sh 559 drivers/scsi/aha1542.c sh->dma_channel = 6; sh 562 drivers/scsi/aha1542.c sh->dma_channel = 5; sh 565 drivers/scsi/aha1542.c sh->dma_channel = 0; sh 570 drivers/scsi/aha1542.c sh->dma_channel = 0xFF; sh 573 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n"); sh 578 drivers/scsi/aha1542.c sh->irq = 15; sh 581 drivers/scsi/aha1542.c sh->irq = 14; sh 584 drivers/scsi/aha1542.c sh->irq = 12; sh 587 drivers/scsi/aha1542.c sh->irq = 11; sh 590 drivers/scsi/aha1542.c sh->irq = 10; sh 593 drivers/scsi/aha1542.c sh->irq = 9; sh 596 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n"); sh 599 drivers/scsi/aha1542.c sh->this_id = inquiry_result[2] & 7; sh 606 drivers/scsi/aha1542.c static int aha1542_mbenable(struct Scsi_Host *sh) sh 614 drivers/scsi/aha1542.c aha1542_outb(sh->io_port, CMD_EXTBIOS); sh 615 drivers/scsi/aha1542.c if (aha1542_in(sh->io_port, mbenable_result, 2, 100)) sh 617 drivers/scsi/aha1542.c if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100)) sh 619 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 629 drivers/scsi/aha1542.c if (aha1542_out(sh->io_port, mbenable_cmd, 3)) sh 634 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "Mailbox init failed\n"); sh 636 drivers/scsi/aha1542.c 
aha1542_intr_reset(sh->io_port); sh 641 drivers/scsi/aha1542.c static int aha1542_query(struct Scsi_Host *sh) sh 643 drivers/scsi/aha1542.c struct aha1542_hostdata *aha1542 = shost_priv(sh); sh 646 drivers/scsi/aha1542.c i = inb(STATUS(sh->io_port)); sh 648 drivers/scsi/aha1542.c i = inb(DATA(sh->io_port)); sh 650 drivers/scsi/aha1542.c aha1542_outb(sh->io_port, CMD_INQUIRY); sh 651 drivers/scsi/aha1542.c aha1542_in(sh->io_port, inquiry_result, 4, 0); sh 652 drivers/scsi/aha1542.c if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) sh 653 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "error querying card type\n"); sh 654 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 665 drivers/scsi/aha1542.c shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n"); sh 672 drivers/scsi/aha1542.c aha1542->bios_translation = aha1542_mbenable(sh); sh 696 drivers/scsi/aha1542.c static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed) sh 701 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 702 drivers/scsi/aha1542.c if (aha1542_out(sh->io_port, oncmd, 2)) sh 709 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 710 drivers/scsi/aha1542.c if (aha1542_out(sh->io_port, offcmd, 2)) sh 717 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 718 drivers/scsi/aha1542.c if (aha1542_out(sh->io_port, dmacmd, 2)) sh 721 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 724 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n"); sh 725 drivers/scsi/aha1542.c aha1542_intr_reset(sh->io_port); sh 732 drivers/scsi/aha1542.c struct Scsi_Host *sh; sh 742 drivers/scsi/aha1542.c sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata)); sh 743 drivers/scsi/aha1542.c if (!sh) sh 745 drivers/scsi/aha1542.c aha1542 = shost_priv(sh); sh 747 drivers/scsi/aha1542.c sh->unique_id = base_io; sh 748 drivers/scsi/aha1542.c sh->io_port = base_io; sh 749 drivers/scsi/aha1542.c sh->n_io_port = AHA1542_REGION_SIZE; sh 753 drivers/scsi/aha1542.c if (!aha1542_test_port(sh)) sh 756 drivers/scsi/aha1542.c aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]); sh 757 drivers/scsi/aha1542.c if (aha1542_query(sh)) sh 759 drivers/scsi/aha1542.c if (aha1542_getconfig(sh) == -1) sh 762 drivers/scsi/aha1542.c if (sh->dma_channel != 0xFF) sh 763 drivers/scsi/aha1542.c snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel); sh 764 drivers/scsi/aha1542.c shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n", sh 765 drivers/scsi/aha1542.c sh->this_id, base_io, sh->irq, dma_info); sh 767 drivers/scsi/aha1542.c shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); sh 784 drivers/scsi/aha1542.c setup_mailboxes(sh); sh 786 drivers/scsi/aha1542.c if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { sh 787 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n"); sh 790 drivers/scsi/aha1542.c if (sh->dma_channel != 0xFF) { sh 791 drivers/scsi/aha1542.c if (request_dma(sh->dma_channel, "aha1542")) { sh 792 drivers/scsi/aha1542.c shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n"); sh 795 drivers/scsi/aha1542.c if (sh->dma_channel == 0 || sh->dma_channel >= 5) { sh 796 drivers/scsi/aha1542.c set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE); sh 797 drivers/scsi/aha1542.c enable_dma(sh->dma_channel); sh 801 drivers/scsi/aha1542.c if (scsi_add_host(sh, pdev)) sh 804 
drivers/scsi/aha1542.c scsi_scan_host(sh); sh 806 drivers/scsi/aha1542.c return sh; sh 809 drivers/scsi/aha1542.c if (sh->dma_channel != 0xff) sh 810 drivers/scsi/aha1542.c free_dma(sh->dma_channel); sh 812 drivers/scsi/aha1542.c free_irq(sh->irq, sh); sh 820 drivers/scsi/aha1542.c scsi_host_put(sh); sh 827 drivers/scsi/aha1542.c static int aha1542_release(struct Scsi_Host *sh) sh 829 drivers/scsi/aha1542.c struct aha1542_hostdata *aha1542 = shost_priv(sh); sh 830 drivers/scsi/aha1542.c struct device *dev = sh->dma_dev; sh 832 drivers/scsi/aha1542.c scsi_remove_host(sh); sh 833 drivers/scsi/aha1542.c if (sh->dma_channel != 0xff) sh 834 drivers/scsi/aha1542.c free_dma(sh->dma_channel); sh 839 drivers/scsi/aha1542.c if (sh->irq) sh 840 drivers/scsi/aha1542.c free_irq(sh->irq, sh); sh 841 drivers/scsi/aha1542.c if (sh->io_port && sh->n_io_port) sh 842 drivers/scsi/aha1542.c release_region(sh->io_port, sh->n_io_port); sh 843 drivers/scsi/aha1542.c scsi_host_put(sh); sh 854 drivers/scsi/aha1542.c struct Scsi_Host *sh = cmd->device->host; sh 855 drivers/scsi/aha1542.c struct aha1542_hostdata *aha1542 = shost_priv(sh); sh 863 drivers/scsi/aha1542.c spin_lock_irqsave(sh->host_lock, flags); sh 901 drivers/scsi/aha1542.c aha1542_outb(sh->io_port, CMD_START_SCSI); sh 902 drivers/scsi/aha1542.c spin_unlock_irqrestore(sh->host_lock, flags); sh 912 drivers/scsi/aha1542.c struct Scsi_Host *sh = cmd->device->host; sh 913 drivers/scsi/aha1542.c struct aha1542_hostdata *aha1542 = shost_priv(sh); sh 917 drivers/scsi/aha1542.c spin_lock_irqsave(sh->host_lock, flags); sh 928 drivers/scsi/aha1542.c spin_unlock_irqrestore(sh->host_lock, flags); sh 967 drivers/scsi/aha1542.c spin_unlock_irqrestore(sh->host_lock, flags); sh 1019 drivers/scsi/aha1542.c struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev); sh 1021 drivers/scsi/aha1542.c if (!sh) sh 1024 drivers/scsi/aha1542.c dev_set_drvdata(pdev, sh); sh 1055 drivers/scsi/aha1542.c struct Scsi_Host *sh; sh 1072 drivers/scsi/aha1542.c sh = aha1542_hw_init(&driver_template, &pdev->dev, indx); sh 1073 drivers/scsi/aha1542.c if (!sh) sh 1076 drivers/scsi/aha1542.c pnp_set_drvdata(pdev, sh); sh 973 drivers/scsi/esas2r/esas2r.h int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh); sh 648 drivers/scsi/esas2r/esas2r_main.c int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh) sh 650 drivers/scsi/esas2r/esas2r_main.c struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; sh 655 drivers/scsi/esas2r/esas2r_main.c esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no); sh 703 drivers/scsi/esas2r/esas2r_main.c const char *esas2r_info(struct Scsi_Host *sh) sh 705 drivers/scsi/esas2r/esas2r_main.c struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata; sh 708 drivers/scsi/esas2r/esas2r_main.c esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev), sh 717 drivers/scsi/esas2r/esas2r_main.c esas2r_proc_host = sh; sh 722 drivers/scsi/esas2r/esas2r_main.c esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev), sh 730 drivers/scsi/esas2r/esas2r_main.c sh->hostt->proc_dir, sh 735 drivers/scsi/esas2r/esas2r_main.c &(sh->shost_gendev), sh 177 drivers/scsi/fdomain.c static int fdomain_select(struct Scsi_Host *sh, int target) sh 181 drivers/scsi/fdomain.c struct fdomain *fd = shost_priv(sh); sh 184 drivers/scsi/fdomain.c outb(BIT(sh->this_id) | BIT(target), fd->base + REG_SCSI_DATA_NOACK); sh 264 drivers/scsi/fdomain.c struct Scsi_Host *sh = container_of((void *)fd, struct Scsi_Host, sh 271 drivers/scsi/fdomain.c 
spin_lock_irqsave(sh->host_lock, flags); sh 375 drivers/scsi/fdomain.c spin_unlock_irqrestore(sh->host_lock, flags); sh 397 drivers/scsi/fdomain.c static int fdomain_queue(struct Scsi_Host *sh, struct scsi_cmnd *cmd) sh 409 drivers/scsi/fdomain.c spin_lock_irqsave(sh->host_lock, flags); sh 424 drivers/scsi/fdomain.c spin_unlock_irqrestore(sh->host_lock, flags); sh 431 drivers/scsi/fdomain.c struct Scsi_Host *sh = cmd->device->host; sh 432 drivers/scsi/fdomain.c struct fdomain *fd = shost_priv(sh); sh 438 drivers/scsi/fdomain.c spin_lock_irqsave(sh->host_lock, flags); sh 446 drivers/scsi/fdomain.c spin_unlock_irqrestore(sh->host_lock, flags); sh 452 drivers/scsi/fdomain.c struct Scsi_Host *sh = cmd->device->host; sh 453 drivers/scsi/fdomain.c struct fdomain *fd = shost_priv(sh); sh 456 drivers/scsi/fdomain.c spin_lock_irqsave(sh->host_lock, flags); sh 458 drivers/scsi/fdomain.c spin_unlock_irqrestore(sh->host_lock, flags); sh 507 drivers/scsi/fdomain.c struct Scsi_Host *sh; sh 529 drivers/scsi/fdomain.c sh = scsi_host_alloc(&fdomain_template, sizeof(struct fdomain)); sh 530 drivers/scsi/fdomain.c if (!sh) sh 534 drivers/scsi/fdomain.c sh->this_id = this_id & 0x07; sh 536 drivers/scsi/fdomain.c sh->irq = irq; sh 537 drivers/scsi/fdomain.c sh->io_port = base; sh 538 drivers/scsi/fdomain.c sh->n_io_port = FDOMAIN_REGION_SIZE; sh 540 drivers/scsi/fdomain.c fd = shost_priv(sh); sh 551 drivers/scsi/fdomain.c shost_printk(KERN_INFO, sh, "%s chip at 0x%x irq %d SCSI ID %d\n", sh 553 drivers/scsi/fdomain.c base, irq, sh->this_id); sh 555 drivers/scsi/fdomain.c if (scsi_add_host(sh, dev)) sh 558 drivers/scsi/fdomain.c scsi_scan_host(sh); sh 560 drivers/scsi/fdomain.c return sh; sh 565 drivers/scsi/fdomain.c scsi_host_put(sh); sh 570 drivers/scsi/fdomain.c int fdomain_destroy(struct Scsi_Host *sh) sh 572 drivers/scsi/fdomain.c struct fdomain *fd = shost_priv(sh); sh 575 drivers/scsi/fdomain.c scsi_remove_host(sh); sh 576 drivers/scsi/fdomain.c if (sh->irq) sh 577 drivers/scsi/fdomain.c free_irq(sh->irq, fd); sh 578 drivers/scsi/fdomain.c scsi_host_put(sh); sh 114 drivers/scsi/fdomain.h int fdomain_destroy(struct Scsi_Host *sh); sh 88 drivers/scsi/fdomain_isa.c struct Scsi_Host *sh; sh 139 drivers/scsi/fdomain_isa.c sh = fdomain_create(base, irq, this_id, dev); sh 140 drivers/scsi/fdomain_isa.c if (!sh) { sh 145 drivers/scsi/fdomain_isa.c dev_set_drvdata(dev, sh); sh 154 drivers/scsi/fdomain_isa.c struct Scsi_Host *sh; sh 168 drivers/scsi/fdomain_isa.c sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev); sh 169 drivers/scsi/fdomain_isa.c if (!sh) { sh 175 drivers/scsi/fdomain_isa.c dev_set_drvdata(dev, sh); sh 181 drivers/scsi/fdomain_isa.c struct Scsi_Host *sh = dev_get_drvdata(dev); sh 182 drivers/scsi/fdomain_isa.c int base = sh->io_port; sh 184 drivers/scsi/fdomain_isa.c fdomain_destroy(sh); sh 11 drivers/scsi/fdomain_pci.c struct Scsi_Host *sh; sh 25 drivers/scsi/fdomain_pci.c sh = fdomain_create(pci_resource_start(pdev, 0), pdev->irq, 7, sh 27 drivers/scsi/fdomain_pci.c if (!sh) sh 30 drivers/scsi/fdomain_pci.c pci_set_drvdata(pdev, sh); sh 43 drivers/scsi/fdomain_pci.c struct Scsi_Host *sh = pci_get_drvdata(pdev); sh 45 drivers/scsi/fdomain_pci.c fdomain_destroy(sh); sh 277 drivers/scsi/hpsa.c static int hpsa_scan_finished(struct Scsi_Host *sh, sh 337 drivers/scsi/hpsa.c static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) sh 339 drivers/scsi/hpsa.c unsigned long *priv = shost_priv(sh); sh 5625 drivers/scsi/hpsa.c static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct 
scsi_cmnd *cmd) sh 5697 drivers/scsi/hpsa.c static void hpsa_scan_start(struct Scsi_Host *sh) sh 5699 drivers/scsi/hpsa.c struct ctlr_info *h = shost_to_hba(sh); sh 5775 drivers/scsi/hpsa.c static int hpsa_scan_finished(struct Scsi_Host *sh, sh 5778 drivers/scsi/hpsa.c struct ctlr_info *h = shost_to_hba(sh); sh 5790 drivers/scsi/hpsa.c struct Scsi_Host *sh; sh 5792 drivers/scsi/hpsa.c sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); sh 5793 drivers/scsi/hpsa.c if (sh == NULL) { sh 5798 drivers/scsi/hpsa.c sh->io_port = 0; sh 5799 drivers/scsi/hpsa.c sh->n_io_port = 0; sh 5800 drivers/scsi/hpsa.c sh->this_id = -1; sh 5801 drivers/scsi/hpsa.c sh->max_channel = 3; sh 5802 drivers/scsi/hpsa.c sh->max_cmd_len = MAX_COMMAND_SIZE; sh 5803 drivers/scsi/hpsa.c sh->max_lun = HPSA_MAX_LUN; sh 5804 drivers/scsi/hpsa.c sh->max_id = HPSA_MAX_LUN; sh 5805 drivers/scsi/hpsa.c sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS; sh 5806 drivers/scsi/hpsa.c sh->cmd_per_lun = sh->can_queue; sh 5807 drivers/scsi/hpsa.c sh->sg_tablesize = h->maxsgentries; sh 5808 drivers/scsi/hpsa.c sh->transportt = hpsa_sas_transport_template; sh 5809 drivers/scsi/hpsa.c sh->hostdata[0] = (unsigned long) h; sh 5810 drivers/scsi/hpsa.c sh->irq = pci_irq_vector(h->pdev, 0); sh 5811 drivers/scsi/hpsa.c sh->unique_id = sh->irq; sh 5813 drivers/scsi/hpsa.c h->scsi_host = sh; sh 8472 drivers/scsi/hpsa.c struct Scsi_Host *sh = NULL; sh 8486 drivers/scsi/hpsa.c sh = scsi_host_get(h->scsi_host); sh 8487 drivers/scsi/hpsa.c if (sh != NULL) { sh 8488 drivers/scsi/hpsa.c hpsa_scan_start(sh); sh 8489 drivers/scsi/hpsa.c scsi_host_put(sh); sh 642 drivers/scsi/ips.c ips_release(struct Scsi_Host *sh) sh 650 drivers/scsi/ips.c scsi_remove_host(sh); sh 652 drivers/scsi/ips.c for (i = 0; i < IPS_MAX_ADAPTERS && ips_sh[i] != sh; i++) ; sh 661 drivers/scsi/ips.c ha = IPS_HA(sh); sh 699 drivers/scsi/ips.c scsi_host_put(sh); sh 6655 drivers/scsi/ips.c struct Scsi_Host *sh; sh 6657 drivers/scsi/ips.c sh = scsi_host_alloc(&ips_driver_template, sizeof (ips_ha_t)); sh 6658 drivers/scsi/ips.c if (!sh) { sh 6663 drivers/scsi/ips.c ha = IPS_HA(sh); sh 6676 drivers/scsi/ips.c sh->unique_id = (ha->io_addr) ? 
ha->io_addr : ha->mem_addr; sh 6677 drivers/scsi/ips.c sh->sg_tablesize = sh->hostt->sg_tablesize; sh 6678 drivers/scsi/ips.c sh->can_queue = sh->hostt->can_queue; sh 6679 drivers/scsi/ips.c sh->cmd_per_lun = sh->hostt->cmd_per_lun; sh 6680 drivers/scsi/ips.c sh->max_sectors = 128; sh 6682 drivers/scsi/ips.c sh->max_id = ha->ntargets; sh 6683 drivers/scsi/ips.c sh->max_lun = ha->nlun; sh 6684 drivers/scsi/ips.c sh->max_channel = ha->nbus - 1; sh 6685 drivers/scsi/ips.c sh->can_queue = ha->max_cmds - 1; sh 6687 drivers/scsi/ips.c if (scsi_add_host(sh, &ha->pcidev->dev)) sh 6690 drivers/scsi/ips.c ips_sh[index] = sh; sh 6693 drivers/scsi/ips.c scsi_scan_host(sh); sh 6700 drivers/scsi/ips.c scsi_host_put(sh); sh 6713 drivers/scsi/ips.c struct Scsi_Host *sh = pci_get_drvdata(pci_dev); sh 6717 drivers/scsi/ips.c ips_release(sh); sh 34 drivers/scsi/pcmcia/fdomain_cs.c struct Scsi_Host *sh; sh 51 drivers/scsi/pcmcia/fdomain_cs.c sh = fdomain_create(link->resource[0]->start, link->irq, 7, &link->dev); sh 52 drivers/scsi/pcmcia/fdomain_cs.c if (!sh) { sh 58 drivers/scsi/pcmcia/fdomain_cs.c link->priv = sh; sh 6548 drivers/scsi/qla2xxx/qla_target.c struct Scsi_Host *sh = vha->host; sh 6557 drivers/scsi/qla2xxx/qla_target.c scsi_host_put(sh); sh 1677 drivers/scsi/qla2xxx/tcm_qla2xxx.c struct Scsi_Host *sh = base_vha->host; sh 1703 drivers/scsi/qla2xxx/tcm_qla2xxx.c vport = fc_vport_create(sh, 0, &vport_id); sh 194 drivers/scsi/virtio_scsi.c struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); sh 195 drivers/scsi/virtio_scsi.c struct virtio_scsi *vscsi = shost_priv(sh); sh 222 drivers/scsi/virtio_scsi.c struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); sh 223 drivers/scsi/virtio_scsi.c struct virtio_scsi *vscsi = shost_priv(sh); sh 399 drivers/scsi/virtio_scsi.c struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); sh 400 drivers/scsi/virtio_scsi.c struct virtio_scsi *vscsi = shost_priv(sh); sh 937 drivers/scsi/virtio_scsi.c struct Scsi_Host *sh = virtio_scsi_host(vdev); sh 938 drivers/scsi/virtio_scsi.c struct virtio_scsi *vscsi = shost_priv(sh); sh 207 drivers/scsi/wd719x.c static int wd719x_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) sh 212 drivers/scsi/wd719x.c struct wd719x *wd = shost_priv(sh); sh 272 drivers/scsi/wd719x.c spin_lock_irqsave(wd->sh->host_lock, flags); sh 276 drivers/scsi/wd719x.c spin_unlock_irqrestore(wd->sh->host_lock, flags); sh 287 drivers/scsi/wd719x.c spin_unlock_irqrestore(wd->sh->host_lock, flags); sh 474 drivers/scsi/wd719x.c spin_lock_irqsave(wd->sh->host_lock, flags); sh 478 drivers/scsi/wd719x.c spin_unlock_irqrestore(wd->sh->host_lock, flags); sh 495 drivers/scsi/wd719x.c spin_lock_irqsave(wd->sh->host_lock, flags); sh 504 drivers/scsi/wd719x.c spin_unlock_irqrestore(wd->sh->host_lock, flags); sh 528 drivers/scsi/wd719x.c spin_lock_irqsave(wd->sh->host_lock, flags); sh 539 drivers/scsi/wd719x.c spin_unlock_irqrestore(wd->sh->host_lock, flags); sh 663 drivers/scsi/wd719x.c spin_lock_irqsave(wd->sh->host_lock, flags); sh 671 drivers/scsi/wd719x.c spin_unlock_irqrestore(wd->sh->host_lock, flags); sh 708 drivers/scsi/wd719x.c spin_unlock_irqrestore(wd->sh->host_lock, flags); sh 808 drivers/scsi/wd719x.c static int wd719x_board_found(struct Scsi_Host *sh) sh 810 drivers/scsi/wd719x.c struct wd719x *wd = shost_priv(sh); sh 818 drivers/scsi/wd719x.c sh->base = pci_resource_start(wd->pdev, 0); sh 822 drivers/scsi/wd719x.c wd->sh = sh; sh 823 drivers/scsi/wd719x.c sh->irq = wd->pdev->irq; sh 860 drivers/scsi/wd719x.c sh->this_id = wd->params->own_scsi_id & 
WD719X_EE_SCSI_ID_MASK; sh 863 drivers/scsi/wd719x.c card_types[wd->type], sh->base, sh->irq, sh->this_id); sh 898 drivers/scsi/wd719x.c struct Scsi_Host *sh; sh 920 drivers/scsi/wd719x.c sh = scsi_host_alloc(&wd719x_template, sizeof(struct wd719x)); sh 921 drivers/scsi/wd719x.c if (!sh) sh 924 drivers/scsi/wd719x.c wd = shost_priv(sh); sh 930 drivers/scsi/wd719x.c err = wd719x_board_found(sh); sh 934 drivers/scsi/wd719x.c err = scsi_add_host(sh, &wd->pdev->dev); sh 938 drivers/scsi/wd719x.c scsi_scan_host(sh); sh 940 drivers/scsi/wd719x.c pci_set_drvdata(pdev, sh); sh 948 drivers/scsi/wd719x.c scsi_host_put(sh); sh 960 drivers/scsi/wd719x.c struct Scsi_Host *sh = pci_get_drvdata(pdev); sh 961 drivers/scsi/wd719x.c struct wd719x *wd = shost_priv(sh); sh 963 drivers/scsi/wd719x.c scsi_remove_host(sh); sh 969 drivers/scsi/wd719x.c scsi_host_put(sh); sh 65 drivers/scsi/wd719x.h struct Scsi_Host *sh; /* pointer to host structure */ sh 167 drivers/target/loopback/tcm_loop.c static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc) sh 321 drivers/target/loopback/tcm_loop.c struct Scsi_Host *sh; sh 326 drivers/target/loopback/tcm_loop.c sh = scsi_host_alloc(&tcm_loop_driver_template, sh 328 drivers/target/loopback/tcm_loop.c if (!sh) { sh 332 drivers/target/loopback/tcm_loop.c tl_hba->sh = sh; sh 337 drivers/target/loopback/tcm_loop.c *((struct tcm_loop_hba **)sh->hostdata) = tl_hba; sh 341 drivers/target/loopback/tcm_loop.c sh->max_id = 2; sh 342 drivers/target/loopback/tcm_loop.c sh->max_lun = 0; sh 343 drivers/target/loopback/tcm_loop.c sh->max_channel = 0; sh 344 drivers/target/loopback/tcm_loop.c sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE; sh 350 drivers/target/loopback/tcm_loop.c scsi_host_set_prot(sh, host_prot); sh 351 drivers/target/loopback/tcm_loop.c scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC); sh 353 drivers/target/loopback/tcm_loop.c error = scsi_add_host(sh, &tl_hba->dev); sh 356 drivers/target/loopback/tcm_loop.c scsi_host_put(sh); sh 365 drivers/target/loopback/tcm_loop.c struct Scsi_Host *sh; sh 368 drivers/target/loopback/tcm_loop.c sh = tl_hba->sh; sh 370 drivers/target/loopback/tcm_loop.c scsi_remove_host(sh); sh 371 drivers/target/loopback/tcm_loop.c scsi_host_put(sh); sh 638 drivers/target/loopback/tcm_loop.c scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun); sh 655 drivers/target/loopback/tcm_loop.c sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt, sh 942 drivers/target/loopback/tcm_loop.c tl_hba->sh->host_no, tl_tpg->tl_tpgt); sh 1033 drivers/target/loopback/tcm_loop.c struct Scsi_Host *sh; sh 1083 drivers/target/loopback/tcm_loop.c sh = tl_hba->sh; sh 1086 drivers/target/loopback/tcm_loop.c tcm_loop_dump_proto_id(tl_hba), name, sh->host_no); sh 1101 drivers/target/loopback/tcm_loop.c tl_hba->sh->host_no); sh 52 drivers/target/loopback/tcm_loop.h struct Scsi_Host *sh; sh 98 drivers/target/target_core_pscsi.c struct Scsi_Host *sh = phv->phv_lld_host; sh 103 drivers/target/target_core_pscsi.c if (!sh) sh 110 drivers/target/target_core_pscsi.c " %s\n", hba->hba_id, (sh->hostt->name) ? sh 111 drivers/target/target_core_pscsi.c (sh->hostt->name) : "Unknown"); sh 113 drivers/target/target_core_pscsi.c scsi_host_put(sh); sh 120 drivers/target/target_core_pscsi.c sh = scsi_host_lookup(phv->phv_host_id); sh 121 drivers/target/target_core_pscsi.c if (!sh) { sh 127 drivers/target/target_core_pscsi.c phv->phv_lld_host = sh; sh 131 drivers/target/target_core_pscsi.c hba->hba_id, (sh->hostt->name) ? 
(sh->hostt->name) : "Unknown"); sh 355 drivers/target/target_core_pscsi.c __releases(sh->host_lock) sh 359 drivers/target/target_core_pscsi.c struct Scsi_Host *sh = sd->host; sh 365 drivers/target/target_core_pscsi.c sh->host_no, sd->channel, sd->id, sd->lun); sh 366 drivers/target/target_core_pscsi.c spin_unlock_irq(sh->host_lock); sh 369 drivers/target/target_core_pscsi.c spin_unlock_irq(sh->host_lock); sh 392 drivers/target/target_core_pscsi.c sh->host_no, sd->channel, sd->id, sd->lun); sh 400 drivers/target/target_core_pscsi.c __releases(sh->host_lock) sh 403 drivers/target/target_core_pscsi.c struct Scsi_Host *sh = sd->host; sh 408 drivers/target/target_core_pscsi.c sh->host_no, sd->channel, sd->id, sd->lun); sh 409 drivers/target/target_core_pscsi.c spin_unlock_irq(sh->host_lock); sh 412 drivers/target/target_core_pscsi.c spin_unlock_irq(sh->host_lock); sh 420 drivers/target/target_core_pscsi.c phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, sh 432 drivers/target/target_core_pscsi.c struct Scsi_Host *sh = phv->phv_lld_host; sh 448 drivers/target/target_core_pscsi.c if (!sh) { sh 480 drivers/target/target_core_pscsi.c sh = phv->phv_lld_host; sh 482 drivers/target/target_core_pscsi.c sh = scsi_host_lookup(pdv->pdv_host_id); sh 483 drivers/target/target_core_pscsi.c if (!sh) { sh 488 drivers/target/target_core_pscsi.c pdv->pdv_lld_host = sh; sh 498 drivers/target/target_core_pscsi.c spin_lock_irq(sh->host_lock); sh 499 drivers/target/target_core_pscsi.c list_for_each_entry(sd, &sh->__devices, siblings) { sh 521 drivers/target/target_core_pscsi.c scsi_host_put(sh); sh 531 drivers/target/target_core_pscsi.c spin_unlock_irq(sh->host_lock); sh 533 drivers/target/target_core_pscsi.c pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no, sh 537 drivers/target/target_core_pscsi.c scsi_host_put(sh); sh 986 drivers/usb/gadget/function/uvc_configfs.c struct uvcg_streaming_header *sh = to_uvcg_streaming_header(item); \ sh 989 drivers/usb/gadget/function/uvc_configfs.c struct mutex *su_mutex = &sh->item.ci_group->cg_subsys->su_mutex;\ sh 994 drivers/usb/gadget/function/uvc_configfs.c opts_item = sh->item.ci_parent->ci_parent->ci_parent; \ sh 998 drivers/usb/gadget/function/uvc_configfs.c result = sprintf(page, "%u\n", le##bits##_to_cpu(sh->desc.aname));\ sh 132 drivers/usb/storage/sierra_ms.c struct Scsi_Host *sh; sh 138 drivers/usb/storage/sierra_ms.c sh = us_to_host(us); sh 139 drivers/usb/storage/sierra_ms.c scsi_get_host_dev(sh); sh 9 drivers/video/fbdev/mb862xx/mb862xxfb.h unsigned short sh; sh 329 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { sh 332 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c pack((l1_cfg->sh << 11) / l1_cfg->dh, sh 337 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c (l1_cfg->sh <= l1_cfg->dh)) { sh 340 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c pack((l1_cfg->sh << 11) / l1_cfg->dh, sh 343 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c pack(l1_cfg->sw >> 1, l1_cfg->sh)); sh 368 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c pack(l1_cfg->sh, l1_cfg->sw)); sh 526 drivers/video/fbdev/mb862xx/mb862xxfbdrv.c par->l1_cfg.sh = 576; sh 172 drivers/video/fbdev/pxa3xx-gcu.c struct pxa3xx_gcu_shared *sh = priv->shared; sh 179 drivers/video/fbdev/pxa3xx-gcu.c sh->hw_running ? 
"running" : "idle ", sh 146 drivers/xen/events/events_2l.c struct shared_info *sh, sh 149 drivers/xen/events/events_2l.c return sh->evtchn_pending[idx] & sh 151 drivers/xen/events/events_2l.c ~sh->evtchn_mask[idx]; sh 267 drivers/xen/events/events_2l.c struct shared_info *sh = HYPERVISOR_shared_info; sh 293 drivers/xen/events/events_2l.c for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--) sh 295 drivers/xen/events/events_2l.c (int)sizeof(sh->evtchn_pending[0])*2, sh 296 drivers/xen/events/events_2l.c sh->evtchn_pending[i], sh 299 drivers/xen/events/events_2l.c for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) sh 301 drivers/xen/events/events_2l.c (int)(sizeof(sh->evtchn_mask[0])*2), sh 302 drivers/xen/events/events_2l.c sh->evtchn_mask[i], sh 306 drivers/xen/events/events_2l.c for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) sh 308 drivers/xen/events/events_2l.c (int)(sizeof(sh->evtchn_mask[0])*2), sh 309 drivers/xen/events/events_2l.c sh->evtchn_pending[i] & ~sh->evtchn_mask[i], sh 319 drivers/xen/events/events_2l.c for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) { sh 320 drivers/xen/events/events_2l.c xen_ulong_t pending = sh->evtchn_pending[i] sh 321 drivers/xen/events/events_2l.c & ~sh->evtchn_mask[i] sh 324 drivers/xen/events/events_2l.c (int)(sizeof(sh->evtchn_mask[0])*2), sh 330 drivers/xen/events/events_2l.c if (sync_test_bit(i, BM(sh->evtchn_pending))) { sh 337 drivers/xen/events/events_2l.c !sync_test_bit(i, BM(sh->evtchn_mask)) sh 2040 fs/btrfs/ioctl.c struct btrfs_ioctl_search_header sh; sh 2067 fs/btrfs/ioctl.c if (sizeof(sh) + item_len > *buf_size) { sh 2078 fs/btrfs/ioctl.c *buf_size = sizeof(sh) + item_len; sh 2083 fs/btrfs/ioctl.c if (sizeof(sh) + item_len + *sk_offset > *buf_size) { sh 2088 fs/btrfs/ioctl.c sh.objectid = key->objectid; sh 2089 fs/btrfs/ioctl.c sh.offset = key->offset; sh 2090 fs/btrfs/ioctl.c sh.type = key->type; sh 2091 fs/btrfs/ioctl.c sh.len = item_len; sh 2092 fs/btrfs/ioctl.c sh.transid = found_transid; sh 2095 fs/btrfs/ioctl.c if (copy_to_user(ubuf + *sk_offset, &sh, sizeof(sh))) { sh 2100 fs/btrfs/ioctl.c *sk_offset += sizeof(sh); sh 41 include/linux/reciprocal_div.h u8 sh, exp; sh 54 include/net/sctp/checksum.h struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); sh 55 include/net/sctp/checksum.h __le32 old = sh->checksum; sh 58 include/net/sctp/checksum.h sh->checksum = 0; sh 61 include/net/sctp/checksum.h sh->checksum = old; sh 586 include/video/pm3fb.h #define PM3FBDestReadMode_StripeHeight(sh) (((sh) & 0x7) << 7) sh 619 include/video/pm3fb.h #define PM3FBSourceReadMode_StripeHeight(sh) (((sh) & 0x7) << 7) sh 203 lib/digsig.c struct signature_hdr *sh = (struct signature_hdr *)sig; sh 209 lib/digsig.c if (siglen < sizeof(*sh) + 2) sh 212 lib/digsig.c if (sh->algo != PUBKEY_ALGO_RSA) sh 215 lib/digsig.c sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid)); sh 243 lib/digsig.c crypto_shash_update(desc, sig, sizeof(*sh)); sh 249 lib/digsig.c err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh), sh 63 lib/math/reciprocal_div.c R.sh = post_shift; sh 114 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 117 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 123 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 126 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 176 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 179 lib/mpi/longlong.h : "=r" (sh), \ sh 185 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 188 lib/mpi/longlong.h : "=r" 
(sh), \ sh 263 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 266 lib/mpi/longlong.h : "=g" ((USItype)(sh)), \ sh 272 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 275 lib/mpi/longlong.h : "=g" ((USItype)(sh)), \ sh 300 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 303 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 309 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 312 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 397 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 400 lib/mpi/longlong.h : "=r" (sh), \ sh 406 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 409 lib/mpi/longlong.h : "=r" (sh), \ sh 450 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 454 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 460 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 464 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 515 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 518 lib/mpi/longlong.h : "=d" ((USItype)(sh)), \ sh 524 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 527 lib/mpi/longlong.h : "=d" ((USItype)(sh)), \ sh 594 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 597 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 603 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 606 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 721 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 725 lib/mpi/longlong.h : "=r" (sh), \ sh 732 lib/mpi/longlong.h : "=r" (sh), \ sh 739 lib/mpi/longlong.h : "=r" (sh), \ sh 746 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 750 lib/mpi/longlong.h : "=r" (sh), \ sh 757 lib/mpi/longlong.h : "=r" (sh), \ sh 764 lib/mpi/longlong.h : "=r" (sh), \ sh 771 lib/mpi/longlong.h : "=r" (sh), \ sh 778 lib/mpi/longlong.h : "=r" (sh), \ sh 839 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 842 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 848 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 851 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 874 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 877 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 883 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 886 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 950 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 953 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 960 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 963 lib/mpi/longlong.h : "=r" ((USItype)(sh)), \ sh 1145 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 1148 lib/mpi/longlong.h : "=g" ((USItype)(sh)), \ sh 1154 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 1157 lib/mpi/longlong.h : "=g" ((USItype)(sh)), \ sh 1193 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 1195 lib/mpi/longlong.h : "=r" ((unsigned int)(sh)), \ sh 1201 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 1203 lib/mpi/longlong.h : "=r" ((unsigned int)(sh)), \ sh 1250 lib/mpi/longlong.h #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ sh 1254 lib/mpi/longlong.h (sh) = (ah) + (bh) + (__x < (al)); \ sh 1260 lib/mpi/longlong.h #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ sh 1264 lib/mpi/longlong.h (sh) = (ah) - (bh) - (__x > (al)); \ sh 721 net/core/skbuff.c struct skb_shared_info *sh = skb_shinfo(skb); sh 754 net/core/skbuff.c 
sh->tx_flags, sh->nr_frags, sh 755 net/core/skbuff.c sh->gso_size, sh->gso_type, sh->gso_segs, sh 43 net/netfilter/ipset/ip_set_getport.c const struct sctphdr *sh; sh 45 net/netfilter/ipset/ip_set_getport.c sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh); sh 46 net/netfilter/ipset/ip_set_getport.c if (!sh) sh 50 net/netfilter/ipset/ip_set_getport.c *port = src ? sh->source : sh->dest; sh 23 net/netfilter/ipvs/ip_vs_proto_sctp.c struct sctphdr *sh, _sctph; sh 27 net/netfilter/ipvs/ip_vs_proto_sctp.c sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph); sh 28 net/netfilter/ipvs/ip_vs_proto_sctp.c if (sh) { sh 36 net/netfilter/ipvs/ip_vs_proto_sctp.c ports = &sh->source; sh 189 net/netfilter/ipvs/ip_vs_proto_sctp.c struct sctphdr *sh; sh 199 net/netfilter/ipvs/ip_vs_proto_sctp.c sh = (struct sctphdr *)(skb->data + sctphoff); sh 200 net/netfilter/ipvs/ip_vs_proto_sctp.c cmp = sh->checksum; sh 271 net/netfilter/nf_conntrack_proto_sctp.c const struct sctphdr *sh, unsigned int dataoff) sh 295 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag) sh 309 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag); sh 310 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag; sh 315 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag); sh 316 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag; sh 329 net/netfilter/nf_conntrack_proto_sctp.c const struct sctphdr *sh; sh 339 net/netfilter/nf_conntrack_proto_sctp.c if (skb_ensure_writable(skb, dataoff + sizeof(*sh))) { sh 343 net/netfilter/nf_conntrack_proto_sctp.c sh = (const struct sctphdr *)(skb->data + dataoff); sh 344 net/netfilter/nf_conntrack_proto_sctp.c if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { sh 365 net/netfilter/nf_conntrack_proto_sctp.c const struct sctphdr *sh; sh 376 net/netfilter/nf_conntrack_proto_sctp.c sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); sh 377 net/netfilter/nf_conntrack_proto_sctp.c if (sh == NULL) sh 390 net/netfilter/nf_conntrack_proto_sctp.c if (!sctp_new(ct, skb, sh, dataoff)) sh 402 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag != ct->proto.sctp.vtag[dir]) { sh 413 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag != 0) sh 417 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag != ct->proto.sctp.vtag[dir] && sh 418 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag != ct->proto.sctp.vtag[!dir]) sh 422 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag != ct->proto.sctp.vtag[dir] && sh 423 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag != ct->proto.sctp.vtag[!dir] && sh 428 net/netfilter/nf_conntrack_proto_sctp.c if (sh->vtag != ct->proto.sctp.vtag[dir]) sh 434 net/netfilter/nf_conntrack_proto_sctp.c sh->vtag, dir); sh 435 net/netfilter/nf_conntrack_proto_sctp.c ct->proto.sctp.vtag[dir] = sh->vtag; sh 436 net/netfilter/nf_conntrack_proto_sctp.c } else if (sh->vtag != ct->proto.sctp.vtag[dir]) { sh 122 net/netfilter/xt_sctp.c const struct sctphdr *sh; sh 130 net/netfilter/xt_sctp.c sh = skb_header_pointer(skb, par->thoff, sizeof(_sh), &_sh); sh 131 net/netfilter/xt_sctp.c if (sh == NULL) { sh 136 net/netfilter/xt_sctp.c pr_debug("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest)); sh 138 net/netfilter/xt_sctp.c return SCCHECK(ntohs(sh->source) >= info->spts[0] sh 139 net/netfilter/xt_sctp.c && ntohs(sh->source) <= info->spts[1], sh 141 net/netfilter/xt_sctp.c SCCHECK(ntohs(sh->dest) >= info->dpts[0] sh 142 net/netfilter/xt_sctp.c && ntohs(sh->dest) <= info->dpts[1], sh 714 
net/openvswitch/actions.c struct sctphdr *sh; sh 722 net/openvswitch/actions.c sh = sctp_hdr(skb); sh 723 net/openvswitch/actions.c old_csum = sh->checksum; sh 726 net/openvswitch/actions.c sh->source = OVS_MASKED(sh->source, key->sctp_src, mask->sctp_src); sh 727 net/openvswitch/actions.c sh->dest = OVS_MASKED(sh->dest, key->sctp_dst, mask->sctp_dst); sh 732 net/openvswitch/actions.c sh->checksum = old_csum ^ old_correct_csum ^ new_csum; sh 735 net/openvswitch/actions.c flow_key->tp.src = sh->source; sh 736 net/openvswitch/actions.c flow_key->tp.dst = sh->dest; sh 70 net/sctp/input.c struct sctphdr *sh = sctp_hdr(skb); sh 71 net/sctp/input.c __le32 cmp = sh->checksum; sh 1103 net/sctp/input.c struct sctphdr *sh = sctp_hdr(skb); sh 1134 net/sctp/input.c af->from_addr_param(paddr, params.addr, sh->source, 0); sh 481 net/sctp/ipv6.c struct sctphdr *sh = sctp_hdr(skb); sh 489 net/sctp/ipv6.c sa->sin6_port = sh->source; sh 492 net/sctp/ipv6.c sa->sin6_port = sh->dest; sh 842 net/sctp/ipv6.c struct sctphdr *sh; sh 848 net/sctp/ipv6.c sh = sctp_hdr(skb); sh 852 net/sctp/ipv6.c addr->v4.sin_port = sh->source; sh 857 net/sctp/ipv6.c addr->v6.sin6_port = sh->source; sh 38 net/sctp/offload.c struct sctphdr *sh; sh 43 net/sctp/offload.c sh = sctp_hdr(skb); sh 44 net/sctp/offload.c if (!pskb_may_pull(skb, sizeof(*sh))) sh 47 net/sctp/offload.c __skb_pull(skb, sizeof(*sh)); sh 75 net/sctp/offload.c sh = sctp_hdr(skb); sh 76 net/sctp/offload.c sh->checksum = sctp_gso_make_checksum(skb); sh 525 net/sctp/output.c struct sctphdr *sh = sh 528 net/sctp/output.c sh->checksum = sctp_compute_cksum(head, 0); sh 553 net/sctp/output.c struct sctphdr *sh; sh 580 net/sctp/output.c sh = skb_push(head, sizeof(struct sctphdr)); sh 582 net/sctp/output.c sh->source = htons(packet->source_port); sh 583 net/sctp/output.c sh->dest = htons(packet->destination_port); sh 584 net/sctp/output.c sh->vtag = htonl(packet->vtag); sh 585 net/sctp/output.c sh->checksum = 0; sh 218 net/sctp/protocol.c struct sctphdr *sh = sctp_hdr(skb); sh 224 net/sctp/protocol.c sa->sin_port = sh->source; sh 227 net/sctp/protocol.c sa->sin_port = sh->dest; sh 913 net/sctp/protocol.c struct sctphdr *sh = sctp_hdr(skb); sh 917 net/sctp/protocol.c sin->sin_port = sh->source; sh 564 net/wireless/util.c struct skb_shared_info *sh = skb_shinfo(skb); sh 569 net/wireless/util.c skb_add_rx_frag(skb, sh->nr_frags, page, page_offset, len, size); sh 576 net/wireless/util.c struct skb_shared_info *sh = skb_shinfo(skb); sh 577 net/wireless/util.c const skb_frag_t *frag = &sh->frags[0]; sh 1233 net/xfrm/xfrm_user.c struct xfrmu_sadhinfo sh; sh 1246 net/xfrm/xfrm_user.c sh.sadhmcnt = si.sadhmcnt; sh 1247 net/xfrm/xfrm_user.c sh.sadhcnt = si.sadhcnt; sh 1251 net/xfrm/xfrm_user.c err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); sh 87 security/lsm_audit.c struct sctphdr *sh = sctp_hdr(skb); sh 88 security/lsm_audit.c if (sh == NULL) sh 90 security/lsm_audit.c ad->u.net->sport = sh->source; sh 91 security/lsm_audit.c ad->u.net->dport = sh->dest; sh 167 security/lsm_audit.c struct sctphdr _sctph, *sh; sh 169 security/lsm_audit.c sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph); sh 170 security/lsm_audit.c if (sh == NULL) sh 172 security/lsm_audit.c ad->u.net->sport = sh->source; sh 173 security/lsm_audit.c ad->u.net->dport = sh->dest; sh 4248 security/selinux/hooks.c struct sctphdr _sctph, *sh; sh 4254 security/selinux/hooks.c sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph); sh 4255 security/selinux/hooks.c if (sh == NULL) sh 4258 
security/selinux/hooks.c ad->u.net->sport = sh->source; sh 4259 security/selinux/hooks.c ad->u.net->dport = sh->dest; sh 4338 security/selinux/hooks.c struct sctphdr _sctph, *sh; sh 4340 security/selinux/hooks.c sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph); sh 4341 security/selinux/hooks.c if (sh == NULL) sh 4344 security/selinux/hooks.c ad->u.net->sport = sh->source; sh 4345 security/selinux/hooks.c ad->u.net->dport = sh->dest; sh 424 tools/lib/bpf/btf.c GElf_Shdr sh; sh 428 tools/lib/bpf/btf.c if (gelf_getshdr(scn, &sh) != &sh) { sh 433 tools/lib/bpf/btf.c name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name); sh 683 tools/lib/bpf/libbpf.c GElf_Shdr sh; sh 686 tools/lib/bpf/libbpf.c if (gelf_getshdr(scn, &sh) != &sh) { sh 692 tools/lib/bpf/libbpf.c sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); sh 1347 tools/lib/bpf/libbpf.c GElf_Shdr sh; sh 1353 tools/lib/bpf/libbpf.c if (gelf_getshdr(scn, &sh) != &sh) sh 1356 tools/lib/bpf/libbpf.c if (sh.sh_flags & SHF_EXECINSTR) sh 1536 tools/lib/bpf/libbpf.c GElf_Shdr sh; sh 1540 tools/lib/bpf/libbpf.c if (gelf_getshdr(scn, &sh) != &sh) { sh 1546 tools/lib/bpf/libbpf.c name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name); sh 1561 tools/lib/bpf/libbpf.c (int)sh.sh_link, (unsigned long)sh.sh_flags, sh 1562 tools/lib/bpf/libbpf.c (int)sh.sh_type); sh 1584 tools/lib/bpf/libbpf.c } else if (sh.sh_type == SHT_SYMTAB) { sh 1591 tools/lib/bpf/libbpf.c obj->efile.strtabidx = sh.sh_link; sh 1592 tools/lib/bpf/libbpf.c } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) { sh 1593 tools/lib/bpf/libbpf.c if (sh.sh_flags & SHF_EXECINSTR) { sh 1616 tools/lib/bpf/libbpf.c } else if (sh.sh_type == SHT_REL) { sh 1619 tools/lib/bpf/libbpf.c int sec = sh.sh_info; /* points to other section */ sh 1638 tools/lib/bpf/libbpf.c obj->efile.reloc[nr_reloc].shdr = sh; sh 1640 tools/lib/bpf/libbpf.c } else if (sh.sh_type == SHT_NOBITS && strcmp(name, ".bss") == 0) { sh 240 tools/objtool/check.c if (!(sec->sh.sh_flags & SHF_EXECINSTR)) sh 171 tools/objtool/elf.c if (!gelf_getshdr(s, &sec->sh)) { sh 176 tools/objtool/elf.c sec->name = elf_strptr(elf->elf, shstrndx, sec->sh.sh_name); sh 182 tools/objtool/elf.c if (sec->sh.sh_size != 0) { sh 189 tools/objtool/elf.c sec->data->d_size != sec->sh.sh_size) { sh 195 tools/objtool/elf.c sec->len = sec->sh.sh_size; sh 221 tools/objtool/elf.c symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize; sh 239 tools/objtool/elf.c sym->name = elf_strptr(elf->elf, symtab->sh.sh_link, sh 358 tools/objtool/elf.c if (sec->sh.sh_type != SHT_RELA) sh 370 tools/objtool/elf.c for (i = 0; i < sec->sh.sh_size / sec->sh.sh_entsize; i++) { sh 517 tools/objtool/elf.c if (!gelf_getshdr(s, &sec->sh)) { sh 522 tools/objtool/elf.c sec->sh.sh_size = size; sh 523 tools/objtool/elf.c sec->sh.sh_entsize = entsize; sh 524 tools/objtool/elf.c sec->sh.sh_type = SHT_PROGBITS; sh 525 tools/objtool/elf.c sec->sh.sh_addralign = 1; sh 526 tools/objtool/elf.c sec->sh.sh_flags = SHF_ALLOC; sh 554 tools/objtool/elf.c sec->sh.sh_name = shstrtab->len; sh 583 tools/objtool/elf.c sec->sh.sh_type = SHT_RELA; sh 584 tools/objtool/elf.c sec->sh.sh_addralign = 8; sh 585 tools/objtool/elf.c sec->sh.sh_link = find_section_by_name(elf, ".symtab")->idx; sh 586 tools/objtool/elf.c sec->sh.sh_info = base->idx; sh 587 tools/objtool/elf.c sec->sh.sh_flags = SHF_INFO_LINK; sh 612 tools/objtool/elf.c sec->sh.sh_size = size; sh 638 tools/objtool/elf.c if (!gelf_update_shdr(s, &sec->sh)) { sh 28 tools/objtool/elf.h GElf_Shdr sh; sh 72 tools/objtool/orc_dump.c 
GElf_Shdr sh; sh 111 tools/objtool/orc_dump.c if (!gelf_getshdr(scn, &sh)) { sh 116 tools/objtool/orc_dump.c name = elf_strptr(elf, shstrtab_idx, sh.sh_name); sh 134 tools/objtool/orc_dump.c orc_size = sh.sh_size; sh 137 tools/objtool/orc_dump.c orc_ip_addr = sh.sh_addr; sh 171 tools/objtool/orc_dump.c if (!gelf_getshdr(scn, &sh)) { sh 176 tools/objtool/orc_dump.c name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
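The objtool and libbpf entries above all follow the same libelf pattern: walk the sections with elf_nextscn(), copy each header into a local GElf_Shdr sh via gelf_getshdr(), and resolve sh.sh_name through elf_strptr(). A minimal standalone sketch of that pattern follows; it is not taken from the kernel tree, and the function name dump_section_names is invented for illustration, but the libelf calls are the same ones the listed lines reference.

/*
 * Sketch only (not kernel code): iterate ELF section headers with
 * libelf, mirroring the "GElf_Shdr sh" usage in tools/objtool and
 * tools/lib/bpf above.  Error handling is abbreviated.
 */
#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <stdio.h>
#include <unistd.h>

int dump_section_names(const char *path)
{
	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	elf_version(EV_CURRENT);                /* must precede elf_begin() */
	Elf *elf = elf_begin(fd, ELF_C_READ, NULL);
	size_t shstrndx;
	if (!elf || elf_getshdrstrndx(elf, &shstrndx)) {
		close(fd);
		return -1;
	}

	Elf_Scn *scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		GElf_Shdr sh;

		/* gelf_getshdr() returns its destination on success */
		if (gelf_getshdr(scn, &sh) != &sh)
			continue;
		printf("%s: type=%u size=%llu\n",
		       elf_strptr(elf, shstrndx, sh.sh_name),
		       sh.sh_type, (unsigned long long)sh.sh_size);
	}

	elf_end(elf);
	close(fd);
	return 0;
}

The "gelf_getshdr(scn, &sh) != &sh" test mirrors the check used in the tools/objtool/elf.c and tools/lib/bpf/libbpf.c entries above: the call fills the caller-supplied header and returns that pointer on success, NULL on failure.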