ic 246 arch/arc/include/asm/arcregs.h unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
ic 248 arch/arc/include/asm/arcregs.h unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
ic 384 arch/arc/kernel/setup.c IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
ic 1221 arch/arc/mm/cache.c struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
ic 1223 arch/arc/mm/cache.c if (!ic->line_len)
ic 1226 arch/arc/mm/cache.c if (ic->line_len != L1_CACHE_BYTES)
ic 1228 arch/arc/mm/cache.c ic->line_len, L1_CACHE_BYTES);
ic 1234 arch/arc/mm/cache.c if (is_isa_arcv2() && ic->alias)
ic 426 arch/arm64/include/asm/assembler.h USER(\label, ic ivau, \tmp2) // invalidate I line PoU
ic 76 arch/ia64/include/asm/native/inst.h ssm psr.ic | PSR_DEFAULT_BITS \
ic 82 arch/ia64/include/asm/native/inst.h ssm psr.ic \
ic 87 arch/ia64/include/asm/native/inst.h rsm psr.ic
ic 96 arch/ia64/include/asm/native/inst.h rsm psr.i | psr.ic
ic 502 arch/ia64/include/asm/pal.h ic : 1, /* Failure in icache */
ic 718 arch/ia64/include/asm/pal.h #define pmci_cache_instr_cache_fail pme_cache.ic
ic 93 arch/ia64/include/asm/processor.h __u64 ic : 1;
ic 902 arch/ia64/kernel/mca.c if (ia64_psr(regs)->ic) {
ic 252 arch/parisc/include/asm/pdcpat.h u64 ic:1; /* interleaving had to be changed ? */
ic 502 arch/powerpc/include/asm/hvcall.h u64 ic;
ic 572 arch/powerpc/include/asm/kvm_host.h ulong ic;
ic 321 arch/powerpc/include/asm/rtas.h struct { __be32 count, index; } ic;
ic 522 arch/powerpc/kernel/asm-offsets.c OFFSET(VCPU_IC, kvm_vcpu, arch.ic);
ic 679 arch/powerpc/kvm/book3s.c *val = get_reg_val(id, vcpu->arch.ic);
ic 764 arch/powerpc/kvm/book3s.c vcpu->arch.ic = set_reg_val(id, *val);
ic 935 arch/powerpc/kvm/book3s_emulate.c *spr_val = vcpu->arch.ic;
ic 3432 arch/powerpc/kvm/book3s_hv.c mtspr(SPRN_IC, vcpu->arch.ic);
ic 3467 arch/powerpc/kvm/book3s_hv.c vcpu->arch.ic = mfspr(SPRN_IC);
ic 41 arch/powerpc/kvm/book3s_hv_nested.c hr->ic = vcpu->arch.ic;
ic 79 arch/powerpc/kvm/book3s_hv_nested.c hr->ic = swab64(hr->ic);
ic 105 arch/powerpc/kvm/book3s_hv_nested.c hr->ic = vcpu->arch.ic;
ic 159 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ic = hr->ic;
ic 181 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ic = hr->ic;
ic 302 arch/powerpc/kvm/book3s_hv_nested.c delta_ic = vcpu->arch.ic - l2_hv.ic;
ic 317 arch/powerpc/kvm/book3s_hv_nested.c vcpu->arch.ic += delta_ic;
ic 268 arch/powerpc/kvm/book3s_pr.c vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
ic 110 arch/powerpc/platforms/cell/spider-pic.c u32 ic;
ic 120 arch/powerpc/platforms/cell/spider-pic.c ic = 0x3;
ic 123 arch/powerpc/platforms/cell/spider-pic.c ic = 0x2;
ic 126 arch/powerpc/platforms/cell/spider-pic.c ic = 0x0;
ic 130 arch/powerpc/platforms/cell/spider-pic.c ic = 0x1;
ic 143 arch/powerpc/platforms/cell/spider-pic.c out_be32(cfg, old_mask | (ic << 24) | (0x7 << 16) |
ic 144 arch/powerpc/platforms/powermac/setup.c const unsigned int *ic =
ic 153 arch/powerpc/platforms/powermac/setup.c if (ic)
ic 154 arch/powerpc/platforms/powermac/setup.c seq_printf(m, " %dK instruction", *ic / 1024);
ic 157 arch/powerpc/platforms/powermac/setup.c (ic? " +": ""), *dc / 1024);
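
Note: the paired arcregs.h hits (246 and 248) are the two halves of an #ifdef'd bitfield declaration: C leaves bitfield allocation order to the ABI, so the kernel declares the fields in opposite order for big- and little-endian builds to keep each field on the same hardware bits. A minimal user-space sketch of the pattern; the field names and the BIG_ENDIAN_DEMO macro are illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    /* On the usual little-endian GCC/clang ABI the first-declared field
     * occupies the least significant bits, so the two declarations map
     * "ver" onto the same hardware bits either way. */
    union bcr {
            struct {
    #ifdef BIG_ENDIAN_DEMO
                    uint32_t pad:24, ver:8;
    #else
                    uint32_t ver:8, pad:24;
    #endif
            } f;
            uint32_t raw;
    };

    int main(void)
    {
            union bcr b = { .raw = 0x12345642u };
            printf("ver = 0x%02x\n", b.f.ver); /* 0x42 on little-endian */
            return 0;
    }
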
" +": ""), *dc / 1024); ic 268 arch/powerpc/platforms/powermac/setup.c struct device_node *cpu, *ic; ic 296 arch/powerpc/platforms/powermac/setup.c ic = of_find_node_with_property(NULL, "interrupt-controller"); ic 297 arch/powerpc/platforms/powermac/setup.c if (ic) { ic 299 arch/powerpc/platforms/powermac/setup.c of_node_put(ic); ic 348 arch/powerpc/platforms/pseries/dlpar.c hp_elog->_drc_u.ic.count = ic 349 arch/powerpc/platforms/pseries/dlpar.c be32_to_cpu(hp_elog->_drc_u.ic.count); ic 350 arch/powerpc/platforms/pseries/dlpar.c hp_elog->_drc_u.ic.index = ic 351 arch/powerpc/platforms/pseries/dlpar.c be32_to_cpu(hp_elog->_drc_u.ic.index); ic 477 arch/powerpc/platforms/pseries/dlpar.c hp_elog->_drc_u.ic.count = cpu_to_be32(count); ic 478 arch/powerpc/platforms/pseries/dlpar.c hp_elog->_drc_u.ic.index = cpu_to_be32(index); ic 895 arch/powerpc/platforms/pseries/hotplug-memory.c count = hp_elog->_drc_u.ic.count; ic 896 arch/powerpc/platforms/pseries/hotplug-memory.c drc_index = hp_elog->_drc_u.ic.index; ic 916 arch/powerpc/platforms/pseries/hotplug-memory.c count = hp_elog->_drc_u.ic.count; ic 917 arch/powerpc/platforms/pseries/hotplug-memory.c drc_index = hp_elog->_drc_u.ic.index; ic 53 arch/s390/include/uapi/asm/runtime_instr.h __u32 ic : 4; ic 279 arch/s390/kvm/gaccess.c union ipte_control old, new, *ic; ic 287 arch/s390/kvm/gaccess.c ic = kvm_s390_get_ipte_control(vcpu->kvm); ic 289 arch/s390/kvm/gaccess.c old = READ_ONCE(*ic); ic 297 arch/s390/kvm/gaccess.c } while (cmpxchg(&ic->val, old.val, new.val) != old.val); ic 305 arch/s390/kvm/gaccess.c union ipte_control old, new, *ic; ic 312 arch/s390/kvm/gaccess.c ic = kvm_s390_get_ipte_control(vcpu->kvm); ic 314 arch/s390/kvm/gaccess.c old = READ_ONCE(*ic); ic 317 arch/s390/kvm/gaccess.c } while (cmpxchg(&ic->val, old.val, new.val) != old.val); ic 326 arch/s390/kvm/gaccess.c union ipte_control old, new, *ic; ic 330 arch/s390/kvm/gaccess.c ic = kvm_s390_get_ipte_control(vcpu->kvm); ic 332 arch/s390/kvm/gaccess.c old = READ_ONCE(*ic); ic 341 arch/s390/kvm/gaccess.c } while (cmpxchg(&ic->val, old.val, new.val) != old.val); ic 347 arch/s390/kvm/gaccess.c union ipte_control old, new, *ic; ic 350 arch/s390/kvm/gaccess.c ic = kvm_s390_get_ipte_control(vcpu->kvm); ic 352 arch/s390/kvm/gaccess.c old = READ_ONCE(*ic); ic 357 arch/s390/kvm/gaccess.c } while (cmpxchg(&ic->val, old.val, new.val) != old.val); ic 33 drivers/base/attribute_container.c struct internal_container *ic = ic 35 drivers/base/attribute_container.c get_device(&ic->classdev); ic 40 drivers/base/attribute_container.c struct internal_container *ic = ic 42 drivers/base/attribute_container.c put_device(&ic->classdev); ic 56 drivers/base/attribute_container.c struct internal_container *ic = ic 58 drivers/base/attribute_container.c return ic->cont; ic 114 drivers/base/attribute_container.c struct internal_container *ic ic 118 drivers/base/attribute_container.c kfree(ic); ic 150 drivers/base/attribute_container.c struct internal_container *ic; ic 158 drivers/base/attribute_container.c ic = kzalloc(sizeof(*ic), GFP_KERNEL); ic 159 drivers/base/attribute_container.c if (!ic) { ic 164 drivers/base/attribute_container.c ic->cont = cont; ic 165 drivers/base/attribute_container.c device_initialize(&ic->classdev); ic 166 drivers/base/attribute_container.c ic->classdev.parent = get_device(dev); ic 167 drivers/base/attribute_container.c ic->classdev.class = cont->class; ic 169 drivers/base/attribute_container.c dev_set_name(&ic->classdev, "%s", dev_name(dev)); ic 171 
ic 173 drivers/base/attribute_container.c attribute_container_add_class_device(&ic->classdev);
ic 174 drivers/base/attribute_container.c klist_add_tail(&ic->node, &cont->containers);
ic 215 drivers/base/attribute_container.c struct internal_container *ic;
ic 224 drivers/base/attribute_container.c klist_for_each_entry(ic, &cont->containers, node, &iter) {
ic 225 drivers/base/attribute_container.c if (dev != ic->classdev.parent)
ic 227 drivers/base/attribute_container.c klist_del(&ic->node);
ic 229 drivers/base/attribute_container.c fn(cont, dev, &ic->classdev);
ic 231 drivers/base/attribute_container.c attribute_container_remove_attrs(&ic->classdev);
ic 232 drivers/base/attribute_container.c device_unregister(&ic->classdev);
ic 259 drivers/base/attribute_container.c struct internal_container *ic;
ic 270 drivers/base/attribute_container.c klist_for_each_entry(ic, &cont->containers, node, &iter) {
ic 271 drivers/base/attribute_container.c if (dev == ic->classdev.parent)
ic 272 drivers/base/attribute_container.c fn(cont, dev, &ic->classdev);
ic 427 drivers/base/attribute_container.c struct internal_container *ic;
ic 430 drivers/base/attribute_container.c klist_for_each_entry(ic, &cont->containers, node, &iter) {
ic 431 drivers/base/attribute_container.c if (ic->classdev.parent == dev) {
ic 432 drivers/base/attribute_container.c cdev = &ic->classdev;
ic 185 drivers/edac/pnd2_edac.h u32 ic : 1;
ic 663 drivers/gpio/gpio-aspeed.c struct irq_chip *ic = irq_desc_get_chip(desc);
ic 669 drivers/gpio/gpio-aspeed.c chained_irq_enter(ic, desc);
ic 684 drivers/gpio/gpio-aspeed.c chained_irq_exit(ic, desc);
ic 545 drivers/gpio/gpio-eic-sprd.c struct irq_chip *ic = irq_desc_get_chip(desc);
ic 549 drivers/gpio/gpio-eic-sprd.c chained_irq_enter(ic, desc);
ic 564 drivers/gpio/gpio-eic-sprd.c chained_irq_exit(ic, desc);
ic 56 drivers/gpio/gpio-lpc18xx.c static inline void lpc18xx_gpio_pin_ic_isel(struct lpc18xx_gpio_pin_ic *ic,
ic 59 drivers/gpio/gpio-lpc18xx.c u32 val = readl_relaxed(ic->base + LPC18XX_GPIO_PIN_IC_ISEL);
ic 66 drivers/gpio/gpio-lpc18xx.c writel_relaxed(val, ic->base + LPC18XX_GPIO_PIN_IC_ISEL);
ic 69 drivers/gpio/gpio-lpc18xx.c static inline void lpc18xx_gpio_pin_ic_set(struct lpc18xx_gpio_pin_ic *ic,
ic 72 drivers/gpio/gpio-lpc18xx.c writel_relaxed(BIT(pin), ic->base + reg);
ic 77 drivers/gpio/gpio-lpc18xx.c struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
ic 80 drivers/gpio/gpio-lpc18xx.c raw_spin_lock(&ic->lock);
ic 83 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
ic 87 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
ic 90 drivers/gpio/gpio-lpc18xx.c raw_spin_unlock(&ic->lock);
ic 97 drivers/gpio/gpio-lpc18xx.c struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
ic 100 drivers/gpio/gpio-lpc18xx.c raw_spin_lock(&ic->lock);
ic 103 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
ic 107 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
ic 110 drivers/gpio/gpio-lpc18xx.c raw_spin_unlock(&ic->lock);
ic 117 drivers/gpio/gpio-lpc18xx.c struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
ic 120 drivers/gpio/gpio-lpc18xx.c raw_spin_lock(&ic->lock);
ic 123 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
ic 126 drivers/gpio/gpio-lpc18xx.c raw_spin_unlock(&ic->lock);
ic 133 drivers/gpio/gpio-lpc18xx.c struct lpc18xx_gpio_pin_ic *ic = d->chip_data;
ic 135 drivers/gpio/gpio-lpc18xx.c raw_spin_lock(&ic->lock);
ic 138 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_isel(ic, d->hwirq, true);
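
Note: the attribute_container.c hits are variations on one traversal: walk the container's klist of internal_container objects and act only on entries whose classdev.parent matches the device of interest. A plain-C analogue of that filter-and-visit loop, with deliberately minimal stand-in types in place of klist and struct device:

    #include <stdio.h>

    struct device { const char *name; };

    struct internal_container {
            struct device *parent;              /* ic->classdev.parent */
            struct internal_container *next;    /* the klist, flattened */
    };

    static void visit_matching(struct internal_container *head, struct device *dev,
                               void (*fn)(struct internal_container *))
    {
            for (struct internal_container *ic = head; ic; ic = ic->next) {
                    if (ic->parent != dev)
                            continue;   /* same skip as attribute_container.c:225 */
                    fn(ic);
            }
    }

    static void report(struct internal_container *ic)
    {
            printf("match under %s\n", ic->parent->name);
    }

    int main(void)
    {
            struct device d = { "dev0" }, other = { "dev1" };
            struct internal_container c2 = { &other, NULL };
            struct internal_container c1 = { &d, &c2 };
            visit_matching(&c1, &d, report);    /* visits c1 only */
            return 0;
    }
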
ic 139 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
ic 142 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_isel(ic, d->hwirq, true);
ic 143 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_set(ic, d->hwirq,
ic 146 drivers/gpio/gpio-lpc18xx.c lpc18xx_gpio_pin_ic_isel(ic, d->hwirq, false);
ic 149 drivers/gpio/gpio-lpc18xx.c raw_spin_unlock(&ic->lock);
ic 168 drivers/gpio/gpio-lpc18xx.c struct lpc18xx_gpio_pin_ic *ic = domain->host_data;
ic 195 drivers/gpio/gpio-lpc18xx.c &lpc18xx_gpio_pin_ic, ic);
ic 209 drivers/gpio/gpio-lpc18xx.c struct lpc18xx_gpio_pin_ic *ic;
ic 222 drivers/gpio/gpio-lpc18xx.c ic = devm_kzalloc(dev, sizeof(*ic), GFP_KERNEL);
ic 223 drivers/gpio/gpio-lpc18xx.c if (!ic)
ic 237 drivers/gpio/gpio-lpc18xx.c ic->base = devm_ioremap_resource(dev, &res);
ic 238 drivers/gpio/gpio-lpc18xx.c if (IS_ERR(ic->base)) {
ic 239 drivers/gpio/gpio-lpc18xx.c ret = PTR_ERR(ic->base);
ic 243 drivers/gpio/gpio-lpc18xx.c raw_spin_lock_init(&ic->lock);
ic 245 drivers/gpio/gpio-lpc18xx.c ic->domain = irq_domain_add_hierarchy(parent_domain, 0,
ic 249 drivers/gpio/gpio-lpc18xx.c ic);
ic 250 drivers/gpio/gpio-lpc18xx.c if (!ic->domain) {
ic 256 drivers/gpio/gpio-lpc18xx.c gc->pin_ic = ic;
ic 261 drivers/gpio/gpio-lpc18xx.c devm_iounmap(dev, ic->base);
ic 263 drivers/gpio/gpio-lpc18xx.c devm_kfree(dev, ic);
ic 100 drivers/gpio/gpio-siox.c struct irq_chip *ic = irq_data_get_irq_chip(d);
ic 102 drivers/gpio/gpio-siox.c container_of(ic, struct gpio_siox_ddata, ichip);
ic 111 drivers/gpio/gpio-siox.c struct irq_chip *ic = irq_data_get_irq_chip(d);
ic 113 drivers/gpio/gpio-siox.c container_of(ic, struct gpio_siox_ddata, ichip);
ic 122 drivers/gpio/gpio-siox.c struct irq_chip *ic = irq_data_get_irq_chip(d);
ic 124 drivers/gpio/gpio-siox.c container_of(ic, struct gpio_siox_ddata, ichip);
ic 133 drivers/gpio/gpio-siox.c struct irq_chip *ic = irq_data_get_irq_chip(d);
ic 135 drivers/gpio/gpio-siox.c container_of(ic, struct gpio_siox_ddata, ichip);
ic 187 drivers/gpio/gpio-sprd.c struct irq_chip *ic = irq_desc_get_chip(desc);
ic 191 drivers/gpio/gpio-sprd.c chained_irq_enter(ic, desc);
ic 206 drivers/gpio/gpio-sprd.c chained_irq_exit(ic, desc);
ic 35 drivers/gpio/gpio-sta2x11.c u32 ic;
ic 294 drivers/gpio/gpio-sta2x11.c writel(1 << nr, &regs->ic);
ic 371 drivers/gpio/gpio-sta2x11.c writel(~0, &chip->regs[i]->ic);
ic 92 drivers/gpio/gpio-tegra.c struct irq_chip ic;
ic 600 drivers/gpio/gpio-tegra.c tgi->ic.name = "GPIO";
ic 601 drivers/gpio/gpio-tegra.c tgi->ic.irq_ack = tegra_gpio_irq_ack;
ic 602 drivers/gpio/gpio-tegra.c tgi->ic.irq_mask = tegra_gpio_irq_mask;
ic 603 drivers/gpio/gpio-tegra.c tgi->ic.irq_unmask = tegra_gpio_irq_unmask;
ic 604 drivers/gpio/gpio-tegra.c tgi->ic.irq_set_type = tegra_gpio_irq_set_type;
ic 605 drivers/gpio/gpio-tegra.c tgi->ic.irq_shutdown = tegra_gpio_irq_shutdown;
ic 607 drivers/gpio/gpio-tegra.c tgi->ic.irq_set_wake = tegra_gpio_irq_set_wake;
ic 662 drivers/gpio/gpio-tegra.c irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
ic 32 drivers/gpio/gpio-vf610.c struct irq_chip ic;
ic 247 drivers/gpio/gpio-vf610.c struct irq_chip *ic;
ic 314 drivers/gpio/gpio-vf610.c ic = &port->ic;
ic 315 drivers/gpio/gpio-vf610.c ic->name = "gpio-vf610";
ic 316 drivers/gpio/gpio-vf610.c ic->irq_ack = vf610_gpio_irq_ack;
ic 317 drivers/gpio/gpio-vf610.c ic->irq_mask = vf610_gpio_irq_mask;
ic 318 drivers/gpio/gpio-vf610.c ic->irq_unmask = vf610_gpio_irq_unmask;
ic 319 drivers/gpio/gpio-vf610.c ic->irq_set_type = vf610_gpio_irq_set_type;
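
Note: gpio-siox above recovers its driver data from the embedded irq_chip with container_of(); gpio-tegra and gpio-vf610 embed their irq_chip the same way. The macro is pure pointer arithmetic over offsetof, as this user-space sketch shows (the reduced types are invented for the demo):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct irq_chip { const char *name; };

    struct gpio_siox_ddata {        /* reduced stand-in for the driver struct */
            int state;
            struct irq_chip ichip;
    };

    int main(void)
    {
            struct gpio_siox_ddata d = { .state = 7, .ichip = { "siox" } };
            struct irq_chip *ic = &d.ichip;

            /* step back from the member to the enclosing structure */
            struct gpio_siox_ddata *back =
                    container_of(ic, struct gpio_siox_ddata, ichip);
            printf("%d\n", back->state);    /* prints 7 */
            return 0;
    }
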
ic 320 drivers/gpio/gpio-vf610.c ic->irq_set_wake = vf610_gpio_irq_set_wake;
ic 330 drivers/gpio/gpio-vf610.c girq->chip = ic;
ic 351 drivers/gpio/sgpio-aspeed.c struct irq_chip *ic = irq_desc_get_chip(desc);
ic 356 drivers/gpio/sgpio-aspeed.c chained_irq_enter(ic, desc);
ic 370 drivers/gpio/sgpio-aspeed.c chained_irq_exit(ic, desc);
ic 91 drivers/gpu/drm/nouveau/include/nvkm/subdev/therm.h struct i2c_client *ic;
ic 53 drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c therm->ic = client;
ic 103 drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c if (therm->ic)
ic 115 drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c if (therm->ic)
ic 164 drivers/gpu/ipu-v3/ipu-ic.c static inline u32 ipu_ic_read(struct ipu_ic *ic, unsigned offset)
ic 166 drivers/gpu/ipu-v3/ipu-ic.c return readl(ic->priv->base + offset);
ic 169 drivers/gpu/ipu-v3/ipu-ic.c static inline void ipu_ic_write(struct ipu_ic *ic, u32 value, unsigned offset)
ic 171 drivers/gpu/ipu-v3/ipu-ic.c writel(value, ic->priv->base + offset);
ic 174 drivers/gpu/ipu-v3/ipu-ic.c static int init_csc(struct ipu_ic *ic,
ic 178 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 185 drivers/gpu/ipu-v3/ipu-ic.c (priv->tpmem_base + ic->reg->tpmem_csc[csc_index]);
ic 216 drivers/gpu/ipu-v3/ipu-ic.c static int calc_resize_coeffs(struct ipu_ic *ic,
ic 221 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 268 drivers/gpu/ipu-v3/ipu-ic.c void ipu_ic_task_enable(struct ipu_ic *ic)
ic 270 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 276 drivers/gpu/ipu-v3/ipu-ic.c ic_conf = ipu_ic_read(ic, IC_CONF);
ic 278 drivers/gpu/ipu-v3/ipu-ic.c ic_conf |= ic->bit->ic_conf_en;
ic 280 drivers/gpu/ipu-v3/ipu-ic.c if (ic->rotation)
ic 281 drivers/gpu/ipu-v3/ipu-ic.c ic_conf |= ic->bit->ic_conf_rot_en;
ic 283 drivers/gpu/ipu-v3/ipu-ic.c if (ic->in_cs.cs != ic->out_cs.cs)
ic 284 drivers/gpu/ipu-v3/ipu-ic.c ic_conf |= ic->bit->ic_conf_csc1_en;
ic 286 drivers/gpu/ipu-v3/ipu-ic.c if (ic->graphics) {
ic 287 drivers/gpu/ipu-v3/ipu-ic.c ic_conf |= ic->bit->ic_conf_cmb_en;
ic 288 drivers/gpu/ipu-v3/ipu-ic.c ic_conf |= ic->bit->ic_conf_csc1_en;
ic 290 drivers/gpu/ipu-v3/ipu-ic.c if (ic->g_in_cs.cs != ic->out_cs.cs)
ic 291 drivers/gpu/ipu-v3/ipu-ic.c ic_conf |= ic->bit->ic_conf_csc2_en;
ic 294 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, ic_conf, IC_CONF);
ic 300 drivers/gpu/ipu-v3/ipu-ic.c void ipu_ic_task_disable(struct ipu_ic *ic)
ic 302 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 308 drivers/gpu/ipu-v3/ipu-ic.c ic_conf = ipu_ic_read(ic, IC_CONF);
ic 310 drivers/gpu/ipu-v3/ipu-ic.c ic_conf &= ~(ic->bit->ic_conf_en |
ic 311 drivers/gpu/ipu-v3/ipu-ic.c ic->bit->ic_conf_csc1_en |
ic 312 drivers/gpu/ipu-v3/ipu-ic.c ic->bit->ic_conf_rot_en);
ic 313 drivers/gpu/ipu-v3/ipu-ic.c if (ic->bit->ic_conf_csc2_en)
ic 314 drivers/gpu/ipu-v3/ipu-ic.c ic_conf &= ~ic->bit->ic_conf_csc2_en;
ic 315 drivers/gpu/ipu-v3/ipu-ic.c if (ic->bit->ic_conf_cmb_en)
ic 316 drivers/gpu/ipu-v3/ipu-ic.c ic_conf &= ~ic->bit->ic_conf_cmb_en;
ic 318 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, ic_conf, IC_CONF);
ic 324 drivers/gpu/ipu-v3/ipu-ic.c int ipu_ic_task_graphics_init(struct ipu_ic *ic,
ic 329 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 335 drivers/gpu/ipu-v3/ipu-ic.c if (ic->task == IC_TASK_ENCODER)
ic 340 drivers/gpu/ipu-v3/ipu-ic.c ic_conf = ipu_ic_read(ic, IC_CONF);
ic 342 drivers/gpu/ipu-v3/ipu-ic.c if (!(ic_conf & ic->bit->ic_conf_csc1_en)) {
ic 356 drivers/gpu/ipu-v3/ipu-ic.c ret = init_csc(ic, &csc1, 0);
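
Note: the ipu_ic_task_enable() hits read IC_CONF once, OR in the enable bits the task's state calls for, and write the word back; ipu_ic_task_disable() clears the same bits. A sketch of that composition over a plain word; the bit values are invented, but the conditions mirror the hits above:

    #include <stdint.h>
    #include <stdio.h>

    #define IC_CONF_EN      (1u << 0)   /* illustrative positions only */
    #define IC_CONF_ROT_EN  (1u << 1)
    #define IC_CONF_CSC1_EN (1u << 2)
    #define IC_CONF_CSC2_EN (1u << 3)
    #define IC_CONF_CMB_EN  (1u << 4)

    struct ic_state {
            int rotation, graphics;
            int in_cs, out_cs, g_in_cs; /* colorspace ids */
    };

    static uint32_t task_enable_conf(const struct ic_state *ic, uint32_t conf)
    {
            conf |= IC_CONF_EN;
            if (ic->rotation)
                    conf |= IC_CONF_ROT_EN;
            if (ic->in_cs != ic->out_cs)
                    conf |= IC_CONF_CSC1_EN;    /* convert the main input */
            if (ic->graphics) {
                    conf |= IC_CONF_CMB_EN | IC_CONF_CSC1_EN;
                    if (ic->g_in_cs != ic->out_cs)
                            conf |= IC_CONF_CSC2_EN; /* convert graphics plane */
            }
            return conf;
    }

    int main(void)
    {
            struct ic_state s = { .rotation = 1, .in_cs = 0, .out_cs = 1 };
            printf("IC_CONF = 0x%x\n", task_enable_conf(&s, 0));
            return 0;
    }
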
ic 361 drivers/gpu/ipu-v3/ipu-ic.c ic->g_in_cs = *g_in_cs;
ic 362 drivers/gpu/ipu-v3/ipu-ic.c csc2.in_cs = ic->g_in_cs;
ic 363 drivers/gpu/ipu-v3/ipu-ic.c csc2.out_cs = ic->out_cs;
ic 369 drivers/gpu/ipu-v3/ipu-ic.c ret = init_csc(ic, &csc2, 1);
ic 375 drivers/gpu/ipu-v3/ipu-ic.c reg = ipu_ic_read(ic, IC_CMBP_1);
ic 376 drivers/gpu/ipu-v3/ipu-ic.c reg &= ~(0xff << ic->bit->ic_cmb_galpha_bit);
ic 377 drivers/gpu/ipu-v3/ipu-ic.c reg |= (galpha << ic->bit->ic_cmb_galpha_bit);
ic 378 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, reg, IC_CMBP_1);
ic 384 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, colorkey, IC_CMBP_2);
ic 388 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, ic_conf, IC_CONF);
ic 390 drivers/gpu/ipu-v3/ipu-ic.c ic->graphics = true;
ic 397 drivers/gpu/ipu-v3/ipu-ic.c int ipu_ic_task_init_rsc(struct ipu_ic *ic,
ic 403 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 411 drivers/gpu/ipu-v3/ipu-ic.c ret = calc_resize_coeffs(ic, in_height, out_height,
ic 419 drivers/gpu/ipu-v3/ipu-ic.c ret = calc_resize_coeffs(ic, in_width, out_width,
ic 429 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, rsc, ic->reg->rsc);
ic 432 drivers/gpu/ipu-v3/ipu-ic.c ic->in_cs = csc->in_cs;
ic 433 drivers/gpu/ipu-v3/ipu-ic.c ic->out_cs = csc->out_cs;
ic 435 drivers/gpu/ipu-v3/ipu-ic.c ret = init_csc(ic, csc, 0);
ic 441 drivers/gpu/ipu-v3/ipu-ic.c int ipu_ic_task_init(struct ipu_ic *ic,
ic 446 drivers/gpu/ipu-v3/ipu-ic.c return ipu_ic_task_init_rsc(ic, csc,
ic 452 drivers/gpu/ipu-v3/ipu-ic.c int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel,
ic 456 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 477 drivers/gpu/ipu-v3/ipu-ic.c ic_idmac_1 = ipu_ic_read(ic, IC_IDMAC_1);
ic 478 drivers/gpu/ipu-v3/ipu-ic.c ic_idmac_2 = ipu_ic_read(ic, IC_IDMAC_2);
ic 479 drivers/gpu/ipu-v3/ipu-ic.c ic_idmac_3 = ipu_ic_read(ic, IC_IDMAC_3);
ic 579 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, ic_idmac_1, IC_IDMAC_1);
ic 580 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, ic_idmac_2, IC_IDMAC_2);
ic 581 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_write(ic, ic_idmac_3, IC_IDMAC_3);
ic 584 drivers/gpu/ipu-v3/ipu-ic.c ic->rotation = true;
ic 592 drivers/gpu/ipu-v3/ipu-ic.c static void ipu_irt_enable(struct ipu_ic *ic)
ic 594 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 602 drivers/gpu/ipu-v3/ipu-ic.c static void ipu_irt_disable(struct ipu_ic *ic)
ic 604 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 612 drivers/gpu/ipu-v3/ipu-ic.c int ipu_ic_enable(struct ipu_ic *ic)
ic 614 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 624 drivers/gpu/ipu-v3/ipu-ic.c if (ic->rotation)
ic 625 drivers/gpu/ipu-v3/ipu-ic.c ipu_irt_enable(ic);
ic 633 drivers/gpu/ipu-v3/ipu-ic.c int ipu_ic_disable(struct ipu_ic *ic)
ic 635 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 648 drivers/gpu/ipu-v3/ipu-ic.c if (ic->rotation)
ic 649 drivers/gpu/ipu-v3/ipu-ic.c ipu_irt_disable(ic);
ic 651 drivers/gpu/ipu-v3/ipu-ic.c ic->rotation = ic->graphics = false;
ic 663 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic *ic, *ret;
ic 668 drivers/gpu/ipu-v3/ipu-ic.c ic = &priv->task[task];
ic 672 drivers/gpu/ipu-v3/ipu-ic.c if (ic->in_use) {
ic 677 drivers/gpu/ipu-v3/ipu-ic.c ic->in_use = true;
ic 678 drivers/gpu/ipu-v3/ipu-ic.c ret = ic;
ic 686 drivers/gpu/ipu-v3/ipu-ic.c void ipu_ic_put(struct ipu_ic *ic)
ic 688 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 692 drivers/gpu/ipu-v3/ipu-ic.c ic->in_use = false;
ic 735 drivers/gpu/ipu-v3/ipu-ic.c void ipu_ic_dump(struct ipu_ic *ic)
ic 737 drivers/gpu/ipu-v3/ipu-ic.c struct ipu_ic_priv *priv = ic->priv;
ic 741 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_CONF));
ic 743 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_PRP_ENC_RSC));
ic 745 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_PRP_VF_RSC));
ic 747 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_PP_RSC));
ic 749 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_CMBP_1));
ic 751 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_CMBP_2));
ic 753 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_IDMAC_1));
ic 755 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_IDMAC_2));
ic 757 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_IDMAC_3));
ic 759 drivers/gpu/ipu-v3/ipu-ic.c ipu_ic_read(ic, IC_IDMAC_4));
ic 185 drivers/gpu/ipu-v3/ipu-image-convert.c struct ipu_ic *ic;
ic 1264 drivers/gpu/ipu-v3/ipu-image-convert.c ipu_ic_task_disable(chan->ic);
ic 1274 drivers/gpu/ipu-v3/ipu-image-convert.c ipu_ic_disable(chan->ic);
ic 1354 drivers/gpu/ipu-v3/ipu-image-convert.c ipu_ic_task_idma_init(chan->ic, channel, width, height,
ic 1405 drivers/gpu/ipu-v3/ipu-image-convert.c ret = ipu_ic_task_init_rsc(chan->ic, &ctx->csc,
ic 1442 drivers/gpu/ipu-v3/ipu-image-convert.c ipu_ic_enable(chan->ic);
ic 1464 drivers/gpu/ipu-v3/ipu-image-convert.c ipu_ic_task_enable(chan->ic);
ic 1817 drivers/gpu/ipu-v3/ipu-image-convert.c if (!IS_ERR_OR_NULL(chan->ic))
ic 1818 drivers/gpu/ipu-v3/ipu-image-convert.c ipu_ic_put(chan->ic);
ic 1832 drivers/gpu/ipu-v3/ipu-image-convert.c chan->ic = ipu_ic_get(priv->ipu, chan->ic_task);
ic 1833 drivers/gpu/ipu-v3/ipu-image-convert.c if (IS_ERR(chan->ic)) {
ic 1835 drivers/gpu/ipu-v3/ipu-image-convert.c ret = PTR_ERR(chan->ic);
ic 34 drivers/irqchip/irq-lpc32xx.c static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
ic 36 drivers/irqchip/irq-lpc32xx.c return readl_relaxed(ic->base + reg);
ic 39 drivers/irqchip/irq-lpc32xx.c static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
ic 42 drivers/irqchip/irq-lpc32xx.c writel_relaxed(val, ic->base + reg);
ic 47 drivers/irqchip/irq-lpc32xx.c struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
ic 50 drivers/irqchip/irq-lpc32xx.c val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
ic 51 drivers/irqchip/irq-lpc32xx.c lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
ic 56 drivers/irqchip/irq-lpc32xx.c struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
ic 59 drivers/irqchip/irq-lpc32xx.c val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
ic 60 drivers/irqchip/irq-lpc32xx.c lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
ic 65 drivers/irqchip/irq-lpc32xx.c struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
ic 68 drivers/irqchip/irq-lpc32xx.c lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
ic 73 drivers/irqchip/irq-lpc32xx.c struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
ic 101 drivers/irqchip/irq-lpc32xx.c val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
ic 106 drivers/irqchip/irq-lpc32xx.c lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
ic 108 drivers/irqchip/irq-lpc32xx.c val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
ic 116 drivers/irqchip/irq-lpc32xx.c lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
ic 123 drivers/irqchip/irq-lpc32xx.c struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
ic 124 drivers/irqchip/irq-lpc32xx.c u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
ic 135 drivers/irqchip/irq-lpc32xx.c struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
ic 137 drivers/irqchip/irq-lpc32xx.c u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
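
Note: the irq-lpc32xx callbacks above fall into two register idioms: mask/unmask are a read-modify-write of the MASK register, while ack writes the interrupt's bit straight to the RAW register (write-one-to-clear, no read needed). Modelled over plain variables standing in for the ic->base MMIO registers:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t intc_mask;  /* LPC32XX_INTC_MASK image */
    static uint32_t intc_raw;   /* LPC32XX_INTC_RAW image  */

    static void irq_mask(unsigned hwirq)
    {
            intc_mask &= ~(1u << hwirq);    /* read, clear bit, write back */
    }

    static void irq_unmask(unsigned hwirq)
    {
            intc_mask |= 1u << hwirq;
    }

    static void irq_ack(unsigned hwirq)
    {
            intc_raw = 1u << hwirq;         /* single write clears the latch */
    }

    int main(void)
    {
            irq_unmask(5);
            irq_ack(5);
            irq_mask(5);
            printf("mask=0x%08x raw=0x%08x\n", intc_mask, intc_raw);
            return 0;
    }
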
ic 144 drivers/irqchip/irq-lpc32xx.c generic_handle_irq(irq_find_mapping(ic->domain, irq));
ic 153 drivers/irqchip/irq-lpc32xx.c struct lpc32xx_irq_chip *ic = id->host_data;
ic 155 drivers/irqchip/irq-lpc32xx.c irq_set_chip_data(virq, ic);
ic 156 drivers/irqchip/irq-lpc32xx.c irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
ic 96 drivers/md/dm-integrity.c #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])
ic 283 drivers/md/dm-integrity.c struct dm_integrity_c *ic;
ic 301 drivers/md/dm-integrity.c struct dm_integrity_c *ic;
ic 313 drivers/md/dm-integrity.c struct dm_integrity_c *ic;
ic 371 drivers/md/dm-integrity.c static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
ic 374 drivers/md/dm-integrity.c atomic64_inc(&ic->number_of_mismatches);
ic 375 drivers/md/dm-integrity.c if (!cmpxchg(&ic->failed, 0, err))
ic 379 drivers/md/dm-integrity.c static int dm_integrity_failed(struct dm_integrity_c *ic)
ic 381 drivers/md/dm-integrity.c return READ_ONCE(ic->failed);
ic 384 drivers/md/dm-integrity.c static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
ic 391 drivers/md/dm-integrity.c return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
ic 394 drivers/md/dm-integrity.c static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
ic 397 drivers/md/dm-integrity.c if (!ic->meta_dev) {
ic 398 drivers/md/dm-integrity.c __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
ic 407 drivers/md/dm-integrity.c #define sector_to_block(ic, n) \
ic 409 drivers/md/dm-integrity.c BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
ic 410 drivers/md/dm-integrity.c (n) >>= (ic)->sb->log2_sectors_per_block; \
ic 413 drivers/md/dm-integrity.c static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
ic 419 drivers/md/dm-integrity.c ms = area << ic->sb->log2_interleave_sectors;
ic 420 drivers/md/dm-integrity.c if (likely(ic->log2_metadata_run >= 0))
ic 421 drivers/md/dm-integrity.c ms += area << ic->log2_metadata_run;
ic 423 drivers/md/dm-integrity.c ms += area * ic->metadata_run;
ic 424 drivers/md/dm-integrity.c ms >>= ic->log2_buffer_sectors;
ic 426 drivers/md/dm-integrity.c sector_to_block(ic, offset);
ic 428 drivers/md/dm-integrity.c if (likely(ic->log2_tag_size >= 0)) {
ic 429 drivers/md/dm-integrity.c ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
ic 430 drivers/md/dm-integrity.c mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
ic 432 drivers/md/dm-integrity.c ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
ic 433 drivers/md/dm-integrity.c mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
ic 439 drivers/md/dm-integrity.c static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
ic 443 drivers/md/dm-integrity.c if (ic->meta_dev)
ic 446 drivers/md/dm-integrity.c result = area << ic->sb->log2_interleave_sectors;
ic 447 drivers/md/dm-integrity.c if (likely(ic->log2_metadata_run >= 0))
ic 448 drivers/md/dm-integrity.c result += (area + 1) << ic->log2_metadata_run;
ic 450 drivers/md/dm-integrity.c result += (area + 1) * ic->metadata_run;
ic 452 drivers/md/dm-integrity.c result += (sector_t)ic->initial_sectors + offset;
ic 453 drivers/md/dm-integrity.c result += ic->start;
ic 458 drivers/md/dm-integrity.c static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
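
Note: dm_integrity_io_error() (hit at 375) records only the first failure: the cmpxchg from 0 succeeds exactly once, and later errors leave the original code in place for dm_integrity_failed() to return. The same latch expressed with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int failed;  /* 0 = healthy, else first error code seen */

    static void record_error(const char *msg, int err)
    {
            int expected = 0;
            if (atomic_compare_exchange_strong(&failed, &expected, err))
                    fprintf(stderr, "first error: %s (%d)\n", msg, err);
            /* losers fall through: the earliest error code is preserved */
    }

    int main(void)
    {
            record_error("writing journal", -5);
            record_error("journal mac", -74);   /* ignored, -5 already latched */
            printf("failed = %d\n", atomic_load(&failed));
            return 0;
    }
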
ic 460 drivers/md/dm-integrity.c if (unlikely(*sec_ptr >= ic->journal_sections))
ic 461 drivers/md/dm-integrity.c *sec_ptr -= ic->journal_sections;
ic 464 drivers/md/dm-integrity.c static void sb_set_version(struct dm_integrity_c *ic)
ic 466 drivers/md/dm-integrity.c if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
ic 467 drivers/md/dm-integrity.c ic->sb->version = SB_VERSION_3;
ic 468 drivers/md/dm-integrity.c else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
ic 469 drivers/md/dm-integrity.c ic->sb->version = SB_VERSION_2;
ic 471 drivers/md/dm-integrity.c ic->sb->version = SB_VERSION_1;
ic 474 drivers/md/dm-integrity.c static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
ic 482 drivers/md/dm-integrity.c io_req.mem.ptr.addr = ic->sb;
ic 484 drivers/md/dm-integrity.c io_req.client = ic->io;
ic 485 drivers/md/dm-integrity.c io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
ic 486 drivers/md/dm-integrity.c io_loc.sector = ic->start;
ic 490 drivers/md/dm-integrity.c sb_set_version(ic);
ic 500 drivers/md/dm-integrity.c static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
ic 506 drivers/md/dm-integrity.c if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
ic 510 drivers/md/dm-integrity.c ic->sb->log2_sectors_per_block,
ic 511 drivers/md/dm-integrity.c ic->log2_blocks_per_bitmap_bit,
ic 519 drivers/md/dm-integrity.c bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
ic 521 drivers/md/dm-integrity.c (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
ic 605 drivers/md/dm-integrity.c static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
ic 607 drivers/md/dm-integrity.c unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
ic 617 drivers/md/dm-integrity.c static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
ic 619 drivers/md/dm-integrity.c unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
ic 622 drivers/md/dm-integrity.c BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
ic 623 drivers/md/dm-integrity.c return &ic->bbs[bitmap_block];
ic 626 drivers/md/dm-integrity.c static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
ic 630 drivers/md/dm-integrity.c unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;
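
Note: wraparound_section() (hits 458-461) normalizes a section index with a single compare-and-subtract rather than a modulo; that is only valid because callers never advance the index by more than one full ring. A sketch plus a check against %:

    #include <assert.h>
    #include <stdio.h>

    static unsigned wrap(unsigned sec, unsigned journal_sections)
    {
            if (sec >= journal_sections)
                    sec -= journal_sections;    /* caller guarantees sec < 2 * sections */
            return sec;
    }

    int main(void)
    {
            unsigned sections = 7;
            for (unsigned sec = 0; sec < 2 * sections; sec++)
                    assert(wrap(sec, sections) == sec % sections);
            printf("wrap(9, 7) = %u\n", wrap(9, 7));
            return 0;
    }
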
ic 632 drivers/md/dm-integrity.c if (unlikely(section >= ic->journal_sections) ||
ic 635 drivers/md/dm-integrity.c function, section, offset, ic->journal_sections, limit);
ic 641 drivers/md/dm-integrity.c static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
ic 646 drivers/md/dm-integrity.c access_journal_check(ic, section, offset, false, "page_list_location");
ic 648 drivers/md/dm-integrity.c sector = section * ic->journal_section_sectors + offset;
ic 654 drivers/md/dm-integrity.c static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
ic 660 drivers/md/dm-integrity.c page_list_location(ic, section, offset, &pl_index, &pl_offset);
ic 670 drivers/md/dm-integrity.c static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
ic 672 drivers/md/dm-integrity.c return access_page_list(ic, ic->journal, section, offset, NULL);
ic 675 drivers/md/dm-integrity.c static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
ic 680 drivers/md/dm-integrity.c access_journal_check(ic, section, n, true, "access_journal_entry");
ic 685 drivers/md/dm-integrity.c js = access_journal(ic, section, rel_sector);
ic 686 drivers/md/dm-integrity.c return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
ic 689 drivers/md/dm-integrity.c static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
ic 691 drivers/md/dm-integrity.c n <<= ic->sb->log2_sectors_per_block;
ic 695 drivers/md/dm-integrity.c access_journal_check(ic, section, n, false, "access_journal_data");
ic 697 drivers/md/dm-integrity.c return access_journal(ic, section, n);
ic 700 drivers/md/dm-integrity.c static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
ic 702 drivers/md/dm-integrity.c SHASH_DESC_ON_STACK(desc, ic->journal_mac);
ic 706 drivers/md/dm-integrity.c desc->tfm = ic->journal_mac;
ic 710 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_init", r);
ic 714 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_entries; j++) {
ic 715 drivers/md/dm-integrity.c struct journal_entry *je = access_journal_entry(ic, section, j);
ic 718 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_update", r);
ic 723 drivers/md/dm-integrity.c size = crypto_shash_digestsize(ic->journal_mac);
ic 728 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_final", r);
ic 736 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "digest_size", -EINVAL);
ic 741 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_final", r);
ic 752 drivers/md/dm-integrity.c static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
ic 757 drivers/md/dm-integrity.c if (!ic->journal_mac)
ic 760 drivers/md/dm-integrity.c section_mac(ic, section, result);
ic 763 drivers/md/dm-integrity.c struct journal_sector *js = access_journal(ic, section, j);
ic 769 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "journal mac", -EILSEQ);
ic 782 drivers/md/dm-integrity.c static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
ic 786 drivers/md/dm-integrity.c size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
ic 791 drivers/md/dm-integrity.c source_pl = ic->journal;
ic 792 drivers/md/dm-integrity.c target_pl = ic->journal_io;
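
Note: page_list_location() (hits 641-648) turns a (section, offset) pair into a journal sector number, and access_page_list() then consumes a page index plus byte offset within the journal's page list. The hits only show the sector computation; the shift arithmetic below is the standard 512-byte-sector-to-page conversion, offered as a hedged reconstruction rather than a quote of the kernel source:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9              /* 512-byte sectors */
    #define PAGE_SHIFT   12             /* 4 KiB pages */
    #define PAGE_SIZE    (1u << PAGE_SHIFT)

    static void page_list_location(unsigned section, unsigned offset,
                                   unsigned journal_section_sectors,
                                   unsigned *pl_index, unsigned *pl_offset)
    {
            uint64_t sector = (uint64_t)section * journal_section_sectors + offset;

            *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);  /* 8 sectors/page */
            *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
    }

    int main(void)
    {
            unsigned idx, off;
            page_list_location(3, 5, 130, &idx, &off);  /* 130-sector sections, say */
            printf("page %u, byte offset %u\n", idx, off);
            return 0;
    }
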
ic 794 drivers/md/dm-integrity.c source_pl = ic->journal_io;
ic 795 drivers/md/dm-integrity.c target_pl = ic->journal;
ic 798 drivers/md/dm-integrity.c page_list_location(ic, section, 0, &pl_index, &pl_offset);
ic 814 drivers/md/dm-integrity.c rw_section_mac(ic, section, true);
ic 819 drivers/md/dm-integrity.c page_list_location(ic, section, 0, &section_index, &dummy);
ic 825 drivers/md/dm-integrity.c src_pages[1] = ic->journal_xor[pl_index].page;
ic 844 drivers/md/dm-integrity.c complete(&comp->ic->crypto_backoff);
ic 847 drivers/md/dm-integrity.c dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
ic 866 drivers/md/dm-integrity.c wait_for_completion(&comp->ic->crypto_backoff);
ic 867 drivers/md/dm-integrity.c reinit_completion(&comp->ic->crypto_backoff);
ic 870 drivers/md/dm-integrity.c dm_integrity_io_error(comp->ic, "encrypt", r);
ic 874 drivers/md/dm-integrity.c static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
ic 883 drivers/md/dm-integrity.c source_sg = ic->journal_scatterlist;
ic 884 drivers/md/dm-integrity.c target_sg = ic->journal_io_scatterlist;
ic 886 drivers/md/dm-integrity.c source_sg = ic->journal_io_scatterlist;
ic 887 drivers/md/dm-integrity.c target_sg = ic->journal_scatterlist;
ic 896 drivers/md/dm-integrity.c rw_section_mac(ic, section, true);
ic 898 drivers/md/dm-integrity.c req = ic->sk_requests[section];
ic 899 drivers/md/dm-integrity.c ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
ic 918 drivers/md/dm-integrity.c static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
ic 921 drivers/md/dm-integrity.c if (ic->journal_xor)
ic 922 drivers/md/dm-integrity.c return xor_journal(ic, encrypt, section, n_sections, comp);
ic 924 drivers/md/dm-integrity.c return crypt_journal(ic, encrypt, section, n_sections, comp);
ic 931 drivers/md/dm-integrity.c dm_integrity_io_error(comp->ic, "writing journal", -EIO);
ic 935 drivers/md/dm-integrity.c static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
ic 943 drivers/md/dm-integrity.c if (unlikely(dm_integrity_failed(ic))) {
ic 955 drivers/md/dm-integrity.c if (ic->journal_io)
ic 956 drivers/md/dm-integrity.c io_req.mem.ptr.pl = &ic->journal_io[pl_index];
ic 958 drivers/md/dm-integrity.c io_req.mem.ptr.pl = &ic->journal[pl_index];
ic 966 drivers/md/dm-integrity.c io_req.client = ic->io;
ic 967 drivers/md/dm-integrity.c io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
ic 968 drivers/md/dm-integrity.c io_loc.sector = ic->start + SB_SECTORS + sector;
ic 973 drivers/md/dm-integrity.c dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
"reading journal" : "writing journal", r); ic 981 drivers/md/dm-integrity.c static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section, ic 986 drivers/md/dm-integrity.c sector = section * ic->journal_section_sectors; ic 987 drivers/md/dm-integrity.c n_sectors = n_sections * ic->journal_section_sectors; ic 989 drivers/md/dm-integrity.c rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp); ic 992 drivers/md/dm-integrity.c static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections) ic 999 drivers/md/dm-integrity.c io_comp.ic = ic; ic 1002 drivers/md/dm-integrity.c if (commit_start + commit_sections <= ic->journal_sections) { ic 1004 drivers/md/dm-integrity.c if (ic->journal_io) { ic 1005 drivers/md/dm-integrity.c crypt_comp_1.ic = ic; ic 1008 drivers/md/dm-integrity.c encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1); ic 1012 drivers/md/dm-integrity.c rw_section_mac(ic, commit_start + i, true); ic 1014 drivers/md/dm-integrity.c rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, ic 1019 drivers/md/dm-integrity.c to_end = ic->journal_sections - commit_start; ic 1020 drivers/md/dm-integrity.c if (ic->journal_io) { ic 1021 drivers/md/dm-integrity.c crypt_comp_1.ic = ic; ic 1024 drivers/md/dm-integrity.c encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1); ic 1026 drivers/md/dm-integrity.c rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); ic 1029 drivers/md/dm-integrity.c encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1); ic 1032 drivers/md/dm-integrity.c crypt_comp_2.ic = ic; ic 1035 drivers/md/dm-integrity.c encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2); ic 1037 drivers/md/dm-integrity.c rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); ic 1042 drivers/md/dm-integrity.c rw_section_mac(ic, commit_start + i, true); ic 1043 drivers/md/dm-integrity.c rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp); ic 1045 drivers/md/dm-integrity.c rw_section_mac(ic, i, true); ic 1047 drivers/md/dm-integrity.c rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp); ic 1053 drivers/md/dm-integrity.c static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset, ic 1061 drivers/md/dm-integrity.c BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1)); ic 1063 drivers/md/dm-integrity.c if (unlikely(dm_integrity_failed(ic))) { ic 1068 drivers/md/dm-integrity.c sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset; ic 1076 drivers/md/dm-integrity.c io_req.mem.ptr.pl = &ic->journal[pl_index]; ic 1080 drivers/md/dm-integrity.c io_req.client = ic->io; ic 1081 drivers/md/dm-integrity.c io_loc.bdev = ic->dev->bdev; ic 1098 drivers/md/dm-integrity.c static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) ic 1100 drivers/md/dm-integrity.c struct rb_node **n = &ic->in_progress.rb_node; ic 1103 drivers/md/dm-integrity.c BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1)); ic 1107 drivers/md/dm-integrity.c list_for_each_entry(range, &ic->wait_list, wait_entry) { ic 1129 drivers/md/dm-integrity.c rb_insert_color(&new_range->node, &ic->in_progress); ic 1134 drivers/md/dm-integrity.c static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range) ic 1136 
ic 1137 drivers/md/dm-integrity.c while (unlikely(!list_empty(&ic->wait_list))) {
ic 1139 drivers/md/dm-integrity.c list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
ic 1143 drivers/md/dm-integrity.c if (!add_new_range(ic, last_range, false)) {
ic 1145 drivers/md/dm-integrity.c list_add(&last_range->wait_entry, &ic->wait_list);
ic 1153 drivers/md/dm-integrity.c static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
ic 1157 drivers/md/dm-integrity.c spin_lock_irqsave(&ic->endio_wait.lock, flags);
ic 1158 drivers/md/dm-integrity.c remove_range_unlocked(ic, range);
ic 1159 drivers/md/dm-integrity.c spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
ic 1162 drivers/md/dm-integrity.c static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
ic 1165 drivers/md/dm-integrity.c list_add_tail(&new_range->wait_entry, &ic->wait_list);
ic 1169 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock);
ic 1171 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock);
ic 1175 drivers/md/dm-integrity.c static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
ic 1177 drivers/md/dm-integrity.c if (unlikely(!add_new_range(ic, new_range, true)))
ic 1178 drivers/md/dm-integrity.c wait_and_add_new_range(ic, new_range);
ic 1187 drivers/md/dm-integrity.c static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
ic 1195 drivers/md/dm-integrity.c link = &ic->journal_tree_root.rb_node;
ic 1209 drivers/md/dm-integrity.c rb_insert_color(&node->node, &ic->journal_tree_root);
ic 1212 drivers/md/dm-integrity.c static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
ic 1215 drivers/md/dm-integrity.c rb_erase(&node->node, &ic->journal_tree_root);
ic 1221 drivers/md/dm-integrity.c static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
ic 1223 drivers/md/dm-integrity.c struct rb_node *n = ic->journal_tree_root.rb_node;
ic 1229 drivers/md/dm-integrity.c found = j - ic->journal_tree;
ic 1242 drivers/md/dm-integrity.c static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
ic 1247 drivers/md/dm-integrity.c if (unlikely(pos >= ic->journal_entries))
ic 1249 drivers/md/dm-integrity.c node = &ic->journal_tree[pos];
ic 1263 drivers/md/dm-integrity.c static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
ic 1280 drivers/md/dm-integrity.c next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
ic 1281 drivers/md/dm-integrity.c if (next_section >= ic->committed_section &&
ic 1282 drivers/md/dm-integrity.c next_section < ic->committed_section + ic->n_committed_sections)
ic 1284 drivers/md/dm-integrity.c if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
ic 1294 drivers/md/dm-integrity.c static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
ic 1303 drivers/md/dm-integrity.c r = dm_integrity_failed(ic);
ic 1307 drivers/md/dm-integrity.c data = dm_bufio_read(ic->bufio, *metadata_block, &b);
ic 1311 drivers/md/dm-integrity.c to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
ic 1336 drivers/md/dm-integrity.c if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
ic 1346 drivers/md/dm-integrity.c static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
ic 1349 drivers/md/dm-integrity.c r = dm_bufio_write_dirty_buffers(ic->bufio);
ic 1351 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "writing tags", r);
ic 1354 drivers/md/dm-integrity.c static void sleep_on_endio_wait(struct dm_integrity_c *ic)
ic 1357 drivers/md/dm-integrity.c __add_wait_queue(&ic->endio_wait, &wait);
ic 1359 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock);
ic 1361 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock);
ic 1362 drivers/md/dm-integrity.c __remove_wait_queue(&ic->endio_wait, &wait);
ic 1367 drivers/md/dm-integrity.c struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
ic 1369 drivers/md/dm-integrity.c if (likely(!dm_integrity_failed(ic)))
ic 1370 drivers/md/dm-integrity.c queue_work(ic->commit_wq, &ic->commit_work);
ic 1373 drivers/md/dm-integrity.c static void schedule_autocommit(struct dm_integrity_c *ic)
ic 1375 drivers/md/dm-integrity.c if (!timer_pending(&ic->autocommit_timer))
ic 1376 drivers/md/dm-integrity.c mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
ic 1379 drivers/md/dm-integrity.c static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
ic 1384 drivers/md/dm-integrity.c spin_lock_irqsave(&ic->endio_wait.lock, flags);
ic 1386 drivers/md/dm-integrity.c bio_list_add(&ic->flush_bio_list, bio);
ic 1387 drivers/md/dm-integrity.c spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
ic 1389 drivers/md/dm-integrity.c queue_work(ic->commit_wq, &ic->commit_work);
ic 1392 drivers/md/dm-integrity.c static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
ic 1394 drivers/md/dm-integrity.c int r = dm_integrity_failed(ic);
ic 1397 drivers/md/dm-integrity.c if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
ic 1399 drivers/md/dm-integrity.c spin_lock_irqsave(&ic->endio_wait.lock, flags);
ic 1400 drivers/md/dm-integrity.c bio_list_add(&ic->synchronous_bios, bio);
ic 1401 drivers/md/dm-integrity.c queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
ic 1402 drivers/md/dm-integrity.c spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
ic 1408 drivers/md/dm-integrity.c static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
ic 1412 drivers/md/dm-integrity.c if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
ic 1413 drivers/md/dm-integrity.c submit_flush_bio(ic, dio);
ic 1415 drivers/md/dm-integrity.c do_endio(ic, bio);
ic 1421 drivers/md/dm-integrity.c struct dm_integrity_c *ic = dio->ic;
ic 1424 drivers/md/dm-integrity.c remove_range(ic, &dio->range);
ic 1427 drivers/md/dm-integrity.c schedule_autocommit(ic);
ic 1437 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work);
ic 1440 drivers/md/dm-integrity.c do_endio_flush(ic, dio);
ic 1458 drivers/md/dm-integrity.c static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
ic 1462 drivers/md/dm-integrity.c SHASH_DESC_ON_STACK(req, ic->internal_hash);
ic 1466 drivers/md/dm-integrity.c req->tfm = ic->internal_hash;
ic 1470 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_init", r);
ic 1476 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_update", r);
ic 1480 drivers/md/dm-integrity.c r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
ic 1482 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_update", r);
ic 1488 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "crypto_shash_final", r);
ic 1492 drivers/md/dm-integrity.c digest_size = crypto_shash_digestsize(ic->internal_hash);
ic 1493 drivers/md/dm-integrity.c if (unlikely(digest_size < ic->tag_size))
ic 1494 drivers/md/dm-integrity.c memset(result + digest_size, 0, ic->tag_size - digest_size);
ic 1500 drivers/md/dm-integrity.c get_random_bytes(result, ic->tag_size);
ic 1506 drivers/md/dm-integrity.c struct dm_integrity_c *ic = dio->ic;
ic 1510 drivers/md/dm-integrity.c if (ic->internal_hash) {
ic 1513 drivers/md/dm-integrity.c unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
ic 1516 drivers/md/dm-integrity.c unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
ic 1521 drivers/md/dm-integrity.c if (unlikely(ic->mode == 'R'))
ic 1524 drivers/md/dm-integrity.c checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
ic 1544 drivers/md/dm-integrity.c integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
ic 1545 drivers/md/dm-integrity.c checksums_ptr += ic->tag_size;
ic 1546 drivers/md/dm-integrity.c sectors_to_process -= ic->sectors_per_block;
ic 1547 drivers/md/dm-integrity.c pos += ic->sectors_per_block << SECTOR_SHIFT;
ic 1548 drivers/md/dm-integrity.c sector += ic->sectors_per_block;
ic 1552 drivers/md/dm-integrity.c r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
ic 1557 drivers/md/dm-integrity.c (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
ic 1559 drivers/md/dm-integrity.c atomic64_inc(&ic->number_of_mismatches);
ic 1585 drivers/md/dm-integrity.c sector_to_block(ic, data_to_process);
ic 1586 drivers/md/dm-integrity.c data_to_process *= ic->tag_size;
ic 1595 drivers/md/dm-integrity.c r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
ic 1615 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private;
ic 1621 drivers/md/dm-integrity.c dio->ic = ic;
ic 1625 drivers/md/dm-integrity.c submit_flush_bio(ic, dio);
ic 1639 drivers/md/dm-integrity.c if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
ic 1642 drivers/md/dm-integrity.c (unsigned long long)ic->provided_data_sectors);
ic 1645 drivers/md/dm-integrity.c if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
ic 1647 drivers/md/dm-integrity.c ic->sectors_per_block,
ic 1652 drivers/md/dm-integrity.c if (ic->sectors_per_block > 1) {
ic 1656 drivers/md/dm-integrity.c if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
ic 1658 drivers/md/dm-integrity.c bv.bv_offset, bv.bv_len, ic->sectors_per_block);
ic 1665 drivers/md/dm-integrity.c if (!ic->internal_hash) {
ic 1667 drivers/md/dm-integrity.c unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
ic 1668 drivers/md/dm-integrity.c if (ic->log2_tag_size >= 0)
ic 1669 drivers/md/dm-integrity.c wanted_tag_size <<= ic->log2_tag_size;
ic 1671 drivers/md/dm-integrity.c wanted_tag_size *= ic->tag_size;
ic 1685 drivers/md/dm-integrity.c if (unlikely(ic->mode == 'R') && unlikely(dio->write))
ic 1688 drivers/md/dm-integrity.c get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
ic 1689 drivers/md/dm-integrity.c dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
ic 1690 drivers/md/dm-integrity.c bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
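
Note: the dm_integrity_map() check at 1645 (and the same trick at 506, 1061 and 1103) validates start and length together: OR-ing the two values and masking with sectors_per_block - 1 catches a misalignment in either one, since sectors_per_block is a power of two. In isolation:

    #include <stdint.h>
    #include <stdio.h>

    static int misaligned(uint64_t logical_sector, uint32_t n_sectors,
                          uint32_t sectors_per_block)   /* power of two */
    {
            /* any low bit set in either operand survives the OR */
            return (logical_sector | n_sectors) & (sectors_per_block - 1);
    }

    int main(void)
    {
            printf("%d\n", misaligned(8, 8, 8));    /* 0: both aligned     */
            printf("%d\n", misaligned(9, 8, 8));    /* nonzero: bad start  */
            printf("%d\n", misaligned(8, 5, 8));    /* nonzero: bad length */
            return 0;
    }
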
ic 1699 drivers/md/dm-integrity.c struct dm_integrity_c *ic = dio->ic;
ic 1719 drivers/md/dm-integrity.c struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
ic 1730 drivers/md/dm-integrity.c __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
ic 1735 drivers/md/dm-integrity.c js = access_journal_data(ic, journal_section, journal_entry);
ic 1743 drivers/md/dm-integrity.c } while (++s < ic->sectors_per_block);
ic 1745 drivers/md/dm-integrity.c if (ic->internal_hash) {
ic 1748 drivers/md/dm-integrity.c integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
ic 1749 drivers/md/dm-integrity.c if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
ic 1757 drivers/md/dm-integrity.c if (!ic->internal_hash) {
ic 1759 drivers/md/dm-integrity.c unsigned tag_todo = ic->tag_size;
ic 1760 drivers/md/dm-integrity.c char *tag_ptr = journal_entry_tag(ic, je);
ic 1785 drivers/md/dm-integrity.c js = access_journal_data(ic, journal_section, journal_entry);
ic 1786 drivers/md/dm-integrity.c memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
ic 1791 drivers/md/dm-integrity.c } while (++s < ic->sectors_per_block);
ic 1793 drivers/md/dm-integrity.c if (ic->internal_hash) {
ic 1794 drivers/md/dm-integrity.c unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
ic 1795 drivers/md/dm-integrity.c if (unlikely(digest_size > ic->tag_size)) {
ic 1797 drivers/md/dm-integrity.c integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
ic 1798 drivers/md/dm-integrity.c memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
ic 1800 drivers/md/dm-integrity.c integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
ic 1805 drivers/md/dm-integrity.c logical_sector += ic->sectors_per_block;
ic 1808 drivers/md/dm-integrity.c if (unlikely(journal_entry == ic->journal_section_entries)) {
ic 1811 drivers/md/dm-integrity.c wraparound_section(ic, &journal_section);
ic 1814 drivers/md/dm-integrity.c bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
ic 1815 drivers/md/dm-integrity.c } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
ic 1824 drivers/md/dm-integrity.c if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
ic 1825 drivers/md/dm-integrity.c wake_up(&ic->copy_to_journal_wait);
ic 1826 drivers/md/dm-integrity.c if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
ic 1827 drivers/md/dm-integrity.c queue_work(ic->commit_wq, &ic->commit_work);
ic 1829 drivers/md/dm-integrity.c schedule_autocommit(ic);
ic 1832 drivers/md/dm-integrity.c remove_range(ic, &dio->range);
ic 1839 drivers/md/dm-integrity.c get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
ic 1840 drivers/md/dm-integrity.c dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
ic 1849 drivers/md/dm-integrity.c struct dm_integrity_c *ic = dio->ic;
ic 1854 drivers/md/dm-integrity.c bool need_sync_io = ic->internal_hash && !dio->write;
ic 1858 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work);
ic 1863 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock);
ic 1865 drivers/md/dm-integrity.c if (unlikely(dm_integrity_failed(ic))) {
ic 1866 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock);
ic 1867 drivers/md/dm-integrity.c do_endio(ic, bio);
ic 1872 drivers/md/dm-integrity.c if (likely(ic->mode == 'J')) {
ic 1878 drivers/md/dm-integrity.c (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
ic 1882 drivers/md/dm-integrity.c sleep_on_endio_wait(ic);
ic 1885 drivers/md/dm-integrity.c range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
ic 1886 drivers/md/dm-integrity.c ic->free_sectors -= range_sectors;
ic 1887 drivers/md/dm-integrity.c journal_section = ic->free_section;
ic 1888 drivers/md/dm-integrity.c journal_entry = ic->free_section_entry;
ic 1890 drivers/md/dm-integrity.c next_entry = ic->free_section_entry + range_sectors;
ic 1891 drivers/md/dm-integrity.c ic->free_section_entry = next_entry % ic->journal_section_entries;
ic 1892 drivers/md/dm-integrity.c ic->free_section += next_entry / ic->journal_section_entries;
ic 1893 drivers/md/dm-integrity.c ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
ic 1894 drivers/md/dm-integrity.c wraparound_section(ic, &ic->free_section);
ic 1896 drivers/md/dm-integrity.c pos = journal_section * ic->journal_section_entries + journal_entry;
ic 1903 drivers/md/dm-integrity.c add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
ic 1905 drivers/md/dm-integrity.c if (unlikely(pos >= ic->journal_entries))
ic 1908 drivers/md/dm-integrity.c je = access_journal_entry(ic, ws, we);
ic 1912 drivers/md/dm-integrity.c if (unlikely(we == ic->journal_section_entries)) {
ic 1915 drivers/md/dm-integrity.c wraparound_section(ic, &ws);
ic 1917 drivers/md/dm-integrity.c } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
ic 1919 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock);
ic 1923 drivers/md/dm-integrity.c journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
ic 1930 drivers/md/dm-integrity.c for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
ic 1931 drivers/md/dm-integrity.c if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
ic 1938 drivers/md/dm-integrity.c if (unlikely(!add_new_range(ic, &dio->range, true))) {
ic 1946 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock);
ic 1948 drivers/md/dm-integrity.c queue_work(ic->wait_wq, &dio->work);
ic 1952 drivers/md/dm-integrity.c dio->range.n_sectors = ic->sectors_per_block;
ic 1953 drivers/md/dm-integrity.c wait_and_add_new_range(ic, &dio->range);
ic 1961 drivers/md/dm-integrity.c unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
ic 1963 drivers/md/dm-integrity.c remove_range_unlocked(ic, &dio->range);
ic 1968 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock);
ic 1971 drivers/md/dm-integrity.c journal_section = journal_read_pos / ic->journal_section_entries;
ic 1972 drivers/md/dm-integrity.c journal_entry = journal_read_pos % ic->journal_section_entries;
ic 1976 drivers/md/dm-integrity.c if (ic->mode == 'B' && dio->write) {
ic 1977 drivers/md/dm-integrity.c if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
ic 1981 drivers/md/dm-integrity.c bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
ic 1985 drivers/md/dm-integrity.c queue_work(ic->writer_wq, &bbs->work);
ic 1999 drivers/md/dm-integrity.c bio_set_dev(bio, ic->dev->bdev);
ic 2009 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
ic 2010 drivers/md/dm-integrity.c dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector)
ic 2012 drivers/md/dm-integrity.c if (ic->mode == 'B') {
ic 2013 drivers/md/dm-integrity.c if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
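
Note: the journal allocation in the mode 'J' path (hits 1885-1894) advances a (section, entry) cursor by the request's block count, carrying whole sections with a div/mod and then wrapping the section index. Extracted into a standalone helper; valid, like wraparound_section(), only while the advance stays under one full ring:

    #include <stdio.h>

    struct cursor { unsigned section, entry; };

    static void advance(struct cursor *c, unsigned n_blocks,
                        unsigned entries_per_section, unsigned journal_sections)
    {
            unsigned next = c->entry + n_blocks;

            c->entry = next % entries_per_section;
            c->section += next / entries_per_section;   /* carry whole sections */
            if (c->section >= journal_sections)         /* wraparound_section() */
                    c->section -= journal_sections;
    }

    int main(void)
    {
            struct cursor c = { .section = 6, .entry = 120 };
            advance(&c, 20, 128, 8);    /* 120+20 = 140 -> section 7, entry 12 */
            printf("section %u entry %u\n", c.section, c.entry);
            return 0;
    }
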
ic->recalc_bitmap, dio->range.logical_sector, ic 2026 drivers/md/dm-integrity.c queue_work(ic->metadata_wq, &dio->work); ic 2035 drivers/md/dm-integrity.c do_endio_flush(ic, dio); ic 2046 drivers/md/dm-integrity.c static void pad_uncommitted(struct dm_integrity_c *ic) ic 2048 drivers/md/dm-integrity.c if (ic->free_section_entry) { ic 2049 drivers/md/dm-integrity.c ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry; ic 2050 drivers/md/dm-integrity.c ic->free_section_entry = 0; ic 2051 drivers/md/dm-integrity.c ic->free_section++; ic 2052 drivers/md/dm-integrity.c wraparound_section(ic, &ic->free_section); ic 2053 drivers/md/dm-integrity.c ic->n_uncommitted_sections++; ic 2055 drivers/md/dm-integrity.c if (WARN_ON(ic->journal_sections * ic->journal_section_entries != ic 2056 drivers/md/dm-integrity.c (ic->n_uncommitted_sections + ic->n_committed_sections) * ic 2057 drivers/md/dm-integrity.c ic->journal_section_entries + ic->free_sectors)) { ic 2061 drivers/md/dm-integrity.c ic->journal_sections, ic->journal_section_entries, ic 2062 drivers/md/dm-integrity.c ic->n_uncommitted_sections, ic->n_committed_sections, ic 2063 drivers/md/dm-integrity.c ic->journal_section_entries, ic->free_sectors); ic 2069 drivers/md/dm-integrity.c struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work); ic 2074 drivers/md/dm-integrity.c del_timer(&ic->autocommit_timer); ic 2076 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2077 drivers/md/dm-integrity.c flushes = bio_list_get(&ic->flush_bio_list); ic 2078 drivers/md/dm-integrity.c if (unlikely(ic->mode != 'J')) { ic 2079 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2080 drivers/md/dm-integrity.c dm_integrity_flush_buffers(ic); ic 2084 drivers/md/dm-integrity.c pad_uncommitted(ic); ic 2085 drivers/md/dm-integrity.c commit_start = ic->uncommitted_section; ic 2086 drivers/md/dm-integrity.c commit_sections = ic->n_uncommitted_sections; ic 2087 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2094 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_entries; j++) { ic 2096 drivers/md/dm-integrity.c je = access_journal_entry(ic, i, j); ic 2097 drivers/md/dm-integrity.c io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je)); ic 2099 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_sectors; j++) { ic 2101 drivers/md/dm-integrity.c js = access_journal(ic, i, j); ic 2102 drivers/md/dm-integrity.c js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq); ic 2105 drivers/md/dm-integrity.c if (unlikely(i >= ic->journal_sections)) ic 2106 drivers/md/dm-integrity.c ic->commit_seq = next_commit_seq(ic->commit_seq); ic 2107 drivers/md/dm-integrity.c wraparound_section(ic, &i); ic 2111 drivers/md/dm-integrity.c write_journal(ic, commit_start, commit_sections); ic 2113 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2114 drivers/md/dm-integrity.c ic->uncommitted_section += commit_sections; ic 2115 drivers/md/dm-integrity.c wraparound_section(ic, &ic->uncommitted_section); ic 2116 drivers/md/dm-integrity.c ic->n_uncommitted_sections -= commit_sections; ic 2117 drivers/md/dm-integrity.c ic->n_committed_sections += commit_sections; ic 2118 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2120 drivers/md/dm-integrity.c if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) ic 2121 drivers/md/dm-integrity.c queue_work(ic->writer_wq, &ic->writer_work); ic 2127 drivers/md/dm-integrity.c 
do_endio(ic, flushes); ic 2136 drivers/md/dm-integrity.c struct dm_integrity_c *ic = comp->ic; ic 2137 drivers/md/dm-integrity.c remove_range(ic, &io->range); ic 2138 drivers/md/dm-integrity.c mempool_free(io, &ic->journal_io_mempool); ic 2140 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "copying from journal", -EIO); ic 2144 drivers/md/dm-integrity.c static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js, ic 2151 drivers/md/dm-integrity.c } while (++s < ic->sectors_per_block); ic 2154 drivers/md/dm-integrity.c static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start, ic 2163 drivers/md/dm-integrity.c comp.ic = ic; ic 2168 drivers/md/dm-integrity.c for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) { ic 2172 drivers/md/dm-integrity.c rw_section_mac(ic, i, false); ic 2173 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_entries; j++) { ic 2174 drivers/md/dm-integrity.c struct journal_entry *je = access_journal_entry(ic, i, j); ic 2186 drivers/md/dm-integrity.c if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) { ic 2187 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "invalid sector in journal", -EIO); ic 2188 drivers/md/dm-integrity.c sec &= ~(sector_t)(ic->sectors_per_block - 1); ic 2191 drivers/md/dm-integrity.c get_area_and_offset(ic, sec, &area, &offset); ic 2192 drivers/md/dm-integrity.c restore_last_bytes(ic, access_journal_data(ic, i, j), je); ic 2193 drivers/md/dm-integrity.c for (k = j + 1; k < ic->journal_section_entries; k++) { ic 2194 drivers/md/dm-integrity.c struct journal_entry *je2 = access_journal_entry(ic, i, k); ic 2200 drivers/md/dm-integrity.c get_area_and_offset(ic, sec2, &area2, &offset2); ic 2201 drivers/md/dm-integrity.c if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block)) ic 2203 drivers/md/dm-integrity.c restore_last_bytes(ic, access_journal_data(ic, i, k), je2); ic 2207 drivers/md/dm-integrity.c io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO); ic 2210 drivers/md/dm-integrity.c io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block; ic 2212 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2213 drivers/md/dm-integrity.c add_new_range_and_wait(ic, &io->range); ic 2216 drivers/md/dm-integrity.c struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries]; ic 2219 drivers/md/dm-integrity.c while (j < k && find_newer_committed_node(ic, &section_node[j])) { ic 2220 drivers/md/dm-integrity.c struct journal_entry *je2 = access_journal_entry(ic, i, j); ic 2223 drivers/md/dm-integrity.c remove_journal_node(ic, &section_node[j]); ic 2225 drivers/md/dm-integrity.c sec += ic->sectors_per_block; ic 2226 drivers/md/dm-integrity.c offset += ic->sectors_per_block; ic 2228 drivers/md/dm-integrity.c while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) { ic 2229 drivers/md/dm-integrity.c struct journal_entry *je2 = access_journal_entry(ic, i, k - 1); ic 2232 drivers/md/dm-integrity.c remove_journal_node(ic, &section_node[k - 1]); ic 2236 drivers/md/dm-integrity.c remove_range_unlocked(ic, &io->range); ic 2237 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2238 drivers/md/dm-integrity.c mempool_free(io, &ic->journal_io_mempool); ic 2242 drivers/md/dm-integrity.c remove_journal_node(ic, &section_node[l]); ic 2245 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2247 drivers/md/dm-integrity.c metadata_block = get_metadata_sector_and_offset(ic, area, offset,
&metadata_offset); ic 2250 drivers/md/dm-integrity.c struct journal_entry *je2 = access_journal_entry(ic, i, l); ic 2256 drivers/md/dm-integrity.c ic->internal_hash) { ic 2259 drivers/md/dm-integrity.c integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block), ic 2260 drivers/md/dm-integrity.c (char *)access_journal_data(ic, i, l), test_tag); ic 2261 drivers/md/dm-integrity.c if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) ic 2262 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ); ic 2266 drivers/md/dm-integrity.c r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset, ic 2267 drivers/md/dm-integrity.c ic->tag_size, TAG_WRITE); ic 2269 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "reading tags", r); ic 2274 drivers/md/dm-integrity.c copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block, ic 2275 drivers/md/dm-integrity.c (k - j) << ic->sb->log2_sectors_per_block, ic 2276 drivers/md/dm-integrity.c get_data_sector(ic, area, offset), ic 2283 drivers/md/dm-integrity.c dm_bufio_write_dirty_buffers_async(ic->bufio); ic 2290 drivers/md/dm-integrity.c dm_integrity_flush_buffers(ic); ic 2295 drivers/md/dm-integrity.c struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work); ic 2301 drivers/md/dm-integrity.c if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev) ic 2304 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2305 drivers/md/dm-integrity.c write_start = ic->committed_section; ic 2306 drivers/md/dm-integrity.c write_sections = ic->n_committed_sections; ic 2307 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2312 drivers/md/dm-integrity.c do_journal_write(ic, write_start, write_sections, false); ic 2314 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2316 drivers/md/dm-integrity.c ic->committed_section += write_sections; ic 2317 drivers/md/dm-integrity.c wraparound_section(ic, &ic->committed_section); ic 2318 drivers/md/dm-integrity.c ic->n_committed_sections -= write_sections; ic 2320 drivers/md/dm-integrity.c prev_free_sectors = ic->free_sectors; ic 2321 drivers/md/dm-integrity.c ic->free_sectors += write_sections * ic->journal_section_entries; ic 2323 drivers/md/dm-integrity.c wake_up_locked(&ic->endio_wait); ic 2325 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2328 drivers/md/dm-integrity.c static void recalc_write_super(struct dm_integrity_c *ic) ic 2332 drivers/md/dm-integrity.c dm_integrity_flush_buffers(ic); ic 2333 drivers/md/dm-integrity.c if (dm_integrity_failed(ic)) ic 2336 drivers/md/dm-integrity.c r = sync_rw_sb(ic, REQ_OP_WRITE, 0); ic 2338 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "writing superblock", r); ic 2343 drivers/md/dm-integrity.c struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work); ic 2356 drivers/md/dm-integrity.c DEBUG_print("start recalculation... 
(position %llx)\n", le64_to_cpu(ic->sb->recalc_sector)); ic 2358 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2362 drivers/md/dm-integrity.c if (unlikely(dm_suspended(ic->ti))) ic 2365 drivers/md/dm-integrity.c range.logical_sector = le64_to_cpu(ic->sb->recalc_sector); ic 2366 drivers/md/dm-integrity.c if (unlikely(range.logical_sector >= ic->provided_data_sectors)) { ic 2367 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2369 drivers/md/dm-integrity.c queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); ic 2374 drivers/md/dm-integrity.c get_area_and_offset(ic, range.logical_sector, &area, &offset); ic 2375 drivers/md/dm-integrity.c range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector); ic 2376 drivers/md/dm-integrity.c if (!ic->meta_dev) ic 2377 drivers/md/dm-integrity.c range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset); ic 2379 drivers/md/dm-integrity.c add_new_range_and_wait(ic, &range); ic 2380 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2384 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2385 drivers/md/dm-integrity.c if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) { ic 2388 drivers/md/dm-integrity.c while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, ic 2389 drivers/md/dm-integrity.c ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { ic 2390 drivers/md/dm-integrity.c logical_sector += ic->sectors_per_block; ic 2391 drivers/md/dm-integrity.c n_sectors -= ic->sectors_per_block; ic 2394 drivers/md/dm-integrity.c while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block, ic 2395 drivers/md/dm-integrity.c ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) { ic 2396 drivers/md/dm-integrity.c n_sectors -= ic->sectors_per_block; ic 2399 drivers/md/dm-integrity.c get_area_and_offset(ic, logical_sector, &area, &offset); ic 2405 drivers/md/dm-integrity.c recalc_write_super(ic); ic 2406 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2407 drivers/md/dm-integrity.c queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); ic 2412 drivers/md/dm-integrity.c if (unlikely(dm_integrity_failed(ic))) ic 2418 drivers/md/dm-integrity.c io_req.mem.ptr.addr = ic->recalc_buffer; ic 2420 drivers/md/dm-integrity.c io_req.client = ic->io; ic 2421 drivers/md/dm-integrity.c io_loc.bdev = ic->dev->bdev; ic 2422 drivers/md/dm-integrity.c io_loc.sector = get_data_sector(ic, area, offset); ic 2427 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "reading data", r); ic 2431 drivers/md/dm-integrity.c t = ic->recalc_tags; ic 2432 drivers/md/dm-integrity.c for (i = 0; i < n_sectors; i += ic->sectors_per_block) { ic 2433 drivers/md/dm-integrity.c integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t); ic 2434 drivers/md/dm-integrity.c t += ic->tag_size; ic 2437 drivers/md/dm-integrity.c metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset); ic 2439 drivers/md/dm-integrity.c r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE); ic 2441 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "writing tags", r); ic 2448 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2449 drivers/md/dm-integrity.c remove_range_unlocked(ic, &range); ic 2450 drivers/md/dm-integrity.c ic->sb->recalc_sector = 
cpu_to_le64(range.logical_sector + range.n_sectors); ic 2454 drivers/md/dm-integrity.c remove_range(ic, &range); ic 2458 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2460 drivers/md/dm-integrity.c recalc_write_super(ic); ic 2466 drivers/md/dm-integrity.c struct dm_integrity_c *ic = bbs->ic; ic 2483 drivers/md/dm-integrity.c if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, ic 2485 drivers/md/dm-integrity.c remove_range(ic, &dio->range); ic 2487 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work); ic 2489 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->journal, dio->range.logical_sector, ic 2498 drivers/md/dm-integrity.c rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, ic 2505 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector, ic 2508 drivers/md/dm-integrity.c remove_range(ic, &dio->range); ic 2510 drivers/md/dm-integrity.c queue_work(ic->offload_wq, &dio->work); ic 2513 drivers/md/dm-integrity.c queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval); ic 2518 drivers/md/dm-integrity.c struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work); ic 2523 drivers/md/dm-integrity.c dm_integrity_flush_buffers(ic); ic 2526 drivers/md/dm-integrity.c range.n_sectors = ic->provided_data_sectors; ic 2528 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2529 drivers/md/dm-integrity.c add_new_range_and_wait(ic, &range); ic 2530 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2532 drivers/md/dm-integrity.c dm_integrity_flush_buffers(ic); ic 2533 drivers/md/dm-integrity.c if (ic->meta_dev) ic 2534 drivers/md/dm-integrity.c blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL); ic 2536 drivers/md/dm-integrity.c limit = ic->provided_data_sectors; ic 2537 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { ic 2538 drivers/md/dm-integrity.c limit = le64_to_cpu(ic->sb->recalc_sector) ic 2539 drivers/md/dm-integrity.c >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit) ic 2540 drivers/md/dm-integrity.c << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit); ic 2543 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR); ic 2544 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR); ic 2546 drivers/md/dm-integrity.c rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic 2547 drivers/md/dm-integrity.c ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); ic 2549 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2550 drivers/md/dm-integrity.c remove_range_unlocked(ic, &range); ic 2551 drivers/md/dm-integrity.c while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) { ic 2553 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2554 drivers/md/dm-integrity.c spin_lock_irq(&ic->endio_wait.lock); ic 2556 drivers/md/dm-integrity.c spin_unlock_irq(&ic->endio_wait.lock); ic 2560 drivers/md/dm-integrity.c static void init_journal(struct dm_integrity_c *ic, unsigned start_section, ic 2570 drivers/md/dm-integrity.c wraparound_section(ic, &i); ic 2571 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_sectors; j++) { ic 2572 drivers/md/dm-integrity.c struct journal_sector *js = access_journal(ic, i, j); ic 2574 drivers/md/dm-integrity.c js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq); ic 2576 
drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_entries; j++) { ic 2577 drivers/md/dm-integrity.c struct journal_entry *je = access_journal_entry(ic, i, j); ic 2582 drivers/md/dm-integrity.c write_journal(ic, start_section, n_sections); ic 2585 drivers/md/dm-integrity.c static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id) ic 2589 drivers/md/dm-integrity.c if (dm_integrity_commit_id(ic, i, j, k) == id) ic 2592 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "journal commit id", -EIO); ic 2596 drivers/md/dm-integrity.c static void replay_journal(struct dm_integrity_c *ic) ic 2606 drivers/md/dm-integrity.c if (ic->mode == 'R') ic 2609 drivers/md/dm-integrity.c if (ic->journal_uptodate) ic 2615 drivers/md/dm-integrity.c if (!ic->just_formatted) { ic 2617 drivers/md/dm-integrity.c rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL); ic 2618 drivers/md/dm-integrity.c if (ic->journal_io) ic 2619 drivers/md/dm-integrity.c DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal"); ic 2620 drivers/md/dm-integrity.c if (ic->journal_io) { ic 2622 drivers/md/dm-integrity.c crypt_comp.ic = ic; ic 2625 drivers/md/dm-integrity.c encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp); ic 2628 drivers/md/dm-integrity.c DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal"); ic 2631 drivers/md/dm-integrity.c if (dm_integrity_failed(ic)) ic 2637 drivers/md/dm-integrity.c for (i = 0; i < ic->journal_sections; i++) { ic 2638 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_sectors; j++) { ic 2640 drivers/md/dm-integrity.c struct journal_sector *js = access_journal(ic, i, j); ic 2641 drivers/md/dm-integrity.c k = find_commit_seq(ic, i, j, js->commit_id); ic 2648 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_entries; j++) { ic 2649 drivers/md/dm-integrity.c struct journal_entry *je = access_journal_entry(ic, i, j); ic 2667 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "journal commit ids", -EIO); ic 2682 drivers/md/dm-integrity.c if (unlikely(write_start >= ic->journal_sections)) ic 2684 drivers/md/dm-integrity.c wraparound_section(ic, &write_start); ic 2687 drivers/md/dm-integrity.c for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) { ic 2688 drivers/md/dm-integrity.c for (j = 0; j < ic->journal_section_sectors; j++) { ic 2689 drivers/md/dm-integrity.c struct journal_sector *js = access_journal(ic, i, j); ic 2691 drivers/md/dm-integrity.c if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) { ic 2698 drivers/md/dm-integrity.c i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq); ic 2703 drivers/md/dm-integrity.c if (unlikely(i >= ic->journal_sections)) ic 2705 drivers/md/dm-integrity.c wraparound_section(ic, &i); ic 2712 drivers/md/dm-integrity.c do_journal_write(ic, write_start, write_sections, true); ic 2715 drivers/md/dm-integrity.c if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) { ic 2717 drivers/md/dm-integrity.c ic->commit_seq = want_commit_seq; ic 2718 drivers/md/dm-integrity.c DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq); ic 2727 drivers/md/dm-integrity.c init_journal(ic, s, 1, erase_seq); ic 2729 drivers/md/dm-integrity.c wraparound_section(ic, &s); ic 2730 drivers/md/dm-integrity.c if (ic->journal_sections >= 2) { ic 2731 drivers/md/dm-integrity.c init_journal(ic, s, ic->journal_sections - 2, erase_seq); ic 2732 
drivers/md/dm-integrity.c s += ic->journal_sections - 2; ic 2733 drivers/md/dm-integrity.c wraparound_section(ic, &s); ic 2734 drivers/md/dm-integrity.c init_journal(ic, s, 1, erase_seq); ic 2738 drivers/md/dm-integrity.c ic->commit_seq = next_commit_seq(erase_seq); ic 2741 drivers/md/dm-integrity.c ic->committed_section = continue_section; ic 2742 drivers/md/dm-integrity.c ic->n_committed_sections = 0; ic 2744 drivers/md/dm-integrity.c ic->uncommitted_section = continue_section; ic 2745 drivers/md/dm-integrity.c ic->n_uncommitted_sections = 0; ic 2747 drivers/md/dm-integrity.c ic->free_section = continue_section; ic 2748 drivers/md/dm-integrity.c ic->free_section_entry = 0; ic 2749 drivers/md/dm-integrity.c ic->free_sectors = ic->journal_entries; ic 2751 drivers/md/dm-integrity.c ic->journal_tree_root = RB_ROOT; ic 2752 drivers/md/dm-integrity.c for (i = 0; i < ic->journal_entries; i++) ic 2753 drivers/md/dm-integrity.c init_journal_node(&ic->journal_tree[i]); ic 2756 drivers/md/dm-integrity.c static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic) ic 2760 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2761 drivers/md/dm-integrity.c ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1; ic 2762 drivers/md/dm-integrity.c ic->synchronous_mode = 1; ic 2764 drivers/md/dm-integrity.c cancel_delayed_work_sync(&ic->bitmap_flush_work); ic 2765 drivers/md/dm-integrity.c queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0); ic 2766 drivers/md/dm-integrity.c flush_workqueue(ic->commit_wq); ic 2772 drivers/md/dm-integrity.c struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier); ic 2776 drivers/md/dm-integrity.c dm_integrity_enter_synchronous_mode(ic); ic 2783 drivers/md/dm-integrity.c struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; ic 2786 drivers/md/dm-integrity.c WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier)); ic 2788 drivers/md/dm-integrity.c del_timer_sync(&ic->autocommit_timer); ic 2790 drivers/md/dm-integrity.c if (ic->recalc_wq) ic 2791 drivers/md/dm-integrity.c drain_workqueue(ic->recalc_wq); ic 2793 drivers/md/dm-integrity.c if (ic->mode == 'B') ic 2794 drivers/md/dm-integrity.c cancel_delayed_work_sync(&ic->bitmap_flush_work); ic 2796 drivers/md/dm-integrity.c queue_work(ic->commit_wq, &ic->commit_work); ic 2797 drivers/md/dm-integrity.c drain_workqueue(ic->commit_wq); ic 2799 drivers/md/dm-integrity.c if (ic->mode == 'J') { ic 2800 drivers/md/dm-integrity.c if (ic->meta_dev) ic 2801 drivers/md/dm-integrity.c queue_work(ic->writer_wq, &ic->writer_work); ic 2802 drivers/md/dm-integrity.c drain_workqueue(ic->writer_wq); ic 2803 drivers/md/dm-integrity.c dm_integrity_flush_buffers(ic); ic 2806 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2807 drivers/md/dm-integrity.c dm_integrity_flush_buffers(ic); ic 2810 drivers/md/dm-integrity.c init_journal(ic, 0, ic->journal_sections, 0); ic 2811 drivers/md/dm-integrity.c ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic 2812 drivers/md/dm-integrity.c r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); ic 2814 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "writing superblock", r); ic 2818 drivers/md/dm-integrity.c BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); ic 2820 drivers/md/dm-integrity.c ic->journal_uptodate = true; ic 2825 drivers/md/dm-integrity.c struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; ic 2829 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) { ic 2831 drivers/md/dm-integrity.c 
rw_journal_sectors(ic, REQ_OP_READ, 0, 0, ic 2832 drivers/md/dm-integrity.c ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); ic 2833 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2834 drivers/md/dm-integrity.c if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) { ic 2835 drivers/md/dm-integrity.c block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal); ic 2836 drivers/md/dm-integrity.c block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal); ic 2837 drivers/md/dm-integrity.c if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, ic 2839 drivers/md/dm-integrity.c ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); ic 2840 drivers/md/dm-integrity.c ic->sb->recalc_sector = cpu_to_le64(0); ic 2844 drivers/md/dm-integrity.c ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit); ic 2845 drivers/md/dm-integrity.c ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; ic 2846 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); ic 2847 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET); ic 2848 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET); ic 2849 drivers/md/dm-integrity.c rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic 2850 drivers/md/dm-integrity.c ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); ic 2851 drivers/md/dm-integrity.c ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); ic 2852 drivers/md/dm-integrity.c ic->sb->recalc_sector = cpu_to_le64(0); ic 2855 drivers/md/dm-integrity.c if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit && ic 2856 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR))) { ic 2857 drivers/md/dm-integrity.c ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); ic 2858 drivers/md/dm-integrity.c ic->sb->recalc_sector = cpu_to_le64(0); ic 2860 drivers/md/dm-integrity.c init_journal(ic, 0, ic->journal_sections, 0); ic 2861 drivers/md/dm-integrity.c replay_journal(ic); ic 2862 drivers/md/dm-integrity.c ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic 2864 drivers/md/dm-integrity.c r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); ic 2866 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "writing superblock", r); ic 2868 drivers/md/dm-integrity.c replay_journal(ic); ic 2869 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2870 drivers/md/dm-integrity.c ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP); ic 2871 drivers/md/dm-integrity.c ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit; ic 2872 drivers/md/dm-integrity.c r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); ic 2874 drivers/md/dm-integrity.c dm_integrity_io_error(ic, "writing superblock", r); ic 2876 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); ic 2877 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); ic 2878 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR); ic 2879 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && ic 2880 drivers/md/dm-integrity.c le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) { ic 2881 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->journal, 
le64_to_cpu(ic->sb->recalc_sector), ic 2882 drivers/md/dm-integrity.c ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); ic 2883 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector), ic 2884 drivers/md/dm-integrity.c ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); ic 2885 drivers/md/dm-integrity.c block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector), ic 2886 drivers/md/dm-integrity.c ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET); ic 2888 drivers/md/dm-integrity.c rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0, ic 2889 drivers/md/dm-integrity.c ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL); ic 2893 drivers/md/dm-integrity.c DEBUG_print("testing recalc: %x\n", ic->sb->flags); ic 2894 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { ic 2895 drivers/md/dm-integrity.c __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector); ic 2896 drivers/md/dm-integrity.c DEBUG_print("recalc pos: %lx / %lx\n", (long)recalc_pos, ic->provided_data_sectors); ic 2897 drivers/md/dm-integrity.c if (recalc_pos < ic->provided_data_sectors) { ic 2898 drivers/md/dm-integrity.c queue_work(ic->recalc_wq, &ic->recalc_work); ic 2899 drivers/md/dm-integrity.c } else if (recalc_pos > ic->provided_data_sectors) { ic 2900 drivers/md/dm-integrity.c ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors); ic 2901 drivers/md/dm-integrity.c recalc_write_super(ic); ic 2905 drivers/md/dm-integrity.c ic->reboot_notifier.notifier_call = dm_integrity_reboot; ic 2906 drivers/md/dm-integrity.c ic->reboot_notifier.next = NULL; ic 2907 drivers/md/dm-integrity.c ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */ ic 2908 drivers/md/dm-integrity.c WARN_ON(register_reboot_notifier(&ic->reboot_notifier)); ic 2912 drivers/md/dm-integrity.c dm_integrity_enter_synchronous_mode(ic); ic 2919 drivers/md/dm-integrity.c struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private; ic 2926 drivers/md/dm-integrity.c (unsigned long long)atomic64_read(&ic->number_of_mismatches), ic 2927 drivers/md/dm-integrity.c (unsigned long long)ic->provided_data_sectors); ic 2928 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ic 2929 drivers/md/dm-integrity.c DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector)); ic 2935 drivers/md/dm-integrity.c __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100; ic 2936 drivers/md/dm-integrity.c watermark_percentage += ic->journal_entries / 2; ic 2937 drivers/md/dm-integrity.c do_div(watermark_percentage, ic->journal_entries); ic 2939 drivers/md/dm-integrity.c arg_count += !!ic->meta_dev; ic 2940 drivers/md/dm-integrity.c arg_count += ic->sectors_per_block != 1; ic 2941 drivers/md/dm-integrity.c arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)); ic 2942 drivers/md/dm-integrity.c arg_count += ic->mode == 'J'; ic 2943 drivers/md/dm-integrity.c arg_count += ic->mode == 'J'; ic 2944 drivers/md/dm-integrity.c arg_count += ic->mode == 'B'; ic 2945 drivers/md/dm-integrity.c arg_count += ic->mode == 'B'; ic 2946 drivers/md/dm-integrity.c arg_count += !!ic->internal_hash_alg.alg_string; ic 2947 drivers/md/dm-integrity.c arg_count += !!ic->journal_crypt_alg.alg_string; ic 2948 drivers/md/dm-integrity.c arg_count += !!ic->journal_mac_alg.alg_string; 
ic 2949 drivers/md/dm-integrity.c DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start, ic 2950 drivers/md/dm-integrity.c ic->tag_size, ic->mode, arg_count); ic 2951 drivers/md/dm-integrity.c if (ic->meta_dev) ic 2952 drivers/md/dm-integrity.c DMEMIT(" meta_device:%s", ic->meta_dev->name); ic 2953 drivers/md/dm-integrity.c if (ic->sectors_per_block != 1) ic 2954 drivers/md/dm-integrity.c DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT); ic 2955 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ic 2957 drivers/md/dm-integrity.c DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS); ic 2958 drivers/md/dm-integrity.c DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors); ic 2959 drivers/md/dm-integrity.c DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors); ic 2960 drivers/md/dm-integrity.c if (ic->mode == 'J') { ic 2962 drivers/md/dm-integrity.c DMEMIT(" commit_time:%u", ic->autocommit_msec); ic 2964 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 2965 drivers/md/dm-integrity.c DMEMIT(" sectors_per_bit:%llu", (unsigned long long)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit); ic 2966 drivers/md/dm-integrity.c DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval)); ic 2971 drivers/md/dm-integrity.c if (ic->a.alg_string) { \ ic 2972 drivers/md/dm-integrity.c DMEMIT(" %s:%s", n, ic->a.alg_string); \ ic 2973 drivers/md/dm-integrity.c if (ic->a.key_string) \ ic 2974 drivers/md/dm-integrity.c DMEMIT(":%s", ic->a.key_string);\ ic 2988 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private; ic 2990 drivers/md/dm-integrity.c if (!ic->meta_dev) ic 2991 drivers/md/dm-integrity.c return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data); ic 2993 drivers/md/dm-integrity.c return fn(ti, ic->dev, 0, ti->len, data); ic 2998 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private; ic 3000 drivers/md/dm-integrity.c if (ic->sectors_per_block > 1) { ic 3001 drivers/md/dm-integrity.c limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; ic 3002 drivers/md/dm-integrity.c limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; ic 3003 drivers/md/dm-integrity.c blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); ic 3007 drivers/md/dm-integrity.c static void calculate_journal_section_size(struct dm_integrity_c *ic) ic 3011 drivers/md/dm-integrity.c ic->journal_sections = le32_to_cpu(ic->sb->journal_sections); ic 3012 drivers/md/dm-integrity.c ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size, ic 3015 drivers/md/dm-integrity.c if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) ic 3017 drivers/md/dm-integrity.c ic->journal_entries_per_sector = sector_space / ic->journal_entry_size; ic 3018 drivers/md/dm-integrity.c ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS; ic 3019 drivers/md/dm-integrity.c ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS; ic 3020 drivers/md/dm-integrity.c ic->journal_entries = ic->journal_section_entries * ic->journal_sections; ic 3023 drivers/md/dm-integrity.c static int calculate_device_limits(struct dm_integrity_c *ic) ic 3027 drivers/md/dm-integrity.c calculate_journal_section_size(ic); ic 3028 drivers/md/dm-integrity.c initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors 
* ic->journal_sections; ic 3029 drivers/md/dm-integrity.c if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX) ic 3031 drivers/md/dm-integrity.c ic->initial_sectors = initial_sectors; ic 3033 drivers/md/dm-integrity.c if (!ic->meta_dev) { ic 3036 drivers/md/dm-integrity.c ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block), ic 3038 drivers/md/dm-integrity.c if (!(ic->metadata_run & (ic->metadata_run - 1))) ic 3039 drivers/md/dm-integrity.c ic->log2_metadata_run = __ffs(ic->metadata_run); ic 3041 drivers/md/dm-integrity.c ic->log2_metadata_run = -1; ic 3043 drivers/md/dm-integrity.c get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset); ic 3044 drivers/md/dm-integrity.c last_sector = get_data_sector(ic, last_area, last_offset); ic 3045 drivers/md/dm-integrity.c if (last_sector < ic->start || last_sector >= ic->meta_device_sectors) ic 3048 drivers/md/dm-integrity.c __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size; ic 3049 drivers/md/dm-integrity.c meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1)) ic 3050 drivers/md/dm-integrity.c >> (ic->log2_buffer_sectors + SECTOR_SHIFT); ic 3051 drivers/md/dm-integrity.c meta_size <<= ic->log2_buffer_sectors; ic 3052 drivers/md/dm-integrity.c if (ic->initial_sectors + meta_size < ic->initial_sectors || ic 3053 drivers/md/dm-integrity.c ic->initial_sectors + meta_size > ic->meta_device_sectors) ic 3055 drivers/md/dm-integrity.c ic->metadata_run = 1; ic 3056 drivers/md/dm-integrity.c ic->log2_metadata_run = 0; ic 3062 drivers/md/dm-integrity.c static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors) ic 3067 drivers/md/dm-integrity.c memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT); ic 3068 drivers/md/dm-integrity.c memcpy(ic->sb->magic, SB_MAGIC, 8); ic 3069 drivers/md/dm-integrity.c ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size); ic 3070 drivers/md/dm-integrity.c ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block); ic 3071 drivers/md/dm-integrity.c if (ic->journal_mac_alg.alg_string) ic 3072 drivers/md/dm-integrity.c ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC); ic 3074 drivers/md/dm-integrity.c calculate_journal_section_size(ic); ic 3075 drivers/md/dm-integrity.c journal_sections = journal_sectors / ic->journal_section_sectors; ic 3079 drivers/md/dm-integrity.c if (!ic->meta_dev) { ic 3080 drivers/md/dm-integrity.c ic->sb->journal_sections = cpu_to_le32(journal_sections); ic 3083 drivers/md/dm-integrity.c ic->sb->log2_interleave_sectors = __fls(interleave_sectors); ic 3084 drivers/md/dm-integrity.c ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); ic 3085 drivers/md/dm-integrity.c ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors); ic 3087 drivers/md/dm-integrity.c ic->provided_data_sectors = 0; ic 3088 drivers/md/dm-integrity.c for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) { ic 3089 drivers/md/dm-integrity.c __u64 prev_data_sectors = ic->provided_data_sectors; ic 3091 drivers/md/dm-integrity.c ic->provided_data_sectors |= (sector_t)1 << test_bit; ic 3092 drivers/md/dm-integrity.c if (calculate_device_limits(ic)) ic 3093 drivers/md/dm-integrity.c ic->provided_data_sectors = prev_data_sectors; ic 3095 
drivers/md/dm-integrity.c if (!ic->provided_data_sectors) ic 3098 drivers/md/dm-integrity.c ic->sb->log2_interleave_sectors = 0; ic 3099 drivers/md/dm-integrity.c ic->provided_data_sectors = ic->data_device_sectors; ic 3100 drivers/md/dm-integrity.c ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1); ic 3103 drivers/md/dm-integrity.c ic->sb->journal_sections = cpu_to_le32(0); ic 3105 drivers/md/dm-integrity.c __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections); ic 3109 drivers/md/dm-integrity.c ic->sb->journal_sections = cpu_to_le32(test_journal_sections); ic 3110 drivers/md/dm-integrity.c if (calculate_device_limits(ic)) ic 3111 drivers/md/dm-integrity.c ic->sb->journal_sections = cpu_to_le32(prev_journal_sections); ic 3114 drivers/md/dm-integrity.c if (!le32_to_cpu(ic->sb->journal_sections)) { ic 3115 drivers/md/dm-integrity.c if (ic->log2_buffer_sectors > 3) { ic 3116 drivers/md/dm-integrity.c ic->log2_buffer_sectors--; ic 3123 drivers/md/dm-integrity.c ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors); ic 3125 drivers/md/dm-integrity.c sb_set_version(ic); ic 3130 drivers/md/dm-integrity.c static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) ic 3137 drivers/md/dm-integrity.c bi.tuple_size = ic->tag_size; ic 3139 drivers/md/dm-integrity.c bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT; ic 3180 drivers/md/dm-integrity.c static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl) ic 3183 drivers/md/dm-integrity.c for (i = 0; i < ic->journal_sections; i++) ic 3188 drivers/md/dm-integrity.c static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, ic 3194 drivers/md/dm-integrity.c sl = kvmalloc_array(ic->journal_sections, ic 3200 drivers/md/dm-integrity.c for (i = 0; i < ic->journal_sections; i++) { ic 3207 drivers/md/dm-integrity.c page_list_location(ic, i, 0, &start_index, &start_offset); ic 3208 drivers/md/dm-integrity.c page_list_location(ic, i, ic->journal_section_sectors - 1, ic 3216 drivers/md/dm-integrity.c dm_integrity_free_journal_scatterlist(ic, sl); ic 3307 drivers/md/dm-integrity.c static int create_journal(struct dm_integrity_c *ic, char **error) ic 3315 drivers/md/dm-integrity.c ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL); ic 3316 drivers/md/dm-integrity.c ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL); ic 3317 drivers/md/dm-integrity.c ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL); ic 3318 drivers/md/dm-integrity.c ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL); ic 3320 drivers/md/dm-integrity.c journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors, ic 3328 drivers/md/dm-integrity.c ic->journal_pages = journal_pages; ic 3330 drivers/md/dm-integrity.c ic->journal = dm_integrity_alloc_page_list(ic->journal_pages); ic 3331 drivers/md/dm-integrity.c if (!ic->journal) { ic 3336 drivers/md/dm-integrity.c if (ic->journal_crypt_alg.alg_string) { ic 3340 drivers/md/dm-integrity.c comp.ic = ic; ic 3341 drivers/md/dm-integrity.c ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0); ic 3342 drivers/md/dm-integrity.c if (IS_ERR(ic->journal_crypt)) { ic 3344 drivers/md/dm-integrity.c r = PTR_ERR(ic->journal_crypt); ic 3345 drivers/md/dm-integrity.c ic->journal_crypt = NULL; ic 3348 drivers/md/dm-integrity.c ivsize = crypto_skcipher_ivsize(ic->journal_crypt); ic 3349 drivers/md/dm-integrity.c blocksize = 
crypto_skcipher_blocksize(ic->journal_crypt); ic 3351 drivers/md/dm-integrity.c if (ic->journal_crypt_alg.key) { ic 3352 drivers/md/dm-integrity.c r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key, ic 3353 drivers/md/dm-integrity.c ic->journal_crypt_alg.key_size); ic 3360 drivers/md/dm-integrity.c ic->journal_crypt_alg.alg_string, blocksize, ivsize); ic 3362 drivers/md/dm-integrity.c ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages); ic 3363 drivers/md/dm-integrity.c if (!ic->journal_io) { ic 3372 drivers/md/dm-integrity.c req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); ic 3386 drivers/md/dm-integrity.c ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages); ic 3387 drivers/md/dm-integrity.c if (!ic->journal_xor) { ic 3393 drivers/md/dm-integrity.c sg = kvmalloc_array(ic->journal_pages + 1, ic 3401 drivers/md/dm-integrity.c sg_init_table(sg, ic->journal_pages + 1); ic 3402 drivers/md/dm-integrity.c for (i = 0; i < ic->journal_pages; i++) { ic 3403 drivers/md/dm-integrity.c char *va = lowmem_page_address(ic->journal_xor[i].page); ic 3407 drivers/md/dm-integrity.c sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids); ic 3410 drivers/md/dm-integrity.c PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv); ic 3416 drivers/md/dm-integrity.c r = dm_integrity_failed(ic); ic 3421 drivers/md/dm-integrity.c DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data"); ic 3423 drivers/md/dm-integrity.c crypto_free_skcipher(ic->journal_crypt); ic 3424 drivers/md/dm-integrity.c ic->journal_crypt = NULL; ic 3428 drivers/md/dm-integrity.c req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); ic 3449 drivers/md/dm-integrity.c ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal); ic 3450 drivers/md/dm-integrity.c if (!ic->journal_scatterlist) { ic 3455 drivers/md/dm-integrity.c ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io); ic 3456 drivers/md/dm-integrity.c if (!ic->journal_io_scatterlist) { ic 3461 drivers/md/dm-integrity.c ic->sk_requests = kvmalloc_array(ic->journal_sections, ic 3464 drivers/md/dm-integrity.c if (!ic->sk_requests) { ic 3469 drivers/md/dm-integrity.c for (i = 0; i < ic->journal_sections; i++) { ic 3485 drivers/md/dm-integrity.c r = dm_integrity_failed(ic); ic 3491 drivers/md/dm-integrity.c section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL); ic 3506 drivers/md/dm-integrity.c section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT; ic 3507 drivers/md/dm-integrity.c ic->sk_requests[i] = section_req; ic 3517 drivers/md/dm-integrity.c if (ic->commit_ids[j] == ic->commit_ids[i]) { ic 3518 drivers/md/dm-integrity.c ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1); ic 3522 drivers/md/dm-integrity.c DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]); ic 3525 drivers/md/dm-integrity.c journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node); ic 3531 drivers/md/dm-integrity.c ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); ic 3532 drivers/md/dm-integrity.c if (!ic->journal_tree) { ic 3570 drivers/md/dm-integrity.c struct dm_integrity_c *ic; ic 3594 drivers/md/dm-integrity.c ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL); ic 3595 drivers/md/dm-integrity.c if (!ic) { ic 3599 drivers/md/dm-integrity.c ti->private = ic; ic 3601 drivers/md/dm-integrity.c ic->ti = ti; ic 3603 drivers/md/dm-integrity.c ic->in_progress = RB_ROOT; ic 
3604 drivers/md/dm-integrity.c INIT_LIST_HEAD(&ic->wait_list); ic 3605 drivers/md/dm-integrity.c init_waitqueue_head(&ic->endio_wait); ic 3606 drivers/md/dm-integrity.c bio_list_init(&ic->flush_bio_list); ic 3607 drivers/md/dm-integrity.c init_waitqueue_head(&ic->copy_to_journal_wait); ic 3608 drivers/md/dm-integrity.c init_completion(&ic->crypto_backoff); ic 3609 drivers/md/dm-integrity.c atomic64_set(&ic->number_of_mismatches, 0); ic 3610 drivers/md/dm-integrity.c ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL; ic 3612 drivers/md/dm-integrity.c r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev); ic 3623 drivers/md/dm-integrity.c ic->start = start; ic 3626 drivers/md/dm-integrity.c if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) { ic 3635 drivers/md/dm-integrity.c ic->mode = argv[3][0]; ic 3647 drivers/md/dm-integrity.c ic->sectors_per_block = 1; ic 3676 drivers/md/dm-integrity.c if (ic->meta_dev) { ic 3677 drivers/md/dm-integrity.c dm_put_device(ti, ic->meta_dev); ic 3678 drivers/md/dm-integrity.c ic->meta_dev = NULL; ic 3681 drivers/md/dm-integrity.c dm_table_get_mode(ti->table), &ic->meta_dev); ic 3694 drivers/md/dm-integrity.c ic->sectors_per_block = val >> SECTOR_SHIFT; ic 3702 drivers/md/dm-integrity.c ic->bitmap_flush_interval = msecs_to_jiffies(val); ic 3704 drivers/md/dm-integrity.c r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, ic 3709 drivers/md/dm-integrity.c r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, ic 3714 drivers/md/dm-integrity.c r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, ic 3719 drivers/md/dm-integrity.c ic->recalculate_flag = true; ic 3727 drivers/md/dm-integrity.c ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT; ic 3728 drivers/md/dm-integrity.c if (!ic->meta_dev) ic 3729 drivers/md/dm-integrity.c ic->meta_device_sectors = ic->data_device_sectors; ic 3731 drivers/md/dm-integrity.c ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT; ic 3735 drivers/md/dm-integrity.c ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR); ic 3740 drivers/md/dm-integrity.c ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT); ic 3742 drivers/md/dm-integrity.c r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error, ic 3747 drivers/md/dm-integrity.c r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error, ic 3752 drivers/md/dm-integrity.c if (!ic->tag_size) { ic 3753 drivers/md/dm-integrity.c if (!ic->internal_hash) { ic 3758 drivers/md/dm-integrity.c ic->tag_size = crypto_shash_digestsize(ic->internal_hash); ic 3760 drivers/md/dm-integrity.c if (ic->tag_size > MAX_TAG_SIZE) { ic 3765 drivers/md/dm-integrity.c if (!(ic->tag_size & (ic->tag_size - 1))) ic 3766 drivers/md/dm-integrity.c ic->log2_tag_size = __ffs(ic->tag_size); ic 3768 drivers/md/dm-integrity.c ic->log2_tag_size = -1; ic 3770 drivers/md/dm-integrity.c if (ic->mode == 'B' && !ic->internal_hash) { ic 3776 drivers/md/dm-integrity.c ic->autocommit_jiffies = msecs_to_jiffies(sync_msec); ic 3777 drivers/md/dm-integrity.c ic->autocommit_msec = sync_msec; ic 3778 drivers/md/dm-integrity.c timer_setup(&ic->autocommit_timer, autocommit_fn, 0); ic 3780 drivers/md/dm-integrity.c ic->io = dm_io_client_create(); ic 3781 drivers/md/dm-integrity.c if (IS_ERR(ic->io)) { ic 3782 drivers/md/dm-integrity.c r = PTR_ERR(ic->io); ic 3783 drivers/md/dm-integrity.c ic->io = NULL; ic 3788 drivers/md/dm-integrity.c r = 
mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache); ic 3794 drivers/md/dm-integrity.c ic->metadata_wq = alloc_workqueue("dm-integrity-metadata", ic 3796 drivers/md/dm-integrity.c if (!ic->metadata_wq) { ic 3806 drivers/md/dm-integrity.c ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); ic 3807 drivers/md/dm-integrity.c if (!ic->wait_wq) { ic 3813 drivers/md/dm-integrity.c ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM, ic 3815 drivers/md/dm-integrity.c if (!ic->offload_wq) { ic 3821 drivers/md/dm-integrity.c ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1); ic 3822 drivers/md/dm-integrity.c if (!ic->commit_wq) { ic 3827 drivers/md/dm-integrity.c INIT_WORK(&ic->commit_work, integrity_commit); ic 3829 drivers/md/dm-integrity.c if (ic->mode == 'J' || ic->mode == 'B') { ic 3830 drivers/md/dm-integrity.c ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1); ic 3831 drivers/md/dm-integrity.c if (!ic->writer_wq) { ic 3836 drivers/md/dm-integrity.c INIT_WORK(&ic->writer_work, integrity_writer); ic 3839 drivers/md/dm-integrity.c ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL); ic 3840 drivers/md/dm-integrity.c if (!ic->sb) { ic 3846 drivers/md/dm-integrity.c r = sync_rw_sb(ic, REQ_OP_READ, 0); ic 3852 drivers/md/dm-integrity.c if (memcmp(ic->sb->magic, SB_MAGIC, 8)) { ic 3853 drivers/md/dm-integrity.c if (ic->mode != 'R') { ic 3854 drivers/md/dm-integrity.c if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) { ic 3861 drivers/md/dm-integrity.c r = initialize_superblock(ic, journal_sectors, interleave_sectors); ic 3866 drivers/md/dm-integrity.c if (ic->mode != 'R') ic 3870 drivers/md/dm-integrity.c if (!ic->sb->version || ic->sb->version > SB_VERSION_3) { ic 3875 drivers/md/dm-integrity.c if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) { ic 3880 drivers/md/dm-integrity.c if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) { ic 3885 drivers/md/dm-integrity.c if (!le32_to_cpu(ic->sb->journal_sections)) { ic 3891 drivers/md/dm-integrity.c if (!ic->meta_dev) { ic 3892 drivers/md/dm-integrity.c if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS || ic 3893 drivers/md/dm-integrity.c ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) { ic 3899 drivers/md/dm-integrity.c if (ic->sb->log2_interleave_sectors) { ic 3905 drivers/md/dm-integrity.c ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors); ic 3906 drivers/md/dm-integrity.c if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) { ic 3912 drivers/md/dm-integrity.c if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) { ic 3919 drivers/md/dm-integrity.c r = calculate_device_limits(ic); ic 3921 drivers/md/dm-integrity.c if (ic->meta_dev) { ic 3922 drivers/md/dm-integrity.c if (ic->log2_buffer_sectors > 3) { ic 3923 drivers/md/dm-integrity.c ic->log2_buffer_sectors--; ic 3933 drivers/md/dm-integrity.c if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block) ic 3934 drivers/md/dm-integrity.c log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block; ic 3936 drivers/md/dm-integrity.c bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3); ic 3939 drivers/md/dm-integrity.c while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit) 
ic 3942 drivers/md/dm-integrity.c log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block; ic 3943 drivers/md/dm-integrity.c ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; ic 3945 drivers/md/dm-integrity.c ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit; ic 3947 drivers/md/dm-integrity.c n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) ic 3949 drivers/md/dm-integrity.c ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8); ic 3951 drivers/md/dm-integrity.c if (!ic->meta_dev) ic 3952 drivers/md/dm-integrity.c ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run)); ic 3954 drivers/md/dm-integrity.c if (ti->len > ic->provided_data_sectors) { ic 3961 drivers/md/dm-integrity.c threshold = (__u64)ic->journal_entries * (100 - journal_watermark); ic 3964 drivers/md/dm-integrity.c ic->free_sectors_threshold = threshold; ic 3967 drivers/md/dm-integrity.c DEBUG_print(" integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size)); ic 3968 drivers/md/dm-integrity.c DEBUG_print(" journal_entry_size %u\n", ic->journal_entry_size); ic 3969 drivers/md/dm-integrity.c DEBUG_print(" journal_entries_per_sector %u\n", ic->journal_entries_per_sector); ic 3970 drivers/md/dm-integrity.c DEBUG_print(" journal_section_entries %u\n", ic->journal_section_entries); ic 3971 drivers/md/dm-integrity.c DEBUG_print(" journal_section_sectors %u\n", ic->journal_section_sectors); ic 3972 drivers/md/dm-integrity.c DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections)); ic 3973 drivers/md/dm-integrity.c DEBUG_print(" journal_entries %u\n", ic->journal_entries); ic 3974 drivers/md/dm-integrity.c DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors); ic 3975 drivers/md/dm-integrity.c DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT); ic 3976 drivers/md/dm-integrity.c DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors); ic 3977 drivers/md/dm-integrity.c DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run); ic 3978 drivers/md/dm-integrity.c DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run); ic 3979 drivers/md/dm-integrity.c DEBUG_print(" provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors, ic 3980 drivers/md/dm-integrity.c (unsigned long long)ic->provided_data_sectors); ic 3981 drivers/md/dm-integrity.c DEBUG_print(" log2_buffer_sectors %u\n", ic->log2_buffer_sectors); ic 3984 drivers/md/dm-integrity.c if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) { ic 3985 drivers/md/dm-integrity.c ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING); ic 3986 drivers/md/dm-integrity.c ic->sb->recalc_sector = cpu_to_le64(0); ic 3989 drivers/md/dm-integrity.c if (ic->internal_hash) { ic 3990 drivers/md/dm-integrity.c ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1); ic 3991 drivers/md/dm-integrity.c if (!ic->recalc_wq ) { ic 3996 drivers/md/dm-integrity.c INIT_WORK(&ic->recalc_work, integrity_recalc); ic 3997 drivers/md/dm-integrity.c ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT); ic 3998 drivers/md/dm-integrity.c if (!ic->recalc_buffer) { ic 4003 drivers/md/dm-integrity.c ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block, ic 4004 drivers/md/dm-integrity.c ic->tag_size, GFP_KERNEL); ic 4005 drivers/md/dm-integrity.c if (!ic->recalc_tags) { 
ic 4012 drivers/md/dm-integrity.c ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, ic 4013 drivers/md/dm-integrity.c 1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL); ic 4014 drivers/md/dm-integrity.c if (IS_ERR(ic->bufio)) { ic 4015 drivers/md/dm-integrity.c r = PTR_ERR(ic->bufio); ic 4017 drivers/md/dm-integrity.c ic->bufio = NULL; ic 4020 drivers/md/dm-integrity.c dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors); ic 4022 drivers/md/dm-integrity.c if (ic->mode != 'R') { ic 4023 drivers/md/dm-integrity.c r = create_journal(ic, &ti->error); ic 4029 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 4031 drivers/md/dm-integrity.c unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE); ic 4033 drivers/md/dm-integrity.c ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); ic 4034 drivers/md/dm-integrity.c if (!ic->recalc_bitmap) { ic 4038 drivers/md/dm-integrity.c ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages); ic 4039 drivers/md/dm-integrity.c if (!ic->may_write_bitmap) { ic 4043 drivers/md/dm-integrity.c ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL); ic 4044 drivers/md/dm-integrity.c if (!ic->bbs) { ic 4048 drivers/md/dm-integrity.c INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work); ic 4049 drivers/md/dm-integrity.c for (i = 0; i < ic->n_bitmap_blocks; i++) { ic 4050 drivers/md/dm-integrity.c struct bitmap_block_status *bbs = &ic->bbs[i]; ic 4054 drivers/md/dm-integrity.c bbs->ic = ic; ic 4063 drivers/md/dm-integrity.c bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset; ic 4070 drivers/md/dm-integrity.c init_journal(ic, 0, ic->journal_sections, 0); ic 4071 drivers/md/dm-integrity.c r = dm_integrity_failed(ic); ic 4076 drivers/md/dm-integrity.c r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA); ic 4081 drivers/md/dm-integrity.c ic->just_formatted = true; ic 4084 drivers/md/dm-integrity.c if (!ic->meta_dev) { ic 4085 drivers/md/dm-integrity.c r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors); ic 4089 drivers/md/dm-integrity.c if (ic->mode == 'B') { ic 4090 drivers/md/dm-integrity.c unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8); ic 4101 drivers/md/dm-integrity.c if (!ic->internal_hash) ic 4102 drivers/md/dm-integrity.c dm_integrity_set(ti, ic); ic 4116 drivers/md/dm-integrity.c struct dm_integrity_c *ic = ti->private; ic 4118 drivers/md/dm-integrity.c BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress)); ic 4119 drivers/md/dm-integrity.c BUG_ON(!list_empty(&ic->wait_list)); ic 4121 drivers/md/dm-integrity.c if (ic->metadata_wq) ic 4122 drivers/md/dm-integrity.c destroy_workqueue(ic->metadata_wq); ic 4123 drivers/md/dm-integrity.c if (ic->wait_wq) ic 4124 drivers/md/dm-integrity.c destroy_workqueue(ic->wait_wq); ic 4125 drivers/md/dm-integrity.c if (ic->offload_wq) ic 4126 drivers/md/dm-integrity.c destroy_workqueue(ic->offload_wq); ic 4127 drivers/md/dm-integrity.c if (ic->commit_wq) ic 4128 drivers/md/dm-integrity.c destroy_workqueue(ic->commit_wq); ic 4129 drivers/md/dm-integrity.c if (ic->writer_wq) ic 4130 drivers/md/dm-integrity.c destroy_workqueue(ic->writer_wq); ic 4131 drivers/md/dm-integrity.c if (ic->recalc_wq) ic 4132 drivers/md/dm-integrity.c destroy_workqueue(ic->recalc_wq); ic 4133 drivers/md/dm-integrity.c vfree(ic->recalc_buffer); ic 4134 drivers/md/dm-integrity.c 
kvfree(ic->recalc_tags); ic 4135 drivers/md/dm-integrity.c kvfree(ic->bbs); ic 4136 drivers/md/dm-integrity.c if (ic->bufio) ic 4137 drivers/md/dm-integrity.c dm_bufio_client_destroy(ic->bufio); ic 4138 drivers/md/dm-integrity.c mempool_exit(&ic->journal_io_mempool); ic 4139 drivers/md/dm-integrity.c if (ic->io) ic 4140 drivers/md/dm-integrity.c dm_io_client_destroy(ic->io); ic 4141 drivers/md/dm-integrity.c if (ic->dev) ic 4142 drivers/md/dm-integrity.c dm_put_device(ti, ic->dev); ic 4143 drivers/md/dm-integrity.c if (ic->meta_dev) ic 4144 drivers/md/dm-integrity.c dm_put_device(ti, ic->meta_dev); ic 4145 drivers/md/dm-integrity.c dm_integrity_free_page_list(ic->journal); ic 4146 drivers/md/dm-integrity.c dm_integrity_free_page_list(ic->journal_io); ic 4147 drivers/md/dm-integrity.c dm_integrity_free_page_list(ic->journal_xor); ic 4148 drivers/md/dm-integrity.c dm_integrity_free_page_list(ic->recalc_bitmap); ic 4149 drivers/md/dm-integrity.c dm_integrity_free_page_list(ic->may_write_bitmap); ic 4150 drivers/md/dm-integrity.c if (ic->journal_scatterlist) ic 4151 drivers/md/dm-integrity.c dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist); ic 4152 drivers/md/dm-integrity.c if (ic->journal_io_scatterlist) ic 4153 drivers/md/dm-integrity.c dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist); ic 4154 drivers/md/dm-integrity.c if (ic->sk_requests) { ic 4157 drivers/md/dm-integrity.c for (i = 0; i < ic->journal_sections; i++) { ic 4158 drivers/md/dm-integrity.c struct skcipher_request *req = ic->sk_requests[i]; ic 4164 drivers/md/dm-integrity.c kvfree(ic->sk_requests); ic 4166 drivers/md/dm-integrity.c kvfree(ic->journal_tree); ic 4167 drivers/md/dm-integrity.c if (ic->sb) ic 4168 drivers/md/dm-integrity.c free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT); ic 4170 drivers/md/dm-integrity.c if (ic->internal_hash) ic 4171 drivers/md/dm-integrity.c crypto_free_shash(ic->internal_hash); ic 4172 drivers/md/dm-integrity.c free_alg(&ic->internal_hash_alg); ic 4174 drivers/md/dm-integrity.c if (ic->journal_crypt) ic 4175 drivers/md/dm-integrity.c crypto_free_skcipher(ic->journal_crypt); ic 4176 drivers/md/dm-integrity.c free_alg(&ic->journal_crypt_alg); ic 4178 drivers/md/dm-integrity.c if (ic->journal_mac) ic 4179 drivers/md/dm-integrity.c crypto_free_shash(ic->journal_mac); ic 4180 drivers/md/dm-integrity.c free_alg(&ic->journal_mac_alg); ic 4182 drivers/md/dm-integrity.c kfree(ic); ic 344 drivers/mmc/core/block.c struct mmc_ioc_cmd ic; ic 362 drivers/mmc/core/block.c if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { ic 367 drivers/mmc/core/block.c idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; ic 379 drivers/mmc/core/block.c idata->ic.data_ptr, idata->buf_bytes); ic 396 drivers/mmc/core/block.c struct mmc_ioc_cmd *ic = &idata->ic; ic 398 drivers/mmc/core/block.c if (copy_to_user(&(ic_ptr->response), ic->response, ic 399 drivers/mmc/core/block.c sizeof(ic->response))) ic 402 drivers/mmc/core/block.c if (!idata->ic.write_flag) { ic 403 drivers/mmc/core/block.c if (copy_to_user((void __user *)(unsigned long)ic->data_ptr, ic 518 drivers/mmc/core/block.c cmd.opcode = idata->ic.opcode; ic 519 drivers/mmc/core/block.c cmd.arg = idata->ic.arg; ic 520 drivers/mmc/core/block.c cmd.flags = idata->ic.flags; ic 525 drivers/mmc/core/block.c data.blksz = idata->ic.blksz; ic 526 drivers/mmc/core/block.c data.blocks = idata->ic.blocks; ic 530 drivers/mmc/core/block.c if (idata->ic.write_flag) ic 539 drivers/mmc/core/block.c if (idata->ic.data_timeout_ns) ic 
540 drivers/mmc/core/block.c data.timeout_ns = idata->ic.data_timeout_ns; ic 552 drivers/mmc/core/block.c data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; ic 564 drivers/mmc/core/block.c if (idata->ic.is_acmd) { ic 577 drivers/mmc/core/block.c sbc.arg = data.blocks | (idata->ic.write_flag & BIT(31)); ic 628 drivers/mmc/core/block.c if (idata->ic.postsleep_min_us) ic 629 drivers/mmc/core/block.c usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); ic 631 drivers/mmc/core/block.c memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); ic 672 drivers/mmc/core/block.c idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); ic 742 drivers/mmc/core/block.c idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0); ic 242 drivers/net/ethernet/freescale/gianfar.h #define get_icft_value(ic) (((unsigned long)ic & IC_ICFT_MASK) >> \ ic 244 drivers/net/ethernet/freescale/gianfar.h #define get_ictt_value(ic) ((unsigned long)ic & IC_ICTT_MASK) ic 371 drivers/net/wireless/realtek/rtw88/rtw8822c.c u32 ic = 0, qc = 0, temp = 0; ic 405 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc); ic 407 drivers/net/wireless/realtek/rtw88/rtw8822c.c "[DACK] before: i=0x%x, q=0x%x\n", ic, qc); ic 410 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic != 0x0) { ic 411 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = 0x400 - ic; ic 412 drivers/net/wireless/realtek/rtw88/rtw8822c.c *adc_ic = ic; ic 418 drivers/net/wireless/realtek/rtw88/rtw8822c.c temp = (ic & 0x3ff) | ((qc & 0x3ff) << 10); ic 425 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc); ic 427 drivers/net/wireless/realtek/rtw88/rtw8822c.c "[DACK] after: i=0x%08x, q=0x%08x\n", ic, qc); ic 428 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic >= 0x200) ic 429 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = 0x400 - ic; ic 432 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic < 5 && qc < 5) ic 494 drivers/net/wireless/realtek/rtw88/rtw8822c.c u32 ic, qc, ic_in, qc_in; ic 509 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = ic_in; ic 513 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic != 0x0) ic 514 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = 0x400 - ic; ic 517 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic < 0x300) { ic 518 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = ic * 2 * 6 / 5; ic 519 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = ic + 0x80; ic 521 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = (0x400 - ic) * 2 * 6 / 5; ic 522 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = 0x7f - ic; ic 532 drivers/net/wireless/realtek/rtw88/rtw8822c.c *ic_out = ic; ic 536 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw_dbg(rtwdev, RTW_DBG_RFK, "[DACK] after i=0x%x, q=0x%x\n", ic, qc); ic 546 drivers/net/wireless/realtek/rtw88/rtw8822c.c u32 ic, qc; ic 551 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = *ic_in; ic 560 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw_write32_mask(rtwdev, base_addr + 0xbc, 0xf0000000, ic & 0xf); ic 561 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw_write32_mask(rtwdev, base_addr + 0xc0, 0xf, (ic & 0xf0) >> 4); ic 577 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (!check_hw_ready(rtwdev, read_addr + 0x24, 0x07f80000, ic) || ic 590 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw8822c_dac_cal_rf_mode(rtwdev, &ic, &qc); ic 591 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic >= 0x10) ic 592 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = ic - 0x10; ic 594 
drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = 0x400 - (0x10 - ic); ic 601 drivers/net/wireless/realtek/rtw88/rtw8822c.c *i_out = ic; ic 604 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic >= 0x200) ic 605 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic = 0x400 - ic; ic 609 drivers/net/wireless/realtek/rtw88/rtw8822c.c *ic_in = ic; ic 920 drivers/net/wireless/realtek/rtw88/rtw8822c.c u32 ic = 0, qc = 0, i; ic 938 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw8822c_dac_cal_step2(rtwdev, RF_PATH_A, &ic, &qc); ic 939 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic_a = ic; ic 943 drivers/net/wireless/realtek/rtw88/rtw8822c.c &ic, &qc, &i_a, &q_a); ic 945 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic < 5 && qc < 5) ic 954 drivers/net/wireless/realtek/rtw88/rtw8822c.c rtw8822c_dac_cal_step2(rtwdev, RF_PATH_B, &ic, &qc); ic 955 drivers/net/wireless/realtek/rtw88/rtw8822c.c ic_b = ic; ic 959 drivers/net/wireless/realtek/rtw88/rtw8822c.c &ic, &qc, &i_b, &q_b); ic 961 drivers/net/wireless/realtek/rtw88/rtw8822c.c if (ic < 5 && qc < 5) ic 29 drivers/nfc/pn533/pn533.c u8 ic; ic 2388 drivers/nfc/pn533/pn533.c fv->ic = resp->data[0]; ic 2573 drivers/nfc/pn533/pn533.c fw_ver.ic, fw_ver.ver, fw_ver.rev); ic 340 drivers/s390/cio/chsc.c u32 ic:8; ic 410 drivers/s390/cio/chsc.c sei_area->rs, sei_area->rsid, lir->ic, iuparams, ic 416 drivers/s390/cio/chsc.c sei_area->rs, sei_area->rsid, lir->ic, iuparams, ic 585 drivers/scsi/gdth.c writeb(0x00, &dp6_ptr->u.ic.S_Status); ic 586 drivers/scsi/gdth.c writeb(0x00, &dp6_ptr->u.ic.Cmd_Index); ic 588 drivers/scsi/gdth.c writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]); ic 589 drivers/scsi/gdth.c writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx); ic 593 drivers/scsi/gdth.c while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) { ic 601 drivers/scsi/gdth.c prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]); ic 602 drivers/scsi/gdth.c writeb(0, &dp6_ptr->u.ic.S_Status); ic 614 drivers/scsi/gdth.c writel(0x00, &dp6_ptr->u.ic.S_Info[0]); ic 615 drivers/scsi/gdth.c writel(0x00, &dp6_ptr->u.ic.S_Info[1]); ic 616 drivers/scsi/gdth.c writel(0x00, &dp6_ptr->u.ic.S_Info[2]); ic 617 drivers/scsi/gdth.c writel(0x00, &dp6_ptr->u.ic.S_Info[3]); ic 618 drivers/scsi/gdth.c writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx); ic 622 drivers/scsi/gdth.c while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) { ic 630 drivers/scsi/gdth.c writeb(0, &dp6_ptr->u.ic.S_Status); ic 695 drivers/scsi/gdth.c writeb(0x00, &dp6c_ptr->u.ic.S_Status); ic 696 drivers/scsi/gdth.c writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index); ic 698 drivers/scsi/gdth.c writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]); ic 699 drivers/scsi/gdth.c writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx); ic 705 drivers/scsi/gdth.c while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) { ic 713 drivers/scsi/gdth.c prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]); ic 714 drivers/scsi/gdth.c writeb(0, &dp6c_ptr->u.ic.Status); ic 725 drivers/scsi/gdth.c writel(0x00, &dp6c_ptr->u.ic.S_Info[0]); ic 726 drivers/scsi/gdth.c writel(0x00, &dp6c_ptr->u.ic.S_Info[1]); ic 727 drivers/scsi/gdth.c writel(0x00, &dp6c_ptr->u.ic.S_Info[2]); ic 728 drivers/scsi/gdth.c writel(0x00, &dp6c_ptr->u.ic.S_Info[3]); ic 729 drivers/scsi/gdth.c writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx); ic 735 drivers/scsi/gdth.c while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) { ic 743 drivers/scsi/gdth.c writeb(0, &dp6c_ptr->u.ic.S_Status); ic 812 drivers/scsi/gdth.c writeb(0x00, &dp6m_ptr->u.ic.S_Status); ic 813 drivers/scsi/gdth.c writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index); ic 815 drivers/scsi/gdth.c 
writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]); ic 816 drivers/scsi/gdth.c writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx); ic 820 drivers/scsi/gdth.c while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) { ic 828 drivers/scsi/gdth.c prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]); ic 829 drivers/scsi/gdth.c writeb(0, &dp6m_ptr->u.ic.S_Status); ic 840 drivers/scsi/gdth.c writel(0x00, &dp6m_ptr->u.ic.S_Info[0]); ic 841 drivers/scsi/gdth.c writel(0x00, &dp6m_ptr->u.ic.S_Info[1]); ic 842 drivers/scsi/gdth.c writel(0x00, &dp6m_ptr->u.ic.S_Info[2]); ic 843 drivers/scsi/gdth.c writel(0x00, &dp6m_ptr->u.ic.S_Info[3]); ic 844 drivers/scsi/gdth.c writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx); ic 848 drivers/scsi/gdth.c while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) { ic 856 drivers/scsi/gdth.c writeb(0, &dp6m_ptr->u.ic.S_Status); ic 859 drivers/scsi/gdth.c writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx); ic 863 drivers/scsi/gdth.c while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) { ic 871 drivers/scsi/gdth.c prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16); ic 872 drivers/scsi/gdth.c writeb(0, &dp6m_ptr->u.ic.S_Status); ic 896 drivers/scsi/gdth.c writeb(0, &dp6_ptr->u.ic.Cmd_Index); ic 919 drivers/scsi/gdth.c readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index); ic 936 drivers/scsi/gdth.c gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0); ic 970 drivers/scsi/gdth.c writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0); ic 1006 drivers/scsi/gdth.c &dp6_ptr->u.ic.comm_queue[cmd_no].offset); ic 1008 drivers/scsi/gdth.c &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id); ic 1009 drivers/scsi/gdth.c memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); ic 1013 drivers/scsi/gdth.c &dp6c_ptr->u.ic.comm_queue[cmd_no].offset); ic 1015 drivers/scsi/gdth.c &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id); ic 1016 drivers/scsi/gdth.c memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); ic 1020 drivers/scsi/gdth.c &dp6m_ptr->u.ic.comm_queue[cmd_no].offset); ic 1022 drivers/scsi/gdth.c &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id); ic 1023 drivers/scsi/gdth.c memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count); ic 2516 drivers/scsi/gdth.c ha->status = readw(&dp6_ptr->u.ic.Status); ic 2520 drivers/scsi/gdth.c ha->info = readl(&dp6_ptr->u.ic.Info[0]); ic 2521 drivers/scsi/gdth.c ha->service = readw(&dp6_ptr->u.ic.Service); ic 2522 drivers/scsi/gdth.c ha->info2 = readl(&dp6_ptr->u.ic.Info[1]); ic 2525 drivers/scsi/gdth.c writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */ ic 703 drivers/scsi/gdth.h gdt_dpr_if ic; /* interface area */ ic 719 drivers/scsi/gdth.h gdt_dpr_if ic; /* interface area */ ic 759 drivers/scsi/gdth.h gdt_dpr_if ic; /* interface area */ ic 790 drivers/scsi/gdth.h gdt_dpr_if ic; /* interface area */ ic 62 drivers/staging/media/imx/imx-ic-prpencvf.c struct ipu_ic *ic; ic 133 drivers/staging/media/imx/imx-ic-prpencvf.c if (priv->ic) ic 134 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_put(priv->ic); ic 135 drivers/staging/media/imx/imx-ic-prpencvf.c priv->ic = NULL; ic 153 drivers/staging/media/imx/imx-ic-prpencvf.c struct ipu_ic *ic; ic 157 drivers/staging/media/imx/imx-ic-prpencvf.c ic = ipu_ic_get(ic_priv->ipu, task); ic 158 drivers/staging/media/imx/imx-ic-prpencvf.c if (IS_ERR(ic)) { ic 160 drivers/staging/media/imx/imx-ic-prpencvf.c ret = PTR_ERR(ic); ic 163 drivers/staging/media/imx/imx-ic-prpencvf.c priv->ic = ic; ic 435 drivers/staging/media/imx/imx-ic-prpencvf.c ret = ipu_ic_task_idma_init(priv->ic, channel, ic 488 
drivers/staging/media/imx/imx-ic-prpencvf.c ret = ipu_ic_task_init(priv->ic, &csc, ic 532 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_enable(priv->ic); ic 546 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_task_enable(priv->ic); ic 563 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_task_disable(priv->ic); ic 571 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_disable(priv->ic); ic 604 drivers/staging/media/imx/imx-ic-prpencvf.c ret = ipu_ic_task_init(priv->ic, &csc, ic 624 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_dump(priv->ic); ic 627 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_enable(priv->ic); ic 637 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_task_enable(priv->ic); ic 648 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_task_disable(priv->ic); ic 650 drivers/staging/media/imx/imx-ic-prpencvf.c ipu_ic_disable(priv->ic); ic 14 drivers/staging/rtl8188eu/hal/odm_hwconfig.c #define READ_AND_CONFIG_MP(ic, txt) (ODM_ReadAndConfig##txt##ic(dm_odm)) ic 15 drivers/staging/rtl8188eu/hal/odm_hwconfig.c #define READ_AND_CONFIG_TC(ic, txt) (ODM_ReadAndConfig_TC##txt##ic(dm_odm)) ic 10 drivers/staging/rtl8723bs/hal/odm_HWConfig.c #define READ_AND_CONFIG_MP(ic, txt) (ODM_ReadAndConfig_MP_##ic##txt(pDM_Odm)) ic 12 drivers/staging/rtl8723bs/hal/odm_HWConfig.c #define GET_VERSION_MP(ic, txt) (ODM_GetVersion_MP_##ic##txt()) ic 13 drivers/staging/rtl8723bs/hal/odm_HWConfig.c #define GET_VERSION(ic, txt) (pDM_Odm->bIsMPChip?GET_VERSION_MP(ic, txt):GET_VERSION_TC(ic, txt)) ic 15 drivers/staging/rtl8723bs/hal/odm_HWConfig.h #define AGC_DIFF_CONFIG_MP(ic, band) (ODM_ReadAndConfig_MP_##ic##_AGC_TAB_DIFF(pDM_Odm, Array_MP_##ic##_AGC_TAB_DIFF_##band, \ ic 16 drivers/staging/rtl8723bs/hal/odm_HWConfig.h sizeof(Array_MP_##ic##_AGC_TAB_DIFF_##band)/sizeof(u32))) ic 17 drivers/staging/rtl8723bs/hal/odm_HWConfig.h #define AGC_DIFF_CONFIG_TC(ic, band) (ODM_ReadAndConfig_TC_##ic##_AGC_TAB_DIFF(pDM_Odm, Array_TC_##ic##_AGC_TAB_DIFF_##band, \ ic 18 drivers/staging/rtl8723bs/hal/odm_HWConfig.h sizeof(Array_TC_##ic##_AGC_TAB_DIFF_##band)/sizeof(u32))) ic 20 drivers/staging/rtl8723bs/hal/odm_HWConfig.h #define AGC_DIFF_CONFIG(ic, band)\ ic 23 drivers/staging/rtl8723bs/hal/odm_HWConfig.h AGC_DIFF_CONFIG_MP(ic, band);\ ic 25 drivers/staging/rtl8723bs/hal/odm_HWConfig.h AGC_DIFF_CONFIG_TC(ic, band);\ ic 1566 drivers/staging/speakup/main.c static void update_color_buffer(struct vc_data *vc, const u16 *ic, int len) ic 1581 drivers/staging/speakup/main.c if (ic[i] > 32) { ic 1582 drivers/staging/speakup/main.c speakup_console[vc_num]->ht.highbuf[bi][hi] = ic[i]; ic 1584 drivers/staging/speakup/main.c } else if ((ic[i] == 32) && (hi != 0)) { ic 1588 drivers/staging/speakup/main.c ic[i]; ic 704 drivers/tty/serial/sirfsoc_uart.c unsigned long ic; ic 796 drivers/tty/serial/sirfsoc_uart.c for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++) ic 797 drivers/tty/serial/sirfsoc_uart.c if (baud_rate == baudrate_to_regv[ic].baud_rate) ic 798 drivers/tty/serial/sirfsoc_uart.c clk_div_reg = baudrate_to_regv[ic].reg_val; ic 107 drivers/usb/serial/safe_serial.c #define MY_USB_DEVICE(vend, prod, dc, ic, isc) \ ic 115 drivers/usb/serial/safe_serial.c .bInterfaceClass = (ic), \ ic 37 fs/jffs2/build.c next_inode(int *i, struct jffs2_inode_cache *ic, struct jffs2_sb_info *c) ic 40 fs/jffs2/build.c if (ic->next) ic 41 fs/jffs2/build.c return ic->next; ic 46 fs/jffs2/build.c #define for_each_inode(i, c, ic) \ ic 47 fs/jffs2/build.c for (i = 0, ic = first_inode_chain(&i, (c)); \ ic 48 fs/jffs2/build.c ic; \ ic 
49 fs/jffs2/build.c ic = next_inode(&i, ic, (c))) ic 53 fs/jffs2/build.c struct jffs2_inode_cache *ic, ic 58 fs/jffs2/build.c dbg_fsbuild("building directory inode #%u\n", ic->ino); ic 61 fs/jffs2/build.c for(fd = ic->scan_dents; fd; fd = fd->next) { ic 71 fs/jffs2/build.c fd->name, fd->ino, ic->ino); ic 74 fs/jffs2/build.c fd->ic = NULL; ic 79 fs/jffs2/build.c fd->ic = child_ic; ic 102 fs/jffs2/build.c struct jffs2_inode_cache *ic; ic 123 fs/jffs2/build.c for_each_inode(i, c, ic) { ic 124 fs/jffs2/build.c if (ic->scan_dents) { ic 125 fs/jffs2/build.c jffs2_build_inode_pass1(c, ic, &dir_hardlinks); ic 139 fs/jffs2/build.c for_each_inode(i, c, ic) { ic 140 fs/jffs2/build.c if (ic->pino_nlink) ic 143 fs/jffs2/build.c jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); ic 153 fs/jffs2/build.c ic = jffs2_get_ino_cache(c, fd->ino); ic 155 fs/jffs2/build.c if (ic) ic 156 fs/jffs2/build.c jffs2_build_remove_unlinked_inode(c, ic, &dead_fds); ic 170 fs/jffs2/build.c for_each_inode(i, c, ic) { ic 171 fs/jffs2/build.c if (ic->flags & INO_FLAGS_IS_DIR) ic 172 fs/jffs2/build.c ic->pino_nlink = 0; ic 178 fs/jffs2/build.c for_each_inode(i, c, ic) { ic 179 fs/jffs2/build.c while(ic->scan_dents) { ic 180 fs/jffs2/build.c fd = ic->scan_dents; ic 181 fs/jffs2/build.c ic->scan_dents = fd->next; ic 187 fs/jffs2/build.c if (!fd->ic) { ic 194 fs/jffs2/build.c BUG_ON(!(fd->ic->flags & INO_FLAGS_IS_DIR)); ic 200 fs/jffs2/build.c if (dir_hardlinks && fd->ic->pino_nlink) { ic 202 fs/jffs2/build.c fd->name, fd->ino, ic->ino, fd->ic->pino_nlink); ic 207 fs/jffs2/build.c fd->ic->pino_nlink = ic->ino; ic 211 fs/jffs2/build.c ic->scan_dents = NULL; ic 226 fs/jffs2/build.c for_each_inode(i, c, ic) { ic 227 fs/jffs2/build.c while(ic->scan_dents) { ic 228 fs/jffs2/build.c fd = ic->scan_dents; ic 229 fs/jffs2/build.c ic->scan_dents = fd->next; ic 240 fs/jffs2/build.c struct jffs2_inode_cache *ic, ic 246 fs/jffs2/build.c dbg_fsbuild("removing ino #%u with nlink == zero.\n", ic->ino); ic 248 fs/jffs2/build.c raw = ic->nodes; ic 249 fs/jffs2/build.c while (raw != (void *)ic) { ic 256 fs/jffs2/build.c if (ic->scan_dents) { ic 258 fs/jffs2/build.c dbg_fsbuild("inode #%u was a directory which may have children...\n", ic->ino); ic 260 fs/jffs2/build.c while(ic->scan_dents) { ic 263 fs/jffs2/build.c fd = ic->scan_dents; ic 264 fs/jffs2/build.c ic->scan_dents = fd->next; ic 211 fs/jffs2/erase.c struct jffs2_inode_cache *ic = NULL; ic 222 fs/jffs2/erase.c ic = (struct jffs2_inode_cache *)(*prev); ic 223 fs/jffs2/erase.c prev = &ic->nodes; ic 245 fs/jffs2/erase.c if (!ic) { ic 252 fs/jffs2/erase.c jeb->offset, jeb->offset + c->sector_size, ic->ino); ic 259 fs/jffs2/erase.c this = ic->nodes; ic 274 fs/jffs2/erase.c switch (ic->class) { ic 277 fs/jffs2/erase.c jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); ic 280 fs/jffs2/erase.c jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); ic 284 fs/jffs2/erase.c if (ic->nodes == (void *)ic && ic->pino_nlink == 0) ic 285 fs/jffs2/erase.c jffs2_del_ino_cache(c, ic); ic 624 fs/jffs2/fs.c struct jffs2_inode_cache *ic; ic 647 fs/jffs2/fs.c ic = jffs2_get_ino_cache(c, inum); ic 648 fs/jffs2/fs.c if (!ic) { ic 654 fs/jffs2/fs.c if (ic->state != INO_STATE_CHECKEDABSENT) { ic 657 fs/jffs2/fs.c ic->ino, ic->state); ic 26 fs/jffs2/gc.c struct jffs2_inode_cache *ic, ic 127 fs/jffs2/gc.c struct jffs2_inode_cache *ic; ic 159 fs/jffs2/gc.c for (ic = c->inocache_list[bucket]; ic; ic = ic->next) { ic 160 fs/jffs2/gc.c if (ic->ino < want_ino) ic 163 fs/jffs2/gc.c if (ic->state != 
INO_STATE_CHECKEDABSENT && ic 164 fs/jffs2/gc.c ic->state != INO_STATE_PRESENT) ic 168 fs/jffs2/gc.c ic->ino); ic 189 fs/jffs2/gc.c c->check_ino = ic->ino + c->inocache_hashsize; ic 191 fs/jffs2/gc.c if (!ic->pino_nlink) { ic 193 fs/jffs2/gc.c ic->ino); ic 195 fs/jffs2/gc.c jffs2_xattr_delete_inode(c, ic); ic 198 fs/jffs2/gc.c switch(ic->state) { ic 207 fs/jffs2/gc.c ic->ino, ic->state); ic 216 fs/jffs2/gc.c ic->ino); ic 219 fs/jffs2/gc.c c->check_ino = ic->ino; ic 231 fs/jffs2/gc.c ic->state = INO_STATE_CHECKING; ic 235 fs/jffs2/gc.c __func__, ic->ino); ic 237 fs/jffs2/gc.c ret = jffs2_do_crccheck_inode(c, ic); ic 240 fs/jffs2/gc.c ic->ino); ic 242 fs/jffs2/gc.c jffs2_set_inocache_state(c, ic, INO_STATE_CHECKEDABSENT); ic 327 fs/jffs2/gc.c ic = jffs2_raw_ref_to_ic(raw); ic 332 fs/jffs2/gc.c if (ic->class == RAWNODE_CLASS_XATTR_DATUM ic 333 fs/jffs2/gc.c || ic->class == RAWNODE_CLASS_XATTR_REF) { ic 336 fs/jffs2/gc.c if (ic->class == RAWNODE_CLASS_XATTR_DATUM) { ic 337 fs/jffs2/gc.c ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw); ic 339 fs/jffs2/gc.c ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw); ic 354 fs/jffs2/gc.c ic->ino); ic 365 fs/jffs2/gc.c switch(ic->state) { ic 372 fs/jffs2/gc.c ic->state = INO_STATE_GC; ic 375 fs/jffs2/gc.c ic->ino); ic 392 fs/jffs2/gc.c ic->ino, ic->state); ic 406 fs/jffs2/gc.c __func__, ic->ino, ic->state); ic 426 fs/jffs2/gc.c if (ic->state == INO_STATE_GC) { ic 429 fs/jffs2/gc.c ret = jffs2_garbage_collect_pristine(c, ic, raw); ic 432 fs/jffs2/gc.c ic->state = INO_STATE_CHECKEDABSENT; ic 450 fs/jffs2/gc.c inum = ic->ino; ic 451 fs/jffs2/gc.c nlink = ic->pino_nlink; ic 592 fs/jffs2/gc.c struct jffs2_inode_cache *ic, ic 610 fs/jffs2/gc.c if (ic && alloclen > sizeof(struct jffs2_raw_inode) + JFFS2_MIN_DATA_LEN) ic 689 fs/jffs2/gc.c if (ic) { ic 744 fs/jffs2/gc.c jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, rawlen, ic); ic 408 fs/jffs2/nodelist.c void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state) ic 411 fs/jffs2/nodelist.c ic->state = state; ic 588 fs/jffs2/nodelist.c struct jffs2_inode_cache *ic) ic 626 fs/jffs2/nodelist.c if (ic) { ic 627 fs/jffs2/nodelist.c ref->next_in_ino = ic->nodes; ic 628 fs/jffs2/nodelist.c ic->nodes = ref; ic 255 fs/jffs2/nodelist.h struct jffs2_inode_cache *ic; /* Just during part of build */ ic 365 fs/jffs2/nodelist.h void jffs2_set_inocache_state(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, int state); ic 378 fs/jffs2/nodelist.h struct jffs2_inode_cache *ic); ic 391 fs/jffs2/nodelist.h struct jffs2_inode_cache *ic); ic 418 fs/jffs2/nodelist.h int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); ic 502 fs/jffs2/nodemgmt.c struct jffs2_inode_cache *ic) ic 530 fs/jffs2/nodemgmt.c new = jffs2_link_node_ref(c, jeb, ofs, len, ic); ic 804 fs/jffs2/nodemgmt.c struct jffs2_inode_cache *ic; ic 809 fs/jffs2/nodemgmt.c ic = jffs2_raw_ref_to_ic(ref); ic 810 fs/jffs2/nodemgmt.c for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino)) ic 816 fs/jffs2/nodemgmt.c switch (ic->class) { ic 819 fs/jffs2/nodemgmt.c jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic); ic 822 fs/jffs2/nodemgmt.c jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic); ic 826 fs/jffs2/nodemgmt.c if (ic->nodes == (void *)ic && ic->pino_nlink == 0) ic 827 fs/jffs2/nodemgmt.c jffs2_del_ino_cache(c, ic); ic 1376 fs/jffs2/readinode.c int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache 
*ic) ic 1387 fs/jffs2/readinode.c f->inocache = ic; ic 1392 fs/jffs2/readinode.c jffs2_xattr_do_crccheck_inode(c, ic); ic 964 fs/jffs2/scan.c struct jffs2_inode_cache *ic; ic 966 fs/jffs2/scan.c ic = jffs2_get_ino_cache(c, ino); ic 967 fs/jffs2/scan.c if (ic) ic 968 fs/jffs2/scan.c return ic; ic 973 fs/jffs2/scan.c ic = jffs2_alloc_inode_cache(); ic 974 fs/jffs2/scan.c if (!ic) { ic 978 fs/jffs2/scan.c memset(ic, 0, sizeof(*ic)); ic 980 fs/jffs2/scan.c ic->ino = ino; ic 981 fs/jffs2/scan.c ic->nodes = (void *)ic; ic 982 fs/jffs2/scan.c jffs2_add_ino_cache(c, ic); ic 984 fs/jffs2/scan.c ic->pino_nlink = 1; ic 985 fs/jffs2/scan.c return ic; ic 991 fs/jffs2/scan.c struct jffs2_inode_cache *ic; ic 1018 fs/jffs2/scan.c ic = jffs2_get_ino_cache(c, ino); ic 1019 fs/jffs2/scan.c if (!ic) { ic 1020 fs/jffs2/scan.c ic = jffs2_scan_make_ino_cache(c, ino); ic 1021 fs/jffs2/scan.c if (!ic) ic 1026 fs/jffs2/scan.c jffs2_link_node_ref(c, jeb, ofs | REF_UNCHECKED, PAD(je32_to_cpu(ri->totlen)), ic); ic 1046 fs/jffs2/scan.c struct jffs2_inode_cache *ic; ic 1094 fs/jffs2/scan.c ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(rd->pino)); ic 1095 fs/jffs2/scan.c if (!ic) { ic 1101 fs/jffs2/scan.c PAD(je32_to_cpu(rd->totlen)), ic); ic 1108 fs/jffs2/scan.c jffs2_add_fd_to_list(c, fd, &ic->scan_dents); ic 373 fs/jffs2/summary.c struct jffs2_inode_cache *ic) ic 381 fs/jffs2/summary.c return jffs2_link_node_ref(c, jeb, jeb->offset + ofs, len, ic); ic 389 fs/jffs2/summary.c struct jffs2_inode_cache *ic; ic 418 fs/jffs2/summary.c ic = jffs2_scan_make_ino_cache(c, ino); ic 419 fs/jffs2/summary.c if (!ic) { ic 425 fs/jffs2/summary.c PAD(je32_to_cpu(spi->totlen)), ic); ic 467 fs/jffs2/summary.c ic = jffs2_scan_make_ino_cache(c, je32_to_cpu(spd->pino)); ic 468 fs/jffs2/summary.c if (!ic) { ic 474 fs/jffs2/summary.c PAD(je32_to_cpu(spd->totlen)), ic); ic 482 fs/jffs2/summary.c jffs2_add_fd_to_list(c, fd, &ic->scan_dents); ic 465 fs/jffs2/wbuf.c struct jffs2_inode_cache *ic; ic 473 fs/jffs2/wbuf.c ic = jffs2_raw_ref_to_ic(raw); ic 476 fs/jffs2/wbuf.c if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) { ic 477 fs/jffs2/wbuf.c struct jffs2_xattr_datum *xd = (void *)ic; ic 481 fs/jffs2/wbuf.c ic = NULL; ic 482 fs/jffs2/wbuf.c } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) { ic 483 fs/jffs2/wbuf.c struct jffs2_xattr_datum *xr = (void *)ic; ic 487 fs/jffs2/wbuf.c ic = NULL; ic 488 fs/jffs2/wbuf.c } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) { ic 489 fs/jffs2/wbuf.c struct jffs2_raw_node_ref **p = &ic->nodes; ic 492 fs/jffs2/wbuf.c while (*p && *p != (void *)ic) { ic 501 fs/jffs2/wbuf.c if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) { ic 505 fs/jffs2/wbuf.c f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink); ic 509 fs/jffs2/wbuf.c ic->ino, PTR_ERR(f)); ic 519 fs/jffs2/wbuf.c } else if (unlikely(ic->state != INO_STATE_PRESENT && ic 520 fs/jffs2/wbuf.c ic->state != INO_STATE_CHECKEDABSENT && ic 521 fs/jffs2/wbuf.c ic->state != INO_STATE_GC)) { ic 522 fs/jffs2/wbuf.c JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state); ic 527 fs/jffs2/wbuf.c new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic); ic 26 fs/jffs2/write.c struct jffs2_inode_cache *ic; ic 28 fs/jffs2/write.c ic = jffs2_alloc_inode_cache(); ic 29 fs/jffs2/write.c if (!ic) { ic 33 fs/jffs2/write.c memset(ic, 0, sizeof(*ic)); ic 35 fs/jffs2/write.c f->inocache = ic; ic 525 fs/jffs2/xattr.c rr.ino = cpu_to_je32(ref->ic->ino); ic 545 fs/jffs2/xattr.c dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", 
ref->ic->ino, ref->xd->xid); ic 550 fs/jffs2/xattr.c static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic, ic 560 fs/jffs2/xattr.c ref->ic = ic; ic 570 fs/jffs2/xattr.c ref->next = ic->xref; ic 571 fs/jffs2/xattr.c ic->xref = ref; ic 583 fs/jffs2/xattr.c ref->ino = ref->ic->ino; ic 596 fs/jffs2/xattr.c void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) ic 602 fs/jffs2/xattr.c if (!ic || ic->pino_nlink > 0) ic 606 fs/jffs2/xattr.c for (ref = ic->xref; ref; ref = _ref) { ic 610 fs/jffs2/xattr.c ic->xref = NULL; ic 614 fs/jffs2/xattr.c void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) ic 621 fs/jffs2/xattr.c for (ref = ic->xref; ref; ref = _ref) { ic 630 fs/jffs2/xattr.c ic->xref = NULL; ic 634 fs/jffs2/xattr.c static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) ic 643 fs/jffs2/xattr.c if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED)) ic 648 fs/jffs2/xattr.c for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { ic 683 fs/jffs2/xattr.c ic->flags |= INO_FLAGS_XATTR_CHECKED; ic 690 fs/jffs2/xattr.c void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic) ic 692 fs/jffs2/xattr.c check_xattr_ref_inode(c, ic); ic 780 fs/jffs2/xattr.c struct jffs2_inode_cache *ic; ic 842 fs/jffs2/xattr.c ic = jffs2_get_ino_cache(c, ref->ino); ic 843 fs/jffs2/xattr.c if (!xd || !ic || !ic->pino_nlink) { ic 853 fs/jffs2/xattr.c ref->ic = ic; ic 855 fs/jffs2/xattr.c ref->next = ic->xref; ic 856 fs/jffs2/xattr.c ic->xref = ref; ic 966 fs/jffs2/xattr.c struct jffs2_inode_cache *ic = f->inocache; ic 974 fs/jffs2/xattr.c rc = check_xattr_ref_inode(c, ic); ic 981 fs/jffs2/xattr.c for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { ic 982 fs/jffs2/xattr.c BUG_ON(ref->ic != ic); ic 1036 fs/jffs2/xattr.c struct jffs2_inode_cache *ic = f->inocache; ic 1041 fs/jffs2/xattr.c rc = check_xattr_ref_inode(c, ic); ic 1047 fs/jffs2/xattr.c for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { ic 1048 fs/jffs2/xattr.c BUG_ON(ref->ic!=ic); ic 1098 fs/jffs2/xattr.c struct jffs2_inode_cache *ic = f->inocache; ic 1104 fs/jffs2/xattr.c rc = check_xattr_ref_inode(c, ic); ic 1119 fs/jffs2/xattr.c for (ref=ic->xref, pref=&ic->xref; ref; pref=&ref->next, ref=ref->next) { ic 1138 fs/jffs2/xattr.c ref->ino = ic->ino; ic 1150 fs/jffs2/xattr.c ref->ic = ic; ic 1190 fs/jffs2/xattr.c newref = create_xattr_ref(c, ic, xd); ic 1193 fs/jffs2/xattr.c ref->next = ic->xref; ic 1194 fs/jffs2/xattr.c ic->xref = ref; ic 1281 fs/jffs2/xattr.c ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node)); ic 55 fs/jffs2/xattr.h struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */ ic 80 fs/jffs2/xattr.h extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); ic 81 fs/jffs2/xattr.h extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); ic 82 fs/jffs2/xattr.h extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); ic 109 fs/jffs2/xattr.h #define jffs2_xattr_do_crccheck_inode(c, ic) ic 110 fs/jffs2/xattr.h #define jffs2_xattr_delete_inode(c, ic) ic 111 fs/jffs2/xattr.h #define jffs2_xattr_free_inode(c, ic) ic 576 fs/ntfs/attrib.c const u32 name_len, const IGNORE_CASE_BOOL ic, ic 616 fs/ntfs/attrib.c a->name_length, ic, upcase, upcase_len)) { ic 845 fs/ntfs/attrib.c const IGNORE_CASE_BOOL ic, const VCN 
lowest_vcn, ic 915 fs/ntfs/attrib.c name_len, ic, vol->upcase, vol->upcase_len)) { ic 1093 fs/ntfs/attrib.c return ntfs_attr_find(AT_END, name, name_len, ic, val, val_len, ic 1127 fs/ntfs/attrib.c err = ntfs_attr_find(type, name, name_len, ic, val, val_len, ic 1174 fs/ntfs/attrib.c const u32 name_len, const IGNORE_CASE_BOOL ic, ic 1189 fs/ntfs/attrib.c return ntfs_attr_find(type, name, name_len, ic, val, val_len, ic 1191 fs/ntfs/attrib.c return ntfs_external_attr_find(type, name, name_len, ic, lowest_vcn, ic 60 fs/ntfs/attrib.h const u32 name_len, const IGNORE_CASE_BOOL ic, ic 632 fs/ntfs/dir.c IGNORE_CASE_BOOL ic; ic 696 fs/ntfs/dir.c ic = ie->key.file_name.file_name_type ? IGNORE_CASE : ic 706 fs/ntfs/dir.c ie->key.file_name.file_name_length, ic, ic 883 fs/ntfs/dir.c ic = ie->key.file_name.file_name_type ? IGNORE_CASE : ic 893 fs/ntfs/dir.c ie->key.file_name.file_name_length, ic, ic 96 fs/ntfs/ntfs.h const IGNORE_CASE_BOOL ic, ic 100 fs/ntfs/ntfs.h const int err_val, const IGNORE_CASE_BOOL ic, ic 111 fs/ntfs/ntfs.h const int err_val, const IGNORE_CASE_BOOL ic, ic 55 fs/ntfs/unistr.c const ntfschar *s2, size_t s2_len, const IGNORE_CASE_BOOL ic, ic 60 fs/ntfs/unistr.c if (ic == CASE_SENSITIVE) ic 85 fs/ntfs/unistr.c const int err_val, const IGNORE_CASE_BOOL ic, ic 97 fs/ntfs/unistr.c if (ic) { ic 212 fs/ntfs/unistr.c const int err_val, const IGNORE_CASE_BOOL ic, ic 219 fs/ntfs/unistr.c err_val, ic, upcase, upcase_len); ic 341 fs/xfs/xfs_itable.c struct xfs_inumbers_chunk *ic = data; ic 344 fs/xfs/xfs_itable.c error = ic->formatter(ic->breq, &inogrp); ic 348 fs/xfs/xfs_itable.c ic->breq->startino = XFS_AGINO_TO_INO(mp, agno, irec->ir_startino) + ic 361 fs/xfs/xfs_itable.c struct xfs_inumbers_chunk ic = { ic 371 fs/xfs/xfs_itable.c xfs_inumbers_walk, breq->icount, &ic); ic 3897 fs/xfs/xfs_log.c xlog_in_core_t *iclog, *ic; ic 3905 fs/xfs/xfs_log.c ic = iclog; ic 3907 fs/xfs/xfs_log.c ic->ic_state = XLOG_STATE_IOERROR; ic 3908 fs/xfs/xfs_log.c ic = ic->ic_next; ic 3909 fs/xfs/xfs_log.c } while (ic != iclog); ic 424 include/soc/fsl/qe/immap_qe.h struct qe_ic_regs ic; /* Interrupt Controller */ ic 51 include/uapi/linux/mmc/ioctl.h #define mmc_ioc_cmd_set_data(ic, ptr) ic.data_ptr = (__u64)(unsigned long) ptr ic 437 include/video/imx-ipu-v3.h int ipu_ic_task_init(struct ipu_ic *ic, ic 441 include/video/imx-ipu-v3.h int ipu_ic_task_init_rsc(struct ipu_ic *ic, ic 446 include/video/imx-ipu-v3.h int ipu_ic_task_graphics_init(struct ipu_ic *ic, ic 450 include/video/imx-ipu-v3.h void ipu_ic_task_enable(struct ipu_ic *ic); ic 451 include/video/imx-ipu-v3.h void ipu_ic_task_disable(struct ipu_ic *ic); ic 452 include/video/imx-ipu-v3.h int ipu_ic_task_idma_init(struct ipu_ic *ic, struct ipuv3_channel *channel, ic 455 include/video/imx-ipu-v3.h int ipu_ic_enable(struct ipu_ic *ic); ic 456 include/video/imx-ipu-v3.h int ipu_ic_disable(struct ipu_ic *ic); ic 458 include/video/imx-ipu-v3.h void ipu_ic_put(struct ipu_ic *ic); ic 459 include/video/imx-ipu-v3.h void ipu_ic_dump(struct ipu_ic *ic); ic 152 net/atm/ioctl.c struct atm_ioctl *ic = list_entry(pos, struct atm_ioctl, list); ic 153 net/atm/ioctl.c if (try_module_get(ic->owner)) { ic 154 net/atm/ioctl.c error = ic->ioctl(sock, cmd, arg); ic 155 net/atm/ioctl.c module_put(ic->owner); ic 1822 net/ipv4/netfilter/ip_tables.c const struct icmphdr *ic; ic 1830 net/ipv4/netfilter/ip_tables.c ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); ic 1831 net/ipv4/netfilter/ip_tables.c if (ic == NULL) { ic 1842 net/ipv4/netfilter/ip_tables.c ic->type, 
ic->code, ic 1831 net/ipv6/netfilter/ip6_tables.c const struct icmp6hdr *ic; ic 1839 net/ipv6/netfilter/ip6_tables.c ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph); ic 1840 net/ipv6/netfilter/ip6_tables.c if (ic == NULL) { ic 1851 net/ipv6/netfilter/ip6_tables.c ic->icmp6_type, ic->icmp6_code, ic 214 net/ipv6/netfilter/nf_log_ipv6.c const struct icmp6hdr *ic; ic 223 net/ipv6/netfilter/nf_log_ipv6.c ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h); ic 224 net/ipv6/netfilter/nf_log_ipv6.c if (ic == NULL) { ic 232 net/ipv6/netfilter/nf_log_ipv6.c ic->icmp6_type, ic->icmp6_code); ic 234 net/ipv6/netfilter/nf_log_ipv6.c switch (ic->icmp6_type) { ic 239 net/ipv6/netfilter/nf_log_ipv6.c ntohs(ic->icmp6_identifier), ic 240 net/ipv6/netfilter/nf_log_ipv6.c ntohs(ic->icmp6_sequence)); ic 250 net/ipv6/netfilter/nf_log_ipv6.c ntohl(ic->icmp6_pointer)); ic 264 net/ipv6/netfilter/nf_log_ipv6.c if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) { ic 266 net/ipv6/netfilter/nf_log_ipv6.c ntohl(ic->icmp6_mtu)); ic 68 net/netfilter/ipset/ip_set_getport.c const struct icmphdr *ic; ic 70 net/netfilter/ipset/ip_set_getport.c ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); ic 71 net/netfilter/ipset/ip_set_getport.c if (!ic) ic 74 net/netfilter/ipset/ip_set_getport.c *port = (__force __be16)htons((ic->type << 8) | ic->code); ic 79 net/netfilter/ipset/ip_set_getport.c const struct icmp6hdr *ic; ic 81 net/netfilter/ipset/ip_set_getport.c ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich); ic 82 net/netfilter/ipset/ip_set_getport.c if (!ic) ic 86 net/netfilter/ipset/ip_set_getport.c htons((ic->icmp6_type << 8) | ic->icmp6_code); ic 937 net/netfilter/ipvs/ip_vs_core.c struct icmphdr _icmph, *ic; ic 955 net/netfilter/ipvs/ip_vs_core.c ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); ic 956 net/netfilter/ipvs/ip_vs_core.c if (ic == NULL) ic 960 net/netfilter/ipvs/ip_vs_core.c ic->type, ntohs(icmp_id(ic)), ic 970 net/netfilter/ipvs/ip_vs_core.c if ((ic->type != ICMP_DEST_UNREACH) && ic 971 net/netfilter/ipvs/ip_vs_core.c (ic->type != ICMP_SOURCE_QUENCH) && ic 972 net/netfilter/ipvs/ip_vs_core.c (ic->type != ICMP_TIME_EXCEEDED)) { ic 1013 net/netfilter/ipvs/ip_vs_core.c struct icmp6hdr _icmph, *ic; ic 1021 net/netfilter/ipvs/ip_vs_core.c ic = frag_safe_skb_hp(skb, ipvsh->len, sizeof(_icmph), &_icmph); ic 1022 net/netfilter/ipvs/ip_vs_core.c if (ic == NULL) ic 1032 net/netfilter/ipvs/ip_vs_core.c if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) { ic 1043 net/netfilter/ipvs/ip_vs_core.c ic->icmp6_type, ntohs(icmpv6_id(ic)), ic 1657 net/netfilter/ipvs/ip_vs_core.c struct icmphdr _icmph, *ic; ic 1677 net/netfilter/ipvs/ip_vs_core.c ic = skb_header_pointer(skb, offset, sizeof(_icmph), &_icmph); ic 1678 net/netfilter/ipvs/ip_vs_core.c if (ic == NULL) ic 1682 net/netfilter/ipvs/ip_vs_core.c ic->type, ntohs(icmp_id(ic)), ic 1692 net/netfilter/ipvs/ip_vs_core.c if ((ic->type != ICMP_DEST_UNREACH) && ic 1693 net/netfilter/ipvs/ip_vs_core.c (ic->type != ICMP_SOURCE_QUENCH) && ic 1694 net/netfilter/ipvs/ip_vs_core.c (ic->type != ICMP_TIME_EXCEEDED)) { ic 1801 net/netfilter/ipvs/ip_vs_core.c __be32 info = ic->un.gateway; ic 1802 net/netfilter/ipvs/ip_vs_core.c __u8 type = ic->type; ic 1803 net/netfilter/ipvs/ip_vs_core.c __u8 code = ic->code; ic 1806 net/netfilter/ipvs/ip_vs_core.c if (ic->type == ICMP_DEST_UNREACH && ic 1807 net/netfilter/ipvs/ip_vs_core.c ic->code == ICMP_FRAG_NEEDED) { ic 1809 net/netfilter/ipvs/ip_vs_core.c u32 mtu = ntohs(ic->un.frag.mtu); ic 1875 
net/netfilter/ipvs/ip_vs_core.c struct icmp6hdr _icmph, *ic; ic 1885 net/netfilter/ipvs/ip_vs_core.c ic = frag_safe_skb_hp(skb, iph->len, sizeof(_icmph), &_icmph); ic 1886 net/netfilter/ipvs/ip_vs_core.c if (ic == NULL) ic 1896 net/netfilter/ipvs/ip_vs_core.c if (ic->icmp6_type & ICMPV6_INFOMSG_MASK) { ic 1907 net/netfilter/ipvs/ip_vs_core.c ic->icmp6_type, ntohs(icmpv6_id(ic)), ic 75 net/rds/ib.c struct rds_ib_connection *ic; ic 78 net/rds/ib.c list_for_each_entry(ic, &ib_nodev_conns, ib_node) ic 79 net/rds/ib.c rds_conn_connect_if_down(ic->conn); ic 85 net/rds/ib.c struct rds_ib_connection *ic; ic 89 net/rds/ib.c list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node) ic 90 net/rds/ib.c rds_conn_path_drop(&ic->conn->c_path[0], true); ic 294 net/rds/ib.c struct rds_ib_connection *ic = conn->c_transport_data; ic 304 net/rds/ib.c if (ic) { ic 306 net/rds/ib.c iinfo->sl = ic->i_sl; ic 314 net/rds/ib.c rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo->src_gid, ic 317 net/rds/ib.c rds_ibdev = ic->rds_ibdev; ic 318 net/rds/ib.c iinfo->max_send_wr = ic->i_send_ring.w_nr; ic 319 net/rds/ib.c iinfo->max_recv_wr = ic->i_recv_ring.w_nr; ic 322 net/rds/ib.c iinfo->cache_allocs = atomic_read(&ic->i_cache_allocs); ic 333 net/rds/ib.c struct rds_ib_connection *ic = conn->c_transport_data; ic 341 net/rds/ib.c if (ic) { ic 343 net/rds/ib.c iinfo6->sl = ic->i_sl; ic 352 net/rds/ib.c rdma_read_gids(ic->i_cm_id, (union ib_gid *)&iinfo6->src_gid, ic 354 net/rds/ib.c rds_ibdev = ic->rds_ibdev; ic 355 net/rds/ib.c iinfo6->max_send_wr = ic->i_send_ring.w_nr; ic 356 net/rds/ib.c iinfo6->max_recv_wr = ic->i_recv_ring.w_nr; ic 359 net/rds/ib.c iinfo6->cache_allocs = atomic_read(&ic->i_cache_allocs); ic 395 net/rds/ib.h void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc); ic 401 net/rds/ib.h int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp); ic 402 net/rds/ib.h void rds_ib_recv_free_caches(struct rds_ib_connection *ic); ic 406 net/rds/ib.h void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc, ic 409 net/rds/ib.h void rds_ib_recv_init_ring(struct rds_ib_connection *ic); ic 410 net/rds/ib.h void rds_ib_recv_clear_ring(struct rds_ib_connection *ic); ic 411 net/rds/ib.h void rds_ib_recv_init_ack(struct rds_ib_connection *ic); ic 412 net/rds/ib.h void rds_ib_attempt_ack(struct rds_ib_connection *ic); ic 413 net/rds/ib.h void rds_ib_ack_send_complete(struct rds_ib_connection *ic); ic 414 net/rds/ib.h u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic); ic 415 net/rds/ib.h void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required); ic 433 net/rds/ib.h void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc); ic 434 net/rds/ib.h void rds_ib_send_init_ring(struct rds_ib_connection *ic); ic 435 net/rds/ib.h void rds_ib_send_clear_ring(struct rds_ib_connection *ic); ic 439 net/rds/ib.h int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, ic 58 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 62 net/rds/ib_cm.c ic->i_flowctl = 1; ic 65 net/rds/ib_cm.c ic->i_flowctl = 0; ic 84 net/rds/ib_cm.c rds_ib_tune_rnr(struct rds_ib_connection *ic, struct ib_qp_attr *attr) ic 89 net/rds/ib_cm.c ret = ib_modify_qp(ic->i_cm_id->qp, attr, IB_QP_MIN_RNR_TIMER); ic 100 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 149 net/rds/ib_cm.c ic->i_active_side ? "Active" : "Passive", ic 153 net/rds/ib_cm.c ic->i_flowctl ? 
", flow control" : ""); ic 156 net/rds/ib_cm.c ic->i_sl = ic->i_cm_id->route.path_rec->sl; ic 158 net/rds/ib_cm.c atomic_set(&ic->i_cq_quiesce, 0); ic 164 net/rds/ib_cm.c rds_ib_send_init_ring(ic); ic 165 net/rds/ib_cm.c rds_ib_recv_init_ring(ic); ic 171 net/rds/ib_cm.c rds_ib_tune_rnr(ic, &qp_attr); ic 174 net/rds/ib_cm.c err = ib_modify_qp(ic->i_cm_id->qp, &qp_attr, IB_QP_STATE); ic 179 net/rds/ib_cm.c err = rds_ib_update_ipaddr(ic->rds_ibdev, &conn->c_laddr); ic 204 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 205 net/rds/ib_cm.c struct rds_ib_device *rds_ibdev = ic->rds_ibdev; ic 228 net/rds/ib_cm.c cpu_to_be64(rds_ib_piggyb_ack(ic)); ic 243 net/rds/ib_cm.c cpu_to_be64(rds_ib_piggyb_ack(ic)); ic 251 net/rds/ib_cm.c if (ic->i_flowctl) { ic 255 net/rds/ib_cm.c (atomic_read(&ic->i_credits)); ic 261 net/rds/ib_cm.c &ic->i_credits); ic 283 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 289 net/rds/ib_cm.c tasklet_schedule(&ic->i_recv_tasklet); ic 292 net/rds/ib_cm.c static void poll_scq(struct rds_ib_connection *ic, struct ib_cq *cq, ic 305 net/rds/ib_cm.c if (wc->wr_id <= ic->i_send_ring.w_nr || ic 307 net/rds/ib_cm.c rds_ib_send_cqe_handler(ic, wc); ic 309 net/rds/ib_cm.c rds_ib_mr_cqe_handler(ic, wc); ic 317 net/rds/ib_cm.c struct rds_ib_connection *ic = (struct rds_ib_connection *)data; ic 318 net/rds/ib_cm.c struct rds_connection *conn = ic->conn; ic 323 net/rds/ib_cm.c if (atomic_read(&ic->i_cq_quiesce)) ic 326 net/rds/ib_cm.c poll_scq(ic, ic->i_send_cq, ic->i_send_wc); ic 327 net/rds/ib_cm.c ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); ic 328 net/rds/ib_cm.c poll_scq(ic, ic->i_send_cq, ic->i_send_wc); ic 333 net/rds/ib_cm.c rds_send_xmit(&ic->conn->c_path[0]); ic 336 net/rds/ib_cm.c static void poll_rcq(struct rds_ib_connection *ic, struct ib_cq *cq, ic 350 net/rds/ib_cm.c rds_ib_recv_cqe_handler(ic, wc, ack_state); ic 357 net/rds/ib_cm.c struct rds_ib_connection *ic = (struct rds_ib_connection *)data; ic 358 net/rds/ib_cm.c struct rds_connection *conn = ic->conn; ic 359 net/rds/ib_cm.c struct rds_ib_device *rds_ibdev = ic->rds_ibdev; ic 368 net/rds/ib_cm.c if (atomic_read(&ic->i_cq_quiesce)) ic 372 net/rds/ib_cm.c poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); ic 373 net/rds/ib_cm.c ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); ic 374 net/rds/ib_cm.c poll_rcq(ic, ic->i_recv_cq, ic->i_recv_wc, &state); ic 377 net/rds/ib_cm.c rds_ib_set_ack(ic, state.ack_next, state.ack_required); ic 378 net/rds/ib_cm.c if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) { ic 380 net/rds/ib_cm.c ic->i_ack_recv = state.ack_recv; ic 384 net/rds/ib_cm.c rds_ib_attempt_ack(ic); ic 390 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 392 net/rds/ib_cm.c rdsdebug("conn %p ic %p event %u (%s)\n", conn, ic, event->event, ic 397 net/rds/ib_cm.c rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST); ic 411 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 417 net/rds/ib_cm.c tasklet_schedule(&ic->i_send_tasklet); ic 448 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 449 net/rds/ib_cm.c struct ib_device *dev = ic->i_cm_id->device; ic 475 net/rds/ib_cm.c if (ic->i_send_ring.w_nr != max_wrs) ic 476 net/rds/ib_cm.c rds_ib_ring_resize(&ic->i_send_ring, max_wrs); ic 480 net/rds/ib_cm.c if (ic->i_recv_ring.w_nr != max_wrs) ic 481 net/rds/ib_cm.c rds_ib_ring_resize(&ic->i_recv_ring, max_wrs); ic 484 net/rds/ib_cm.c ic->i_pd = rds_ibdev->pd; ic 486 net/rds/ib_cm.c 
ic->i_scq_vector = ibdev_get_unused_vector(rds_ibdev); ic 487 net/rds/ib_cm.c cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1; ic 488 net/rds/ib_cm.c cq_attr.comp_vector = ic->i_scq_vector; ic 489 net/rds/ib_cm.c ic->i_send_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_send, ic 492 net/rds/ib_cm.c if (IS_ERR(ic->i_send_cq)) { ic 493 net/rds/ib_cm.c ret = PTR_ERR(ic->i_send_cq); ic 494 net/rds/ib_cm.c ic->i_send_cq = NULL; ic 495 net/rds/ib_cm.c ibdev_put_vector(rds_ibdev, ic->i_scq_vector); ic 500 net/rds/ib_cm.c ic->i_rcq_vector = ibdev_get_unused_vector(rds_ibdev); ic 501 net/rds/ib_cm.c cq_attr.cqe = ic->i_recv_ring.w_nr; ic 502 net/rds/ib_cm.c cq_attr.comp_vector = ic->i_rcq_vector; ic 503 net/rds/ib_cm.c ic->i_recv_cq = ib_create_cq(dev, rds_ib_cq_comp_handler_recv, ic 506 net/rds/ib_cm.c if (IS_ERR(ic->i_recv_cq)) { ic 507 net/rds/ib_cm.c ret = PTR_ERR(ic->i_recv_cq); ic 508 net/rds/ib_cm.c ic->i_recv_cq = NULL; ic 509 net/rds/ib_cm.c ibdev_put_vector(rds_ibdev, ic->i_rcq_vector); ic 514 net/rds/ib_cm.c ret = ib_req_notify_cq(ic->i_send_cq, IB_CQ_NEXT_COMP); ic 520 net/rds/ib_cm.c ret = ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED); ic 531 net/rds/ib_cm.c attr.cap.max_send_wr = ic->i_send_ring.w_nr + fr_queue_space + 1; ic 532 net/rds/ib_cm.c attr.cap.max_recv_wr = ic->i_recv_ring.w_nr + 1; ic 537 net/rds/ib_cm.c attr.send_cq = ic->i_send_cq; ic 538 net/rds/ib_cm.c attr.recv_cq = ic->i_recv_cq; ic 544 net/rds/ib_cm.c ret = rdma_create_qp(ic->i_cm_id, ic->i_pd, &attr); ic 550 net/rds/ib_cm.c ic->i_send_hdrs = ib_dma_alloc_coherent(dev, ic 551 net/rds/ib_cm.c ic->i_send_ring.w_nr * ic 553 net/rds/ib_cm.c &ic->i_send_hdrs_dma, GFP_KERNEL); ic 554 net/rds/ib_cm.c if (!ic->i_send_hdrs) { ic 560 net/rds/ib_cm.c ic->i_recv_hdrs = ib_dma_alloc_coherent(dev, ic 561 net/rds/ib_cm.c ic->i_recv_ring.w_nr * ic 563 net/rds/ib_cm.c &ic->i_recv_hdrs_dma, GFP_KERNEL); ic 564 net/rds/ib_cm.c if (!ic->i_recv_hdrs) { ic 570 net/rds/ib_cm.c ic->i_ack = ib_dma_alloc_coherent(dev, sizeof(struct rds_header), ic 571 net/rds/ib_cm.c &ic->i_ack_dma, GFP_KERNEL); ic 572 net/rds/ib_cm.c if (!ic->i_ack) { ic 578 net/rds/ib_cm.c ic->i_sends = vzalloc_node(array_size(sizeof(struct rds_ib_send_work), ic 579 net/rds/ib_cm.c ic->i_send_ring.w_nr), ic 581 net/rds/ib_cm.c if (!ic->i_sends) { ic 587 net/rds/ib_cm.c ic->i_recvs = vzalloc_node(array_size(sizeof(struct rds_ib_recv_work), ic 588 net/rds/ib_cm.c ic->i_recv_ring.w_nr), ic 590 net/rds/ib_cm.c if (!ic->i_recvs) { ic 596 net/rds/ib_cm.c rds_ib_recv_init_ack(ic); ic 598 net/rds/ib_cm.c rdsdebug("conn %p pd %p cq %p %p\n", conn, ic->i_pd, ic 599 net/rds/ib_cm.c ic->i_send_cq, ic->i_recv_cq); ic 604 net/rds/ib_cm.c vfree(ic->i_sends); ic 607 net/rds/ib_cm.c ic->i_ack, ic->i_ack_dma); ic 609 net/rds/ib_cm.c ib_dma_free_coherent(dev, ic->i_recv_ring.w_nr * ic 611 net/rds/ib_cm.c ic->i_recv_hdrs, ic->i_recv_hdrs_dma); ic 613 net/rds/ib_cm.c ib_dma_free_coherent(dev, ic->i_send_ring.w_nr * ic 615 net/rds/ib_cm.c ic->i_send_hdrs, ic->i_send_hdrs_dma); ic 617 net/rds/ib_cm.c rdma_destroy_qp(ic->i_cm_id); ic 619 net/rds/ib_cm.c ib_destroy_cq(ic->i_recv_cq); ic 620 net/rds/ib_cm.c ic->i_recv_cq = NULL; ic 622 net/rds/ib_cm.c ib_destroy_cq(ic->i_send_cq); ic 623 net/rds/ib_cm.c ic->i_send_cq = NULL; ic 728 net/rds/ib_cm.c struct rds_ib_connection *ic = NULL; ic 824 net/rds/ib_cm.c ic = conn->c_transport_data; ic 836 net/rds/ib_cm.c BUG_ON(ic->i_cm_id); ic 838 net/rds/ib_cm.c ic->i_cm_id = cm_id; ic 871 net/rds/ib_cm.c struct rds_ib_connection *ic = 
conn->c_transport_data; ic 879 net/rds/ib_cm.c ic->i_flowctl = rds_ib_sysctl_flow_control; /* advertise flow control */ ic 899 net/rds/ib_cm.c if (ic->i_cm_id == cm_id) ic 902 net/rds/ib_cm.c ic->i_active_side = true; ic 911 net/rds/ib_cm.c struct rds_ib_connection *ic; ic 914 net/rds/ib_cm.c ic = conn->c_transport_data; ic 924 net/rds/ib_cm.c ic->i_cm_id = rdma_create_id(&init_net, handler, conn, ic 926 net/rds/ib_cm.c if (IS_ERR(ic->i_cm_id)) { ic 927 net/rds/ib_cm.c ret = PTR_ERR(ic->i_cm_id); ic 928 net/rds/ib_cm.c ic->i_cm_id = NULL; ic 933 net/rds/ib_cm.c rdsdebug("created cm id %p for conn %p\n", ic->i_cm_id, conn); ic 963 net/rds/ib_cm.c ret = rdma_resolve_addr(ic->i_cm_id, (struct sockaddr *)&src, ic 967 net/rds/ib_cm.c rdsdebug("addr resolve failed for cm id %p: %d\n", ic->i_cm_id, ic 969 net/rds/ib_cm.c rdma_destroy_id(ic->i_cm_id); ic 970 net/rds/ib_cm.c ic->i_cm_id = NULL; ic 985 net/rds/ib_cm.c struct rds_ib_connection *ic = conn->c_transport_data; ic 988 net/rds/ib_cm.c rdsdebug("cm %p pd %p cq %p %p qp %p\n", ic->i_cm_id, ic 989 net/rds/ib_cm.c ic->i_pd, ic->i_send_cq, ic->i_recv_cq, ic 990 net/rds/ib_cm.c ic->i_cm_id ? ic->i_cm_id->qp : NULL); ic 992 net/rds/ib_cm.c if (ic->i_cm_id) { ic 993 net/rds/ib_cm.c struct ib_device *dev = ic->i_cm_id->device; ic 995 net/rds/ib_cm.c rdsdebug("disconnecting cm %p\n", ic->i_cm_id); ic 996 net/rds/ib_cm.c err = rdma_disconnect(ic->i_cm_id); ic 1002 net/rds/ib_cm.c ic->i_cm_id, err); ic 1020 net/rds/ib_cm.c rds_ib_ring_empty(&ic->i_recv_ring) && ic 1021 net/rds/ib_cm.c (atomic_read(&ic->i_signaled_sends) == 0) && ic 1022 net/rds/ib_cm.c (atomic_read(&ic->i_fastreg_inuse_count) == 0) && ic 1023 net/rds/ib_cm.c (atomic_read(&ic->i_fastreg_wrs) == RDS_IB_DEFAULT_FR_WR)); ic 1024 net/rds/ib_cm.c tasklet_kill(&ic->i_send_tasklet); ic 1025 net/rds/ib_cm.c tasklet_kill(&ic->i_recv_tasklet); ic 1027 net/rds/ib_cm.c atomic_set(&ic->i_cq_quiesce, 1); ic 1030 net/rds/ib_cm.c if (ic->i_cm_id->qp) ic 1031 net/rds/ib_cm.c rdma_destroy_qp(ic->i_cm_id); ic 1032 net/rds/ib_cm.c if (ic->i_send_cq) { ic 1033 net/rds/ib_cm.c if (ic->rds_ibdev) ic 1034 net/rds/ib_cm.c ibdev_put_vector(ic->rds_ibdev, ic->i_scq_vector); ic 1035 net/rds/ib_cm.c ib_destroy_cq(ic->i_send_cq); ic 1038 net/rds/ib_cm.c if (ic->i_recv_cq) { ic 1039 net/rds/ib_cm.c if (ic->rds_ibdev) ic 1040 net/rds/ib_cm.c ibdev_put_vector(ic->rds_ibdev, ic->i_rcq_vector); ic 1041 net/rds/ib_cm.c ib_destroy_cq(ic->i_recv_cq); ic 1045 net/rds/ib_cm.c if (ic->i_send_hdrs) ic 1047 net/rds/ib_cm.c ic->i_send_ring.w_nr * ic 1049 net/rds/ib_cm.c ic->i_send_hdrs, ic 1050 net/rds/ib_cm.c ic->i_send_hdrs_dma); ic 1052 net/rds/ib_cm.c if (ic->i_recv_hdrs) ic 1054 net/rds/ib_cm.c ic->i_recv_ring.w_nr * ic 1056 net/rds/ib_cm.c ic->i_recv_hdrs, ic 1057 net/rds/ib_cm.c ic->i_recv_hdrs_dma); ic 1059 net/rds/ib_cm.c if (ic->i_ack) ic 1061 net/rds/ib_cm.c ic->i_ack, ic->i_ack_dma); ic 1063 net/rds/ib_cm.c if (ic->i_sends) ic 1064 net/rds/ib_cm.c rds_ib_send_clear_ring(ic); ic 1065 net/rds/ib_cm.c if (ic->i_recvs) ic 1066 net/rds/ib_cm.c rds_ib_recv_clear_ring(ic); ic 1068 net/rds/ib_cm.c rdma_destroy_id(ic->i_cm_id); ic 1073 net/rds/ib_cm.c if (ic->rds_ibdev) ic 1074 net/rds/ib_cm.c rds_ib_remove_conn(ic->rds_ibdev, conn); ic 1076 net/rds/ib_cm.c ic->i_cm_id = NULL; ic 1077 net/rds/ib_cm.c ic->i_pd = NULL; ic 1078 net/rds/ib_cm.c ic->i_send_cq = NULL; ic 1079 net/rds/ib_cm.c ic->i_recv_cq = NULL; ic 1080 net/rds/ib_cm.c ic->i_send_hdrs = NULL; ic 1081 net/rds/ib_cm.c ic->i_recv_hdrs = NULL; ic 1082 net/rds/ib_cm.c 
ic->i_ack = NULL; ic 1084 net/rds/ib_cm.c BUG_ON(ic->rds_ibdev); ic 1087 net/rds/ib_cm.c if (ic->i_data_op) { ic 1090 net/rds/ib_cm.c rm = container_of(ic->i_data_op, struct rds_message, data); ic 1092 net/rds/ib_cm.c ic->i_data_op = NULL; ic 1096 net/rds/ib_cm.c clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); ic 1098 net/rds/ib_cm.c atomic64_set(&ic->i_ack_next, 0); ic 1100 net/rds/ib_cm.c ic->i_ack_next = 0; ic 1102 net/rds/ib_cm.c ic->i_ack_recv = 0; ic 1105 net/rds/ib_cm.c ic->i_flowctl = 0; ic 1106 net/rds/ib_cm.c atomic_set(&ic->i_credits, 0); ic 1109 net/rds/ib_cm.c rds_ib_ring_init(&ic->i_send_ring, ic->i_send_ring.w_nr); ic 1110 net/rds/ib_cm.c rds_ib_ring_init(&ic->i_recv_ring, ic->i_recv_ring.w_nr); ic 1112 net/rds/ib_cm.c if (ic->i_ibinc) { ic 1113 net/rds/ib_cm.c rds_inc_put(&ic->i_ibinc->ii_inc); ic 1114 net/rds/ib_cm.c ic->i_ibinc = NULL; ic 1117 net/rds/ib_cm.c vfree(ic->i_sends); ic 1118 net/rds/ib_cm.c ic->i_sends = NULL; ic 1119 net/rds/ib_cm.c vfree(ic->i_recvs); ic 1120 net/rds/ib_cm.c ic->i_recvs = NULL; ic 1121 net/rds/ib_cm.c ic->i_active_side = false; ic 1126 net/rds/ib_cm.c struct rds_ib_connection *ic; ic 1131 net/rds/ib_cm.c ic = kzalloc(sizeof(struct rds_ib_connection), gfp); ic 1132 net/rds/ib_cm.c if (!ic) ic 1135 net/rds/ib_cm.c ret = rds_ib_recv_alloc_caches(ic, gfp); ic 1137 net/rds/ib_cm.c kfree(ic); ic 1141 net/rds/ib_cm.c INIT_LIST_HEAD(&ic->ib_node); ic 1142 net/rds/ib_cm.c tasklet_init(&ic->i_send_tasklet, rds_ib_tasklet_fn_send, ic 1143 net/rds/ib_cm.c (unsigned long)ic); ic 1144 net/rds/ib_cm.c tasklet_init(&ic->i_recv_tasklet, rds_ib_tasklet_fn_recv, ic 1145 net/rds/ib_cm.c (unsigned long)ic); ic 1146 net/rds/ib_cm.c mutex_init(&ic->i_recv_mutex); ic 1148 net/rds/ib_cm.c spin_lock_init(&ic->i_ack_lock); ic 1150 net/rds/ib_cm.c atomic_set(&ic->i_signaled_sends, 0); ic 1151 net/rds/ib_cm.c atomic_set(&ic->i_fastreg_wrs, RDS_IB_DEFAULT_FR_WR); ic 1157 net/rds/ib_cm.c rds_ib_ring_init(&ic->i_send_ring, 0); ic 1158 net/rds/ib_cm.c rds_ib_ring_init(&ic->i_recv_ring, 0); ic 1160 net/rds/ib_cm.c ic->conn = conn; ic 1161 net/rds/ib_cm.c conn->c_transport_data = ic; ic 1164 net/rds/ib_cm.c list_add_tail(&ic->ib_node, &ib_nodev_conns); ic 1177 net/rds/ib_cm.c struct rds_ib_connection *ic = arg; ic 1180 net/rds/ib_cm.c rdsdebug("ic %p\n", ic); ic 1187 net/rds/ib_cm.c lock_ptr = ic->rds_ibdev ? 
&ic->rds_ibdev->spinlock : &ib_nodev_conns_lock; ic 1190 net/rds/ib_cm.c list_del(&ic->ib_node); ic 1193 net/rds/ib_cm.c rds_ib_recv_free_caches(ic); ic 1195 net/rds/ib_cm.c kfree(ic); ic 47 net/rds/ib_frmr.c atomic_dec(&ibmr->ic->i_fastreg_inuse_count); ic 129 net/rds/ib_frmr.c while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { ic 130 net/rds/ib_frmr.c atomic_inc(&ibmr->ic->i_fastreg_wrs); ic 143 net/rds/ib_frmr.c atomic_inc(&ibmr->ic->i_fastreg_inuse_count); ic 164 net/rds/ib_frmr.c ret = ib_post_send(ibmr->ic->i_cm_id->qp, ®_wr.wr, NULL); ic 169 net/rds/ib_frmr.c atomic_inc(&ibmr->ic->i_fastreg_wrs); ic 270 net/rds/ib_frmr.c struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id; ic 279 net/rds/ib_frmr.c while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) { ic 280 net/rds/ib_frmr.c atomic_inc(&ibmr->ic->i_fastreg_wrs); ic 301 net/rds/ib_frmr.c atomic_inc(&ibmr->ic->i_fastreg_wrs); ic 322 net/rds/ib_frmr.c void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc) ic 329 net/rds/ib_frmr.c if (rds_conn_up(ic->conn)) ic 330 net/rds/ib_frmr.c rds_ib_conn_error(ic->conn, ic 332 net/rds/ib_frmr.c &ic->conn->c_laddr, ic 333 net/rds/ib_frmr.c &ic->conn->c_faddr, ic 354 net/rds/ib_frmr.c atomic_inc(&ic->i_fastreg_wrs); ic 402 net/rds/ib_frmr.c struct rds_ib_connection *ic, ic 410 net/rds/ib_frmr.c if (!ic) { ic 424 net/rds/ib_frmr.c ibmr->ic = ic; ic 72 net/rds/ib_mr.h struct rds_ib_connection *ic; ic 144 net/rds/ib_mr.h struct rds_ib_connection *ic, ic 121 net/rds/ib_rdma.c struct rds_ib_connection *ic = conn->c_transport_data; ic 126 net/rds/ib_rdma.c BUG_ON(list_empty(&ic->ib_node)); ic 127 net/rds/ib_rdma.c list_del(&ic->ib_node); ic 130 net/rds/ib_rdma.c list_add_tail(&ic->ib_node, &rds_ibdev->conn_list); ic 134 net/rds/ib_rdma.c ic->rds_ibdev = rds_ibdev; ic 140 net/rds/ib_rdma.c struct rds_ib_connection *ic = conn->c_transport_data; ic 146 net/rds/ib_rdma.c BUG_ON(list_empty(&ic->ib_node)); ic 147 net/rds/ib_rdma.c list_del(&ic->ib_node); ic 150 net/rds/ib_rdma.c list_add_tail(&ic->ib_node, &ib_nodev_conns); ic 154 net/rds/ib_rdma.c ic->rds_ibdev = NULL; ic 160 net/rds/ib_rdma.c struct rds_ib_connection *ic, *_ic; ic 168 net/rds/ib_rdma.c list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node) ic 169 net/rds/ib_rdma.c rds_conn_destroy(ic->conn); ic 535 net/rds/ib_rdma.c struct rds_ib_connection *ic = NULL; ic 545 net/rds/ib_rdma.c ic = conn->c_transport_data; ic 553 net/rds/ib_rdma.c ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret); ic 47 net/rds/ib_recv.c void rds_ib_recv_init_ring(struct rds_ib_connection *ic) ic 52 net/rds/ib_recv.c for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) { ic 64 net/rds/ib_recv.c sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header)); ic 66 net/rds/ib_recv.c sge->lkey = ic->i_pd->local_dma_lkey; ic 71 net/rds/ib_recv.c sge->lkey = ic->i_pd->local_dma_lkey; ic 121 net/rds/ib_recv.c int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp) ic 125 net/rds/ib_recv.c ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp); ic 127 net/rds/ib_recv.c ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp); ic 129 net/rds/ib_recv.c free_percpu(ic->i_cache_incs.percpu); ic 155 net/rds/ib_recv.c void rds_ib_recv_free_caches(struct rds_ib_connection *ic) ic 163 net/rds/ib_recv.c rds_ib_cache_xfer_to_ready(&ic->i_cache_incs); ic 164 net/rds/ib_recv.c rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list); ic 165 net/rds/ib_recv.c free_percpu(ic->i_cache_incs.percpu); ic 174 net/rds/ib_recv.c 
ic 121 net/rds/ib_rdma.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 126 net/rds/ib_rdma.c BUG_ON(list_empty(&ic->ib_node));
ic 127 net/rds/ib_rdma.c list_del(&ic->ib_node);
ic 130 net/rds/ib_rdma.c list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
ic 134 net/rds/ib_rdma.c ic->rds_ibdev = rds_ibdev;
ic 140 net/rds/ib_rdma.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 146 net/rds/ib_rdma.c BUG_ON(list_empty(&ic->ib_node));
ic 147 net/rds/ib_rdma.c list_del(&ic->ib_node);
ic 150 net/rds/ib_rdma.c list_add_tail(&ic->ib_node, &ib_nodev_conns);
ic 154 net/rds/ib_rdma.c ic->rds_ibdev = NULL;
ic 160 net/rds/ib_rdma.c struct rds_ib_connection *ic, *_ic;
ic 168 net/rds/ib_rdma.c list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
ic 169 net/rds/ib_rdma.c rds_conn_destroy(ic->conn);
ic 535 net/rds/ib_rdma.c struct rds_ib_connection *ic = NULL;
ic 545 net/rds/ib_rdma.c ic = conn->c_transport_data;
ic 553 net/rds/ib_rdma.c ibmr = rds_ib_reg_frmr(rds_ibdev, ic, sg, nents, key_ret);
ic 47 net/rds/ib_recv.c void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
ic 52 net/rds/ib_recv.c for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
ic 64 net/rds/ib_recv.c sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
ic 66 net/rds/ib_recv.c sge->lkey = ic->i_pd->local_dma_lkey;
ic 71 net/rds/ib_recv.c sge->lkey = ic->i_pd->local_dma_lkey;
ic 121 net/rds/ib_recv.c int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
ic 125 net/rds/ib_recv.c ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
ic 127 net/rds/ib_recv.c ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
ic 129 net/rds/ib_recv.c free_percpu(ic->i_cache_incs.percpu);
ic 155 net/rds/ib_recv.c void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
ic 163 net/rds/ib_recv.c rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
ic 164 net/rds/ib_recv.c rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list);
ic 165 net/rds/ib_recv.c free_percpu(ic->i_cache_incs.percpu);
ic 174 net/rds/ib_recv.c rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
ic 175 net/rds/ib_recv.c rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list);
ic 176 net/rds/ib_recv.c free_percpu(ic->i_cache_frags.percpu);
ic 192 net/rds/ib_recv.c static void rds_ib_frag_free(struct rds_ib_connection *ic,
ic 197 net/rds/ib_recv.c rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags);
ic 198 net/rds/ib_recv.c atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
ic 208 net/rds/ib_recv.c struct rds_ib_connection *ic = inc->i_conn->c_transport_data;
ic 215 net/rds/ib_recv.c rds_ib_frag_free(ic, frag);
ic 220 net/rds/ib_recv.c rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs);
ic 223 net/rds/ib_recv.c static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
ic 231 net/rds/ib_recv.c ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
ic 232 net/rds/ib_recv.c rds_ib_frag_free(ic, recv->r_frag);
ic 237 net/rds/ib_recv.c void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
ic 241 net/rds/ib_recv.c for (i = 0; i < ic->i_recv_ring.w_nr; i++)
ic 242 net/rds/ib_recv.c rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
ic 245 net/rds/ib_recv.c static struct rds_ib_incoming *rds_ib_refill_one_inc(struct rds_ib_connection *ic,
ic 252 net/rds/ib_recv.c cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
ic 270 net/rds/ib_recv.c rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);
ic 275 net/rds/ib_recv.c static struct rds_page_frag *rds_ib_refill_one_frag(struct rds_ib_connection *ic,
ic 282 net/rds/ib_recv.c cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags);
ic 285 net/rds/ib_recv.c atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs);
ic 310 net/rds/ib_recv.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 321 net/rds/ib_recv.c if (!ic->i_cache_incs.ready)
ic 322 net/rds/ib_recv.c rds_ib_cache_xfer_to_ready(&ic->i_cache_incs);
ic 323 net/rds/ib_recv.c if (!ic->i_cache_frags.ready)
ic 324 net/rds/ib_recv.c rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
ic 331 net/rds/ib_recv.c recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask);
ic 337 net/rds/ib_recv.c recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask);
ic 341 net/rds/ib_recv.c ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
ic 346 net/rds/ib_recv.c sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
ic 383 net/rds/ib_recv.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 399 net/rds/ib_recv.c rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
ic 400 net/rds/ib_recv.c if (pos >= ic->i_recv_ring.w_nr) {
ic 406 net/rds/ib_recv.c recv = &ic->i_recvs[pos];
ic 418 net/rds/ib_recv.c ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL);
ic 436 net/rds/ib_recv.c if (ic->i_flowctl && posted)
ic 440 net/rds/ib_recv.c rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
ic 456 net/rds/ib_recv.c (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
ic 457 net/rds/ib_recv.c rds_ib_ring_empty(&ic->i_recv_ring))) {
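The refill hits above (rds_ib_refill_one_inc and rds_ib_refill_one_frag) share one shape: try the connection's recycled-object cache first, fall back to a fresh allocation, then initialise. A sketch of that fast-path/slow-path split reassembled from the fragments; the slab-cache name and the elided refcounting are assumptions:

    /* Fast path: reuse a cached rds_ib_incoming; slow path: allocate one. */
    cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs);
    if (cache_item) {
            ibinc = container_of(cache_item, struct rds_ib_incoming, ii_cache_entry);
    } else {
            ibinc = kmem_cache_alloc(rds_ib_incoming_slab, slab_mask);
            if (!ibinc)
                    return NULL;
    }
    rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr);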
ic 573 net/rds/ib_recv.c void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
ic 575 net/rds/ib_recv.c struct ib_send_wr *wr = &ic->i_ack_wr;
ic 576 net/rds/ib_recv.c struct ib_sge *sge = &ic->i_ack_sge;
ic 578 net/rds/ib_recv.c sge->addr = ic->i_ack_dma;
ic 580 net/rds/ib_recv.c sge->lkey = ic->i_pd->local_dma_lkey;
ic 612 net/rds/ib_recv.c void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
ic 616 net/rds/ib_recv.c spin_lock_irqsave(&ic->i_ack_lock, flags);
ic 617 net/rds/ib_recv.c ic->i_ack_next = seq;
ic 619 net/rds/ib_recv.c set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
ic 620 net/rds/ib_recv.c spin_unlock_irqrestore(&ic->i_ack_lock, flags);
ic 623 net/rds/ib_recv.c static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
ic 628 net/rds/ib_recv.c clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
ic 630 net/rds/ib_recv.c spin_lock_irqsave(&ic->i_ack_lock, flags);
ic 631 net/rds/ib_recv.c seq = ic->i_ack_next;
ic 632 net/rds/ib_recv.c spin_unlock_irqrestore(&ic->i_ack_lock, flags);
ic 637 net/rds/ib_recv.c void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
ic 639 net/rds/ib_recv.c atomic64_set(&ic->i_ack_next, seq);
ic 642 net/rds/ib_recv.c set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
ic 646 net/rds/ib_recv.c static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
ic 648 net/rds/ib_recv.c clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
ic 651 net/rds/ib_recv.c return atomic64_read(&ic->i_ack_next);
ic 656 net/rds/ib_recv.c static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
ic 658 net/rds/ib_recv.c struct rds_header *hdr = ic->i_ack;
ic 662 net/rds/ib_recv.c seq = rds_ib_get_ack(ic);
ic 664 net/rds/ib_recv.c rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
ic 669 net/rds/ib_recv.c ic->i_ack_queued = jiffies;
ic 671 net/rds/ib_recv.c ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL);
ic 676 net/rds/ib_recv.c clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
ic 677 net/rds/ib_recv.c set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
ic 681 net/rds/ib_recv.c rds_ib_conn_error(ic->conn, "sending ack failed\n");
ic 724 net/rds/ib_recv.c void rds_ib_attempt_ack(struct rds_ib_connection *ic)
ic 728 net/rds/ib_recv.c if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
ic 731 net/rds/ib_recv.c if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
ic 737 net/rds/ib_recv.c if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
ic 739 net/rds/ib_recv.c clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
ic 743 net/rds/ib_recv.c clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
ic 744 net/rds/ib_recv.c rds_ib_send_ack(ic, adv_credits);
ic 751 net/rds/ib_recv.c void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
ic 753 net/rds/ib_recv.c clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
ic 754 net/rds/ib_recv.c rds_ib_attempt_ack(ic);
ic 761 net/rds/ib_recv.c u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
ic 763 net/rds/ib_recv.c if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
ic 765 net/rds/ib_recv.c return rds_ib_get_ack(ic);
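rds_ib_set_ack() and rds_ib_get_ack() appear twice above because ib_recv.c carries two compile-time variants of the same state: on configurations with usable 64-bit atomics the next ACK sequence lives in an atomic64_t, otherwise i_ack_lock guards a plain u64. A sketch of the setter pair reassembled from the fragments; memory barriers are elided, and the exact config guard (RDS keys this off KERNEL_HAS_ATOMIC64) is an assumption:

    #ifndef KERNEL_HAS_ATOMIC64
    void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
    {
            unsigned long flags;

            spin_lock_irqsave(&ic->i_ack_lock, flags);      /* no 64-bit atomics */
            ic->i_ack_next = seq;
            if (ack_required)
                    set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
            spin_unlock_irqrestore(&ic->i_ack_lock, flags);
    }
    #else
    void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required)
    {
            atomic64_set(&ic->i_ack_next, seq);             /* lock-free update */
            if (ack_required)
                    set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
    }
    #endif

Either way, IB_ACK_REQUESTED and IB_ACK_IN_FLIGHT in i_ack_flags carry the "an ack is wanted" / "an ack WR is posted" handshake that rds_ib_attempt_ack() and rds_ib_ack_send_complete() drive above.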
ic 845 net/rds/ib_recv.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 846 net/rds/ib_recv.c struct rds_ib_incoming *ibinc = ic->i_ibinc;
ic 851 net/rds/ib_recv.c rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
ic 864 net/rds/ib_recv.c ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
ic 900 net/rds/ib_recv.c rds_ib_frag_free(ic, recv->r_frag);
ic 914 net/rds/ib_recv.c ic->i_ibinc = ibinc;
ic 920 net/rds/ib_recv.c ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);
ic 924 net/rds/ib_recv.c rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
ic 925 net/rds/ib_recv.c ic->i_recv_data_rem, hdr->h_flags);
ic 943 net/rds/ib_recv.c if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
ic 944 net/rds/ib_recv.c ic->i_recv_data_rem -= RDS_FRAG_SIZE;
ic 946 net/rds/ib_recv.c ic->i_recv_data_rem = 0;
ic 947 net/rds/ib_recv.c ic->i_ibinc = NULL;
ic 970 net/rds/ib_recv.c void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
ic 974 net/rds/ib_recv.c struct rds_connection *conn = ic->conn;
ic 983 net/rds/ib_recv.c recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];
ic 984 net/rds/ib_recv.c ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1,
ic 1010 net/rds/ib_recv.c rds_ib_frag_free(ic, recv->r_frag);
ic 1013 net/rds/ib_recv.c rds_ib_ring_free(&ic->i_recv_ring, 1);
ic 1018 net/rds/ib_recv.c if (rds_ib_ring_empty(&ic->i_recv_ring))
ic 1021 net/rds/ib_recv.c if (rds_ib_ring_low(&ic->i_recv_ring)) {
ic 1030 net/rds/ib_recv.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 1034 net/rds/ib_recv.c rds_ib_attempt_ack(ic);
ic 72 net/rds/ib_send.c static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
ic 77 net/rds/ib_send.c ib_dma_unmap_sg(ic->i_cm_id->device,
ic 82 net/rds/ib_send.c static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
ic 87 net/rds/ib_send.c ib_dma_unmap_sg(ic->i_cm_id->device,
ic 122 net/rds/ib_send.c static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
ic 128 net/rds/ib_send.c ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
ic 149 net/rds/ib_send.c static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
ic 160 net/rds/ib_send.c rds_ib_send_unmap_data(ic, send->s_op, wc_status);
ic 167 net/rds/ib_send.c rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
ic 174 net/rds/ib_send.c rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
ic 189 net/rds/ib_send.c void rds_ib_send_init_ring(struct rds_ib_connection *ic)
ic 194 net/rds/ib_send.c for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
ic 204 net/rds/ib_send.c sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
ic 206 net/rds/ib_send.c sge->lkey = ic->i_pd->local_dma_lkey;
ic 208 net/rds/ib_send.c send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
ic 212 net/rds/ib_send.c void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
ic 217 net/rds/ib_send.c for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
ic 219 net/rds/ib_send.c rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
ic 227 net/rds/ib_send.c static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
ic 229 net/rds/ib_send.c if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
ic 232 net/rds/ib_send.c BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
ic 241 net/rds/ib_send.c void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
ic 244 net/rds/ib_send.c struct rds_connection *conn = ic->conn;
ic 259 net/rds/ib_send.c if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
ic 261 net/rds/ib_send.c rds_ib_ack_send_complete(ic);
ic 265 net/rds/ib_send.c oldest = rds_ib_ring_oldest(&ic->i_send_ring);
ic 267 net/rds/ib_send.c completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);
ic 270 net/rds/ib_send.c send = &ic->i_sends[oldest];
ic 274 net/rds/ib_send.c rm = rds_ib_send_unmap_op(ic, send, wc->status);
ic 290 net/rds/ib_send.c oldest = (oldest + 1) % ic->i_send_ring.w_nr;
ic 293 net/rds/ib_send.c rds_ib_ring_free(&ic->i_send_ring, completed);
ic 294 net/rds/ib_send.c rds_ib_sub_signaled(ic, nr_sig);
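Because sends are only selectively signalled (see rds_ib_set_wr_signal_state below), one send CQE retires a whole batch of descriptors. The rds_ib_send_cqe_handler fragments above reassemble into a ring walk from the oldest posted slot up to the wr_id the completion names; a condensed sketch, with message accounting and error handling elided:

    oldest = rds_ib_ring_oldest(&ic->i_send_ring);
    completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

    for (i = 0; i < completed; i++) {
            send = &ic->i_sends[oldest];
            rm = rds_ib_send_unmap_op(ic, send, wc->status); /* undo DMA mapping */
            oldest = (oldest + 1) % ic->i_send_ring.w_nr;    /* advance, wrapping */
    }

    rds_ib_ring_free(&ic->i_send_ring, completed);  /* return slots in one batch */
    rds_ib_sub_signaled(ic, nr_sig);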
ic 354 net/rds/ib_send.c int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
ic 361 net/rds/ib_send.c if (!ic->i_flowctl)
ic 366 net/rds/ib_send.c oldval = newval = atomic_read(&ic->i_credits);
ic 378 net/rds/ib_send.c struct rds_connection *conn = ic->i_cm_id->context;
ic 400 net/rds/ib_send.c if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
ic 409 net/rds/ib_send.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 416 net/rds/ib_send.c IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
ic 419 net/rds/ib_send.c atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
ic 430 net/rds/ib_send.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 435 net/rds/ib_send.c atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);
ic 449 net/rds/ib_send.c if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
ic 450 net/rds/ib_send.c set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
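The flow-control lines above work because i_credits packs two counters into one atomic_t: send credits this node may consume, and freshly posted receive buffers it still owes the peer as an advertisement. rds_ib_send_grab_credits() claims from both halves in an optimistic retry loop. A sketch of the layout and the loop skeleton; the macro definitions shown match what net/rds/ib.h is understood to use, but treat them as illustrative:

    #define IB_SET_SEND_CREDITS(v)  ((v) & 0xffff)   /* low 16 bits: send credits */
    #define IB_SET_POST_CREDITS(v)  ((v) << 16)      /* high 16 bits: posted buffers */
    #define IB_GET_SEND_CREDITS(v)  ((v) & 0xffff)
    #define IB_GET_POST_CREDITS(v)  ((v) >> 16)

    do {
            oldval = newval = atomic_read(&ic->i_credits);
            /* ... deduct wanted send credits and claimed post credits
             * from newval, giving up if too few remain ... */
    } while (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval);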
ic 453 net/rds/ib_send.c static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
ic 462 net/rds/ib_send.c if (ic->i_unsignaled_wrs-- == 0 || notify) {
ic 463 net/rds/ib_send.c ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
ic 486 net/rds/ib_send.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 487 net/rds/ib_send.c struct ib_device *dev = ic->i_cm_id->device;
ic 523 net/rds/ib_send.c work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
ic 531 net/rds/ib_send.c if (ic->i_flowctl) {
ic 532 net/rds/ib_send.c credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
ic 535 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
ic 548 net/rds/ib_send.c if (!ic->i_data_op) {
ic 554 net/rds/ib_send.c rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
ic 557 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ic 568 net/rds/ib_send.c ic->i_data_op = &rm->data;
ic 595 net/rds/ib_send.c rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
ic 601 net/rds/ib_send.c if (ic->i_flowctl) {
ic 602 net/rds/ib_send.c rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
ic 618 net/rds/ib_send.c send = &ic->i_sends[pos];
ic 621 net/rds/ib_send.c scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
ic 634 net/rds/ib_send.c send->s_sge[0].addr = ic->i_send_hdrs_dma
ic 638 net/rds/ib_send.c memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
ic 660 net/rds/ib_send.c rds_ib_set_wr_signal_state(ic, send, false);
ic 665 net/rds/ib_send.c if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
ic 666 net/rds/ib_send.c rds_ib_set_wr_signal_state(ic, send, true);
ic 676 net/rds/ib_send.c if (ic->i_flowctl && adv_credits) {
ic 677 net/rds/ib_send.c struct rds_header *hdr = &ic->i_send_hdrs[pos];
ic 690 net/rds/ib_send.c pos = (pos + 1) % ic->i_send_ring.w_nr;
ic 691 net/rds/ib_send.c send = &ic->i_sends[pos];
ic 704 net/rds/ib_send.c prev->s_op = ic->i_data_op;
ic 707 net/rds/ib_send.c nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
ic 708 net/rds/ib_send.c ic->i_data_op = NULL;
ic 713 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
ic 716 net/rds/ib_send.c if (ic->i_flowctl && i < credit_alloc)
ic 720 net/rds/ib_send.c atomic_add(nr_sig, &ic->i_signaled_sends);
ic 724 net/rds/ib_send.c ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
ic 725 net/rds/ib_send.c rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
ic 731 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ic 732 net/rds/ib_send.c rds_ib_sub_signaled(ic, nr_sig);
ic 734 net/rds/ib_send.c ic->i_data_op = prev->s_op;
ic 738 net/rds/ib_send.c rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
ic 755 net/rds/ib_send.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 763 net/rds/ib_send.c work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
ic 771 net/rds/ib_send.c send = &ic->i_sends[pos];
ic 788 net/rds/ib_send.c nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
ic 797 net/rds/ib_send.c ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
ic 798 net/rds/ib_send.c rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
ic 800 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ic 809 net/rds/ib_send.c send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;
ic 815 net/rds/ib_send.c atomic_add(nr_sig, &ic->i_signaled_sends);
ic 818 net/rds/ib_send.c ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
ic 819 net/rds/ib_send.c rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
ic 825 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ic 826 net/rds/ib_send.c rds_ib_sub_signaled(ic, nr_sig);
ic 841 net/rds/ib_send.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 849 net/rds/ib_send.c u32 max_sge = ic->rds_ibdev->max_sge;
ic 861 net/rds/ib_send.c op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
ic 864 net/rds/ib_send.c rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
ic 880 net/rds/ib_send.c work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
ic 882 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ic 888 net/rds/ib_send.c send = &ic->i_sends[pos];
ic 901 net/rds/ib_send.c nr_sig += rds_ib_set_wr_signal_state(ic, send,
ic 925 net/rds/ib_send.c send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;
ic 928 net/rds/ib_send.c rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);
ic 940 net/rds/ib_send.c if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
ic 941 net/rds/ib_send.c send = ic->i_sends;
ic 951 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
ic 956 net/rds/ib_send.c atomic_add(nr_sig, &ic->i_signaled_sends);
ic 959 net/rds/ib_send.c ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
ic 960 net/rds/ib_send.c rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
ic 966 net/rds/ib_send.c rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
ic 967 net/rds/ib_send.c rds_ib_sub_signaled(ic, nr_sig);
ic 984 net/rds/ib_send.c struct rds_ib_connection *ic = conn->c_transport_data;
ic 988 net/rds/ib_send.c rds_ib_attempt_ack(ic);
ic 102 sound/soc/codecs/ak4613.c u8 ic;
ic 436 sound/soc/codecs/ak4613.c snd_soc_component_update_bits(component, ICTRL, ICTRL_MASK, priv->ic);
ic 624 sound/soc/codecs/ak4613.c priv->ic |= 1 << i;
ic 920 sound/soc/sh/rcar/core.c struct snd_interval ic;
ic 929 sound/soc/sh/rcar/core.c ic = *ic_;
ic 930 sound/soc/sh/rcar/core.c ic.min =
ic 931 sound/soc/sh/rcar/core.c ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
ic 935 sound/soc/sh/rcar/core.c &ic, ir);
ic 943 sound/soc/sh/rcar/core.c struct snd_interval ic;
ic 952 sound/soc/sh/rcar/core.c ic = *ic_;
ic 953 sound/soc/sh/rcar/core.c ic.min =
ic 954 sound/soc/sh/rcar/core.c ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
ic 958 sound/soc/sh/rcar/core.c ir, &ic);
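The sound/soc/sh/rcar/core.c hits close the list with an unrelated use of the name: ic is a scratch snd_interval in two hw_rule callbacks. Both copy the incoming channels interval, pin its min and max to the channel count the SSI supports for the current params, and hand the clamped copy (paired with the rate interval, in whichever order that rule constrains) to the driver's rule helper. A sketch of the clamp step only, reassembled from the fragments above:

    struct snd_interval ic;

    ic = *ic_;      /* scratch copy of the channels interval */
    ic.min =
    ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
    /* the clamped copy then constrains the other interval: (&ic, ir) in the
     * rate rule, (ir, &ic) in the channels rule */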