hd 482 arch/arm/mach-davinci/board-dm646x-evm.c static int set_vpif_clock(int mux_mode, int hd) hd 516 arch/arm/mach-davinci/board-dm646x-evm.c if (hd >= 1) hd 413 arch/ia64/include/asm/pal.h hd : 1, /* Non-essential hw hd 703 arch/ia64/include/asm/pal.h #define pmci_proc_hardware_damage pme_processor.hd hd 52 arch/mips/sgi-ip27/ip27-irq.c struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); hd 53 arch/mips/sgi-ip27/ip27-irq.c unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); hd 56 arch/mips/sgi-ip27/ip27-irq.c __raw_writeq(mask[0], hd->irq_mask[0]); hd 57 arch/mips/sgi-ip27/ip27-irq.c __raw_writeq(mask[1], hd->irq_mask[1]); hd 62 arch/mips/sgi-ip27/ip27-irq.c struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); hd 63 arch/mips/sgi-ip27/ip27-irq.c unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu); hd 66 arch/mips/sgi-ip27/ip27-irq.c __raw_writeq(mask[0], hd->irq_mask[0]); hd 67 arch/mips/sgi-ip27/ip27-irq.c __raw_writeq(mask[1], hd->irq_mask[1]); hd 70 arch/mips/sgi-ip27/ip27-irq.c static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask) hd 80 arch/mips/sgi-ip27/ip27-irq.c hd->cpu = cpu; hd 82 arch/mips/sgi-ip27/ip27-irq.c hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A); hd 83 arch/mips/sgi-ip27/ip27-irq.c hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A); hd 85 arch/mips/sgi-ip27/ip27-irq.c hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B); hd 86 arch/mips/sgi-ip27/ip27-irq.c hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B); hd 93 arch/mips/sgi-ip27/ip27-irq.c struct hub_irq_data *hd = irq_data_get_irq_chip_data(d); hd 95 arch/mips/sgi-ip27/ip27-irq.c if (!hd) hd 101 arch/mips/sgi-ip27/ip27-irq.c setup_hub_mask(hd, mask); hd 106 arch/mips/sgi-ip27/ip27-irq.c irq_data_update_effective_affinity(d, cpumask_of(hd->cpu)); hd 122 arch/mips/sgi-ip27/ip27-irq.c struct hub_irq_data *hd; hd 130 arch/mips/sgi-ip27/ip27-irq.c hd = kzalloc(sizeof(*hd), GFP_KERNEL); hd 131 arch/mips/sgi-ip27/ip27-irq.c if (!hd) hd 136 arch/mips/sgi-ip27/ip27-irq.c kfree(hd); hd 139 arch/mips/sgi-ip27/ip27-irq.c irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd, hd 144 arch/mips/sgi-ip27/ip27-irq.c setup_hub_mask(hd, &hub->h_cpus); hd 145 arch/mips/sgi-ip27/ip27-irq.c info->nasid = cpu_to_node(hd->cpu); hd 32 arch/sh/drivers/heartbeat.c static inline void heartbeat_toggle_bit(struct heartbeat_data *hd, hd 37 arch/sh/drivers/heartbeat.c new = (1 << hd->bit_pos[bit]); hd 41 arch/sh/drivers/heartbeat.c new &= hd->mask; hd 43 arch/sh/drivers/heartbeat.c switch (hd->regsize) { hd 45 arch/sh/drivers/heartbeat.c new |= ioread32(hd->base) & ~hd->mask; hd 46 arch/sh/drivers/heartbeat.c iowrite32(new, hd->base); hd 49 arch/sh/drivers/heartbeat.c new |= ioread16(hd->base) & ~hd->mask; hd 50 arch/sh/drivers/heartbeat.c iowrite16(new, hd->base); hd 53 arch/sh/drivers/heartbeat.c new |= ioread8(hd->base) & ~hd->mask; hd 54 arch/sh/drivers/heartbeat.c iowrite8(new, hd->base); hd 61 arch/sh/drivers/heartbeat.c struct heartbeat_data *hd = from_timer(hd, t, timer); hd 64 arch/sh/drivers/heartbeat.c heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED); hd 67 arch/sh/drivers/heartbeat.c if ((bit == 0) || (bit == (hd->nr_bits)-1)) hd 70 arch/sh/drivers/heartbeat.c mod_timer(&hd->timer, jiffies + (110 - ((300 << FSHIFT) / hd 77 arch/sh/drivers/heartbeat.c struct heartbeat_data *hd; hd 92 arch/sh/drivers/heartbeat.c hd = pdev->dev.platform_data; hd 94 arch/sh/drivers/heartbeat.c hd = kzalloc(sizeof(struct heartbeat_data), GFP_KERNEL); hd 95 
arch/sh/drivers/heartbeat.c if (unlikely(!hd)) hd 99 arch/sh/drivers/heartbeat.c hd->base = ioremap_nocache(res->start, resource_size(res)); hd 100 arch/sh/drivers/heartbeat.c if (unlikely(!hd->base)) { hd 104 arch/sh/drivers/heartbeat.c kfree(hd); hd 109 arch/sh/drivers/heartbeat.c if (!hd->nr_bits) { hd 110 arch/sh/drivers/heartbeat.c hd->bit_pos = default_bit_pos; hd 111 arch/sh/drivers/heartbeat.c hd->nr_bits = ARRAY_SIZE(default_bit_pos); hd 114 arch/sh/drivers/heartbeat.c hd->mask = 0; hd 115 arch/sh/drivers/heartbeat.c for (i = 0; i < hd->nr_bits; i++) hd 116 arch/sh/drivers/heartbeat.c hd->mask |= (1 << hd->bit_pos[i]); hd 118 arch/sh/drivers/heartbeat.c if (!hd->regsize) { hd 121 arch/sh/drivers/heartbeat.c hd->regsize = 32; hd 124 arch/sh/drivers/heartbeat.c hd->regsize = 16; hd 128 arch/sh/drivers/heartbeat.c hd->regsize = 8; hd 133 arch/sh/drivers/heartbeat.c timer_setup(&hd->timer, heartbeat_timer, 0); hd 134 arch/sh/drivers/heartbeat.c platform_set_drvdata(pdev, hd); hd 136 arch/sh/drivers/heartbeat.c return mod_timer(&hd->timer, jiffies + 1); hd 188 arch/x86/kernel/hpet.c struct hpet_data hd; hd 191 arch/x86/kernel/hpet.c memset(&hd, 0, sizeof(hd)); hd 192 arch/x86/kernel/hpet.c hd.hd_phys_address = hpet_address; hd 193 arch/x86/kernel/hpet.c hd.hd_address = hpet_virt_address; hd 194 arch/x86/kernel/hpet.c hd.hd_nirqs = hpet_base.nr_channels; hd 201 arch/x86/kernel/hpet.c hd.hd_irq[0] = HPET_LEGACY_8254; hd 202 arch/x86/kernel/hpet.c hd.hd_irq[1] = HPET_LEGACY_RTC; hd 208 arch/x86/kernel/hpet.c hd.hd_irq[i] = hc->irq; hd 217 arch/x86/kernel/hpet.c hpet_reserve_timer(&hd, hc->num); hd 222 arch/x86/kernel/hpet.c hpet_alloc(&hd); hd 1369 block/genhd.c struct hd_struct *hd; hd 1382 block/genhd.c while ((hd = disk_part_iter_next(&piter))) { hd 1383 block/genhd.c inflight = part_in_flight(gp->queue, hd); hd 1389 block/genhd.c MAJOR(part_devt(hd)), MINOR(part_devt(hd)), hd 1390 block/genhd.c disk_name(gp, hd->partno, buf), hd 1391 block/genhd.c part_stat_read(hd, ios[STAT_READ]), hd 1392 block/genhd.c part_stat_read(hd, merges[STAT_READ]), hd 1393 block/genhd.c part_stat_read(hd, sectors[STAT_READ]), hd 1394 block/genhd.c (unsigned int)part_stat_read_msecs(hd, STAT_READ), hd 1395 block/genhd.c part_stat_read(hd, ios[STAT_WRITE]), hd 1396 block/genhd.c part_stat_read(hd, merges[STAT_WRITE]), hd 1397 block/genhd.c part_stat_read(hd, sectors[STAT_WRITE]), hd 1398 block/genhd.c (unsigned int)part_stat_read_msecs(hd, STAT_WRITE), hd 1400 block/genhd.c jiffies_to_msecs(part_stat_read(hd, io_ticks)), hd 1401 block/genhd.c jiffies_to_msecs(part_stat_read(hd, time_in_queue)), hd 1402 block/genhd.c part_stat_read(hd, ios[STAT_DISCARD]), hd 1403 block/genhd.c part_stat_read(hd, merges[STAT_DISCARD]), hd 1404 block/genhd.c part_stat_read(hd, sectors[STAT_DISCARD]), hd 1405 block/genhd.c (unsigned int)part_stat_read_msecs(hd, STAT_DISCARD) hd 35 block/partition-generic.c char *disk_name(struct gendisk *hd, int partno, char *buf) hd 38 block/partition-generic.c snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name); hd 39 block/partition-generic.c else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) hd 40 block/partition-generic.c snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno); hd 42 block/partition-generic.c snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno); hd 115 block/partitions/check.c static struct parsed_partitions *allocate_partitions(struct gendisk *hd) hd 124 block/partitions/check.c nr = disk_max_parts(hd); hd 143 block/partitions/check.c check_partition(struct 
gendisk *hd, struct block_device *bdev) hd 148 block/partitions/check.c state = allocate_partitions(hd); hd 159 block/partitions/check.c disk_name(hd, 0, state->name); hd 42 drivers/auxdisplay/hd44780.c struct hd44780 *hd = lcd->drvdata; hd 44 drivers/auxdisplay/hd44780.c if (hd->pins[PIN_CTRL_BL]) hd 45 drivers/auxdisplay/hd44780.c gpiod_set_value_cansleep(hd->pins[PIN_CTRL_BL], on); hd 48 drivers/auxdisplay/hd44780.c static void hd44780_strobe_gpio(struct hd44780 *hd) hd 53 drivers/auxdisplay/hd44780.c gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 1); hd 58 drivers/auxdisplay/hd44780.c gpiod_set_value_cansleep(hd->pins[PIN_CTRL_E], 0); hd 62 drivers/auxdisplay/hd44780.c static void hd44780_write_gpio8(struct hd44780 *hd, u8 val, unsigned int rs) hd 69 drivers/auxdisplay/hd44780.c n = hd->pins[PIN_CTRL_RW] ? 10 : 9; hd 72 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA0], NULL, values); hd 74 drivers/auxdisplay/hd44780.c hd44780_strobe_gpio(hd); hd 78 drivers/auxdisplay/hd44780.c static void hd44780_write_gpio4(struct hd44780 *hd, u8 val, unsigned int rs) hd 86 drivers/auxdisplay/hd44780.c n = hd->pins[PIN_CTRL_RW] ? 6 : 5; hd 89 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); hd 91 drivers/auxdisplay/hd44780.c hd44780_strobe_gpio(hd); hd 98 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); hd 100 drivers/auxdisplay/hd44780.c hd44780_strobe_gpio(hd); hd 106 drivers/auxdisplay/hd44780.c struct hd44780 *hd = lcd->drvdata; hd 108 drivers/auxdisplay/hd44780.c hd44780_write_gpio8(hd, cmd, 0); hd 117 drivers/auxdisplay/hd44780.c struct hd44780 *hd = lcd->drvdata; hd 119 drivers/auxdisplay/hd44780.c hd44780_write_gpio8(hd, data, 1); hd 134 drivers/auxdisplay/hd44780.c struct hd44780 *hd = lcd->drvdata; hd 136 drivers/auxdisplay/hd44780.c hd44780_write_gpio4(hd, cmd, 0); hd 146 drivers/auxdisplay/hd44780.c struct hd44780 *hd = lcd->drvdata; hd 151 drivers/auxdisplay/hd44780.c n = hd->pins[PIN_CTRL_RW] ? 
6 : 5; hd 154 drivers/auxdisplay/hd44780.c gpiod_set_array_value_cansleep(n, &hd->pins[PIN_DATA4], NULL, values); hd 156 drivers/auxdisplay/hd44780.c hd44780_strobe_gpio(hd); hd 162 drivers/auxdisplay/hd44780.c struct hd44780 *hd = lcd->drvdata; hd 164 drivers/auxdisplay/hd44780.c hd44780_write_gpio4(hd, data, 1); hd 182 drivers/auxdisplay/hd44780.c struct hd44780 *hd; hd 205 drivers/auxdisplay/hd44780.c hd = lcd->drvdata; hd 208 drivers/auxdisplay/hd44780.c hd->pins[base + i] = devm_gpiod_get_index(dev, "data", i, hd 210 drivers/auxdisplay/hd44780.c if (IS_ERR(hd->pins[base + i])) { hd 211 drivers/auxdisplay/hd44780.c ret = PTR_ERR(hd->pins[base + i]); hd 216 drivers/auxdisplay/hd44780.c hd->pins[PIN_CTRL_E] = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); hd 217 drivers/auxdisplay/hd44780.c if (IS_ERR(hd->pins[PIN_CTRL_E])) { hd 218 drivers/auxdisplay/hd44780.c ret = PTR_ERR(hd->pins[PIN_CTRL_E]); hd 222 drivers/auxdisplay/hd44780.c hd->pins[PIN_CTRL_RS] = devm_gpiod_get(dev, "rs", GPIOD_OUT_HIGH); hd 223 drivers/auxdisplay/hd44780.c if (IS_ERR(hd->pins[PIN_CTRL_RS])) { hd 224 drivers/auxdisplay/hd44780.c ret = PTR_ERR(hd->pins[PIN_CTRL_RS]); hd 229 drivers/auxdisplay/hd44780.c hd->pins[PIN_CTRL_RW] = devm_gpiod_get_optional(dev, "rw", hd 231 drivers/auxdisplay/hd44780.c if (IS_ERR(hd->pins[PIN_CTRL_RW])) { hd 232 drivers/auxdisplay/hd44780.c ret = PTR_ERR(hd->pins[PIN_CTRL_RW]); hd 236 drivers/auxdisplay/hd44780.c hd->pins[PIN_CTRL_BL] = devm_gpiod_get_optional(dev, "backlight", hd 238 drivers/auxdisplay/hd44780.c if (IS_ERR(hd->pins[PIN_CTRL_BL])) { hd 239 drivers/auxdisplay/hd44780.c ret = PTR_ERR(hd->pins[PIN_CTRL_BL]); hd 23 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 30 drivers/clk/qcom/clk-hfpll.c if (hd->config_val) hd 31 drivers/clk/qcom/clk-hfpll.c regmap_write(regmap, hd->config_reg, hd->config_val); hd 32 drivers/clk/qcom/clk-hfpll.c regmap_write(regmap, hd->m_reg, 0); hd 33 drivers/clk/qcom/clk-hfpll.c regmap_write(regmap, hd->n_reg, 1); hd 35 drivers/clk/qcom/clk-hfpll.c if (hd->user_reg) { hd 36 drivers/clk/qcom/clk-hfpll.c u32 regval = hd->user_val; hd 42 drivers/clk/qcom/clk-hfpll.c if (hd->user_vco_mask && rate > hd->low_vco_max_rate) hd 43 drivers/clk/qcom/clk-hfpll.c regval |= hd->user_vco_mask; hd 44 drivers/clk/qcom/clk-hfpll.c regmap_write(regmap, hd->user_reg, regval); hd 47 drivers/clk/qcom/clk-hfpll.c if (hd->droop_reg) hd 48 drivers/clk/qcom/clk-hfpll.c regmap_write(regmap, hd->droop_reg, hd->droop_val); hd 56 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 63 drivers/clk/qcom/clk-hfpll.c regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL); hd 72 drivers/clk/qcom/clk-hfpll.c regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N); hd 75 drivers/clk/qcom/clk-hfpll.c if (hd->status_reg) { hd 77 drivers/clk/qcom/clk-hfpll.c regmap_read(regmap, hd->status_reg, &val); hd 78 drivers/clk/qcom/clk-hfpll.c } while (!(val & BIT(hd->lock_bit))); hd 84 drivers/clk/qcom/clk-hfpll.c regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL); hd 92 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 97 drivers/clk/qcom/clk-hfpll.c regmap_read(regmap, hd->mode_reg, &mode); hd 107 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 114 drivers/clk/qcom/clk-hfpll.c regmap_update_bits(regmap, hd->mode_reg, hd 132 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 135 drivers/clk/qcom/clk-hfpll.c rate = clamp(rate, hd->min_rate, hd->max_rate); hd 138 
drivers/clk/qcom/clk-hfpll.c if (rrate > hd->max_rate) hd 152 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 167 drivers/clk/qcom/clk-hfpll.c if (hd->user_reg && hd->user_vco_mask) { hd 168 drivers/clk/qcom/clk-hfpll.c regmap_read(regmap, hd->user_reg, &val); hd 169 drivers/clk/qcom/clk-hfpll.c if (rate <= hd->low_vco_max_rate) hd 170 drivers/clk/qcom/clk-hfpll.c val &= ~hd->user_vco_mask; hd 172 drivers/clk/qcom/clk-hfpll.c val |= hd->user_vco_mask; hd 173 drivers/clk/qcom/clk-hfpll.c regmap_write(regmap, hd->user_reg, val); hd 176 drivers/clk/qcom/clk-hfpll.c regmap_write(regmap, hd->l_reg, l_val); hd 190 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 194 drivers/clk/qcom/clk-hfpll.c regmap_read(regmap, hd->l_reg, &l_val); hd 202 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 206 drivers/clk/qcom/clk-hfpll.c regmap_read(regmap, hd->mode_reg, &mode); hd 212 drivers/clk/qcom/clk-hfpll.c if (hd->status_reg) { hd 213 drivers/clk/qcom/clk-hfpll.c regmap_read(regmap, hd->status_reg, &status); hd 214 drivers/clk/qcom/clk-hfpll.c if (!(status & BIT(hd->lock_bit))) { hd 226 drivers/clk/qcom/clk-hfpll.c struct hfpll_data const *hd = h->d; hd 230 drivers/clk/qcom/clk-hfpll.c regmap_read(regmap, hd->mode_reg, &mode); hd 105 drivers/gpu/drm/i2c/ch7006_mode.c #define __MODE(f, hd, vd, ht, vt, hsynp, vsynp, \ hd 108 drivers/gpu/drm/i2c/ch7006_mode.c .name = #hd "x" #vd, \ hd 112 drivers/gpu/drm/i2c/ch7006_mode.c .hdisplay = hd, \ hd 135 drivers/gpu/drm/i2c/ch7006_mode.c #define MODE(f, hd, vd, ht, vt, hsynp, vsynp, \ hd 137 drivers/gpu/drm/i2c/ch7006_mode.c __MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale, \ hd 138 drivers/gpu/drm/i2c/ch7006_mode.c scale_mask, norm_mask, hd, vd) hd 292 drivers/gpu/drm/omapdrm/dss/hdmi4.c static void hdmi_start_audio_stream(struct omap_hdmi *hd) hd 294 drivers/gpu/drm/omapdrm/dss/hdmi4.c hdmi_wp_audio_enable(&hd->wp, true); hd 295 drivers/gpu/drm/omapdrm/dss/hdmi4.c hdmi4_audio_start(&hd->core, &hd->wp); hd 298 drivers/gpu/drm/omapdrm/dss/hdmi4.c static void hdmi_stop_audio_stream(struct omap_hdmi *hd) hd 300 drivers/gpu/drm/omapdrm/dss/hdmi4.c hdmi4_audio_stop(&hd->core, &hd->wp); hd 301 drivers/gpu/drm/omapdrm/dss/hdmi4.c hdmi_wp_audio_enable(&hd->wp, false); hd 485 drivers/gpu/drm/omapdrm/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 487 drivers/gpu/drm/omapdrm/dss/hdmi4.c mutex_lock(&hd->lock); hd 489 drivers/gpu/drm/omapdrm/dss/hdmi4.c WARN_ON(hd->audio_abort_cb != NULL); hd 491 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->audio_abort_cb = abort_cb; hd 493 drivers/gpu/drm/omapdrm/dss/hdmi4.c mutex_unlock(&hd->lock); hd 500 drivers/gpu/drm/omapdrm/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 502 drivers/gpu/drm/omapdrm/dss/hdmi4.c mutex_lock(&hd->lock); hd 503 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->audio_abort_cb = NULL; hd 504 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->audio_configured = false; hd 505 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->audio_playing = false; hd 506 drivers/gpu/drm/omapdrm/dss/hdmi4.c mutex_unlock(&hd->lock); hd 513 drivers/gpu/drm/omapdrm/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 516 drivers/gpu/drm/omapdrm/dss/hdmi4.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 518 drivers/gpu/drm/omapdrm/dss/hdmi4.c if (hd->display_enabled) { hd 519 drivers/gpu/drm/omapdrm/dss/hdmi4.c if (!hdmi_mode_has_audio(&hd->cfg)) hd 522 drivers/gpu/drm/omapdrm/dss/hdmi4.c hdmi_start_audio_stream(hd); hd 524 drivers/gpu/drm/omapdrm/dss/hdmi4.c 
hd->audio_playing = true; hd 526 drivers/gpu/drm/omapdrm/dss/hdmi4.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 532 drivers/gpu/drm/omapdrm/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 535 drivers/gpu/drm/omapdrm/dss/hdmi4.c WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); hd 537 drivers/gpu/drm/omapdrm/dss/hdmi4.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 539 drivers/gpu/drm/omapdrm/dss/hdmi4.c if (hd->display_enabled) hd 540 drivers/gpu/drm/omapdrm/dss/hdmi4.c hdmi_stop_audio_stream(hd); hd 541 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->audio_playing = false; hd 543 drivers/gpu/drm/omapdrm/dss/hdmi4.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 549 drivers/gpu/drm/omapdrm/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 552 drivers/gpu/drm/omapdrm/dss/hdmi4.c mutex_lock(&hd->lock); hd 554 drivers/gpu/drm/omapdrm/dss/hdmi4.c if (hd->display_enabled) { hd 555 drivers/gpu/drm/omapdrm/dss/hdmi4.c ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio, hd 556 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->cfg.vm.pixelclock); hd 561 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->audio_configured = true; hd 562 drivers/gpu/drm/omapdrm/dss/hdmi4.c hd->audio_config = *dss_audio; hd 564 drivers/gpu/drm/omapdrm/dss/hdmi4.c mutex_unlock(&hd->lock); hd 298 drivers/gpu/drm/omapdrm/dss/hdmi5.c static void hdmi_start_audio_stream(struct omap_hdmi *hd) hd 300 drivers/gpu/drm/omapdrm/dss/hdmi5.c REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2); hd 301 drivers/gpu/drm/omapdrm/dss/hdmi5.c hdmi_wp_audio_enable(&hd->wp, true); hd 302 drivers/gpu/drm/omapdrm/dss/hdmi5.c hdmi_wp_audio_core_req_enable(&hd->wp, true); hd 305 drivers/gpu/drm/omapdrm/dss/hdmi5.c static void hdmi_stop_audio_stream(struct omap_hdmi *hd) hd 307 drivers/gpu/drm/omapdrm/dss/hdmi5.c hdmi_wp_audio_core_req_enable(&hd->wp, false); hd 308 drivers/gpu/drm/omapdrm/dss/hdmi5.c hdmi_wp_audio_enable(&hd->wp, false); hd 309 drivers/gpu/drm/omapdrm/dss/hdmi5.c REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2); hd 478 drivers/gpu/drm/omapdrm/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 480 drivers/gpu/drm/omapdrm/dss/hdmi5.c mutex_lock(&hd->lock); hd 482 drivers/gpu/drm/omapdrm/dss/hdmi5.c WARN_ON(hd->audio_abort_cb != NULL); hd 484 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_abort_cb = abort_cb; hd 486 drivers/gpu/drm/omapdrm/dss/hdmi5.c mutex_unlock(&hd->lock); hd 493 drivers/gpu/drm/omapdrm/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 495 drivers/gpu/drm/omapdrm/dss/hdmi5.c mutex_lock(&hd->lock); hd 496 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_abort_cb = NULL; hd 497 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_configured = false; hd 498 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_playing = false; hd 499 drivers/gpu/drm/omapdrm/dss/hdmi5.c mutex_unlock(&hd->lock); hd 506 drivers/gpu/drm/omapdrm/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 509 drivers/gpu/drm/omapdrm/dss/hdmi5.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 511 drivers/gpu/drm/omapdrm/dss/hdmi5.c if (hd->display_enabled) { hd 512 drivers/gpu/drm/omapdrm/dss/hdmi5.c if (!hdmi_mode_has_audio(&hd->cfg)) hd 515 drivers/gpu/drm/omapdrm/dss/hdmi5.c hdmi_start_audio_stream(hd); hd 517 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_playing = true; hd 519 drivers/gpu/drm/omapdrm/dss/hdmi5.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 525 drivers/gpu/drm/omapdrm/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 528 
drivers/gpu/drm/omapdrm/dss/hdmi5.c if (!hdmi_mode_has_audio(&hd->cfg)) hd 531 drivers/gpu/drm/omapdrm/dss/hdmi5.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 533 drivers/gpu/drm/omapdrm/dss/hdmi5.c if (hd->display_enabled) hd 534 drivers/gpu/drm/omapdrm/dss/hdmi5.c hdmi_stop_audio_stream(hd); hd 535 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_playing = false; hd 537 drivers/gpu/drm/omapdrm/dss/hdmi5.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 543 drivers/gpu/drm/omapdrm/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 546 drivers/gpu/drm/omapdrm/dss/hdmi5.c mutex_lock(&hd->lock); hd 548 drivers/gpu/drm/omapdrm/dss/hdmi5.c if (hd->display_enabled) { hd 549 drivers/gpu/drm/omapdrm/dss/hdmi5.c ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio, hd 550 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->cfg.vm.pixelclock); hd 555 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_configured = true; hd 556 drivers/gpu/drm/omapdrm/dss/hdmi5.c hd->audio_config = *dss_audio; hd 558 drivers/gpu/drm/omapdrm/dss/hdmi5.c mutex_unlock(&hd->lock); hd 1582 drivers/gpu/drm/radeon/radeon.h unsigned hd; hd 1141 drivers/gpu/drm/radeon/radeon_pm.c enable |= rdev->pm.dpm.hd > 0; hd 1151 drivers/gpu/drm/radeon/radeon_pm.c if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0)) hd 1153 drivers/gpu/drm/radeon/radeon_pm.c else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0)) hd 1155 drivers/gpu/drm/radeon/radeon_pm.c else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1)) hd 1157 drivers/gpu/drm/radeon/radeon_pm.c else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2)) hd 852 drivers/gpu/drm/radeon/radeon_uvd.c unsigned *sd, unsigned *hd) hd 857 drivers/gpu/drm/radeon/radeon_uvd.c *hd = 0; hd 864 drivers/gpu/drm/radeon/radeon_uvd.c ++(*hd); hd 878 drivers/gpu/drm/radeon/radeon_uvd.c &rdev->pm.dpm.hd); hd 897 drivers/gpu/drm/radeon/radeon_uvd.c unsigned hd = 0, sd = 0; hd 898 drivers/gpu/drm/radeon/radeon_uvd.c radeon_uvd_count_handles(rdev, &sd, &hd); hd 900 drivers/gpu/drm/radeon/radeon_uvd.c (rdev->pm.dpm.hd != hd)) { hd 902 drivers/gpu/drm/radeon/radeon_uvd.c rdev->pm.dpm.hd = hd; hd 24 drivers/greybus/connection.c struct gb_host_device *hd = intf->hd; hd 27 drivers/greybus/connection.c list_for_each_entry(connection, &hd->connections, hd_links) { hd 54 drivers/greybus/connection.c gb_connection_hd_find(struct gb_host_device *hd, u16 cport_id) hd 60 drivers/greybus/connection.c list_for_each_entry(connection, &hd->connections, hd_links) hd 76 drivers/greybus/connection.c void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, hd 81 drivers/greybus/connection.c trace_gb_hd_in(hd); hd 83 drivers/greybus/connection.c connection = gb_connection_hd_find(hd, cport_id); hd 85 drivers/greybus/connection.c dev_err(&hd->dev, hd 144 drivers/greybus/connection.c _gb_connection_create(struct gb_host_device *hd, int hd_cport_id, hd 161 drivers/greybus/connection.c ret = gb_hd_cport_allocate(hd, hd_cport_id, flags); hd 163 drivers/greybus/connection.c dev_err(&hd->dev, "failed to allocate cport: %d\n", ret); hd 176 drivers/greybus/connection.c connection->hd = hd; hd 191 drivers/greybus/connection.c dev_name(&hd->dev), hd_cport_id); hd 202 drivers/greybus/connection.c list_add(&connection->hd_links, &hd->connections); hd 220 drivers/greybus/connection.c gb_hd_cport_release(hd, hd_cport_id); hd 228 drivers/greybus/connection.c gb_connection_create_static(struct gb_host_device *hd, u16 hd_cport_id, hd 231 drivers/greybus/connection.c return _gb_connection_create(hd, hd_cport_id, 
NULL, NULL, 0, handler, hd 238 drivers/greybus/connection.c return _gb_connection_create(intf->hd, -1, intf, NULL, 0, NULL, hd 249 drivers/greybus/connection.c return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, hd 264 drivers/greybus/connection.c return _gb_connection_create(intf->hd, -1, intf, bundle, cport_id, hd 281 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 284 drivers/greybus/connection.c if (!hd->driver->cport_enable) hd 287 drivers/greybus/connection.c ret = hd->driver->cport_enable(hd, connection->hd_cport_id, hd 290 drivers/greybus/connection.c dev_err(&hd->dev, "%s: failed to enable host cport: %d\n", hd 300 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 303 drivers/greybus/connection.c if (!hd->driver->cport_disable) hd 306 drivers/greybus/connection.c ret = hd->driver->cport_disable(hd, connection->hd_cport_id); hd 308 drivers/greybus/connection.c dev_err(&hd->dev, "%s: failed to disable host cport: %d\n", hd 315 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 318 drivers/greybus/connection.c if (!hd->driver->cport_connected) hd 321 drivers/greybus/connection.c ret = hd->driver->cport_connected(hd, connection->hd_cport_id); hd 323 drivers/greybus/connection.c dev_err(&hd->dev, "%s: failed to set connected state: %d\n", hd 333 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 336 drivers/greybus/connection.c if (!hd->driver->cport_flush) hd 339 drivers/greybus/connection.c ret = hd->driver->cport_flush(hd, connection->hd_cport_id); hd 341 drivers/greybus/connection.c dev_err(&hd->dev, "%s: failed to flush host cport: %d\n", hd 351 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 355 drivers/greybus/connection.c if (!hd->driver->cport_quiesce) hd 364 drivers/greybus/connection.c if (!hd->driver->cport_quiesce) hd 367 drivers/greybus/connection.c ret = hd->driver->cport_quiesce(hd, connection->hd_cport_id, hd 371 drivers/greybus/connection.c dev_err(&hd->dev, "%s: failed to quiesce host cport: %d\n", hd 381 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 384 drivers/greybus/connection.c if (!hd->driver->cport_clear) hd 387 drivers/greybus/connection.c ret = hd->driver->cport_clear(hd, connection->hd_cport_id); hd 389 drivers/greybus/connection.c dev_err(&hd->dev, "%s: failed to clear host cport: %d\n", hd 404 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 425 drivers/greybus/connection.c ret = gb_svc_connection_create(hd->svc, hd 426 drivers/greybus/connection.c hd->svc->ap_intf_id, hd 432 drivers/greybus/connection.c dev_err(&connection->hd->dev, hd 447 drivers/greybus/connection.c gb_svc_connection_destroy(connection->hd->svc, hd 448 drivers/greybus/connection.c connection->hd->svc->ap_intf_id, hd 493 drivers/greybus/connection.c dev_err(&connection->hd->dev, hd 560 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 561 drivers/greybus/connection.c const struct gb_hd_driver *drv = hd->driver; hd 571 drivers/greybus/connection.c ret = drv->cport_shutdown(hd, connection->hd_cport_id, phase, hd 578 drivers/greybus/connection.c dev_err(&hd->dev, "%s: failed to send cport shutdown (phase %d): %d\n", hd 901 drivers/greybus/connection.c gb_hd_cport_release(connection->hd, connection->hd_cport_id); hd 912 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 915 drivers/greybus/connection.c if (!hd->driver->latency_tag_enable) 
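
The drivers/greybus/connection.c hits above all follow one shape: each per-cport helper first checks whether the host-device driver supplies the optional callback, then calls it with the connection's hd_cport_id, and reports failure through dev_err() on &hd->dev. What follows is a minimal, self-contained C sketch of that dispatch pattern, not the kernel code itself: the struct definitions are cut down to the fields visible in the listing, the two-argument cport_enable signature is simplified, and demo_cport_enable() plus the name field (standing in for &hd->dev) are illustrative stand-ins.

/* Sketch only: models the optional-callback dispatch seen in the
 * drivers/greybus/connection.c entries above. All types are cut-down
 * stand-ins, not the real kernel definitions. */
#include <stdio.h>

struct gb_host_device;                       /* forward declaration */

struct gb_hd_driver {
	/* optional; may be NULL, as the NULL checks in the listing imply */
	int (*cport_enable)(struct gb_host_device *hd, unsigned short cport_id);
};

struct gb_host_device {
	const struct gb_hd_driver *driver;
	const char *name;                    /* stand-in for &hd->dev */
};

struct gb_connection {
	struct gb_host_device *hd;
	unsigned short hd_cport_id;
};

static int gb_connection_hd_cport_enable(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;
	int ret;

	if (!hd->driver->cport_enable)       /* callback not provided: nothing to do */
		return 0;

	ret = hd->driver->cport_enable(hd, connection->hd_cport_id);
	if (ret) {
		/* the kernel code reports this with dev_err(&hd->dev, ...) */
		fprintf(stderr, "%s: failed to enable host cport: %d\n",
			hd->name, ret);
		return ret;
	}
	return 0;
}

static int demo_cport_enable(struct gb_host_device *hd, unsigned short cport_id)
{
	printf("%s: enabling cport %hu\n", hd->name, cport_id);
	return 0;
}

int main(void)
{
	struct gb_hd_driver driver = { .cport_enable = demo_cport_enable };
	struct gb_host_device hd = { .driver = &driver, .name = "greybus1" };
	struct gb_connection connection = { .hd = &hd, .hd_cport_id = 5 };

	return gb_connection_hd_cport_enable(&connection);
}

Treating each callback as optional (the repeated "if (!hd->driver->...)" guards in the listing) appears to let a host-controller driver such as drivers/greybus/es2.c implement only the cport operations its hardware actually needs.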
hd 918 drivers/greybus/connection.c ret = hd->driver->latency_tag_enable(hd, connection->hd_cport_id); hd 920 drivers/greybus/connection.c dev_err(&connection->hd->dev, hd 929 drivers/greybus/connection.c struct gb_host_device *hd = connection->hd; hd 932 drivers/greybus/connection.c if (!hd->driver->latency_tag_disable) hd 935 drivers/greybus/connection.c ret = hd->driver->latency_tag_disable(hd, connection->hd_cport_id); hd 937 drivers/greybus/connection.c dev_err(&connection->hd->dev, hd 83 drivers/greybus/core.c struct gb_host_device *hd; hd 91 drivers/greybus/core.c hd = to_gb_host_device(dev); hd 94 drivers/greybus/core.c hd = module->hd; hd 98 drivers/greybus/core.c hd = intf->hd; hd 103 drivers/greybus/core.c hd = intf->hd; hd 108 drivers/greybus/core.c hd = intf->hd; hd 111 drivers/greybus/core.c hd = svc->hd; hd 117 drivers/greybus/core.c if (add_uevent_var(env, "BUS=%u", hd->bus_id)) hd 151 drivers/greybus/core.c struct gb_host_device *hd; hd 153 drivers/greybus/core.c hd = to_gb_host_device(dev); hd 154 drivers/greybus/core.c gb_hd_shutdown(hd); hd 103 drivers/greybus/es2.c struct gb_host_device *hd; hd 136 drivers/greybus/es2.c static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd) hd 138 drivers/greybus/es2.c return (struct es2_ap_dev *)&hd->hd_priv; hd 217 drivers/greybus/es2.c static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, hd 220 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 390 drivers/greybus/es2.c static int message_send(struct gb_host_device *hd, u16 cport_id, hd 393 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 405 drivers/greybus/es2.c if (!cport_id_valid(hd, cport_id)) { hd 455 drivers/greybus/es2.c struct gb_host_device *hd = message->operation->connection->hd; hd 456 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 488 drivers/greybus/es2.c static int es2_cport_allocate(struct gb_host_device *hd, int cport_id, hd 491 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 492 drivers/greybus/es2.c struct ida *id_map = &hd->cport_id_map; hd 498 drivers/greybus/es2.c dev_err(&hd->dev, "cport %d not available\n", cport_id); hd 505 drivers/greybus/es2.c dev_err(&hd->dev, "CDSI1 already in use\n"); hd 516 drivers/greybus/es2.c ida_end = hd->num_cports; hd 517 drivers/greybus/es2.c } else if (cport_id < hd->num_cports) { hd 521 drivers/greybus/es2.c dev_err(&hd->dev, "cport %d not available\n", cport_id); hd 528 drivers/greybus/es2.c static void es2_cport_release(struct gb_host_device *hd, u16 cport_id) hd 530 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 538 drivers/greybus/es2.c ida_simple_remove(&hd->cport_id_map, cport_id); hd 541 drivers/greybus/es2.c static int cport_enable(struct gb_host_device *hd, u16 cport_id, hd 544 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 562 drivers/greybus/es2.c dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__, hd 586 drivers/greybus/es2.c static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id) hd 588 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 605 drivers/greybus/es2.c static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id) hd 607 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 623 drivers/greybus/es2.c static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id, hd 626 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 649 drivers/greybus/es2.c static int es2_cport_quiesce(struct 
gb_host_device *hd, u16 cport_id, hd 652 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 678 drivers/greybus/es2.c static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id) hd 680 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 696 drivers/greybus/es2.c static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id) hd 699 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 714 drivers/greybus/es2.c static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id) hd 717 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 807 drivers/greybus/es2.c gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1); hd 808 drivers/greybus/es2.c gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0); hd 811 drivers/greybus/es2.c gb_hd_put(es2->hd); hd 818 drivers/greybus/es2.c struct gb_host_device *hd = urb->context; hd 846 drivers/greybus/es2.c if (cport_id_valid(hd, cport_id)) { hd 847 drivers/greybus/es2.c greybus_data_rcvd(hd, cport_id, urb->transfer_buffer, hd 862 drivers/greybus/es2.c struct gb_host_device *hd = message->operation->connection->hd; hd 863 drivers/greybus/es2.c struct es2_ap_dev *es2 = hd_to_es2(hd); hd 877 drivers/greybus/es2.c greybus_message_sent(hd, message, status); hd 1253 drivers/greybus/es2.c struct gb_host_device *hd; hd 1275 drivers/greybus/es2.c hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX, hd 1277 drivers/greybus/es2.c if (IS_ERR(hd)) { hd 1279 drivers/greybus/es2.c return PTR_ERR(hd); hd 1282 drivers/greybus/es2.c es2 = hd_to_es2(hd); hd 1283 drivers/greybus/es2.c es2->hd = hd; hd 1294 drivers/greybus/es2.c retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0); hd 1297 drivers/greybus/es2.c retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1); hd 1363 drivers/greybus/es2.c cport_in_callback, hd); hd 1422 drivers/greybus/es2.c retval = gb_hd_add(hd); hd 1433 drivers/greybus/es2.c gb_hd_del(hd); hd 1446 drivers/greybus/es2.c gb_hd_del(es2->hd); hd 179 drivers/greybus/greybus_trace.h __entry->hd_bus_id = connection->hd->bus_id; hd 388 drivers/greybus/greybus_trace.h __entry->hd_bus_id = module->hd->bus_id; hd 431 drivers/greybus/greybus_trace.h TP_PROTO(struct gb_host_device *hd), hd 433 drivers/greybus/greybus_trace.h TP_ARGS(hd), hd 442 drivers/greybus/greybus_trace.h __entry->bus_id = hd->bus_id; hd 443 drivers/greybus/greybus_trace.h __entry->num_cports = hd->num_cports; hd 444 drivers/greybus/greybus_trace.h __entry->buffer_size_max = hd->buffer_size_max; hd 454 drivers/greybus/greybus_trace.h TP_PROTO(struct gb_host_device *hd), \ hd 455 drivers/greybus/greybus_trace.h TP_ARGS(hd)) hd 24 drivers/greybus/hd.c int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, hd 27 drivers/greybus/hd.c if (!hd || !hd->driver || !hd->driver->output) hd 29 drivers/greybus/hd.c return hd->driver->output(hd, req, size, cmd, async); hd 36 drivers/greybus/hd.c struct gb_host_device *hd = to_gb_host_device(dev); hd 38 drivers/greybus/hd.c return sprintf(buf, "%d\n", hd->bus_id); hd 48 drivers/greybus/hd.c int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id) hd 50 drivers/greybus/hd.c struct ida *id_map = &hd->cport_id_map; hd 55 drivers/greybus/hd.c dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id); hd 63 drivers/greybus/hd.c void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id) hd 65 drivers/greybus/hd.c struct ida *id_map = &hd->cport_id_map; hd 72 drivers/greybus/hd.c int gb_hd_cport_allocate(struct gb_host_device *hd, int 
cport_id, hd 75 drivers/greybus/hd.c struct ida *id_map = &hd->cport_id_map; hd 78 drivers/greybus/hd.c if (hd->driver->cport_allocate) hd 79 drivers/greybus/hd.c return hd->driver->cport_allocate(hd, cport_id, flags); hd 83 drivers/greybus/hd.c ida_end = hd->num_cports; hd 84 drivers/greybus/hd.c } else if (cport_id < hd->num_cports) { hd 88 drivers/greybus/hd.c dev_err(&hd->dev, "cport %d not available\n", cport_id); hd 96 drivers/greybus/hd.c void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id) hd 98 drivers/greybus/hd.c if (hd->driver->cport_release) { hd 99 drivers/greybus/hd.c hd->driver->cport_release(hd, cport_id); hd 103 drivers/greybus/hd.c ida_simple_remove(&hd->cport_id_map, cport_id); hd 108 drivers/greybus/hd.c struct gb_host_device *hd = to_gb_host_device(dev); hd 110 drivers/greybus/hd.c trace_gb_hd_release(hd); hd 112 drivers/greybus/hd.c if (hd->svc) hd 113 drivers/greybus/hd.c gb_svc_put(hd->svc); hd 114 drivers/greybus/hd.c ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id); hd 115 drivers/greybus/hd.c ida_destroy(&hd->cport_id_map); hd 116 drivers/greybus/hd.c kfree(hd); hd 129 drivers/greybus/hd.c struct gb_host_device *hd; hd 161 drivers/greybus/hd.c hd = kzalloc(sizeof(*hd) + driver->hd_priv_size, GFP_KERNEL); hd 162 drivers/greybus/hd.c if (!hd) hd 167 drivers/greybus/hd.c kfree(hd); hd 170 drivers/greybus/hd.c hd->bus_id = ret; hd 172 drivers/greybus/hd.c hd->driver = driver; hd 173 drivers/greybus/hd.c INIT_LIST_HEAD(&hd->modules); hd 174 drivers/greybus/hd.c INIT_LIST_HEAD(&hd->connections); hd 175 drivers/greybus/hd.c ida_init(&hd->cport_id_map); hd 176 drivers/greybus/hd.c hd->buffer_size_max = buffer_size_max; hd 177 drivers/greybus/hd.c hd->num_cports = num_cports; hd 179 drivers/greybus/hd.c hd->dev.parent = parent; hd 180 drivers/greybus/hd.c hd->dev.bus = &greybus_bus_type; hd 181 drivers/greybus/hd.c hd->dev.type = &greybus_hd_type; hd 182 drivers/greybus/hd.c hd->dev.groups = bus_groups; hd 183 drivers/greybus/hd.c hd->dev.dma_mask = hd->dev.parent->dma_mask; hd 184 drivers/greybus/hd.c device_initialize(&hd->dev); hd 185 drivers/greybus/hd.c dev_set_name(&hd->dev, "greybus%d", hd->bus_id); hd 187 drivers/greybus/hd.c trace_gb_hd_create(hd); hd 189 drivers/greybus/hd.c hd->svc = gb_svc_create(hd); hd 190 drivers/greybus/hd.c if (!hd->svc) { hd 191 drivers/greybus/hd.c dev_err(&hd->dev, "failed to create svc\n"); hd 192 drivers/greybus/hd.c put_device(&hd->dev); hd 196 drivers/greybus/hd.c return hd; hd 200 drivers/greybus/hd.c int gb_hd_add(struct gb_host_device *hd) hd 204 drivers/greybus/hd.c ret = device_add(&hd->dev); hd 208 drivers/greybus/hd.c ret = gb_svc_add(hd->svc); hd 210 drivers/greybus/hd.c device_del(&hd->dev); hd 214 drivers/greybus/hd.c trace_gb_hd_add(hd); hd 220 drivers/greybus/hd.c void gb_hd_del(struct gb_host_device *hd) hd 222 drivers/greybus/hd.c trace_gb_hd_del(hd); hd 228 drivers/greybus/hd.c gb_svc_del(hd->svc); hd 230 drivers/greybus/hd.c device_del(&hd->dev); hd 234 drivers/greybus/hd.c void gb_hd_shutdown(struct gb_host_device *hd) hd 236 drivers/greybus/hd.c gb_svc_del(hd->svc); hd 240 drivers/greybus/hd.c void gb_hd_put(struct gb_host_device *hd) hd 242 drivers/greybus/hd.c put_device(&hd->dev); hd 51 drivers/greybus/interface.c return gb_svc_dme_peer_get(intf->hd->svc, intf->interface_id, hd 128 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 175 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 363 drivers/greybus/interface.c struct gb_host_device *hd = intf->hd; hd 381 
drivers/greybus/interface.c ret = gb_svc_dme_peer_get(hd->svc, intf->interface_id, attr, hd 435 drivers/greybus/interface.c return gb_svc_dme_peer_set(hd->svc, intf->interface_id, attr, hd 464 drivers/greybus/interface.c ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, hd 483 drivers/greybus/interface.c ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, hd 502 drivers/greybus/interface.c ret = gb_svc_pwrmon_intf_sample_get(intf->hd->svc, intf->interface_id, hd 736 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 789 drivers/greybus/interface.c struct gb_host_device *hd = module->hd; hd 796 drivers/greybus/interface.c intf->hd = hd; /* XXX refcount? */ hd 827 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 843 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 859 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 876 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 912 drivers/greybus/interface.c struct gb_svc *svc = intf->hd->svc; hd 41 drivers/greybus/module.c ret = gb_svc_intf_eject(module->hd->svc, module->module_id); hd 89 drivers/greybus/module.c struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, hd 101 drivers/greybus/module.c module->hd = hd; hd 105 drivers/greybus/module.c module->dev.parent = &hd->dev; hd 109 drivers/greybus/module.c module->dev.dma_mask = hd->dev.dma_mask; hd 111 drivers/greybus/module.c dev_set_name(&module->dev, "%d-%u", hd->bus_id, module_id); hd 220 drivers/greybus/operation.c return connection->hd->driver->message_send(connection->hd, hd 231 drivers/greybus/operation.c struct gb_host_device *hd = message->operation->connection->hd; hd 233 drivers/greybus/operation.c hd->driver->message_cancel(message); hd 245 drivers/greybus/operation.c dev_err(&connection->hd->dev, hd 254 drivers/greybus/operation.c dev_err(&connection->hd->dev, hd 309 drivers/greybus/operation.c static void gb_operation_message_init(struct gb_host_device *hd, hd 361 drivers/greybus/operation.c gb_operation_message_alloc(struct gb_host_device *hd, u8 type, hd 368 drivers/greybus/operation.c if (message_size > hd->buffer_size_max) { hd 369 drivers/greybus/operation.c dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n", hd 370 drivers/greybus/operation.c message_size, hd->buffer_size_max); hd 384 drivers/greybus/operation.c gb_operation_message_init(hd, message, 0, payload_size, type); hd 471 drivers/greybus/operation.c struct gb_host_device *hd = operation->connection->hd; hd 477 drivers/greybus/operation.c response = gb_operation_message_alloc(hd, type, response_size, gfp); hd 523 drivers/greybus/operation.c struct gb_host_device *hd = connection->hd; hd 531 drivers/greybus/operation.c operation->request = gb_operation_message_alloc(hd, type, request_size, hd 623 drivers/greybus/operation.c struct gb_host_device *hd = connection->hd; hd 625 drivers/greybus/operation.c return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr); hd 837 drivers/greybus/operation.c dev_err(&connection->hd->dev, "request result already set\n"); hd 871 drivers/greybus/operation.c void greybus_message_sent(struct gb_host_device *hd, hd 890 drivers/greybus/operation.c dev_err(&connection->hd->dev, hd 928 drivers/greybus/operation.c dev_err(&connection->hd->dev, hd 970 drivers/greybus/operation.c dev_err_ratelimited(&connection->hd->dev, hd 978 drivers/greybus/operation.c dev_err_ratelimited(&connection->hd->dev, hd 988 
drivers/greybus/operation.c dev_err_ratelimited(&connection->hd->dev, hd 997 drivers/greybus/operation.c dev_err_ratelimited(&connection->hd->dev, hd 1030 drivers/greybus/operation.c struct device *dev = &connection->hd->dev; hd 1161 drivers/greybus/operation.c dev_err(&connection->hd->dev, hd 1214 drivers/greybus/operation.c dev_err(&connection->hd->dev, hd 880 drivers/greybus/svc.c struct gb_host_device *hd = svc->hd; hd 885 drivers/greybus/svc.c list_for_each_entry(module, &hd->modules, hd_node) { hd 900 drivers/greybus/svc.c struct gb_host_device *hd = svc->hd; hd 903 drivers/greybus/svc.c list_for_each_entry(module, &hd->modules, hd_node) { hd 948 drivers/greybus/svc.c struct gb_host_device *hd = svc->hd; hd 976 drivers/greybus/svc.c module = gb_module_create(hd, module_id, num_interfaces); hd 988 drivers/greybus/svc.c list_add(&module->hd_node, &hd->modules); hd 1302 drivers/greybus/svc.c struct gb_svc *gb_svc_create(struct gb_host_device *hd) hd 1310 drivers/greybus/svc.c svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev)); hd 1316 drivers/greybus/svc.c svc->dev.parent = &hd->dev; hd 1323 drivers/greybus/svc.c dev_set_name(&svc->dev, "%d-svc", hd->bus_id); hd 1327 drivers/greybus/svc.c svc->hd = hd; hd 1329 drivers/greybus/svc.c svc->connection = gb_connection_create_static(hd, GB_SVC_CPORT_ID, hd 1364 drivers/greybus/svc.c struct gb_host_device *hd = svc->hd; hd 1367 drivers/greybus/svc.c list_for_each_entry_safe(module, tmp, &hd->modules, hd_node) { hd 506 drivers/hwmon/ibmpowernv.c static void populate_sensor(struct sensor_data *sdata, int od, int hd, int sid, hd 520 drivers/hwmon/ibmpowernv.c sdata->hwmon_index = hd; hd 11804 drivers/infiniband/hw/hfi1/chip.c void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, hd 11823 drivers/infiniband/hw/hfi1/chip.c (((u64)hd & RCV_HDR_HEAD_HEAD_MASK) hd 795 drivers/infiniband/hw/hfi1/chip.h void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd, hd 2049 drivers/infiniband/hw/qib/qib_iba6120.c static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd, hd 2054 drivers/infiniband/hw/qib/qib_iba6120.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); hd 2701 drivers/infiniband/hw/qib/qib_iba7220.c static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd, hd 2706 drivers/infiniband/hw/qib/qib_iba7220.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); hd 4431 drivers/infiniband/hw/qib/qib_iba7322.c static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd, hd 4438 drivers/infiniband/hw/qib/qib_iba7322.c if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT) hd 4442 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); hd 4443 drivers/infiniband/hw/qib/qib_iba7322.c qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt); hd 617 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd; hd 619 drivers/md/dm-clone-target.c hlist_for_each_entry(hd, &bucket->head, h) { hd 620 drivers/md/dm-clone-target.c if (hd->region_nr == region_nr) hd 621 drivers/md/dm-clone-target.c return hd; hd 633 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd) hd 635 drivers/md/dm-clone-target.c hlist_add_head(&hd->h, &bucket->head); hd 647 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd) hd 651 drivers/md/dm-clone-target.c hd2 = __hash_find(bucket, hd->region_nr); hd 655 drivers/md/dm-clone-target.c __insert_region_hydration(bucket, hd); hd 657 drivers/md/dm-clone-target.c return hd; hd 665 
drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd; hd 671 drivers/md/dm-clone-target.c hd = mempool_alloc(&clone->hydration_pool, GFP_NOIO); hd 672 drivers/md/dm-clone-target.c hd->clone = clone; hd 674 drivers/md/dm-clone-target.c return hd; hd 677 drivers/md/dm-clone-target.c static inline void free_hydration(struct dm_clone_region_hydration *hd) hd 679 drivers/md/dm-clone-target.c mempool_free(hd, &hd->clone->hydration_pool); hd 683 drivers/md/dm-clone-target.c static void hydration_init(struct dm_clone_region_hydration *hd, unsigned long region_nr) hd 685 drivers/md/dm-clone-target.c hd->region_nr = region_nr; hd 686 drivers/md/dm-clone-target.c hd->overwrite_bio = NULL; hd 687 drivers/md/dm-clone-target.c bio_list_init(&hd->deferred_bios); hd 688 drivers/md/dm-clone-target.c hd->status = 0; hd 690 drivers/md/dm-clone-target.c INIT_LIST_HEAD(&hd->list); hd 691 drivers/md/dm-clone-target.c INIT_HLIST_NODE(&hd->h); hd 700 drivers/md/dm-clone-target.c static int hydration_update_metadata(struct dm_clone_region_hydration *hd) hd 705 drivers/md/dm-clone-target.c struct clone *clone = hd->clone; hd 711 drivers/md/dm-clone-target.c if (likely(!r) && hd->status == BLK_STS_OK) hd 712 drivers/md/dm-clone-target.c r = dm_clone_set_region_hydrated(clone->cmd, hd->region_nr); hd 714 drivers/md/dm-clone-target.c bucket = get_hash_table_bucket(clone, hd->region_nr); hd 718 drivers/md/dm-clone-target.c hlist_del(&hd->h); hd 734 drivers/md/dm-clone-target.c static void hydration_complete(struct dm_clone_region_hydration *hd) hd 738 drivers/md/dm-clone-target.c struct clone *clone = hd->clone; hd 740 drivers/md/dm-clone-target.c r = hydration_update_metadata(hd); hd 742 drivers/md/dm-clone-target.c if (hd->status == BLK_STS_OK && likely(!r)) { hd 743 drivers/md/dm-clone-target.c if (hd->overwrite_bio) hd 744 drivers/md/dm-clone-target.c complete_overwrite_bio(clone, hd->overwrite_bio); hd 746 drivers/md/dm-clone-target.c issue_deferred_bios(clone, &hd->deferred_bios); hd 748 drivers/md/dm-clone-target.c status = r ? 
BLK_STS_IOERR : hd->status; hd 750 drivers/md/dm-clone-target.c if (hd->overwrite_bio) hd 751 drivers/md/dm-clone-target.c bio_list_add(&hd->deferred_bios, hd->overwrite_bio); hd 753 drivers/md/dm-clone-target.c fail_bios(&hd->deferred_bios, status); hd 756 drivers/md/dm-clone-target.c free_hydration(hd); hd 766 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *tmp, *hd = context; hd 767 drivers/md/dm-clone-target.c struct clone *clone = hd->clone; hd 777 drivers/md/dm-clone-target.c list_splice_tail(&hd->list, &batched_hydrations); hd 779 drivers/md/dm-clone-target.c hd->status = status; hd 780 drivers/md/dm-clone-target.c hydration_complete(hd); hd 783 drivers/md/dm-clone-target.c list_for_each_entry_safe(hd, tmp, &batched_hydrations, list) { hd 784 drivers/md/dm-clone-target.c hd->status = status; hd 785 drivers/md/dm-clone-target.c hydration_complete(hd); hd 794 drivers/md/dm-clone-target.c static void hydration_copy(struct dm_clone_region_hydration *hd, unsigned int nr_regions) hd 799 drivers/md/dm-clone-target.c struct clone *clone = hd->clone; hd 805 drivers/md/dm-clone-target.c region_start = hd->region_nr; hd 835 drivers/md/dm-clone-target.c hydration_kcopyd_callback, hd); hd 840 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd = bio->bi_private; hd 842 drivers/md/dm-clone-target.c bio->bi_end_io = hd->overwrite_bio_end_io; hd 843 drivers/md/dm-clone-target.c hd->status = bio->bi_status; hd 845 drivers/md/dm-clone-target.c hydration_complete(hd); hd 848 drivers/md/dm-clone-target.c static void hydration_overwrite(struct dm_clone_region_hydration *hd, struct bio *bio) hd 855 drivers/md/dm-clone-target.c hd->overwrite_bio = bio; hd 856 drivers/md/dm-clone-target.c hd->overwrite_bio_end_io = bio->bi_end_io; hd 859 drivers/md/dm-clone-target.c bio->bi_private = hd; hd 861 drivers/md/dm-clone-target.c atomic_inc(&hd->clone->hydrations_in_flight); hd 880 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd, *hd2; hd 887 drivers/md/dm-clone-target.c hd = __hash_find(bucket, region_nr); hd 888 drivers/md/dm-clone-target.c if (hd) { hd 890 drivers/md/dm-clone-target.c bio_list_add(&hd->deferred_bios, bio); hd 908 drivers/md/dm-clone-target.c hd = alloc_hydration(clone); hd 909 drivers/md/dm-clone-target.c hydration_init(hd, region_nr); hd 916 drivers/md/dm-clone-target.c free_hydration(hd); hd 921 drivers/md/dm-clone-target.c hd2 = __find_or_insert_region_hydration(bucket, hd); hd 922 drivers/md/dm-clone-target.c if (hd2 != hd) { hd 926 drivers/md/dm-clone-target.c free_hydration(hd); hd 936 drivers/md/dm-clone-target.c hlist_del(&hd->h); hd 938 drivers/md/dm-clone-target.c free_hydration(hd); hd 952 drivers/md/dm-clone-target.c hydration_overwrite(hd, bio); hd 954 drivers/md/dm-clone-target.c bio_list_add(&hd->deferred_bios, bio); hd 956 drivers/md/dm-clone-target.c hydration_copy(hd, 1); hd 981 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd) hd 983 drivers/md/dm-clone-target.c struct clone *clone = hd->clone; hd 989 drivers/md/dm-clone-target.c (batch->head->region_nr + batch->nr_batched_regions) == hd->region_nr) { hd 990 drivers/md/dm-clone-target.c list_add_tail(&hd->list, &batch->head->list); hd 992 drivers/md/dm-clone-target.c hd = NULL; hd 996 drivers/md/dm-clone-target.c if (batch->nr_batched_regions >= max_batch_size || hd) { hd 1003 drivers/md/dm-clone-target.c if (!hd) hd 1008 drivers/md/dm-clone-target.c hydration_copy(hd, 1); hd 1013 drivers/md/dm-clone-target.c BUG_ON(!list_empty(&hd->list)); hd 1014 
drivers/md/dm-clone-target.c batch->head = hd; hd 1024 drivers/md/dm-clone-target.c struct dm_clone_region_hydration *hd; hd 1027 drivers/md/dm-clone-target.c hd = alloc_hydration(clone); hd 1040 drivers/md/dm-clone-target.c hydration_init(hd, offset); hd 1041 drivers/md/dm-clone-target.c __insert_region_hydration(bucket, hd); hd 1045 drivers/md/dm-clone-target.c __batch_hydration(batch, hd); hd 1054 drivers/md/dm-clone-target.c if (hd) hd 1055 drivers/md/dm-clone-target.c free_hydration(hd); hd 29 drivers/media/platform/ti-vpe/csc.c u16 hd[12]; hd 158 drivers/media/platform/ti-vpe/csc.c coeff = sd_hd_coeffs->hd; hd 519 drivers/media/radio/radio-si476x.c .hd = false, hd 721 drivers/media/radio/radio-si476x.c args.hd = false; hd 364 drivers/message/fusion/mptbase.c MPT_SCSI_HOST *hd; hd 383 drivers/message/fusion/mptbase.c hd = shost_priv(ioc->sh); hd 384 drivers/message/fusion/mptbase.c ioc->schedule_dead_ioc_flush_running_cmds(hd); hd 598 drivers/message/fusion/mptbase.h typedef void (*MPT_FLUSH_RUNNING_CMDS)(MPT_SCSI_HOST *hd); hd 2449 drivers/message/fusion/mptctl.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 2451 drivers/message/fusion/mptctl.c if (hd && (cim_rev == 1)) { hd 2562 drivers/message/fusion/mptctl.c MPT_SCSI_HOST *hd = NULL; hd 2673 drivers/message/fusion/mptctl.c hd = shost_priv(ioc->sh); hd 2674 drivers/message/fusion/mptctl.c if (hd != NULL) hd 2675 drivers/message/fusion/mptctl.c karg.select_timeouts = hd->sel_timeout[karg.hdr.id]; hd 190 drivers/message/fusion/mptfc.c MPT_SCSI_HOST *hd; hd 199 drivers/message/fusion/mptfc.c hd = shost_priv(SCpnt->device->host); hd 200 drivers/message/fusion/mptfc.c ioc = hd->ioc; hd 599 drivers/message/fusion/mptfc.c MPT_SCSI_HOST *hd; hd 612 drivers/message/fusion/mptfc.c hd = shost_priv(sdev->host); hd 613 drivers/message/fusion/mptfc.c ioc = hd->ioc; hd 1172 drivers/message/fusion/mptfc.c MPT_SCSI_HOST *hd; hd 1288 drivers/message/fusion/mptfc.c hd = shost_priv(sh); hd 1289 drivers/message/fusion/mptfc.c hd->ioc = ioc; hd 1304 drivers/message/fusion/mptfc.c hd->last_queue_full = 0; hd 1369 drivers/message/fusion/mptfc.c MPT_SCSI_HOST *hd; hd 1381 drivers/message/fusion/mptfc.c ((hd = shost_priv(ioc->sh)) == NULL)) hd 338 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 341 drivers/message/fusion/mptsas.c if (!list_empty(&hd->target_reset_list)) { hd 343 drivers/message/fusion/mptsas.c &hd->target_reset_list, list) { hd 1113 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 1138 drivers/message/fusion/mptsas.c list_add_tail(&target_reset_list->list, &hd->target_reset_list); hd 1160 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 1161 drivers/message/fusion/mptsas.c struct list_head *head = &hd->target_reset_list; hd 1195 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 1196 drivers/message/fusion/mptsas.c struct list_head *head = &hd->target_reset_list; hd 1289 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd; hd 1296 drivers/message/fusion/mptsas.c hd = shost_priv(ioc->sh); hd 1297 drivers/message/fusion/mptsas.c if (!hd->ioc) hd 1688 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(host); hd 1689 drivers/message/fusion/mptsas.c MPT_ADAPTER *ioc = hd->ioc; hd 1718 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(host); hd 1724 drivers/message/fusion/mptsas.c MPT_ADAPTER *ioc = hd->ioc; hd 1796 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(host); hd 1800 
drivers/message/fusion/mptsas.c MPT_ADAPTER *ioc = hd->ioc; hd 1845 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd = shost_priv(host); hd 1851 drivers/message/fusion/mptsas.c MPT_ADAPTER *ioc = hd->ioc; hd 1898 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd; hd 1908 drivers/message/fusion/mptsas.c hd = shost_priv(shost); hd 1909 drivers/message/fusion/mptsas.c ioc = hd->ioc; hd 1929 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd; hd 1934 drivers/message/fusion/mptsas.c hd = shost_priv(sc->device->host); hd 1935 drivers/message/fusion/mptsas.c if (hd == NULL) { hd 1941 drivers/message/fusion/mptsas.c ioc = hd->ioc; hd 5128 drivers/message/fusion/mptsas.c MPT_SCSI_HOST *hd; hd 5262 drivers/message/fusion/mptsas.c hd = shost_priv(sh); hd 5263 drivers/message/fusion/mptsas.c hd->ioc = ioc; hd 5281 drivers/message/fusion/mptsas.c hd->last_queue_full = 0; hd 5282 drivers/message/fusion/mptsas.c INIT_LIST_HEAD(&hd->target_reset_list); hd 95 drivers/message/fusion/mptscsih.c static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply); hd 97 drivers/message/fusion/mptscsih.c int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, hd 108 drivers/message/fusion/mptscsih.c static int mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd); hd 109 drivers/message/fusion/mptscsih.c static void mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice); hd 591 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 598 drivers/message/fusion/mptscsih.c hd = shost_priv(ioc->sh); hd 688 drivers/message/fusion/mptscsih.c mptscsih_copy_sense_data(sc, hd, mf, pScsiReply); hd 726 drivers/message/fusion/mptscsih.c if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF) hd 727 drivers/message/fusion/mptscsih.c hd->sel_timeout[pScsiReq->TargetID]++; hd 1032 drivers/message/fusion/mptscsih.c mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) hd 1034 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 1079 drivers/message/fusion/mptscsih.c mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice) hd 1085 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 1149 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 1156 drivers/message/fusion/mptscsih.c if ((hd = shost_priv(sc->device->host)) == NULL) hd 1158 drivers/message/fusion/mptscsih.c ioc = hd->ioc; hd 1159 drivers/message/fusion/mptscsih.c if (time - hd->last_queue_full > 10 * HZ) { hd 1162 drivers/message/fusion/mptscsih.c hd->last_queue_full = time; hd 1178 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 1181 drivers/message/fusion/mptscsih.c if((hd = shost_priv(host)) == NULL) hd 1198 drivers/message/fusion/mptscsih.c kfree(hd->info_kbuf); hd 1287 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 1288 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 1315 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 1327 drivers/message/fusion/mptscsih.c hd = shost_priv(SCpnt->device->host); hd 1328 drivers/message/fusion/mptscsih.c ioc = hd->ioc; hd 1513 drivers/message/fusion/mptscsih.c mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun, hd 1520 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 1684 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 1694 drivers/message/fusion/mptscsih.c if ((hd = shost_priv(SCpnt->device->host)) == NULL) { hd 1702 drivers/message/fusion/mptscsih.c ioc = hd->ioc; hd 1769 drivers/message/fusion/mptscsih.c 
retval = mptscsih_IssueTaskMgmt(hd, hd 1807 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 1814 drivers/message/fusion/mptscsih.c if ((hd = shost_priv(SCpnt->device->host)) == NULL){ hd 1820 drivers/message/fusion/mptscsih.c ioc = hd->ioc; hd 1838 drivers/message/fusion/mptscsih.c retval = mptscsih_IssueTaskMgmt(hd, hd 1867 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 1874 drivers/message/fusion/mptscsih.c if ((hd = shost_priv(SCpnt->device->host)) == NULL){ hd 1880 drivers/message/fusion/mptscsih.c ioc = hd->ioc; hd 1891 drivers/message/fusion/mptscsih.c retval = mptscsih_IssueTaskMgmt(hd, hd 1917 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST * hd; hd 1923 drivers/message/fusion/mptscsih.c if ((hd = shost_priv(SCpnt->device->host)) == NULL){ hd 1930 drivers/message/fusion/mptscsih.c mptscsih_flush_running_cmds(hd); hd 1932 drivers/message/fusion/mptscsih.c ioc = hd->ioc; hd 2286 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 2297 drivers/message/fusion/mptscsih.c mptscsih_search_running_cmds(hd, vdevice); hd 2299 drivers/message/fusion/mptscsih.c mptscsih_synchronize_cache(hd, vdevice); hd 2315 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(sdev->host); hd 2319 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 2357 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(sh); hd 2358 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 2406 drivers/message/fusion/mptscsih.c mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply) hd 2411 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 2548 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd; hd 2553 drivers/message/fusion/mptscsih.c hd = shost_priv(ioc->sh); hd 2562 drivers/message/fusion/mptscsih.c mptscsih_flush_running_cmds(hd); hd 2776 drivers/message/fusion/mptscsih.c mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io) hd 2785 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3010 drivers/message/fusion/mptscsih.c mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice) hd 3038 drivers/message/fusion/mptscsih.c mptscsih_do_cmd(hd, &iocmd); hd 3046 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3047 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3062 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3063 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3078 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3079 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3091 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3092 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3105 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3106 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3119 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3120 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3132 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3133 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3144 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3145 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3157 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3158 drivers/message/fusion/mptscsih.c 
MPT_ADAPTER *ioc = hd->ioc; hd 3170 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3171 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3183 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3184 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3196 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3197 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 3206 drivers/message/fusion/mptscsih.c MPT_SCSI_HOST *hd = shost_priv(host); hd 3207 drivers/message/fusion/mptscsih.c MPT_ADAPTER *ioc = hd->ioc; hd 117 drivers/message/fusion/mptscsih.h extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, hd 137 drivers/message/fusion/mptscsih.h extern void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd); hd 108 drivers/message/fusion/mptspi.c mptspi_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtTarget *target, hd 111 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 254 drivers/message/fusion/mptspi.c mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id) hd 256 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 328 drivers/message/fusion/mptspi.c mptspi_initTarget(MPT_SCSI_HOST *hd, VirtTarget *vtarget, hd 343 drivers/message/fusion/mptspi.c if ((sdev->type == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) { hd 347 drivers/message/fusion/mptspi.c mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id); hd 358 drivers/message/fusion/mptspi.c mptspi_writeIOCPage4(hd, vtarget->channel, vtarget->id); hd 362 drivers/message/fusion/mptspi.c mptspi_setTargetNegoParms(hd, vtarget, sdev); hd 376 drivers/message/fusion/mptspi.c mptspi_is_raid(struct _MPT_SCSI_HOST *hd, u32 id) hd 379 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 400 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(shost); hd 404 drivers/message/fusion/mptspi.c if (hd == NULL) hd 407 drivers/message/fusion/mptspi.c ioc = hd->ioc; hd 431 drivers/message/fusion/mptspi.c mptspi_is_raid(hd, starget->id)) { hd 471 drivers/message/fusion/mptspi.c mptspi_print_write_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii) hd 473 drivers/message/fusion/mptspi.c ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Requested = 0x%08x" hd 475 drivers/message/fusion/mptspi.c hd->ioc->name, starget->id, ii, hd 496 drivers/message/fusion/mptspi.c mptspi_print_read_nego(struct _MPT_SCSI_HOST *hd, struct scsi_target *starget, u32 ii) hd 498 drivers/message/fusion/mptspi.c ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d Read = 0x%08x" hd 500 drivers/message/fusion/mptspi.c hd->ioc->name, starget->id, ii, hd 517 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(shost); hd 518 drivers/message/fusion/mptspi.c struct _MPT_ADAPTER *ioc = hd->ioc; hd 528 drivers/message/fusion/mptspi.c mptspi_is_raid(hd, starget->id)) hd 568 drivers/message/fusion/mptspi.c mptspi_print_read_nego(hd, starget, le32_to_cpu(spi_dev_pg0->NegotiatedParameters)); hd 618 drivers/message/fusion/mptspi.c mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id) hd 620 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 631 drivers/message/fusion/mptspi.c dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT hd 683 drivers/message/fusion/mptspi.c static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd, hd 687 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 691 drivers/message/fusion/mptspi.c mptspi_is_raid(hd, sdev->id)) hd 696 
drivers/message/fusion/mptspi.c mptscsih_quiesce_raid(hd, 1, vtarget->channel, vtarget->id) < 0) { hd 702 drivers/message/fusion/mptspi.c hd->spi_pending |= (1 << sdev->id); hd 704 drivers/message/fusion/mptspi.c hd->spi_pending &= ~(1 << sdev->id); hd 707 drivers/message/fusion/mptspi.c mptscsih_quiesce_raid(hd, 0, vtarget->channel, vtarget->id) < 0) hd 718 drivers/message/fusion/mptspi.c MPT_SCSI_HOST *hd = shost_priv(sdev->host); hd 722 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 751 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(sdev->host); hd 755 drivers/message/fusion/mptspi.c mptspi_initTarget(hd, vtarget, sdev); hd 762 drivers/message/fusion/mptspi.c ddvprintk(hd->ioc, printk(MYIOC_s_DEBUG_FMT "id=%d min_period=0x%02x" hd 763 drivers/message/fusion/mptspi.c " max_offset=0x%02x max_width=%d\n", hd->ioc->name, hd 769 drivers/message/fusion/mptspi.c !(mptspi_is_raid(hd, sdev->id))) && hd 771 drivers/message/fusion/mptspi.c mptspi_dv_device(hd, sdev); hd 779 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(shost); hd 781 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 853 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(shost); hd 854 drivers/message/fusion/mptspi.c struct _MPT_ADAPTER *ioc = hd->ioc; hd 868 drivers/message/fusion/mptspi.c mptspi_is_raid(hd, starget->id)) hd 920 drivers/message/fusion/mptspi.c mptspi_print_write_nego(hd, starget, le32_to_cpu(pg1->RequestedParameters)); hd 1063 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(shost); hd 1068 drivers/message/fusion/mptspi.c hd->ioc->spi_data.noQas) hd 1106 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd; hd 1114 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = wqw->hd; hd 1115 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 1143 drivers/message/fusion/mptspi.c mptspi_dv_device(hd, sdev); hd 1151 drivers/message/fusion/mptspi.c static void mpt_dv_raid(struct _MPT_SCSI_HOST *hd, int disk) hd 1154 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 1163 drivers/message/fusion/mptspi.c wqw->hd = hd; hd 1173 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 1178 drivers/message/fusion/mptspi.c if (hd && event == MPI_EVENT_INTEGRATED_RAID) { hd 1184 drivers/message/fusion/mptspi.c mpt_dv_raid(hd, disk); hd 1193 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = hd 1195 drivers/message/fusion/mptspi.c return ((mptspi_is_raid(hd, starget->id)) && hd 1260 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = wqw->hd; hd 1265 drivers/message/fusion/mptspi.c MPT_ADAPTER *ioc = hd->ioc; hd 1269 drivers/message/fusion/mptspi.c if (hd->spi_pending) { hd 1271 drivers/message/fusion/mptspi.c if (hd->spi_pending & (1 << sdev->id)) hd 1282 drivers/message/fusion/mptspi.c mptspi_dv_device(hd, sdev); hd 1287 drivers/message/fusion/mptspi.c mptspi_dv_renegotiate(struct _MPT_SCSI_HOST *hd) hd 1295 drivers/message/fusion/mptspi.c wqw->hd = hd; hd 1316 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 1318 drivers/message/fusion/mptspi.c mptspi_dv_renegotiate(hd); hd 1332 drivers/message/fusion/mptspi.c struct _MPT_SCSI_HOST *hd = shost_priv(ioc->sh); hd 1336 drivers/message/fusion/mptspi.c mptspi_dv_renegotiate(hd); hd 1355 drivers/message/fusion/mptspi.c MPT_SCSI_HOST *hd; hd 1490 drivers/message/fusion/mptspi.c hd = shost_priv(sh); hd 1491 drivers/message/fusion/mptspi.c hd->ioc = ioc; hd 
1513 drivers/message/fusion/mptspi.c hd->last_queue_full = 0; hd 1514 drivers/message/fusion/mptspi.c hd->spi_pending = 0; hd 1532 drivers/message/fusion/mptspi.c mptscsih_IssueTaskMgmt(hd, hd 1142 drivers/mfd/si476x-cmd.c (tuneargs->hd << 6), hd 1343 drivers/mfd/si476x-cmd.c (tuneargs->hd << 6) | (tuneargs->tunemode << 4) hd 1361 drivers/mfd/si476x-cmd.c (tuneargs->hd << 6) | (tuneargs->tunemode << 4) hd 1065 drivers/mtd/mtdswap.c union swap_header *hd = (union swap_header *)(buf); hd 1069 drivers/mtd/mtdswap.c hd->info.version = 1; hd 1070 drivers/mtd/mtdswap.c hd->info.last_page = d->mbd_dev->size - 1; hd 1071 drivers/mtd/mtdswap.c hd->info.nr_badpages = 0; hd 116 drivers/scsi/aacraid/aachba.c } __attribute__((packed)) hd; hd 134 drivers/scsi/aacraid/aachba.c } __attribute__((packed)) hd; hd 3133 drivers/scsi/aacraid/aachba.c mpd.hd.data_length = sizeof(mpd.hd) - 1; hd 3135 drivers/scsi/aacraid/aachba.c mpd.hd.med_type = 0; hd 3139 drivers/scsi/aacraid/aachba.c mpd.hd.dev_par = 0; hd 3142 drivers/scsi/aacraid/aachba.c mpd.hd.dev_par = 0x10; hd 3144 drivers/scsi/aacraid/aachba.c mpd.hd.bd_length = 0; /* Block descriptor length */ hd 3146 drivers/scsi/aacraid/aachba.c mpd.hd.bd_length = sizeof(mpd.bd); hd 3147 drivers/scsi/aacraid/aachba.c mpd.hd.data_length += mpd.hd.bd_length; hd 3160 drivers/scsi/aacraid/aachba.c mpd.hd.data_length = 23; hd 3163 drivers/scsi/aacraid/aachba.c mpd.hd.data_length = 15; hd 3178 drivers/scsi/aacraid/aachba.c mpd.hd.data_length += 3; hd 3211 drivers/scsi/aacraid/aachba.c mpd10.hd.data_length[0] = 0; hd 3213 drivers/scsi/aacraid/aachba.c mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1; hd 3215 drivers/scsi/aacraid/aachba.c mpd10.hd.med_type = 0; hd 3219 drivers/scsi/aacraid/aachba.c mpd10.hd.dev_par = 0; hd 3222 drivers/scsi/aacraid/aachba.c mpd10.hd.dev_par = 0x10; hd 3223 drivers/scsi/aacraid/aachba.c mpd10.hd.rsrvd[0] = 0; /* reserved */ hd 3224 drivers/scsi/aacraid/aachba.c mpd10.hd.rsrvd[1] = 0; /* reserved */ hd 3227 drivers/scsi/aacraid/aachba.c mpd10.hd.bd_length[0] = 0; hd 3229 drivers/scsi/aacraid/aachba.c mpd10.hd.bd_length[1] = 0; hd 3231 drivers/scsi/aacraid/aachba.c mpd10.hd.bd_length[0] = 0; hd 3232 drivers/scsi/aacraid/aachba.c mpd10.hd.bd_length[1] = sizeof(mpd10.bd); hd 3234 drivers/scsi/aacraid/aachba.c mpd10.hd.data_length[1] += mpd10.hd.bd_length[1]; hd 3258 drivers/scsi/aacraid/aachba.c mpd10.hd.data_length[1] += 3; hd 1297 drivers/scsi/aha152x.c struct aha152x_hostdata *hd; hd 1299 drivers/scsi/aha152x.c list_for_each_entry(hd, &aha152x_host_list, host_list) { hd 1300 drivers/scsi/aha152x.c struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata); hd 3338 drivers/scsi/aha152x.c struct aha152x_hostdata *hd, *tmp; hd 3340 drivers/scsi/aha152x.c list_for_each_entry_safe(hd, tmp, &aha152x_host_list, host_list) { hd 3341 drivers/scsi/aha152x.c struct Scsi_Host *shost = container_of((void *)hd, struct Scsi_Host, hostdata); hd 1926 drivers/scsi/gdth.c mpd.hd.data_length = sizeof(gdth_modep_data); hd 1927 drivers/scsi/gdth.c mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 
0x80:0; hd 1928 drivers/scsi/gdth.c mpd.hd.bd_length = sizeof(mpd.bd); hd 961 drivers/scsi/gdth.h } __attribute__((packed)) hd; hd 200 drivers/scsi/lpfc/lpfc_crtn.h int lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *hd, hd 66 drivers/scsi/sgiwd93.c void fill_hpc_entries(struct ip22_hostdata *hd, struct scsi_cmnd *cmd, int din) hd 74 drivers/scsi/sgiwd93.c physaddr = dma_map_single(hd->dev, addr, len, DMA_DIR(din)); hd 76 drivers/scsi/sgiwd93.c hcp = hd->cpu; hd 98 drivers/scsi/sgiwd93.c dma_cache_sync(hd->dev, hd->cpu, hd 99 drivers/scsi/sgiwd93.c (unsigned long)(hcp + 1) - (unsigned long)hd->cpu, hd 1918 drivers/scsi/wd33c93.c static inline void set_resync ( struct WD33C93_hostdata *hd, int mask ) hd 1923 drivers/scsi/wd33c93.c hd->sync_stat[i] = SS_UNSET; hd 2056 drivers/scsi/wd33c93.c struct WD33C93_hostdata *hd; hd 2059 drivers/scsi/wd33c93.c hd = (struct WD33C93_hostdata *) instance->hostdata; hd 2080 drivers/scsi/wd33c93.c hd->args = simple_strtoul(bp+6, &bp, 0) & DB_MASK; hd 2085 drivers/scsi/wd33c93.c hd->disconnect = x; hd 2088 drivers/scsi/wd33c93.c hd->default_sx_per = hd 2089 drivers/scsi/wd33c93.c hd->sx_table[round_period((unsigned int) x, hd 2090 drivers/scsi/wd33c93.c hd->sx_table)].period_ns; hd 2092 drivers/scsi/wd33c93.c set_resync(hd, (int)simple_strtoul(bp+7, &bp, 0)); hd 2094 drivers/scsi/wd33c93.c hd->proc = simple_strtoul(bp+5, &bp, 0); hd 2096 drivers/scsi/wd33c93.c hd->no_dma = simple_strtoul(bp+6, &bp, 0); hd 2098 drivers/scsi/wd33c93.c hd->level2 = simple_strtoul(bp+7, &bp, 0); hd 2100 drivers/scsi/wd33c93.c hd->dma_mode = hd 2104 drivers/scsi/wd33c93.c if (x != hd->fast) hd 2105 drivers/scsi/wd33c93.c set_resync(hd, 0xff); hd 2106 drivers/scsi/wd33c93.c hd->fast = x; hd 2109 drivers/scsi/wd33c93.c set_resync(hd, x ^ hd->no_sync); hd 2110 drivers/scsi/wd33c93.c hd->no_sync = x; hd 2125 drivers/scsi/wd33c93.c struct WD33C93_hostdata *hd; hd 2129 drivers/scsi/wd33c93.c hd = (struct WD33C93_hostdata *) instance->hostdata; hd 2131 drivers/scsi/wd33c93.c spin_lock_irq(&hd->lock); hd 2132 drivers/scsi/wd33c93.c if (hd->proc & PR_VERSION) hd 2136 drivers/scsi/wd33c93.c if (hd->proc & PR_INFO) { hd 2139 drivers/scsi/wd33c93.c hd->clock_freq, hd->no_sync, hd->no_dma, hd->dma_mode, hd->fast); hd 2142 drivers/scsi/wd33c93.c seq_printf(m, "\t%02x", hd->sync_xfer[x]); hd 2145 drivers/scsi/wd33c93.c seq_printf(m, "\t%02x", hd->sync_stat[x]); hd 2148 drivers/scsi/wd33c93.c if (hd->proc & PR_STATISTICS) { hd 2151 drivers/scsi/wd33c93.c seq_printf(m, "\t%ld", hd->cmd_cnt[x]); hd 2154 drivers/scsi/wd33c93.c seq_printf(m, "\t%ld", hd->disc_allowed_cnt[x]); hd 2157 drivers/scsi/wd33c93.c seq_printf(m, "\t%ld", hd->disc_done_cnt[x]); hd 2160 drivers/scsi/wd33c93.c hd->int_cnt, hd->dma_cnt, hd->pio_cnt); hd 2163 drivers/scsi/wd33c93.c if (hd->proc & PR_CONNECTED) { hd 2165 drivers/scsi/wd33c93.c if (hd->connected) { hd 2166 drivers/scsi/wd33c93.c cmd = (struct scsi_cmnd *) hd->connected; hd 2171 drivers/scsi/wd33c93.c if (hd->proc & PR_INPUTQ) { hd 2173 drivers/scsi/wd33c93.c cmd = (struct scsi_cmnd *) hd->input_Q; hd 2180 drivers/scsi/wd33c93.c if (hd->proc & PR_DISCQ) { hd 2182 drivers/scsi/wd33c93.c cmd = (struct scsi_cmnd *) hd->disconnected_Q; hd 2190 drivers/scsi/wd33c93.c spin_unlock_irq(&hd->lock); hd 24 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 45 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 62 drivers/staging/greybus/audio_apbridgea.c ret = 
gb_hd_output(connection->hd, &req, sizeof(req), hd 80 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 93 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 107 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 119 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 132 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 146 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 159 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 172 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 184 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 197 drivers/staging/greybus/audio_apbridgea.c return gb_hd_output(connection->hd, &req, sizeof(req), hd 206 drivers/staging/greybus/camera.c dev_err(&connection->hd->dev, hd 308 drivers/staging/greybus/camera.c struct gb_svc *svc = gcam->connection->hd->svc; hd 339 drivers/staging/greybus/camera.c struct gb_svc *svc = gcam->connection->hd->svc; hd 435 drivers/staging/greybus/camera.c ret = gb_hd_output(gcam->connection->hd, &csi_cfg, hd 469 drivers/staging/greybus/camera.c ret = gb_hd_output(gcam->connection->hd, &csi_cfg, hd 81 drivers/staging/greybus/gbphy.c struct gb_host_device *hd = intf->hd; hd 83 drivers/staging/greybus/gbphy.c if (add_uevent_var(env, "BUS=%u", hd->bus_id)) hd 176 drivers/video/fbdev/asiliantfb.c unsigned hd = p->var.xres / 8; hd 194 drivers/video/fbdev/asiliantfb.c write_cr(0x01, hd - 1); hd 195 drivers/video/fbdev/asiliantfb.c write_cr(0x02, hd); hd 192 drivers/video/fbdev/matrox/matroxfb_misc.c unsigned int hd, hs, he, hbe, ht; hd 240 drivers/video/fbdev/matrox/matroxfb_misc.c hd = m->HDisplay >> 3; hd 249 drivers/video/fbdev/matrox/matroxfb_misc.c hd >>= 1; hd 258 drivers/video/fbdev/matrox/matroxfb_misc.c hd <<= 1; hd 264 drivers/video/fbdev/matrox/matroxfb_misc.c hd = hd - 1; hd 290 drivers/video/fbdev/matrox/matroxfb_misc.c ((hd & 0x100) >> 7) | /* blanking */ hd 305 drivers/video/fbdev/matrox/matroxfb_misc.c hw->CRTC[1] = hd; hd 306 drivers/video/fbdev/matrox/matroxfb_misc.c hw->CRTC[2] = hd; hd 303 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c static void hdmi_start_audio_stream(struct omap_hdmi *hd) hd 305 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hdmi_wp_audio_enable(&hd->wp, true); hd 306 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hdmi4_audio_start(&hd->core, &hd->wp); hd 309 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c static void hdmi_stop_audio_stream(struct omap_hdmi *hd) hd 311 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hdmi4_audio_stop(&hd->core, &hd->wp); hd 312 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hdmi_wp_audio_enable(&hd->wp, false); hd 555 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 558 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c mutex_lock(&hd->lock); hd 560 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { hd 565 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_abort_cb = abort_cb; hd 568 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c mutex_unlock(&hd->lock); hd 575 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); 
hd 577 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c mutex_lock(&hd->lock); hd 578 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_abort_cb = NULL; hd 579 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_configured = false; hd 580 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_playing = false; hd 581 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c mutex_unlock(&hd->lock); hd 588 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 591 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); hd 593 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 595 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c if (hd->display_enabled) hd 596 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hdmi_start_audio_stream(hd); hd 597 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_playing = true; hd 599 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 605 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 608 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); hd 610 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 612 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c if (hd->display_enabled) hd 613 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hdmi_stop_audio_stream(hd); hd 614 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_playing = false; hd 616 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 622 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 625 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c mutex_lock(&hd->lock); hd 627 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { hd 632 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio, hd 633 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->cfg.timings.pixelclock); hd 635 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_configured = true; hd 636 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c hd->audio_config = *dss_audio; hd 639 drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c mutex_unlock(&hd->lock); hd 331 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c static void hdmi_start_audio_stream(struct omap_hdmi *hd) hd 334 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hdmi_wp_audio_enable(&hd->wp, true); hd 335 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hdmi_wp_audio_core_req_enable(&hd->wp, true); hd 338 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c static void hdmi_stop_audio_stream(struct omap_hdmi *hd) hd 340 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hdmi_wp_audio_core_req_enable(&hd->wp, false); hd 341 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hdmi_wp_audio_enable(&hd->wp, false); hd 342 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2); hd 585 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 588 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c mutex_lock(&hd->lock); hd 590 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { hd 595 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_abort_cb = abort_cb; hd 598 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c mutex_unlock(&hd->lock); hd 605 
drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 607 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c mutex_lock(&hd->lock); hd 608 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_abort_cb = NULL; hd 609 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_configured = false; hd 610 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_playing = false; hd 611 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c mutex_unlock(&hd->lock); hd 618 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 621 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); hd 623 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 625 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c if (hd->display_enabled) hd 626 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hdmi_start_audio_stream(hd); hd 627 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_playing = true; hd 629 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 635 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 638 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c WARN_ON(!hdmi_mode_has_audio(&hd->cfg)); hd 640 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c spin_lock_irqsave(&hd->audio_playing_lock, flags); hd 642 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c if (hd->display_enabled) hd 643 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hdmi_stop_audio_stream(hd); hd 644 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_playing = false; hd 646 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c spin_unlock_irqrestore(&hd->audio_playing_lock, flags); hd 652 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c struct omap_hdmi *hd = dev_get_drvdata(dev); hd 655 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c mutex_lock(&hd->lock); hd 657 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) { hd 662 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio, hd 663 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->cfg.timings.pixelclock); hd 666 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_configured = true; hd 667 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c hd->audio_config = *dss_audio; hd 670 drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c mutex_unlock(&hd->lock); hd 551 drivers/video/fbdev/tdfxfb.c u32 hd, hs, he, ht, hbs, hbe; hd 581 drivers/video/fbdev/tdfxfb.c hd = wd; hd 585 drivers/video/fbdev/tdfxfb.c hbs = hd; hd 644 drivers/video/fbdev/tdfxfb.c reg.crt[0x01] = hd; hd 669 drivers/video/fbdev/tdfxfb.c ((hd & 0x100) >> 6) | hd 5023 fs/dlm/lock.c struct dlm_header *hd = &p->header; hd 5027 fs/dlm/lock.c switch (hd->h_cmd) { hd 5037 fs/dlm/lock.c log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid); hd 5041 fs/dlm/lock.c if (hd->h_nodeid != nodeid) { hd 5043 fs/dlm/lock.c hd->h_nodeid, nodeid, hd->h_lockspace); hd 5047 fs/dlm/lock.c ls = dlm_find_lockspace_global(hd->h_lockspace); hd 5052 fs/dlm/lock.c hd->h_lockspace, nodeid, hd->h_cmd, type); hd 5055 fs/dlm/lock.c if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS) hd 5064 fs/dlm/lock.c if (hd->h_cmd == DLM_MSG) hd 23 fs/dlm/util.c static void header_out(struct dlm_header *hd) hd 25 fs/dlm/util.c hd->h_version = cpu_to_le32(hd->h_version); hd 26 fs/dlm/util.c hd->h_lockspace = cpu_to_le32(hd->h_lockspace); hd 27 fs/dlm/util.c hd->h_nodeid = 
cpu_to_le32(hd->h_nodeid); hd 28 fs/dlm/util.c hd->h_length = cpu_to_le16(hd->h_length); hd 31 fs/dlm/util.c static void header_in(struct dlm_header *hd) hd 33 fs/dlm/util.c hd->h_version = le32_to_cpu(hd->h_version); hd 34 fs/dlm/util.c hd->h_lockspace = le32_to_cpu(hd->h_lockspace); hd 35 fs/dlm/util.c hd->h_nodeid = le32_to_cpu(hd->h_nodeid); hd 36 fs/dlm/util.c hd->h_length = le16_to_cpu(hd->h_length); hd 134 include/drm/drm_modes.h #define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ hd 136 include/drm/drm_modes.h .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ hd 151 include/drm/drm_modes.h #define DRM_SIMPLE_MODE(hd, vd, hd_mm, vd_mm) \ hd 153 include/drm/drm_modes.h .hdisplay = (hd), .hsync_start = (hd), .hsync_end = (hd), \ hd 154 include/drm/drm_modes.h .htotal = (hd), .vdisplay = (vd), .vsync_start = (vd), \ hd 622 include/linux/genhd.h extern char *disk_name (struct gendisk *hd, int partno, char *buf); hd 146 include/linux/greybus.h static inline bool cport_id_valid(struct gb_host_device *hd, u16 cport_id) hd 148 include/linux/greybus.h return cport_id != CPORT_ID_BAD && cport_id < hd->num_cports; hd 39 include/linux/greybus/connection.h struct gb_host_device *hd; hd 67 include/linux/greybus/connection.h struct gb_connection *gb_connection_create_static(struct gb_host_device *hd, hd 93 include/linux/greybus/connection.h void greybus_data_rcvd(struct gb_host_device *hd, u16 cport_id, hd 21 include/linux/greybus/hd.h int (*cport_allocate)(struct gb_host_device *hd, int cport_id, hd 23 include/linux/greybus/hd.h void (*cport_release)(struct gb_host_device *hd, u16 cport_id); hd 24 include/linux/greybus/hd.h int (*cport_enable)(struct gb_host_device *hd, u16 cport_id, hd 26 include/linux/greybus/hd.h int (*cport_disable)(struct gb_host_device *hd, u16 cport_id); hd 27 include/linux/greybus/hd.h int (*cport_connected)(struct gb_host_device *hd, u16 cport_id); hd 28 include/linux/greybus/hd.h int (*cport_flush)(struct gb_host_device *hd, u16 cport_id); hd 29 include/linux/greybus/hd.h int (*cport_shutdown)(struct gb_host_device *hd, u16 cport_id, hd 31 include/linux/greybus/hd.h int (*cport_quiesce)(struct gb_host_device *hd, u16 cport_id, hd 33 include/linux/greybus/hd.h int (*cport_clear)(struct gb_host_device *hd, u16 cport_id); hd 35 include/linux/greybus/hd.h int (*message_send)(struct gb_host_device *hd, u16 dest_cport_id, hd 38 include/linux/greybus/hd.h int (*latency_tag_enable)(struct gb_host_device *hd, u16 cport_id); hd 39 include/linux/greybus/hd.h int (*latency_tag_disable)(struct gb_host_device *hd, u16 cport_id); hd 40 include/linux/greybus/hd.h int (*output)(struct gb_host_device *hd, void *req, u16 size, u8 cmd, hd 65 include/linux/greybus/hd.h int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id); hd 66 include/linux/greybus/hd.h void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id); hd 67 include/linux/greybus/hd.h int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id, hd 69 include/linux/greybus/hd.h void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id); hd 75 include/linux/greybus/hd.h int gb_hd_add(struct gb_host_device *hd); hd 76 include/linux/greybus/hd.h void gb_hd_del(struct gb_host_device *hd); hd 77 include/linux/greybus/hd.h void gb_hd_shutdown(struct gb_host_device *hd); hd 78 include/linux/greybus/hd.h void gb_hd_put(struct gb_host_device *hd); hd 79 include/linux/greybus/hd.h int gb_hd_output(struct gb_host_device *hd, void *req, u16 size, u8 cmd, hd 50 
include/linux/greybus/interface.h struct gb_host_device *hd; hd 17 include/linux/greybus/module.h struct gb_host_device *hd; hd 30 include/linux/greybus/module.h struct gb_module *gb_module_create(struct gb_host_device *hd, u8 module_id, hd 188 include/linux/greybus/operation.h void greybus_message_sent(struct gb_host_device *hd, hd 42 include/linux/greybus/svc.h struct gb_host_device *hd; hd 62 include/linux/greybus/svc.h struct gb_svc *gb_svc_create(struct gb_host_device *hd); hd 103 include/linux/hpet.h static inline void hpet_reserve_timer(struct hpet_data *hd, int timer) hd 105 include/linux/hpet.h hd->hd_state |= (1 << timer); hd 354 include/linux/mfd/si476x-core.h bool hd; hd 939 kernel/trace/ring_buffer.c struct list_head *hd; hd 944 kernel/trace/ring_buffer.c list_for_each(hd, cpu_buffer->pages) hd 945 kernel/trace/ring_buffer.c rb_list_head_clear(hd); hd 893 lib/vsprintf.c struct gendisk *hd; hd 898 lib/vsprintf.c hd = bdev->bd_disk; hd 899 lib/vsprintf.c buf = string(buf, end, hd->disk_name, spec); hd 901 lib/vsprintf.c if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) { hd 39 net/smc/smc_llc.c struct smc_llc_hdr hd; hd 55 net/smc/smc_llc.c struct smc_llc_hdr hd; hd 70 net/smc/smc_llc.c struct smc_llc_hdr hd; hd 77 net/smc/smc_llc.c struct smc_llc_hdr hd; hd 96 net/smc/smc_llc.c struct smc_llc_hdr hd; hd 102 net/smc/smc_llc.c struct smc_llc_hdr hd; hd 111 net/smc/smc_llc.c struct smc_llc_hdr hd; hd 200 net/smc/smc_llc.c confllc->hd.common.type = SMC_LLC_CONFIRM_LINK; hd 201 net/smc/smc_llc.c confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link); hd 202 net/smc/smc_llc.c confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC; hd 204 net/smc/smc_llc.c confllc->hd.flags |= SMC_LLC_FLAG_RESP; hd 231 net/smc/smc_llc.c rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY; hd 232 net/smc/smc_llc.c rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey); hd 256 net/smc/smc_llc.c rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY; hd 257 net/smc/smc_llc.c rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey); hd 271 net/smc/smc_llc.c addllc->hd.common.type = SMC_LLC_ADD_LINK; hd 272 net/smc/smc_llc.c addllc->hd.length = sizeof(struct smc_llc_msg_add_link); hd 274 net/smc/smc_llc.c addllc->hd.flags |= SMC_LLC_FLAG_RESP; hd 276 net/smc/smc_llc.c addllc->hd.flags |= SMC_LLC_FLAG_ADD_LNK_REJ; hd 277 net/smc/smc_llc.c addllc->hd.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH; hd 308 net/smc/smc_llc.c delllc->hd.common.type = SMC_LLC_DELETE_LINK; hd 309 net/smc/smc_llc.c delllc->hd.length = sizeof(struct smc_llc_msg_add_link); hd 311 net/smc/smc_llc.c delllc->hd.flags |= SMC_LLC_FLAG_RESP; hd 313 net/smc/smc_llc.c delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL; hd 315 net/smc/smc_llc.c delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY; hd 351 net/smc/smc_llc.c testllc->hd.common.type = SMC_LLC_TEST_LINK; hd 352 net/smc/smc_llc.c testllc->hd.length = sizeof(struct smc_llc_msg_test_link); hd 410 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_NO_RMBE_EYEC) hd 415 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_RESP) { hd 436 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_RESP) { hd 464 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_RESP) { hd 485 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_RESP) { hd 489 net/smc/smc_llc.c llc->hd.flags |= SMC_LLC_FLAG_RESP; hd 499 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_RESP) { hd 500 net/smc/smc_llc.c link->llc_confirm_rkey_rc = llc->hd.flags & hd 510 net/smc/smc_llc.c llc->hd.flags |= SMC_LLC_FLAG_RESP; hd 512 net/smc/smc_llc.c 
llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; hd 520 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_RESP) { hd 524 net/smc/smc_llc.c llc->hd.flags |= SMC_LLC_FLAG_RESP; hd 535 net/smc/smc_llc.c if (llc->hd.flags & SMC_LLC_FLAG_RESP) { hd 536 net/smc/smc_llc.c link->llc_delete_rkey_rc = llc->hd.flags & hd 547 net/smc/smc_llc.c llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG; hd 551 net/smc/smc_llc.c llc->hd.flags |= SMC_LLC_FLAG_RESP; hd 120 net/tipc/name_table.c static struct tipc_service *tipc_service_create(u32 type, struct hlist_head *hd) hd 134 net/tipc/name_table.c hlist_add_head_rcu(&service->service_list, hd); hd 2880 tools/perf/util/header.c struct header_print_data *hd = data; hd 2900 tools/perf/util/header.c if (!feat_ops[feat].full_only || hd->full) hd 2901 tools/perf/util/header.c feat_ops[feat].print(&ff, hd->fp); hd 2903 tools/perf/util/header.c fprintf(hd->fp, "# %s info available, use -I to display\n", hd 2911 tools/perf/util/header.c struct header_print_data hd; hd 2918 tools/perf/util/header.c hd.fp = fp; hd 2919 tools/perf/util/header.c hd.full = full; hd 2933 tools/perf/util/header.c perf_header__process_sections(header, fd, &hd, hd 1917 tools/perf/util/sort.c static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd, hd 1922 tools/perf/util/sort.c fmt = memdup(hd->fmt, sizeof(*fmt)); hd 2518 tools/perf/util/sort.c static int __hpp_dimension__add(struct hpp_dimension *hd, hd 2524 tools/perf/util/sort.c if (hd->taken) hd 2527 tools/perf/util/sort.c fmt = __hpp_dimension__alloc_hpp(hd, level); hd 2531 tools/perf/util/sort.c hd->taken = 1; hd 2550 tools/perf/util/sort.c struct hpp_dimension *hd) hd 2554 tools/perf/util/sort.c if (hd->taken) hd 2557 tools/perf/util/sort.c fmt = __hpp_dimension__alloc_hpp(hd, 0); hd 2561 tools/perf/util/sort.c hd->taken = 1; hd 2619 tools/perf/util/sort.c struct hpp_dimension *hd = &hpp_sort_dimensions[i]; hd 2621 tools/perf/util/sort.c if (strncasecmp(tok, hd->name, strlen(tok))) hd 2624 tools/perf/util/sort.c return __hpp_dimension__add(hd, list, level); hd 2963 tools/perf/util/sort.c struct hpp_dimension *hd = &hpp_sort_dimensions[i]; hd 2965 tools/perf/util/sort.c if (strncasecmp(tok, hd->name, strlen(tok))) hd 2968 tools/perf/util/sort.c return __hpp_dimension__add_output(list, hd);
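
The fs/dlm/util.c fragments quoted above (header_out()/header_in()) illustrate a pattern that recurs throughout this listing: a message carries a common header `hd` whose multi-byte fields are converted to little-endian byte order before transmission and back to CPU order on receipt. Below is a minimal userspace sketch of that pattern, not the dlm implementation itself: the struct layout is a simplified assumption, and glibc's htole32()/le32toh()/htole16()/le16toh() stand in for the kernel's cpu_to_le32()/le32_to_cpu() helpers quoted in the listing.

/* Sketch only: hypothetical header layout, userspace endian helpers. */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct wire_header {            /* assumed layout, for illustration only */
	uint32_t h_version;
	uint32_t h_lockspace;
	uint32_t h_nodeid;
	uint16_t h_length;
	uint8_t  h_cmd;
	uint8_t  h_pad;
};

static void header_out(struct wire_header *hd)   /* CPU -> on-wire order */
{
	hd->h_version   = htole32(hd->h_version);
	hd->h_lockspace = htole32(hd->h_lockspace);
	hd->h_nodeid    = htole32(hd->h_nodeid);
	hd->h_length    = htole16(hd->h_length);
}

static void header_in(struct wire_header *hd)    /* on-wire -> CPU order */
{
	hd->h_version   = le32toh(hd->h_version);
	hd->h_lockspace = le32toh(hd->h_lockspace);
	hd->h_nodeid    = le32toh(hd->h_nodeid);
	hd->h_length    = le16toh(hd->h_length);
}

int main(void)
{
	struct wire_header hd = { .h_version = 0x010203, .h_lockspace = 42,
				  .h_nodeid = 7, .h_length = sizeof(hd) };

	header_out(&hd);   /* before handing the buffer to the transport */
	header_in(&hd);    /* after receiving the buffer from the wire   */
	printf("nodeid=%u length=%u\n",
	       (unsigned)hd.h_nodeid, (unsigned)hd.h_length);
	return 0;
}

The same conversion-at-the-boundary idea is visible in the net/smc/smc_llc.c entries above, where every LLC message variant embeds a `struct smc_llc_hdr hd` as its first member and the sender fills hd.common.type, hd.length and hd.flags before posting the message.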