csd 110 arch/ia64/kernel/minstate.h mov r9=ar.csd; \
csd 695 arch/mips/kernel/process.c call_single_data_t *csd;
csd 711 arch/mips/kernel/process.c csd = &per_cpu(backtrace_csd, cpu);
csd 712 arch/mips/kernel/process.c csd->func = handle_backtrace;
csd 713 arch/mips/kernel/process.c smp_call_function_single_async(cpu, csd);
csd 705 arch/mips/kernel/smp.c call_single_data_t *csd;
csd 710 arch/mips/kernel/smp.c csd = &per_cpu(tick_broadcast_csd, cpu);
csd 713 arch/mips/kernel/smp.c smp_call_function_single_async(cpu, csd);
csd 726 arch/mips/kernel/smp.c call_single_data_t *csd;
csd 730 arch/mips/kernel/smp.c csd = &per_cpu(tick_broadcast_csd, cpu);
csd 731 arch/mips/kernel/smp.c csd->func = tick_broadcast_callee;
csd 443 arch/s390/kernel/perf_cpum_cf_diag.c static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth)
csd 451 arch/s390/kernel/perf_cpum_cf_diag.c ctrstart = (struct cf_ctrset_entry *)(csd->start + offset);
csd 452 arch/s390/kernel/perf_cpum_cf_diag.c ctrstop = (struct cf_ctrset_entry *)(csd->data + offset);
csd 473 arch/s390/kernel/perf_cpum_cf_diag.c trailer_start = (struct cf_trailer_entry *)(csd->start + offset);
csd 474 arch/s390/kernel/perf_cpum_cf_diag.c trailer_stop = (struct cf_trailer_entry *)(csd->data + offset);
csd 488 arch/s390/kernel/perf_cpum_cf_diag.c struct cf_diag_csd *csd)
csd 503 arch/s390/kernel/perf_cpum_cf_diag.c raw.frag.size = csd->used;
csd 504 arch/s390/kernel/perf_cpum_cf_diag.c raw.frag.data = csd->data;
csd 505 arch/s390/kernel/perf_cpum_cf_diag.c raw.size = csd->used;
csd 524 arch/s390/kernel/perf_cpum_cf_diag.c struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
csd 538 arch/s390/kernel/perf_cpum_cf_diag.c csd->used = cf_diag_getctr(csd->start, sizeof(csd->start),
csd 547 arch/s390/kernel/perf_cpum_cf_diag.c struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
csd 557 arch/s390/kernel/perf_cpum_cf_diag.c csd->used = cf_diag_getctr(csd->data, sizeof(csd->data),
csd 559 arch/s390/kernel/perf_cpum_cf_diag.c if (cf_diag_diffctr(csd, event->hw.config_base))
csd 560 arch/s390/kernel/perf_cpum_cf_diag.c cf_diag_push_sample(event, csd);
csd 145 arch/s390/pci/pci_irq.c call_single_data_t csd;
csd 181 arch/s390/pci/pci_irq.c cpu_data->csd.func = zpci_handle_remote_irq;
csd 182 arch/s390/pci/pci_irq.c cpu_data->csd.info = &cpu_data->scheduled;
csd 183 arch/s390/pci/pci_irq.c cpu_data->csd.flags = 0;
csd 184 arch/s390/pci/pci_irq.c smp_call_function_single_async(cpu, &cpu_data->csd);
csd 77 arch/x86/kernel/cpuid.c call_single_data_t csd = {
csd 85 arch/x86/kernel/cpuid.c err = smp_call_function_single_async(cpu, &csd);
csd 172 arch/x86/lib/msr-smp.c call_single_data_t csd = {
csd 182 arch/x86/lib/msr-smp.c err = smp_call_function_single_async(cpu, &csd);
csd 619 block/blk-mq.c rq->csd.func = __blk_mq_complete_request_remote;
csd 620 block/blk-mq.c rq->csd.info = rq;
csd 621 block/blk-mq.c rq->csd.flags = 0;
csd 622 block/blk-mq.c smp_call_function_single_async(ctx->cpu, &rq->csd);
csd 64 block/blk-softirq.c call_single_data_t *data = &rq->csd;
csd 19 drivers/block/null_blk.h struct __call_single_data csd;
csd 232 drivers/bus/qcom-ebi2.c const struct cs_data *csd;
csd 238 drivers/bus/qcom-ebi2.c csd = &cs_info[csindex];
csd 240 drivers/bus/qcom-ebi2.c val |= csd->enable_mask;
csd 287 drivers/bus/qcom-ebi2.c writel(slowcfg, ebi2_xmem + csd->slow_cfg);
csd 289 drivers/bus/qcom-ebi2.c writel(fastcfg, ebi2_xmem + csd->fast_cfg);
csd 333 drivers/cpuidle/coupled.c call_single_data_t *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
csd 336 drivers/cpuidle/coupled.c smp_call_function_single_async(cpu, csd);
csd 645 drivers/cpuidle/coupled.c call_single_data_t *csd;
csd 675 drivers/cpuidle/coupled.c csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
csd 676 drivers/cpuidle/coupled.c csd->func = cpuidle_coupled_handle_poke;
csd 677 drivers/cpuidle/coupled.c csd->info = (void *)(unsigned long)dev->cpu;
csd 102 drivers/input/keyboard/qt1050.c unsigned int csd;
csd 129 drivers/input/keyboard/qt1050.c .csd = QT1050_CSD_0,
csd 134 drivers/input/keyboard/qt1050.c .csd = QT1050_CSD_1,
csd 139 drivers/input/keyboard/qt1050.c .csd = QT1050_CSD_2,
csd 144 drivers/input/keyboard/qt1050.c .csd = QT1050_CSD_3,
csd 149 drivers/input/keyboard/qt1050.c .csd = QT1050_CSD_4,
csd 330 drivers/input/keyboard/qt1050.c err = regmap_write(map, key_regs->csd, button->charge_delay);
csd 428 drivers/media/platform/qcom/camss/camss.c struct camss_async_subdev *csd)
csd 430 drivers/media/platform/qcom/camss/camss.c struct csiphy_lanes_cfg *lncfg = &csd->interface.csi2.lane_cfg;
csd 437 drivers/media/platform/qcom/camss/camss.c csd->interface.csiphy_id = vep.base.port;
csd 473 drivers/media/platform/qcom/camss/camss.c struct camss_async_subdev *csd;
csd 488 drivers/media/platform/qcom/camss/camss.c sizeof(*csd));
csd 495 drivers/media/platform/qcom/camss/camss.c csd = container_of(asd, struct camss_async_subdev, asd);
csd 497 drivers/media/platform/qcom/camss/camss.c ret = camss_of_parse_endpoint_node(dev, node, csd);
csd 742 drivers/media/platform/qcom/camss/camss.c struct camss_async_subdev *csd =
csd 744 drivers/media/platform/qcom/camss/camss.c u8 id = csd->interface.csiphy_id;
csd 747 drivers/media/platform/qcom/camss/camss.c csiphy->cfg.csi2 = &csd->interface.csi2;
csd 2243 drivers/mmc/core/block.c !(card->csd.cmdclass & CCC_BLOCK_WRITE);
csd 2345 drivers/mmc/core/block.c card->csd.mmca_vsn >= CSD_SPEC_VER_3) ||
csd 2385 drivers/mmc/core/block.c size = (typeof(sector_t))card->csd.capacity
csd 2386 drivers/mmc/core/block.c << (card->csd.read_blkbits - 9);
csd 2875 drivers/mmc/core/block.c if (!(card->csd.cmdclass & CCC_BLOCK_READ))
csd 688 drivers/mmc/core/core.c mult <<= card->csd.r2w_factor;
csd 690 drivers/mmc/core/core.c data->timeout_ns = card->csd.taac_ns * mult;
csd 691 drivers/mmc/core/core.c data->timeout_clks = card->csd.taac_clks * mult;
csd 1530 drivers/mmc/core/core.c sz = (card->csd.capacity << (card->csd.read_blkbits - 9)) >> 11;
csd 1566 drivers/mmc/core/core.c unsigned int mult = (10 << card->csd.r2w_factor);
csd 1567 drivers/mmc/core/core.c unsigned int timeout_clks = card->csd.taac_clks * mult;
csd 1571 drivers/mmc/core/core.c if (card->csd.taac_ns < 1000000)
csd 1572 drivers/mmc/core/core.c timeout_us = (card->csd.taac_ns * mult) / 1000;
csd 1574 drivers/mmc/core/core.c timeout_us = (card->csd.taac_ns / 1000) * mult;
csd 1868 drivers/mmc/core/core.c !(card->csd.cmdclass & CCC_ERASE))
csd 1925 drivers/mmc/core/core.c (card->csd.cmdclass & CCC_ERASE) && card->erase_size)
csd 76 drivers/mmc/core/mmc.c switch (card->csd.mmca_vsn) {
csd 113 drivers/mmc/core/mmc.c mmc_hostname(card->host), card->csd.mmca_vsn);
csd 125 drivers/mmc/core/mmc.c card->erase_size = card->csd.erase_size;
csd 135 drivers/mmc/core/mmc.c struct mmc_csd *csd = &card->csd;
csd 144 drivers/mmc/core/mmc.c csd->structure = UNSTUFF_BITS(resp, 126, 2);
csd 145 drivers/mmc/core/mmc.c if (csd->structure == 0) {
csd 147 drivers/mmc/core/mmc.c mmc_hostname(card->host), csd->structure);
csd 151 drivers/mmc/core/mmc.c csd->mmca_vsn = UNSTUFF_BITS(resp, 122, 4);
csd 154 drivers/mmc/core/mmc.c csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
csd 155 drivers/mmc/core/mmc.c csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
csd 159 drivers/mmc/core/mmc.c csd->max_dtr = tran_exp[e] * tran_mant[m];
csd 160 drivers/mmc/core/mmc.c csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
csd 164 drivers/mmc/core/mmc.c csd->capacity = (1 + m) << (e + 2);
csd 166 drivers/mmc/core/mmc.c csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
csd 167 drivers/mmc/core/mmc.c csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd 168 drivers/mmc/core/mmc.c csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd 169 drivers/mmc/core/mmc.c csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd 170 drivers/mmc/core/mmc.c csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
csd 171 drivers/mmc/core/mmc.c csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd 172 drivers/mmc/core/mmc.c csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd 173 drivers/mmc/core/mmc.c csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
csd 175 drivers/mmc/core/mmc.c if (csd->write_blkbits >= 9) {
csd 178 drivers/mmc/core/mmc.c csd->erase_size = (a + 1) * (b + 1);
csd 179 drivers/mmc/core/mmc.c csd->erase_size <<= csd->write_blkbits - 9;
csd 371 drivers/mmc/core/mmc.c if (card->csd.structure == 3) {
csd 677 drivers/mmc/core/mmc.c if (card->csd.capacity == (4096 * 512)) {
csd 769 drivers/mmc/core/mmc.c MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
csd 818 drivers/mmc/core/mmc.c if (card->csd.dsr_imp && host->dsr_req)
csd 972 drivers/mmc/core/mmc.c else if (max_dtr > card->csd.max_dtr)
csd 973 drivers/mmc/core/mmc.c max_dtr = card->csd.max_dtr;
csd 1654 drivers/mmc/core/mmc.c if (card->csd.dsr_imp && host->dsr_req)
csd 297 drivers/mmc/core/mmc_ops.c static int mmc_spi_send_csd(struct mmc_card *card, u32 *csd)
csd 311 drivers/mmc/core/mmc_ops.c csd[i] = be32_to_cpu(csd_tmp[i]);
csd 318 drivers/mmc/core/mmc_ops.c int mmc_send_csd(struct mmc_card *card, u32 *csd)
csd 321 drivers/mmc/core/mmc_ops.c return mmc_spi_send_csd(card, csd);
csd 323 drivers/mmc/core/mmc_ops.c return mmc_send_cxd_native(card->host, card->rca << 16, csd,
csd 895 drivers/mmc/core/mmc_ops.c return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
csd 22 drivers/mmc/core/mmc_ops.h int mmc_send_csd(struct mmc_card *card, u32 *csd);
csd 614 drivers/mmc/core/mmc_test.c return card->csd.capacity << (card->csd.read_blkbits - 9);
csd 1117 drivers/mmc/core/mmc_test.c if (!test->card->csd.write_partial)
csd 1135 drivers/mmc/core/mmc_test.c if (!test->card->csd.read_partial)
csd 1153 drivers/mmc/core/mmc_test.c if (!test->card->csd.write_partial)
csd 1171 drivers/mmc/core/mmc_test.c if (!test->card->csd.read_partial)
csd 101 drivers/mmc/core/sd.c struct mmc_csd *csd = &card->csd;
csd 111 drivers/mmc/core/sd.c csd->taac_ns = (taac_exp[e] * taac_mant[m] + 9) / 10;
csd 112 drivers/mmc/core/sd.c csd->taac_clks = UNSTUFF_BITS(resp, 104, 8) * 100;
csd 116 drivers/mmc/core/sd.c csd->max_dtr = tran_exp[e] * tran_mant[m];
csd 117 drivers/mmc/core/sd.c csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
csd 121 drivers/mmc/core/sd.c csd->capacity = (1 + m) << (e + 2);
csd 123 drivers/mmc/core/sd.c csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
csd 124 drivers/mmc/core/sd.c csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
csd 125 drivers/mmc/core/sd.c csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
csd 126 drivers/mmc/core/sd.c csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
csd 127 drivers/mmc/core/sd.c csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
csd 128 drivers/mmc/core/sd.c csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
csd 129 drivers/mmc/core/sd.c csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
csd 130 drivers/mmc/core/sd.c csd->write_partial = UNSTUFF_BITS(resp, 21, 1);
csd 133 drivers/mmc/core/sd.c csd->erase_size = 1;
csd 134 drivers/mmc/core/sd.c } else if (csd->write_blkbits >= 9) {
csd 135 drivers/mmc/core/sd.c csd->erase_size = UNSTUFF_BITS(resp, 39, 7) + 1;
csd 136 drivers/mmc/core/sd.c csd->erase_size <<= csd->write_blkbits - 9;
csd 148 drivers/mmc/core/sd.c csd->taac_ns = 0; /* Unused */
csd 149 drivers/mmc/core/sd.c csd->taac_clks = 0; /* Unused */
csd 153 drivers/mmc/core/sd.c csd->max_dtr = tran_exp[e] * tran_mant[m];
csd 154 drivers/mmc/core/sd.c csd->cmdclass = UNSTUFF_BITS(resp, 84, 12);
csd 155 drivers/mmc/core/sd.c csd->c_size = UNSTUFF_BITS(resp, 48, 22);
csd 158 drivers/mmc/core/sd.c if (csd->c_size >= 0xFFFF)
csd 162 drivers/mmc/core/sd.c csd->capacity = (1 + m) << 10;
csd 164 drivers/mmc/core/sd.c csd->read_blkbits = 9;
csd 165 drivers/mmc/core/sd.c csd->read_partial = 0;
csd 166 drivers/mmc/core/sd.c csd->write_misalign = 0;
csd 167 drivers/mmc/core/sd.c csd->read_misalign = 0;
csd 168 drivers/mmc/core/sd.c csd->r2w_factor = 4; /* Unused */
csd 169 drivers/mmc/core/sd.c csd->write_blkbits = 9;
csd 170 drivers/mmc/core/sd.c csd->write_partial = 0;
csd 171 drivers/mmc/core/sd.c csd->erase_size = 1;
csd 179 drivers/mmc/core/sd.c card->erase_size = csd->erase_size;
csd 243 drivers/mmc/core/sd.c if (!(card->csd.cmdclass & CCC_APP_SPEC)) {
csd 308 drivers/mmc/core/sd.c if (!(card->csd.cmdclass & CCC_SWITCH)) {
csd 366 drivers/mmc/core/sd.c if (!(card->csd.cmdclass & CCC_SWITCH))
csd 603 drivers/mmc/core/sd.c if (!(card->csd.cmdclass & CCC_SWITCH))
csd 670 drivers/mmc/core/sd.c MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
csd 701 drivers/mmc/core/sd.c if (card->csd.dsr_imp && host->dsr_req)
csd 918 drivers/mmc/core/sd.c } else if (max_dtr > card->csd.max_dtr) {
csd 919 drivers/mmc/core/sd.c max_dtr = card->csd.max_dtr;
csd 1006 drivers/mmc/core/sd.c if (card->csd.dsr_imp && host->dsr_req)
csd 729 drivers/net/ethernet/cavium/liquidio/lio_core.c call_single_data_t *csd = &droq->csd;
csd 731 drivers/net/ethernet/cavium/liquidio/lio_core.c csd->func = napi_schedule_wrapper;
csd 732 drivers/net/ethernet/cavium/liquidio/lio_core.c csd->info = &droq->napi;
csd 733 drivers/net/ethernet/cavium/liquidio/lio_core.c csd->flags = 0;
csd 735 drivers/net/ethernet/cavium/liquidio/lio_core.c smp_call_function_single_async(droq->cpu_id, csd);
csd 323 drivers/net/ethernet/cavium/liquidio/octeon_droq.h call_single_data_t csd;
csd 1912 drivers/scsi/hpsa.c struct hpsa_scsi_dev_t *csd;
csd 1951 drivers/scsi/hpsa.c csd = h->dev[i];
csd 1952 drivers/scsi/hpsa.c device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
csd 238 include/linux/blkdev.h struct __call_single_data csd;
csd 287 include/linux/mmc/card.h struct mmc_csd csd; /* card specific */
csd 3013 include/linux/netdevice.h call_single_data_t csd ____cacheline_aligned_in_smp;
csd 60 include/linux/smp.h int smp_call_function_single_async(int cpu, call_single_data_t *csd);
csd 247 kernel/debug/debug_core.c call_single_data_t *csd;
csd 257 kernel/debug/debug_core.c csd = &per_cpu(kgdb_roundup_csd, cpu);
csd 271 kernel/debug/debug_core.c csd->func = kgdb_call_nmi_hook;
csd 272 kernel/debug/debug_core.c ret = smp_call_function_single_async(cpu, csd);
csd 32 kernel/smp.c call_single_data_t __percpu *csd;
csd 55 kernel/smp.c cfd->csd = alloc_percpu(call_single_data_t);
csd 56 kernel/smp.c if (!cfd->csd) {
csd 71 kernel/smp.c free_percpu(cfd->csd);
csd 107 kernel/smp.c static __always_inline void csd_lock_wait(call_single_data_t *csd)
csd 109 kernel/smp.c smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
csd 112 kernel/smp.c static __always_inline void csd_lock(call_single_data_t *csd)
csd 114 kernel/smp.c csd_lock_wait(csd);
csd 115 kernel/smp.c csd->flags |= CSD_FLAG_LOCK;
csd 125 kernel/smp.c static __always_inline void csd_unlock(call_single_data_t *csd)
csd 127 kernel/smp.c WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
csd 132 kernel/smp.c smp_store_release(&csd->flags, 0);
csd 142 kernel/smp.c static int generic_exec_single(int cpu, call_single_data_t *csd,
csd 152 kernel/smp.c csd_unlock(csd);
csd 161 kernel/smp.c csd_unlock(csd);
csd 165 kernel/smp.c csd->func = func;
csd 166 kernel/smp.c csd->info = info;
csd 179 kernel/smp.c if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
csd 214 kernel/smp.c call_single_data_t *csd, *csd_next;
csd 233 kernel/smp.c llist_for_each_entry(csd, entry, llist)
csd 235 kernel/smp.c csd->func);
csd 238 kernel/smp.c llist_for_each_entry_safe(csd, csd_next, entry, llist) {
csd 239 kernel/smp.c smp_call_func_t func = csd->func;
csd 240 kernel/smp.c void *info = csd->info;
csd 243 kernel/smp.c if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
csd 245 kernel/smp.c csd_unlock(csd);
csd 247 kernel/smp.c csd_unlock(csd);
csd 272 kernel/smp.c call_single_data_t *csd;
csd 302 kernel/smp.c csd = &csd_stack;
csd 304 kernel/smp.c csd = this_cpu_ptr(&csd_data);
csd 305 kernel/smp.c csd_lock(csd);
csd 308 kernel/smp.c err = generic_exec_single(cpu, csd, func, info);
csd 311 kernel/smp.c csd_lock_wait(csd);
csd 335 kernel/smp.c int smp_call_function_single_async(int cpu, call_single_data_t *csd)
csd 342 kernel/smp.c if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
csd 343 kernel/smp.c csd_lock_wait(csd);
csd 345 kernel/smp.c csd->flags = CSD_FLAG_LOCK;
csd 348 kernel/smp.c err = generic_exec_single(cpu, csd, csd->func, csd->info);
csd 466 kernel/smp.c call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
csd 468 kernel/smp.c csd_lock(csd);
csd 470 kernel/smp.c csd->flags |= CSD_FLAG_SYNCHRONOUS;
csd 471 kernel/smp.c csd->func = func;
csd 472 kernel/smp.c csd->info = info;
csd 473 kernel/smp.c if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
csd 482 kernel/smp.c call_single_data_t *csd;
csd 484 kernel/smp.c csd = per_cpu_ptr(cfd->csd, cpu);
csd 485 kernel/smp.c csd_lock_wait(csd);
csd 27 kernel/up.c int smp_call_function_single_async(int cpu, call_single_data_t *csd)
csd 32 kernel/up.c csd->func(csd->info);
csd 5813 net/core/dev.c smp_call_function_single_async(remsd->cpu, &remsd->csd);
csd 10204 net/core/dev.c sd->csd.func = rps_trigger_softirq;
csd 10205 net/core/dev.c sd->csd.info = sd;
csd 1168 tools/testing/selftests/net/nettest.c int lsd, csd = -1;
csd 1222 tools/testing/selftests/net/nettest.c csd = accept(lsd, (void *) addr, &alen);
csd 1223 tools/testing/selftests/net/nettest.c if (csd < 0) {
csd 1228 tools/testing/selftests/net/nettest.c rc = show_sockstat(csd, args);
csd 1232 tools/testing/selftests/net/nettest.c rc = check_device(csd, args);
csd 1237 tools/testing/selftests/net/nettest.c rc = msg_loop(0, csd, (void *) addr, alen, args);
csd 1238 tools/testing/selftests/net/nettest.c close(csd);
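
Most of the hits above fall into two unrelated families. The first is the SMP core's call_single_data_t, used for asynchronous cross-CPU function calls; the call sites in arch/mips/kernel/process.c, arch/s390/pci/pci_irq.c, block/blk-mq.c, kernel/debug/debug_core.c and net/core/dev.c all share one shape. Below is a minimal sketch of that shape, assuming a kernel of this vintage where csd->flags still carries the lock bit; the per-CPU variable and function names (example_csd, example_ipi_handler, example_poke_cpu) are hypothetical, invented for illustration.

#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/printk.h>

static DEFINE_PER_CPU(call_single_data_t, example_csd);

/* Runs on the target CPU in IPI (interrupt) context: keep it short. */
static void example_ipi_handler(void *info)
{
	pr_info("poked CPU %lu\n", (unsigned long)info);
}

static void example_poke_cpu(int cpu)
{
	call_single_data_t *csd = &per_cpu(example_csd, cpu);

	csd->func = example_ipi_handler;
	csd->info = (void *)(unsigned long)cpu;	/* cookie passed to func */
	/*
	 * Fire and forget: this returns without waiting for the handler.
	 * The csd must not be reused until the handler has run, which is
	 * why it is a per-CPU variable rather than a stack local.
	 */
	smp_call_function_single_async(cpu, csd);
}

The caller owns the csd until the remote handler finishes, so nearly every site above either keeps one csd per target CPU (per_cpu(backtrace_csd, cpu), per_cpu(kgdb_roundup_csd, cpu)) or embeds it in a longer-lived object (rq->csd, droq->csd, sd->csd).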
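
The WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK) and csd_lock_wait() hits in kernel/smp.c are what enforce that reuse rule. The protocol is a tiny acquire/release lock carried in csd->flags; the sketch below restates the kernel/smp.c lines listed above with explanatory comments added, it is not new logic.

static __always_inline void csd_lock_wait(call_single_data_t *csd)
{
	/* Spin until the previous user drops CSD_FLAG_LOCK; this acquire
	 * pairs with the release in csd_unlock(). */
	smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
}

static __always_inline void csd_lock(call_single_data_t *csd)
{
	csd_lock_wait(csd);
	csd->flags |= CSD_FLAG_LOCK;
	/* The kernel issues smp_wmb() here so that the later stores to
	 * csd->func and csd->info cannot be reordered before the lock. */
	smp_wmb();
}

static __always_inline void csd_unlock(call_single_data_t *csd)
{
	WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
	/* Release: the handler's reads of func/info are complete before
	 * the next locker can observe flags == 0 and reuse the csd. */
	smp_store_release(&csd->flags, 0);
}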
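
The second family is unrelated to SMP: in drivers/mmc/core/mmc.c and sd.c above, csd is the card's 128-bit Card-Specific Data register, and each UNSTUFF_BITS(resp, start, size) call slices one named field out of the four 32-bit response words. The kernel's UNSTUFF_BITS() is a macro defined in both files; the function below is an equivalent rewrite for readability, not the kernel's own code.

#include <linux/types.h>

/*
 * Extract CSD bits [start + size - 1 : start]. The CSD is transmitted
 * MSB first, so resp[0] holds the most significant 32 bits of the
 * 128-bit register.
 */
static u32 unstuff_bits(const u32 resp[4], int start, int size)
{
	const u32 mask = (size < 32 ? 1U << size : 0) - 1;
	const int off = 3 - (start / 32);	/* which response word */
	const int shft = start & 31;		/* bit offset within it */
	u32 res = resp[off] >> shft;

	if (size + shft > 32)			/* field straddles two words */
		res |= resp[off - 1] << ((32 - shft) % 32);

	return res & mask;
}

For example, csd->cmdclass = UNSTUFF_BITS(resp, 84, 12) in both decoders reads CSD bits [95:84], the card command class bitmap that the CCC_BLOCK_READ, CCC_ERASE and CCC_SWITCH checks above consult.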