sds 106 drivers/i2c/busses/i2c-pxa-pci.c struct ce4100_devices *sds;
sds 116 drivers/i2c/busses/i2c-pxa-pci.c sds = kzalloc(sizeof(*sds), GFP_KERNEL);
sds 117 drivers/i2c/busses/i2c-pxa-pci.c if (!sds) {
sds 122 drivers/i2c/busses/i2c-pxa-pci.c for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
sds 123 drivers/i2c/busses/i2c-pxa-pci.c sds->pdev[i] = add_i2c_device(dev, i);
sds 124 drivers/i2c/busses/i2c-pxa-pci.c if (IS_ERR(sds->pdev[i])) {
sds 125 drivers/i2c/busses/i2c-pxa-pci.c ret = PTR_ERR(sds->pdev[i]);
sds 127 drivers/i2c/busses/i2c-pxa-pci.c platform_device_unregister(sds->pdev[i]);
sds 131 drivers/i2c/busses/i2c-pxa-pci.c pci_set_drvdata(dev, sds);
sds 135 drivers/i2c/busses/i2c-pxa-pci.c kfree(sds);
sds 1073 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c struct qlcnic_host_sds_ring *sds;
sds 1090 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds = &recv_ctx->sds_rings[i];
sds 1091 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds->consumer = 0;
sds 1092 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
sds 1093 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds_mbx.phy_addr_low = LSD(sds->phys_addr);
sds 1094 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds 1095 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds_mbx.sds_ring_size = sds->num_desc;
sds 1124 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds = &recv_ctx->sds_rings[i];
sds 1125 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds->crb_sts_consumer = ahw->pci_base0 +
sds 1132 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
sds 1171 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c struct qlcnic_host_sds_ring *sds;
sds 1213 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds = &recv_ctx->sds_rings[i];
sds 1214 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds->consumer = 0;
sds 1215 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
sds 1216 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds_mbx.phy_addr_low = LSD(sds->phys_addr);
sds 1217 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds_mbx.phy_addr_high = MSD(sds->phys_addr);
sds 1218 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds_mbx.sds_ring_size = sds->num_desc;
sds 1275 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds = &recv_ctx->sds_rings[i];
sds 1276 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds->crb_sts_consumer = ahw->pci_base0 +
sds 1282 drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c sds->crb_intr_mask = ahw->pci_base0 + intr_mask;
sds 173 include/linux/sched/topology.h struct sched_domain_shared *__percpu *sds;
sds 5831 kernel/sched/fair.c struct sched_domain_shared *sds;
sds 5833 kernel/sched/fair.c sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
sds 5834 kernel/sched/fair.c if (sds)
sds 5835 kernel/sched/fair.c WRITE_ONCE(sds->has_idle_cores, val);
sds 5840 kernel/sched/fair.c struct sched_domain_shared *sds;
sds 5842 kernel/sched/fair.c sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
sds 5843 kernel/sched/fair.c if (sds)
sds 5844 kernel/sched/fair.c return READ_ONCE(sds->has_idle_cores);
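The fair.c hits at 5831-5844 show the access pattern for LLC-shared scheduler state: the per-CPU sd_llc_shared pointer is fetched under RCU, and the has_idle_cores hint is accessed with READ_ONCE()/WRITE_ONCE() because it is updated locklessly from multiple CPUs. Reconstructed from the hits above, the surrounding helpers look roughly like this (the set_idle_cores()/test_idle_cores() names match the kernel source of this vintage; treat the exact signatures as approximate):

/* Writer side: publish whether this LLC domain still has a fully idle core. */
static inline void set_idle_cores(int cpu, int val)
{
	struct sched_domain_shared *sds;

	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		WRITE_ONCE(sds->has_idle_cores, val);
}

/* Reader side: the hint may be stale; fall back to 'def' if the domain
 * has no shared state (e.g. during topology rebuild). */
static inline bool test_idle_cores(int cpu, bool def)
{
	struct sched_domain_shared *sds;

	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
	if (sds)
		return READ_ONCE(sds->has_idle_cores);

	return def;
}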
sds 7749 kernel/sched/fair.c static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
sds 7757 kernel/sched/fair.c *sds = (struct sd_lb_stats){
sds 8122 kernel/sched/fair.c struct sd_lb_stats *sds,
sds 8126 kernel/sched/fair.c struct sg_lb_stats *busiest = &sds->busiest_stat;
sds 8135 kernel/sched/fair.c (!group_smaller_max_cpu_capacity(sg, sds->local) ||
sds 8136 kernel/sched/fair.c !group_has_capacity(env, &sds->local_stat)))
sds 8158 kernel/sched/fair.c group_smaller_min_cpu_capacity(sds->local, sg))
sds 8183 kernel/sched/fair.c if (!sds->busiest)
sds 8187 kernel/sched/fair.c if (sched_asym_prefer(sds->busiest->asym_prefer_cpu,
sds 8230 kernel/sched/fair.c static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds)
sds 8234 kernel/sched/fair.c struct sg_lb_stats *local = &sds->local_stat;
sds 8250 kernel/sched/fair.c sds->local = sg;
sds 8273 kernel/sched/fair.c if (prefer_sibling && sds->local &&
sds 8280 kernel/sched/fair.c if (update_sd_pick_busiest(env, sds, sg, sgs)) {
sds 8281 kernel/sched/fair.c sds->busiest = sg;
sds 8282 kernel/sched/fair.c sds->busiest_stat = *sgs;
sds 8287 kernel/sched/fair.c sds->total_running += sgs->sum_nr_running;
sds 8288 kernel/sched/fair.c sds->total_load += sgs->group_load;
sds 8289 kernel/sched/fair.c sds->total_capacity += sgs->group_capacity;
sds 8304 kernel/sched/fair.c env->fbq_type = fbq_classify_group(&sds->busiest_stat);
sds 8346 kernel/sched/fair.c static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
sds 8356 kernel/sched/fair.c if (!sds->busiest)
sds 8359 kernel/sched/fair.c busiest_cpu = sds->busiest->asym_prefer_cpu;
sds 8363 kernel/sched/fair.c env->imbalance = sds->busiest_stat.group_load;
sds 8376 kernel/sched/fair.c void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
sds 8383 kernel/sched/fair.c local = &sds->local_stat;
sds 8384 kernel/sched/fair.c busiest = &sds->busiest_stat;
sds 8444 kernel/sched/fair.c static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
sds 8449 kernel/sched/fair.c local = &sds->local_stat;
sds 8450 kernel/sched/fair.c busiest = &sds->busiest_stat;
sds 8458 kernel/sched/fair.c min(busiest->load_per_task, sds->avg_load);
sds 8468 kernel/sched/fair.c (busiest->avg_load <= sds->avg_load ||
sds 8469 kernel/sched/fair.c local->avg_load >= sds->avg_load)) {
sds 8471 kernel/sched/fair.c return fix_small_imbalance(env, sds);
sds 8495 kernel/sched/fair.c max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
sds 8500 kernel/sched/fair.c (sds->avg_load - local->avg_load) * local->group_capacity
sds 8516 kernel/sched/fair.c return fix_small_imbalance(env, sds);
sds 8535 kernel/sched/fair.c struct sd_lb_stats sds;
sds 8537 kernel/sched/fair.c init_sd_lb_stats(&sds);
sds 8543 kernel/sched/fair.c update_sd_lb_stats(env, &sds);
sds 8552 kernel/sched/fair.c local = &sds.local_stat;
sds 8553 kernel/sched/fair.c busiest = &sds.busiest_stat;
sds 8556 kernel/sched/fair.c if (check_asym_packing(env, &sds))
sds 8557 kernel/sched/fair.c return sds.busiest;
sds 8560 kernel/sched/fair.c if (!sds.busiest || busiest->sum_nr_running == 0)
sds 8564 kernel/sched/fair.c sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
sds 8565 kernel/sched/fair.c / sds.total_capacity;
sds 8598 kernel/sched/fair.c if (local->avg_load >= sds.avg_load)
sds 8625 kernel/sched/fair.c calculate_imbalance(env, &sds);
sds 8626 kernel/sched/fair.c return env->imbalance ? sds.busiest : NULL;
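The find_busiest_group() hits at 8535-8626 show how the accumulated sd_lb_stats drive the balancing decision: update_sd_lb_stats() sums each group's load and capacity into the domain totals, the domain-wide average is total_load scaled by SCHED_CAPACITY_SCALE over total_capacity, and a local group already at or above that average bails out before calculate_imbalance(). A minimal standalone sketch of that arithmetic (plain userspace C with simplified stand-in structs, not kernel code; SCHED_CAPACITY_SCALE is 1024 in the kernel):

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Simplified stand-in for the sg_lb_stats fields the totals are built from. */
struct sg_stats_sketch {
	unsigned long group_load;
	unsigned long group_capacity;
};

int main(void)
{
	struct sg_stats_sketch groups[] = {
		{ .group_load = 900, .group_capacity = 1024 },
		{ .group_load = 300, .group_capacity = 1024 },
	};
	unsigned long total_load = 0, total_capacity = 0;

	/* update_sd_lb_stats(): accumulate per-group stats into domain totals. */
	for (int i = 0; i < 2; i++) {
		total_load += groups[i].group_load;
		total_capacity += groups[i].group_capacity;
	}

	/* find_busiest_group(): sds.avg_load =
	 * (SCHED_CAPACITY_SCALE * sds.total_load) / sds.total_capacity */
	unsigned long avg_load = SCHED_CAPACITY_SCALE * total_load / total_capacity;
	printf("domain avg_load = %lu\n", avg_load);

	return 0;
}

For these sample groups this prints domain avg_load = 600, i.e. (1024 * 1200) / 2048; a local group whose own avg_load is at or above that value triggers the local->avg_load >= sds.avg_load early return and no pull happens.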
sds 9403 kernel/sched/fair.c struct sched_domain_shared *sds;
sds 9487 kernel/sched/fair.c sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
sds 9488 kernel/sched/fair.c if (sds) {
sds 9498 kernel/sched/fair.c nr_busy = atomic_read(&sds->nr_busy_cpus);
sds 629 kernel/sched/topology.c struct sched_domain_shared *sds = NULL;
sds 638 kernel/sched/topology.c sds = sd->shared;
sds 644 kernel/sched/topology.c rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);
sds 1268 kernel/sched/topology.c if (atomic_read(&(*per_cpu_ptr(sdd->sds, cpu))->ref))
sds 1269 kernel/sched/topology.c *per_cpu_ptr(sdd->sds, cpu) = NULL;
sds 1422 kernel/sched/topology.c sd->shared = *per_cpu_ptr(sdd->sds, sd_id);
sds 1762 kernel/sched/topology.c sdd->sds = alloc_percpu(struct sched_domain_shared *);
sds 1763 kernel/sched/topology.c if (!sdd->sds)
sds 1776 kernel/sched/topology.c struct sched_domain_shared *sds;
sds 1787 kernel/sched/topology.c sds = kzalloc_node(sizeof(struct sched_domain_shared),
sds 1789 kernel/sched/topology.c if (!sds)
sds 1792 kernel/sched/topology.c *per_cpu_ptr(sdd->sds, j) = sds;
sds 1837 kernel/sched/topology.c if (sdd->sds)
sds 1838 kernel/sched/topology.c kfree(*per_cpu_ptr(sdd->sds, j));
sds 1846 kernel/sched/topology.c free_percpu(sdd->sds);
sds 1847 kernel/sched/topology.c sdd->sds = NULL;
sds 454 kernel/trace/bpf_trace.c struct perf_sample_data sds[3];
sds 462 kernel/trace/bpf_trace.c struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
sds 473 kernel/trace/bpf_trace.c if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
sds 478 kernel/trace/bpf_trace.c sd = &sds->sds[nest_level - 1];
sds 535 kernel/trace/bpf_trace.c if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
sds 539 kernel/trace/bpf_trace.c sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
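The topology.c hits at 1762-1847 show the common two-level per-CPU allocation pattern: alloc_percpu() provides a per-CPU array of pointers, and each slot is then filled with a structure allocated on that CPU's NUMA node via kzalloc_node(), with kfree() plus free_percpu() on the teardown side. A sketch of that allocate/free pairing, reconstructed from the hits (error unwinding is elided, and the helper names alloc_shared_state()/free_shared_state() are hypothetical):

static int alloc_shared_state(struct sd_data *sdd, const struct cpumask *cpu_map)
{
	int j;

	/* Per-CPU array of pointers; percpu allocations are zero-filled. */
	sdd->sds = alloc_percpu(struct sched_domain_shared *);
	if (!sdd->sds)
		return -ENOMEM;

	for_each_cpu(j, cpu_map) {
		struct sched_domain_shared *sds;

		/* Allocate each CPU's shared structure on its own NUMA node. */
		sds = kzalloc_node(sizeof(struct sched_domain_shared),
				   GFP_KERNEL, cpu_to_node(j));
		if (!sds)
			return -ENOMEM;

		*per_cpu_ptr(sdd->sds, j) = sds;
	}
	return 0;
}

static void free_shared_state(struct sd_data *sdd, const struct cpumask *cpu_map)
{
	int j;

	if (!sdd->sds)
		return;
	for_each_cpu(j, cpu_map)
		kfree(*per_cpu_ptr(sdd->sds, j));	/* kfree(NULL) is a no-op */
	free_percpu(sdd->sds);
	sdd->sds = NULL;
}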