ctrlr 615 drivers/bus/moxtet.c struct device_node *ctrlr,
ctrlr 1120 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c struct komeda_timing_ctrlr *ctrlr;
ctrlr 1125 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr),
ctrlr 1136 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c ctrlr = to_ctrlr(c);
ctrlr 1138 drivers/gpu/drm/arm/display/komeda/d71/d71_component.c ctrlr->supports_dual_link = true;
ctrlr 111 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c pos = to_cpos(pipe->ctrlr);
ctrlr 316 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c if (pipe->dual_link && !pipe->ctrlr->supports_dual_link) {
ctrlr 411 drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h struct komeda_timing_ctrlr *ctrlr;
ctrlr 766 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
ctrlr 774 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
ctrlr 782 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
ctrlr 1167 drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
ctrlr 307 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c struct komeda_timing_ctrlr *ctrlr)
ctrlr 315 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c st->base.component = &ctrlr->base;
ctrlr 316 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj,
ctrlr 417 drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c err = komeda_timing_ctrlr_obj_add(kms, pipe->ctrlr);
ctrlr 90 drivers/irqchip/irq-atmel-aic-common.c struct device_node *ctrlr,
ctrlr 25 drivers/irqchip/irq-atmel-aic-common.h struct device_node *ctrlr,
ctrlr 172 drivers/irqchip/irq-atmel-aic.c struct device_node *ctrlr,
ctrlr 187 drivers/irqchip/irq-atmel-aic.c ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
ctrlr 275 drivers/irqchip/irq-atmel-aic5.c struct device_node *ctrlr,
ctrlr 288 drivers/irqchip/irq-atmel-aic5.c ret = aic_common_irq_domain_xlate(d, ctrlr, intspec, intsize,
ctrlr 107 drivers/irqchip/irq-bcm2835.c static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 191 drivers/irqchip/irq-csky-mpintc.c struct device_node *ctrlr, const u32 *intspec,
ctrlr 433 drivers/irqchip/irq-mips-gic.c static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 553 drivers/irqchip/irq-mips-gic.c static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 165 drivers/irqchip/irq-pic32-evic.c int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 46 drivers/irqchip/irq-xtensa-mx.c struct device_node *ctrlr,
ctrlr 30 drivers/irqchip/irq-xtensa-pic.c struct device_node *ctrlr,
ctrlr 27 drivers/sh/intc/irqdomain.c static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr,
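The irqchip hits above are all implementations of the irq_domain_ops.xlate callback, where ctrlr is the device_node of the interrupt controller whose "interrupts" specifier is being translated. A minimal one-cell translator, modeled on irq_domain_xlate_onecell() from kernel/irq/irqdomain.c (the function name is hypothetical; the body is a sketch of the common pattern):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int example_xlate(struct irq_domain *d, struct device_node *ctrlr,
			 const u32 *intspec, unsigned int intsize,
			 unsigned long *out_hwirq, unsigned int *out_type)
{
	/* One cell in the specifier: just the hardware irq number. */
	if (WARN_ON(intsize < 1))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;	/* trigger type not encoded */
	return 0;
}

Drivers with no controller-specific decoding can simply set .xlate = irq_domain_xlate_onecell (or _twocell/_onetwocell, declared in include/linux/irqdomain.h) instead of writing their own, which is why the same ctrlr parameter recurs across so many of the files listed here.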
ctrlr 40 drivers/soc/qcom/rpmh.c #define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
ctrlr 102 drivers/soc/qcom/rpmh.c static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
ctrlr 106 drivers/soc/qcom/rpmh.c list_for_each_entry(p, &ctrlr->cache, list) {
ctrlr 116 drivers/soc/qcom/rpmh.c static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
ctrlr 123 drivers/soc/qcom/rpmh.c spin_lock_irqsave(&ctrlr->cache_lock, flags);
ctrlr 124 drivers/soc/qcom/rpmh.c req = __find_req(ctrlr, cmd->addr);
ctrlr 137 drivers/soc/qcom/rpmh.c list_add_tail(&req->list, &ctrlr->cache);
ctrlr 155 drivers/soc/qcom/rpmh.c ctrlr->dirty = true;
ctrlr 157 drivers/soc/qcom/rpmh.c spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
ctrlr 176 drivers/soc/qcom/rpmh.c struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
ctrlr 185 drivers/soc/qcom/rpmh.c req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
ctrlr 194 drivers/soc/qcom/rpmh.c ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
ctrlr 284 drivers/soc/qcom/rpmh.c static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
ctrlr 288 drivers/soc/qcom/rpmh.c spin_lock_irqsave(&ctrlr->cache_lock, flags);
ctrlr 289 drivers/soc/qcom/rpmh.c list_add_tail(&req->list, &ctrlr->batch_cache);
ctrlr 290 drivers/soc/qcom/rpmh.c spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
ctrlr 293 drivers/soc/qcom/rpmh.c static int flush_batch(struct rpmh_ctrlr *ctrlr)
ctrlr 302 drivers/soc/qcom/rpmh.c spin_lock_irqsave(&ctrlr->cache_lock, flags);
ctrlr 303 drivers/soc/qcom/rpmh.c list_for_each_entry(req, &ctrlr->batch_cache, list) {
ctrlr 306 drivers/soc/qcom/rpmh.c ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
ctrlr 312 drivers/soc/qcom/rpmh.c spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
ctrlr 317 drivers/soc/qcom/rpmh.c static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
ctrlr 322 drivers/soc/qcom/rpmh.c spin_lock_irqsave(&ctrlr->cache_lock, flags);
ctrlr 323 drivers/soc/qcom/rpmh.c list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
ctrlr 325 drivers/soc/qcom/rpmh.c INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr 326 drivers/soc/qcom/rpmh.c spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
ctrlr 352 drivers/soc/qcom/rpmh.c struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
ctrlr 384 drivers/soc/qcom/rpmh.c cache_batch(ctrlr, req);
ctrlr 393 drivers/soc/qcom/rpmh.c ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
ctrlr 434 drivers/soc/qcom/rpmh.c struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
ctrlr 442 drivers/soc/qcom/rpmh.c return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
ctrlr 460 drivers/soc/qcom/rpmh.c struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
ctrlr 463 drivers/soc/qcom/rpmh.c if (!ctrlr->dirty) {
ctrlr 469 drivers/soc/qcom/rpmh.c ret = flush_batch(ctrlr);
ctrlr 477 drivers/soc/qcom/rpmh.c list_for_each_entry(p, &ctrlr->cache, list) {
ctrlr 492 drivers/soc/qcom/rpmh.c ctrlr->dirty = false;
ctrlr 508 drivers/soc/qcom/rpmh.c struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
ctrlr 511 drivers/soc/qcom/rpmh.c invalidate_batch(ctrlr);
ctrlr 512 drivers/soc/qcom/rpmh.c ctrlr->dirty = true;
ctrlr 515 drivers/soc/qcom/rpmh.c ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
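The drivers/soc/qcom/rpmh.c fragments above trace a lookup-or-allocate cache keyed by command address and guarded by ctrlr->cache_lock. A reconstruction of that path, stitched from the fragments (struct rpmh_ctrlr, struct cache_req and struct tcs_cmd are the driver's internal types; how the command value is recorded per sleep/wake state is elided):

#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
					   enum rpmh_state state,
					   struct tcs_cmd *cmd)
{
	struct cache_req *req;
	unsigned long flags;

	spin_lock_irqsave(&ctrlr->cache_lock, flags);
	req = __find_req(ctrlr, cmd->addr);
	if (req)
		goto existing;

	/* GFP_ATOMIC: we are inside a spinlock with interrupts off. */
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		req = ERR_PTR(-ENOMEM);
		goto unlock;
	}
	req->addr = cmd->addr;
	list_add_tail(&req->list, &ctrlr->cache);

existing:
	/* ... store cmd->data for the requested state (elided) ... */
	ctrlr->dirty = true;	/* cached votes must be flushed again */
unlock:
	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
	return req;
}

The dirty flag is what the flush path (lines 460-492 above) keys off: it skips the expensive re-write of sleep/wake votes to the RSC hardware when nothing in the cache has changed since the last flush.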
ctrlr 1222 drivers/thermal/tegra/soctherm.c struct device_node *ctrlr, const u32 *intspec, unsigned int intsize,
ctrlr 1179 drivers/vme/bridges/vme_ca91cx42.c struct vme_dma_resource *ctrlr;
ctrlr 1187 drivers/vme/bridges/vme_ca91cx42.c ctrlr = list->parent;
ctrlr 1189 drivers/vme/bridges/vme_ca91cx42.c bridge = ctrlr->parent->driver_priv;
ctrlr 1190 drivers/vme/bridges/vme_ca91cx42.c dev = ctrlr->parent->parent;
ctrlr 1192 drivers/vme/bridges/vme_ca91cx42.c mutex_lock(&ctrlr->mtx);
ctrlr 1194 drivers/vme/bridges/vme_ca91cx42.c if (!(list_empty(&ctrlr->running))) {
ctrlr 1201 drivers/vme/bridges/vme_ca91cx42.c mutex_unlock(&ctrlr->mtx);
ctrlr 1204 drivers/vme/bridges/vme_ca91cx42.c list_add(&list->list, &ctrlr->running);
ctrlr 1213 drivers/vme/bridges/vme_ca91cx42.c mutex_unlock(&ctrlr->mtx);
ctrlr 1235 drivers/vme/bridges/vme_ca91cx42.c ca91cx42_dma_busy(ctrlr->parent));
ctrlr 1242 drivers/vme/bridges/vme_ca91cx42.c ca91cx42_dma_busy(ctrlr->parent));
ctrlr 1263 drivers/vme/bridges/vme_ca91cx42.c mutex_lock(&ctrlr->mtx);
ctrlr 1265 drivers/vme/bridges/vme_ca91cx42.c mutex_unlock(&ctrlr->mtx);
ctrlr 1809 drivers/vme/bridges/vme_tsi148.c struct vme_dma_resource *ctrlr;
ctrlr 1817 drivers/vme/bridges/vme_tsi148.c ctrlr = list->parent;
ctrlr 1819 drivers/vme/bridges/vme_tsi148.c tsi148_bridge = ctrlr->parent;
ctrlr 1823 drivers/vme/bridges/vme_tsi148.c mutex_lock(&ctrlr->mtx);
ctrlr 1825 drivers/vme/bridges/vme_tsi148.c channel = ctrlr->number;
ctrlr 1827 drivers/vme/bridges/vme_tsi148.c if (!list_empty(&ctrlr->running)) {
ctrlr 1834 drivers/vme/bridges/vme_tsi148.c mutex_unlock(&ctrlr->mtx);
ctrlr 1837 drivers/vme/bridges/vme_tsi148.c list_add(&list->list, &ctrlr->running);
ctrlr 1844 drivers/vme/bridges/vme_tsi148.c mutex_unlock(&ctrlr->mtx);
ctrlr 1861 drivers/vme/bridges/vme_tsi148.c tsi148_dma_busy(ctrlr->parent, channel));
ctrlr 1868 drivers/vme/bridges/vme_tsi148.c tsi148_dma_busy(ctrlr->parent, channel));
ctrlr 1887 drivers/vme/bridges/vme_tsi148.c mutex_lock(&ctrlr->mtx);
ctrlr 1889 drivers/vme/bridges/vme_tsi148.c mutex_unlock(&ctrlr->mtx);
ctrlr 1224 drivers/vme/vme.c struct vme_dma_resource *ctrlr;
ctrlr 1231 drivers/vme/vme.c ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
ctrlr 1233 drivers/vme/vme.c if (!mutex_trylock(&ctrlr->mtx)) {
ctrlr 1238 drivers/vme/vme.c if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
ctrlr 1240 drivers/vme/vme.c mutex_unlock(&ctrlr->mtx);
ctrlr 1244 drivers/vme/vme.c ctrlr->locked = 0;
ctrlr 1246 drivers/vme/vme.c mutex_unlock(&ctrlr->mtx);
ctrlr 13 drivers/xen/dbgp.c const struct device *ctrlr = hcd_to_bus(hcd)->controller;
ctrlr 23 drivers/xen/dbgp.c if (dev_is_pci(ctrlr)) {
ctrlr 24 drivers/xen/dbgp.c const struct pci_dev *pdev = to_pci_dev(ctrlr);
ctrlr 421 include/linux/irqdomain.h int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 424 include/linux/irqdomain.h int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 427 include/linux/irqdomain.h int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 927 kernel/irq/irqdomain.c int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 946 kernel/irq/irqdomain.c int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
ctrlr 952 kernel/irq/irqdomain.c of_phandle_args_to_fwspec(ctrlr, intspec, intsize, &fwspec);
ctrlr 969 kernel/irq/irqdomain.c struct device_node *ctrlr,
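Both VME bridge drivers listed above (vme_ca91cx42.c and vme_tsi148.c) follow the same submission discipline around a struct vme_dma_resource: the channel mutex protects only the running list, never the transfer itself, so the lock is dropped while the hardware runs. A condensed sketch of that shared shape (the function name is hypothetical; the per-bridge register programming and the wait_event() on the busy flag are elided, and the closing list_del() is an assumption about how the list is retired):

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/vme.h>

static int example_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_dma_resource *ctrlr = list->parent;

	mutex_lock(&ctrlr->mtx);
	if (!list_empty(&ctrlr->running)) {
		/* Only one list may be in flight per DMA channel. */
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}
	list_add(&list->list, &ctrlr->running);
	mutex_unlock(&ctrlr->mtx);

	/* ... program the bridge, wait for the busy flag to clear ... */

	mutex_lock(&ctrlr->mtx);
	list_del(&list->list);	/* transfer retired */
	mutex_unlock(&ctrlr->mtx);

	return 0;
}

The drivers/vme/vme.c hits (mutex_trylock, the pending/running emptiness check, ctrlr->locked = 0) are the core's teardown side of the same scheme: a resource is only released once no DMA list is queued or in flight on it.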