
Searched refs:domain (Results 1 – 200 of 628) sorted by relevance


/linux-4.4.14/kernel/irq/
irqdomain.c:28 static void irq_domain_check_hierarchy(struct irq_domain *domain);
99 struct irq_domain *domain; in __irq_domain_add() local
104 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), in __irq_domain_add()
106 if (WARN_ON(!domain)) in __irq_domain_add()
112 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); in __irq_domain_add()
113 domain->ops = ops; in __irq_domain_add()
114 domain->host_data = host_data; in __irq_domain_add()
115 domain->fwnode = fwnode; in __irq_domain_add()
116 domain->hwirq_max = hwirq_max; in __irq_domain_add()
117 domain->revmap_size = size; in __irq_domain_add()
[all …]
msi.c:83 static void msi_domain_activate(struct irq_domain *domain, in msi_domain_activate() argument
92 static void msi_domain_deactivate(struct irq_domain *domain, in msi_domain_deactivate() argument
101 static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, in msi_domain_alloc() argument
104 struct msi_domain_info *info = domain->host_data; in msi_domain_alloc()
109 if (irq_find_mapping(domain, hwirq) > 0) in msi_domain_alloc()
112 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in msi_domain_alloc()
117 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); in msi_domain_alloc()
121 ops->msi_free(domain, info, virq + i); in msi_domain_alloc()
123 irq_domain_free_irqs_top(domain, virq, nr_irqs); in msi_domain_alloc()
131 static void msi_domain_free(struct irq_domain *domain, unsigned int virq, in msi_domain_free() argument
[all …]
/linux-4.4.14/drivers/iommu/
ipmmu-vmsa.c:199 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) in ipmmu_ctx_read() argument
201 return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); in ipmmu_ctx_read()
204 static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, in ipmmu_ctx_write() argument
207 ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); in ipmmu_ctx_write()
215 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_sync() argument
219 while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { in ipmmu_tlb_sync()
222 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
230 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_invalidate() argument
234 reg = ipmmu_ctx_read(domain, IMCTR); in ipmmu_tlb_invalidate()
236 ipmmu_ctx_write(domain, IMCTR, reg); in ipmmu_tlb_invalidate()
[all …]
iommu.c:55 struct iommu_domain *domain; member
82 static int __iommu_attach_device(struct iommu_domain *domain,
84 static int __iommu_attach_group(struct iommu_domain *domain,
86 static void __iommu_detach_group(struct iommu_domain *domain,
331 struct iommu_domain *domain = group->default_domain; in iommu_group_create_direct_mappings() local
337 if (!domain || domain->type != IOMMU_DOMAIN_DMA) in iommu_group_create_direct_mappings()
340 BUG_ON(!domain->ops->pgsize_bitmap); in iommu_group_create_direct_mappings()
342 pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap); in iommu_group_create_direct_mappings()
357 phys_addr = iommu_iova_to_phys(domain, addr); in iommu_group_create_direct_mappings()
361 ret = iommu_map(domain, addr, addr, pg_size, entry->prot); in iommu_group_create_direct_mappings()
[all …]
amd_iommu.c:92 struct protection_domain *domain; /* Domain the device is bound to */ member
115 static void update_domain(struct protection_domain *domain);
116 static int protection_domain_init(struct protection_domain *domain);
126 return container_of(dom, struct protection_domain, domain); in to_pdomain()
344 struct iommu_domain *domain; in init_iommu_group() local
351 domain = iommu_group_default_domain(group); in init_iommu_group()
352 if (!domain) in init_iommu_group()
355 dma_domain = to_pdomain(domain)->priv; in init_iommu_group()
1104 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1111 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
[all …]
exynos-iommu.c:210 struct iommu_domain domain; /* generic domain data structure */ member
227 struct exynos_iommu_domain *domain; /* domain we belong to */ member
236 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
358 if (data->domain) in exynos_sysmmu_irq()
359 ret = report_iommu_fault(&data->domain->domain, in exynos_sysmmu_irq()
402 data->domain = NULL; in __sysmmu_disable()
458 struct exynos_iommu_domain *domain) in __sysmmu_enable() argument
466 data->domain = domain; in __sysmmu_enable()
662 struct exynos_iommu_domain *domain; in exynos_iommu_domain_alloc() local
668 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in exynos_iommu_domain_alloc()
[all …]
intel-iommu.c:377 #define for_each_domain_iommu(idx, domain) \ argument
379 if (domain->iommu_refcnt[idx])
412 struct iommu_domain domain; /* generic domain data structure for member
431 struct dmar_domain *domain; /* pointer to domain */ member
465 struct dmar_domain *domain[HIGH_WATER_MARK]; member
480 static void domain_exit(struct dmar_domain *domain);
481 static void domain_remove_dev_info(struct dmar_domain *domain);
482 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
487 static int domain_detach_iommu(struct dmar_domain *domain,
567 return container_of(dom, struct dmar_domain, domain); in to_dmar_domain()
[all …]
s390-iommu.c:23 struct iommu_domain domain; member
37 return container_of(dom, struct s390_domain, domain); in to_s390_domain()
73 return &s390_domain->domain; in s390_domain_alloc()
76 void s390_domain_free(struct iommu_domain *domain) in s390_domain_free() argument
78 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_domain_free()
84 static int s390_iommu_attach_device(struct iommu_domain *domain, in s390_iommu_attach_device() argument
87 struct s390_domain *s390_domain = to_s390_domain(domain); in s390_iommu_attach_device()
113 domain->geometry.aperture_start = zdev->start_dma; in s390_iommu_attach_device()
114 domain->geometry.aperture_end = zdev->end_dma; in s390_iommu_attach_device()
115 domain->geometry.force_aperture = true; in s390_iommu_attach_device()
[all …]
dma-iommu.c:44 int iommu_get_dma_cookie(struct iommu_domain *domain) in iommu_get_dma_cookie() argument
48 if (domain->iova_cookie) in iommu_get_dma_cookie()
52 domain->iova_cookie = iovad; in iommu_get_dma_cookie()
64 void iommu_put_dma_cookie(struct iommu_domain *domain) in iommu_put_dma_cookie() argument
66 struct iova_domain *iovad = domain->iova_cookie; in iommu_put_dma_cookie()
73 domain->iova_cookie = NULL; in iommu_put_dma_cookie()
88 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size) in iommu_dma_init_domain() argument
90 struct iova_domain *iovad = domain->iova_cookie; in iommu_dma_init_domain()
97 order = __ffs(domain->ops->pgsize_bitmap); in iommu_dma_init_domain()
102 if (domain->geometry.force_aperture) { in iommu_dma_init_domain()
[all …]
fsl_pamu_domain.c:307 struct fsl_dma_domain *domain; in iommu_alloc_dma_domain() local
309 domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); in iommu_alloc_dma_domain()
310 if (!domain) in iommu_alloc_dma_domain()
313 domain->stash_id = ~(u32)0; in iommu_alloc_dma_domain()
314 domain->snoop_id = ~(u32)0; in iommu_alloc_dma_domain()
315 domain->win_cnt = pamu_get_max_subwin_cnt(); in iommu_alloc_dma_domain()
316 domain->geom_size = 0; in iommu_alloc_dma_domain()
318 INIT_LIST_HEAD(&domain->devices); in iommu_alloc_dma_domain()
320 spin_lock_init(&domain->domain_lock); in iommu_alloc_dma_domain()
322 return domain; in iommu_alloc_dma_domain()
[all …]
tegra-gart.c:67 struct iommu_domain domain; /* generic domain handle */ member
78 return container_of(dom, struct gart_domain, domain); in to_gart_domain()
166 static int gart_iommu_attach_dev(struct iommu_domain *domain, in gart_iommu_attach_dev() argument
169 struct gart_domain *gart_domain = to_gart_domain(domain); in gart_iommu_attach_dev()
199 static void gart_iommu_detach_dev(struct iommu_domain *domain, in gart_iommu_detach_dev() argument
202 struct gart_domain *gart_domain = to_gart_domain(domain); in gart_iommu_detach_dev()
238 gart_domain->domain.geometry.aperture_start = gart->iovmm_base; in gart_iommu_domain_alloc()
239 gart_domain->domain.geometry.aperture_end = gart->iovmm_base + in gart_iommu_domain_alloc()
241 gart_domain->domain.geometry.force_aperture = true; in gart_iommu_domain_alloc()
243 return &gart_domain->domain; in gart_iommu_domain_alloc()
[all …]
msm_iommu.c:55 struct iommu_domain domain; member
60 return container_of(dom, struct msm_priv, domain); in to_msm_priv()
86 static int __flush_iotlb(struct iommu_domain *domain) in __flush_iotlb() argument
88 struct msm_priv *priv = to_msm_priv(domain); in __flush_iotlb()
238 priv->domain.geometry.aperture_start = 0; in msm_iommu_domain_alloc()
239 priv->domain.geometry.aperture_end = (1ULL << 32) - 1; in msm_iommu_domain_alloc()
240 priv->domain.geometry.force_aperture = true; in msm_iommu_domain_alloc()
242 return &priv->domain; in msm_iommu_domain_alloc()
249 static void msm_iommu_domain_free(struct iommu_domain *domain) in msm_iommu_domain_free() argument
257 priv = to_msm_priv(domain); in msm_iommu_domain_free()
[all …]
shmobile-iommu.c:45 struct iommu_domain domain; member
53 return container_of(dom, struct shmobile_iommu_domain, domain); in to_sh_domain()
112 return &sh_domain->domain; in shmobile_iommu_domain_alloc()
115 static void shmobile_iommu_domain_free(struct iommu_domain *domain) in shmobile_iommu_domain_free() argument
117 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_domain_free()
128 static int shmobile_iommu_attach_device(struct iommu_domain *domain, in shmobile_iommu_attach_device() argument
132 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_attach_device()
157 static void shmobile_iommu_detach_device(struct iommu_domain *domain, in shmobile_iommu_detach_device() argument
161 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_detach_device()
220 static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, in shmobile_iommu_map() argument
[all …]
rockchip-iommu.c:84 struct iommu_domain domain; member
92 struct iommu_domain *domain; /* domain to which iommu is attached */ member
107 return container_of(dom, struct rk_iommu_domain, domain); in to_rk_domain()
488 if (iommu->domain) in rk_iommu_irq()
489 report_iommu_fault(iommu->domain, iommu->dev, iova, in rk_iommu_irq()
510 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, in rk_iommu_iova_to_phys() argument
513 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_iova_to_phys()
657 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, in rk_iommu_map() argument
660 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_map()
688 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, in rk_iommu_unmap() argument
[all …]
omap-iommu.c:58 struct iommu_domain domain; member
80 return container_of(dom, struct omap_iommu_domain, domain); in to_omap_domain()
787 struct iommu_domain *domain = obj->domain; in iommu_fault_handler() local
788 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); in iommu_fault_handler()
798 if (!report_iommu_fault(domain, obj->dev, da, 0)) in iommu_fault_handler()
1036 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, in omap_iommu_map() argument
1039 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); in omap_iommu_map()
1063 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, in omap_iommu_unmap() argument
1066 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); in omap_iommu_unmap()
1076 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) in omap_iommu_attach_dev() argument
[all …]
amd_iommu_v2.c:68 struct iommu_domain *domain; member
145 iommu_detach_group(dev_state->domain, group); in free_device_state()
150 iommu_domain_free(dev_state->domain); in free_device_state()
284 struct iommu_domain *domain; in unbind_pasid() local
286 domain = pasid_state->device_state->domain; in unbind_pasid()
298 amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); in unbind_pasid()
379 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address); in __mn_flush_page()
411 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, in mn_invalidate_range()
414 amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid); in mn_invalidate_range()
680 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid, in amd_iommu_bind_pasid()
[all …]
arm-smmu.c:349 struct iommu_domain domain; member
369 return container_of(dom, struct arm_smmu_domain, domain); in to_smmu_domain()
632 struct iommu_domain *domain = dev; in arm_smmu_context_fault() local
633 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_context_fault()
659 if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { in arm_smmu_context_fault()
796 static int arm_smmu_init_domain_context(struct iommu_domain *domain, in arm_smmu_init_domain_context() argument
804 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_init_domain_context()
905 "arm-smmu-context-fault", domain); in arm_smmu_init_domain_context()
925 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) in arm_smmu_destroy_domain_context() argument
927 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_destroy_domain_context()
[all …]
tegra-smmu.c:41 struct iommu_domain domain; member
54 return container_of(dom, struct tegra_smmu_as, domain); in to_smmu_as()
295 as->domain.geometry.aperture_start = 0; in tegra_smmu_domain_alloc()
296 as->domain.geometry.aperture_end = 0xffffffff; in tegra_smmu_domain_alloc()
297 as->domain.geometry.force_aperture = true; in tegra_smmu_domain_alloc()
299 return &as->domain; in tegra_smmu_domain_alloc()
302 static void tegra_smmu_domain_free(struct iommu_domain *domain) in tegra_smmu_domain_free() argument
304 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_domain_free()
440 static int tegra_smmu_attach_dev(struct iommu_domain *domain, in tegra_smmu_attach_dev() argument
444 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_attach_dev()
[all …]
arm-smmu-v3.c:605 struct arm_smmu_domain *domain; member
631 struct iommu_domain domain; member
646 return container_of(dom, struct arm_smmu_domain, domain); in to_smmu_domain()
1399 return &smmu_domain->domain; in arm_smmu_domain_alloc()
1420 static void arm_smmu_domain_free(struct iommu_domain *domain) in arm_smmu_domain_free() argument
1422 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_domain_free()
1498 static int arm_smmu_domain_finalise(struct iommu_domain *domain) in arm_smmu_domain_finalise() argument
1507 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_domain_finalise()
1595 struct arm_smmu_domain *smmu_domain = smmu_group->domain; in arm_smmu_install_ste_for_group()
1618 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) in arm_smmu_attach_dev() argument
[all …]
/linux-4.4.14/drivers/pinctrl/meson/
pinctrl-meson.c:76 static int meson_get_bank(struct meson_domain *domain, unsigned int pin, in meson_get_bank() argument
81 for (i = 0; i < domain->data->num_banks; i++) { in meson_get_bank()
82 if (pin >= domain->data->banks[i].first && in meson_get_bank()
83 pin <= domain->data->banks[i].last) { in meson_get_bank()
84 *bank = &domain->data->banks[i]; in meson_get_bank()
103 struct meson_domain **domain, in meson_get_domain_and_bank() argument
113 *domain = d; in meson_get_domain_and_bank()
196 struct meson_domain *domain; in meson_pmx_disable_other_groups() local
207 domain = &pc->domains[group->domain]; in meson_pmx_disable_other_groups()
208 regmap_update_bits(domain->reg_mux, in meson_pmx_disable_other_groups()
[all …]
pinctrl-meson.h:37 unsigned int domain; member
167 .domain = 0, \
185 .domain = 1, \
/linux-4.4.14/include/linux/
iommu.h:160 int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
161 void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
162 int (*map)(struct iommu_domain *domain, unsigned long iova,
164 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
166 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
168 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
172 int (*domain_get_attr)(struct iommu_domain *domain,
174 int (*domain_set_attr)(struct iommu_domain *domain,
182 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
184 void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
[all …]
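The iommu.h lines above list the iommu_ops callbacks that the drivers in this result set implement; callers reach them through the generic IOMMU API rather than invoking the ops directly. The following is a minimal consumer-side sketch, not taken from any file listed here: the function name, the iova/pa arguments and the simplified error handling are illustrative, while the calls themselves (iommu_domain_alloc, iommu_attach_device, iommu_map, iommu_unmap) match the usage visible in msm_iommu.c and tegra.c further down.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Hedged sketch: map one physically contiguous buffer for a device that
 * sits behind an IOMMU on the platform bus. */
static int example_map_buffer(struct device *dev, unsigned long iova,
			      phys_addr_t pa, size_t size)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto free_domain;

	ret = iommu_map(domain, iova, pa, size, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	/* ... the device may now DMA to [iova, iova + size) ... */

	iommu_unmap(domain, iova, size);
detach:
	iommu_detach_device(domain, dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}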
irqdomain.h:278 extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
280 extern void irq_domain_associate_many(struct irq_domain *domain,
283 extern void irq_domain_disassociate(struct irq_domain *domain,
301 static inline unsigned int irq_linear_revmap(struct irq_domain *domain, in irq_linear_revmap() argument
304 return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; in irq_linear_revmap()
309 extern int irq_create_strict_mappings(struct irq_domain *domain,
333 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
335 extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
357 extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
364 static inline int irq_domain_alloc_irqs(struct irq_domain *domain, in irq_domain_alloc_irqs() argument
[all …]
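The irqdomain.h excerpt above shows the association and reverse-map helpers; the pattern most entries under drivers/irqchip/ later in these results follow is to create a linear domain at init time and translate hardware irq numbers through it. A short sketch of that pattern; the example_* names, the fixed size of 32 and the caller-supplied ops are placeholders:

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *example_domain;

static int example_intc_init(struct device_node *node,
			     const struct irq_domain_ops *ops)
{
	unsigned int virq;

	/* One linear domain covering hwirqs 0..31 of this controller. */
	example_domain = irq_domain_add_linear(node, 32, ops, NULL);
	if (!example_domain)
		return -ENOMEM;

	/* Allocate a Linux irq number for hardware irq 5 ... */
	virq = irq_create_mapping(example_domain, 5);
	if (!virq)
		return -EINVAL;

	/* ... and look it up again cheaply, e.g. from the flow handler. */
	return irq_linear_revmap(example_domain, 5) == virq ? 0 : -EINVAL;
}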
msi.h:203 int (*msi_init)(struct irq_domain *domain,
207 void (*msi_free)(struct irq_domain *domain,
210 int (*msi_check)(struct irq_domain *domain,
213 int (*msi_prepare)(struct irq_domain *domain,
219 int (*handle_error)(struct irq_domain *domain,
271 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
273 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
274 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
289 int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
291 void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
[all …]
dma-iommu.h:28 int iommu_get_dma_cookie(struct iommu_domain *domain);
29 void iommu_put_dma_cookie(struct iommu_domain *domain);
32 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
74 static inline int iommu_get_dma_cookie(struct iommu_domain *domain) in iommu_get_dma_cookie() argument
79 static inline void iommu_put_dma_cookie(struct iommu_domain *domain) in iommu_put_dma_cookie() argument
async.h:42 struct async_domain *domain);
43 void async_unregister_domain(struct async_domain *domain);
45 extern void async_synchronize_full_domain(struct async_domain *domain);
48 struct async_domain *domain);
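async.h above declares the domain-scoped variants of the async API (the implementation, kernel/async.c, appears later in these results). A hedged sketch of how a caller keeps its asynchronous work on a private domain so it can wait for, and tear down, only that work; the example_* names and the placeholder probe body are illustrative:

#include <linux/async.h>

static ASYNC_DOMAIN(example_async_domain);

static void example_probe_one(void *data, async_cookie_t cookie)
{
	/* slow, independent per-device probe work would go here */
}

static void example_probe_all(void **devices, int n)
{
	int i;

	for (i = 0; i < n; i++)
		async_schedule_domain(example_probe_one, devices[i],
				      &example_async_domain);

	/* Wait only for the work queued on this domain ... */
	async_synchronize_full_domain(&example_async_domain);
	/* ... then drop the domain from the global pending list. */
	async_unregister_domain(&example_async_domain);
}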
pm_domain.h:28 bool (*power_down_ok)(struct dev_pm_domain *domain);
41 struct dev_pm_domain domain; /* PM domain operations */ member
56 int (*power_off)(struct generic_pm_domain *domain);
58 int (*power_on)(struct generic_pm_domain *domain);
64 int (*attach_dev)(struct generic_pm_domain *domain,
66 void (*detach_dev)(struct generic_pm_domain *domain,
73 return container_of(pd, struct generic_pm_domain, domain); in pd_to_genpd()
vga_switcheroo.h:144 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
146 …vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
163 …_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL;… in vga_switcheroo_init_domain_pm_ops() argument
165 …t_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL;… in vga_switcheroo_init_domain_pm_optimus_hdmi_audio() argument
/linux-4.4.14/arch/arm/boot/dts/
k2hk-clocks.dtsi:62 reg-names = "control", "domain";
63 domain-id = <0>;
72 reg-names = "control", "domain";
73 domain-id = <4>;
82 reg-names = "control", "domain";
83 domain-id = <5>;
92 reg-names = "control", "domain";
93 domain-id = <9>;
102 reg-names = "control", "domain";
103 domain-id = <10>;
[all …]
k2l-clocks.dtsi:52 reg-names = "control", "domain";
54 domain-id = <0>;
63 reg-names = "control", "domain";
64 domain-id = <4>;
73 reg-names = "control", "domain";
74 domain-id = <9>;
83 reg-names = "control", "domain";
84 domain-id = <10>;
93 reg-names = "control", "domain";
94 domain-id = <11>;
[all …]
keystone-clocks.dtsi:169 reg-names = "control", "domain";
170 domain-id = <0>;
180 reg-names = "control", "domain";
181 domain-id = <0>;
190 reg-names = "control", "domain";
191 domain-id = <0>;
201 reg-names = "control", "domain";
202 domain-id = <1>;
211 reg-names = "control", "domain";
212 domain-id = <1>;
[all …]
k2e-clocks.dtsi:44 reg-names = "control", "domain";
45 domain-id = <0>;
54 reg-names = "control", "domain";
55 domain-id = <5>;
64 reg-names = "control", "domain";
65 domain-id = <18>;
74 reg-names = "control", "domain";
75 domain-id = <29>;
r8a73a4.dtsi:768 #power-domain-cells = <0>;
774 #power-domain-cells = <0>;
778 #power-domain-cells = <0>;
783 #power-domain-cells = <0>;
790 #power-domain-cells = <0>;
794 #power-domain-cells = <0>;
802 #power-domain-cells = <0>;
806 #power-domain-cells = <0>;
814 #power-domain-cells = <0>;
818 #power-domain-cells = <0>;
[all …]
exynos4415.dtsi:131 pd_cam: cam-power-domain@10024000 {
134 #power-domain-cells = <0>;
137 pd_tv: tv-power-domain@10024020 {
140 #power-domain-cells = <0>;
143 pd_mfc: mfc-power-domain@10024040 {
146 #power-domain-cells = <0>;
149 pd_g3d: g3d-power-domain@10024060 {
152 #power-domain-cells = <0>;
155 pd_lcd0: lcd0-power-domain@10024080 {
158 #power-domain-cells = <0>;
[all …]
sh73a0.dtsi:418 #power-domain-cells = <0>;
422 #power-domain-cells = <0>;
427 #power-domain-cells = <0>;
432 #power-domain-cells = <0>;
437 #power-domain-cells = <0>;
442 #power-domain-cells = <0>;
447 #power-domain-cells = <0>;
454 #power-domain-cells = <0>;
458 #power-domain-cells = <0>;
463 #power-domain-cells = <0>;
[all …]
/linux-4.4.14/drivers/dca/
dca-core.c:60 struct dca_domain *domain; in dca_allocate_domain() local
62 domain = kzalloc(sizeof(*domain), GFP_NOWAIT); in dca_allocate_domain()
63 if (!domain) in dca_allocate_domain()
66 INIT_LIST_HEAD(&domain->dca_providers); in dca_allocate_domain()
67 domain->pci_rc = rc; in dca_allocate_domain()
69 return domain; in dca_allocate_domain()
72 static void dca_free_domain(struct dca_domain *domain) in dca_free_domain() argument
74 list_del(&domain->node); in dca_free_domain()
75 kfree(domain); in dca_free_domain()
97 struct dca_domain *domain; in unregister_dca_providers() local
[all …]
/linux-4.4.14/drivers/vfio/
vfio_iommu_type1.c:65 struct iommu_domain *domain; member
339 struct vfio_domain *domain, *d; in vfio_unmap_unpin() local
351 domain = d = list_first_entry(&iommu->domain_list, in vfio_unmap_unpin()
355 iommu_unmap(d->domain, dma->iova, dma->size); in vfio_unmap_unpin()
363 phys = iommu_iova_to_phys(domain->domain, iova); in vfio_unmap_unpin()
375 !domain->fgsp && iova + len < end; len += PAGE_SIZE) { in vfio_unmap_unpin()
376 next = iommu_iova_to_phys(domain->domain, iova + len); in vfio_unmap_unpin()
381 unmapped = iommu_unmap(domain->domain, iova, len); in vfio_unmap_unpin()
405 struct vfio_domain *domain; in vfio_pgsize_bitmap() local
409 list_for_each_entry(domain, &iommu->domain_list, next) in vfio_pgsize_bitmap()
[all …]
/linux-4.4.14/drivers/irqchip/
irq-atmel-aic5.c:88 struct irq_domain *domain = d->domain; in aic5_mask() local
89 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); in aic5_mask()
105 struct irq_domain *domain = d->domain; in aic5_unmask() local
106 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); in aic5_unmask()
122 struct irq_domain *domain = d->domain; in aic5_retrigger() local
123 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); in aic5_retrigger()
136 struct irq_domain *domain = d->domain; in aic5_set_type() local
137 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); in aic5_set_type()
155 struct irq_domain *domain = d->domain; in aic5_suspend() local
156 struct irq_domain_chip_generic *dgc = domain->gc; in aic5_suspend()
[all …]
irq-mmp.c:48 struct irq_domain *domain; member
65 struct irq_domain *domain = d->domain; in icu_mask_ack_irq() local
66 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; in icu_mask_ack_irq()
89 struct irq_domain *domain = d->domain; in icu_mask_irq() local
90 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; in icu_mask_irq()
108 struct irq_domain *domain = d->domain; in icu_unmask_irq() local
109 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; in icu_unmask_irq()
135 struct irq_domain *domain; in icu_mux_irq_demux() local
142 domain = icu_data[i].domain; in icu_mux_irq_demux()
143 data = (struct icu_chip_data *)domain->host_data; in icu_mux_irq_demux()
[all …]
irq-atmel-aic-common.c:115 static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) in aic_common_ext_irq_of_init() argument
117 struct device_node *node = irq_domain_get_of_node(domain); in aic_common_ext_irq_of_init()
124 gc = irq_get_domain_generic_chip(domain, 0); in aic_common_ext_irq_of_init()
130 gc = irq_get_domain_generic_chip(domain, hwirq); in aic_common_ext_irq_of_init()
133 hwirq, domain->revmap_size); in aic_common_ext_irq_of_init()
220 struct irq_domain *domain; in aic_common_of_init() local
239 domain = irq_domain_add_linear(node, nchips * 32, ops, aic); in aic_common_of_init()
240 if (!domain) { in aic_common_of_init()
245 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, in aic_common_of_init()
253 gc = irq_get_domain_generic_chip(domain, i * 32); in aic_common_of_init()
[all …]
irq-moxart.c:40 struct irq_domain *domain; member
55 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); in handle_irq()
74 intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, in moxart_of_intc_init()
76 if (!intc.domain) { in moxart_of_intc_init()
81 ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1, in moxart_of_intc_init()
87 irq_domain_remove(intc.domain); in moxart_of_intc_init()
97 gc = irq_get_domain_generic_chip(intc.domain, 0); in moxart_of_intc_init()
irq-vf610-mscm-ir.c:128 static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int virq, in vf610_mscm_ir_domain_alloc() argument
136 if (!irq_domain_get_of_node(domain->parent)) in vf610_mscm_ir_domain_alloc()
144 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in vf610_mscm_ir_domain_alloc()
146 domain->host_data); in vf610_mscm_ir_domain_alloc()
148 parent_fwspec.fwnode = domain->parent->fwnode; in vf610_mscm_ir_domain_alloc()
160 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, in vf610_mscm_ir_domain_alloc()
185 struct irq_domain *domain, *domain_parent; in vf610_mscm_ir_of_init() local
216 domain = irq_domain_add_hierarchy(domain_parent, 0, in vf610_mscm_ir_of_init()
219 if (!domain) { in vf610_mscm_ir_of_init()
224 if (of_device_is_compatible(irq_domain_get_of_node(domain->parent), in vf610_mscm_ir_of_init()
irq-tb10x.c:102 struct irq_domain *domain = irq_desc_get_handler_data(desc); in tb10x_irq_cascade() local
105 generic_handle_irq(irq_find_mapping(domain, irq)); in tb10x_irq_cascade()
114 struct irq_domain *domain; in of_tb10x_init_irq() local
136 domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ, in of_tb10x_init_irq()
138 if (!domain) { in of_tb10x_init_irq()
145 ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ, in of_tb10x_init_irq()
155 gc = domain->gc->gc[0]; in of_tb10x_init_irq()
178 domain); in of_tb10x_init_irq()
189 irq_domain_remove(domain); in of_tb10x_init_irq()
irq-mtk-sysirq.c:91 static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, in mtk_sysirq_domain_alloc() argument
108 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in mtk_sysirq_domain_alloc()
110 domain->host_data); in mtk_sysirq_domain_alloc()
112 gic_fwspec.fwnode = domain->parent->fwnode; in mtk_sysirq_domain_alloc()
113 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec); in mtk_sysirq_domain_alloc()
125 struct irq_domain *domain, *domain_parent; in mtk_sysirq_of_init() local
153 domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node, in mtk_sysirq_of_init()
155 if (!domain) { in mtk_sysirq_of_init()
irq-imx-gpcv2.c:174 static int imx_gpcv2_domain_alloc(struct irq_domain *domain, in imx_gpcv2_domain_alloc() argument
185 err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type); in imx_gpcv2_domain_alloc()
193 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, in imx_gpcv2_domain_alloc()
194 &gpcv2_irqchip_data_chip, domain->host_data); in imx_gpcv2_domain_alloc()
198 parent_fwspec.fwnode = domain->parent->fwnode; in imx_gpcv2_domain_alloc()
199 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, in imx_gpcv2_domain_alloc()
212 struct irq_domain *parent_domain, *domain; in imx_gpcv2_irqchip_init() local
240 domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, in imx_gpcv2_irqchip_init()
242 if (!domain) { in imx_gpcv2_irqchip_init()
247 irq_set_default_host(domain); in imx_gpcv2_irqchip_init()
irq-brcmstb-l2.c:47 struct irq_domain *domain; member
55 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); in brcmstb_l2_intc_irq_handle()
77 generic_handle_irq(irq_find_mapping(b->domain, irq)); in brcmstb_l2_intc_irq_handle()
151 data->domain = irq_domain_add_linear(np, 32, in brcmstb_l2_intc_of_init()
153 if (!data->domain) { in brcmstb_l2_intc_of_init()
166 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, in brcmstb_l2_intc_of_init()
177 gc = irq_get_domain_generic_chip(data->domain, 0); in brcmstb_l2_intc_of_init()
208 irq_domain_remove(data->domain); in brcmstb_l2_intc_of_init()
irq-sunxi-nmi.c:66 struct irq_domain *domain = irq_desc_get_handler_data(desc); in sunxi_sc_nmi_handle_irq() local
68 unsigned int virq = irq_find_mapping(domain, 0); in sunxi_sc_nmi_handle_irq()
127 struct irq_domain *domain; in sunxi_sc_nmi_irq_init() local
134 domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); in sunxi_sc_nmi_irq_init()
135 if (!domain) { in sunxi_sc_nmi_irq_init()
140 ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME, in sunxi_sc_nmi_irq_init()
155 gc = irq_get_domain_generic_chip(domain, 0); in sunxi_sc_nmi_irq_init()
187 irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain); in sunxi_sc_nmi_irq_init()
192 irq_domain_remove(domain); in sunxi_sc_nmi_irq_init()
irq-i8259.c:334 struct irq_domain *domain; in __init_i8259_irqs() local
341 domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0, in __init_i8259_irqs()
343 if (!domain) in __init_i8259_irqs()
347 return domain; in __init_i8259_irqs()
357 struct irq_domain *domain = irq_desc_get_handler_data(desc); in i8259_irq_dispatch() local
364 irq = irq_linear_revmap(domain, hwirq); in i8259_irq_dispatch()
370 struct irq_domain *domain; in i8259_of_init() local
379 domain = __init_i8259_irqs(node); in i8259_of_init()
381 domain); in i8259_of_init()
irq-atmel-aic.c:141 static void __init aic_hw_init(struct irq_domain *domain) in aic_hw_init() argument
143 struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); in aic_hw_init()
245 struct irq_domain *domain; in aic_of_init() local
250 domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic", in aic_of_init()
252 if (IS_ERR(domain)) in aic_of_init()
253 return PTR_ERR(domain); in aic_of_init()
257 aic_domain = domain; in aic_of_init()
258 gc = irq_get_domain_generic_chip(domain, 0); in aic_of_init()
271 aic_hw_init(domain); in aic_of_init()
irq-gic-v2m.c:127 static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, in gicv2m_irq_gic_domain_alloc() argument
135 if (is_of_node(domain->parent->fwnode)) { in gicv2m_irq_gic_domain_alloc()
136 fwspec.fwnode = domain->parent->fwnode; in gicv2m_irq_gic_domain_alloc()
145 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); in gicv2m_irq_gic_domain_alloc()
150 d = irq_domain_get_irq_data(domain->parent, virq); in gicv2m_irq_gic_domain_alloc()
170 static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in gicv2m_irq_domain_alloc() argument
192 err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); in gicv2m_irq_domain_alloc()
198 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, in gicv2m_irq_domain_alloc()
204 static void gicv2m_irq_domain_free(struct irq_domain *domain, in gicv2m_irq_domain_free() argument
207 struct irq_data *d = irq_domain_get_irq_data(domain, virq); in gicv2m_irq_domain_free()
[all …]
irq-tegra.c:245 static int tegra_ictlr_domain_alloc(struct irq_domain *domain, in tegra_ictlr_domain_alloc() argument
251 struct tegra_ictlr_info *info = domain->host_data; in tegra_ictlr_domain_alloc()
267 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in tegra_ictlr_domain_alloc()
273 parent_fwspec.fwnode = domain->parent->fwnode; in tegra_ictlr_domain_alloc()
274 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, in tegra_ictlr_domain_alloc()
278 static void tegra_ictlr_domain_free(struct irq_domain *domain, in tegra_ictlr_domain_free() argument
285 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); in tegra_ictlr_domain_free()
299 struct irq_domain *parent_domain, *domain; in tegra_ictlr_init() local
354 domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, in tegra_ictlr_init()
357 if (!domain) { in tegra_ictlr_init()
irq-renesas-h8300h.c:78 struct irq_domain *domain; in h8300h_intc_of_init() local
87 domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL); in h8300h_intc_of_init()
88 BUG_ON(!domain); in h8300h_intc_of_init()
89 irq_set_default_host(domain); in h8300h_intc_of_init()
irq-vt8500.c:76 struct irq_domain *domain; /* Domain for this controller */ member
85 struct vt8500_irq_data *priv = d->domain->host_data; in vt8500_irq_mask()
106 struct vt8500_irq_data *priv = d->domain->host_data; in vt8500_irq_unmask()
117 struct vt8500_irq_data *priv = d->domain->host_data; in vt8500_irq_set_type()
199 handle_domain_irq(intc[i].domain, irqnr, regs); in vt8500_handle_irq()
216 intc[active_cnt].domain = irq_domain_add_linear(node, 64, in vt8500_irq_init()
224 if (!intc[active_cnt].domain) { in vt8500_irq_init()
irq-dw-apb-ictl.c:73 struct irq_domain *domain; in dw_apb_ictl_init() local
123 domain = irq_domain_add_linear(np, nrirqs, in dw_apb_ictl_init()
125 if (!domain) { in dw_apb_ictl_init()
131 ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name, in dw_apb_ictl_init()
140 gc = irq_get_domain_generic_chip(domain, i * 32); in dw_apb_ictl_init()
149 irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain); in dw_apb_ictl_init()
irq-crossbar.c:78 static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, in allocate_gic_irq() argument
85 if (!irq_domain_get_of_node(domain->parent)) in allocate_gic_irq()
100 fwspec.fwnode = domain->parent->fwnode; in allocate_gic_irq()
106 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); in allocate_gic_irq()
156 static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq, in crossbar_domain_free() argument
163 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); in crossbar_domain_free()
339 struct irq_domain *parent_domain, *domain; in irqcrossbar_init() local
357 domain = irq_domain_add_hierarchy(parent_domain, 0, in irqcrossbar_init()
361 if (!domain) { in irqcrossbar_init()
irq-renesas-h8s.c:84 struct irq_domain *domain; in h8s_intc_of_init() local
95 domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL); in h8s_intc_of_init()
96 BUG_ON(!domain); in h8s_intc_of_init()
97 irq_set_default_host(domain); in h8s_intc_of_init()
irq-gic-v3-its-platform-msi.c:27 static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, in its_pmsi_prepare() argument
34 msi_info = msi_get_domain_info(domain->parent); in its_pmsi_prepare()
43 if (args.np == irq_domain_get_of_node(domain)) { in its_pmsi_prepare()
57 return msi_info->ops->msi_prepare(domain->parent, in its_pmsi_prepare()
irq-imgpdc.c:81 struct irq_domain *domain; member
121 return (struct pdc_intc_priv *)data->domain->host_data; in irqd_to_priv()
239 irq_no = irq_linear_revmap(priv->domain, i); in pdc_intc_perip_isr()
260 irq_no = irq_linear_revmap(priv->domain, in pdc_intc_syswake_isr()
385 priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops, in pdc_intc_probe()
387 if (unlikely(!priv->domain)) { in pdc_intc_probe()
397 ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc", in pdc_intc_probe()
405 gc = irq_get_domain_generic_chip(priv->domain, 0); in pdc_intc_probe()
419 gc = irq_get_domain_generic_chip(priv->domain, 8); in pdc_intc_probe()
470 irq_domain_remove(priv->domain); in pdc_intc_probe()
[all …]
irq-metag.c:29 struct irq_domain *domain; member
239 irq_no = irq_linear_revmap(priv->domain, hw); in metag_internal_irq_demux()
270 if (!priv->domain) in internal_irq_map()
272 return irq_create_mapping(priv->domain, hw); in internal_irq_map()
329 priv->domain = irq_domain_add_linear(NULL, 32, in init_internal_IRQ()
332 if (unlikely(!priv->domain)) { in init_internal_IRQ()
irq-bcm2835.c:93 struct irq_domain *domain; member
154 intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0), in armctrl_of_init()
156 if (!intc.domain) in armctrl_of_init()
165 irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i)); in armctrl_of_init()
245 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); in bcm2835_handle_irq()
253 generic_handle_irq(irq_linear_revmap(intc.domain, hwirq)); in bcm2836_chained_handle_irq()
irq-orion.c:143 struct irq_domain *domain; in orion_bridge_irq_init() local
150 domain = irq_domain_add_linear(np, nrirqs, in orion_bridge_irq_init()
152 if (!domain) { in orion_bridge_irq_init()
157 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, in orion_bridge_irq_init()
182 gc = irq_get_domain_generic_chip(domain, 0); in orion_bridge_irq_init()
201 domain); in orion_bridge_irq_init()
irq-clps711x.c:72 struct irq_domain *domain; member
84 handle_domain_irq(clps711x_intc->domain, in clps711x_irqh()
90 handle_domain_irq(clps711x_intc->domain, in clps711x_irqh()
190 clps711x_intc->domain = in _clps711x_intc_init()
193 if (!clps711x_intc->domain) { in _clps711x_intc_init()
198 irq_set_default_host(clps711x_intc->domain); in _clps711x_intc_init()
irq-gic-v3-its-pci-msi.c:68 static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, in its_pci_msi_prepare() argument
78 msi_info = msi_get_domain_info(domain->parent); in its_pci_msi_prepare()
87 info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev); in its_pci_msi_prepare()
89 return msi_info->ops->msi_prepare(domain->parent, in its_pci_msi_prepare()
irq-bcm7120-l2.c:51 struct irq_domain *domain; member
71 irq_get_domain_generic_chip(b->domain, base); in bcm7120_l2_intc_irq_handle()
82 generic_handle_irq(irq_find_mapping(b->domain, in bcm7120_l2_intc_irq_handle()
266 data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words, in bcm7120_l2_intc_probe()
268 if (!data->domain) { in bcm7120_l2_intc_probe()
280 ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1, in bcm7120_l2_intc_probe()
292 gc = irq_get_domain_generic_chip(data->domain, irq); in bcm7120_l2_intc_probe()
330 irq_domain_remove(data->domain); in bcm7120_l2_intc_probe()
irq-bcm2836.c:75 struct irq_domain *domain; member
151 int irq = irq_create_mapping(intc.domain, hwirq); in bcm2836_arm_irqchip_register_irq()
178 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); in bcm2836_arm_irqchip_handle_irq()
249 intc.domain = irq_domain_add_linear(node, LAST_IRQ + 1, in bcm2836_arm_irqchip_l1_intc_of_init()
252 if (!intc.domain) in bcm2836_arm_irqchip_l1_intc_of_init()
irq-versatile-fpga.c:44 struct irq_domain *domain; member
82 generic_handle_irq(irq_find_mapping(f->domain, irq)); in fpga_irq_handle()
99 handle_domain_irq(f->domain, irq, regs); in handle_one_fpga()
164 f->domain = irq_domain_add_simple(node, fls(valid), irq_start, in fpga_irq_init()
171 irq_create_mapping(f->domain, i); in fpga_irq_init()
irq-gic-v3.c:47 struct irq_domain *domain; member
347 err = handle_domain_irq(gic_data.domain, irqnr, regs); in gic_handle_irq()
789 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in gic_irq_domain_alloc() argument
797 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); in gic_irq_domain_alloc()
802 gic_irq_domain_map(domain, virq + i, hwirq + i); in gic_irq_domain_alloc()
807 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, in gic_irq_domain_free() argument
813 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); in gic_irq_domain_free()
911 gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, in gic_of_init()
915 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { in gic_of_init()
923 its_init(node, &gic_data.rdists, gic_data.domain); in gic_of_init()
[all …]
irq-omap-intc.c:69 static struct irq_domain *domain; variable
254 domain = irq_domain_add_linear(node, omap_nr_irqs, in omap_init_irq_of()
259 ret = omap_alloc_gc_of(domain, omap_irq_base); in omap_init_irq_of()
261 irq_domain_remove(domain); in omap_init_irq_of()
280 domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0, in omap_init_irq_legacy()
363 handle_domain_irq(domain, irqnr, regs); in omap_intc_handle_irq()
irq-mips-cpu.c:148 struct irq_domain *domain; in __mips_cpu_irq_init() local
154 domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0, in __mips_cpu_irq_init()
156 if (!domain) in __mips_cpu_irq_init()
irq-nvic.c:62 static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in nvic_irq_domain_alloc() argument
70 ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type); in nvic_irq_domain_alloc()
75 irq_map_generic_chip(domain, virq + i, hwirq + i); in nvic_irq_domain_alloc()
irq-s3c24xx.c:73 struct irq_domain *domain; member
107 irqno = irq_find_mapping(parent_intc->domain, in s3c_irq_mask()
127 irqno = irq_find_mapping(parent_intc->domain, in s3c_irq_unmask()
314 offset = irq_domain_get_of_node(intc->domain) ? 32 : 0; in s3c_irq_demux()
327 irq = irq_find_mapping(sub_intc->domain, offset + n); in s3c_irq_demux()
345 if (!irq_domain_get_of_node(intc->domain)) in s3c24xx_handle_intc()
364 handle_domain_irq(intc->domain, intc_offset + offset, regs); in s3c24xx_handle_intc()
481 irqno = irq_find_mapping(parent_intc->domain, in s3c24xx_irq_map()
590 intc->domain = irq_domain_add_legacy(np, irq_num, irq_start, in s3c24xx_init_intc()
593 if (!intc->domain) { in s3c24xx_init_intc()
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/power/
power_domain.txt:7 This device tree binding can be used to bind PM domain consumer devices with
8 their PM domains provided by PM domain providers. A PM domain provider can be
11 phandle arguments (so called PM domain specifiers) of length specified by the
12 #power-domain-cells property in the PM domain provider node.
14 ==PM domain providers==
17 - #power-domain-cells : Number of cells in a PM domain specifier;
18 Typically 0 for nodes representing a single PM domain and 1 for nodes
23 - power-domains : A phandle and PM domain specifier as defined by bindings of
25 Some power domains might be powered from another power domain (or have
27 a standard PM domain consumer binding is used. When provided, all domains
[all …]
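The binding text above covers the device-tree side of a PM domain provider. The kernel-side counterpart, as drivers/soc/dove/pmu.c later in these results does, is to initialise a generic_pm_domain and register it as the provider for the node that carries #power-domain-cells. A rough sketch with placeholder names and empty power_on/power_off callbacks (the real register accesses are hardware specific):

#include <linux/of.h>
#include <linux/pm_domain.h>

static int example_power_off(struct generic_pm_domain *domain)
{
	/* gate the domain in the power controller registers */
	return 0;
}

static int example_power_on(struct generic_pm_domain *domain)
{
	/* ungate the domain and wait for it to become ready */
	return 0;
}

static struct generic_pm_domain example_genpd = {
	.name		= "example-domain",
	.power_off	= example_power_off,
	.power_on	= example_power_on,
};

static int example_register_provider(struct device_node *np)
{
	/* Third argument is "is_off": start in the powered-on state. */
	pm_genpd_init(&example_genpd, NULL, false);

	/* Matches a provider node with #power-domain-cells = <0>. */
	return of_genpd_add_provider_simple(np, &example_genpd);
}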
pd-samsung.txt:8 * samsung,exynos4210-pd - for exynos4210 type power domain.
11 - #power-domain-cells: number of cells in power domain specifier;
16 devices in this power domain are set to oscclk before power gating
17 and restored back after powering on a domain. This is required for
22 - clkN: Input clocks to the devices in this power domain. These clocks
23 will be reparented to oscclk before swithing power domain off.
25 the domain. Maximum of 4 clocks (N = 0 to 3) are supported.
27 the power domain. These clock should be enabled during power
28 domain on/off operations.
29 - power-domains: phandle pointing to the parent power domain, for more details
[all …]
renesas,sysc-rmobile.txt:23 - pm-domains: This node contains a hierarchy of PM domain nodes, which should
30 Each of the PM domain nodes represents a PM domain, as documented by the
31 generic PM domain bindings in
38 - #power-domain-cells: Must be 0.
41 - reg: If the PM domain is not always-on, this property must contain the bit
47 If the PM domain is always-on, this property must be omitted.
52 This shows a subset of the r8a7740 PM domain hierarchy, containing the
53 C5 "always-on" domain, 2 of its subdomains (A4S and A4SU), and the A3SP domain,
64 #power-domain-cells = <0>;
70 #power-domain-cells = <0>;
[all …]
fsl,imx-gpc.txt:13 - pu-supply: Link to the LDO regulator powering the PU power domain
14 - clocks: Clock phandles to devices in the PU power domain that need
15 to be enabled during domain power-up for reset propagation.
16 - #power-domain-cells: Should be 1, see below:
18 The gpc node is a power-controller as documented by the generic power domain
35 #power-domain-cells = <1>;
39 Specifying power domain for IP modules
42 IP cores belonging to a power domain should contain a 'power-domains' property
44 the power domain the device belongs to.
46 Example of a device that is part of the PU power domain:
rockchip-io-domain.txt:4 IO domain voltages on some Rockchip SoCs are variable but need to be
34 - "rockchip,rk3188-io-voltage-domain" for rk3188
35 - "rockchip,rk3288-io-voltage-domain" for rk3288
36 - "rockchip,rk3368-io-voltage-domain" for rk3368
37 - "rockchip,rk3368-pmu-io-voltage-domain" for rk3368 pmu-domains
85 compatible = "rockchip,rk3288-io-voltage-domain";
/linux-4.4.14/drivers/soc/dove/
pmu.c:142 static int pmu_domain_power_off(struct generic_pm_domain *domain) in pmu_domain_power_off() argument
144 struct pmu_domain *pmu_dom = to_pmu_domain(domain); in pmu_domain_power_off()
176 static int pmu_domain_power_on(struct generic_pm_domain *domain) in pmu_domain_power_on() argument
178 struct pmu_domain *pmu_dom = to_pmu_domain(domain); in pmu_domain_power_on()
210 static void __pmu_domain_register(struct pmu_domain *domain, in __pmu_domain_register() argument
213 unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR); in __pmu_domain_register()
215 domain->base.power_off = pmu_domain_power_off; in __pmu_domain_register()
216 domain->base.power_on = pmu_domain_power_on; in __pmu_domain_register()
218 pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask)); in __pmu_domain_register()
221 of_genpd_add_provider_simple(np, &domain->base); in __pmu_domain_register()
[all …]
/linux-4.4.14/kernel/
async.c:78 struct async_domain *domain; member
85 static async_cookie_t lowest_in_progress(struct async_domain *domain) in lowest_in_progress() argument
93 if (domain) in lowest_in_progress()
94 pending = &domain->pending; in lowest_in_progress()
148 static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain) in __async_schedule() argument
176 entry->domain = domain; in __async_schedule()
183 list_add_tail(&entry->domain_list, &domain->pending); in __async_schedule()
184 if (domain->registered) in __async_schedule()
226 struct async_domain *domain) in async_schedule_domain() argument
228 return __async_schedule(func, data, domain); in async_schedule_domain()
[all …]
/linux-4.4.14/drivers/gpu/drm/msm/
msm_iommu.c:23 struct iommu_domain *domain; member
37 return iommu_attach_device(iommu->domain, mmu->dev); in msm_iommu_attach()
43 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
50 struct iommu_domain *domain = iommu->domain; in msm_iommu_map() local
56 if (!domain || !sgt) in msm_iommu_map()
65 ret = iommu_map(domain, da, pa, bytes, prot); in msm_iommu_map()
79 iommu_unmap(domain, da, bytes); in msm_iommu_map()
89 struct iommu_domain *domain = iommu->domain; in msm_iommu_unmap() local
98 unmapped = iommu_unmap(domain, da, bytes); in msm_iommu_unmap()
115 iommu_domain_free(iommu->domain); in msm_iommu_destroy()
[all …]
msm_gem.c:289 if (!msm_obj->domain[id].iova) { in msm_gem_get_iova_locked()
306 msm_obj->domain[id].iova = offset; in msm_gem_get_iova_locked()
308 msm_obj->domain[id].iova = physaddr(obj); in msm_gem_get_iova_locked()
313 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova_locked()
327 if (msm_obj->domain[id].iova) { in msm_gem_get_iova()
328 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova()
344 WARN_ON(!msm_obj->domain[id].iova); in msm_gem_iova()
345 return msm_obj->domain[id].iova; in msm_gem_iova()
524 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { in msm_gem_free_object()
526 if (mmu && msm_obj->domain[id].iova) { in msm_gem_free_object()
[all …]
/linux-4.4.14/arch/ia64/kernel/
irq_ia64.c:80 .domain = CPU_MASK_NONE
106 static inline int find_unassigned_vector(cpumask_t domain) in find_unassigned_vector() argument
111 cpumask_and(&mask, &domain, cpu_online_mask); in find_unassigned_vector()
117 cpumask_and(&mask, &domain, &vector_table[vector]); in find_unassigned_vector()
125 static int __bind_irq_vector(int irq, int vector, cpumask_t domain) in __bind_irq_vector() argument
134 cpumask_and(&mask, &domain, cpu_online_mask); in __bind_irq_vector()
137 if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain)) in __bind_irq_vector()
144 cfg->domain = domain; in __bind_irq_vector()
146 cpumask_or(&vector_table[vector], &vector_table[vector], &domain); in __bind_irq_vector()
150 int bind_irq_vector(int irq, int vector, cpumask_t domain) in bind_irq_vector() argument
[all …]
/linux-4.4.14/arch/x86/include/asm/
irqdomain.h:40 extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
42 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
44 extern void mp_irqdomain_activate(struct irq_domain *domain,
46 extern void mp_irqdomain_deactivate(struct irq_domain *domain,
48 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
52 extern void arch_init_msi_domain(struct irq_domain *domain);
54 static inline void arch_init_msi_domain(struct irq_domain *domain) { } in arch_init_msi_domain() argument
58 extern void arch_init_htirq_domain(struct irq_domain *domain);
60 static inline void arch_init_htirq_domain(struct irq_domain *domain) { } in arch_init_htirq_domain() argument
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/device/
ctrl.c:73 const struct nvkm_domain *domain; in nvkm_control_mthd_pstate_attr() local
93 domain = clk->domains; in nvkm_control_mthd_pstate_attr()
95 while (domain->name != nv_clk_src_max) { in nvkm_control_mthd_pstate_attr()
96 if (domain->mname && ++j == args->v0.index) in nvkm_control_mthd_pstate_attr()
98 domain++; in nvkm_control_mthd_pstate_attr()
101 if (domain->name == nv_clk_src_max) in nvkm_control_mthd_pstate_attr()
110 lo = pstate->base.domain[domain->name]; in nvkm_control_mthd_pstate_attr()
113 lo = min(lo, cstate->domain[domain->name]); in nvkm_control_mthd_pstate_attr()
114 hi = max(hi, cstate->domain[domain->name]); in nvkm_control_mthd_pstate_attr()
119 lo = max(nvkm_clk_read(clk, domain->name), 0); in nvkm_control_mthd_pstate_attr()
[all …]
tegra.c:94 tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); in nvkm_device_tegra_probe_iommu()
95 if (IS_ERR(tdev->iommu.domain)) in nvkm_device_tegra_probe_iommu()
103 pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap; in nvkm_device_tegra_probe_iommu()
115 ret = iommu_attach_device(tdev->iommu.domain, dev); in nvkm_device_tegra_probe_iommu()
129 iommu_detach_device(tdev->iommu.domain, dev); in nvkm_device_tegra_probe_iommu()
132 iommu_domain_free(tdev->iommu.domain); in nvkm_device_tegra_probe_iommu()
135 tdev->iommu.domain = NULL; in nvkm_device_tegra_probe_iommu()
145 if (tdev->iommu.domain) { in nvkm_device_tegra_remove_iommu()
147 iommu_detach_device(tdev->iommu.domain, tdev->device.dev); in nvkm_device_tegra_remove_iommu()
148 iommu_domain_free(tdev->iommu.domain); in nvkm_device_tegra_remove_iommu()
/linux-4.4.14/Documentation/scheduler/
sched-stats.txt:9 per-domain. Note that domains (and their associated information) will only
12 In version 14 of schedstat, there is at least one level of domain
14 domain. Domains have no particular names in this implementation, but
16 cpus on the machine, while domain0 is the most tightly focused domain,
18 are no architectures which need more than three domain levels. The first
19 field in the domain stats is a bit map indicating which cpus are affected
20 by that domain.
59 One of these is produced per domain for each cpu described. (Note that if
63 domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 3…
65 The first field is a bit mask indicating what cpus this domain operates over.
[all …]
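Because the per-domain lines described above start with the literal string "domain", they are easy to pull out of /proc/schedstat for a quick look. A small user-space sketch (not part of the kernel sources listed here) that simply echoes those lines without interpreting the fields:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[1024];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("/proc/schedstat");
		return 1;
	}

	/* Print only the "domain<N> <cpumask> ..." lines. */
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "domain", 6))
			fputs(line, stdout);

	fclose(f);
	return 0;
}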
sched-domains.txt:1 Each CPU has a "base" scheduling domain (struct sched_domain). The domain
3 MUST be NULL terminated, and domain structures should be per-CPU as they are
6 Each scheduling domain spans a number of CPUs (stored in the ->span field).
7 A domain's span MUST be a superset of it child's span (this restriction could
8 be relaxed if the need arises), and a base domain for CPU i MUST span at least
9 i. The top domain for each CPU will generally span all CPUs in the system
12 explicitly set. A sched domain's span means "balance process load among these
15 Each scheduling domain must have one or more CPU groups (struct sched_group)
18 domain's span. The intersection of cpumasks from any two of these groups
20 contain the CPU to which the domain belongs. Groups may be shared among
[all …]
/linux-4.4.14/net/netlabel/
netlabel_domainhash.c:100 kfree(ptr->domain); in netlbl_domhsh_free_entry()
140 static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) in netlbl_domhsh_search() argument
146 if (domain != NULL) { in netlbl_domhsh_search()
147 bkt = netlbl_domhsh_hash(domain); in netlbl_domhsh_search()
150 if (iter->valid && strcmp(iter->domain, domain) == 0) in netlbl_domhsh_search()
170 static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) in netlbl_domhsh_search_def() argument
174 entry = netlbl_domhsh_search(domain); in netlbl_domhsh_search_def()
211 entry->domain ? entry->domain : "(default)"); in netlbl_domhsh_audit_add()
387 if (entry->domain != NULL) in netlbl_domhsh_add()
388 entry_old = netlbl_domhsh_search(entry->domain); in netlbl_domhsh_add()
[all …]
netlabel_domainhash.h:72 char *domain; member
90 int netlbl_domhsh_remove_af4(const char *domain,
94 int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
96 struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
97 struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
100 struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
netlabel_kapi.c:68 int netlbl_cfg_map_del(const char *domain, in netlbl_cfg_map_del() argument
75 return netlbl_domhsh_remove(domain, audit_info); in netlbl_cfg_map_del()
79 return netlbl_domhsh_remove_af4(domain, addr, mask, in netlbl_cfg_map_del()
102 int netlbl_cfg_unlbl_map_add(const char *domain, in netlbl_cfg_unlbl_map_add() argument
117 if (domain != NULL) { in netlbl_cfg_unlbl_map_add()
118 entry->domain = kstrdup(domain, GFP_ATOMIC); in netlbl_cfg_unlbl_map_add()
119 if (entry->domain == NULL) in netlbl_cfg_unlbl_map_add()
189 kfree(entry->domain); in netlbl_cfg_unlbl_map_add()
330 const char *domain, in netlbl_cfg_cipsov4_map_add() argument
348 if (domain != NULL) { in netlbl_cfg_cipsov4_map_add()
[all …]
netlabel_mgmt.c:106 entry->domain = kmalloc(tmp_size, GFP_KERNEL); in netlbl_mgmt_add_common()
107 if (entry->domain == NULL) { in netlbl_mgmt_add_common()
111 nla_strlcpy(entry->domain, in netlbl_mgmt_add_common()
246 kfree(entry->domain); in netlbl_mgmt_add_common()
274 if (entry->domain != NULL) { in netlbl_mgmt_listentry()
276 NLBL_MGMT_A_DOMAIN, entry->domain); in netlbl_mgmt_listentry()
412 char *domain; in netlbl_mgmt_remove() local
420 domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]); in netlbl_mgmt_remove()
421 return netlbl_domhsh_remove(domain, &audit_info); in netlbl_mgmt_remove()
/linux-4.4.14/Documentation/devicetree/bindings/soc/rockchip/
power_domain.txt:6 Required properties for power domain controller:
9 - #power-domain-cells: Number of cells in a power-domain specifier.
14 Required properties for power domain sub nodes:
15 - reg: index of the power domain, should use macros in:
16 "include/dt-bindings/power/rk3288-power.h" - for RK3288 type power domain.
17 - clocks (optional): phandles to clocks which need to be enabled while power domain
24 #power-domain-cells = <1>;
36 power domain to use.
38 "include/dt-bindings/power/rk3288-power.h" - for rk3288 type power domain.
40 Example of the node using power domain:
/linux-4.4.14/arch/x86/kernel/apic/
Dmsi.c72 struct irq_domain *domain; in native_setup_msi_irqs() local
79 domain = irq_remapping_get_irq_domain(&info); in native_setup_msi_irqs()
80 if (domain == NULL) in native_setup_msi_irqs()
81 domain = msi_default_domain; in native_setup_msi_irqs()
82 if (domain == NULL) in native_setup_msi_irqs()
85 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); in native_setup_msi_irqs()
99 static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, in pci_msi_prepare() argument
198 static int dmar_msi_init(struct irq_domain *domain, in dmar_msi_init() argument
202 irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL, in dmar_msi_init()
234 struct irq_domain *domain = dmar_get_irq_domain(); in dmar_alloc_hwirq() local
[all …]
Dhtirq.c63 static int htirq_domain_alloc(struct irq_domain *domain, unsigned int virq, in htirq_domain_alloc() argument
79 if (irq_find_mapping(domain, hwirq) > 0) in htirq_domain_alloc()
86 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); in htirq_domain_alloc()
99 irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg, in htirq_domain_alloc()
105 static void htirq_domain_free(struct irq_domain *domain, unsigned int virq, in htirq_domain_free() argument
108 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); in htirq_domain_free()
112 irq_domain_free_irqs_top(domain, virq, nr_irqs); in htirq_domain_free()
115 static void htirq_domain_activate(struct irq_domain *domain, in htirq_domain_activate() argument
137 static void htirq_domain_deactivate(struct irq_domain *domain, in htirq_domain_deactivate() argument
Dvector.c26 cpumask_var_t domain; member
82 if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node)) in alloc_apic_chip_data()
88 free_cpumask_var(data->domain); in alloc_apic_chip_data()
97 free_cpumask_var(data->domain); in free_apic_chip_data()
150 if (cpumask_subset(vector_cpumask, d->domain)) { in __assign_irq_vector()
151 if (cpumask_equal(vector_cpumask, d->domain)) in __assign_irq_vector()
157 cpumask_andnot(d->old_domain, d->domain, vector_cpumask); in __assign_irq_vector()
187 cpumask_copy(d->old_domain, d->domain); in __assign_irq_vector()
216 cpumask_copy(d->domain, vector_cpumask); in __assign_irq_vector()
223 BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain, in __assign_irq_vector()
[all …]
Dio_apic.c955 static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi, in alloc_irq_from_domain() argument
982 return __irq_domain_alloc_irqs(domain, irq, 1, in alloc_irq_from_domain()
997 static int alloc_isa_irq_from_domain(struct irq_domain *domain, in alloc_isa_irq_from_domain() argument
1017 irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true); in alloc_isa_irq_from_domain()
1019 irq_data = irq_domain_get_irq_data(domain, irq); in alloc_isa_irq_from_domain()
1035 struct irq_domain *domain = mp_ioapic_irqdomain(ioapic); in mp_map_pin_to_irq() local
1037 if (!domain) in mp_map_pin_to_irq()
1048 irq = irq_find_mapping(domain, pin); in mp_map_pin_to_irq()
1055 irq = alloc_isa_irq_from_domain(domain, irq, in mp_map_pin_to_irq()
1057 else if ((irq = irq_find_mapping(domain, pin)) == 0) in mp_map_pin_to_irq()
[all …]
/linux-4.4.14/net/tipc/
Daddr.c116 int tipc_in_scope(u32 domain, u32 addr) in tipc_in_scope() argument
118 if (!domain || (domain == addr)) in tipc_in_scope()
120 if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */ in tipc_in_scope()
122 if (domain == tipc_zone_mask(addr)) /* domain <Z.0.0> */ in tipc_in_scope()
130 int tipc_addr_scope(u32 domain) in tipc_addr_scope() argument
132 if (likely(!domain)) in tipc_addr_scope()
134 if (tipc_node(domain)) in tipc_addr_scope()
136 if (tipc_cluster(domain)) in tipc_addr_scope()
Ddiscover.c66 u32 domain; member
85 u32 dest_domain = b_ptr->domain; in tipc_disc_init_msg()
159 if (!tipc_in_scope(bearer->domain, onode)) in tipc_disc_rcv()
233 if (tipc_node(req->domain) && req->num_nodes) { in disc_timeout()
289 req->domain = b_ptr->domain; in tipc_disc_create()
328 req->domain = b_ptr->domain; in tipc_disc_reset()
Daddr.h73 int tipc_in_scope(u32 domain, u32 addr);
74 int tipc_addr_scope(u32 domain);
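
The addr.c hits above show how tipc_in_scope() matches a node address against a discovery domain: a zero domain is a wildcard, otherwise the domain must equal the node address itself, its cluster <Z.C.0> or its zone <Z.0.0>. The self-contained C sketch below mirrors that logic, assuming TIPC's 8/12/12-bit <zone.cluster.node> packing; it is illustrative only, not the kernel's net/tipc/addr.h.

/*
 * Sketch of the tipc_in_scope() logic quoted above, assuming the
 * <zone.cluster.node> address layout of 8/12/12 bits.
 */
#include <stdio.h>
#include <stdint.h>

#define TIPC_ZONE_MASK     0xff000000u   /* <Z.0.0> */
#define TIPC_CLUSTER_MASK  0xfffff000u   /* <Z.C.0> */

static uint32_t tipc_addr(unsigned z, unsigned c, unsigned n)
{
	return (z << 24) | (c << 12) | n;
}

/* A domain of 0 is a wildcard; otherwise match the node, its cluster or zone. */
static int in_scope(uint32_t domain, uint32_t addr)
{
	if (!domain || domain == addr)
		return 1;
	if (domain == (addr & TIPC_CLUSTER_MASK))   /* domain <Z.C.0> */
		return 1;
	if (domain == (addr & TIPC_ZONE_MASK))      /* domain <Z.0.0> */
		return 1;
	return 0;
}

int main(void)
{
	uint32_t node = tipc_addr(1, 1, 10);            /* <1.1.10> */

	printf("%d %d %d %d\n",
	       in_scope(0, node),                       /* 1: wildcard      */
	       in_scope(tipc_addr(1, 1, 0), node),      /* 1: same cluster  */
	       in_scope(tipc_addr(1, 0, 0), node),      /* 1: same zone     */
	       in_scope(tipc_addr(2, 0, 0), node));     /* 0: other zone    */
	return 0;
}
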
/linux-4.4.14/arch/x86/kvm/
Diommu.c78 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_iommu_map_pages() local
82 if (!domain) in kvm_iommu_map_pages()
99 if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { in kvm_iommu_map_pages()
130 r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), in kvm_iommu_map_pages()
175 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_assign_device() local
180 if (!domain) in kvm_assign_device()
186 r = iommu_attach_device(domain, &pdev->dev); in kvm_assign_device()
216 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_deassign_device() local
219 if (!domain) in kvm_deassign_device()
225 iommu_detach_device(domain, &pdev->dev); in kvm_deassign_device()
[all …]
/linux-4.4.14/drivers/base/
Dmap.c32 int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, in kobj_map() argument
56 mutex_lock(domain->lock); in kobj_map()
58 struct probe **s = &domain->probes[index % 255]; in kobj_map()
64 mutex_unlock(domain->lock); in kobj_map()
68 void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range) in kobj_unmap() argument
78 mutex_lock(domain->lock); in kobj_unmap()
81 for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) { in kobj_unmap()
91 mutex_unlock(domain->lock); in kobj_unmap()
95 struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index) in kobj_lookup() argument
102 mutex_lock(domain->lock); in kobj_lookup()
[all …]
Dplatform-msi.c61 static int platform_msi_init(struct irq_domain *domain, in platform_msi_init() argument
66 return irq_domain_set_hwirq_and_chip(domain, virq, hwirq, in platform_msi_init()
169 struct irq_domain *domain; in platform_msi_create_irq_domain() local
176 domain = msi_create_irq_domain(fwnode, info, parent); in platform_msi_create_irq_domain()
177 if (domain) in platform_msi_create_irq_domain()
178 domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; in platform_msi_create_irq_domain()
180 return domain; in platform_msi_create_irq_domain()
/linux-4.4.14/arch/arm/include/asm/
Ddomain.h89 unsigned int domain; in get_domain() local
93 : "=r" (domain) in get_domain()
96 return domain; in get_domain()
110 unsigned int domain = get_domain(); \
111 domain &= ~domain_mask(dom); \
112 domain = domain | domain_val(dom, type); \
113 set_domain(domain); \
/linux-4.4.14/arch/arm/mach-davinci/
Dpsc.c77 void davinci_psc_config(unsigned int domain, unsigned int ctlr, in davinci_psc_config() argument
107 pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain); in davinci_psc_config()
109 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); in davinci_psc_config()
111 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); in davinci_psc_config()
113 ptcmd = 1 << domain; in davinci_psc_config()
118 } while ((((epcpr >> domain) & 1) == 0)); in davinci_psc_config()
120 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); in davinci_psc_config()
122 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); in davinci_psc_config()
124 ptcmd = 1 << domain; in davinci_psc_config()
130 } while (!(((ptstat >> domain) & 1) == 0)); in davinci_psc_config()
/linux-4.4.14/arch/x86/platform/uv/
Duv_irq.c80 static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq, in uv_domain_alloc() argument
85 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); in uv_domain_alloc()
96 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in uv_domain_alloc()
105 irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data, in uv_domain_alloc()
114 static void uv_domain_free(struct irq_domain *domain, unsigned int virq, in uv_domain_free() argument
117 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); in uv_domain_free()
123 irq_domain_free_irqs_top(domain, virq, nr_irqs); in uv_domain_free()
130 static void uv_domain_activate(struct irq_domain *domain, in uv_domain_activate() argument
140 static void uv_domain_deactivate(struct irq_domain *domain, in uv_domain_deactivate() argument
184 struct irq_domain *domain = uv_get_irq_domain(); in uv_setup_irq() local
[all …]
/linux-4.4.14/drivers/clk/
Dclk-mb86s7x.c35 u32 domain; member
49 u8 cntrlr, domain, port; member
60 cmd.domain = crgclk->domain; in crg_gate_control()
70 cmd.domain, cmd.port, cmd.en); in crg_gate_control()
81 cmd.domain, cmd.port, cmd.en); in crg_gate_control()
111 cmd.domain = crgclk->domain; in crg_rate_control()
119 cmd.domain, cmd.port, cmd.frequency); in crg_rate_control()
124 cmd.domain, cmd.port); in crg_rate_control()
136 cmd.domain, cmd.port, cmd.frequency); in crg_rate_control()
140 cmd.domain, cmd.port, cmd.frequency); in crg_rate_control()
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/arm/ux500/
Dpower_domain.txt6 The implementation of PM domains for UX500 is based upon the generic PM domain
9 ==PM domain providers==
13 - #power-domain-cells : Number of cells in a power domain specifier, must be 1.
18 #power-domain-cells = <1>;
21 ==PM domain consumers==
24 - power-domains: A phandle and PM domain specifier. Below are the list of
/linux-4.4.14/drivers/xen/xen-pciback/
Dpci_stub.c38 int domain; member
152 static struct pcistub_device *pcistub_device_find(int domain, int bus, in pcistub_device_find() argument
162 && domain == pci_domain_nr(psdev->dev->bus) in pcistub_device_find()
201 int domain, int bus, in pcistub_get_pci_dev_by_slot() argument
212 && domain == pci_domain_nr(psdev->dev->bus) in pcistub_get_pci_dev_by_slot()
321 if (pci_domain_nr(dev->bus) == pdev_id->domain in pcistub_match_one()
657 &aer_op->domain, &aer_op->bus, &aer_op->devfn); in common_process()
667 aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn); in common_process()
955 static inline int str_to_slot(const char *buf, int *domain, int *bus, in str_to_slot() argument
960 switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func, in str_to_slot()
[all …]
Dpassthrough.c20 unsigned int domain, in __xen_pcibk_get_pci_dev() argument
31 if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus) in __xen_pcibk_get_pci_dev()
50 unsigned int domain, bus, devfn; in __xen_pcibk_add_pci_dev() local
63 domain = (unsigned int)pci_domain_nr(dev->bus); in __xen_pcibk_add_pci_dev()
66 err = publish_cb(pdev, domain, bus, devfn, devid); in __xen_pcibk_add_pci_dev()
124 unsigned int domain, bus; in __xen_pcibk_publish_pci_roots() local
143 domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus); in __xen_pcibk_publish_pci_roots()
147 err = publish_root_cb(pdev, domain, bus); in __xen_pcibk_publish_pci_roots()
178 unsigned int *domain, unsigned int *bus, in __xen_pcibk_get_pcifront_dev() argument
181 *domain = pci_domain_nr(pcidev->bus); in __xen_pcibk_get_pcifront_dev()
Dpciback.h64 int domain, int bus,
86 unsigned int domain, unsigned int bus,
89 unsigned int domain, unsigned int bus);
100 unsigned int *domain, unsigned int *bus,
108 unsigned int domain, unsigned int bus,
134 xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain, in xen_pcibk_get_pci_dev() argument
138 return xen_pcibk_backend->get(pdev, domain, bus, devfn); in xen_pcibk_get_pci_dev()
150 unsigned int *domain, in xen_pcibk_get_pcifront_dev() argument
155 return xen_pcibk_backend->find(pcidev, pdev, domain, bus, in xen_pcibk_get_pcifront_dev()
Dxenbus.c204 unsigned int domain, unsigned int bus, in xen_pcibk_publish_pci_dev() argument
219 "%04x:%02x:%02x.%02x", domain, bus, in xen_pcibk_publish_pci_dev()
227 int domain, int bus, int slot, int func, in xen_pcibk_export_device() argument
234 domain, bus, slot, func); in xen_pcibk_export_device()
236 dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func); in xen_pcibk_export_device()
243 domain, bus, slot, func); in xen_pcibk_export_device()
274 int domain, int bus, int slot, int func) in xen_pcibk_remove_device() argument
280 domain, bus, slot, func); in xen_pcibk_remove_device()
282 dev = xen_pcibk_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func)); in xen_pcibk_remove_device()
287 domain, bus, slot, func); in xen_pcibk_remove_device()
[all …]
/linux-4.4.14/drivers/pci/pcie/aer/
Daer_inject.c44 u16 domain; member
49 u16 domain; member
77 static void aer_error_init(struct aer_error *err, u16 domain, in aer_error_init() argument
82 err->domain = domain; in aer_error_init()
89 static struct aer_error *__find_aer_error(u16 domain, unsigned int bus, in __find_aer_error() argument
95 if (domain == err->domain && in __find_aer_error()
106 int domain = pci_domain_nr(dev->bus); in __find_aer_error_by_dev() local
107 if (domain < 0) in __find_aer_error_by_dev()
109 return __find_aer_error((u16)domain, dev->bus->number, dev->devfn); in __find_aer_error_by_dev()
191 int domain; in pci_read_aer() local
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/clk/
Dbase.c41 u8 pstate, u8 domain, u32 input) in nvkm_clk_adjust() argument
61 if (subd && boostS.domain == domain) { in nvkm_clk_adjust()
141 const struct nvkm_domain *domain = clk->domains; in nvkm_cstate_new() local
158 while (domain && domain->name != nv_clk_src_max) { in nvkm_cstate_new()
159 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { in nvkm_cstate_new()
161 domain->bios, cstepX.freq); in nvkm_cstate_new()
162 cstate->domain[domain->name] = freq; in nvkm_cstate_new()
164 domain++; in nvkm_cstate_new()
191 int khz = pstate->base.domain[nv_clk_src_mem]; in nvkm_pstate_prog()
264 u32 lo = pstate->base.domain[clock->name]; in nvkm_pstate_info()
[all …]
Dgk20a.c472 .domain[nv_clk_src_gpc] = 72000,
478 .domain[nv_clk_src_gpc] = 108000,
484 .domain[nv_clk_src_gpc] = 180000,
490 .domain[nv_clk_src_gpc] = 252000,
496 .domain[nv_clk_src_gpc] = 324000,
502 .domain[nv_clk_src_gpc] = 396000,
508 .domain[nv_clk_src_gpc] = 468000,
514 .domain[nv_clk_src_gpc] = 540000,
520 .domain[nv_clk_src_gpc] = 612000,
526 .domain[nv_clk_src_gpc] = 648000,
[all …]
/linux-4.4.14/drivers/pci/
Dxen-pcifront.c67 unsigned int domain, unsigned int bus, in pcifront_init_sd() argument
72 sd->sd.domain = domain; in pcifront_init_sd()
183 .domain = pci_domain_nr(bus), in pcifront_bus_read()
221 .domain = pci_domain_nr(bus), in pcifront_bus_write()
254 .domain = pci_domain_nr(dev->bus), in pci_frontend_enable_msix()
308 .domain = pci_domain_nr(dev->bus), in pci_frontend_disable_msix()
327 .domain = pci_domain_nr(dev->bus), in pci_frontend_enable_msi()
356 .domain = pci_domain_nr(dev->bus), in pci_frontend_disable_msi()
417 unsigned int domain, unsigned int bus, in pcifront_scan_bus() argument
438 "%04x:%02x:%02x.%d found.\n", domain, bus, in pcifront_scan_bus()
[all …]
Dmsi.c43 struct irq_domain *domain; in pci_msi_get_domain() local
45 domain = dev_get_msi_domain(&dev->dev); in pci_msi_get_domain()
46 if (domain) in pci_msi_get_domain()
47 return domain; in pci_msi_get_domain()
54 struct irq_domain *domain; in pci_msi_setup_msi_irqs() local
56 domain = pci_msi_get_domain(dev); in pci_msi_setup_msi_irqs()
57 if (domain && irq_domain_is_hierarchy(domain)) in pci_msi_setup_msi_irqs()
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); in pci_msi_setup_msi_irqs()
65 struct irq_domain *domain; in pci_msi_teardown_msi_irqs() local
67 domain = pci_msi_get_domain(dev); in pci_msi_teardown_msi_irqs()
[all …]
/linux-4.4.14/Documentation/devicetree/bindings/clock/
Dkeystone-gate.txt13 - reg : psc control and domain address space
14 - reg-names : psc control and domain registers
15 - domain-id : psc domain id needed to check the transition state register
27 reg-names = "control", "domain";
28 domain-id = <0>;
Drenesas,r8a7778-cpg-clocks.txt15 - #power-domain-cells: Must be 0
19 "power-domains" property, as documented by the generic PM domain bindings in
35 #power-domain-cells = <0>;
Drenesas,r8a7779-cpg-clocks.txt17 - #power-domain-cells: Must be 0
21 "power-domains" property, as documented by the generic PM domain bindings in
37 #power-domain-cells = <0>;
Dqcom,mmcc.txt18 - #power-domain-cells : shall contain 1
26 #power-domain-cells = <1>;
Dqcom,gcc.txt22 - #power-domain-cells : shall contain 1
30 #power-domain-cells = <1>;
Drenesas,rz-cpg-clocks.txt19 - #power-domain-cells: Must be 0
23 "power-domains" property, as documented by the generic PM domain bindings in
39 #power-domain-cells = <0>;
/linux-4.4.14/arch/x86/pci/
Dacpi.c191 seg = info->sd.domain; in setup_mcfg_map()
220 pci_mmconfig_delete(info->sd.domain, in teardown_mcfg_map()
319 int domain = root->segment; in pci_acpi_scan_root() local
325 root->segment = domain = 0; in pci_acpi_scan_root()
327 if (domain && !pci_domains_supported) { in pci_acpi_scan_root()
330 domain, busnum); in pci_acpi_scan_root()
334 bus = pci_find_bus(domain, busnum); in pci_acpi_scan_root()
341 .domain = domain, in pci_acpi_scan_root()
354 domain, busnum); in pci_acpi_scan_root()
356 info->sd.domain = domain; in pci_acpi_scan_root()
Dintel_mid_pci.c96 unsigned int domain, busnum; in pci_device_update_fixed() local
99 domain = pci_domain_nr(bus); in pci_device_update_fixed()
105 raw_pci_ext_ops->read(domain, busnum, devfn, in pci_device_update_fixed()
128 return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4, in pci_device_update_fixed()
133 return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val); in pci_device_update_fixed()
Dcommon.c41 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, in raw_pci_read() argument
44 if (domain == 0 && reg < 256 && raw_pci_ops) in raw_pci_read()
45 return raw_pci_ops->read(domain, bus, devfn, reg, len, val); in raw_pci_read()
47 return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val); in raw_pci_read()
51 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, in raw_pci_write() argument
54 if (domain == 0 && reg < 256 && raw_pci_ops) in raw_pci_write()
55 return raw_pci_ops->write(domain, bus, devfn, reg, len, val); in raw_pci_write()
57 return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val); in raw_pci_write()
Dxen.c513 domid_t domain; member
535 int domain = -ENODEV; in xen_find_device_domain_owner() local
540 domain = owner->domain; in xen_find_device_domain_owner()
542 return domain; in xen_find_device_domain_owner()
546 int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) in xen_register_device_domain_owner() argument
560 owner->domain = domain; in xen_register_device_domain_owner()
/linux-4.4.14/security/tomoyo/
Dutil.c599 struct tomoyo_domain_info *domain; in tomoyo_find_domain() local
604 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { in tomoyo_find_domain()
605 if (!domain->is_deleted && in tomoyo_find_domain()
606 !tomoyo_pathcmp(&name, domain->domainname)) in tomoyo_find_domain()
607 return domain; in tomoyo_find_domain()
1003 struct tomoyo_domain_info *domain, const u8 index) in tomoyo_init_request_info() argument
1007 if (!domain) in tomoyo_init_request_info()
1008 domain = tomoyo_domain(); in tomoyo_init_request_info()
1009 r->domain = domain; in tomoyo_init_request_info()
1010 profile = domain->profile; in tomoyo_init_request_info()
[all …]
Dcommon.c974 struct tomoyo_domain_info *domain = NULL; in tomoyo_select_domain() local
988 domain = tomoyo_real_domain(p); in tomoyo_select_domain()
992 domain = tomoyo_find_domain(data + 7); in tomoyo_select_domain()
994 domain = tomoyo_find_domain_by_qid(pid); in tomoyo_select_domain()
997 head->w.domain = domain; in tomoyo_select_domain()
1003 if (domain) in tomoyo_select_domain()
1004 head->r.domain = &domain->list; in tomoyo_select_domain()
1008 if (domain && domain->is_deleted) in tomoyo_select_domain()
1066 struct tomoyo_domain_info *domain; in tomoyo_delete_domain() local
1074 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { in tomoyo_delete_domain()
[all …]
Ddomain.c160 const struct tomoyo_domain_info *domain = r->domain; in tomoyo_check_acl() local
163 const struct list_head *list = &domain->acl_info_list; in tomoyo_check_acl()
179 list = &domain->ns->acl_group[domain->group]; in tomoyo_check_acl()
545 const struct tomoyo_domain_info *domain = tomoyo_domain(); in tomoyo_assign_domain() local
546 e.profile = domain->profile; in tomoyo_assign_domain()
547 e.group = domain->group; in tomoyo_assign_domain()
603 ee->r.profile = r->domain->profile; in tomoyo_environ()
604 ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile, in tomoyo_environ()
679 struct tomoyo_domain_info *domain = NULL; in tomoyo_find_next_domain() local
796 domain = old_domain; in tomoyo_find_next_domain()
[all …]
Dtomoyo.c36 struct tomoyo_domain_info *domain = old->security; in tomoyo_cred_prepare() local
37 new->security = domain; in tomoyo_cred_prepare()
38 if (domain) in tomoyo_cred_prepare()
39 atomic_inc(&domain->users); in tomoyo_cred_prepare()
61 struct tomoyo_domain_info *domain = cred->security; in tomoyo_cred_free() local
62 if (domain) in tomoyo_cred_free()
63 atomic_dec(&domain->users); in tomoyo_cred_free()
114 struct tomoyo_domain_info *domain = bprm->cred->security; in tomoyo_bprm_check_security() local
120 if (!domain) { in tomoyo_bprm_check_security()
129 return tomoyo_check_open_permission(domain, &bprm->file->f_path, in tomoyo_bprm_check_security()
Dgc.c49 if (head->r.domain == element || head->r.group == element || in tomoyo_struct_used_by_io_buffer()
50 head->r.acl == element || &head->w.domain->list == element) in tomoyo_struct_used_by_io_buffer()
245 struct tomoyo_domain_info *domain = in tomoyo_del_domain() local
246 container_of(element, typeof(*domain), list); in tomoyo_del_domain()
254 list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) { in tomoyo_del_domain()
258 tomoyo_put_name(domain->domainname); in tomoyo_del_domain()
517 struct tomoyo_domain_info *domain; in tomoyo_collect_entry() local
519 list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list, in tomoyo_collect_entry()
521 tomoyo_collect_acl(&domain->acl_info_list); in tomoyo_collect_entry()
522 if (!domain->is_deleted || atomic_read(&domain->users)) in tomoyo_collect_entry()
[all …]
/linux-4.4.14/arch/powerpc/perf/
Dhv-24x7.c30 static const char *event_domain_suffix(unsigned domain) in event_domain_suffix() argument
32 switch (domain) { in event_domain_suffix()
39 WARN(1, "unknown domain %d\n", domain); in event_domain_suffix()
44 static bool domain_is_valid(unsigned domain) in domain_is_valid() argument
46 switch (domain) { in domain_is_valid()
58 static bool is_physical_domain(unsigned domain) in is_physical_domain() argument
60 switch (domain) { in is_physical_domain()
71 static bool catalog_entry_domain_is_valid(unsigned domain) in catalog_entry_domain_is_valid() argument
73 return is_physical_domain(domain); in catalog_entry_domain_is_valid()
101 EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
[all …]
/linux-4.4.14/Documentation/networking/
Dregulatory.txt16 to the kernel one regulatory domain to be used as the central
17 core regulatory domain all wireless devices should adhere to.
22 Userspace gets a regulatory domain in the kernel by having
27 is CRDA - central regulatory domain agent. It's documented here:
32 it needs a new regulatory domain. A udev rule can be put in place
33 to trigger crda to send the respective regulatory domain for a
54 # set regulatory domain to "Costa Rica"
57 This will request the kernel to set the regulatory domain to
59 to provide a regulatory domain for the alpha2 specified by the user
65 regulatory domain is required. More on this to be added
[all …]
/linux-4.4.14/arch/mips/ralink/
Dirq.c104 struct irq_domain *domain = irq_desc_get_handler_data(desc); in ralink_intc_irq_handler() local
105 generic_handle_irq(irq_find_mapping(domain, __ffs(pending))); in ralink_intc_irq_handler()
152 struct irq_domain *domain; in intc_of_init() local
181 domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT, in intc_of_init()
183 if (!domain) in intc_of_init()
188 irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain); in intc_of_init()
191 rt_perfcount_irq = irq_create_mapping(domain, 9); in intc_of_init()
/linux-4.4.14/drivers/staging/board/
Dboard.c140 const char *domain) in board_staging_add_dev_domain() argument
146 np = of_find_node_by_path(domain); in board_staging_add_dev_domain()
148 pr_err("Cannot find domain node %s\n", domain); in board_staging_add_dev_domain()
156 pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd)); in board_staging_add_dev_domain()
166 const char *domain) in board_staging_add_dev_domain() argument
190 if (dev->domain) in board_staging_register_device()
191 board_staging_add_dev_domain(pdev, dev->domain); in board_staging_register_device()
/linux-4.4.14/drivers/gpio/
Dgpio-tb10x.c58 struct irq_domain *domain; member
145 return irq_create_mapping(tb10x_gpio->domain, offset); in tb10x_gpio_to_irq()
169 generic_handle_irq(irq_find_mapping(tb10x_gpio->domain, i)); in tb10x_gpio_irq_cascade()
239 tb10x_gpio->domain = irq_domain_add_linear(dn, in tb10x_gpio_probe()
242 if (!tb10x_gpio->domain) { in tb10x_gpio_probe()
247 ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain, in tb10x_gpio_probe()
254 gc = tb10x_gpio->domain->gc->gc[0]; in tb10x_gpio_probe()
281 irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0], in tb10x_gpio_remove()
283 kfree(tb10x_gpio->domain->gc); in tb10x_gpio_remove()
284 irq_domain_remove(tb10x_gpio->domain); in tb10x_gpio_remove()
Dgpio-dwapb.c83 struct irq_domain *domain; member
115 return irq_find_mapping(gpio->domain, offset); in dwapb_gpio_to_irq()
137 int gpio_irq = irq_find_mapping(gpio->domain, hwirq); in dwapb_do_irq()
305 gpio->domain = irq_domain_add_linear(node, ngpio, in dwapb_configure_irqs()
307 if (!gpio->domain) in dwapb_configure_irqs()
310 err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, in dwapb_configure_irqs()
316 irq_domain_remove(gpio->domain); in dwapb_configure_irqs()
317 gpio->domain = NULL; in dwapb_configure_irqs()
321 irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); in dwapb_configure_irqs()
323 irq_domain_remove(gpio->domain); in dwapb_configure_irqs()
[all …]
Dgpio-grgpio.c78 struct irq_domain *domain; member
125 return irq_create_mapping(priv->domain, offset); in grgpio_to_irq()
404 priv->domain = irq_domain_add_linear(np, gc->ngpio, in grgpio_probe()
407 if (!priv->domain) { in grgpio_probe()
441 if (priv->domain) in grgpio_probe()
442 irq_domain_remove(priv->domain); in grgpio_probe()
447 priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off"); in grgpio_probe()
461 if (priv->domain) { in grgpio_remove()
472 if (priv->domain) in grgpio_remove()
473 irq_domain_remove(priv->domain); in grgpio_remove()
Dgpio-tz1090.c60 struct irq_domain *domain; member
259 if (!bank->domain) in tz1090_gpio_to_irq()
262 return irq_create_mapping(bank->domain, offset); in tz1090_gpio_to_irq()
270 return (struct tz1090_gpio_bank *)data->domain->host_data; in irqd_to_gpio_bank()
395 irq_no = irq_linear_revmap(bank->domain, hw); in tz1090_gpio_irq_handler()
466 bank->domain = irq_domain_add_linear(np, in tz1090_gpio_bank_probe()
472 err = irq_alloc_domain_generic_chips(bank->domain, bank->chip.ngpio, 2, in tz1090_gpio_bank_probe()
479 irq_domain_remove(bank->domain); in tz1090_gpio_bank_probe()
483 gc = irq_get_domain_generic_chip(bank->domain, 0); in tz1090_gpio_bank_probe()
Dgpio-mxs.c66 struct irq_domain *domain; member
172 generic_handle_irq(irq_find_mapping(port->domain, irqoffset)); in mxs_gpio_irq_handler()
232 return irq_find_mapping(port->domain, offset); in mxs_gpio_to_irq()
317 port->domain = irq_domain_add_legacy(np, 32, irq_base, 0, in mxs_gpio_probe()
319 if (!port->domain) { in mxs_gpio_probe()
354 irq_domain_remove(port->domain); in mxs_gpio_probe()
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
Damdgpu_object.c119 u32 domain, u64 flags) in amdgpu_ttm_placement_init() argument
126 if (domain & AMDGPU_GEM_DOMAIN_VRAM) { in amdgpu_ttm_placement_init()
141 if (domain & AMDGPU_GEM_DOMAIN_GTT) { in amdgpu_ttm_placement_init()
152 if (domain & AMDGPU_GEM_DOMAIN_CPU) { in amdgpu_ttm_placement_init()
163 if (domain & AMDGPU_GEM_DOMAIN_GDS) { in amdgpu_ttm_placement_init()
168 if (domain & AMDGPU_GEM_DOMAIN_GWS) { in amdgpu_ttm_placement_init()
173 if (domain & AMDGPU_GEM_DOMAIN_OA) { in amdgpu_ttm_placement_init()
198 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) in amdgpu_ttm_placement_from_domain() argument
201 rbo->placements, domain, rbo->flags); in amdgpu_ttm_placement_from_domain()
219 bool kernel, u32 domain, u64 flags, in amdgpu_bo_create_restricted() argument
[all …]
Damdgpu_object.h130 bool kernel, u32 domain, u64 flags,
136 bool kernel, u32 domain, u64 flags,
145 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
146 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
185 unsigned size, u32 align, u32 domain);
/linux-4.4.14/arch/nios2/kernel/
Dirq.c77 struct irq_domain *domain; in init_IRQ() local
86 domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL); in init_IRQ()
87 BUG_ON(!domain); in init_IRQ()
89 irq_set_default_host(domain); in init_IRQ()
/linux-4.4.14/drivers/gpu/drm/i915/
Dintel_uncore.c248 struct intel_uncore_forcewake_domain *domain = (void *)arg; in intel_uncore_fw_release_timer() local
251 assert_device_not_suspended(domain->i915); in intel_uncore_fw_release_timer()
253 spin_lock_irqsave(&domain->i915->uncore.lock, irqflags); in intel_uncore_fw_release_timer()
254 if (WARN_ON(domain->wake_count == 0)) in intel_uncore_fw_release_timer()
255 domain->wake_count++; in intel_uncore_fw_release_timer()
257 if (--domain->wake_count == 0) in intel_uncore_fw_release_timer()
258 domain->i915->uncore.funcs.force_wake_put(domain->i915, in intel_uncore_fw_release_timer()
259 1 << domain->id); in intel_uncore_fw_release_timer()
261 spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags); in intel_uncore_fw_release_timer()
268 struct intel_uncore_forcewake_domain *domain; in intel_uncore_forcewake_reset() local
[all …]
/linux-4.4.14/arch/arm/mach-exynos/
Dpm_domains.c43 static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) in exynos_pd_power() argument
51 pd = container_of(domain, struct exynos_pm_domain, pd); in exynos_pd_power()
81 pr_err("Power domain %s %s failed\n", domain->name, op); in exynos_pd_power()
112 static int exynos_pd_power_on(struct generic_pm_domain *domain) in exynos_pd_power_on() argument
114 return exynos_pd_power(domain, true); in exynos_pd_power_on()
117 static int exynos_pd_power_off(struct generic_pm_domain *domain) in exynos_pd_power_off() argument
119 return exynos_pd_power(domain, false); in exynos_pd_power_off()
Dsuspend.c202 static int exynos_pmu_domain_alloc(struct irq_domain *domain, in exynos_pmu_domain_alloc() argument
219 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in exynos_pmu_domain_alloc()
223 parent_fwspec.fwnode = domain->parent->fwnode; in exynos_pmu_domain_alloc()
224 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, in exynos_pmu_domain_alloc()
237 struct irq_domain *parent_domain, *domain; in exynos_pmu_irq_init() local
258 domain = irq_domain_add_hierarchy(parent_domain, 0, 0, in exynos_pmu_irq_init()
261 if (!domain) { in exynos_pmu_irq_init()
/linux-4.4.14/drivers/gpu/drm/qxl/
Dqxl_object.c53 void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) in qxl_ttm_placement_from_domain() argument
61 if (domain == QXL_GEM_DOMAIN_VRAM) in qxl_ttm_placement_from_domain()
63 if (domain == QXL_GEM_DOMAIN_SURFACE) in qxl_ttm_placement_from_domain()
65 if (domain == QXL_GEM_DOMAIN_CPU) in qxl_ttm_placement_from_domain()
79 unsigned long size, bool kernel, bool pinned, u32 domain, in qxl_bo_create() argument
101 bo->type = domain; in qxl_bo_create()
109 qxl_ttm_placement_from_domain(bo, domain, pinned); in qxl_bo_create()
118 size, domain); in qxl_bo_create()
224 int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) in qxl_bo_pin() argument
235 qxl_ttm_placement_from_domain(bo, domain, true); in qxl_bo_pin()
Dqxl_object.h89 bool kernel, bool pinned, u32 domain,
98 extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
100 extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
/linux-4.4.14/drivers/infiniband/hw/usnic/
Dusnic_uiom.c68 static int usnic_uiom_dma_fault(struct iommu_domain *domain, in usnic_uiom_dma_fault() argument
75 domain, iova, flags); in usnic_uiom_dma_fault()
209 iommu_unmap(pd->domain, va, PAGE_SIZE); in usnic_uiom_unmap_sorted_intervals()
286 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
303 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
468 void *domain; in usnic_uiom_alloc_pd() local
474 pd->domain = domain = iommu_domain_alloc(&pci_bus_type); in usnic_uiom_alloc_pd()
475 if (!domain) { in usnic_uiom_alloc_pd()
481 iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); in usnic_uiom_alloc_pd()
491 iommu_domain_free(pd->domain); in usnic_uiom_dealloc_pd()
[all …]
/linux-4.4.14/tools/testing/selftests/net/
Dsocket.c10 int domain; member
47 fd = socket(s->domain, s->type, s->protocol); in run_tests()
62 s->domain, s->type, s->protocol, in run_tests()
75 s->domain, s->type, s->protocol, in run_tests()
/linux-4.4.14/arch/mips/ath25/
Dar2315.c79 struct irq_domain *domain = irq_desc_get_handler_data(desc); in ar2315_misc_irq_handler() local
82 misc_irq = irq_find_mapping(domain, nr); in ar2315_misc_irq_handler()
151 struct irq_domain *domain; in ar2315_arch_init_irq() local
156 domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT, in ar2315_arch_init_irq()
158 if (!domain) in ar2315_arch_init_irq()
161 irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB); in ar2315_arch_init_irq()
165 ar2315_misc_irq_handler, domain); in ar2315_arch_init_irq()
167 ar2315_misc_irq_domain = domain; in ar2315_arch_init_irq()
Dar5312.c83 struct irq_domain *domain = irq_desc_get_handler_data(desc); in ar5312_misc_irq_handler() local
86 misc_irq = irq_find_mapping(domain, nr); in ar5312_misc_irq_handler()
146 struct irq_domain *domain; in ar5312_arch_init_irq() local
151 domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT, in ar5312_arch_init_irq()
153 if (!domain) in ar5312_arch_init_irq()
156 irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC); in ar5312_arch_init_irq()
160 ar5312_misc_irq_handler, domain); in ar5312_arch_init_irq()
162 ar5312_misc_irq_domain = domain; in ar5312_arch_init_irq()
/linux-4.4.14/drivers/gpu/drm/radeon/
Dradeon_object.c96 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) in radeon_ttm_placement_from_domain() argument
102 if (domain & RADEON_GEM_DOMAIN_VRAM) { in radeon_ttm_placement_from_domain()
121 if (domain & RADEON_GEM_DOMAIN_GTT) { in radeon_ttm_placement_from_domain()
140 if (domain & RADEON_GEM_DOMAIN_CPU) { in radeon_ttm_placement_from_domain()
180 u32 domain, u32 flags, struct sg_table *sg, in radeon_bo_create() argument
216 bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | in radeon_bo_create()
257 radeon_ttm_placement_from_domain(bo, domain); in radeon_bo_create()
329 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, in radeon_bo_pin_restricted() argument
345 if (domain == RADEON_GEM_DOMAIN_VRAM) in radeon_bo_pin_restricted()
355 radeon_ttm_placement_from_domain(bo, domain); in radeon_bo_pin_restricted()
[all …]
Dradeon_object.h127 bool kernel, u32 domain, u32 flags,
135 extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
136 extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
175 unsigned size, u32 align, u32 domain,
Dradeon_gem.c97 uint32_t domain; in radeon_gem_set_domain() local
103 domain = wdomain; in radeon_gem_set_domain()
104 if (!domain) { in radeon_gem_set_domain()
105 domain = rdomain; in radeon_gem_set_domain()
107 if (!domain) { in radeon_gem_set_domain()
112 if (domain == RADEON_GEM_DOMAIN_CPU) { in radeon_gem_set_domain()
451 args->domain = radeon_mem_type_to_domain(cur_placement); in radeon_gem_busy_ioctl()
546 unsigned domain; in radeon_gem_va_update_vm() local
564 domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type); in radeon_gem_va_update_vm()
567 if (domain == RADEON_GEM_DOMAIN_CPU) in radeon_gem_va_update_vm()
[all …]
/linux-4.4.14/arch/powerpc/platforms/powernv/
Dopal-irqchip.c37 struct irq_domain *domain; member
65 virq = irq_find_mapping(opal_event_irqchip.domain, in opal_handle_events()
203 opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, in opal_event_init()
206 if (!opal_event_irqchip.domain) { in opal_event_init()
261 if (WARN_ON_ONCE(!opal_event_irqchip.domain)) in opal_event_request()
264 return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr); in opal_event_request()
/linux-4.4.14/arch/arm/mach-imx/
D3ds_debugboard.c63 static struct irq_domain *domain; variable
104 generic_handle_irq(irq_find_mapping(domain, expio_irq)); in mxc_expio_irq_handler()
192 domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0, in mxc_expio_init()
194 WARN_ON(!domain); in mxc_expio_init()
208 smsc911x_resources[1].start = irq_find_mapping(domain, EXPIO_INT_ENET); in mxc_expio_init()
209 smsc911x_resources[1].end = irq_find_mapping(domain, EXPIO_INT_ENET); in mxc_expio_init()
Dmach-mx31ads.c77 static struct irq_domain *domain; variable
123 serial_platform_data[0].irq = irq_find_mapping(domain, in mxc_init_extuart()
125 serial_platform_data[1].irq = irq_find_mapping(domain, in mxc_init_extuart()
133 irq_find_mapping(domain, EXPIO_INT_ENET_INT); in mxc_init_ext_ethernet()
135 irq_find_mapping(domain, EXPIO_INT_ENET_INT); in mxc_init_ext_ethernet()
171 generic_handle_irq(irq_find_mapping(domain, expio_irq)); in mx31ads_expio_irq_handler()
235 domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0, in mx31ads_init_expio()
237 WARN_ON(!domain); in mx31ads_init_expio()
Dgpc.c207 static int imx_gpc_domain_alloc(struct irq_domain *domain, in imx_gpc_domain_alloc() argument
226 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, in imx_gpc_domain_alloc()
230 parent_fwspec.fwnode = domain->parent->fwnode; in imx_gpc_domain_alloc()
231 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, in imx_gpc_domain_alloc()
244 struct irq_domain *parent_domain, *domain; in imx_gpc_init() local
262 domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, in imx_gpc_init()
265 if (!domain) { in imx_gpc_init()
Davic.c55 static struct irq_domain *domain; variable
147 handle_domain_irq(domain, nivector, regs); in avic_handle_irq()
182 domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0, in mxc_init_irq()
184 WARN_ON(!domain); in mxc_init_irq()
Dtzic.c54 static struct irq_domain *domain; variable
144 handle_domain_irq(domain, irqofs + i * 32, regs); in tzic_handle_irq()
187 domain = irq_domain_add_legacy(np, TZIC_NUM_IRQS, irq_base, 0, in tzic_init_irq()
189 WARN_ON(!domain); in tzic_init_irq()
/linux-4.4.14/arch/arm/mach-zx/
Dzx296702-pm-domain.c38 static int normal_power_off(struct generic_pm_domain *domain) in normal_power_off() argument
40 struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain; in normal_power_off()
67 pr_err("Error: %s %s fail\n", __func__, domain->name); in normal_power_off()
74 static int normal_power_on(struct generic_pm_domain *domain) in normal_power_on() argument
76 struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain; in normal_power_on()
88 pr_err("Error: %s %s fail\n", __func__, domain->name); in normal_power_on()
/linux-4.4.14/Documentation/devicetree/bindings/bus/
Dsimple-pm-bus.txt7 However, its bus controller is part of a PM domain, or under the control of a
8 functional clock. Hence, the bus controller's PM domain and/or clock must be
24 Optional platform-specific properties for clock or PM domain control (at least
27 - power-domains: Must contain a reference to the PM domain.
28 Please refer to the binding documentation for the clock and/or PM domain
/linux-4.4.14/Documentation/devicetree/bindings/pci/
Dpci.txt14 - linux,pci-domain:
15 If present this property assigns a fixed PCI domain number to a host bridge,
18 host bridges in the system, otherwise potentially conflicting domain numbers
19 may be assigned to root buses behind different host bridges. The domain
Dbrcm,iproc-pcie.txt9 - linux,pci-domain: PCI domain ID. Should be unique for each host controller
44 linux,pci-domain = <0>;
71 linux,pci-domain = <1>;
/linux-4.4.14/include/net/
Dnetlabel.h208 char *domain; member
318 kfree(secattr->domain); in netlbl_secattr_destroy()
357 int netlbl_cfg_map_del(const char *domain,
362 int netlbl_cfg_unlbl_map_add(const char *domain,
384 const char *domain,
445 static inline int netlbl_cfg_map_del(const char *domain, in netlbl_cfg_map_del() argument
453 static inline int netlbl_cfg_unlbl_map_add(const char *domain, in netlbl_cfg_unlbl_map_add() argument
491 const char *domain, in netlbl_cfg_cipsov4_map_add() argument
/linux-4.4.14/drivers/sh/intc/
Dirqdomain.c62 d->domain = irq_domain_add_linear(NULL, hw->nr_vectors, in intc_irq_domain_init()
65 d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL); in intc_irq_domain_init()
67 BUG_ON(!d->domain); in intc_irq_domain_init()
/linux-4.4.14/tools/power/cpupower/utils/helpers/
Dpci.c25 struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain, int bus, in pci_acc_init() argument
36 filter_nb_link.domain = domain; in pci_acc_init()
/linux-4.4.14/drivers/pinctrl/
Dpinctrl-adi2.c128 struct irq_domain *domain[2]; member
135 u8 map, struct irq_domain *domain);
188 struct irq_domain *domain; member
541 struct irq_domain *domain; in adi_gpio_handle_pint_irq() local
550 domain = pint->domain[0]; in adi_gpio_handle_pint_irq()
556 domain = pint->domain[1]; in adi_gpio_handle_pint_irq()
563 generic_handle_irq(irq_find_mapping(domain, in adi_gpio_handle_pint_irq()
792 return irq_find_mapping(port->domain, offset); in adi_gpio_to_irq()
794 return irq_create_mapping(port->domain, offset); in adi_gpio_to_irq()
798 struct irq_domain *domain) in adi_pint_map_port() argument
[all …]
/linux-4.4.14/drivers/clk/qcom/
Dgdsc.c43 #define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd) argument
124 static int gdsc_enable(struct generic_pm_domain *domain) in gdsc_enable() argument
126 struct gdsc *sc = domain_to_gdsc(domain); in gdsc_enable()
151 static int gdsc_disable(struct generic_pm_domain *domain) in gdsc_disable() argument
153 struct gdsc *sc = domain_to_gdsc(domain); in gdsc_disable()
/linux-4.4.14/Documentation/devicetree/bindings/soc/mediatek/
Dscpsys.txt8 domain control.
10 The driver implements the Generic PM domain bindings described in
16 - #power-domain-cells: Must be 1
27 #power-domain-cells = <1>;
/linux-4.4.14/drivers/mfd/
Dlp8788-irq.c42 struct irq_domain *domain; member
127 handle_nested_irq(irq_find_mapping(irqd->domain, i)); in lp8788_irq_handler()
168 irqd->domain = irq_domain_add_linear(lp->dev->of_node, LP8788_INT_MAX, in lp8788_irq_init()
170 if (!irqd->domain) { in lp8788_irq_init()
175 lp->irqdm = irqd->domain; in lp8788_irq_init()
/linux-4.4.14/drivers/remoteproc/
Dremoteproc_core.c78 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev, in rproc_iommu_fault() argument
96 struct iommu_domain *domain; in rproc_enable_iommu() local
105 domain = iommu_domain_alloc(dev->bus); in rproc_enable_iommu()
106 if (!domain) { in rproc_enable_iommu()
111 iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); in rproc_enable_iommu()
113 ret = iommu_attach_device(domain, dev); in rproc_enable_iommu()
119 rproc->domain = domain; in rproc_enable_iommu()
124 iommu_domain_free(domain); in rproc_enable_iommu()
130 struct iommu_domain *domain = rproc->domain; in rproc_disable_iommu() local
133 if (!domain) in rproc_disable_iommu()
[all …]
/linux-4.4.14/Documentation/tpm/
Dxen-tpmfront.txt15 of the vTPM's secrets (Keys, NVRAM, etc) are managed by a vTPM Manager domain,
19 major component of vTPM is implemented as a separate domain, providing secure
77 * vtpm-stubdom: A mini-os stub domain that implements a vTPM. There is a
82 * mini-os/tpmfront: Mini-os TPM frontend driver. The vTPM mini-os domain
85 domains such as pv-grub that talk to the vTPM domain.
87 * vtpmmgr-stubdom: A mini-os domain that implements the vTPM manager. There is
89 entire lifetime of the machine. This domain regulates
108 domain's configuration file.
/linux-4.4.14/drivers/base/power/
Ddomain.c55 if (&gpd->domain == dev->pm_domain) { in pm_genpd_lookup_dev()
332 if (!genpd->gov->power_down_ok(&genpd->domain)) in genpd_poweroff()
1191 dev->pm_domain = &genpd->domain; in genpd_alloc_dev_data()
1487 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; in pm_genpd_init()
1488 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; in pm_genpd_init()
1489 genpd->domain.ops.prepare = pm_genpd_prepare; in pm_genpd_init()
1490 genpd->domain.ops.suspend = pm_genpd_suspend; in pm_genpd_init()
1491 genpd->domain.ops.suspend_late = pm_genpd_suspend_late; in pm_genpd_init()
1492 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; in pm_genpd_init()
1493 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; in pm_genpd_init()
[all …]
/linux-4.4.14/drivers/gpu/drm/msm/mdp/mdp5/
Dmdp5_irq.c110 mdp5_kms->irqcontroller.domain, hwirq)); in mdp5_irq()
204 mdp5_kms->irqcontroller.domain = d; in mdp5_irq_domain_init()
211 if (mdp5_kms->irqcontroller.domain) { in mdp5_irq_domain_fini()
212 irq_domain_remove(mdp5_kms->irqcontroller.domain); in mdp5_irq_domain_fini()
213 mdp5_kms->irqcontroller.domain = NULL; in mdp5_irq_domain_fini()
/linux-4.4.14/drivers/firmware/
Darm_scpi.c218 u8 domain; member
430 static int scpi_dvfs_get_idx(u8 domain) in scpi_dvfs_get_idx() argument
435 ret = scpi_send_message(SCPI_CMD_GET_DVFS, &domain, sizeof(domain), in scpi_dvfs_get_idx()
440 static int scpi_dvfs_set_idx(u8 domain, u8 index) in scpi_dvfs_set_idx() argument
443 struct dvfs_set dvfs = {domain, index}; in scpi_dvfs_set_idx()
456 static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain) in scpi_dvfs_get_info() argument
463 if (domain >= MAX_DVFS_DOMAINS) in scpi_dvfs_get_info()
466 if (scpi_info->dvfs[domain]) /* data already populated */ in scpi_dvfs_get_info()
467 return scpi_info->dvfs[domain]; in scpi_dvfs_get_info()
469 ret = scpi_send_message(SCPI_CMD_GET_DVFS_INFO, &domain, sizeof(domain), in scpi_dvfs_get_info()
[all …]
/linux-4.4.14/arch/arm64/mm/
Ddma-mapping.c816 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); in do_iommu_attach() local
823 if (!domain) { in do_iommu_attach()
832 domain = ops->domain_alloc(IOMMU_DOMAIN_DMA); in do_iommu_attach()
833 if (!domain) in do_iommu_attach()
836 domain->ops = ops; in do_iommu_attach()
837 domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT; in do_iommu_attach()
839 if (iommu_attach_device(domain, dev)) in do_iommu_attach()
843 if (iommu_dma_init_domain(domain, dma_base, size)) in do_iommu_attach()
850 iommu_detach_device(domain, dev); in do_iommu_attach()
852 if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT) in do_iommu_attach()
[all …]
/linux-4.4.14/drivers/cpufreq/
Dscpi-cpufreq.c34 int domain = topology_physical_package_id(cpu_dev->id); in scpi_get_dvfs_info() local
36 if (domain < 0) in scpi_get_dvfs_info()
38 return scpi_ops->dvfs_get_info(domain); in scpi_get_dvfs_info()
/linux-4.4.14/drivers/pci/host/
Dpcie-altera-msi.c124 static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in altera_irq_domain_alloc() argument
127 struct altera_msi *msi = domain->host_data; in altera_irq_domain_alloc()
144 irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, in altera_irq_domain_alloc()
145 domain->host_data, handle_simple_irq, in altera_irq_domain_alloc()
155 static void altera_irq_domain_free(struct irq_domain *domain, in altera_irq_domain_free() argument
158 struct irq_data *d = irq_domain_get_irq_data(domain, virq); in altera_irq_domain_free()
Dpci-xgene-msi.c203 static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in xgene_irq_domain_alloc() argument
206 struct xgene_msi *msi = domain->host_data; in xgene_irq_domain_alloc()
223 irq_domain_set_info(domain, virq, msi_irq, in xgene_irq_domain_alloc()
224 &xgene_msi_bottom_irq_chip, domain->host_data, in xgene_irq_domain_alloc()
230 static void xgene_irq_domain_free(struct irq_domain *domain, in xgene_irq_domain_free() argument
233 struct irq_data *d = irq_domain_get_irq_data(domain, virq); in xgene_irq_domain_free()
244 irq_domain_free_irqs_parent(domain, virq, nr_irqs); in xgene_irq_domain_free()
/linux-4.4.14/arch/arm/mm/
Dmmu.c255 .domain = DOMAIN_IO,
261 .domain = DOMAIN_IO,
267 .domain = DOMAIN_IO,
273 .domain = DOMAIN_IO,
279 .domain = DOMAIN_IO,
283 .domain = DOMAIN_KERNEL,
288 .domain = DOMAIN_KERNEL,
295 .domain = DOMAIN_VECTORS,
301 .domain = DOMAIN_VECTORS,
307 .domain = DOMAIN_KERNEL,
[all …]
/linux-4.4.14/arch/arm/mach-omap2/
Domap-wakeupgen.c424 static int wakeupgen_domain_alloc(struct irq_domain *domain, in wakeupgen_domain_alloc() argument
443 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in wakeupgen_domain_alloc()
447 parent_fwspec.fwnode = domain->parent->fwnode; in wakeupgen_domain_alloc()
448 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, in wakeupgen_domain_alloc()
464 struct irq_domain *parent_domain, *domain; in wakeupgen_init() local
499 domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs, in wakeupgen_init()
502 if (!domain) { in wakeupgen_init()
/linux-4.4.14/include/linux/clk/
Dshmobile.h29 int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
30 void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
/linux-4.4.14/arch/arc/kernel/
Dmcip.c248 struct irq_domain *domain = irq_desc_get_handler_data(desc); in idu_cascade_isr() local
253 generic_handle_irq(irq_find_mapping(domain, idu_irq)); in idu_cascade_isr()
316 struct irq_domain *domain; in idu_of_init() local
326 domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL); in idu_of_init()
341 irq_set_chained_handler_and_data(irq, idu_cascade_isr, domain); in idu_of_init()
/linux-4.4.14/arch/ia64/include/asm/
Dhw_irq.h101 cpumask_t domain; member
108 #define irq_to_domain(x) irq_cfg[(x)].domain
120 extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
/linux-4.4.14/arch/arm/kernel/
Dprocess.c99 unsigned int domain; in __show_regs() local
107 domain = DACR_UACCESS_ENABLE; in __show_regs()
109 domain = *(unsigned int *)(regs + 1); in __show_regs()
111 domain = get_domain(); in __show_regs()
144 if ((domain & domain_mask(DOMAIN_USER)) == in __show_regs()
173 transbase, domain); in __show_regs()
/linux-4.4.14/Documentation/ABI/testing/
Dsysfs-class-regulator153 output voltage setting for this domain measured in microvolts,
157 the power domain has no min microvolts constraint defined by
168 output voltage setting for this domain measured in microvolts,
172 the power domain has no max microvolts constraint defined by
183 output current limit setting for this domain measured in
187 the power domain has no min microamps constraint defined by
198 output current limit setting for this domain measured in
202 the power domain has no max microamps constraint defined by
255 voltage setting for this domain measured in microvolts when
266 voltage setting for this domain measured in microvolts when
[all …]
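
The ABI entries above document per-regulator constraint attributes such as min_microvolts, max_microvolts, min_microamps and max_microamps. As a small usage sketch (the regulator.0 instance path below is only an example; real systems enumerate regulator instances differently), the C program reads those attributes from sysfs and prints whatever is exposed.

/*
 * Read the regulator constraint attributes described above from sysfs.
 * The attribute names come from the ABI excerpt; the instance path is
 * an example and will differ per system.
 */
#include <stdio.h>

static void show(const char *base, const char *attr)
{
	char path[256], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", base, attr);
	f = fopen(path, "r");
	if (!f) {
		printf("%-16s <not available>\n", attr);
		return;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("%-16s %s", attr, buf);
	fclose(f);
}

int main(void)
{
	const char *base = "/sys/class/regulator/regulator.0"; /* example */

	show(base, "min_microvolts");
	show(base, "max_microvolts");
	show(base, "min_microamps");
	show(base, "max_microamps");
	return 0;
}
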
/linux-4.4.14/drivers/gpu/drm/nouveau/
Dnouveau_gem.c178 nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, in nouveau_gem_new() argument
187 if (domain & NOUVEAU_GEM_DOMAIN_VRAM) in nouveau_gem_new()
189 if (domain & NOUVEAU_GEM_DOMAIN_GART) in nouveau_gem_new()
191 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) in nouveau_gem_new()
194 if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) in nouveau_gem_new()
210 nvbo->valid_domains &= domain; in nouveau_gem_new()
233 rep->domain = nvbo->valid_domains; in nouveau_gem_info()
235 rep->domain = NOUVEAU_GEM_DOMAIN_GART; in nouveau_gem_info()
237 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; in nouveau_gem_info()
271 req->info.domain, req->info.tile_mode, in nouveau_gem_ioctl_new()
[all …]
/linux-4.4.14/arch/arm64/kernel/
Dpci.c67 int raw_pci_read(unsigned int domain, unsigned int bus, in raw_pci_read() argument
73 int raw_pci_write(unsigned int domain, unsigned int bus, in raw_pci_write() argument
/linux-4.4.14/arch/arm/mach-ux500/
Dpm_domains.c18 static int pd_power_off(struct generic_pm_domain *domain) in pd_power_off() argument
30 static int pd_power_on(struct generic_pm_domain *domain) in pd_power_on() argument
/linux-4.4.14/drivers/net/ethernet/emulex/benet/
Dbe_cmds.c1022 u32 if_id, u32 *pmac_id, u32 domain) in be_cmd_pmac_add() argument
1041 req->hdr.domain = domain; in be_cmd_pmac_add()
1084 req->hdr.domain = dom; in be_cmd_pmac_del()
1473 u32 *if_handle, u32 domain) in be_cmd_if_create() argument
1483 req->hdr.domain = domain; in be_cmd_if_create()
1502 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) in be_cmd_if_destroy() argument
1523 req->hdr.domain = domain; in be_cmd_if_destroy()
1666 req->hdr.domain = dom; in be_cmd_link_status_query()
1904 u32 num, u32 domain) in be_cmd_vlan_config() argument
1922 req->hdr.domain = domain; in be_cmd_vlan_config()
[all …]
/linux-4.4.14/drivers/soc/rockchip/
DKconfig7 bool "Rockchip generic power domain"
11 Say y here to enable power domain support.
/linux-4.4.14/include/xen/interface/io/
Dpciif.h77 uint32_t domain; /* PCI Domain/Segment */ member
101 uint32_t domain; /* PCI Domain/Segment*/ member
/linux-4.4.14/arch/arm/mach-omap1/
Dirq.c68 static struct irq_domain *domain; variable
168 handle_domain_irq(domain, irqnr, regs); in omap1_handle_irq()
238 domain = irq_domain_add_legacy(NULL, nr_irqs, irq_base, 0, in omap1_init_irq()
271 d = irq_get_irq_data(irq_find_mapping(domain, omap_l2_irq)); in omap1_init_irq()
/linux-4.4.14/drivers/thermal/ti-soc-thermal/
Domap4-thermal-data.c84 .domain = "cpu",
224 .domain = "cpu",
257 .domain = "cpu",
Ddra752-thermal-data.c436 .domain = "cpu",
447 .domain = "gpu",
456 .domain = "core",
465 .domain = "dspeve",
474 .domain = "iva",
Dti-thermal.h87 int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain);
94 int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain) in ti_thermal_expose_sensor() argument
/linux-4.4.14/drivers/gpu/vga/
Dvga_switcheroo.c913 struct dev_pm_domain *domain) in vga_switcheroo_init_domain_pm_ops() argument
917 domain->ops = *dev->bus->pm; in vga_switcheroo_init_domain_pm_ops()
918 domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend; in vga_switcheroo_init_domain_pm_ops()
919 domain->ops.runtime_resume = vga_switcheroo_runtime_resume; in vga_switcheroo_init_domain_pm_ops()
921 dev->pm_domain = domain; in vga_switcheroo_init_domain_pm_ops()
984 struct dev_pm_domain *domain) in vga_switcheroo_init_domain_pm_optimus_hdmi_audio() argument
988 domain->ops = *dev->bus->pm; in vga_switcheroo_init_domain_pm_optimus_hdmi_audio()
989 domain->ops.runtime_resume = in vga_switcheroo_init_domain_pm_optimus_hdmi_audio()
992 dev->pm_domain = domain; in vga_switcheroo_init_domain_pm_optimus_hdmi_audio()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
Dgk20a.c104 struct iommu_domain *domain; member
347 iommu_unmap(imem->domain, in gk20a_instobj_dtor_iommu()
486 ret = iommu_map(imem->domain, offset, node->dma_addrs[i], in gk20a_instobj_ctor_iommu()
493 iommu_unmap(imem->domain, offset, PAGE_SIZE); in gk20a_instobj_ctor_iommu()
536 imem->domain ? "IOMMU" : "DMA", size, align); in gk20a_instobj_new()
542 if (imem->domain) in gk20a_instobj_new()
607 if (tdev->iommu.domain) { in gk20a_instmem_new()
610 imem->domain = tdev->iommu.domain; in gk20a_instmem_new()
/linux-4.4.14/fs/ocfs2/dlm/
Ddlmdomain.c259 static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) in __dlm_lookup_domain_full() argument
269 memcmp(tmp->name, domain, len)==0) in __dlm_lookup_domain_full()
277 static struct dlm_ctxt * __dlm_lookup_domain(const char *domain) in __dlm_lookup_domain() argument
281 return __dlm_lookup_domain_full(domain, strlen(domain)); in __dlm_lookup_domain()
288 static int dlm_wait_on_domain_helper(const char *domain) in dlm_wait_on_domain_helper() argument
295 tmp = __dlm_lookup_domain(domain); in dlm_wait_on_domain_helper()
821 query->domain); in dlm_query_join_handler()
839 dlm = __dlm_lookup_domain_full(query->domain, query->name_len); in dlm_query_join_handler()
936 assert->domain); in dlm_assert_joined_handler()
939 dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len); in dlm_assert_joined_handler()
[all …]
