Searched refs:domain (Results 1 – 200 of 531) sorted by relevance

/linux-4.1.27/kernel/irq/
irqdomain.c
28 static void irq_domain_check_hierarchy(struct irq_domain *domain);
48 struct irq_domain *domain; in __irq_domain_add() local
50 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), in __irq_domain_add()
52 if (WARN_ON(!domain)) in __irq_domain_add()
56 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); in __irq_domain_add()
57 domain->ops = ops; in __irq_domain_add()
58 domain->host_data = host_data; in __irq_domain_add()
59 domain->of_node = of_node_get(of_node); in __irq_domain_add()
60 domain->hwirq_max = hwirq_max; in __irq_domain_add()
61 domain->revmap_size = size; in __irq_domain_add()
[all …]
msi.c
66 static void msi_domain_activate(struct irq_domain *domain, in msi_domain_activate() argument
75 static void msi_domain_deactivate(struct irq_domain *domain, in msi_domain_deactivate() argument
84 static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, in msi_domain_alloc() argument
87 struct msi_domain_info *info = domain->host_data; in msi_domain_alloc()
92 if (irq_find_mapping(domain, hwirq) > 0) in msi_domain_alloc()
95 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); in msi_domain_alloc()
100 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); in msi_domain_alloc()
104 ops->msi_free(domain, info, virq + i); in msi_domain_alloc()
106 irq_domain_free_irqs_top(domain, virq, nr_irqs); in msi_domain_alloc()
114 static void msi_domain_free(struct irq_domain *domain, unsigned int virq, in msi_domain_free() argument
[all …]
/linux-4.1.27/drivers/iommu/
ipmmu-vmsa.c
199 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) in ipmmu_ctx_read() argument
201 return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); in ipmmu_ctx_read()
204 static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, in ipmmu_ctx_write() argument
207 ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); in ipmmu_ctx_write()
215 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_sync() argument
219 while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { in ipmmu_tlb_sync()
222 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
230 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) in ipmmu_tlb_invalidate() argument
234 reg = ipmmu_ctx_read(domain, IMCTR); in ipmmu_tlb_invalidate()
236 ipmmu_ctx_write(domain, IMCTR, reg); in ipmmu_tlb_invalidate()
[all …]
amd_iommu.c
99 struct protection_domain *domain; /* Domain the device is bound to */ member
121 static void update_domain(struct protection_domain *domain);
132 return container_of(dom, struct protection_domain, domain); in to_pdomain()
1171 static void __domain_flush_pages(struct protection_domain *domain, in __domain_flush_pages() argument
1178 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); in __domain_flush_pages()
1181 if (!domain->dev_iommu[i]) in __domain_flush_pages()
1191 list_for_each_entry(dev_data, &domain->dev_list, list) { in __domain_flush_pages()
1202 static void domain_flush_pages(struct protection_domain *domain, in domain_flush_pages() argument
1205 __domain_flush_pages(domain, address, size, 0); in domain_flush_pages()
1209 static void domain_flush_tlb(struct protection_domain *domain) in domain_flush_tlb() argument
[all …]
intel-iommu.c
342 struct iommu_domain domain; /* generic domain data structure for member
354 struct dmar_domain *domain; /* pointer to domain */ member
388 struct dmar_domain *domain[HIGH_WATER_MARK]; member
403 static void domain_exit(struct dmar_domain *domain);
404 static void domain_remove_dev_info(struct dmar_domain *domain);
405 static void domain_remove_one_dev_info(struct dmar_domain *domain,
409 static int domain_detach_iommu(struct dmar_domain *domain,
446 return container_of(dom, struct dmar_domain, domain); in to_dmar_domain()
529 static inline int domain_type_is_vm(struct dmar_domain *domain) in domain_type_is_vm() argument
531 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; in domain_type_is_vm()
[all …]
iommu.c
890 void iommu_set_fault_handler(struct iommu_domain *domain, in iommu_set_fault_handler() argument
894 BUG_ON(!domain); in iommu_set_fault_handler()
896 domain->handler = handler; in iommu_set_fault_handler()
897 domain->handler_token = token; in iommu_set_fault_handler()
903 struct iommu_domain *domain; in iommu_domain_alloc() local
908 domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); in iommu_domain_alloc()
909 if (!domain) in iommu_domain_alloc()
912 domain->ops = bus->iommu_ops; in iommu_domain_alloc()
913 domain->type = IOMMU_DOMAIN_UNMANAGED; in iommu_domain_alloc()
915 return domain; in iommu_domain_alloc()
[all …]
fsl_pamu_domain.c
307 struct fsl_dma_domain *domain; in iommu_alloc_dma_domain() local
309 domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); in iommu_alloc_dma_domain()
310 if (!domain) in iommu_alloc_dma_domain()
313 domain->stash_id = ~(u32)0; in iommu_alloc_dma_domain()
314 domain->snoop_id = ~(u32)0; in iommu_alloc_dma_domain()
315 domain->win_cnt = pamu_get_max_subwin_cnt(); in iommu_alloc_dma_domain()
316 domain->geom_size = 0; in iommu_alloc_dma_domain()
318 INIT_LIST_HEAD(&domain->devices); in iommu_alloc_dma_domain()
320 spin_lock_init(&domain->domain_lock); in iommu_alloc_dma_domain()
322 return domain; in iommu_alloc_dma_domain()
[all …]
tegra-gart.c
67 struct iommu_domain domain; /* generic domain handle */ member
78 return container_of(dom, struct gart_domain, domain); in to_gart_domain()
166 static int gart_iommu_attach_dev(struct iommu_domain *domain, in gart_iommu_attach_dev() argument
169 struct gart_domain *gart_domain = to_gart_domain(domain); in gart_iommu_attach_dev()
199 static void gart_iommu_detach_dev(struct iommu_domain *domain, in gart_iommu_detach_dev() argument
202 struct gart_domain *gart_domain = to_gart_domain(domain); in gart_iommu_detach_dev()
238 gart_domain->domain.geometry.aperture_start = gart->iovmm_base; in gart_iommu_domain_alloc()
239 gart_domain->domain.geometry.aperture_end = gart->iovmm_base + in gart_iommu_domain_alloc()
241 gart_domain->domain.geometry.force_aperture = true; in gart_iommu_domain_alloc()
243 return &gart_domain->domain; in gart_iommu_domain_alloc()
[all …]
msm_iommu.c
55 struct iommu_domain domain; member
60 return container_of(dom, struct msm_priv, domain); in to_msm_priv()
86 static int __flush_iotlb(struct iommu_domain *domain) in __flush_iotlb() argument
88 struct msm_priv *priv = to_msm_priv(domain); in __flush_iotlb()
238 priv->domain.geometry.aperture_start = 0; in msm_iommu_domain_alloc()
239 priv->domain.geometry.aperture_end = (1ULL << 32) - 1; in msm_iommu_domain_alloc()
240 priv->domain.geometry.force_aperture = true; in msm_iommu_domain_alloc()
242 return &priv->domain; in msm_iommu_domain_alloc()
249 static void msm_iommu_domain_free(struct iommu_domain *domain) in msm_iommu_domain_free() argument
257 priv = to_msm_priv(domain); in msm_iommu_domain_free()
[all …]
shmobile-iommu.c
45 struct iommu_domain domain; member
53 return container_of(dom, struct shmobile_iommu_domain, domain); in to_sh_domain()
112 return &sh_domain->domain; in shmobile_iommu_domain_alloc()
115 static void shmobile_iommu_domain_free(struct iommu_domain *domain) in shmobile_iommu_domain_free() argument
117 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_domain_free()
128 static int shmobile_iommu_attach_device(struct iommu_domain *domain, in shmobile_iommu_attach_device() argument
132 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_attach_device()
157 static void shmobile_iommu_detach_device(struct iommu_domain *domain, in shmobile_iommu_detach_device() argument
161 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); in shmobile_iommu_detach_device()
220 static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, in shmobile_iommu_map() argument
[all …]
exynos-iommu.c
192 struct iommu_domain *domain; member
203 struct iommu_domain domain; /* generic domain data structure */ member
214 struct iommu_domain *domain; member
220 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
347 if (data->domain) in exynos_sysmmu_irq()
348 ret = report_iommu_fault(data->domain, in exynos_sysmmu_irq()
391 data->domain = NULL; in __sysmmu_disable()
446 phys_addr_t pgtable, struct iommu_domain *domain) in __sysmmu_enable() argument
454 data->domain = domain; in __sysmmu_enable()
480 struct iommu_domain *domain) in __exynos_sysmmu_enable() argument
[all …]
rockchip-iommu.c
84 struct iommu_domain domain; member
92 struct iommu_domain *domain; /* domain to which iommu is attached */ member
107 return container_of(dom, struct rk_iommu_domain, domain); in to_rk_domain()
488 if (iommu->domain) in rk_iommu_irq()
489 report_iommu_fault(iommu->domain, iommu->dev, iova, in rk_iommu_irq()
510 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, in rk_iommu_iova_to_phys() argument
513 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_iova_to_phys()
646 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, in rk_iommu_map() argument
649 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_map()
677 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, in rk_iommu_unmap() argument
[all …]
tegra-smmu.c
38 struct iommu_domain domain; member
49 return container_of(dom, struct tegra_smmu_as, domain); in to_smmu_as()
278 as->domain.geometry.aperture_start = 0; in tegra_smmu_domain_alloc()
279 as->domain.geometry.aperture_end = 0xffffffff; in tegra_smmu_domain_alloc()
280 as->domain.geometry.force_aperture = true; in tegra_smmu_domain_alloc()
282 return &as->domain; in tegra_smmu_domain_alloc()
285 static void tegra_smmu_domain_free(struct iommu_domain *domain) in tegra_smmu_domain_free() argument
287 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_domain_free()
407 static int tegra_smmu_attach_dev(struct iommu_domain *domain, in tegra_smmu_attach_dev() argument
411 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_attach_dev()
[all …]
amd_iommu_v2.c
68 struct iommu_domain *domain; member
139 iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); in free_device_state()
142 iommu_domain_free(dev_state->domain); in free_device_state()
276 struct iommu_domain *domain; in unbind_pasid() local
278 domain = pasid_state->device_state->domain; in unbind_pasid()
290 amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); in unbind_pasid()
371 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address); in __mn_flush_page()
403 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, in mn_invalidate_range()
406 amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid); in mn_invalidate_range()
656 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid, in amd_iommu_bind_pasid()
[all …]
arm-smmu.c
339 struct iommu_domain domain; member
359 return container_of(dom, struct arm_smmu_domain, domain); in to_smmu_domain()
646 struct iommu_domain *domain = dev; in arm_smmu_context_fault() local
647 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_context_fault()
673 if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { in arm_smmu_context_fault()
815 static int arm_smmu_init_domain_context(struct iommu_domain *domain, in arm_smmu_init_domain_context() argument
823 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_init_domain_context()
923 "arm-smmu-context-fault", domain); in arm_smmu_init_domain_context()
943 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) in arm_smmu_destroy_domain_context() argument
945 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); in arm_smmu_destroy_domain_context()
[all …]
omap-iommu.c
62 struct iommu_domain domain; member
89 return container_of(dom, struct omap_iommu_domain, domain); in to_omap_domain()
913 struct iommu_domain *domain = obj->domain; in iommu_fault_handler() local
914 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); in iommu_fault_handler()
924 if (!report_iommu_fault(domain, obj->dev, da, 0)) in iommu_fault_handler()
1123 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, in omap_iommu_map() argument
1126 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); in omap_iommu_map()
1150 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, in omap_iommu_unmap() argument
1153 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); in omap_iommu_unmap()
1163 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) in omap_iommu_attach_dev() argument
[all …]
/linux-4.1.27/include/linux/
iommu.h
145 int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
146 void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
147 int (*map)(struct iommu_domain *domain, unsigned long iova,
149 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
151 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
153 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
157 int (*domain_get_attr)(struct iommu_domain *domain,
159 int (*domain_set_attr)(struct iommu_domain *domain,
163 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
165 void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
[all …]
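
The iommu.h hits above are the iommu_ops callbacks that each driver in drivers/iommu/ implements behind a struct iommu_domain; code that merely consumes a domain goes through the wrappers in drivers/iommu/iommu.c instead. Below is a minimal, hedged sketch of that consumer flow against the 4.1 API; the device pointer, IOVA and physical address are placeholders rather than anything taken from the files listed here.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/mm.h>

/* Illustrative only: map one page for "dev" through an unmanaged domain. */
static int example_map_one_page(struct device *dev, phys_addr_t paddr)
{
	const unsigned long iova = 0x10000000;	/* arbitrary IO virtual address */
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);	/* ends up in ops->domain_alloc() */
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* ops->attach_dev() */
	if (ret)
		goto free_domain;

	ret = iommu_map(domain, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	/* ... device DMA to/from 'iova' would happen here ... */

	iommu_unmap(domain, iova, PAGE_SIZE);	/* ops->unmap() */
detach:
	iommu_detach_device(domain, dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}

iommu_domain_alloc() picks its ops from dev->bus, which is why the drivers above register their iommu_ops per bus type.
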
irqdomain.h
205 extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
207 extern void irq_domain_associate_many(struct irq_domain *domain,
210 extern void irq_domain_disassociate(struct irq_domain *domain,
227 static inline unsigned int irq_linear_revmap(struct irq_domain *domain, in irq_linear_revmap() argument
230 return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; in irq_linear_revmap()
235 extern int irq_create_strict_mappings(struct irq_domain *domain,
259 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
266 extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
273 static inline int irq_domain_alloc_irqs(struct irq_domain *domain, in irq_domain_alloc_irqs() argument
276 return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); in irq_domain_alloc_irqs()
[all …]
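
irqdomain.h is the hwirq-to-Linux-irq mapping layer that the drivers/irqchip/ entries further down build on. A rough sketch of the common linear-domain pattern follows; the controller size, the example hwirq and the callback wiring are invented for illustration.

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define MY_NR_IRQS	32	/* hypothetical number of hardware lines */

static struct irq_domain *my_domain;

/* ops->map() runs when a hwirq is first mapped into the domain */
static int my_domain_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops my_domain_ops = {
	.map	= my_domain_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int my_intc_init(struct device_node *node)
{
	unsigned int virq;

	my_domain = irq_domain_add_linear(node, MY_NR_IRQS,
					  &my_domain_ops, NULL);
	if (!my_domain)
		return -ENOMEM;

	/* translate a hardware line to a virq, creating the mapping on demand */
	virq = irq_create_mapping(my_domain, 5 /* example hwirq */);
	return virq ? 0 : -EINVAL;
}

At interrupt time the reverse lookup is irq_find_mapping() or irq_linear_revmap(), which is exactly what the irqchip hits below do.
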
msi.h
112 struct irq_domain *domain; member
151 int (*msi_init)(struct irq_domain *domain,
155 void (*msi_free)(struct irq_domain *domain,
158 int (*msi_check)(struct irq_domain *domain,
161 int (*msi_prepare)(struct irq_domain *domain,
167 int (*handle_error)(struct irq_domain *domain,
219 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
221 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
222 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
231 int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
[all …]
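
msi.h stacks an MSI-specific irq_domain on top of a parent interrupt-controller domain; irq-gic-v2m.c and irq-gic-v3-its.c below are in-tree users. The following is only a hedged sketch of how such a stacked domain might be created and consumed on 4.1: the chip, ops and all names are placeholders, and a real driver must additionally wire up masking and MSI message composition for its hardware.

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/of.h>

/* Placeholder chip: real mask/unmask and irq_write_msi_msg() callbacks
 * are omitted here and must come from the actual hardware driver. */
static struct irq_chip my_msi_chip = {
	.name = "my-msi",
};

static struct msi_domain_ops my_msi_ops;	/* empty: defaults filled by the core */

static struct msi_domain_info my_msi_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
	.ops	= &my_msi_ops,
	.chip	= &my_msi_chip,
};

/* node: this controller's DT node; parent: e.g. the GIC's irq_domain */
static struct irq_domain *my_msi_domain_init(struct device_node *node,
					     struct irq_domain *parent)
{
	return msi_create_irq_domain(node, &my_msi_info, parent);
}

/* Per device: allocate nvec interrupts from that domain (released again
 * with msi_domain_free_irqs() when the device goes away). */
static int my_msi_enable(struct irq_domain *msi_domain,
			 struct device *dev, int nvec)
{
	return msi_domain_alloc_irqs(msi_domain, dev, nvec);
}
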
async.h
42 struct async_domain *domain);
43 void async_unregister_domain(struct async_domain *domain);
45 extern void async_synchronize_full_domain(struct async_domain *domain);
48 struct async_domain *domain);
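
async.h's domains group otherwise unrelated asynchronous work so that a caller can wait for just its own batch instead of everything queued globally. A small hedged sketch; the probe callback body and the two hardware pointers are stand-ins.

#include <linux/async.h>

/* Registered domain: also covered by async_synchronize_full(). */
static ASYNC_DOMAIN(my_probe_domain);

static void my_async_probe(void *data, async_cookie_t cookie)
{
	/* slow, independent initialization of 'data' would go here */
}

static void my_driver_init(void *hw1, void *hw2)
{
	async_schedule_domain(my_async_probe, hw1, &my_probe_domain);
	async_schedule_domain(my_async_probe, hw2, &my_probe_domain);

	/* wait only for the work queued into our own domain */
	async_synchronize_full_domain(&my_probe_domain);
}
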
vga_switcheroo.h
66 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
68 …vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
85 …_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL;… in vga_switcheroo_init_domain_pm_ops() argument
87 …t_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL;… in vga_switcheroo_init_domain_pm_optimus_hdmi_audio() argument
pm_domain.h
32 bool (*power_down_ok)(struct dev_pm_domain *domain);
50 struct dev_pm_domain domain; /* PM domain operations */ member
69 int (*power_off)(struct generic_pm_domain *domain);
71 int (*power_on)(struct generic_pm_domain *domain);
78 int (*attach_dev)(struct generic_pm_domain *domain,
80 void (*detach_dev)(struct generic_pm_domain *domain,
87 return container_of(pd, struct generic_pm_domain, domain); in pd_to_genpd()
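
pm_domain.h is the generic PM domain (genpd) framework that the #power-domain-cells / power-domains device-tree entries elsewhere in these results plug into. A hedged sketch of registering one domain and exposing it to DT consumers; the names are invented, the hardware accesses are reduced to comments, and the provider helper assumes CONFIG_PM_GENERIC_DOMAINS_OF.

#include <linux/of.h>
#include <linux/pm_domain.h>

static int my_pd_power_off(struct generic_pm_domain *domain)
{
	/* gate the power island in hardware (register writes omitted) */
	return 0;
}

static int my_pd_power_on(struct generic_pm_domain *domain)
{
	/* ungate the island and wait for it to settle (omitted) */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.name		= "my-power-domain",
	.power_off	= my_pd_power_off,
	.power_on	= my_pd_power_on,
};

static int my_pd_setup(struct device_node *np)
{
	/* gov = NULL selects the default governor; false = starts powered on */
	pm_genpd_init(&my_pd, NULL, false);

	/* let devices whose power-domains property points at np find this domain */
	return of_genpd_add_provider_simple(np, &my_pd);
}
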
irqdesc.h
141 int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
144 static inline int handle_domain_irq(struct irq_domain *domain, in handle_domain_irq() argument
147 return __handle_domain_irq(domain, hwirq, true, regs); in handle_domain_irq()
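
handle_domain_irq() from irqdesc.h is what the low-level entry points of the irqchip drivers below (irq-vt8500.c, irq-clps711x.c, irq-gic.c, ...) call to turn a pending hardware irq number into a Linux irq and dispatch it. An ARM-flavoured, hedged sketch with a made-up status register; the domain itself would come from something like the irq_domain_add_linear() call sketched earlier.

#include <linux/io.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>

static struct irq_domain *my_intc_domain;	/* set up at probe time */
static void __iomem *my_base;			/* hypothetical controller registers */

#define MY_IRQ_STATUS	0x00			/* made-up "pending hwirq" register */

/* registered with set_handle_irq() on ARM */
static void __exception_irq_entry my_handle_irq(struct pt_regs *regs)
{
	u32 hwirq = readl_relaxed(my_base + MY_IRQ_STATUS);

	/* looks hwirq up in my_intc_domain, then irq_enter()/generic_handle_irq()/irq_exit() */
	handle_domain_irq(my_intc_domain, hwirq, regs);
}
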
/linux-4.1.27/drivers/pinctrl/meson/
pinctrl-meson.c
76 static int meson_get_bank(struct meson_domain *domain, unsigned int pin, in meson_get_bank() argument
81 for (i = 0; i < domain->data->num_banks; i++) { in meson_get_bank()
82 if (pin >= domain->data->banks[i].first && in meson_get_bank()
83 pin <= domain->data->banks[i].last) { in meson_get_bank()
84 *bank = &domain->data->banks[i]; in meson_get_bank()
103 struct meson_domain **domain, in meson_get_domain_and_bank() argument
113 *domain = d; in meson_get_domain_and_bank()
196 struct meson_domain *domain; in meson_pmx_disable_other_groups() local
207 domain = &pc->domains[group->domain]; in meson_pmx_disable_other_groups()
208 regmap_update_bits(domain->reg_mux, in meson_pmx_disable_other_groups()
[all …]
pinctrl-meson.h
37 unsigned int domain; member
167 .domain = 0, \
185 .domain = 1, \
/linux-4.1.27/arch/arm/boot/dts/
k2hk-clocks.dtsi
62 reg-names = "control", "domain";
63 domain-id = <0>;
72 reg-names = "control", "domain";
73 domain-id = <4>;
82 reg-names = "control", "domain";
83 domain-id = <5>;
92 reg-names = "control", "domain";
93 domain-id = <9>;
102 reg-names = "control", "domain";
103 domain-id = <10>;
[all …]
k2l-clocks.dtsi
52 reg-names = "control", "domain";
54 domain-id = <0>;
63 reg-names = "control", "domain";
64 domain-id = <4>;
73 reg-names = "control", "domain";
74 domain-id = <9>;
83 reg-names = "control", "domain";
84 domain-id = <10>;
93 reg-names = "control", "domain";
94 domain-id = <11>;
[all …]
keystone-clocks.dtsi
169 reg-names = "control", "domain";
170 domain-id = <0>;
180 reg-names = "control", "domain";
181 domain-id = <0>;
190 reg-names = "control", "domain";
191 domain-id = <0>;
201 reg-names = "control", "domain";
202 domain-id = <1>;
211 reg-names = "control", "domain";
212 domain-id = <1>;
[all …]
k2e-clocks.dtsi
44 reg-names = "control", "domain";
45 domain-id = <0>;
54 reg-names = "control", "domain";
55 domain-id = <5>;
64 reg-names = "control", "domain";
65 domain-id = <18>;
74 reg-names = "control", "domain";
75 domain-id = <29>;
r8a73a4.dtsi
758 #power-domain-cells = <0>;
764 #power-domain-cells = <0>;
768 #power-domain-cells = <0>;
773 #power-domain-cells = <0>;
780 #power-domain-cells = <0>;
784 #power-domain-cells = <0>;
792 #power-domain-cells = <0>;
796 #power-domain-cells = <0>;
804 #power-domain-cells = <0>;
808 #power-domain-cells = <0>;
[all …]
exynos4415.dtsi
131 pd_cam: cam-power-domain@10024000 {
134 #power-domain-cells = <0>;
137 pd_tv: tv-power-domain@10024020 {
140 #power-domain-cells = <0>;
143 pd_mfc: mfc-power-domain@10024040 {
146 #power-domain-cells = <0>;
149 pd_g3d: g3d-power-domain@10024060 {
152 #power-domain-cells = <0>;
155 pd_lcd0: lcd0-power-domain@10024080 {
158 #power-domain-cells = <0>;
[all …]
sh73a0.dtsi
415 #power-domain-cells = <0>;
419 #power-domain-cells = <0>;
424 #power-domain-cells = <0>;
429 #power-domain-cells = <0>;
434 #power-domain-cells = <0>;
439 #power-domain-cells = <0>;
444 #power-domain-cells = <0>;
451 #power-domain-cells = <0>;
455 #power-domain-cells = <0>;
460 #power-domain-cells = <0>;
[all …]
exynos4.dtsi
86 pd_mfc: mfc-power-domain@10023C40 {
89 #power-domain-cells = <0>;
92 pd_g3d: g3d-power-domain@10023C60 {
95 #power-domain-cells = <0>;
98 pd_lcd0: lcd0-power-domain@10023C80 {
101 #power-domain-cells = <0>;
104 pd_tv: tv-power-domain@10023C20 {
107 #power-domain-cells = <0>;
111 pd_cam: cam-power-domain@10023C00 {
114 #power-domain-cells = <0>;
[all …]
exynos3250.dtsi
145 pd_cam: cam-power-domain@10023C00 {
148 #power-domain-cells = <0>;
151 pd_mfc: mfc-power-domain@10023C40 {
154 #power-domain-cells = <0>;
157 pd_g3d: g3d-power-domain@10023C60 {
160 #power-domain-cells = <0>;
163 pd_lcd0: lcd0-power-domain@10023C80 {
166 #power-domain-cells = <0>;
169 pd_isp: isp-power-domain@10023CA0 {
172 #power-domain-cells = <0>;
r8a7740.dtsi
670 #power-domain-cells = <0>;
674 #power-domain-cells = <0>;
679 #power-domain-cells = <0>;
684 #power-domain-cells = <0>;
691 #power-domain-cells = <0>;
695 #power-domain-cells = <0>;
703 #power-domain-cells = <0>;
707 #power-domain-cells = <0>;
712 #power-domain-cells = <0>;
717 #power-domain-cells = <0>;
[all …]
/linux-4.1.27/drivers/dca/
dca-core.c
60 struct dca_domain *domain; in dca_allocate_domain() local
62 domain = kzalloc(sizeof(*domain), GFP_NOWAIT); in dca_allocate_domain()
63 if (!domain) in dca_allocate_domain()
66 INIT_LIST_HEAD(&domain->dca_providers); in dca_allocate_domain()
67 domain->pci_rc = rc; in dca_allocate_domain()
69 return domain; in dca_allocate_domain()
72 static void dca_free_domain(struct dca_domain *domain) in dca_free_domain() argument
74 list_del(&domain->node); in dca_free_domain()
75 kfree(domain); in dca_free_domain()
97 struct dca_domain *domain; in unregister_dca_providers() local
[all …]
/linux-4.1.27/drivers/vfio/
vfio_iommu_type1.c
65 struct iommu_domain *domain; member
339 struct vfio_domain *domain, *d; in vfio_unmap_unpin() local
351 domain = d = list_first_entry(&iommu->domain_list, in vfio_unmap_unpin()
355 iommu_unmap(d->domain, dma->iova, dma->size); in vfio_unmap_unpin()
363 phys = iommu_iova_to_phys(domain->domain, iova); in vfio_unmap_unpin()
375 !domain->fgsp && iova + len < end; len += PAGE_SIZE) { in vfio_unmap_unpin()
376 next = iommu_iova_to_phys(domain->domain, iova + len); in vfio_unmap_unpin()
381 unmapped = iommu_unmap(domain->domain, iova, len); in vfio_unmap_unpin()
405 struct vfio_domain *domain; in vfio_pgsize_bitmap() local
409 list_for_each_entry(domain, &iommu->domain_list, next) in vfio_pgsize_bitmap()
[all …]
/linux-4.1.27/drivers/irqchip/
irq-atmel-aic5.c
89 struct irq_domain *domain = d->domain; in aic5_mask() local
90 struct irq_domain_chip_generic *dgc = domain->gc; in aic5_mask()
107 struct irq_domain *domain = d->domain; in aic5_unmask() local
108 struct irq_domain_chip_generic *dgc = domain->gc; in aic5_unmask()
125 struct irq_domain *domain = d->domain; in aic5_retrigger() local
126 struct irq_domain_chip_generic *dgc = domain->gc; in aic5_retrigger()
140 struct irq_domain *domain = d->domain; in aic5_set_type() local
141 struct irq_domain_chip_generic *dgc = domain->gc; in aic5_set_type()
160 struct irq_domain *domain = d->domain; in aic5_suspend() local
161 struct irq_domain_chip_generic *dgc = domain->gc; in aic5_suspend()
[all …]
irq-mmp.c
49 struct irq_domain *domain; member
66 struct irq_domain *domain = d->domain; in icu_mask_ack_irq() local
67 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; in icu_mask_ack_irq()
90 struct irq_domain *domain = d->domain; in icu_mask_irq() local
91 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; in icu_mask_irq()
109 struct irq_domain *domain = d->domain; in icu_unmask_irq() local
110 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; in icu_unmask_irq()
135 struct irq_domain *domain; in icu_mux_irq_demux() local
142 domain = icu_data[i].domain; in icu_mux_irq_demux()
143 data = (struct icu_chip_data *)domain->host_data; in icu_mux_irq_demux()
[all …]
irq-gic-v2m.c
54 struct irq_domain *domain; member
114 static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, in gicv2m_irq_gic_domain_alloc() argument
122 args.np = domain->parent->of_node; in gicv2m_irq_gic_domain_alloc()
128 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); in gicv2m_irq_gic_domain_alloc()
133 d = irq_domain_get_irq_data(domain->parent, virq); in gicv2m_irq_gic_domain_alloc()
153 static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in gicv2m_irq_domain_alloc() argument
156 struct v2m_data *v2m = domain->host_data; in gicv2m_irq_domain_alloc()
172 err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); in gicv2m_irq_domain_alloc()
178 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, in gicv2m_irq_domain_alloc()
184 static void gicv2m_irq_domain_free(struct irq_domain *domain, in gicv2m_irq_domain_free() argument
[all …]
irq-atmel-aic-common.c
115 static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) in aic_common_ext_irq_of_init() argument
117 struct device_node *node = domain->of_node; in aic_common_ext_irq_of_init()
124 gc = irq_get_domain_generic_chip(domain, 0); in aic_common_ext_irq_of_init()
130 gc = irq_get_domain_generic_chip(domain, hwirq); in aic_common_ext_irq_of_init()
133 hwirq, domain->revmap_size); in aic_common_ext_irq_of_init()
220 struct irq_domain *domain; in aic_common_of_init() local
239 domain = irq_domain_add_linear(node, nchips * 32, ops, aic); in aic_common_of_init()
240 if (!domain) { in aic_common_of_init()
245 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, in aic_common_of_init()
253 gc = irq_get_domain_generic_chip(domain, i * 32); in aic_common_of_init()
[all …]
irq-moxart.c
41 struct irq_domain *domain; member
56 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); in handle_irq()
75 intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, in moxart_of_intc_init()
77 if (!intc.domain) { in moxart_of_intc_init()
82 ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1, in moxart_of_intc_init()
88 irq_domain_remove(intc.domain); in moxart_of_intc_init()
98 gc = irq_get_domain_generic_chip(intc.domain, 0); in moxart_of_intc_init()
irq-tb10x.c
102 struct irq_domain *domain = irq_desc_get_handler_data(desc); in tb10x_irq_cascade() local
104 generic_handle_irq(irq_find_mapping(domain, irq)); in tb10x_irq_cascade()
113 struct irq_domain *domain; in of_tb10x_init_irq() local
135 domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ, in of_tb10x_init_irq()
137 if (!domain) { in of_tb10x_init_irq()
144 ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ, in of_tb10x_init_irq()
154 gc = domain->gc->gc[0]; in of_tb10x_init_irq()
176 irq_set_handler_data(irq, domain); in of_tb10x_init_irq()
188 irq_domain_remove(domain); in of_tb10x_init_irq()
irq-mtk-sysirq.c
89 static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, in mtk_sysirq_domain_alloc() argument
106 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in mtk_sysirq_domain_alloc()
108 domain->host_data); in mtk_sysirq_domain_alloc()
110 gic_data.np = domain->parent->of_node; in mtk_sysirq_domain_alloc()
111 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); in mtk_sysirq_domain_alloc()
123 struct irq_domain *domain, *domain_parent; in mtk_sysirq_of_init() local
151 domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node, in mtk_sysirq_of_init()
153 if (!domain) { in mtk_sysirq_of_init()
irq-tegra.c
225 static int tegra_ictlr_domain_xlate(struct irq_domain *domain, in tegra_ictlr_domain_xlate() argument
232 if (domain->of_node != controller) in tegra_ictlr_domain_xlate()
244 static int tegra_ictlr_domain_alloc(struct irq_domain *domain, in tegra_ictlr_domain_alloc() argument
250 struct tegra_ictlr_info *info = domain->host_data; in tegra_ictlr_domain_alloc()
266 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in tegra_ictlr_domain_alloc()
272 parent_args.np = domain->parent->of_node; in tegra_ictlr_domain_alloc()
273 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); in tegra_ictlr_domain_alloc()
276 static void tegra_ictlr_domain_free(struct irq_domain *domain, in tegra_ictlr_domain_free() argument
283 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); in tegra_ictlr_domain_free()
297 struct irq_domain *parent_domain, *domain; in tegra_ictlr_init() local
[all …]
irq-vf610-mscm-ir.c
128 static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int virq, in vf610_mscm_ir_domain_alloc() argument
141 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in vf610_mscm_ir_domain_alloc()
143 domain->host_data); in vf610_mscm_ir_domain_alloc()
145 gic_data.np = domain->parent->of_node; in vf610_mscm_ir_domain_alloc()
150 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); in vf610_mscm_ir_domain_alloc()
162 struct irq_domain *domain, *domain_parent; in vf610_mscm_ir_of_init() local
194 domain = irq_domain_add_hierarchy(domain_parent, 0, in vf610_mscm_ir_of_init()
197 if (!domain) { in vf610_mscm_ir_of_init()
irq-sunxi-nmi.c
63 struct irq_domain *domain = irq_desc_get_handler_data(desc); in sunxi_sc_nmi_handle_irq() local
65 unsigned int virq = irq_find_mapping(domain, 0); in sunxi_sc_nmi_handle_irq()
124 struct irq_domain *domain; in sunxi_sc_nmi_irq_init() local
131 domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); in sunxi_sc_nmi_irq_init()
132 if (!domain) { in sunxi_sc_nmi_irq_init()
137 ret = irq_alloc_domain_generic_chips(domain, 1, 2, node->name, in sunxi_sc_nmi_irq_init()
153 gc = irq_get_domain_generic_chip(domain, 0); in sunxi_sc_nmi_irq_init()
185 irq_set_handler_data(irq, domain); in sunxi_sc_nmi_irq_init()
191 irq_domain_remove(domain); in sunxi_sc_nmi_irq_init()
irq-brcmstb-l2.c
49 struct irq_domain *domain; member
57 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); in brcmstb_l2_intc_irq_handle()
78 generic_handle_irq(irq_find_mapping(b->domain, irq)); in brcmstb_l2_intc_irq_handle()
152 data->domain = irq_domain_add_linear(np, 32, in brcmstb_l2_intc_of_init()
154 if (!data->domain) { in brcmstb_l2_intc_of_init()
167 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, in brcmstb_l2_intc_of_init()
178 gc = irq_get_domain_generic_chip(data->domain, 0); in brcmstb_l2_intc_of_init()
209 irq_domain_remove(data->domain); in brcmstb_l2_intc_of_init()
irq-atmel-aic.c
141 static void __init aic_hw_init(struct irq_domain *domain) in aic_hw_init() argument
143 struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); in aic_hw_init()
245 struct irq_domain *domain; in aic_of_init() local
250 domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic", in aic_of_init()
252 if (IS_ERR(domain)) in aic_of_init()
253 return PTR_ERR(domain); in aic_of_init()
257 aic_domain = domain; in aic_of_init()
258 gc = irq_get_domain_generic_chip(domain, 0); in aic_of_init()
271 aic_hw_init(domain); in aic_of_init()
irq-bcm2835.c
94 struct irq_domain *domain; member
153 intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0), in armctrl_of_init()
155 if (!intc.domain) in armctrl_of_init()
164 irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i)); in armctrl_of_init()
188 handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); in armctrl_handle_bank()
196 handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); in armctrl_handle_shortcut()
207 handle_IRQ(irq_linear_revmap(intc.domain, irq), regs); in bcm2835_handle_irq()
irq-vt8500.c
77 struct irq_domain *domain; /* Domain for this controller */ member
86 struct vt8500_irq_data *priv = d->domain->host_data; in vt8500_irq_mask()
107 struct vt8500_irq_data *priv = d->domain->host_data; in vt8500_irq_unmask()
118 struct vt8500_irq_data *priv = d->domain->host_data; in vt8500_irq_set_type()
201 handle_domain_irq(intc[i].domain, irqnr, regs); in vt8500_handle_irq()
218 intc[active_cnt].domain = irq_domain_add_linear(node, 64, in vt8500_irq_init()
226 if (!intc[active_cnt].domain) { in vt8500_irq_init()
irq-imgpdc.c
81 struct irq_domain *domain; member
121 return (struct pdc_intc_priv *)data->domain->host_data; in irqd_to_priv()
238 irq_no = irq_linear_revmap(priv->domain, i); in pdc_intc_perip_isr()
259 irq_no = irq_linear_revmap(priv->domain, in pdc_intc_syswake_isr()
384 priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops, in pdc_intc_probe()
386 if (unlikely(!priv->domain)) { in pdc_intc_probe()
396 ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc", in pdc_intc_probe()
404 gc = irq_get_domain_generic_chip(priv->domain, 0); in pdc_intc_probe()
418 gc = irq_get_domain_generic_chip(priv->domain, 8); in pdc_intc_probe()
469 irq_domain_remove(priv->domain); in pdc_intc_probe()
[all …]
irq-metag.c
29 struct irq_domain *domain; member
239 irq_no = irq_linear_revmap(priv->domain, hw); in metag_internal_irq_demux()
270 if (!priv->domain) in internal_irq_map()
272 return irq_create_mapping(priv->domain, hw); in internal_irq_map()
330 priv->domain = irq_domain_add_linear(NULL, 32, in init_internal_IRQ()
333 if (unlikely(!priv->domain)) { in init_internal_IRQ()
irq-dw-apb-ictl.c
73 struct irq_domain *domain; in dw_apb_ictl_init() local
123 domain = irq_domain_add_linear(np, nrirqs, in dw_apb_ictl_init()
125 if (!domain) { in dw_apb_ictl_init()
131 ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1, in dw_apb_ictl_init()
140 gc = irq_get_domain_generic_chip(domain, 0); in dw_apb_ictl_init()
141 gc->private = domain; in dw_apb_ictl_init()
irq-crossbar.c
79 static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, in allocate_gic_irq() argument
98 args.np = domain->parent->of_node; in allocate_gic_irq()
104 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); in allocate_gic_irq()
154 static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq, in crossbar_domain_free() argument
161 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); in crossbar_domain_free()
334 struct irq_domain *parent_domain, *domain; in irqcrossbar_init() local
352 domain = irq_domain_add_hierarchy(parent_domain, 0, in irqcrossbar_init()
356 if (!domain) { in irqcrossbar_init()
irq-bcm7120-l2.c
47 struct irq_domain *domain; member
66 irq_get_domain_generic_chip(b->domain, base); in bcm7120_l2_intc_irq_handle()
76 generic_handle_irq(irq_find_mapping(b->domain, in bcm7120_l2_intc_irq_handle()
245 data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words, in bcm7120_l2_intc_probe()
247 if (!data->domain) { in bcm7120_l2_intc_probe()
259 ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1, in bcm7120_l2_intc_probe()
271 gc = irq_get_domain_generic_chip(data->domain, irq); in bcm7120_l2_intc_probe()
302 irq_domain_remove(data->domain); in bcm7120_l2_intc_probe()
irq-orion.c
144 struct irq_domain *domain; in orion_bridge_irq_init() local
151 domain = irq_domain_add_linear(np, nrirqs, in orion_bridge_irq_init()
153 if (!domain) { in orion_bridge_irq_init()
158 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, in orion_bridge_irq_init()
183 gc = irq_get_domain_generic_chip(domain, 0); in orion_bridge_irq_init()
201 irq_set_handler_data(irq, domain); in orion_bridge_irq_init()
irq-clps711x.c
73 struct irq_domain *domain; member
85 handle_domain_irq(clps711x_intc->domain, in clps711x_irqh()
91 handle_domain_irq(clps711x_intc->domain, in clps711x_irqh()
191 clps711x_intc->domain = in _clps711x_intc_init()
194 if (!clps711x_intc->domain) { in _clps711x_intc_init()
199 irq_set_default_host(clps711x_intc->domain); in _clps711x_intc_init()
irq-gic-v3-its.c
64 struct irq_domain *domain; member
1243 static int its_msi_prepare(struct irq_domain *domain, struct device *dev, in its_msi_prepare() argument
1259 its = domain->parent->host_data; in its_msi_prepare()
1295 static int its_irq_gic_domain_alloc(struct irq_domain *domain, in its_irq_gic_domain_alloc() argument
1301 args.np = domain->parent->of_node; in its_irq_gic_domain_alloc()
1307 return irq_domain_alloc_irqs_parent(domain, virq, 1, &args); in its_irq_gic_domain_alloc()
1310 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in its_irq_domain_alloc() argument
1324 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); in its_irq_domain_alloc()
1328 irq_domain_set_hwirq_and_chip(domain, virq + i, in its_irq_domain_alloc()
1338 static void its_irq_domain_activate(struct irq_domain *domain, in its_irq_domain_activate() argument
[all …]
irq-versatile-fpga.c
45 struct irq_domain *domain; member
82 generic_handle_irq(irq_find_mapping(f->domain, irq)); in fpga_irq_handle()
99 handle_domain_irq(f->domain, irq, regs); in handle_one_fpga()
164 f->domain = irq_domain_add_simple(node, fls(valid), irq_start, in fpga_irq_init()
171 irq_create_mapping(f->domain, i); in fpga_irq_init()
irq-gic-v3.c
46 struct irq_domain *domain; member
346 err = handle_domain_irq(gic_data.domain, irqnr, regs); in gic_handle_irq()
750 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in gic_irq_domain_alloc() argument
758 ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, in gic_irq_domain_alloc()
764 gic_irq_domain_map(domain, virq + i, hwirq + i); in gic_irq_domain_alloc()
769 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, in gic_irq_domain_free() argument
775 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); in gic_irq_domain_free()
857 gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, in gic_of_init()
861 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { in gic_of_init()
869 its_init(node, &gic_data.rdists, gic_data.domain); in gic_of_init()
[all …]
irq-omap-intc.c
70 static struct irq_domain *domain; variable
255 domain = irq_domain_add_linear(node, omap_nr_irqs, in omap_init_irq_of()
260 ret = omap_alloc_gc_of(domain, omap_irq_base); in omap_init_irq_of()
262 irq_domain_remove(domain); in omap_init_irq_of()
281 domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0, in omap_init_irq_legacy()
364 handle_domain_irq(domain, irqnr, regs); in omap_intc_handle_irq()
irq-s3c24xx.c
74 struct irq_domain *domain; member
108 irqno = irq_find_mapping(parent_intc->domain, in s3c_irq_mask()
128 irqno = irq_find_mapping(parent_intc->domain, in s3c_irq_unmask()
317 offset = (intc->domain->of_node) ? 32 : 0; in s3c_irq_demux()
330 irq = irq_find_mapping(sub_intc->domain, offset + n); in s3c_irq_demux()
348 if (!intc->domain->of_node) in s3c24xx_handle_intc()
367 handle_domain_irq(intc->domain, intc_offset + offset, regs); in s3c24xx_handle_intc()
486 irqno = irq_find_mapping(parent_intc->domain, in s3c24xx_irq_map()
601 intc->domain = irq_domain_add_legacy(np, irq_num, irq_start, in s3c24xx_init_intc()
604 if (!intc->domain) { in s3c24xx_init_intc()
[all …]
irq-vic.c
83 struct irq_domain *domain; member
222 handle_domain_irq(vic->domain, irq, regs); in handle_one_vic()
239 generic_handle_irq(irq_find_mapping(vic->domain, hwirq)); in vic_handle_irq_cascaded()
304 v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, in vic_register()
309 irq_create_mapping(v->domain, i); in vic_register()
314 v->irq = irq_find_mapping(v->domain, 0); in vic_register()
irq-gic.c
68 struct irq_domain *domain; member
275 handle_domain_irq(gic->domain, irqnr, regs); in gic_handle_irq()
314 cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); in gic_handle_cascade_irq()
859 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, in gic_irq_domain_alloc() argument
867 ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, in gic_irq_domain_alloc()
873 gic_irq_domain_map(domain, virq + i, hwirq + i); in gic_irq_domain_alloc()
957 gic->domain = irq_domain_add_linear(node, gic_irqs, in gic_init_bases()
983 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, in gic_init_bases()
987 if (WARN_ON(!gic->domain)) in gic_init_bases()
1036 gicv2m_of_init(node, gic_data[gic_cnt].domain); in gic_of_init()
[all …]
irq-hip04.c
58 struct irq_domain *domain; member
182 handle_domain_irq(hip04_data.domain, irqnr, regs); in hip04_handle_irq()
407 hip04_data.domain = irq_domain_add_legacy(node, nr_irqs, irq_base, in hip04_of_init()
412 if (WARN_ON(!hip04_data.domain)) in hip04_of_init()
irq-bcm7038-l1.c
45 struct irq_domain *domain; member
145 generic_handle_irq(irq_find_mapping(intc->domain, in bcm7038_l1_irq_handle()
307 intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words, in bcm7038_l1_of_init()
310 if (!intc->domain) { in bcm7038_l1_of_init()
irq-metag-ext.c
36 struct irq_domain *domain; member
471 irq_no = irq_linear_revmap(priv->domain, hw); in meta_intc_irq_demux()
643 i = irq_linear_revmap(priv->domain, hw); in meta_intc_suspend()
708 i = irq_linear_revmap(priv->domain, hw); in meta_intc_resume()
850 priv->domain = irq_domain_add_linear(node, priv->nr_banks*32, in init_external_IRQ()
852 if (unlikely(!priv->domain)) { in init_external_IRQ()
/linux-4.1.27/Documentation/devicetree/bindings/power/
power_domain.txt
7 This device tree binding can be used to bind PM domain consumer devices with
8 their PM domains provided by PM domain providers. A PM domain provider can be
11 phandle arguments (so called PM domain specifiers) of length specified by the
12 #power-domain-cells property in the PM domain provider node.
14 ==PM domain providers==
17 - #power-domain-cells : Number of cells in a PM domain specifier;
18 Typically 0 for nodes representing a single PM domain and 1 for nodes
23 - power-domains : A phandle and PM domain specifier as defined by bindings of
25 Some power domains might be powered from another power domain (or have
27 a standard PM domain consumer binding is used. When provided, all domains
[all …]
renesas,sysc-rmobile.txt
23 - pm-domains: This node contains a hierarchy of PM domain nodes, which should
30 Each of the PM domain nodes represents a PM domain, as documented by the
31 generic PM domain bindings in
38 - #power-domain-cells: Must be 0.
41 - reg: If the PM domain is not always-on, this property must contain the bit
47 If the PM domain is always-on, this property must be omitted.
52 This shows a subset of the r8a7740 PM domain hierarchy, containing the
53 C5 "always-on" domain, 2 of its subdomains (A4S and A4SU), and the A3SP domain,
64 #power-domain-cells = <0>;
70 #power-domain-cells = <0>;
[all …]
fsl,imx-gpc.txt
13 - pu-supply: Link to the LDO regulator powering the PU power domain
14 - clocks: Clock phandles to devices in the PU power domain that need
15 to be enabled during domain power-up for reset propagation.
16 - #power-domain-cells: Should be 1, see below:
18 The gpc node is a power-controller as documented by the generic power domain
35 #power-domain-cells = <1>;
39 Specifying power domain for IP modules
42 IP cores belonging to a power domain should contain a 'power-domains' property
44 the power domain the device belongs to.
46 Example of a device that is part of the PU power domain:
rockchip-io-domain.txt
4 IO domain voltages on some Rockchip SoCs are variable but need to be
34 - "rockchip,rk3188-io-voltage-domain" for rk3188
35 - "rockchip,rk3288-io-voltage-domain" for rk3288
71 compatible = "rockchip,rk3288-io-voltage-domain";
/linux-4.1.27/kernel/
async.c
78 struct async_domain *domain; member
85 static async_cookie_t lowest_in_progress(struct async_domain *domain) in lowest_in_progress() argument
93 if (domain) in lowest_in_progress()
94 pending = &domain->pending; in lowest_in_progress()
148 static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain) in __async_schedule() argument
176 entry->domain = domain; in __async_schedule()
183 list_add_tail(&entry->domain_list, &domain->pending); in __async_schedule()
184 if (domain->registered) in __async_schedule()
226 struct async_domain *domain) in async_schedule_domain() argument
228 return __async_schedule(func, data, domain); in async_schedule_domain()
[all …]
/linux-4.1.27/drivers/gpu/drm/msm/
msm_iommu.c
23 struct iommu_domain *domain; member
37 return iommu_attach_device(iommu->domain, mmu->dev); in msm_iommu_attach()
43 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
50 struct iommu_domain *domain = iommu->domain; in msm_iommu_map() local
56 if (!domain || !sgt) in msm_iommu_map()
65 ret = iommu_map(domain, da, pa, bytes, prot); in msm_iommu_map()
79 iommu_unmap(domain, da, bytes); in msm_iommu_map()
89 struct iommu_domain *domain = iommu->domain; in msm_iommu_unmap() local
98 unmapped = iommu_unmap(domain, da, bytes); in msm_iommu_unmap()
115 iommu_domain_free(iommu->domain); in msm_iommu_destroy()
[all …]
msm_gem.c
289 if (!msm_obj->domain[id].iova) { in msm_gem_get_iova_locked()
306 msm_obj->domain[id].iova = offset; in msm_gem_get_iova_locked()
308 msm_obj->domain[id].iova = physaddr(obj); in msm_gem_get_iova_locked()
313 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova_locked()
327 if (msm_obj->domain[id].iova) { in msm_gem_get_iova()
328 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova()
344 WARN_ON(!msm_obj->domain[id].iova); in msm_gem_iova()
345 return msm_obj->domain[id].iova; in msm_gem_iova()
525 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { in msm_gem_free_object()
527 if (mmu && msm_obj->domain[id].iova) { in msm_gem_free_object()
[all …]
/linux-4.1.27/arch/ia64/kernel/
irq_ia64.c
80 .domain = CPU_MASK_NONE
106 static inline int find_unassigned_vector(cpumask_t domain) in find_unassigned_vector() argument
111 cpumask_and(&mask, &domain, cpu_online_mask); in find_unassigned_vector()
117 cpumask_and(&mask, &domain, &vector_table[vector]); in find_unassigned_vector()
125 static int __bind_irq_vector(int irq, int vector, cpumask_t domain) in __bind_irq_vector() argument
134 cpumask_and(&mask, &domain, cpu_online_mask); in __bind_irq_vector()
137 if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain)) in __bind_irq_vector()
144 cfg->domain = domain; in __bind_irq_vector()
146 cpumask_or(&vector_table[vector], &vector_table[vector], &domain); in __bind_irq_vector()
150 int bind_irq_vector(int irq, int vector, cpumask_t domain) in bind_irq_vector() argument
[all …]
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/device/
ctrl.c
73 struct nvkm_domain *domain; in nvkm_control_mthd_pstate_attr() local
93 domain = clk->domains; in nvkm_control_mthd_pstate_attr()
95 while (domain->name != nv_clk_src_max) { in nvkm_control_mthd_pstate_attr()
96 if (domain->mname && ++j == args->v0.index) in nvkm_control_mthd_pstate_attr()
98 domain++; in nvkm_control_mthd_pstate_attr()
101 if (domain->name == nv_clk_src_max) in nvkm_control_mthd_pstate_attr()
110 lo = pstate->base.domain[domain->name]; in nvkm_control_mthd_pstate_attr()
113 lo = min(lo, cstate->domain[domain->name]); in nvkm_control_mthd_pstate_attr()
114 hi = max(hi, cstate->domain[domain->name]); in nvkm_control_mthd_pstate_attr()
119 lo = max(clk->read(clk, domain->name), 0); in nvkm_control_mthd_pstate_attr()
[all …]
/linux-4.1.27/Documentation/scheduler/
Dsched-stats.txt9 per-domain. Note that domains (and their associated information) will only
12 In version 14 of schedstat, there is at least one level of domain
14 domain. Domains have no particular names in this implementation, but
16 cpus on the machine, while domain0 is the most tightly focused domain,
18 are no architectures which need more than three domain levels. The first
19 field in the domain stats is a bit map indicating which cpus are affected
20 by that domain.
59 One of these is produced per domain for each cpu described. (Note that if
63 domain<N> <cpumask> 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 3…
65 The first field is a bit mask indicating what cpus this domain operates over.
[all …]
sched-domains.txt
1 Each CPU has a "base" scheduling domain (struct sched_domain). The domain
3 MUST be NULL terminated, and domain structures should be per-CPU as they are
6 Each scheduling domain spans a number of CPUs (stored in the ->span field).
7 A domain's span MUST be a superset of it child's span (this restriction could
8 be relaxed if the need arises), and a base domain for CPU i MUST span at least
9 i. The top domain for each CPU will generally span all CPUs in the system
12 explicitly set. A sched domain's span means "balance process load among these
15 Each scheduling domain must have one or more CPU groups (struct sched_group)
18 domain's span. The intersection of cpumasks from any two of these groups
20 contain the CPU to which the domain belongs. Groups may be shared among
[all …]
/linux-4.1.27/Documentation/devicetree/bindings/arm/exynos/
power_domain.txt
8 * samsung,exynos4210-pd - for exynos4210 type power domain.
11 - #power-domain-cells: number of cells in power domain specifier;
16 devices in this power domain are set to oscclk before power gating
17 and restored back after powering on a domain. This is required for
23 devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
26 the power domain. These clock should be enabled during power
27 domain on/off operations.
28 - power-domains: phandle pointing to the parent power domain, for more details
32 defined with a phandle to respective power domain.
36 lcd0: power-domain-lcd0 {
[all …]
/linux-4.1.27/net/netlabel/
netlabel_domainhash.c
100 kfree(ptr->domain); in netlbl_domhsh_free_entry()
140 static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) in netlbl_domhsh_search() argument
146 if (domain != NULL) { in netlbl_domhsh_search()
147 bkt = netlbl_domhsh_hash(domain); in netlbl_domhsh_search()
150 if (iter->valid && strcmp(iter->domain, domain) == 0) in netlbl_domhsh_search()
170 static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) in netlbl_domhsh_search_def() argument
174 entry = netlbl_domhsh_search(domain); in netlbl_domhsh_search_def()
211 entry->domain ? entry->domain : "(default)"); in netlbl_domhsh_audit_add()
387 if (entry->domain != NULL) in netlbl_domhsh_add()
388 entry_old = netlbl_domhsh_search(entry->domain); in netlbl_domhsh_add()
[all …]
netlabel_domainhash.h
72 char *domain; member
90 int netlbl_domhsh_remove_af4(const char *domain,
94 int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
96 struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
97 struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
100 struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
netlabel_kapi.c
68 int netlbl_cfg_map_del(const char *domain, in netlbl_cfg_map_del() argument
75 return netlbl_domhsh_remove(domain, audit_info); in netlbl_cfg_map_del()
79 return netlbl_domhsh_remove_af4(domain, addr, mask, in netlbl_cfg_map_del()
102 int netlbl_cfg_unlbl_map_add(const char *domain, in netlbl_cfg_unlbl_map_add() argument
117 if (domain != NULL) { in netlbl_cfg_unlbl_map_add()
118 entry->domain = kstrdup(domain, GFP_ATOMIC); in netlbl_cfg_unlbl_map_add()
119 if (entry->domain == NULL) in netlbl_cfg_unlbl_map_add()
189 kfree(entry->domain); in netlbl_cfg_unlbl_map_add()
330 const char *domain, in netlbl_cfg_cipsov4_map_add() argument
348 if (domain != NULL) { in netlbl_cfg_cipsov4_map_add()
[all …]
netlabel_mgmt.c
106 entry->domain = kmalloc(tmp_size, GFP_KERNEL); in netlbl_mgmt_add_common()
107 if (entry->domain == NULL) { in netlbl_mgmt_add_common()
111 nla_strlcpy(entry->domain, in netlbl_mgmt_add_common()
246 kfree(entry->domain); in netlbl_mgmt_add_common()
274 if (entry->domain != NULL) { in netlbl_mgmt_listentry()
276 NLBL_MGMT_A_DOMAIN, entry->domain); in netlbl_mgmt_listentry()
412 char *domain; in netlbl_mgmt_remove() local
420 domain = nla_data(info->attrs[NLBL_MGMT_A_DOMAIN]); in netlbl_mgmt_remove()
421 return netlbl_domhsh_remove(domain, &audit_info); in netlbl_mgmt_remove()
/linux-4.1.27/arch/x86/kvm/
iommu.c
78 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_iommu_map_pages() local
82 if (!domain) in kvm_iommu_map_pages()
99 if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { in kvm_iommu_map_pages()
130 r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), in kvm_iommu_map_pages()
175 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_assign_device() local
180 if (!domain) in kvm_assign_device()
186 r = iommu_attach_device(domain, &pdev->dev); in kvm_assign_device()
215 struct iommu_domain *domain = kvm->arch.iommu_domain; in kvm_deassign_device() local
218 if (!domain) in kvm_deassign_device()
224 iommu_detach_device(domain, &pdev->dev); in kvm_deassign_device()
[all …]
/linux-4.1.27/drivers/base/
map.c
32 int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, in kobj_map() argument
56 mutex_lock(domain->lock); in kobj_map()
58 struct probe **s = &domain->probes[index % 255]; in kobj_map()
64 mutex_unlock(domain->lock); in kobj_map()
68 void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range) in kobj_unmap() argument
78 mutex_lock(domain->lock); in kobj_unmap()
81 for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) { in kobj_unmap()
91 mutex_unlock(domain->lock); in kobj_unmap()
95 struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index) in kobj_lookup() argument
102 mutex_lock(domain->lock); in kobj_lookup()
[all …]
/linux-4.1.27/net/tipc/
addr.c
123 int tipc_in_scope(u32 domain, u32 addr) in tipc_in_scope() argument
125 if (!domain || (domain == addr)) in tipc_in_scope()
127 if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */ in tipc_in_scope()
129 if (domain == tipc_zone_mask(addr)) /* domain <Z.0.0> */ in tipc_in_scope()
137 int tipc_addr_scope(u32 domain) in tipc_addr_scope() argument
139 if (likely(!domain)) in tipc_addr_scope()
141 if (tipc_node(domain)) in tipc_addr_scope()
143 if (tipc_cluster(domain)) in tipc_addr_scope()
discover.c
66 u32 domain; member
85 u32 dest_domain = b_ptr->domain; in tipc_disc_init_msg()
165 if (!tipc_in_scope(bearer->domain, onode)) in tipc_disc_rcv()
321 if (tipc_node(req->domain) && req->num_nodes) { in disc_timeout()
376 req->domain = b_ptr->domain; in tipc_disc_create()
412 req->domain = b_ptr->domain; in tipc_disc_reset()
addr.h
65 int tipc_in_scope(u32 domain, u32 addr);
66 int tipc_addr_scope(u32 domain);
/linux-4.1.27/arch/arm/mach-davinci/
psc.c
77 void davinci_psc_config(unsigned int domain, unsigned int ctlr, in davinci_psc_config() argument
107 pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain); in davinci_psc_config()
109 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); in davinci_psc_config()
111 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); in davinci_psc_config()
113 ptcmd = 1 << domain; in davinci_psc_config()
118 } while ((((epcpr >> domain) & 1) == 0)); in davinci_psc_config()
120 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); in davinci_psc_config()
122 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); in davinci_psc_config()
124 ptcmd = 1 << domain; in davinci_psc_config()
130 } while (!(((ptstat >> domain) & 1) == 0)); in davinci_psc_config()
/linux-4.1.27/drivers/clk/
clk-mb86s7x.c
35 u32 domain; member
49 u8 cntrlr, domain, port; member
60 cmd.domain = crgclk->domain; in crg_gate_control()
70 cmd.domain, cmd.port, cmd.en); in crg_gate_control()
81 cmd.domain, cmd.port, cmd.en); in crg_gate_control()
111 cmd.domain = crgclk->domain; in crg_rate_control()
119 cmd.domain, cmd.port, cmd.frequency); in crg_rate_control()
124 cmd.domain, cmd.port); in crg_rate_control()
136 cmd.domain, cmd.port, cmd.frequency); in crg_rate_control()
140 cmd.domain, cmd.port, cmd.frequency); in crg_rate_control()
[all …]
/linux-4.1.27/Documentation/devicetree/bindings/arm/ux500/
power_domain.txt
6 The implementation of PM domains for UX500 are based upon the generic PM domain
9 ==PM domain providers==
13 - #power-domain-cells : Number of cells in a power domain specifier, must be 1.
18 #power-domain-cells = <1>;
21 ==PM domain consumers==
24 - power-domains: A phandle and PM domain specifier. Below are the list of
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/clk/
Dbase.c41 u8 pstate, u8 domain, u32 input) in nvkm_clk_adjust() argument
61 if (subd && boostS.domain == domain) { in nvkm_clk_adjust()
139 struct nvkm_domain *domain = clk->domains; in nvkm_cstate_new() local
156 while (domain && domain->name != nv_clk_src_max) { in nvkm_cstate_new()
157 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { in nvkm_cstate_new()
159 domain->bios, cstepX.freq); in nvkm_cstate_new()
160 cstate->domain[domain->name] = freq; in nvkm_cstate_new()
162 domain++; in nvkm_cstate_new()
188 int khz = pstate->base.domain[nv_clk_src_mem]; in nvkm_pstate_prog()
259 u32 lo = pstate->base.domain[clock->name]; in nvkm_pstate_info()
[all …]
gk20a.c
472 .domain[nv_clk_src_gpc] = 72000,
478 .domain[nv_clk_src_gpc] = 108000,
484 .domain[nv_clk_src_gpc] = 180000,
490 .domain[nv_clk_src_gpc] = 252000,
496 .domain[nv_clk_src_gpc] = 324000,
502 .domain[nv_clk_src_gpc] = 396000,
508 .domain[nv_clk_src_gpc] = 468000,
514 .domain[nv_clk_src_gpc] = 540000,
520 .domain[nv_clk_src_gpc] = 612000,
526 .domain[nv_clk_src_gpc] = 648000,
[all …]
/linux-4.1.27/drivers/xen/xen-pciback/
Dpci_stub.c38 int domain; member
152 static struct pcistub_device *pcistub_device_find(int domain, int bus, in pcistub_device_find() argument
162 && domain == pci_domain_nr(psdev->dev->bus) in pcistub_device_find()
201 int domain, int bus, in pcistub_get_pci_dev_by_slot() argument
212 && domain == pci_domain_nr(psdev->dev->bus) in pcistub_get_pci_dev_by_slot()
321 if (pci_domain_nr(dev->bus) == pdev_id->domain in pcistub_match_one()
657 &aer_op->domain, &aer_op->bus, &aer_op->devfn); in common_process()
667 aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn); in common_process()
955 static inline int str_to_slot(const char *buf, int *domain, int *bus, in str_to_slot() argument
960 switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func, in str_to_slot()
[all …]
Dpassthrough.c20 unsigned int domain, in __xen_pcibk_get_pci_dev() argument
31 if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus) in __xen_pcibk_get_pci_dev()
50 unsigned int domain, bus, devfn; in __xen_pcibk_add_pci_dev() local
63 domain = (unsigned int)pci_domain_nr(dev->bus); in __xen_pcibk_add_pci_dev()
66 err = publish_cb(pdev, domain, bus, devfn, devid); in __xen_pcibk_add_pci_dev()
124 unsigned int domain, bus; in __xen_pcibk_publish_pci_roots() local
143 domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus); in __xen_pcibk_publish_pci_roots()
147 err = publish_root_cb(pdev, domain, bus); in __xen_pcibk_publish_pci_roots()
178 unsigned int *domain, unsigned int *bus, in __xen_pcibk_get_pcifront_dev() argument
181 *domain = pci_domain_nr(pcidev->bus); in __xen_pcibk_get_pcifront_dev()
Dpciback.h64 int domain, int bus,
86 unsigned int domain, unsigned int bus,
89 unsigned int domain, unsigned int bus);
100 unsigned int *domain, unsigned int *bus,
108 unsigned int domain, unsigned int bus,
134 xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain, in xen_pcibk_get_pci_dev() argument
138 return xen_pcibk_backend->get(pdev, domain, bus, devfn); in xen_pcibk_get_pci_dev()
150 unsigned int *domain, in xen_pcibk_get_pcifront_dev() argument
155 return xen_pcibk_backend->find(pcidev, pdev, domain, bus, in xen_pcibk_get_pcifront_dev()
Dxenbus.c202 unsigned int domain, unsigned int bus, in xen_pcibk_publish_pci_dev() argument
217 "%04x:%02x:%02x.%02x", domain, bus, in xen_pcibk_publish_pci_dev()
225 int domain, int bus, int slot, int func, in xen_pcibk_export_device() argument
232 domain, bus, slot, func); in xen_pcibk_export_device()
234 dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func); in xen_pcibk_export_device()
241 domain, bus, slot, func); in xen_pcibk_export_device()
272 int domain, int bus, int slot, int func) in xen_pcibk_remove_device() argument
278 domain, bus, slot, func); in xen_pcibk_remove_device()
280 dev = xen_pcibk_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func)); in xen_pcibk_remove_device()
285 domain, bus, slot, func); in xen_pcibk_remove_device()
[all …]
Dvpci.c30 unsigned int domain, in __xen_pcibk_get_pci_dev() argument
38 if (domain != 0 || bus != 0) in __xen_pcibk_get_pci_dev()
231 unsigned int *domain, unsigned int *bus, in __xen_pcibk_get_pcifront_dev() argument
250 *domain = 0; in __xen_pcibk_get_pcifront_dev()
/linux-4.1.27/drivers/pci/pcie/aer/
Daer_inject.c44 u16 domain; member
49 u16 domain; member
77 static void aer_error_init(struct aer_error *err, u16 domain, in aer_error_init() argument
82 err->domain = domain; in aer_error_init()
89 static struct aer_error *__find_aer_error(u16 domain, unsigned int bus, in __find_aer_error() argument
95 if (domain == err->domain && in __find_aer_error()
106 int domain = pci_domain_nr(dev->bus); in __find_aer_error_by_dev() local
107 if (domain < 0) in __find_aer_error_by_dev()
109 return __find_aer_error((u16)domain, dev->bus->number, dev->devfn); in __find_aer_error_by_dev()
191 int domain; in pci_read_aer() local
[all …]
Daerdrv_core.c548 u16 domain; member
563 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn, in aer_recover_queue() argument
570 .domain = domain, in aer_recover_queue()
580 domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); in aer_recover_queue()
591 pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus, in aer_recover_work_func()
595 entry.domain, entry.bus, in aer_recover_work_func()
/linux-4.1.27/arch/powerpc/perf/
Dhv-24x7.c30 static const char *event_domain_suffix(unsigned domain) in event_domain_suffix() argument
32 switch (domain) { in event_domain_suffix()
39 WARN(1, "unknown domain %d\n", domain); in event_domain_suffix()
44 static bool domain_is_valid(unsigned domain) in domain_is_valid() argument
46 switch (domain) { in domain_is_valid()
58 static bool is_physical_domain(unsigned domain) in is_physical_domain() argument
60 switch (domain) { in is_physical_domain()
71 static bool catalog_entry_domain_is_valid(unsigned domain) in catalog_entry_domain_is_valid() argument
73 return is_physical_domain(domain); in catalog_entry_domain_is_valid()
101 EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
[all …]
/linux-4.1.27/drivers/pci/
Dxen-pcifront.c66 unsigned int domain, unsigned int bus, in pcifront_init_sd() argument
71 sd->sd.domain = domain; in pcifront_init_sd()
185 .domain = pci_domain_nr(bus), in pcifront_bus_read()
223 .domain = pci_domain_nr(bus), in pcifront_bus_write()
256 .domain = pci_domain_nr(dev->bus), in pci_frontend_enable_msix()
310 .domain = pci_domain_nr(dev->bus), in pci_frontend_disable_msix()
329 .domain = pci_domain_nr(dev->bus), in pci_frontend_enable_msi()
358 .domain = pci_domain_nr(dev->bus), in pci_frontend_disable_msi()
419 unsigned int domain, unsigned int bus, in pcifront_scan_bus() argument
440 "%04x:%02x:%02x.%d found.\n", domain, bus, in pcifront_scan_bus()
[all …]
Dmsi.c42 struct irq_domain *domain = NULL; in pci_msi_get_domain() local
45 domain = dev->bus->msi->domain; in pci_msi_get_domain()
46 if (!domain) in pci_msi_get_domain()
47 domain = arch_get_pci_msi_domain(dev); in pci_msi_get_domain()
49 return domain; in pci_msi_get_domain()
54 struct irq_domain *domain; in pci_msi_setup_msi_irqs() local
56 domain = pci_msi_get_domain(dev); in pci_msi_setup_msi_irqs()
57 if (domain) in pci_msi_setup_msi_irqs()
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); in pci_msi_setup_msi_irqs()
65 struct irq_domain *domain; in pci_msi_teardown_msi_irqs() local
[all …]
Dsearch.c131 struct pci_bus *pci_find_bus(int domain, int busnr) in pci_find_bus() argument
137 if (pci_domain_nr(bus) != domain) in pci_find_bus()
220 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, in pci_get_domain_bus_and_slot() argument
226 if (pci_domain_nr(dev->bus) == domain && in pci_get_domain_bus_and_slot()
/linux-4.1.27/drivers/gpu/drm/nouveau/
Dnouveau_platform.c104 gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type); in nouveau_platform_probe_iommu()
105 if (IS_ERR(gpu->iommu.domain)) in nouveau_platform_probe_iommu()
113 pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap; in nouveau_platform_probe_iommu()
125 err = iommu_attach_device(gpu->iommu.domain, dev); in nouveau_platform_probe_iommu()
140 iommu_detach_device(gpu->iommu.domain, dev); in nouveau_platform_probe_iommu()
143 iommu_domain_free(gpu->iommu.domain); in nouveau_platform_probe_iommu()
146 gpu->iommu.domain = NULL; in nouveau_platform_probe_iommu()
154 if (gpu->iommu.domain) { in nouveau_platform_remove_iommu()
156 iommu_detach_device(gpu->iommu.domain, dev); in nouveau_platform_remove_iommu()
157 iommu_domain_free(gpu->iommu.domain); in nouveau_platform_remove_iommu()
Dnouveau_gem.c176 nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain, in nouveau_gem_new() argument
185 if (domain & NOUVEAU_GEM_DOMAIN_VRAM) in nouveau_gem_new()
187 if (domain & NOUVEAU_GEM_DOMAIN_GART) in nouveau_gem_new()
189 if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU) in nouveau_gem_new()
192 if (domain & NOUVEAU_GEM_DOMAIN_COHERENT) in nouveau_gem_new()
208 nvbo->valid_domains &= domain; in nouveau_gem_new()
231 rep->domain = nvbo->valid_domains; in nouveau_gem_info()
233 rep->domain = NOUVEAU_GEM_DOMAIN_GART; in nouveau_gem_info()
235 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM; in nouveau_gem_info()
269 req->info.domain, req->info.tile_mode, in nouveau_gem_ioctl_new()
[all …]
/linux-4.1.27/Documentation/devicetree/bindings/clock/
Dkeystone-gate.txt13 - reg : psc control and domain address space
14 - reg-names : psc control and domain registers
15 - domain-id : psc domain id needed to check the transition state register
27 reg-names = "control", "domain";
28 domain-id = <0>;
/linux-4.1.27/security/tomoyo/
Dutil.c599 struct tomoyo_domain_info *domain; in tomoyo_find_domain() local
604 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { in tomoyo_find_domain()
605 if (!domain->is_deleted && in tomoyo_find_domain()
606 !tomoyo_pathcmp(&name, domain->domainname)) in tomoyo_find_domain()
607 return domain; in tomoyo_find_domain()
1003 struct tomoyo_domain_info *domain, const u8 index) in tomoyo_init_request_info() argument
1007 if (!domain) in tomoyo_init_request_info()
1008 domain = tomoyo_domain(); in tomoyo_init_request_info()
1009 r->domain = domain; in tomoyo_init_request_info()
1010 profile = domain->profile; in tomoyo_init_request_info()
[all …]
Dcommon.c974 struct tomoyo_domain_info *domain = NULL; in tomoyo_select_domain() local
988 domain = tomoyo_real_domain(p); in tomoyo_select_domain()
992 domain = tomoyo_find_domain(data + 7); in tomoyo_select_domain()
994 domain = tomoyo_find_domain_by_qid(pid); in tomoyo_select_domain()
997 head->w.domain = domain; in tomoyo_select_domain()
1003 if (domain) in tomoyo_select_domain()
1004 head->r.domain = &domain->list; in tomoyo_select_domain()
1008 if (domain && domain->is_deleted) in tomoyo_select_domain()
1066 struct tomoyo_domain_info *domain; in tomoyo_delete_domain() local
1074 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { in tomoyo_delete_domain()
[all …]
Ddomain.c160 const struct tomoyo_domain_info *domain = r->domain; in tomoyo_check_acl() local
163 const struct list_head *list = &domain->acl_info_list; in tomoyo_check_acl()
179 list = &domain->ns->acl_group[domain->group]; in tomoyo_check_acl()
545 const struct tomoyo_domain_info *domain = tomoyo_domain(); in tomoyo_assign_domain() local
546 e.profile = domain->profile; in tomoyo_assign_domain()
547 e.group = domain->group; in tomoyo_assign_domain()
603 ee->r.profile = r->domain->profile; in tomoyo_environ()
604 ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile, in tomoyo_environ()
679 struct tomoyo_domain_info *domain = NULL; in tomoyo_find_next_domain() local
796 domain = old_domain; in tomoyo_find_next_domain()
[all …]
Dtomoyo.c36 struct tomoyo_domain_info *domain = old->security; in tomoyo_cred_prepare() local
37 new->security = domain; in tomoyo_cred_prepare()
38 if (domain) in tomoyo_cred_prepare()
39 atomic_inc(&domain->users); in tomoyo_cred_prepare()
61 struct tomoyo_domain_info *domain = cred->security; in tomoyo_cred_free() local
62 if (domain) in tomoyo_cred_free()
63 atomic_dec(&domain->users); in tomoyo_cred_free()
120 struct tomoyo_domain_info *domain = bprm->cred->security; in tomoyo_bprm_check_security() local
126 if (!domain) { in tomoyo_bprm_check_security()
135 return tomoyo_check_open_permission(domain, &bprm->file->f_path, in tomoyo_bprm_check_security()
Dgc.c49 if (head->r.domain == element || head->r.group == element || in tomoyo_struct_used_by_io_buffer()
50 head->r.acl == element || &head->w.domain->list == element) in tomoyo_struct_used_by_io_buffer()
245 struct tomoyo_domain_info *domain = in tomoyo_del_domain() local
246 container_of(element, typeof(*domain), list); in tomoyo_del_domain()
254 list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) { in tomoyo_del_domain()
258 tomoyo_put_name(domain->domainname); in tomoyo_del_domain()
517 struct tomoyo_domain_info *domain; in tomoyo_collect_entry() local
519 list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list, in tomoyo_collect_entry()
521 tomoyo_collect_acl(&domain->acl_info_list); in tomoyo_collect_entry()
522 if (!domain->is_deleted || atomic_read(&domain->users)) in tomoyo_collect_entry()
[all …]
Dsecurityfs_if.c108 const char *domain = tomoyo_domain()->domainname->name; in tomoyo_read_self() local
109 loff_t len = strlen(domain); in tomoyo_read_self()
116 if (copy_to_user(buf, domain + pos, len)) in tomoyo_read_self()
/linux-4.1.27/Documentation/networking/
Dregulatory.txt16 to the kernel one regulatory domain to be used as the central
17 core regulatory domain all wireless devices should adhere to.
22 Userspace gets a regulatory domain in the kernel by having
27 is CRDA - central regulatory domain agent. It's documented here:
32 it needs a new regulatory domain. A udev rule can be put in place
33 to trigger crda to send the respective regulatory domain for a
54 # set regulatory domain to "Costa Rica"
57 This will request the kernel to set the regulatory domain to
59 to provide a regulatory domain for the alpha2 specified by the user
65 regulatory domain is required. More on this to be added
[all …]
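As a driver-side illustration of the flow described above, the hedged fragment below assumes only the cfg80211 regulatory_hint() helper; the wiphy argument and the hard-coded "CR" alpha2 stand in for whatever the hardware's EEPROM actually reports.

#include <net/cfg80211.h>

/* A wireless driver whose EEPROM says the card was programmed for Costa
 * Rica can hint that regulatory domain to the core, which then asks
 * userspace (crda) for the matching rules. */
static int example_hint_regdomain(struct wiphy *wiphy)
{
	return regulatory_hint(wiphy, "CR");	/* "CR" would come from EEPROM */
}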
/linux-4.1.27/arch/mips/ralink/
Dirq.c104 struct irq_domain *domain = irq_get_handler_data(irq); in ralink_intc_irq_handler() local
105 generic_handle_irq(irq_find_mapping(domain, __ffs(pending))); in ralink_intc_irq_handler()
152 struct irq_domain *domain; in intc_of_init() local
181 domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT, in intc_of_init()
183 if (!domain) in intc_of_init()
189 irq_set_handler_data(irq, domain); in intc_of_init()
192 rt_perfcount_irq = irq_create_mapping(domain, 9); in intc_of_init()
/linux-4.1.27/arch/x86/kernel/apic/
Dvector.c54 if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node)) in alloc_irq_cfg()
63 free_cpumask_var(cfg->domain); in alloc_irq_cfg()
95 free_cpumask_var(cfg->domain); in free_irq_cfg()
134 if (cpumask_subset(tmp_mask, cfg->domain)) { in __assign_irq_vector()
136 if (cpumask_equal(tmp_mask, cfg->domain)) in __assign_irq_vector()
143 cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask); in __assign_irq_vector()
146 cpumask_and(cfg->domain, cfg->domain, tmp_mask); in __assign_irq_vector()
178 cpumask_copy(cfg->old_domain, cfg->domain); in __assign_irq_vector()
185 cpumask_copy(cfg->domain, tmp_mask); in __assign_irq_vector()
214 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) in clear_irq_vector()
[all …]
/linux-4.1.27/drivers/gpu/drm/i915/
Dintel_uncore.c249 struct intel_uncore_forcewake_domain *domain = (void *)arg; in intel_uncore_fw_release_timer() local
252 assert_device_not_suspended(domain->i915); in intel_uncore_fw_release_timer()
254 spin_lock_irqsave(&domain->i915->uncore.lock, irqflags); in intel_uncore_fw_release_timer()
255 if (WARN_ON(domain->wake_count == 0)) in intel_uncore_fw_release_timer()
256 domain->wake_count++; in intel_uncore_fw_release_timer()
258 if (--domain->wake_count == 0) in intel_uncore_fw_release_timer()
259 domain->i915->uncore.funcs.force_wake_put(domain->i915, in intel_uncore_fw_release_timer()
260 1 << domain->id); in intel_uncore_fw_release_timer()
262 spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags); in intel_uncore_fw_release_timer()
269 struct intel_uncore_forcewake_domain *domain; in intel_uncore_forcewake_reset() local
[all …]
/linux-4.1.27/drivers/infiniband/hw/usnic/
Dusnic_uiom.c68 static int usnic_uiom_dma_fault(struct iommu_domain *domain, in usnic_uiom_dma_fault() argument
75 domain, iova, flags); in usnic_uiom_dma_fault()
209 iommu_unmap(pd->domain, va, PAGE_SIZE); in usnic_uiom_unmap_sorted_intervals()
286 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
303 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
468 void *domain; in usnic_uiom_alloc_pd() local
474 pd->domain = domain = iommu_domain_alloc(&pci_bus_type); in usnic_uiom_alloc_pd()
475 if (IS_ERR_OR_NULL(domain)) { in usnic_uiom_alloc_pd()
477 PTR_ERR(pd->domain)); in usnic_uiom_alloc_pd()
479 return ERR_PTR(domain ? PTR_ERR(domain) : -ENOMEM); in usnic_uiom_alloc_pd()
[all …]
/linux-4.1.27/drivers/gpio/
Dgpio-tb10x.c58 struct irq_domain *domain; member
155 return irq_create_mapping(tb10x_gpio->domain, offset); in tb10x_gpio_to_irq()
179 generic_handle_irq(irq_find_mapping(tb10x_gpio->domain, i)); in tb10x_gpio_irq_cascade()
249 tb10x_gpio->domain = irq_domain_add_linear(dn, in tb10x_gpio_probe()
252 if (!tb10x_gpio->domain) { in tb10x_gpio_probe()
257 ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain, in tb10x_gpio_probe()
264 gc = tb10x_gpio->domain->gc->gc[0]; in tb10x_gpio_probe()
291 irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0], in tb10x_gpio_remove()
293 kfree(tb10x_gpio->domain->gc); in tb10x_gpio_remove()
294 irq_domain_remove(tb10x_gpio->domain); in tb10x_gpio_remove()
Dgpio-dwapb.c83 struct irq_domain *domain; member
115 return irq_find_mapping(gpio->domain, offset); in dwapb_gpio_to_irq()
137 int gpio_irq = irq_find_mapping(gpio->domain, hwirq); in dwapb_do_irq()
305 gpio->domain = irq_domain_add_linear(node, ngpio, in dwapb_configure_irqs()
307 if (!gpio->domain) in dwapb_configure_irqs()
310 err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, in dwapb_configure_irqs()
316 irq_domain_remove(gpio->domain); in dwapb_configure_irqs()
317 gpio->domain = NULL; in dwapb_configure_irqs()
321 irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); in dwapb_configure_irqs()
323 irq_domain_remove(gpio->domain); in dwapb_configure_irqs()
[all …]
Dgpio-grgpio.c78 struct irq_domain *domain; member
130 return irq_create_mapping(priv->domain, offset); in grgpio_to_irq()
407 priv->domain = irq_domain_add_linear(np, gc->ngpio, in grgpio_probe()
410 if (!priv->domain) { in grgpio_probe()
444 if (priv->domain) in grgpio_probe()
445 irq_domain_remove(priv->domain); in grgpio_probe()
450 priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off"); in grgpio_probe()
464 if (priv->domain) { in grgpio_remove()
475 if (priv->domain) in grgpio_remove()
476 irq_domain_remove(priv->domain); in grgpio_remove()
Dgpio-msm-v2.c104 struct irq_domain *domain; member
185 struct irq_domain *domain = g_dev->domain; in msm_gpio_to_irq() local
187 return irq_create_mapping(domain, offset); in msm_gpio_to_irq()
325 generic_handle_irq(irq_find_mapping(msm_gpio.domain, in msm_summary_irq_handler()
422 msm_gpio.domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, in msm_gpio_probe()
425 if (!msm_gpio.domain) in msm_gpio_probe()
Dgpio-tz1090.c60 struct irq_domain *domain; member
259 if (!bank->domain) in tz1090_gpio_to_irq()
262 return irq_create_mapping(bank->domain, offset); in tz1090_gpio_to_irq()
270 return (struct tz1090_gpio_bank *)data->domain->host_data; in irqd_to_gpio_bank()
395 irq_no = irq_linear_revmap(bank->domain, hw); in tz1090_gpio_irq_handler()
466 bank->domain = irq_domain_add_linear(np, in tz1090_gpio_bank_probe()
472 err = irq_alloc_domain_generic_chips(bank->domain, bank->chip.ngpio, 2, in tz1090_gpio_bank_probe()
479 irq_domain_remove(bank->domain); in tz1090_gpio_bank_probe()
483 gc = irq_get_domain_generic_chip(bank->domain, 0); in tz1090_gpio_bank_probe()
Dgpio-mxs.c66 struct irq_domain *domain; member
172 generic_handle_irq(irq_find_mapping(port->domain, irqoffset)); in mxs_gpio_irq_handler()
227 return irq_find_mapping(port->domain, offset); in mxs_gpio_to_irq()
312 port->domain = irq_domain_add_legacy(np, 32, irq_base, 0, in mxs_gpio_probe()
314 if (!port->domain) { in mxs_gpio_probe()
/linux-4.1.27/arch/nios2/kernel/
Dirq.c77 struct irq_domain *domain; in init_IRQ() local
86 domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL); in init_IRQ()
87 BUG_ON(!domain); in init_IRQ()
89 irq_set_default_host(domain); in init_IRQ()
/linux-4.1.27/arch/x86/pci/
Dacpi.c363 int busnum, int domain, in probe_pci_root_info() argument
369 sprintf(info->name, "PCI Bus %04x:%02x", domain, busnum); in probe_pci_root_info()
394 int domain = root->segment; in pci_acpi_scan_root() local
404 domain = 0; in pci_acpi_scan_root()
406 if (domain && !pci_domains_supported) { in pci_acpi_scan_root()
409 domain, busnum); in pci_acpi_scan_root()
427 "ignored (out of memory)\n", domain, busnum); in pci_acpi_scan_root()
432 sd->domain = domain; in pci_acpi_scan_root()
436 bus = pci_find_bus(domain, busnum); in pci_acpi_scan_root()
452 probe_pci_root_info(info, device, busnum, domain, &crs_res); in pci_acpi_scan_root()
[all …]
Dintel_mid_pci.c93 unsigned int domain, busnum; in pci_device_update_fixed() local
96 domain = pci_domain_nr(bus); in pci_device_update_fixed()
102 raw_pci_ext_ops->read(domain, busnum, devfn, in pci_device_update_fixed()
125 return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4, in pci_device_update_fixed()
130 return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val); in pci_device_update_fixed()
Dcommon.c41 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, in raw_pci_read() argument
44 if (domain == 0 && reg < 256 && raw_pci_ops) in raw_pci_read()
45 return raw_pci_ops->read(domain, bus, devfn, reg, len, val); in raw_pci_read()
47 return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val); in raw_pci_read()
51 int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn, in raw_pci_write() argument
54 if (domain == 0 && reg < 256 && raw_pci_ops) in raw_pci_write()
55 return raw_pci_ops->write(domain, bus, devfn, reg, len, val); in raw_pci_write()
57 return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val); in raw_pci_write()
Dxen.c513 domid_t domain; member
535 int domain = -ENODEV; in xen_find_device_domain_owner() local
540 domain = owner->domain; in xen_find_device_domain_owner()
542 return domain; in xen_find_device_domain_owner()
546 int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) in xen_register_device_domain_owner() argument
560 owner->domain = domain; in xen_register_device_domain_owner()
/linux-4.1.27/arch/arm/mach-exynos/
Dpm_domains.c43 static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) in exynos_pd_power() argument
51 pd = container_of(domain, struct exynos_pm_domain, pd); in exynos_pd_power()
80 pr_err("Power domain %s %s failed\n", domain->name, op); in exynos_pd_power()
108 static int exynos_pd_power_on(struct generic_pm_domain *domain) in exynos_pd_power_on() argument
110 return exynos_pd_power(domain, true); in exynos_pd_power_on()
113 static int exynos_pd_power_off(struct generic_pm_domain *domain) in exynos_pd_power_off() argument
115 return exynos_pd_power(domain, false); in exynos_pd_power_off()
Dsuspend.c182 static int exynos_pmu_domain_xlate(struct irq_domain *domain, in exynos_pmu_domain_xlate() argument
189 if (domain->of_node != controller) in exynos_pmu_domain_xlate()
201 static int exynos_pmu_domain_alloc(struct irq_domain *domain, in exynos_pmu_domain_alloc() argument
218 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in exynos_pmu_domain_alloc()
222 parent_args.np = domain->parent->of_node; in exynos_pmu_domain_alloc()
223 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); in exynos_pmu_domain_alloc()
235 struct irq_domain *parent_domain, *domain; in exynos_pmu_irq_init() local
256 domain = irq_domain_add_hierarchy(parent_domain, 0, 0, in exynos_pmu_irq_init()
259 if (!domain) { in exynos_pmu_irq_init()
/linux-4.1.27/tools/testing/selftests/net/
Dsocket.c10 int domain; member
47 fd = socket(s->domain, s->type, s->protocol); in run_tests()
62 s->domain, s->type, s->protocol, in run_tests()
75 s->domain, s->type, s->protocol, in run_tests()
/linux-4.1.27/arch/mips/ath25/
Dar2315.c79 struct irq_domain *domain = irq_get_handler_data(irq); in ar2315_misc_irq_handler() local
82 misc_irq = irq_find_mapping(domain, nr); in ar2315_misc_irq_handler()
151 struct irq_domain *domain; in ar2315_arch_init_irq() local
156 domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT, in ar2315_arch_init_irq()
158 if (!domain) in ar2315_arch_init_irq()
161 irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB); in ar2315_arch_init_irq()
165 irq_set_handler_data(AR2315_IRQ_MISC, domain); in ar2315_arch_init_irq()
167 ar2315_misc_irq_domain = domain; in ar2315_arch_init_irq()
Dar5312.c83 struct irq_domain *domain = irq_get_handler_data(irq); in ar5312_misc_irq_handler() local
86 misc_irq = irq_find_mapping(domain, nr); in ar5312_misc_irq_handler()
146 struct irq_domain *domain; in ar5312_arch_init_irq() local
151 domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT, in ar5312_arch_init_irq()
153 if (!domain) in ar5312_arch_init_irq()
156 irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC); in ar5312_arch_init_irq()
160 irq_set_handler_data(AR5312_IRQ_MISC, domain); in ar5312_arch_init_irq()
162 ar5312_misc_irq_domain = domain; in ar5312_arch_init_irq()
/linux-4.1.27/drivers/gpu/drm/radeon/
Dradeon_object.c96 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) in radeon_ttm_placement_from_domain() argument
102 if (domain & RADEON_GEM_DOMAIN_VRAM) { in radeon_ttm_placement_from_domain()
121 if (domain & RADEON_GEM_DOMAIN_GTT) { in radeon_ttm_placement_from_domain()
140 if (domain & RADEON_GEM_DOMAIN_CPU) { in radeon_ttm_placement_from_domain()
180 u32 domain, u32 flags, struct sg_table *sg, in radeon_bo_create() argument
216 bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | in radeon_bo_create()
250 radeon_ttm_placement_from_domain(bo, domain); in radeon_bo_create()
322 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, in radeon_bo_pin_restricted() argument
338 if (domain == RADEON_GEM_DOMAIN_VRAM) in radeon_bo_pin_restricted()
348 radeon_ttm_placement_from_domain(bo, domain); in radeon_bo_pin_restricted()
[all …]
Dradeon_gem.c97 uint32_t domain; in radeon_gem_set_domain() local
103 domain = wdomain; in radeon_gem_set_domain()
104 if (!domain) { in radeon_gem_set_domain()
105 domain = rdomain; in radeon_gem_set_domain()
107 if (!domain) { in radeon_gem_set_domain()
112 if (domain == RADEON_GEM_DOMAIN_CPU) { in radeon_gem_set_domain()
445 args->domain = radeon_mem_type_to_domain(cur_placement); in radeon_gem_busy_ioctl()
541 unsigned domain; in radeon_gem_va_update_vm() local
559 domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type); in radeon_gem_va_update_vm()
562 if (domain == RADEON_GEM_DOMAIN_CPU) in radeon_gem_va_update_vm()
[all …]
Dradeon_object.h127 bool kernel, u32 domain, u32 flags,
135 extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
136 extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
175 unsigned size, u32 align, u32 domain,
Dradeon_cs.c139 uint32_t domain = r->write_domain ? in radeon_cs_parser_relocs() local
142 if (domain & RADEON_GEM_DOMAIN_CPU) { in radeon_cs_parser_relocs()
148 p->relocs[i].prefered_domains = domain; in radeon_cs_parser_relocs()
149 if (domain == RADEON_GEM_DOMAIN_VRAM) in radeon_cs_parser_relocs()
150 domain |= RADEON_GEM_DOMAIN_GTT; in radeon_cs_parser_relocs()
151 p->relocs[i].allowed_domains = domain; in radeon_cs_parser_relocs()
155 uint32_t domain = p->relocs[i].prefered_domains; in radeon_cs_parser_relocs() local
156 if (!(domain & RADEON_GEM_DOMAIN_GTT)) { in radeon_cs_parser_relocs()
162 domain = RADEON_GEM_DOMAIN_GTT; in radeon_cs_parser_relocs()
163 p->relocs[i].prefered_domains = domain; in radeon_cs_parser_relocs()
[all …]
Dradeon_sa.c52 unsigned size, u32 align, u32 domain, u32 flags) in radeon_sa_bo_manager_init() argument
59 sa_manager->domain = domain; in radeon_sa_bo_manager_init()
68 domain, flags, NULL, NULL, &sa_manager->bo); in radeon_sa_bo_manager_init()
112 r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr); in radeon_sa_bo_manager_start()
/linux-4.1.27/arch/arm/mach-imx/
D3ds_debugboard.c63 static struct irq_domain *domain; variable
104 generic_handle_irq(irq_find_mapping(domain, expio_irq)); in mxc_expio_irq_handler()
192 domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0, in mxc_expio_init()
194 WARN_ON(!domain); in mxc_expio_init()
208 smsc911x_resources[1].start = irq_find_mapping(domain, EXPIO_INT_ENET); in mxc_expio_init()
209 smsc911x_resources[1].end = irq_find_mapping(domain, EXPIO_INT_ENET); in mxc_expio_init()
Dgpc.c184 static int imx_gpc_domain_xlate(struct irq_domain *domain, in imx_gpc_domain_xlate() argument
191 if (domain->of_node != controller) in imx_gpc_domain_xlate()
203 static int imx_gpc_domain_alloc(struct irq_domain *domain, in imx_gpc_domain_alloc() argument
222 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, in imx_gpc_domain_alloc()
226 parent_args.np = domain->parent->of_node; in imx_gpc_domain_alloc()
227 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, &parent_args); in imx_gpc_domain_alloc()
239 struct irq_domain *parent_domain, *domain; in imx_gpc_init() local
257 domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, in imx_gpc_init()
260 if (!domain) { in imx_gpc_init()
Dmach-mx31ads.c77 static struct irq_domain *domain; variable
123 serial_platform_data[0].irq = irq_find_mapping(domain, in mxc_init_extuart()
125 serial_platform_data[1].irq = irq_find_mapping(domain, in mxc_init_extuart()
133 irq_find_mapping(domain, EXPIO_INT_ENET_INT); in mxc_init_ext_ethernet()
135 irq_find_mapping(domain, EXPIO_INT_ENET_INT); in mxc_init_ext_ethernet()
171 generic_handle_irq(irq_find_mapping(domain, expio_irq)); in mx31ads_expio_irq_handler()
235 domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0, in mx31ads_init_expio()
237 WARN_ON(!domain); in mx31ads_init_expio()
Davic.c55 static struct irq_domain *domain; variable
147 handle_domain_irq(domain, nivector, regs); in avic_handle_irq()
182 domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0, in mxc_init_irq()
184 WARN_ON(!domain); in mxc_init_irq()
Dtzic.c54 static struct irq_domain *domain; variable
144 handle_domain_irq(domain, irqofs + i * 32, regs); in tzic_handle_irq()
187 domain = irq_domain_add_legacy(np, TZIC_NUM_IRQS, irq_base, 0, in tzic_init_irq()
189 WARN_ON(!domain); in tzic_init_irq()
/linux-4.1.27/drivers/gpu/drm/qxl/
Dqxl_object.c53 void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) in qxl_ttm_placement_from_domain() argument
61 if (domain == QXL_GEM_DOMAIN_VRAM) in qxl_ttm_placement_from_domain()
63 if (domain == QXL_GEM_DOMAIN_SURFACE) in qxl_ttm_placement_from_domain()
65 if (domain == QXL_GEM_DOMAIN_CPU) in qxl_ttm_placement_from_domain()
79 unsigned long size, bool kernel, bool pinned, u32 domain, in qxl_bo_create() argument
101 bo->type = domain; in qxl_bo_create()
109 qxl_ttm_placement_from_domain(bo, domain, pinned); in qxl_bo_create()
118 size, domain); in qxl_bo_create()
227 int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) in qxl_bo_pin() argument
238 qxl_ttm_placement_from_domain(bo, domain, true); in qxl_bo_pin()
Dqxl_object.h89 bool kernel, bool pinned, u32 domain,
98 extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
100 extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
Dqxl_gem.c71 u32 domain, in qxl_gem_object_create_with_handle() argument
84 domain, in qxl_gem_object_create_with_handle()
/linux-4.1.27/Documentation/devicetree/bindings/bus/
Dsimple-pm-bus.txt7 However, its bus controller is part of a PM domain, or under the control of a
8 functional clock. Hence, the bus controller's PM domain and/or clock must be
24 Optional platform-specific properties for clock or PM domain control (at least
27 - power-domains: Must contain a reference to the PM domain.
28 Please refer to the binding documentation for the clock and/or PM domain
Drenesas,bsc.txt10 domain, and may have a gateable functional clock.
11 Before a device connected to the BSC can be accessed, the PM domain
31 - power-domains: Must contain a reference to the PM domain, if available.
/linux-4.1.27/Documentation/devicetree/bindings/pci/
Dpci.txt14 - linux,pci-domain:
15 If present this property assigns a fixed PCI domain number to a host bridge,
18 host bridges in the system, otherwise potentially conflicting domain numbers
19 may be assigned to root buses behind different host bridges. The domain
Dbrcm,iproc-pcie.txt9 - linux,pci-domain: PCI domain ID. Should be unique for each host controller
29 linux,pci-domain = <0>;
51 linux,pci-domain = <1>;
/linux-4.1.27/include/net/
Dnetlabel.h208 char *domain; member
318 kfree(secattr->domain); in netlbl_secattr_destroy()
357 int netlbl_cfg_map_del(const char *domain,
362 int netlbl_cfg_unlbl_map_add(const char *domain,
384 const char *domain,
445 static inline int netlbl_cfg_map_del(const char *domain, in netlbl_cfg_map_del() argument
453 static inline int netlbl_cfg_unlbl_map_add(const char *domain, in netlbl_cfg_unlbl_map_add() argument
491 const char *domain, in netlbl_cfg_cipsov4_map_add() argument
/linux-4.1.27/drivers/sh/intc/
Dirqdomain.c62 d->domain = irq_domain_add_linear(NULL, hw->nr_vectors, in intc_irq_domain_init()
65 d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL); in intc_irq_domain_init()
67 BUG_ON(!d->domain); in intc_irq_domain_init()
/linux-4.1.27/arch/arm/include/asm/
Ddomain.h74 unsigned int domain = thread->cpu_domain; \
75 domain &= ~domain_val(dom, DOMAIN_MANAGER); \
76 thread->cpu_domain = domain | domain_val(dom, type); \
/linux-4.1.27/tools/power/cpupower/utils/helpers/
Dpci.c25 struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain, int bus, in pci_acc_init() argument
36 filter_nb_link.domain = domain; in pci_acc_init()
/linux-4.1.27/arch/arm/mach-omap2/
Domap-wakeupgen.c402 static int wakeupgen_domain_xlate(struct irq_domain *domain, in wakeupgen_domain_xlate() argument
409 if (domain->of_node != controller) in wakeupgen_domain_xlate()
421 static int wakeupgen_domain_alloc(struct irq_domain *domain, in wakeupgen_domain_alloc() argument
440 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, in wakeupgen_domain_alloc()
444 parent_args.np = domain->parent->of_node; in wakeupgen_domain_alloc()
445 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); in wakeupgen_domain_alloc()
460 struct irq_domain *parent_domain, *domain; in wakeupgen_init() local
495 domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs, in wakeupgen_init()
498 if (!domain) { in wakeupgen_init()
Dcm2xxx_3xxx.h78 static inline u32 omap2_cm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask) in omap2_cm_read_mod_bits_shift() argument
82 v = omap2_cm_read_mod_reg(domain, idx); in omap2_cm_read_mod_bits_shift()
/linux-4.1.27/drivers/pinctrl/
Dpinctrl-adi2.c128 struct irq_domain *domain[2]; member
135 u8 map, struct irq_domain *domain);
188 struct irq_domain *domain; member
542 struct irq_domain *domain; in adi_gpio_handle_pint_irq() local
551 domain = pint->domain[0]; in adi_gpio_handle_pint_irq()
557 domain = pint->domain[1]; in adi_gpio_handle_pint_irq()
564 generic_handle_irq(irq_find_mapping(domain, in adi_gpio_handle_pint_irq()
802 return irq_find_mapping(port->domain, offset); in adi_gpio_to_irq()
804 return irq_create_mapping(port->domain, offset); in adi_gpio_to_irq()
808 struct irq_domain *domain) in adi_pint_map_port() argument
[all …]
/linux-4.1.27/drivers/mfd/
Dlp8788-irq.c42 struct irq_domain *domain; member
127 handle_nested_irq(irq_find_mapping(irqd->domain, i)); in lp8788_irq_handler()
173 irqd->domain = irq_domain_add_linear(lp->dev->of_node, LP8788_INT_MAX, in lp8788_irq_init()
175 if (!irqd->domain) { in lp8788_irq_init()
180 lp->irqdm = irqd->domain; in lp8788_irq_init()
Dmax8998-irq.c225 struct irq_domain *domain; in max8998_irq_init() local
245 domain = irq_domain_add_simple(NULL, MAX8998_IRQ_NR, in max8998_irq_init()
247 if (!domain) { in max8998_irq_init()
251 max8998->irq_domain = domain; in max8998_irq_init()
Dmfd-core.c122 int irq_base, struct irq_domain *domain) in mfd_add_device() argument
191 if (domain) { in mfd_add_device()
196 domain, cell->resources[r].start); in mfd_add_device()
246 int irq_base, struct irq_domain *domain) in mfd_add_devices() argument
260 irq_base, domain); in mfd_add_devices()
Dtc3589x.c190 int virq = irq_create_mapping(tc3589x->domain, bit); in tc3589x_irq()
244 tc3589x->domain = irq_domain_add_simple( in tc3589x_irq_init()
248 if (!tc3589x->domain) { in tc3589x_irq_init()
299 0, tc3589x->domain); in tc3589x_device_init()
310 0, tc3589x->domain); in tc3589x_device_init()
/linux-4.1.27/drivers/remoteproc/
Dremoteproc_core.c75 static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev, in rproc_iommu_fault() argument
93 struct iommu_domain *domain; in rproc_enable_iommu() local
102 domain = iommu_domain_alloc(dev->bus); in rproc_enable_iommu()
103 if (!domain) { in rproc_enable_iommu()
108 iommu_set_fault_handler(domain, rproc_iommu_fault, rproc); in rproc_enable_iommu()
110 ret = iommu_attach_device(domain, dev); in rproc_enable_iommu()
116 rproc->domain = domain; in rproc_enable_iommu()
121 iommu_domain_free(domain); in rproc_enable_iommu()
127 struct iommu_domain *domain = rproc->domain; in rproc_disable_iommu() local
130 if (!domain) in rproc_disable_iommu()
[all …]
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/
Dgk20a.c90 struct iommu_domain *domain; member
177 iommu_unmap(priv->domain, in gk20a_instobj_dtor_iommu()
194 if (priv->domain) in gk20a_instobj_dtor()
288 ret = iommu_map(priv->domain, offset, page_to_phys(p), in gk20a_instobj_ctor_iommu()
295 iommu_unmap(priv->domain, offset, PAGE_SIZE); in gk20a_instobj_ctor_iommu()
335 priv->domain ? "IOMMU" : "DMA", args->size, args->align); in gk20a_instobj_ctor()
341 if (priv->domain) in gk20a_instobj_ctor()
406 if (plat->gpu->iommu.domain) { in gk20a_instmem_ctor()
407 priv->domain = plat->gpu->iommu.domain; in gk20a_instmem_ctor()
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp5/
Dmdp5_irq.c106 mdp5_kms->irqcontroller.domain, hwirq)); in mdp5_irq()
192 mdp5_kms->irqcontroller.domain = d; in mdp5_irq_domain_init()
199 if (mdp5_kms->irqcontroller.domain) { in mdp5_irq_domain_fini()
200 irq_domain_remove(mdp5_kms->irqcontroller.domain); in mdp5_irq_domain_fini()
201 mdp5_kms->irqcontroller.domain = NULL; in mdp5_irq_domain_fini()
/linux-4.1.27/Documentation/tpm/
Dxen-tpmfront.txt15 of the vTPM's secrets (Keys, NVRAM, etc) are managed by a vTPM Manager domain,
19 major component of vTPM is implemented as a separate domain, providing secure
77 * vtpm-stubdom: A mini-os stub domain that implements a vTPM. There is a
82 * mini-os/tpmfront: Mini-os TPM frontend driver. The vTPM mini-os domain
85 domains such as pv-grub that talk to the vTPM domain.
87 * vtpmmgr-stubdom: A mini-os domain that implements the vTPM manager. There is
89 entire lifetime of the machine. This domain regulates
108 domain's configuration file.
/linux-4.1.27/arch/arm/mm/
Dmmu.c254 .domain = DOMAIN_IO,
260 .domain = DOMAIN_IO,
266 .domain = DOMAIN_IO,
272 .domain = DOMAIN_IO,
278 .domain = DOMAIN_IO,
282 .domain = DOMAIN_KERNEL,
287 .domain = DOMAIN_KERNEL,
294 .domain = DOMAIN_USER,
300 .domain = DOMAIN_USER,
306 .domain = DOMAIN_KERNEL,
[all …]
/linux-4.1.27/arch/mips/kernel/
Dirq_cpu.c147 struct irq_domain *domain; in __mips_cpu_irq_init() local
153 domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0, in __mips_cpu_irq_init()
155 if (!domain) in __mips_cpu_irq_init()
Di8259.c332 struct irq_domain *domain; in init_i8259_irqs() local
339 domain = irq_domain_add_legacy(NULL, 16, I8259A_IRQ_BASE, 0, in init_i8259_irqs()
341 if (!domain) in init_i8259_irqs()
/linux-4.1.27/arch/ia64/include/asm/
Dhw_irq.h105 cpumask_t domain; member
112 #define irq_to_domain(x) irq_cfg[(x)].domain
128 extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
/linux-4.1.27/arch/arm64/kernel/
Dpci.c54 int raw_pci_read(unsigned int domain, unsigned int bus, in raw_pci_read() argument
60 int raw_pci_write(unsigned int domain, unsigned int bus, in raw_pci_write() argument
/linux-4.1.27/drivers/base/power/
Ddomain.c86 if (&gpd->domain == dev->pm_domain) { in pm_genpd_lookup_dev()
564 if (!genpd->gov->power_down_ok(&genpd->domain)) in pm_genpd_poweroff()
1443 dev->pm_domain = &genpd->domain; in genpd_alloc_dev_data()
1908 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; in pm_genpd_init()
1909 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; in pm_genpd_init()
1910 genpd->domain.ops.prepare = pm_genpd_prepare; in pm_genpd_init()
1911 genpd->domain.ops.suspend = pm_genpd_suspend; in pm_genpd_init()
1912 genpd->domain.ops.suspend_late = pm_genpd_suspend_late; in pm_genpd_init()
1913 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; in pm_genpd_init()
1914 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; in pm_genpd_init()
[all …]
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-class-regulator153 output voltage setting for this domain measured in microvolts,
157 the power domain has no min microvolts constraint defined by
168 output voltage setting for this domain measured in microvolts,
172 the power domain has no max microvolts constraint defined by
183 output current limit setting for this domain measured in
187 the power domain has no min microamps constraint defined by
198 output current limit setting for this domain measured in
202 the power domain has no max microamps constraint defined by
255 voltage setting for this domain measured in microvolts when
266 voltage setting for this domain measured in microvolts when
[all …]
Dsysfs-firmware-sgi_uv23 domain. The coherence id indicates which coherence domain
/linux-4.1.27/arch/arm/mach-ux500/
Dpm_domains.c18 static int pd_power_off(struct generic_pm_domain *domain) in pd_power_off() argument
30 static int pd_power_on(struct generic_pm_domain *domain) in pd_power_on() argument
/linux-4.1.27/drivers/gpu/vga/
Dvga_switcheroo.c647 int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) in vga_switcheroo_init_domain_pm_ops() argument
651 domain->ops = *dev->bus->pm; in vga_switcheroo_init_domain_pm_ops()
652 domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend; in vga_switcheroo_init_domain_pm_ops()
653 domain->ops.runtime_resume = vga_switcheroo_runtime_resume; in vga_switcheroo_init_domain_pm_ops()
655 dev->pm_domain = domain; in vga_switcheroo_init_domain_pm_ops()
698 … vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) in vga_switcheroo_init_domain_pm_optimus_hdmi_audio() argument
702 domain->ops = *dev->bus->pm; in vga_switcheroo_init_domain_pm_optimus_hdmi_audio()
703 domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio; in vga_switcheroo_init_domain_pm_optimus_hdmi_audio()
705 dev->pm_domain = domain; in vga_switcheroo_init_domain_pm_optimus_hdmi_audio()
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
Dbe_cmds.c987 u32 if_id, u32 *pmac_id, u32 domain) in be_cmd_pmac_add() argument
1006 req->hdr.domain = domain; in be_cmd_pmac_add()
1049 req->hdr.domain = dom; in be_cmd_pmac_del()
1438 u32 *if_handle, u32 domain) in be_cmd_if_create() argument
1448 req->hdr.domain = domain; in be_cmd_if_create()
1467 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) in be_cmd_if_destroy() argument
1488 req->hdr.domain = domain; in be_cmd_if_destroy()
1625 req->hdr.domain = dom; in be_cmd_link_status_query()
1864 u32 num, u32 domain) in be_cmd_vlan_config() argument
1882 req->hdr.domain = domain; in be_cmd_vlan_config()
[all …]
/linux-4.1.27/include/xen/interface/io/
Dpciif.h77 uint32_t domain; /* PCI Domain/Segment */ member
101 uint32_t domain; /* PCI Domain/Segment*/ member
/linux-4.1.27/drivers/thermal/ti-soc-thermal/
Domap4-thermal-data.c84 .domain = "cpu",
224 .domain = "cpu",
257 .domain = "cpu",
Ddra752-thermal-data.c436 .domain = "cpu",
447 .domain = "gpu",
456 .domain = "core",
465 .domain = "dspeve",
474 .domain = "iva",
Dti-thermal.h87 int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain);
94 int ti_thermal_expose_sensor(struct ti_bandgap *bgp, int id, char *domain) in ti_thermal_expose_sensor() argument
Domap5-thermal-data.c336 .domain = "cpu",
347 .domain = "gpu",
356 .domain = "core",
/linux-4.1.27/fs/ocfs2/dlm/
Ddlmdomain.c259 static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) in __dlm_lookup_domain_full() argument
269 memcmp(tmp->name, domain, len)==0) in __dlm_lookup_domain_full()
277 static struct dlm_ctxt * __dlm_lookup_domain(const char *domain) in __dlm_lookup_domain() argument
281 return __dlm_lookup_domain_full(domain, strlen(domain)); in __dlm_lookup_domain()
288 static int dlm_wait_on_domain_helper(const char *domain) in dlm_wait_on_domain_helper() argument
295 tmp = __dlm_lookup_domain(domain); in dlm_wait_on_domain_helper()
821 query->domain); in dlm_query_join_handler()
839 dlm = __dlm_lookup_domain_full(query->domain, query->name_len); in dlm_query_join_handler()
936 assert->domain); in dlm_assert_joined_handler()
939 dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len); in dlm_assert_joined_handler()
[all …]
/linux-4.1.27/arch/arm/mach-s3c64xx/
Dpm.c45 static int s3c64xx_pd_off(struct generic_pm_domain *domain) in s3c64xx_pd_off() argument
50 pd = container_of(domain, struct s3c64xx_pm_domain, pd); in s3c64xx_pd_off()
59 static int s3c64xx_pd_on(struct generic_pm_domain *domain) in s3c64xx_pd_on() argument
65 pd = container_of(domain, struct s3c64xx_pm_domain, pd); in s3c64xx_pd_on()
/linux-4.1.27/Documentation/
DDMA-attributes.txt75 buffer from CPU domain to device domain. Some advanced use cases might
80 the buffer sharing. The first call transfers a buffer from 'CPU' domain
81 to 'device' domain, what synchronizes CPU caches for the given region
90 transferred to 'device' domain. This attribute can be also used for
92 device domain after releasing a mapping for it. Use this attribute with
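A hedged sketch of that partial-sync pattern, assuming this kernel's struct dma_attrs interface (DEFINE_DMA_ATTRS/dma_set_attr) and the DMA_ATTR_SKIP_CPU_SYNC attribute discussed above; the device, buffer and direction are placeholders, and the caller is expected to sync the relevant sub-range explicitly afterwards.

#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

/* Map a shared buffer for the device while skipping the implicit CPU cache
 * sync; the caller then syncs only the region that actually changes
 * ownership (e.g. with dma_sync_single_for_device()). */
static dma_addr_t example_map_shared(struct device *dev, void *buf, size_t len)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
}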
DIntel-IOMMU.txt42 The Intel IOMMU driver allocates a virtual address per domain. Each PCIE
43 device has its own domain (hence protection). Devices under p2p bridges
48 but these are not global address spaces, but separate for each domain.
109 - For compatibility testing, could use unity map domain for all devices, just
110 provide a 1-1 for all useful memory under a single domain for all devices.
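The per-device isolation described above rests on the generic IOMMU API that the usnic_uiom.c and nouveau_platform.c hits in these results also use. An illustrative, editor-added sketch (IOVA and mapping size chosen arbitrarily):

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/mm.h>

/* Give one device its own translation domain and map a single page of I/O
 * virtual address space into it.  The IOVA is arbitrary; real drivers run
 * an address allocator instead. */
static int example_isolate_device(struct device *dev, phys_addr_t pa)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto free_domain;

	ret = iommu_map(domain, 0x100000, pa, PAGE_SIZE,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	return 0;

detach:
	iommu_detach_device(domain, dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}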
/linux-4.1.27/arch/x86/include/asm/xen/
Dpci.h19 int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
31 uint16_t domain) in xen_register_device_domain_owner() argument
/linux-4.1.27/drivers/base/regmap/
Dregmap-irq.c32 struct irq_domain *domain; member
287 handle_nested_irq(irq_find_mapping(data->domain, i)); in regmap_irq_thread()
489 d->domain = irq_domain_add_legacy(map->dev->of_node, in regmap_add_irq_chip()
493 d->domain = irq_domain_add_linear(map->dev->of_node, in regmap_add_irq_chip()
496 if (!d->domain) { in regmap_add_irq_chip()
540 irq_domain_remove(d->domain); in regmap_del_irq_chip()
578 return irq_create_mapping(data->domain, irq); in regmap_irq_get_virq()
595 return data->domain; in regmap_irq_get_domain()
/linux-4.1.27/fs/ocfs2/dlmfs/
Ddlmfs.c472 struct qstr *domain = &dentry->d_name; in dlmfs_mkdir() local
476 mlog(0, "mkdir %.*s\n", domain->len, domain->name); in dlmfs_mkdir()
479 if (domain->len >= GROUP_NAME_MAX) { in dlmfs_mkdir()
494 conn = user_dlm_register(domain); in dlmfs_mkdir()
498 status, domain->len, domain->name); in dlmfs_mkdir()
/linux-4.1.27/Documentation/devicetree/bindings/arm/omap/
Dl3-noc.txt12 - reg: Contains L3 register address range for each noc domain.
13 - ti,hwmods: "l3_main_1", ... One hwmod for each noc domain.
/linux-4.1.27/drivers/power/avs/
DKconfig15 tristate "Rockchip IO domain support"
19 necessary for the io domain setting of the SoC to match the
/linux-4.1.27/Documentation/devicetree/bindings/dma/
Dfsl-imx-sdma.txt27 0 MCU domain SSI
31 4 MCU domain UART
34 7 MCU domain CSPI
/linux-4.1.27/security/apparmor/
Ddomain.c37 void aa_free_domain_entries(struct aa_domain *domain) in aa_free_domain_entries() argument
40 if (domain) { in aa_free_domain_entries()
41 if (!domain->table) in aa_free_domain_entries()
44 for (i = 0; i < domain->size; i++) in aa_free_domain_entries()
45 kzfree(domain->table[i]); in aa_free_domain_entries()
46 kzfree(domain->table); in aa_free_domain_entries()
47 domain->table = NULL; in aa_free_domain_entries()
/linux-4.1.27/drivers/gpu/drm/exynos/
Dexynos_drm_iommu.c138 if (!mapping || !mapping->domain) in drm_iommu_detach_device()
141 iommu_detach_device(mapping->domain, subdrv_dev); in drm_iommu_detach_device()
/linux-4.1.27/arch/mips/pci/
Dpci-ar2315.c164 struct irq_domain *domain; member
329 pci_irq = irq_find_mapping(apc->domain, __ffs(pending)); in ar2315_pci_irq_handler()
385 apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT); in ar2315_pci_irq_init()
461 apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT, in ar2315_pci_probe()
463 if (!apc->domain) { in ar2315_pci_probe()
/linux-4.1.27/include/uapi/drm/
Dnouveau_drm.h53 uint32_t domain; member
70 uint32_t domain; member
/linux-4.1.27/net/unix/
DKconfig6 tristate "Unix domain sockets"
8 If you say Y here, you will include support for Unix domain sockets;
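A minimal userspace sketch of what this option enables: an AF_UNIX stream client, mirroring the socket(domain, type, protocol) call exercised by the selftest hits above. The socket path is invented and a server is assumed to be listening on it.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);	/* domain, type, protocol */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Invented rendezvous path; a listening server is assumed. */
	strncpy(addr.sun_path, "/tmp/example.sock", sizeof(addr.sun_path) - 1);
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		perror("connect");

	close(fd);
	return 0;
}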
/linux-4.1.27/arch/x86/include/asm/
Dpci.h15 int domain; /* PCI domain */ member
35 return sd->domain; in pci_domain_nr()
/linux-4.1.27/drivers/xen/
Dprivcmd.c199 domid_t domain; member
223 st->domain, NULL); in mmap_mfn_range()
272 state.domain = mmapcmd.dom; in privcmd_ioctl_mmap()
289 domid_t domain; member
326 st->domain, cur_pages); in mmap_batch_fn()
514 state.domain = m.dom; in privcmd_ioctl_mmap_batch()
