Searched refs:domain (Results 1 - 200 of 1020) sorted by relevance


/linux-4.4.14/kernel/irq/
irqdomain.c
28 static void irq_domain_check_hierarchy(struct irq_domain *domain);
38 * identifying an irq domain
88 * @ops: domain callbacks
92 * Returns pointer to IRQ domain, or NULL on failure.
99 struct irq_domain *domain; __irq_domain_add() local
104 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), __irq_domain_add()
106 if (WARN_ON(!domain)) __irq_domain_add()
112 INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL); __irq_domain_add()
113 domain->ops = ops; __irq_domain_add()
114 domain->host_data = host_data; __irq_domain_add()
115 domain->fwnode = fwnode; __irq_domain_add()
116 domain->hwirq_max = hwirq_max; __irq_domain_add()
117 domain->revmap_size = size; __irq_domain_add()
118 domain->revmap_direct_max_irq = direct_max; __irq_domain_add()
119 irq_domain_check_hierarchy(domain); __irq_domain_add()
122 list_add(&domain->link, &irq_domain_list); __irq_domain_add()
125 pr_debug("Added domain %s\n", domain->name); __irq_domain_add()
126 return domain; __irq_domain_add()
131 * irq_domain_remove() - Remove an irq domain.
132 * @domain: domain to remove
134 * This routine is used to remove an irq domain. The caller must ensure
135 * that all mappings within the domain have been disposed of prior to
138 void irq_domain_remove(struct irq_domain *domain) irq_domain_remove() argument
147 WARN_ON(domain->revmap_tree.height); irq_domain_remove()
149 list_del(&domain->link); irq_domain_remove()
152 * If the domain going away is the default one, reset it. irq_domain_remove()
154 if (unlikely(irq_default_domain == domain)) irq_domain_remove()
159 pr_debug("Removed domain %s\n", domain->name); irq_domain_remove()
161 of_node_put(irq_domain_get_of_node(domain)); irq_domain_remove()
162 kfree(domain); irq_domain_remove()
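The two halves above pair up in a typical driver. A minimal sketch (not taken from this file; all foo_* names are hypothetical) of creating a linear domain through the irq_domain_add_linear() wrapper around __irq_domain_add(), and tearing it down with irq_domain_remove():

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* .map wires each freshly created virq to a chip and flow handler. */
static int foo_map(struct irq_domain *d, unsigned int virq,
                   irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
        return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
        .map   = foo_map,
        .xlate = irq_domain_xlate_onecell,
};

static struct irq_domain *foo_probe_domain(struct device_node *np)
{
        /* 32 hwirqs, linear revmap; no host_data needed here. */
        return irq_domain_add_linear(np, 32, &foo_domain_ops, NULL);
}

static void foo_remove_domain(struct irq_domain *d)
{
        /* Per the comment above, all mappings must be disposed of first. */
        irq_domain_remove(d);
}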
170 * @first_irq: first number of irq block assigned to the domain,
172 * pre-map all of the irqs in the domain to virqs starting at first_irq.
173 * @ops: domain callbacks
190 struct irq_domain *domain; irq_domain_add_simple() local
192 domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data); irq_domain_add_simple()
193 if (!domain) irq_domain_add_simple()
205 irq_domain_associate_many(domain, first_irq, 0, size); irq_domain_add_simple()
208 return domain; irq_domain_add_simple()
216 * @first_irq: first number of irq block assigned to the domain
220 * @ops: map/unmap domain callbacks
234 struct irq_domain *domain; irq_domain_add_legacy() local
236 domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size, irq_domain_add_legacy()
238 if (domain) irq_domain_add_legacy()
239 irq_domain_associate_many(domain, first_irq, first_hwirq, size); irq_domain_add_legacy()
241 return domain; irq_domain_add_legacy()
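For the legacy case, a hedged sketch reusing foo_domain_ops from the sketch above: 16 hwirqs starting at hwirq 0 are pre-associated with a fixed block of Linux irqs at FOO_IRQ_BASE (an assumed platform constant):

#define FOO_IRQ_BASE 64

static struct irq_domain *foo_legacy_domain(struct device_node *np)
{
        return irq_domain_add_legacy(np, 16, FOO_IRQ_BASE, 0,
                                     &foo_domain_ops, NULL);
}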
246 * irq_find_matching_fwnode() - Locates a domain for a given fwnode
248 * @bus_token: domain-specific data
261 * bus_token == DOMAIN_BUS_ANY matches any domain; any other irq_find_matching_fwnode()
262 * values must generate an exact match for the domain to be irq_find_matching_fwnode()
285 * irq_set_default_host() - Set a "default" irq domain
286 * @domain: default domain pointer
288 * For convenience, it's possible to set a "default" domain that will be used
293 void irq_set_default_host(struct irq_domain *domain) irq_set_default_host() argument
295 pr_debug("Default domain set to @0x%p\n", domain); irq_set_default_host()
297 irq_default_domain = domain; irq_set_default_host()
301 void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq) irq_domain_disassociate() argument
306 if (WARN(!irq_data || irq_data->domain != domain, irq_domain_disassociate()
320 if (domain->ops->unmap) irq_domain_disassociate()
321 domain->ops->unmap(domain, irq); irq_domain_disassociate()
324 irq_data->domain = NULL; irq_domain_disassociate()
328 if (hwirq < domain->revmap_size) { irq_domain_disassociate()
329 domain->linear_revmap[hwirq] = 0; irq_domain_disassociate()
332 radix_tree_delete(&domain->revmap_tree, hwirq); irq_domain_disassociate()
337 int irq_domain_associate(struct irq_domain *domain, unsigned int virq, irq_domain_associate() argument
343 if (WARN(hwirq >= domain->hwirq_max, irq_domain_associate()
344 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name)) irq_domain_associate()
348 if (WARN(irq_data->domain, "error: virq%i is already associated", virq)) irq_domain_associate()
353 irq_data->domain = domain; irq_domain_associate()
354 if (domain->ops->map) { irq_domain_associate()
355 ret = domain->ops->map(domain, virq, hwirq); irq_domain_associate()
364 domain->name, hwirq, virq, ret); irq_domain_associate()
366 irq_data->domain = NULL; irq_domain_associate()
372 /* If not already assigned, give the domain the chip's name */ irq_domain_associate()
373 if (!domain->name && irq_data->chip) irq_domain_associate()
374 domain->name = irq_data->chip->name; irq_domain_associate()
377 if (hwirq < domain->revmap_size) { irq_domain_associate()
378 domain->linear_revmap[hwirq] = virq; irq_domain_associate()
381 radix_tree_insert(&domain->revmap_tree, hwirq, irq_data); irq_domain_associate()
392 void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_domain_associate_many() argument
398 of_node = irq_domain_get_of_node(domain); irq_domain_associate_many()
403 irq_domain_associate(domain, irq_base + i, hwirq_base + i); irq_domain_associate_many()
410 * @domain: domain to allocate the irq for or NULL for default domain
418 unsigned int irq_create_direct_mapping(struct irq_domain *domain) irq_create_direct_mapping() argument
423 if (domain == NULL) irq_create_direct_mapping()
424 domain = irq_default_domain; irq_create_direct_mapping()
426 of_node = irq_domain_get_of_node(domain); irq_create_direct_mapping()
432 if (virq >= domain->revmap_direct_max_irq) { irq_create_direct_mapping()
434 domain->revmap_direct_max_irq); irq_create_direct_mapping()
440 if (irq_domain_associate(domain, virq, virq)) { irq_create_direct_mapping()
451 * @domain: domain owning this hardware interrupt or NULL for default domain
452 * @hwirq: hardware irq number in that domain space
459 unsigned int irq_create_mapping(struct irq_domain *domain, irq_create_mapping() argument
465 pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); irq_create_mapping()
467 /* Look for default domain if necessary */ irq_create_mapping()
468 if (domain == NULL) irq_create_mapping()
469 domain = irq_default_domain; irq_create_mapping()
470 if (domain == NULL) { irq_create_mapping()
471 WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq); irq_create_mapping()
474 pr_debug("-> using domain @%p\n", domain); irq_create_mapping()
476 of_node = irq_domain_get_of_node(domain); irq_create_mapping()
479 virq = irq_find_mapping(domain, hwirq); irq_create_mapping()
492 if (irq_domain_associate(domain, virq, hwirq)) { irq_create_mapping()
497 pr_debug("irq %lu on domain %s mapped to virtual irq %u\n", irq_create_mapping()
506 * @domain: domain owning the interrupt range
514 * to insert into the domain.
517 * domain insertion.
522 int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base, irq_create_strict_mappings() argument
528 of_node = irq_domain_get_of_node(domain); irq_create_strict_mappings()
534 irq_domain_associate_many(domain, irq_base, hwirq_base, count); irq_create_strict_mappings()
552 /* If domain has no translation, then we assume interrupt line */ irq_domain_translate()
571 struct irq_domain *domain; irq_create_fwspec_mapping() local
577 domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY); irq_create_fwspec_mapping()
579 domain = irq_default_domain; irq_create_fwspec_mapping()
581 if (!domain) { irq_create_fwspec_mapping()
582 pr_warn("no irq domain found for %s !\n", irq_create_fwspec_mapping()
587 if (irq_domain_translate(domain, fwspec, &hwirq, &type)) irq_create_fwspec_mapping()
590 if (irq_domain_is_hierarchy(domain)) { irq_create_fwspec_mapping()
595 virq = irq_find_mapping(domain, hwirq); irq_create_fwspec_mapping()
599 virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec); irq_create_fwspec_mapping()
604 virq = irq_create_mapping(domain, hwirq); irq_create_fwspec_mapping()
633 struct irq_domain *domain; irq_dispose_mapping() local
638 domain = irq_data->domain; irq_dispose_mapping()
639 if (WARN_ON(domain == NULL)) irq_dispose_mapping()
642 irq_domain_disassociate(domain, virq); irq_dispose_mapping()
649 * @domain: domain owning this hardware interrupt
650 * @hwirq: hardware irq number in that domain space
652 unsigned int irq_find_mapping(struct irq_domain *domain, irq_find_mapping() argument
657 /* Look for default domain if necessary */ irq_find_mapping()
658 if (domain == NULL) irq_find_mapping()
659 domain = irq_default_domain; irq_find_mapping()
660 if (domain == NULL) irq_find_mapping()
663 if (hwirq < domain->revmap_direct_max_irq) { irq_find_mapping()
664 data = irq_domain_get_irq_data(domain, hwirq); irq_find_mapping()
670 if (hwirq < domain->revmap_size) irq_find_mapping()
671 return domain->linear_revmap[hwirq]; irq_find_mapping()
674 data = radix_tree_lookup(&domain->revmap_tree, hwirq); irq_find_mapping()
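The usual caller of irq_find_mapping() is a chained handler that reads a pending hwirq from hardware and dispatches the matching virq. A sketch, where foo_read_pending() stands in for a hypothetical register read:

static void foo_chained_handler(struct irq_desc *desc)
{
        struct irq_domain *d = irq_desc_get_handler_data(desc);
        unsigned long hwirq = foo_read_pending();

        /* Linear revmap hit for small hwirqs, radix tree otherwise. */
        generic_handle_irq(irq_find_mapping(d, hwirq));
}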
685 struct irq_domain *domain; virq_debug_show() local
693 list_for_each_entry(domain, &irq_domain_list, link) { virq_debug_show()
696 of_node = irq_domain_get_of_node(domain); virq_debug_show()
697 radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0) virq_debug_show()
700 domain == irq_default_domain ? '*' : ' ', domain->name, virq_debug_show()
701 domain->revmap_size + count, domain->revmap_size, virq_debug_show()
702 domain->revmap_direct_max_irq, virq_debug_show()
709 "active", "type", "domain"); virq_debug_show()
717 domain = desc->irq_data.domain; virq_debug_show()
719 if (domain) { virq_debug_show()
734 direct = (i == hwirq) && (i < domain->revmap_direct_max_irq); virq_debug_show()
736 (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX", virq_debug_show()
738 seq_printf(m, "%s\n", desc->irq_data.domain->name); virq_debug_show()
858 * @parent: Parent irq domain to associate with the new domain
859 * @flags: Irq domain flags associated to the domain
860 * @size: Size of the domain. See below
862 * @ops: Pointer to the interrupt domain callbacks
865 * If @size is 0 a tree domain is created, otherwise a linear domain.
867 * If successful the parent is associated to the new domain and the
868 * domain flags are set.
869 * Returns pointer to IRQ domain, or NULL on failure.
878 struct irq_domain *domain; irq_domain_create_hierarchy() local
881 domain = irq_domain_create_linear(fwnode, size, ops, host_data); irq_domain_create_hierarchy()
883 domain = irq_domain_create_tree(fwnode, ops, host_data); irq_domain_create_hierarchy()
884 if (domain) { irq_domain_create_hierarchy()
885 domain->parent = parent; irq_domain_create_hierarchy()
886 domain->flags |= flags; irq_domain_create_hierarchy()
889 return domain; irq_domain_create_hierarchy()
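A child domain created this way supplies .alloc/.free callbacks. A sketch of the common shape (foo_* names hypothetical): translate the firmware spec, allocate from the parent, then install this level's hwirq and chip:

static struct irq_chip foo_chip = {
        .name = "foo",
};

static int foo_domain_alloc(struct irq_domain *domain, unsigned int virq,
                            unsigned int nr_irqs, void *arg)
{
        struct irq_fwspec *fwspec = arg;
        irq_hw_number_t hwirq = fwspec->param[0];
        int i, ret;

        ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &foo_chip, NULL);
        return 0;
}

static const struct irq_domain_ops foo_hier_ops = {
        .alloc = foo_domain_alloc,
        .free  = irq_domain_free_irqs_common,
};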
897 struct irq_domain *domain = data->domain; irq_domain_insert_irq() local
900 if (hwirq < domain->revmap_size) { irq_domain_insert_irq()
901 domain->linear_revmap[hwirq] = virq; irq_domain_insert_irq()
904 radix_tree_insert(&domain->revmap_tree, hwirq, data); irq_domain_insert_irq()
908 /* If not already assigned, give the domain the chip's name */ irq_domain_insert_irq()
909 if (!domain->name && data->chip) irq_domain_insert_irq()
910 domain->name = data->chip->name; irq_domain_insert_irq()
926 struct irq_domain *domain = data->domain; irq_domain_remove_irq() local
929 if (hwirq < domain->revmap_size) { irq_domain_remove_irq()
930 domain->linear_revmap[hwirq] = 0; irq_domain_remove_irq()
933 radix_tree_delete(&domain->revmap_tree, hwirq); irq_domain_remove_irq()
939 static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, irq_domain_insert_irq_data() argument
950 irq_data->domain = domain; irq_domain_insert_irq_data()
965 irq_data->domain = NULL; irq_domain_free_irq_data()
975 static int irq_domain_alloc_irq_data(struct irq_domain *domain, irq_domain_alloc_irq_data() argument
985 irq_data->domain = domain; irq_domain_alloc_irq_data()
987 for (parent = domain->parent; parent; parent = parent->parent) { irq_domain_alloc_irq_data()
1000 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1001 * @domain: domain to match
1004 struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, irq_domain_get_irq_data() argument
1011 if (irq_data->domain == domain) irq_domain_get_irq_data()
1018 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
1019 * @domain: Interrupt domain to match
1025 int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, irq_domain_set_hwirq_and_chip() argument
1029 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); irq_domain_set_hwirq_and_chip()
1042 * irq_domain_set_info - Set the complete data for a @virq in @domain
1043 * @domain: Interrupt domain to match
1052 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_domain_set_info() argument
1057 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); irq_domain_set_info()
1075 * @domain: Interrupt domain to match
1079 void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, irq_domain_free_irqs_common() argument
1086 irq_data = irq_domain_get_irq_data(domain, virq + i); irq_domain_free_irqs_common()
1090 irq_domain_free_irqs_parent(domain, virq, nr_irqs); irq_domain_free_irqs_common()
1095 * @domain: Interrupt domain to match
1099 void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, irq_domain_free_irqs_top() argument
1108 irq_domain_free_irqs_common(domain, virq, nr_irqs); irq_domain_free_irqs_top()
1111 static bool irq_domain_is_auto_recursive(struct irq_domain *domain) irq_domain_is_auto_recursive() argument
1113 return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE; irq_domain_is_auto_recursive()
1116 static void irq_domain_free_irqs_recursive(struct irq_domain *domain, irq_domain_free_irqs_recursive() argument
1120 domain->ops->free(domain, irq_base, nr_irqs); irq_domain_free_irqs_recursive()
1121 if (irq_domain_is_auto_recursive(domain)) { irq_domain_free_irqs_recursive()
1122 BUG_ON(!domain->parent); irq_domain_free_irqs_recursive()
1123 irq_domain_free_irqs_recursive(domain->parent, irq_base, irq_domain_free_irqs_recursive()
1128 static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, irq_domain_alloc_irqs_recursive() argument
1133 struct irq_domain *parent = domain->parent; irq_domain_alloc_irqs_recursive()
1134 bool recursive = irq_domain_is_auto_recursive(domain); irq_domain_alloc_irqs_recursive()
1141 ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); irq_domain_alloc_irqs_recursive()
1149 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1150 * @domain: domain to allocate from
1154 * @arg: domain specific argument
1169 int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, __irq_domain_alloc_irqs() argument
1175 if (domain == NULL) { __irq_domain_alloc_irqs()
1176 domain = irq_default_domain; __irq_domain_alloc_irqs()
1177 if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) __irq_domain_alloc_irqs()
1181 if (!domain->ops->alloc) { __irq_domain_alloc_irqs()
1182 pr_debug("domain->ops->alloc() is NULL\n"); __irq_domain_alloc_irqs()
1197 if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { __irq_domain_alloc_irqs()
1204 ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg); __irq_domain_alloc_irqs()
1232 if (WARN(!data || !data->domain || !data->domain->ops->free, irq_domain_free_irqs()
1239 irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs); irq_domain_free_irqs()
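From the consumer side these are normally reached through the irq_domain_alloc_irqs() inline (which passes irq_base == -1, i.e. "place anywhere"). A sketch, assuming the domain's .alloc expects an irq_fwspec:

static int foo_alloc_two(struct irq_domain *d, struct irq_fwspec *fwspec)
{
        int virq = irq_domain_alloc_irqs(d, 2, NUMA_NO_NODE, fwspec);

        if (virq < 0)
                return virq;

        /* ... use virq and virq + 1 ... */
        irq_domain_free_irqs(virq, 2);
        return 0;
}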
1247 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1250 * @arg: Allocation data (arch/domain specific)
1252 * Check whether the domain has been set up recursively. If not, allocate irq_domain_alloc_irqs_parent()
1253 * through the parent domain.
1255 int irq_domain_alloc_irqs_parent(struct irq_domain *domain, irq_domain_alloc_irqs_parent() argument
1260 if (irq_domain_is_auto_recursive(domain)) irq_domain_alloc_irqs_parent()
1263 domain = domain->parent; irq_domain_alloc_irqs_parent()
1264 if (domain) irq_domain_alloc_irqs_parent()
1265 return irq_domain_alloc_irqs_recursive(domain, irq_base, irq_domain_alloc_irqs_parent()
1271 * irq_domain_free_irqs_parent - Free interrupts from parent domain
1275 * Check whether the domain has been set up recursively. If not, free irq_domain_free_irqs_parent()
1276 * through the parent domain.
1278 void irq_domain_free_irqs_parent(struct irq_domain *domain, irq_domain_free_irqs_parent() argument
1282 if (!irq_domain_is_auto_recursive(domain) && domain->parent) irq_domain_free_irqs_parent()
1283 irq_domain_free_irqs_recursive(domain->parent, irq_base, irq_domain_free_irqs_parent()
1297 if (irq_data && irq_data->domain) { irq_domain_activate_irq()
1298 struct irq_domain *domain = irq_data->domain; irq_domain_activate_irq() local
1302 if (domain->ops->activate) irq_domain_activate_irq()
1303 domain->ops->activate(domain, irq_data); irq_domain_activate_irq()
1317 if (irq_data && irq_data->domain) { irq_domain_deactivate_irq()
1318 struct irq_domain *domain = irq_data->domain; irq_domain_deactivate_irq() local
1320 if (domain->ops->deactivate) irq_domain_deactivate_irq()
1321 domain->ops->deactivate(domain, irq_data); irq_domain_deactivate_irq()
1327 static void irq_domain_check_hierarchy(struct irq_domain *domain) irq_domain_check_hierarchy() argument
1330 if (domain->ops->alloc) irq_domain_check_hierarchy()
1331 domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; irq_domain_check_hierarchy()
1335 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1336 * @domain: domain to match
1339 struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, irq_domain_get_irq_data() argument
1344 return (irq_data && irq_data->domain == domain) ? irq_data : NULL; irq_domain_get_irq_data()
1348 * irq_domain_set_info - Set the complete data for a @virq in @domain
1349 * @domain: Interrupt domain to match
1358 void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, irq_domain_set_info() argument
1368 static void irq_domain_check_hierarchy(struct irq_domain *domain) irq_domain_check_hierarchy() argument
msi.c
83 static void msi_domain_activate(struct irq_domain *domain, msi_domain_activate() argument
92 static void msi_domain_deactivate(struct irq_domain *domain, msi_domain_deactivate() argument
101 static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, msi_domain_alloc() argument
104 struct msi_domain_info *info = domain->host_data; msi_domain_alloc()
109 if (irq_find_mapping(domain, hwirq) > 0) msi_domain_alloc()
112 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); msi_domain_alloc()
117 ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); msi_domain_alloc()
121 ops->msi_free(domain, info, virq + i); msi_domain_alloc()
123 irq_domain_free_irqs_top(domain, virq, nr_irqs); msi_domain_alloc()
131 static void msi_domain_free(struct irq_domain *domain, unsigned int virq, msi_domain_free() argument
134 struct msi_domain_info *info = domain->host_data; msi_domain_free()
139 info->ops->msi_free(domain, info, virq + i); msi_domain_free()
141 irq_domain_free_irqs_top(domain, virq, nr_irqs); msi_domain_free()
158 static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, msi_domain_ops_prepare() argument
176 static int msi_domain_ops_init(struct irq_domain *domain, msi_domain_ops_init() argument
181 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, msi_domain_ops_init()
191 static int msi_domain_ops_check(struct irq_domain *domain, msi_domain_ops_check() argument
237 * msi_create_irq_domain - Create a MSI interrupt domain
239 * @info: MSI domain info
240 * @parent: Parent irq domain
256 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
257 * @domain: The domain to allocate from
264 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, msi_domain_alloc_irqs() argument
267 struct msi_domain_info *info = domain->host_data; msi_domain_alloc_irqs()
273 ret = ops->msi_check(domain, info, dev); msi_domain_alloc_irqs()
275 ret = ops->msi_prepare(domain, dev, nvec, &arg); msi_domain_alloc_irqs()
286 virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, for_each_msi_entry()
291 ret = ops->handle_error(domain, desc, ret); for_each_msi_entry()
316 * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated to @dev
317 * @domain: The domain managing the interrupts
321 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) msi_domain_free_irqs() argument
339 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
340 * @domain: The interrupt domain to retrieve data from
343 * @domain->host_data.
345 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) msi_get_domain_info() argument
347 return (struct msi_domain_info *)domain->host_data; msi_get_domain_info()
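Stacking an MSI domain on an existing parent is then a matter of filling in msi_domain_info. A hedged sketch with placeholder chip callbacks (a real driver also composes the MSI message via its parent):

#include <linux/msi.h>

static struct irq_chip foo_msi_chip = {
        .name       = "foo-MSI",
        .irq_mask   = irq_chip_mask_parent,
        .irq_unmask = irq_chip_unmask_parent,
};

static struct msi_domain_info foo_msi_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
        .chip  = &foo_msi_chip,
};

static struct irq_domain *foo_create_msi(struct fwnode_handle *fwnode,
                                         struct irq_domain *parent)
{
        return msi_create_irq_domain(fwnode, &foo_msi_info, parent);
}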
/linux-4.4.14/arch/x86/include/asm/
irqdomain.h
40 extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
42 extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
44 extern void mp_irqdomain_activate(struct irq_domain *domain,
46 extern void mp_irqdomain_deactivate(struct irq_domain *domain,
48 extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
52 extern void arch_init_msi_domain(struct irq_domain *domain);
54 static inline void arch_init_msi_domain(struct irq_domain *domain) { } arch_init_msi_domain() argument
58 extern void arch_init_htirq_domain(struct irq_domain *domain); arch_init_msi_domain()
60 static inline void arch_init_htirq_domain(struct irq_domain *domain) { } argument
pci.h
15 int domain; /* PCI domain */ member in struct:pci_sysdata
35 return sd->domain; pci_domain_nr()
/linux-4.4.14/drivers/pinctrl/meson/
pinctrl-meson.c
21 * domain which can't be powered off; the bank also uses a set of
36 * register range of the domain; when all groups for a given pin are
70 * @domain: the domain containing the pin
76 static int meson_get_bank(struct meson_domain *domain, unsigned int pin, meson_get_bank() argument
81 for (i = 0; i < domain->data->num_banks; i++) { meson_get_bank()
82 if (pin >= domain->data->banks[i].first && meson_get_bank()
83 pin <= domain->data->banks[i].last) { meson_get_bank()
84 *bank = &domain->data->banks[i]; meson_get_bank()
93 * meson_get_domain_and_bank() - find domain and bank containing a given pin
97 * @domain: the found domain
103 struct meson_domain **domain, meson_get_domain_and_bank()
113 *domain = d; meson_get_domain_and_bank()
196 struct meson_domain *domain; meson_pmx_disable_other_groups() local
207 domain = &pc->domains[group->domain]; meson_pmx_disable_other_groups()
208 regmap_update_bits(domain->reg_mux, meson_pmx_disable_other_groups()
222 struct meson_domain *domain = &pc->domains[group->domain]; meson_pmx_set_mux() local
237 ret = regmap_update_bits(domain->reg_mux, group->reg * 4, meson_pmx_set_mux()
293 struct meson_domain *domain; meson_pinconf_set() local
300 ret = meson_get_domain_and_bank(pc, pin, &domain, &bank); meson_pinconf_set()
313 ret = regmap_update_bits(domain->reg_pull, reg, meson_pinconf_set()
323 ret = regmap_update_bits(domain->reg_pullen, reg, meson_pinconf_set()
329 ret = regmap_update_bits(domain->reg_pull, reg, meson_pinconf_set()
339 ret = regmap_update_bits(domain->reg_pullen, reg, meson_pinconf_set()
345 ret = regmap_update_bits(domain->reg_pull, reg, meson_pinconf_set()
360 struct meson_domain *domain; meson_pinconf_get_pull() local
365 ret = meson_get_domain_and_bank(pc, pin, &domain, &bank); meson_pinconf_get_pull()
371 ret = regmap_read(domain->reg_pullen, reg, &val); meson_pinconf_get_pull()
380 ret = regmap_read(domain->reg_pull, reg, &val); meson_pinconf_get_pull()
463 struct meson_domain *domain = to_meson_domain(chip); meson_gpio_free() local
465 pinctrl_free_gpio(domain->data->pin_base + gpio); meson_gpio_free()
470 struct meson_domain *domain = to_meson_domain(chip); meson_gpio_direction_input() local
475 pin = domain->data->pin_base + gpio; meson_gpio_direction_input()
476 ret = meson_get_bank(domain, pin, &bank); meson_gpio_direction_input()
482 return regmap_update_bits(domain->reg_gpio, reg, BIT(bit), BIT(bit)); meson_gpio_direction_input()
488 struct meson_domain *domain = to_meson_domain(chip); meson_gpio_direction_output() local
493 pin = domain->data->pin_base + gpio; meson_gpio_direction_output()
494 ret = meson_get_bank(domain, pin, &bank); meson_gpio_direction_output()
499 ret = regmap_update_bits(domain->reg_gpio, reg, BIT(bit), 0); meson_gpio_direction_output()
504 return regmap_update_bits(domain->reg_gpio, reg, BIT(bit), meson_gpio_direction_output()
510 struct meson_domain *domain = to_meson_domain(chip); meson_gpio_set() local
515 pin = domain->data->pin_base + gpio; meson_gpio_set()
516 ret = meson_get_bank(domain, pin, &bank); meson_gpio_set()
521 regmap_update_bits(domain->reg_gpio, reg, BIT(bit), meson_gpio_set()
527 struct meson_domain *domain = to_meson_domain(chip); meson_gpio_get() local
532 pin = domain->data->pin_base + gpio; meson_gpio_get()
533 ret = meson_get_bank(domain, pin, &bank); meson_gpio_get()
538 regmap_read(domain->reg_gpio, reg, &val); meson_gpio_get()
558 struct meson_domain *domain; meson_gpiolib_register() local
562 domain = &pc->domains[i]; meson_gpiolib_register()
564 domain->chip.label = domain->data->name; meson_gpiolib_register()
565 domain->chip.dev = pc->dev; meson_gpiolib_register()
566 domain->chip.request = meson_gpio_request; meson_gpiolib_register()
567 domain->chip.free = meson_gpio_free; meson_gpiolib_register()
568 domain->chip.direction_input = meson_gpio_direction_input; meson_gpiolib_register()
569 domain->chip.direction_output = meson_gpio_direction_output; meson_gpiolib_register()
570 domain->chip.get = meson_gpio_get; meson_gpiolib_register()
571 domain->chip.set = meson_gpio_set; meson_gpiolib_register()
572 domain->chip.base = domain->data->pin_base; meson_gpiolib_register()
573 domain->chip.ngpio = domain->data->num_pins; meson_gpiolib_register()
574 domain->chip.can_sleep = false; meson_gpiolib_register()
575 domain->chip.of_node = domain->of_node; meson_gpiolib_register()
576 domain->chip.of_gpio_n_cells = 2; meson_gpiolib_register()
578 ret = gpiochip_add(&domain->chip); meson_gpiolib_register()
581 domain->data->name); meson_gpiolib_register()
585 ret = gpiochip_add_pin_range(&domain->chip, dev_name(pc->dev), meson_gpiolib_register()
586 0, domain->data->pin_base, meson_gpiolib_register()
587 domain->chip.ngpio); meson_gpiolib_register()
650 struct meson_domain *domain; meson_pinctrl_parse_dt() local
673 domain = &pc->domains[i]; for_each_child_of_node()
675 domain->data = meson_get_domain_data(pc, np); for_each_child_of_node()
676 if (!domain->data) { for_each_child_of_node()
677 dev_err(pc->dev, "domain data not found for node %s\n", for_each_child_of_node()
682 domain->of_node = np; for_each_child_of_node()
684 domain->reg_mux = meson_map_resource(pc, np, "mux"); for_each_child_of_node()
685 if (IS_ERR(domain->reg_mux)) { for_each_child_of_node()
687 return PTR_ERR(domain->reg_mux); for_each_child_of_node()
690 domain->reg_pull = meson_map_resource(pc, np, "pull"); for_each_child_of_node()
691 if (IS_ERR(domain->reg_pull)) { for_each_child_of_node()
693 return PTR_ERR(domain->reg_pull); for_each_child_of_node()
696 domain->reg_pullen = meson_map_resource(pc, np, "pull-enable"); for_each_child_of_node()
698 if (IS_ERR(domain->reg_pullen)) for_each_child_of_node()
699 domain->reg_pullen = domain->reg_pull; for_each_child_of_node()
701 domain->reg_gpio = meson_map_resource(pc, np, "gpio"); for_each_child_of_node()
702 if (IS_ERR(domain->reg_gpio)) { for_each_child_of_node()
704 return PTR_ERR(domain->reg_gpio); for_each_child_of_node()
102 meson_get_domain_and_bank(struct meson_pinctrl *pc, unsigned int pin, struct meson_domain **domain, struct meson_bank **bank) meson_get_domain_and_bank() argument
pinctrl-meson.h
26 * @reg: register offset for the group in the domain mux registers
28 * @domain: index of the domain this group belongs to
37 unsigned int domain; member in struct:meson_pmx_group
88 * bits in the domain registers. The structure specifies which bits in
100 * struct meson_domain_data - domain platform data
102 * @name: name of the domain
103 * @banks: set of banks belonging to the domain
104 * @num_banks: number of banks in the domain
121 * @chip: gpio chip associated with the domain
122 * @data: platform data for the domain
123 * @node: device tree node for the domain
125 * A domain represents a set of banks controlled by the same set of
167 .domain = 0, \
185 .domain = 1, \
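Putting the fields described in this header together, a hedged sketch of a platform's domain data (bank initializers elided, since the BANK() macro arguments are not shown in this excerpt; sizes made up):

static struct meson_bank foo_banks[] = {
        /* per-bank pin ranges plus pull/pull-enable/gpio reg+bit info */
};

static struct meson_domain_data foo_domain_data = {
        .name      = "foo-banks",
        .banks     = foo_banks,
        .num_banks = ARRAY_SIZE(foo_banks),
        .pin_base  = 0,
        .num_pins  = 120,
};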
/linux-4.4.14/drivers/iommu/
ipmmu-vmsa.c
199 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg) ipmmu_ctx_read() argument
201 return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg); ipmmu_ctx_read()
204 static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg, ipmmu_ctx_write() argument
207 ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data); ipmmu_ctx_write()
215 static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain) ipmmu_tlb_sync() argument
219 while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) { ipmmu_tlb_sync()
222 dev_err_ratelimited(domain->mmu->dev, ipmmu_tlb_sync()
230 static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain) ipmmu_tlb_invalidate() argument
234 reg = ipmmu_ctx_read(domain, IMCTR); ipmmu_tlb_invalidate()
236 ipmmu_ctx_write(domain, IMCTR, reg); ipmmu_tlb_invalidate()
238 ipmmu_tlb_sync(domain); ipmmu_tlb_invalidate()
244 static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain, ipmmu_utlb_enable() argument
247 struct ipmmu_vmsa_device *mmu = domain->mmu; ipmmu_utlb_enable()
258 IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH | ipmmu_utlb_enable()
265 static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain, ipmmu_utlb_disable() argument
268 struct ipmmu_vmsa_device *mmu = domain->mmu; ipmmu_utlb_disable()
275 struct ipmmu_vmsa_domain *domain = cookie; ipmmu_tlb_flush_all() local
277 ipmmu_tlb_invalidate(domain); ipmmu_tlb_flush_all()
296 static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) ipmmu_domain_init_context() argument
311 domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; ipmmu_domain_init_context()
312 domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, ipmmu_domain_init_context()
313 domain->cfg.ias = 32; ipmmu_domain_init_context()
314 domain->cfg.oas = 40; ipmmu_domain_init_context()
315 domain->cfg.tlb = &ipmmu_gather_ops; ipmmu_domain_init_context()
320 domain->cfg.iommu_dev = domain->mmu->dev; ipmmu_domain_init_context()
322 domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg, ipmmu_domain_init_context()
323 domain); ipmmu_domain_init_context()
324 if (!domain->iop) ipmmu_domain_init_context()
331 domain->context_id = 0; ipmmu_domain_init_context()
334 ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; ipmmu_domain_init_context()
335 ipmmu_ctx_write(domain, IMTTLBR0, ttbr); ipmmu_domain_init_context()
336 ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32); ipmmu_domain_init_context()
343 ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE | ipmmu_domain_init_context()
348 ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]); ipmmu_domain_init_context()
351 ipmmu_ctx_write(domain, IMBUSCR, ipmmu_domain_init_context()
352 ipmmu_ctx_read(domain, IMBUSCR) & ipmmu_domain_init_context()
359 ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR)); ipmmu_domain_init_context()
368 ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN); ipmmu_domain_init_context()
373 static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) ipmmu_domain_destroy_context() argument
381 ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); ipmmu_domain_destroy_context()
382 ipmmu_tlb_sync(domain); ipmmu_domain_destroy_context()
389 static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) ipmmu_domain_irq() argument
392 struct ipmmu_vmsa_device *mmu = domain->mmu; ipmmu_domain_irq()
396 status = ipmmu_ctx_read(domain, IMSTR); ipmmu_domain_irq()
400 iova = ipmmu_ctx_read(domain, IMEAR); ipmmu_domain_irq()
408 ipmmu_ctx_write(domain, IMSTR, 0); ipmmu_domain_irq()
427 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) ipmmu_domain_irq()
441 struct ipmmu_vmsa_domain *domain; ipmmu_irq() local
446 io_domain = mmu->mapping->domain; ipmmu_irq()
447 domain = to_vmsa_domain(io_domain); ipmmu_irq()
449 return ipmmu_domain_irq(domain); ipmmu_irq()
458 struct ipmmu_vmsa_domain *domain; ipmmu_domain_alloc() local
463 domain = kzalloc(sizeof(*domain), GFP_KERNEL); ipmmu_domain_alloc()
464 if (!domain) ipmmu_domain_alloc()
467 spin_lock_init(&domain->lock); ipmmu_domain_alloc()
469 return &domain->io_domain; ipmmu_domain_alloc()
474 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); ipmmu_domain_free() local
477 * Free the domain resources. We assume that all devices have already ipmmu_domain_free()
480 ipmmu_domain_destroy_context(domain); ipmmu_domain_free()
481 free_io_pgtable_ops(domain->iop); ipmmu_domain_free()
482 kfree(domain); ipmmu_domain_free()
490 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); ipmmu_attach_device() local
500 spin_lock_irqsave(&domain->lock, flags); ipmmu_attach_device()
502 if (!domain->mmu) { ipmmu_attach_device()
503 /* The domain hasn't been used yet, initialize it. */ ipmmu_attach_device()
504 domain->mmu = mmu; ipmmu_attach_device()
505 ret = ipmmu_domain_init_context(domain); ipmmu_attach_device()
506 } else if (domain->mmu != mmu) { ipmmu_attach_device()
509 * different IOMMUs to the same domain. ipmmu_attach_device()
511 dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", ipmmu_attach_device()
512 dev_name(mmu->dev), dev_name(domain->mmu->dev)); ipmmu_attach_device()
516 spin_unlock_irqrestore(&domain->lock, flags); ipmmu_attach_device()
522 ipmmu_utlb_enable(domain, archdata->utlbs[i]); ipmmu_attach_device()
531 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); ipmmu_detach_device() local
535 ipmmu_utlb_disable(domain, archdata->utlbs[i]); ipmmu_detach_device()
545 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); ipmmu_map() local
547 if (!domain) ipmmu_map()
550 return domain->iop->map(domain->iop, iova, paddr, size, prot); ipmmu_map()
556 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); ipmmu_unmap() local
558 return domain->iop->unmap(domain->iop, iova, size); ipmmu_unmap()
564 struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); ipmmu_iova_to_phys() local
568 return domain->iop->iova_to_phys(domain->iop, iova); ipmmu_iova_to_phys()
678 * VAs. This will allocate a corresponding IOMMU domain. ipmmu_add_device()
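These callbacks back the generic IOMMU API; the consumer-side flow they serve looks roughly like this (hypothetical device and addresses, sketch only):

#include <linux/iommu.h>
#include <linux/sizes.h>

static int foo_map_one_page(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *dom = iommu_domain_alloc(dev->bus);
        int ret;

        if (!dom)
                return -ENOMEM;

        ret = iommu_attach_device(dom, dev);
        if (ret)
                goto out_free;

        /* One 4K page at IOVA 0x10000000, read/write. */
        ret = iommu_map(dom, 0x10000000, paddr, SZ_4K,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto out_detach;
        return 0;

out_detach:
        iommu_detach_device(dom, dev);
out_free:
        iommu_domain_free(dom);
        return ret;
}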
amd_iommu.c
90 struct list_head list; /* For domain->dev_list */
92 struct protection_domain *domain; /* Domain the device is bound to */ member in struct:iommu_dev_data
115 static void update_domain(struct protection_domain *domain);
116 static int protection_domain_init(struct protection_domain *domain);
126 return container_of(dom, struct protection_domain, domain); to_pdomain()
281 * dma_ops domain.
344 struct iommu_domain *domain; init_iommu_group() local
351 domain = iommu_group_default_domain(group); init_iommu_group()
352 if (!domain) init_iommu_group()
355 dma_domain = to_pdomain(domain)->priv; init_iommu_group()
550 "domain=0x%04x address=0x%016llx flags=0x%04x]\n", iommu_print_event()
562 "domain=0x%04x address=0x%016llx flags=0x%04x]\n", iommu_print_event()
791 * TLB entries for this domain build_inv_iommu_pages()
822 * TLB entries for this domain build_inv_iotlb_pages()
1104 static void __domain_flush_pages(struct protection_domain *domain, __domain_flush_pages() argument
1111 build_inv_iommu_pages(&cmd, address, size, domain->id, pde); __domain_flush_pages()
1114 if (!domain->dev_iommu[i]) __domain_flush_pages()
1118 * Devices of this domain are behind this IOMMU __domain_flush_pages()
1124 list_for_each_entry(dev_data, &domain->dev_list, list) { __domain_flush_pages()
1135 static void domain_flush_pages(struct protection_domain *domain, domain_flush_pages() argument
1138 __domain_flush_pages(domain, address, size, 0); domain_flush_pages()
1141 /* Flush the whole IO/TLB for a given protection domain */ domain_flush_tlb()
1142 static void domain_flush_tlb(struct protection_domain *domain) domain_flush_tlb() argument
1144 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); domain_flush_tlb()
1147 /* Flush the whole IO/TLB for a given protection domain - including PDE */ domain_flush_tlb_pde()
1148 static void domain_flush_tlb_pde(struct protection_domain *domain) domain_flush_tlb_pde() argument
1150 __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); domain_flush_tlb_pde()
1153 static void domain_flush_complete(struct protection_domain *domain) domain_flush_complete() argument
1158 if (!domain->dev_iommu[i]) domain_flush_complete()
1162 * Devices of this domain are behind this IOMMU domain_flush_complete()
1171 * This function flushes the DTEs for all devices in domain
1173 static void domain_flush_devices(struct protection_domain *domain) domain_flush_devices() argument
1177 list_for_each_entry(dev_data, &domain->dev_list, list) domain_flush_devices()
1193 static bool increase_address_space(struct protection_domain *domain, increase_address_space() argument
1198 if (domain->mode == PAGE_MODE_6_LEVEL) increase_address_space()
1206 *pte = PM_LEVEL_PDE(domain->mode, increase_address_space()
1207 virt_to_phys(domain->pt_root)); increase_address_space()
1208 domain->pt_root = pte; increase_address_space()
1209 domain->mode += 1; increase_address_space()
1210 domain->updated = true; increase_address_space()
1215 static u64 *alloc_pte(struct protection_domain *domain, alloc_pte() argument
1226 while (address > PM_LEVEL_SIZE(domain->mode)) alloc_pte()
1227 increase_address_space(domain, gfp); alloc_pte()
1229 level = domain->mode - 1; alloc_pte()
1230 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; alloc_pte()
1263 static u64 *fetch_pte(struct protection_domain *domain, fetch_pte() argument
1270 if (address > PM_LEVEL_SIZE(domain->mode)) fetch_pte()
1273 level = domain->mode - 1; fetch_pte()
1274 pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; fetch_pte()
1412 * called with domain->lock held
1437 * aperture in case of dma_ops domain allocation or address allocation
1470 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, alloc_new_range()
1519 u64 *pte = fetch_pte(&dma_dom->domain, i, &pte_pgsize);
1527 update_domain(&dma_dom->domain);
1532 update_domain(&dma_dom->domain);
1622 * called with domain->lock held
1649 * The next functions belong to the domain allocation. A domain is
1650 * allocated for every IOMMU as the default domain. If device isolation
1651 * is enabled, every device gets its own domain. The most important thing
1658 * This function adds a protection domain to the global protection domain list
1660 static void add_domain_to_list(struct protection_domain *domain) add_domain_to_list() argument
1665 list_add(&domain->list, &amd_iommu_pd_list); add_domain_to_list()
1670 * This function removes a protection domain from the global
1671 * protection domain list
1673 static void del_domain_from_list(struct protection_domain *domain) del_domain_from_list() argument
1678 list_del(&domain->list); del_domain_from_list()
1740 static void free_pagetable(struct protection_domain *domain) free_pagetable() argument
1742 unsigned long root = (unsigned long)domain->pt_root; free_pagetable()
1744 switch (domain->mode) { free_pagetable()
1800 static void free_gcr3_table(struct protection_domain *domain) free_gcr3_table() argument
1802 if (domain->glx == 2) free_gcr3_table()
1803 free_gcr3_tbl_level2(domain->gcr3_tbl); free_gcr3_table()
1804 else if (domain->glx == 1) free_gcr3_table()
1805 free_gcr3_tbl_level1(domain->gcr3_tbl); free_gcr3_table()
1807 BUG_ON(domain->glx != 0); free_gcr3_table()
1809 free_page((unsigned long)domain->gcr3_tbl); free_gcr3_table()
1813 * Free a domain, only used if something went wrong in the
1823 del_domain_from_list(&dom->domain); dma_ops_domain_free()
1825 free_pagetable(&dom->domain); dma_ops_domain_free()
1838 * Allocates a new protection domain usable for the dma_ops functions.
1850 if (protection_domain_init(&dma_dom->domain)) dma_ops_domain_alloc()
1853 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; dma_ops_domain_alloc()
1854 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); dma_ops_domain_alloc()
1855 dma_dom->domain.flags = PD_DMA_OPS_MASK; dma_ops_domain_alloc()
1856 dma_dom->domain.priv = dma_dom; dma_ops_domain_alloc()
1857 if (!dma_dom->domain.pt_root) dma_ops_domain_alloc()
1862 add_domain_to_list(&dma_dom->domain); dma_ops_domain_alloc()
1884 * little helper function to check whether a given protection domain is a
1885 * dma_ops domain
1887 static bool dma_ops_domain(struct protection_domain *domain) dma_ops_domain() argument
1889 return domain->flags & PD_DMA_OPS_MASK; dma_ops_domain()
1892 static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) set_dte_entry() argument
1897 if (domain->mode != PAGE_MODE_NONE) set_dte_entry()
1898 pte_root = virt_to_phys(domain->pt_root); set_dte_entry()
1900 pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) set_dte_entry()
1909 if (domain->flags & PD_IOMMUV2_MASK) { set_dte_entry()
1910 u64 gcr3 = __pa(domain->gcr3_tbl); set_dte_entry()
1911 u64 glx = domain->glx; set_dte_entry()
1936 flags |= domain->id; set_dte_entry()
1952 struct protection_domain *domain) do_attach()
1963 dev_data->domain = domain; do_attach()
1964 list_add(&dev_data->list, &domain->dev_list); do_attach()
1967 domain->dev_iommu[iommu->index] += 1; do_attach()
1968 domain->dev_cnt += 1; do_attach()
1971 set_dte_entry(dev_data->devid, domain, ats); do_attach()
1973 set_dte_entry(alias, domain, ats); do_attach()
1985 * be detached from its domain because the generic do_detach()
1989 if (!dev_data->domain) do_detach()
1996 dev_data->domain->dev_iommu[iommu->index] -= 1; do_detach()
1997 dev_data->domain->dev_cnt -= 1; do_detach()
2000 dev_data->domain = NULL; do_detach()
2011 * If a device is not yet associated with a domain, this function does
2015 struct protection_domain *domain) __attach_device()
2025 /* lock domain */ __attach_device()
2026 spin_lock(&domain->lock); __attach_device()
2029 if (dev_data->domain != NULL) __attach_device()
2033 do_attach(dev_data, domain); __attach_device()
2040 spin_unlock(&domain->lock); __attach_device()
2133 * If a device is not yet associated with a domain, this function
2137 struct protection_domain *domain) attach_device()
2146 if (domain->flags & PD_IOMMUV2_MASK) { attach_device()
2165 ret = __attach_device(dev_data, domain); attach_device()
2173 domain_flush_tlb_pde(domain); attach_device()
2179 * Removes a device from a protection domain (unlocked)
2183 struct protection_domain *domain; __detach_device() local
2191 if (WARN_ON(!dev_data->domain)) __detach_device()
2194 domain = dev_data->domain; __detach_device()
2196 spin_lock(&domain->lock); __detach_device()
2200 spin_unlock(&domain->lock); __detach_device()
2204 * Removes a device from a protection domain (with devtable_lock held)
2208 struct protection_domain *domain; detach_device() local
2213 domain = dev_data->domain; detach_device()
2220 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2) detach_device()
2231 struct iommu_domain *domain; amd_iommu_add_device() local
2262 domain = iommu_get_domain_for_dev(dev); amd_iommu_add_device()
2263 if (domain->type == IOMMU_DOMAIN_IDENTITY) amd_iommu_add_device()
2297 * finds the corresponding IOMMU, the protection domain and the
2299 * If the device is not yet associated with a domain this is also done
2304 struct protection_domain *domain; get_domain() local
2314 domain = to_pdomain(io_domain); get_domain()
2315 if (!dma_ops_domain(domain)) get_domain()
2318 return domain; get_domain()
2321 static void update_device_table(struct protection_domain *domain) update_device_table() argument
2325 list_for_each_entry(dev_data, &domain->dev_list, list) update_device_table()
2326 set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); update_device_table()
2329 static void update_domain(struct protection_domain *domain) update_domain() argument
2331 if (!domain->updated) update_domain()
2334 update_device_table(domain); update_domain()
2336 domain_flush_devices(domain); update_domain()
2337 domain_flush_tlb_pde(domain); update_domain()
2339 domain->updated = false; update_domain()
2357 pte = alloc_pte(&dom->domain, address, PAGE_SIZE, &pte_page, dma_ops_get_pte()
2363 update_domain(&dom->domain); dma_ops_get_pte()
2370 * the given address in the DMA address space for the domain.
2434 * Must be called with the domain lock held.
2496 domain_flush_tlb(&dma_dom->domain); __map_single()
2499 domain_flush_pages(&dma_dom->domain, address, size); __map_single()
2518 * the domain lock held too
2548 domain_flush_pages(&dma_dom->domain, flush_addr, size); __unmap_single()
2562 struct protection_domain *domain; map_page() local
2569 domain = get_domain(dev); map_page()
2570 if (PTR_ERR(domain) == -EINVAL) map_page()
2572 else if (IS_ERR(domain)) map_page()
2577 spin_lock_irqsave(&domain->lock, flags); map_page()
2579 addr = __map_single(dev, domain->priv, paddr, size, dir, false, map_page()
2584 domain_flush_complete(domain); map_page()
2587 spin_unlock_irqrestore(&domain->lock, flags); map_page()
2599 struct protection_domain *domain; unmap_page() local
2603 domain = get_domain(dev); unmap_page()
2604 if (IS_ERR(domain)) unmap_page()
2607 spin_lock_irqsave(&domain->lock, flags); unmap_page()
2609 __unmap_single(domain->priv, dma_addr, size, dir); unmap_page()
2611 domain_flush_complete(domain); unmap_page()
2613 spin_unlock_irqrestore(&domain->lock, flags); unmap_page()
2625 struct protection_domain *domain; map_sg() local
2634 domain = get_domain(dev); map_sg()
2635 if (IS_ERR(domain)) map_sg()
2640 spin_lock_irqsave(&domain->lock, flags); map_sg()
2645 s->dma_address = __map_single(dev, domain->priv, for_each_sg()
2656 domain_flush_complete(domain);
2659 spin_unlock_irqrestore(&domain->lock, flags);
2665 __unmap_single(domain->priv, s->dma_address, for_each_sg()
2684 struct protection_domain *domain; unmap_sg() local
2690 domain = get_domain(dev); unmap_sg()
2691 if (IS_ERR(domain)) unmap_sg()
2694 spin_lock_irqsave(&domain->lock, flags); unmap_sg()
2697 __unmap_single(domain->priv, s->dma_address, for_each_sg()
2702 domain_flush_complete(domain);
2704 spin_unlock_irqrestore(&domain->lock, flags);
2715 struct protection_domain *domain; alloc_coherent() local
2721 domain = get_domain(dev); alloc_coherent()
2722 if (PTR_ERR(domain) == -EINVAL) { alloc_coherent()
2726 } else if (IS_ERR(domain)) alloc_coherent()
2748 spin_lock_irqsave(&domain->lock, flags); alloc_coherent()
2750 *dma_addr = __map_single(dev, domain->priv, page_to_phys(page), alloc_coherent()
2754 spin_unlock_irqrestore(&domain->lock, flags); alloc_coherent()
2758 domain_flush_complete(domain); alloc_coherent()
2760 spin_unlock_irqrestore(&domain->lock, flags); alloc_coherent()
2779 struct protection_domain *domain; free_coherent() local
2788 domain = get_domain(dev); free_coherent()
2789 if (IS_ERR(domain)) free_coherent()
2792 spin_lock_irqsave(&domain->lock, flags); free_coherent()
2794 __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); free_coherent()
2796 domain_flush_complete(domain); free_coherent()
2798 spin_unlock_irqrestore(&domain->lock, flags); free_coherent()
2858 * like protection domain handling and assignment of devices to domains
2863 static void cleanup_domain(struct protection_domain *domain) cleanup_domain() argument
2870 while (!list_empty(&domain->dev_list)) { cleanup_domain()
2871 entry = list_first_entry(&domain->dev_list, cleanup_domain()
2879 static void protection_domain_free(struct protection_domain *domain) protection_domain_free() argument
2881 if (!domain) protection_domain_free()
2884 del_domain_from_list(domain); protection_domain_free()
2886 if (domain->id) protection_domain_free()
2887 domain_id_free(domain->id); protection_domain_free()
2889 kfree(domain); protection_domain_free()
2892 static int protection_domain_init(struct protection_domain *domain) protection_domain_init() argument
2894 spin_lock_init(&domain->lock); protection_domain_init()
2895 mutex_init(&domain->api_lock); protection_domain_init()
2896 domain->id = domain_id_alloc(); protection_domain_init()
2897 if (!domain->id) protection_domain_init()
2899 INIT_LIST_HEAD(&domain->dev_list); protection_domain_init()
2906 struct protection_domain *domain; protection_domain_alloc() local
2908 domain = kzalloc(sizeof(*domain), GFP_KERNEL); protection_domain_alloc()
2909 if (!domain) protection_domain_alloc()
2912 if (protection_domain_init(domain)) protection_domain_alloc()
2915 add_domain_to_list(domain); protection_domain_alloc()
2917 return domain; protection_domain_alloc()
2920 kfree(domain); protection_domain_alloc()
2943 pdomain->domain.geometry.aperture_start = 0; amd_iommu_domain_alloc()
2944 pdomain->domain.geometry.aperture_end = ~0ULL; amd_iommu_domain_alloc()
2945 pdomain->domain.geometry.force_aperture = true; amd_iommu_domain_alloc()
2954 pdomain = &dma_domain->domain; amd_iommu_domain_alloc()
2967 return &pdomain->domain; amd_iommu_domain_alloc()
2972 struct protection_domain *domain; amd_iommu_domain_free() local
2977 domain = to_pdomain(dom); amd_iommu_domain_free()
2979 if (domain->dev_cnt > 0) amd_iommu_domain_free()
2980 cleanup_domain(domain); amd_iommu_domain_free()
2982 BUG_ON(domain->dev_cnt != 0); amd_iommu_domain_free()
2984 if (domain->mode != PAGE_MODE_NONE) amd_iommu_domain_free()
2985 free_pagetable(domain); amd_iommu_domain_free()
2987 if (domain->flags & PD_IOMMUV2_MASK) amd_iommu_domain_free()
2988 free_gcr3_table(domain); amd_iommu_domain_free()
2990 protection_domain_free(domain); amd_iommu_domain_free()
3005 if (dev_data->domain != NULL) amd_iommu_detach_device()
3018 struct protection_domain *domain = to_pdomain(dom); amd_iommu_attach_device() local
3032 if (dev_data->domain) amd_iommu_attach_device()
3035 ret = attach_device(dev, domain); amd_iommu_attach_device()
3045 struct protection_domain *domain = to_pdomain(dom); amd_iommu_map() local
3049 if (domain->mode == PAGE_MODE_NONE) amd_iommu_map()
3057 mutex_lock(&domain->api_lock); amd_iommu_map()
3058 ret = iommu_map_page(domain, iova, paddr, prot, page_size); amd_iommu_map()
3059 mutex_unlock(&domain->api_lock); amd_iommu_map()
3067 struct protection_domain *domain = to_pdomain(dom); amd_iommu_unmap() local
3070 if (domain->mode == PAGE_MODE_NONE) amd_iommu_unmap()
3073 mutex_lock(&domain->api_lock); amd_iommu_unmap()
3074 unmap_size = iommu_unmap_page(domain, iova, page_size); amd_iommu_unmap()
3075 mutex_unlock(&domain->api_lock); amd_iommu_unmap()
3077 domain_flush_tlb_pde(domain); amd_iommu_unmap()
3085 struct protection_domain *domain = to_pdomain(dom); amd_iommu_iova_to_phys() local
3089 if (domain->mode == PAGE_MODE_NONE) amd_iommu_iova_to_phys()
3092 pte = fetch_pte(domain, iova, &pte_pgsize); amd_iommu_iova_to_phys()
3201 struct protection_domain *domain = to_pdomain(dom); amd_iommu_domain_direct_map() local
3204 spin_lock_irqsave(&domain->lock, flags); amd_iommu_domain_direct_map()
3207 domain->mode = PAGE_MODE_NONE; amd_iommu_domain_direct_map()
3208 domain->updated = true; amd_iommu_domain_direct_map()
3211 update_domain(domain); amd_iommu_domain_direct_map()
3214 free_pagetable(domain); amd_iommu_domain_direct_map()
3216 spin_unlock_irqrestore(&domain->lock, flags); amd_iommu_domain_direct_map()
3222 struct protection_domain *domain = to_pdomain(dom); amd_iommu_domain_enable_v2() local
3236 spin_lock_irqsave(&domain->lock, flags); amd_iommu_domain_enable_v2()
3240 * domain support IOMMUv2. Just force that the domain has no amd_iommu_domain_enable_v2()
3244 if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK) amd_iommu_domain_enable_v2()
3248 domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC); amd_iommu_domain_enable_v2()
3249 if (domain->gcr3_tbl == NULL) amd_iommu_domain_enable_v2()
3252 domain->glx = levels; amd_iommu_domain_enable_v2()
3253 domain->flags |= PD_IOMMUV2_MASK; amd_iommu_domain_enable_v2()
3254 domain->updated = true; amd_iommu_domain_enable_v2()
3256 update_domain(domain); amd_iommu_domain_enable_v2()
3261 spin_unlock_irqrestore(&domain->lock, flags); amd_iommu_domain_enable_v2()
3267 static int __flush_pasid(struct protection_domain *domain, int pasid, __flush_pasid() argument
3274 if (!(domain->flags & PD_IOMMUV2_MASK)) __flush_pasid()
3277 build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size); __flush_pasid()
3284 if (domain->dev_iommu[i] == 0) __flush_pasid()
3293 domain_flush_complete(domain); __flush_pasid()
3296 list_for_each_entry(dev_data, &domain->dev_list, list) { __flush_pasid()
3302 * domain. __flush_pasid()
3319 domain_flush_complete(domain); __flush_pasid()
3328 static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid, __amd_iommu_flush_page() argument
3333 return __flush_pasid(domain, pasid, address, false); __amd_iommu_flush_page()
3339 struct protection_domain *domain = to_pdomain(dom); amd_iommu_flush_page() local
3343 spin_lock_irqsave(&domain->lock, flags); amd_iommu_flush_page()
3344 ret = __amd_iommu_flush_page(domain, pasid, address); amd_iommu_flush_page()
3345 spin_unlock_irqrestore(&domain->lock, flags); amd_iommu_flush_page()
3351 static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid) __amd_iommu_flush_tlb() argument
3355 return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, __amd_iommu_flush_tlb()
3361 struct protection_domain *domain = to_pdomain(dom); amd_iommu_flush_tlb() local
3365 spin_lock_irqsave(&domain->lock, flags); amd_iommu_flush_tlb()
3366 ret = __amd_iommu_flush_tlb(domain, pasid); amd_iommu_flush_tlb()
3367 spin_unlock_irqrestore(&domain->lock, flags); amd_iommu_flush_tlb()
3405 static int __set_gcr3(struct protection_domain *domain, int pasid, __set_gcr3() argument
3410 if (domain->mode != PAGE_MODE_NONE) __set_gcr3()
3413 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true); __set_gcr3()
3419 return __amd_iommu_flush_tlb(domain, pasid); __set_gcr3()
3422 static int __clear_gcr3(struct protection_domain *domain, int pasid) __clear_gcr3() argument
3426 if (domain->mode != PAGE_MODE_NONE) __clear_gcr3()
3429 pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false); __clear_gcr3()
3435 return __amd_iommu_flush_tlb(domain, pasid); __clear_gcr3()
3441 struct protection_domain *domain = to_pdomain(dom); amd_iommu_domain_set_gcr3() local
3445 spin_lock_irqsave(&domain->lock, flags); amd_iommu_domain_set_gcr3()
3446 ret = __set_gcr3(domain, pasid, cr3); amd_iommu_domain_set_gcr3()
3447 spin_unlock_irqrestore(&domain->lock, flags); amd_iommu_domain_set_gcr3()
3455 struct protection_domain *domain = to_pdomain(dom); amd_iommu_domain_clear_gcr3() local
3459 spin_lock_irqsave(&domain->lock, flags); amd_iommu_domain_clear_gcr3()
3460 ret = __clear_gcr3(domain, pasid); amd_iommu_domain_clear_gcr3()
3461 spin_unlock_irqrestore(&domain->lock, flags); amd_iommu_domain_clear_gcr3()
3498 return &pdomain->domain; amd_iommu_get_v2_domain()
3898 static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq, irq_remapping_alloc() argument
3925 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); irq_remapping_alloc()
3943 irq_data = irq_domain_get_irq_data(domain, virq + i); irq_remapping_alloc()
3966 irq_data = irq_domain_get_irq_data(domain, virq + i); irq_remapping_alloc()
3973 irq_domain_free_irqs_common(domain, virq, nr_irqs); irq_remapping_alloc()
3977 static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, irq_remapping_free() argument
3986 irq_data = irq_domain_get_irq_data(domain, virq + i); irq_remapping_free()
3994 irq_domain_free_irqs_common(domain, virq, nr_irqs); irq_remapping_free()
3997 static void irq_remapping_activate(struct irq_domain *domain, irq_remapping_activate() argument
4006 static void irq_remapping_deactivate(struct irq_domain *domain, irq_remapping_deactivate() argument
1951 do_attach(struct iommu_dev_data *dev_data, struct protection_domain *domain) do_attach() argument
2014 __attach_device(struct iommu_dev_data *dev_data, struct protection_domain *domain) __attach_device() argument
2136 attach_device(struct device *dev, struct protection_domain *domain) attach_device() argument
H A Ds390-iommu.c23 struct iommu_domain domain; member in struct:s390_domain
37 return container_of(dom, struct s390_domain, domain); to_s390_domain()
73 return &s390_domain->domain; s390_domain_alloc()
76 void s390_domain_free(struct iommu_domain *domain) s390_domain_free() argument
78 struct s390_domain *s390_domain = to_s390_domain(domain); s390_domain_free()
84 static int s390_iommu_attach_device(struct iommu_domain *domain, s390_iommu_attach_device() argument
87 struct s390_domain *s390_domain = to_s390_domain(domain); s390_iommu_attach_device()
113 domain->geometry.aperture_start = zdev->start_dma; s390_iommu_attach_device()
114 domain->geometry.aperture_end = zdev->end_dma; s390_iommu_attach_device()
115 domain->geometry.force_aperture = true; s390_iommu_attach_device()
117 } else if (domain->geometry.aperture_start != zdev->start_dma || s390_iommu_attach_device()
118 domain->geometry.aperture_end != zdev->end_dma) { s390_iommu_attach_device()
137 static void s390_iommu_detach_device(struct iommu_domain *domain, s390_iommu_detach_device() argument
140 struct s390_domain *s390_domain = to_s390_domain(domain); s390_iommu_detach_device()
189 struct iommu_domain *domain; s390_iommu_remove_device() local
203 domain = iommu_get_domain_for_dev(dev); s390_iommu_remove_device()
204 if (domain) s390_iommu_remove_device()
205 s390_iommu_detach_device(domain, dev); s390_iommu_remove_device()
222 if (dma_addr < s390_domain->domain.geometry.aperture_start || s390_iommu_update_trans()
223 dma_addr + size > s390_domain->domain.geometry.aperture_end) s390_iommu_update_trans()
269 static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, s390_iommu_map() argument
272 struct s390_domain *s390_domain = to_s390_domain(domain); s390_iommu_map()
287 static phys_addr_t s390_iommu_iova_to_phys(struct iommu_domain *domain, s390_iommu_iova_to_phys() argument
290 struct s390_domain *s390_domain = to_s390_domain(domain); s390_iommu_iova_to_phys()
295 if (iova < domain->geometry.aperture_start || s390_iommu_iova_to_phys()
296 iova > domain->geometry.aperture_end) s390_iommu_iova_to_phys()
318 static size_t s390_iommu_unmap(struct iommu_domain *domain, s390_iommu_unmap() argument
321 struct s390_domain *s390_domain = to_s390_domain(domain); s390_iommu_unmap()
326 paddr = s390_iommu_iova_to_phys(domain, iova); s390_iommu_unmap()
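Every driver in these results embeds the generic struct iommu_domain inside a private domain struct and converts back with container_of(), exactly as to_s390_domain() does above. A hedged sketch of the idiom (the demo_* names are not from any driver):

#include <linux/kernel.h>
#include <linux/iommu.h>

struct demo_domain {
	unsigned long *page_table;	/* driver-private state */
	struct iommu_domain domain;	/* embedded generic handle */
};

static struct demo_domain *to_demo_domain(struct iommu_domain *dom)
{
	/* Recover the private struct from the generic pointer that the
	 * core passes to every iommu_ops callback. */
	return container_of(dom, struct demo_domain, domain);
}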
H A Dfsl_pamu_domain.h33 * Indicates the geometry size for the domain.
35 * configured for the domain.
39 * Number of windows associated with this domain.
40 * During domain initialization, it is set to the
50 * windows for a domain. This is allocated only
51 * when the number of windows for the domain is
55 /* list of devices associated with the domain */
61 * domain. This translates to setting of the
78 /* domain-device relationship */
80 struct list_head link; /* link to domain siblings */
83 struct fsl_dma_domain *domain; /* pointer to domain */ member in struct:device_domain_info
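fsl_pamu_domain.h and intel-iommu.c keep the same domain/device bookkeeping: a per-device info struct carries a list_head linking it to its domain siblings plus a back-pointer to the owning domain. A minimal sketch under that assumption, with illustrative names:

#include <linux/list.h>

struct demo_domain {
	struct list_head devices;	/* all attached demo_dev_info */
};

struct demo_dev_info {
	struct list_head link;		/* link to domain siblings */
	struct demo_domain *domain;	/* pointer to owning domain */
};

static void demo_track_device(struct demo_domain *d,
			      struct demo_dev_info *info)
{
	info->domain = d;
	list_add(&info->link, &d->devices);
}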
H A Dexynos-iommu.c201 * been attached to this domain and page tables of IO address space defined by
202 * it. It is usually referenced by 'domain' pointer.
210 struct iommu_domain domain; /* generic domain data structure */ member in struct:exynos_iommu_domain
227 struct exynos_iommu_domain *domain; /* domain we belong to */ member in struct:sysmmu_drvdata
228 struct list_head domain_node; /* node for domain clients list */
236 return container_of(dom, struct exynos_iommu_domain, domain); to_exynos_domain()
358 if (data->domain) exynos_sysmmu_irq()
359 ret = report_iommu_fault(&data->domain->domain, exynos_sysmmu_irq()
402 data->domain = NULL; __sysmmu_disable()
458 struct exynos_iommu_domain *domain) __sysmmu_enable()
466 data->domain = domain; __sysmmu_enable()
662 struct exynos_iommu_domain *domain; exynos_iommu_domain_alloc() local
668 domain = kzalloc(sizeof(*domain), GFP_KERNEL); exynos_iommu_domain_alloc()
669 if (!domain) exynos_iommu_domain_alloc()
672 domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); exynos_iommu_domain_alloc()
673 if (!domain->pgtable) exynos_iommu_domain_alloc()
676 domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); exynos_iommu_domain_alloc()
677 if (!domain->lv2entcnt) exynos_iommu_domain_alloc()
682 domain->pgtable[i + 0] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
683 domain->pgtable[i + 1] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
684 domain->pgtable[i + 2] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
685 domain->pgtable[i + 3] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
686 domain->pgtable[i + 4] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
687 domain->pgtable[i + 5] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
688 domain->pgtable[i + 6] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
689 domain->pgtable[i + 7] = ZERO_LV2LINK; exynos_iommu_domain_alloc()
692 pgtable_flush(domain->pgtable, domain->pgtable + NUM_LV1ENTRIES); exynos_iommu_domain_alloc()
694 spin_lock_init(&domain->lock); exynos_iommu_domain_alloc()
695 spin_lock_init(&domain->pgtablelock); exynos_iommu_domain_alloc()
696 INIT_LIST_HEAD(&domain->clients); exynos_iommu_domain_alloc()
698 domain->domain.geometry.aperture_start = 0; exynos_iommu_domain_alloc()
699 domain->domain.geometry.aperture_end = ~0UL; exynos_iommu_domain_alloc()
700 domain->domain.geometry.force_aperture = true; exynos_iommu_domain_alloc()
702 return &domain->domain; exynos_iommu_domain_alloc()
705 free_pages((unsigned long)domain->pgtable, 2); exynos_iommu_domain_alloc()
707 kfree(domain); exynos_iommu_domain_alloc()
713 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); exynos_iommu_domain_free() local
718 WARN_ON(!list_empty(&domain->clients)); exynos_iommu_domain_free()
720 spin_lock_irqsave(&domain->lock, flags); exynos_iommu_domain_free()
722 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { exynos_iommu_domain_free()
728 spin_unlock_irqrestore(&domain->lock, flags); exynos_iommu_domain_free()
731 if (lv1ent_page(domain->pgtable + i)) exynos_iommu_domain_free()
733 phys_to_virt(lv2table_base(domain->pgtable + i))); exynos_iommu_domain_free()
735 free_pages((unsigned long)domain->pgtable, 2); exynos_iommu_domain_free()
736 free_pages((unsigned long)domain->lv2entcnt, 1); exynos_iommu_domain_free()
737 kfree(domain); exynos_iommu_domain_free()
744 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); exynos_iommu_attach_device() local
746 phys_addr_t pagetable = virt_to_phys(domain->pgtable); exynos_iommu_attach_device()
755 ret = __sysmmu_enable(data, pagetable, domain); exynos_iommu_attach_device()
759 spin_lock_irqsave(&domain->lock, flags); exynos_iommu_attach_device()
760 list_add_tail(&data->domain_node, &domain->clients); exynos_iommu_attach_device()
761 spin_unlock_irqrestore(&domain->lock, flags); exynos_iommu_attach_device()
780 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); exynos_iommu_detach_device() local
781 phys_addr_t pagetable = virt_to_phys(domain->pgtable); exynos_iommu_detach_device()
789 spin_lock_irqsave(&domain->lock, flags); exynos_iommu_detach_device()
790 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { exynos_iommu_detach_device()
800 spin_unlock_irqrestore(&domain->lock, flags); exynos_iommu_detach_device()
809 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, alloc_lv2entry() argument
852 spin_lock(&domain->lock); alloc_lv2entry()
853 list_for_each_entry(data, &domain->clients, domain_node) alloc_lv2entry()
855 spin_unlock(&domain->lock); alloc_lv2entry()
862 static int lv1set_section(struct exynos_iommu_domain *domain, lv1set_section() argument
887 spin_lock(&domain->lock); lv1set_section()
894 list_for_each_entry(data, &domain->clients, domain_node) lv1set_section()
897 spin_unlock(&domain->lock); lv1set_section()
961 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); exynos_iommu_map() local
967 BUG_ON(domain->pgtable == NULL); exynos_iommu_map()
969 spin_lock_irqsave(&domain->pgtablelock, flags); exynos_iommu_map()
971 entry = section_entry(domain->pgtable, iova); exynos_iommu_map()
974 ret = lv1set_section(domain, entry, iova, paddr, exynos_iommu_map()
975 &domain->lv2entcnt[lv1ent_offset(iova)]); exynos_iommu_map()
979 pent = alloc_lv2entry(domain, entry, iova, exynos_iommu_map()
980 &domain->lv2entcnt[lv1ent_offset(iova)]); exynos_iommu_map()
986 &domain->lv2entcnt[lv1ent_offset(iova)]); exynos_iommu_map()
993 spin_unlock_irqrestore(&domain->pgtablelock, flags); exynos_iommu_map()
998 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, exynos_iommu_tlb_invalidate_entry() argument
1004 spin_lock_irqsave(&domain->lock, flags); exynos_iommu_tlb_invalidate_entry()
1006 list_for_each_entry(data, &domain->clients, domain_node) exynos_iommu_tlb_invalidate_entry()
1009 spin_unlock_irqrestore(&domain->lock, flags); exynos_iommu_tlb_invalidate_entry()
1015 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); exynos_iommu_unmap() local
1021 BUG_ON(domain->pgtable == NULL); exynos_iommu_unmap()
1023 spin_lock_irqsave(&domain->pgtablelock, flags); exynos_iommu_unmap()
1025 ent = section_entry(domain->pgtable, iova); exynos_iommu_unmap()
1059 domain->lv2entcnt[lv1ent_offset(iova)] += 1; exynos_iommu_unmap()
1073 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; exynos_iommu_unmap()
1075 spin_unlock_irqrestore(&domain->pgtablelock, flags); exynos_iommu_unmap()
1077 exynos_iommu_tlb_invalidate_entry(domain, iova, size); exynos_iommu_unmap()
1081 spin_unlock_irqrestore(&domain->pgtablelock, flags); exynos_iommu_unmap()
1092 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); exynos_iommu_iova_to_phys() local
1097 spin_lock_irqsave(&domain->pgtablelock, flags); exynos_iommu_iova_to_phys()
1099 entry = section_entry(domain->pgtable, iova); exynos_iommu_iova_to_phys()
1112 spin_unlock_irqrestore(&domain->pgtablelock, flags); exynos_iommu_iova_to_phys()
457 __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable, struct exynos_iommu_domain *domain) __sysmmu_enable() argument
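The exynos allocation path above follows the domain_alloc shape that msm, omap and tegra-gart repeat further down: allocate the private struct, initialise its locks and client list, describe the aperture, and return the embedded generic handle. A hedged sketch of that flow (demo_* names are illustrative):

#include <linux/iommu.h>
#include <linux/slab.h>

struct demo_domain {
	spinlock_t lock;
	struct list_head clients;
	struct iommu_domain domain;
};

static struct iommu_domain *demo_domain_alloc(unsigned type)
{
	struct demo_domain *d;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return NULL;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->clients);

	/* Advertise a flat 32-bit aperture, as several drivers above do. */
	d->domain.geometry.aperture_start = 0;
	d->domain.geometry.aperture_end = (1ULL << 32) - 1;
	d->domain.geometry.force_aperture = true;

	return &d->domain;
}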
H A Diommu.c55 struct iommu_domain *domain; member in struct:iommu_group
82 static int __iommu_attach_device(struct iommu_domain *domain,
84 static int __iommu_attach_group(struct iommu_domain *domain,
86 static void __iommu_detach_group(struct iommu_domain *domain,
331 struct iommu_domain *domain = group->default_domain; iommu_group_create_direct_mappings() local
337 if (!domain || domain->type != IOMMU_DOMAIN_DMA) iommu_group_create_direct_mappings()
340 BUG_ON(!domain->ops->pgsize_bitmap); iommu_group_create_direct_mappings()
342 pg_size = 1UL << __ffs(domain->ops->pgsize_bitmap); iommu_group_create_direct_mappings()
357 phys_addr = iommu_iova_to_phys(domain, addr); iommu_group_create_direct_mappings()
361 ret = iommu_map(domain, addr, addr, pg_size, entry->prot); iommu_group_create_direct_mappings()
434 if (group->domain) iommu_group_add_device()
435 __iommu_attach_device(group->domain, dev); iommu_group_add_device()
845 * Try to allocate a default domain - needs support from the iommu_group_get_for_dev()
851 if (!group->domain) iommu_group_get_for_dev()
852 group->domain = group->default_domain; iommu_group_get_for_dev()
1040 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1041 * @domain: iommu domain
1051 void iommu_set_fault_handler(struct iommu_domain *domain, iommu_set_fault_handler() argument
1055 BUG_ON(!domain); iommu_set_fault_handler()
1057 domain->handler = handler; iommu_set_fault_handler()
1058 domain->handler_token = token; iommu_set_fault_handler()
1065 struct iommu_domain *domain; __iommu_domain_alloc() local
1070 domain = bus->iommu_ops->domain_alloc(type); __iommu_domain_alloc()
1071 if (!domain) __iommu_domain_alloc()
1074 domain->ops = bus->iommu_ops; __iommu_domain_alloc()
1075 domain->type = type; __iommu_domain_alloc()
1077 return domain; __iommu_domain_alloc()
1086 void iommu_domain_free(struct iommu_domain *domain) iommu_domain_free() argument
1088 domain->ops->domain_free(domain); iommu_domain_free()
1092 static int __iommu_attach_device(struct iommu_domain *domain, __iommu_attach_device() argument
1096 if (unlikely(domain->ops->attach_dev == NULL)) __iommu_attach_device()
1099 ret = domain->ops->attach_dev(domain, dev); __iommu_attach_device()
1105 int iommu_attach_device(struct iommu_domain *domain, struct device *dev) iommu_attach_device() argument
1113 return __iommu_attach_device(domain, dev); iommu_attach_device()
1124 ret = __iommu_attach_group(domain, group); iommu_attach_device()
1134 static void __iommu_detach_device(struct iommu_domain *domain, __iommu_detach_device() argument
1137 if (unlikely(domain->ops->detach_dev == NULL)) __iommu_detach_device()
1140 domain->ops->detach_dev(domain, dev); __iommu_detach_device()
1144 void iommu_detach_device(struct iommu_domain *domain, struct device *dev) iommu_detach_device() argument
1151 return __iommu_detach_device(domain, dev); iommu_detach_device()
1159 __iommu_detach_group(domain, group); iommu_detach_device()
1169 struct iommu_domain *domain; iommu_get_domain_for_dev() local
1177 domain = group->domain; iommu_get_domain_for_dev()
1181 return domain; iommu_get_domain_for_dev()
1197 struct iommu_domain *domain = data; iommu_group_do_attach_device() local
1199 return __iommu_attach_device(domain, dev); iommu_group_do_attach_device()
1202 static int __iommu_attach_group(struct iommu_domain *domain, __iommu_attach_group() argument
1207 if (group->default_domain && group->domain != group->default_domain) __iommu_attach_group()
1210 ret = __iommu_group_for_each_dev(group, domain, __iommu_attach_group()
1213 group->domain = domain; __iommu_attach_group()
1218 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) iommu_attach_group() argument
1223 ret = __iommu_attach_group(domain, group); iommu_attach_group()
1232 struct iommu_domain *domain = data; iommu_group_do_detach_device() local
1234 __iommu_detach_device(domain, dev); iommu_group_do_detach_device()
1239 static void __iommu_detach_group(struct iommu_domain *domain, __iommu_detach_group() argument
1245 __iommu_group_for_each_dev(group, domain, __iommu_detach_group()
1247 group->domain = NULL; __iommu_detach_group()
1251 if (group->domain == group->default_domain) __iommu_detach_group()
1254 /* Detach by re-attaching to the default domain */ __iommu_detach_group()
1260 group->domain = group->default_domain; __iommu_detach_group()
1263 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) iommu_detach_group() argument
1266 __iommu_detach_group(domain, group); iommu_detach_group()
1271 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) iommu_iova_to_phys() argument
1273 if (unlikely(domain->ops->iova_to_phys == NULL)) iommu_iova_to_phys()
1276 return domain->ops->iova_to_phys(domain, iova); iommu_iova_to_phys()
1280 static size_t iommu_pgsize(struct iommu_domain *domain, iommu_pgsize() argument
1300 pgsize &= domain->ops->pgsize_bitmap; iommu_pgsize()
1312 int iommu_map(struct iommu_domain *domain, unsigned long iova, iommu_map() argument
1320 if (unlikely(domain->ops->map == NULL || iommu_map()
1321 domain->ops->pgsize_bitmap == 0UL)) iommu_map()
1324 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) iommu_map()
1328 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); iommu_map()
1344 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); iommu_map()
1349 ret = domain->ops->map(domain, iova, paddr, pgsize, prot); iommu_map()
1360 iommu_unmap(domain, orig_iova, orig_size - size); iommu_map()
1368 size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) iommu_unmap() argument
1374 if (unlikely(domain->ops->unmap == NULL || iommu_unmap()
1375 domain->ops->pgsize_bitmap == 0UL)) iommu_unmap()
1378 if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) iommu_unmap()
1382 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); iommu_unmap()
1402 size_t pgsize = iommu_pgsize(domain, iova, size - unmapped); iommu_unmap()
1404 unmapped_page = domain->ops->unmap(domain, iova, pgsize); iommu_unmap()
1420 size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, default_iommu_map_sg() argument
1428 if (unlikely(domain->ops->pgsize_bitmap == 0UL)) default_iommu_map_sg()
1431 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); default_iommu_map_sg()
1445 ret = iommu_map(domain, iova + mapped, phys, s->length, prot); for_each_sg()
1456 iommu_unmap(domain, iova, mapped);
1463 int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, iommu_domain_window_enable() argument
1466 if (unlikely(domain->ops->domain_window_enable == NULL)) iommu_domain_window_enable()
1469 return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size, iommu_domain_window_enable()
1474 void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr) iommu_domain_window_disable() argument
1476 if (unlikely(domain->ops->domain_window_disable == NULL)) iommu_domain_window_disable()
1479 return domain->ops->domain_window_disable(domain, wnd_nr); iommu_domain_window_disable()
1496 int iommu_domain_get_attr(struct iommu_domain *domain, iommu_domain_get_attr() argument
1507 *geometry = domain->geometry; iommu_domain_get_attr()
1512 *paging = (domain->ops->pgsize_bitmap != 0UL); iommu_domain_get_attr()
1517 if (domain->ops->domain_get_windows != NULL) iommu_domain_get_attr()
1518 *count = domain->ops->domain_get_windows(domain); iommu_domain_get_attr()
1524 if (!domain->ops->domain_get_attr) iommu_domain_get_attr()
1527 ret = domain->ops->domain_get_attr(domain, attr, data); iommu_domain_get_attr()
1534 int iommu_domain_set_attr(struct iommu_domain *domain, iommu_domain_set_attr() argument
1544 if (domain->ops->domain_set_windows != NULL) iommu_domain_set_attr()
1545 ret = domain->ops->domain_set_windows(domain, *count); iommu_domain_set_attr()
1551 if (domain->ops->domain_set_attr == NULL) iommu_domain_set_attr()
1554 ret = domain->ops->domain_set_attr(domain, attr, data); iommu_domain_set_attr()
1591 /* Check if the default domain is already direct mapped */ iommu_request_dm_for_dev()
1602 /* Allocate a direct mapped domain */ iommu_request_dm_for_dev()
1608 /* Attach the device to the domain */ iommu_request_dm_for_dev()
1615 /* Make the direct mapped domain the default for this group */ iommu_request_dm_for_dev()
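The iommu.c hits are the generic entry points the drivers below implement. A hedged usage sketch of the attach/map lifecycle, assuming dev sits on a bus with iommu_ops and that the example IOVA and size are valid for the platform:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

static int demo_use_iommu(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* Map one page at an arbitrary example IOVA, read/write. */
	ret = iommu_map(domain, 0x100000, paddr, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... device DMA through the mapping happens here ... */

	iommu_unmap(domain, 0x100000, SZ_4K);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}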
H A Ddma-iommu.c38 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
39 * @domain: IOMMU domain to prepare for DMA-API usage
42 * callback when domain->type == IOMMU_DOMAIN_DMA.
44 int iommu_get_dma_cookie(struct iommu_domain *domain) iommu_get_dma_cookie() argument
48 if (domain->iova_cookie) iommu_get_dma_cookie()
52 domain->iova_cookie = iovad; iommu_get_dma_cookie()
59 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
60 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
64 void iommu_put_dma_cookie(struct iommu_domain *domain) iommu_put_dma_cookie() argument
66 struct iova_domain *iovad = domain->iova_cookie; iommu_put_dma_cookie()
73 domain->iova_cookie = NULL; iommu_put_dma_cookie()
78 * iommu_dma_init_domain - Initialise a DMA mapping domain
79 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
85 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
88 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size) iommu_dma_init_domain() argument
90 struct iova_domain *iovad = domain->iova_cookie; iommu_dma_init_domain()
97 order = __ffs(domain->ops->pgsize_bitmap); iommu_dma_init_domain()
101 /* Check the domain allows at least some access to the device... */ iommu_dma_init_domain()
102 if (domain->geometry.force_aperture) { iommu_dma_init_domain()
103 if (base > domain->geometry.aperture_end || iommu_dma_init_domain()
104 base + size <= domain->geometry.aperture_start) { iommu_dma_init_domain()
110 domain->geometry.aperture_start >> order); iommu_dma_init_domain()
112 domain->geometry.aperture_end >> order); iommu_dma_init_domain()
115 /* All we can safely do with an existing domain is enlarge it */ iommu_dma_init_domain()
120 pr_warn("Incompatible range for DMA domain\n"); iommu_dma_init_domain()
162 * attribute to control this per-device, or at least per-domain... __alloc_iova()
168 static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr) __iommu_dma_unmap() argument
170 struct iova_domain *iovad = domain->iova_cookie; __iommu_dma_unmap()
180 size -= iommu_unmap(domain, pfn << shift, size); __iommu_dma_unmap()
285 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); iommu_dma_alloc() local
286 struct iova_domain *iovad = domain->iova_cookie; iommu_dma_alloc()
320 if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot) iommu_dma_alloc()
366 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); iommu_dma_map_page() local
367 struct iova_domain *iovad = domain->iova_cookie; iommu_dma_map_page()
377 if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) { iommu_dma_map_page()
443 struct iommu_domain *domain = iommu_get_domain_for_dev(dev); iommu_dma_map_sg() local
444 struct iova_domain *iovad = domain->iova_cookie; iommu_dma_map_sg()
494 if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
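The dma-iommu.c comments above describe a cookie lifecycle: acquire the IOVA allocator with iommu_get_dma_cookie(), size it with iommu_dma_init_domain(), and release it with iommu_put_dma_cookie(). A hedged sketch of that sequence; the base and size below are placeholders:

#include <linux/dma-iommu.h>
#include <linux/sizes.h>

static int demo_dma_domain_setup(struct iommu_domain *domain)
{
	int ret;

	ret = iommu_get_dma_cookie(domain);
	if (ret)
		return ret;

	/* Let the IOVA allocator cover [0, 1G); must fit the aperture. */
	ret = iommu_dma_init_domain(domain, 0, SZ_1G);
	if (ret)
		iommu_put_dma_cookie(domain);

	return ret;
}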
H A Dintel-iommu.c230 * 8-23: domain id
360 * This domain is a static identity-mapping domain.
361 * 1. This domain creates a static 1:1 mapping to all usable memory.
363 * 3. Each iommu maps to this domain if successful.
370 * across iommus may be owned in one domain, e.g. kvm guest.
377 #define for_each_domain_iommu(idx, domain) \
379 if (domain->iommu_refcnt[idx])
390 * domain ids are 16 bit wide according
394 struct iova_domain iovad; /* iovas that belong to this domain */
402 int flags; /* flags to find out type of domain */
412 struct iommu_domain domain; /* generic domain data structure for member in struct:dmar_domain
416 /* PCI domain-device relationship */
418 struct list_head link; /* link to domain siblings */
431 struct dmar_domain *domain; /* pointer to domain */ member in struct:device_domain_info
465 struct dmar_domain *domain[HIGH_WATER_MARK]; member in struct:deferred_flush_tables
480 static void domain_exit(struct dmar_domain *domain);
481 static void domain_remove_dev_info(struct dmar_domain *domain);
482 static void dmar_remove_one_dev_info(struct dmar_domain *domain,
487 static int domain_detach_iommu(struct dmar_domain *domain,
567 return container_of(dom, struct dmar_domain, domain); to_dmar_domain()
628 struct dmar_domain *domain) set_iommu_domain()
642 domains[did & 0xff] = domain; set_iommu_domain()
681 static inline int domain_type_is_vm(struct dmar_domain *domain) domain_type_is_vm() argument
683 return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE; domain_type_is_vm()
686 static inline int domain_type_is_si(struct dmar_domain *domain) domain_type_is_si() argument
688 return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY; domain_type_is_si()
691 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain) domain_type_is_vm_or_si() argument
693 return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE | domain_type_is_vm_or_si()
697 static inline int domain_pfn_supported(struct dmar_domain *domain, domain_pfn_supported() argument
700 int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; domain_pfn_supported()
738 /* This function only returns a single iommu in a domain */ domain_get_iommu()
739 static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) domain_get_iommu() argument
743 /* si_domain and vm domain should not get here. */ domain_get_iommu()
744 BUG_ON(domain_type_is_vm_or_si(domain)); domain_get_iommu()
745 for_each_domain_iommu(iommu_id, domain) domain_get_iommu()
754 static void domain_update_iommu_coherency(struct dmar_domain *domain) domain_update_iommu_coherency() argument
761 domain->iommu_coherency = 1; domain_update_iommu_coherency()
763 for_each_domain_iommu(i, domain) { for_each_domain_iommu()
766 domain->iommu_coherency = 0; for_each_domain_iommu()
777 domain->iommu_coherency = 0; for_each_active_iommu()
829 static void domain_update_iommu_cap(struct dmar_domain *domain) domain_update_iommu_cap() argument
831 domain_update_iommu_coherency(domain); domain_update_iommu_cap()
832 domain->iommu_snooping = domain_update_iommu_snooping(NULL); domain_update_iommu_cap()
833 domain->iommu_superpage = domain_update_iommu_superpage(NULL); domain_update_iommu_cap()
930 static void domain_flush_cache(struct dmar_domain *domain, domain_flush_cache() argument
933 if (!domain->iommu_coherency) domain_flush_cache()
994 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, pfn_to_dma_pte() argument
998 int level = agaw_to_level(domain->agaw); pfn_to_dma_pte()
1001 BUG_ON(!domain->pgd); pfn_to_dma_pte()
1003 if (!domain_pfn_supported(domain, pfn)) pfn_to_dma_pte()
1007 parent = domain->pgd; pfn_to_dma_pte()
1022 tmp_page = alloc_pgtable_page(domain->nid); pfn_to_dma_pte()
1027 domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pfn_to_dma_pte()
1033 domain_flush_cache(domain, pte, sizeof(*pte)); pfn_to_dma_pte()
1050 static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, dma_pfn_level_pte() argument
1055 int total = agaw_to_level(domain->agaw); dma_pfn_level_pte()
1058 parent = domain->pgd; dma_pfn_level_pte()
1082 static void dma_pte_clear_range(struct dmar_domain *domain, dma_pte_clear_range() argument
1089 BUG_ON(!domain_pfn_supported(domain, start_pfn)); dma_pte_clear_range()
1090 BUG_ON(!domain_pfn_supported(domain, last_pfn)); dma_pte_clear_range()
1096 first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page); dma_pte_clear_range()
1107 domain_flush_cache(domain, first_pte, dma_pte_clear_range()
1113 static void dma_pte_free_level(struct dmar_domain *domain, int level, dma_pte_free_level() argument
1131 dma_pte_free_level(domain, level - 1, level_pte, dma_pte_free_level()
1138 domain_flush_cache(domain, pte, sizeof(*pte)); dma_pte_free_level()
1147 static void dma_pte_free_pagetable(struct dmar_domain *domain, dma_pte_free_pagetable() argument
1151 BUG_ON(!domain_pfn_supported(domain, start_pfn)); dma_pte_free_pagetable()
1152 BUG_ON(!domain_pfn_supported(domain, last_pfn)); dma_pte_free_pagetable()
1155 dma_pte_clear_range(domain, start_pfn, last_pfn); dma_pte_free_pagetable()
1158 dma_pte_free_level(domain, agaw_to_level(domain->agaw), dma_pte_free_pagetable()
1159 domain->pgd, 0, start_pfn, last_pfn); dma_pte_free_pagetable()
1162 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { dma_pte_free_pagetable()
1163 free_pgtable_page(domain->pgd); dma_pte_free_pagetable()
1164 domain->pgd = NULL; dma_pte_free_pagetable()
1174 static struct page *dma_pte_list_pagetables(struct dmar_domain *domain, dma_pte_list_pagetables() argument
1190 freelist = dma_pte_list_pagetables(domain, level - 1, dma_pte_list_pagetables()
1198 static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level, dma_pte_clear_level() argument
1223 freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist); dma_pte_clear_level()
1231 freelist = dma_pte_clear_level(domain, level - 1, dma_pte_clear_level()
1241 domain_flush_cache(domain, first_pte, dma_pte_clear_level()
1250 static struct page *domain_unmap(struct dmar_domain *domain, domain_unmap() argument
1256 BUG_ON(!domain_pfn_supported(domain, start_pfn)); domain_unmap()
1257 BUG_ON(!domain_pfn_supported(domain, last_pfn)); domain_unmap()
1261 freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), domain_unmap()
1262 domain->pgd, 0, start_pfn, last_pfn, NULL); domain_unmap()
1265 if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { domain_unmap()
1266 struct page *pgd_page = virt_to_page(domain->pgd); domain_unmap()
1270 domain->pgd = NULL; domain_unmap()
1440 iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu, iommu_support_dev_iotlb() argument
1450 list_for_each_entry(info, &domain->devices, link) iommu_support_dev_iotlb()
1513 static void iommu_flush_dev_iotlb(struct dmar_domain *domain, iommu_flush_dev_iotlb() argument
1521 list_for_each_entry(info, &domain->devices, link) { iommu_flush_dev_iotlb()
1533 struct dmar_domain *domain, iommu_flush_iotlb_psi()
1539 u16 did = domain->iommu_did[iommu->seq_id]; iommu_flush_iotlb_psi()
1546 * Fallback to domain selective flush if no PSI support or the size is iommu_flush_iotlb_psi()
1631 pr_err("%s: Allocating domain id array failed\n", iommu_init_domains()
1645 pr_err("%s: Allocating domain array failed\n", iommu_init_domains()
1658 * with domain-id 0, hence we need to pre-allocate it. We also iommu_init_domains()
1659 * use domain-id 0 as a marker for non-allocated domain-id, so iommu_init_domains()
1660 * make sure it is not used for a real domain. iommu_init_domains()
1677 struct dmar_domain *domain; disable_dmar_iommu() local
1682 if (!info->dev || !info->domain) disable_dmar_iommu()
1685 domain = info->domain; disable_dmar_iommu()
1687 dmar_remove_one_dev_info(domain, info->dev); disable_dmar_iommu()
1689 if (!domain_type_is_vm_or_si(domain)) disable_dmar_iommu()
1690 domain_exit(domain); disable_dmar_iommu()
1728 struct dmar_domain *domain; alloc_domain() local
1730 domain = alloc_domain_mem(); alloc_domain()
1731 if (!domain) alloc_domain()
1734 memset(domain, 0, sizeof(*domain)); alloc_domain()
1735 domain->nid = -1; alloc_domain()
1736 domain->flags = flags; alloc_domain()
1737 INIT_LIST_HEAD(&domain->devices); alloc_domain()
1739 return domain; alloc_domain()
1743 static int domain_attach_iommu(struct dmar_domain *domain, domain_attach_iommu() argument
1752 domain->iommu_refcnt[iommu->seq_id] += 1; domain_attach_iommu()
1753 domain->iommu_count += 1; domain_attach_iommu()
1754 if (domain->iommu_refcnt[iommu->seq_id] == 1) { domain_attach_iommu()
1759 pr_err("%s: No free domain ids\n", iommu->name); domain_attach_iommu()
1760 domain->iommu_refcnt[iommu->seq_id] -= 1; domain_attach_iommu()
1761 domain->iommu_count -= 1; domain_attach_iommu()
1766 set_iommu_domain(iommu, num, domain); domain_attach_iommu()
1768 domain->iommu_did[iommu->seq_id] = num; domain_attach_iommu()
1769 domain->nid = iommu->node; domain_attach_iommu()
1771 domain_update_iommu_cap(domain); domain_attach_iommu()
1777 static int domain_detach_iommu(struct dmar_domain *domain, domain_detach_iommu() argument
1785 domain->iommu_refcnt[iommu->seq_id] -= 1; domain_detach_iommu()
1786 count = --domain->iommu_count; domain_detach_iommu()
1787 if (domain->iommu_refcnt[iommu->seq_id] == 0) { domain_detach_iommu()
1788 num = domain->iommu_did[iommu->seq_id]; domain_detach_iommu()
1792 domain_update_iommu_cap(domain); domain_detach_iommu()
1793 domain->iommu_did[iommu->seq_id] = 0; domain_detach_iommu()
1842 static void domain_reserve_special_ranges(struct dmar_domain *domain) domain_reserve_special_ranges() argument
1844 copy_reserved_iova(&reserved_iova_list, &domain->iovad); domain_reserve_special_ranges()
1861 static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu, domain_init() argument
1867 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, domain_init()
1869 domain_reserve_special_ranges(domain); domain_init()
1874 domain->gaw = guest_width; domain_init()
1885 domain->agaw = agaw; domain_init()
1888 domain->iommu_coherency = 1; domain_init()
1890 domain->iommu_coherency = 0; domain_init()
1893 domain->iommu_snooping = 1; domain_init()
1895 domain->iommu_snooping = 0; domain_init()
1898 domain->iommu_superpage = fls(cap_super_page_val(iommu->cap)); domain_init()
1900 domain->iommu_superpage = 0; domain_init()
1902 domain->nid = iommu->node; domain_init()
1905 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); domain_init()
1906 if (!domain->pgd) domain_init()
1908 __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); domain_init()
1912 static void domain_exit(struct dmar_domain *domain) domain_exit() argument
1917 if (!domain) domain_exit()
1920 /* Flush any lazy unmaps that may reference this domain */ domain_exit()
1926 domain_remove_dev_info(domain); domain_exit()
1930 put_iova_domain(&domain->iovad); domain_exit()
1932 freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); domain_exit()
1936 free_domain_mem(domain); domain_exit()
1939 static int domain_context_mapping_one(struct dmar_domain *domain, domain_context_mapping_one() argument
1943 u16 did = domain->iommu_did[iommu->seq_id]; domain_context_mapping_one()
1953 if (hw_pass_through && domain_type_is_si(domain)) domain_context_mapping_one()
1959 BUG_ON(!domain->pgd); domain_context_mapping_one()
1973 pgd = domain->pgd; domain_context_mapping_one()
1983 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { domain_context_mapping_one()
1990 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); domain_context_mapping_one()
2010 domain_flush_cache(domain, context, sizeof(*context)); domain_context_mapping_one()
2016 * domain #0, which we have to flush: domain_context_mapping_one()
2039 struct dmar_domain *domain; member in struct:domain_context_mapping_data
2048 return domain_context_mapping_one(data->domain, data->iommu, domain_context_mapping_cb()
2053 domain_context_mapping(struct dmar_domain *domain, struct device *dev) domain_context_mapping() argument
2064 return domain_context_mapping_one(domain, iommu, bus, devfn); domain_context_mapping()
2066 data.domain = domain; domain_context_mapping()
2106 static inline int hardware_largepage_caps(struct dmar_domain *domain, hardware_largepage_caps() argument
2114 support = domain->iommu_superpage; hardware_largepage_caps()
2133 static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, __domain_mapping() argument
2143 BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); __domain_mapping()
2167 largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); __domain_mapping()
2169 first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); __domain_mapping()
2186 dma_pte_free_pagetable(domain, iov_pfn, end_pfn); __domain_mapping()
2232 domain_flush_cache(domain, first_pte, __domain_mapping()
2243 static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, domain_sg_mapping() argument
2247 return __domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); domain_sg_mapping()
2250 static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, domain_pfn_mapping() argument
2254 return __domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); domain_pfn_mapping()
2277 static void domain_remove_dev_info(struct dmar_domain *domain) domain_remove_dev_info() argument
2283 list_for_each_entry_safe(info, tmp, &domain->devices, link) domain_remove_dev_info()
2296 /* No lock here, assumes no domain exit in normal case */ find_domain()
2299 return info->domain; find_domain()
2319 struct dmar_domain *domain) dmar_insert_one_dev_info()
2336 info->domain = domain; dmar_insert_one_dev_info()
2368 found = info2->domain; dmar_insert_one_dev_info()
2376 /* Caller must free the original domain */ dmar_insert_one_dev_info()
2381 ret = domain_attach_iommu(domain, iommu); dmar_insert_one_dev_info()
2390 list_add(&info->link, &domain->devices); dmar_insert_one_dev_info()
2396 if (dev && domain_context_mapping(domain, dev)) { dmar_insert_one_dev_info()
2398 dmar_remove_one_dev_info(domain, dev); dmar_insert_one_dev_info()
2402 return domain; dmar_insert_one_dev_info()
2411 /* domain is initialized */ get_domain_for_dev()
2415 struct dmar_domain *domain, *tmp; get_domain_for_dev() local
2421 domain = find_domain(dev); get_domain_for_dev()
2422 if (domain) get_domain_for_dev()
2423 return domain; get_domain_for_dev()
2442 domain = info->domain; get_domain_for_dev()
2446 /* DMA alias already has a domain, use it */ get_domain_for_dev()
2451 /* Allocate and initialize new domain for the device */ get_domain_for_dev()
2452 domain = alloc_domain(0); get_domain_for_dev()
2453 if (!domain) get_domain_for_dev()
2455 if (domain_init(domain, iommu, gaw)) { get_domain_for_dev()
2456 domain_exit(domain); get_domain_for_dev()
2463 dma_alias & 0xff, NULL, domain); get_domain_for_dev()
2465 if (!tmp || tmp != domain) { get_domain_for_dev()
2466 domain_exit(domain); get_domain_for_dev()
2467 domain = tmp; get_domain_for_dev()
2470 if (!domain) get_domain_for_dev()
2475 tmp = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); get_domain_for_dev()
2477 if (!tmp || tmp != domain) { get_domain_for_dev()
2478 domain_exit(domain); get_domain_for_dev()
2479 domain = tmp; get_domain_for_dev()
2482 return domain; get_domain_for_dev()
2485 static int iommu_domain_identity_map(struct dmar_domain *domain, iommu_domain_identity_map() argument
2492 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), iommu_domain_identity_map()
2503 dma_pte_clear_range(domain, first_vpfn, last_vpfn); iommu_domain_identity_map()
2505 return domain_pfn_mapping(domain, first_vpfn, first_vpfn, iommu_domain_identity_map()
2511 struct dmar_domain *domain, domain_prepare_identity_map()
2519 if (domain == si_domain && hw_pass_through) { domain_prepare_identity_map()
2537 if (end >> agaw_to_width(domain->agaw)) { domain_prepare_identity_map()
2540 agaw_to_width(domain->agaw), domain_prepare_identity_map()
2547 return iommu_domain_identity_map(domain, start, end); domain_prepare_identity_map()
2554 struct dmar_domain *domain; iommu_prepare_identity_map() local
2557 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); iommu_prepare_identity_map()
2558 if (!domain) iommu_prepare_identity_map()
2561 ret = domain_prepare_identity_map(dev, domain, start, end); iommu_prepare_identity_map()
2563 domain_exit(domain); iommu_prepare_identity_map()
2602 static int md_domain_init(struct dmar_domain *domain, int guest_width);
2617 pr_debug("Identity mapping domain allocated\n"); si_domain_init()
2646 return (info->domain == si_domain); identity_mapping()
2651 static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev) domain_add_dev_info() argument
2661 ndomain = dmar_insert_one_dev_info(iommu, bus, devfn, dev, domain); domain_add_dev_info()
2662 if (ndomain != domain) domain_add_dev_info()
2696 * a device with associated RMRRs will never be in a "passthrough" domain.
2747 * We want to start off with all devices in the 1:1 domain, and iommu_should_identity_map()
2760 * the 1:1 domain, just in _case_ one of their siblings turns out iommu_should_identity_map()
2778 * take them out of the 1:1 domain later. iommu_should_identity_map()
3225 * locate drhd for dev, alloc domain for dev
3226 * allocate free domain
3231 * init context with domain, translation etc
3303 struct dmar_domain *domain, intel_alloc_iova()
3309 dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); intel_alloc_iova()
3319 iova = alloc_iova(&domain->iovad, nrpages, intel_alloc_iova()
3324 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); intel_alloc_iova()
3337 struct dmar_domain *domain; __get_valid_domain_for_dev() local
3341 domain = get_domain_for_dev(dev, DEFAULT_DOMAIN_ADDRESS_WIDTH); __get_valid_domain_for_dev()
3342 if (!domain) { __get_valid_domain_for_dev()
3343 pr_err("Allocating domain for %s failed\n", __get_valid_domain_for_dev()
3348 /* We have a new domain - set up possible RMRRs for the device */ __get_valid_domain_for_dev()
3356 ret = domain_prepare_identity_map(dev, domain, for_each_rmrr_units()
3365 return domain;
3372 /* No lock here, assumes no domain exit in normal case */ get_valid_domain_for_dev()
3375 return info->domain; get_valid_domain_for_dev()
3427 struct dmar_domain *domain; __intel_map_single() local
3440 domain = get_valid_domain_for_dev(dev); __intel_map_single()
3441 if (!domain) __intel_map_single()
3444 iommu = domain_get_iommu(domain); __intel_map_single()
3447 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); __intel_map_single()
3466 ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo), __intel_map_single()
3473 iommu_flush_iotlb_psi(iommu, domain, __intel_map_single()
3485 __free_iova(&domain->iovad, iova); __intel_map_single()
3522 struct dmar_domain *domain = deferred_flush[i].domain[j]; flush_unmaps() local
3526 iommu_flush_iotlb_psi(iommu, domain, flush_unmaps()
3531 iommu_flush_dev_iotlb(deferred_flush[i].domain[j], flush_unmaps()
3534 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); flush_unmaps()
3567 deferred_flush[iommu_id].domain[next] = dom; add_unmap()
3582 struct dmar_domain *domain; intel_unmap() local
3591 domain = find_domain(dev); intel_unmap()
3592 BUG_ON(!domain); intel_unmap()
3594 iommu = domain_get_iommu(domain); intel_unmap()
3596 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); intel_unmap()
3607 freelist = domain_unmap(domain, start_pfn, last_pfn); intel_unmap()
3610 iommu_flush_iotlb_psi(iommu, domain, start_pfn, intel_unmap()
3613 __free_iova(&domain->iovad, iova); intel_unmap()
3616 add_unmap(domain, iova, freelist); intel_unmap()
3717 struct dmar_domain *domain; intel_map_sg() local
3730 domain = get_valid_domain_for_dev(dev); intel_map_sg()
3731 if (!domain) intel_map_sg()
3734 iommu = domain_get_iommu(domain); intel_map_sg()
3739 iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), intel_map_sg()
3758 ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); intel_map_sg()
3760 dma_pte_free_pagetable(domain, start_vpfn, intel_map_sg()
3762 __free_iova(&domain->iovad, iova); intel_map_sg()
3768 iommu_flush_iotlb_psi(iommu, domain, start_vpfn, size, 0, 1); intel_map_sg()
4403 * Added device is not attached to its DMAR domain here yet. That will happen
4410 struct dmar_domain *domain; device_notifier() local
4418 domain = find_domain(dev); device_notifier()
4419 if (!domain) device_notifier()
4422 dmar_remove_one_dev_info(domain, dev); device_notifier()
4423 if (!domain_type_is_vm_or_si(domain) && list_empty(&domain->devices)) device_notifier()
4424 domain_exit(domain); device_notifier()
4707 domain_detach_iommu(info->domain, iommu); __dmar_remove_one_dev_info()
4713 static void dmar_remove_one_dev_info(struct dmar_domain *domain, dmar_remove_one_dev_info() argument
4725 static int md_domain_init(struct dmar_domain *domain, int guest_width) md_domain_init() argument
4729 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, md_domain_init()
4731 domain_reserve_special_ranges(domain); md_domain_init()
4734 domain->gaw = guest_width; md_domain_init()
4736 domain->agaw = width_to_agaw(adjust_width); md_domain_init()
4738 domain->iommu_coherency = 0; md_domain_init()
4739 domain->iommu_snooping = 0; md_domain_init()
4740 domain->iommu_superpage = 0; md_domain_init()
4741 domain->max_addr = 0; md_domain_init()
4744 domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid); md_domain_init()
4745 if (!domain->pgd) md_domain_init()
4747 domain_flush_cache(domain, domain->pgd, PAGE_SIZE); md_domain_init()
4754 struct iommu_domain *domain; intel_iommu_domain_alloc() local
4771 domain = &dmar_domain->domain; intel_iommu_domain_alloc()
4772 domain->geometry.aperture_start = 0; intel_iommu_domain_alloc()
4773 domain->geometry.aperture_end = __DOMAIN_MAX_ADDR(dmar_domain->gaw); intel_iommu_domain_alloc()
4774 domain->geometry.force_aperture = true; intel_iommu_domain_alloc()
4776 return domain; intel_iommu_domain_alloc()
4779 static void intel_iommu_domain_free(struct iommu_domain *domain) intel_iommu_domain_free() argument
4781 domain_exit(to_dmar_domain(domain)); intel_iommu_domain_free()
4784 static int intel_iommu_attach_device(struct iommu_domain *domain, intel_iommu_attach_device() argument
4787 struct dmar_domain *dmar_domain = to_dmar_domain(domain); intel_iommu_attach_device()
4793 dev_warn(dev, "Device is ineligible for IOMMU domain attach due to platform RMRR requirement. Contact your platform vendor.\n"); intel_iommu_attach_device()
4848 static void intel_iommu_detach_device(struct iommu_domain *domain, intel_iommu_detach_device() argument
4851 dmar_remove_one_dev_info(to_dmar_domain(domain), dev); intel_iommu_detach_device()
4854 static int intel_iommu_map(struct iommu_domain *domain, intel_iommu_map() argument
4858 struct dmar_domain *dmar_domain = to_dmar_domain(domain); intel_iommu_map()
4892 static size_t intel_iommu_unmap(struct iommu_domain *domain, intel_iommu_unmap() argument
4895 struct dmar_domain *dmar_domain = to_dmar_domain(domain); intel_iommu_unmap()
4931 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, intel_iommu_iova_to_phys() argument
4934 struct dmar_domain *dmar_domain = to_dmar_domain(domain); intel_iommu_iova_to_phys()
4996 struct dmar_domain *domain; intel_iommu_enable_pasid() local
5001 domain = get_valid_domain_for_dev(sdev->dev); intel_iommu_enable_pasid()
5002 if (!domain) intel_iommu_enable_pasid()
5019 sdev->did = domain->iommu_did[iommu->seq_id]; intel_iommu_enable_pasid()
627 set_iommu_domain(struct intel_iommu *iommu, u16 did, struct dmar_domain *domain) set_iommu_domain() argument
1532 iommu_flush_iotlb_psi(struct intel_iommu *iommu, struct dmar_domain *domain, unsigned long pfn, unsigned int pages, int ih, int map) iommu_flush_iotlb_psi() argument
2316 dmar_insert_one_dev_info(struct intel_iommu *iommu, int bus, int devfn, struct device *dev, struct dmar_domain *domain) dmar_insert_one_dev_info() argument
2510 domain_prepare_identity_map(struct device *dev, struct dmar_domain *domain, unsigned long long start, unsigned long long end) domain_prepare_identity_map() argument
3302 intel_alloc_iova(struct device *dev, struct dmar_domain *domain, unsigned long nrpages, uint64_t dma_mask) intel_alloc_iova() argument
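A dmar_domain can span several IOMMUs, so domain_attach_iommu()/domain_detach_iommu() above keep a per-IOMMU refcount and only allocate a hardware domain id on the first attach behind each unit. A simplified sketch of that counting (MAX_UNITS and the trivial id allocator are stand-ins, and the real code runs under a lock):

#include <linux/types.h>

#define MAX_UNITS 64

struct demo_domain {
	unsigned int iommu_refcnt[MAX_UNITS];
	u16 iommu_did[MAX_UNITS];
	int iommu_count;
};

static u16 demo_alloc_did(int seq_id)
{
	/* Stand-in for the real bitmap-based domain-id allocator. */
	return seq_id + 1;
}

static int demo_attach_iommu(struct demo_domain *d, int seq_id)
{
	d->iommu_refcnt[seq_id] += 1;
	d->iommu_count += 1;
	if (d->iommu_refcnt[seq_id] == 1) {
		/* First device behind this unit: claim a hw domain id. */
		d->iommu_did[seq_id] = demo_alloc_did(seq_id);
	}
	return 0;
}

static void demo_detach_iommu(struct demo_domain *d, int seq_id)
{
	d->iommu_refcnt[seq_id] -= 1;
	d->iommu_count -= 1;
	if (d->iommu_refcnt[seq_id] == 0)
		d->iommu_did[seq_id] = 0;	/* last ref: release the id */
}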
H A Dfsl_pamu_domain.c20 #define pr_fmt(fmt) "fsl-pamu-domain: %s: " fmt, __func__
76 pr_debug("Number of windows/geometry not configured for the domain\n"); get_phys_addr()
307 struct fsl_dma_domain *domain; iommu_alloc_dma_domain() local
309 domain = kmem_cache_zalloc(fsl_pamu_domain_cache, GFP_KERNEL); iommu_alloc_dma_domain()
310 if (!domain) iommu_alloc_dma_domain()
313 domain->stash_id = ~(u32)0; iommu_alloc_dma_domain()
314 domain->snoop_id = ~(u32)0; iommu_alloc_dma_domain()
315 domain->win_cnt = pamu_get_max_subwin_cnt(); iommu_alloc_dma_domain()
316 domain->geom_size = 0; iommu_alloc_dma_domain()
318 INIT_LIST_HEAD(&domain->devices); iommu_alloc_dma_domain()
320 spin_lock_init(&domain->domain_lock); iommu_alloc_dma_domain()
322 return domain; iommu_alloc_dma_domain()
347 /* Remove the device from the domain device list */ detach_device()
362 * Check here if the device is already attached to a domain or not. attach_device()
363 * If the device is already attached to a domain, detach it. attach_device()
366 if (old_domain_info && old_domain_info->domain != dma_domain) { attach_device()
368 detach_device(dev, old_domain_info->domain); attach_device()
376 info->domain = dma_domain; attach_device()
382 * LIODNs share the same domain attach_device()
389 static phys_addr_t fsl_pamu_iova_to_phys(struct iommu_domain *domain, fsl_pamu_iova_to_phys() argument
392 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_iova_to_phys()
394 if (iova < domain->geometry.aperture_start || fsl_pamu_iova_to_phys()
395 iova > domain->geometry.aperture_end) fsl_pamu_iova_to_phys()
406 static void fsl_pamu_domain_free(struct iommu_domain *domain) fsl_pamu_domain_free() argument
408 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_domain_free()
439 /* Configure geometry settings for all LIODNs associated with domain */ pamu_set_domain_geometry()
457 /* Update stash destination for all LIODNs associated with the domain */ update_domain_stash()
472 /* Update domain mappings for all LIODNs associated with the domain */ update_domain_mapping()
504 static void fsl_pamu_window_disable(struct iommu_domain *domain, u32 wnd_nr) fsl_pamu_window_disable() argument
506 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_window_disable()
534 static int fsl_pamu_window_enable(struct iommu_domain *domain, u32 wnd_nr, fsl_pamu_window_enable() argument
537 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_window_enable()
576 ret = check_size(size, domain->geometry.aperture_start); fsl_pamu_window_enable()
606 * Attach the LIODN to the DMA domain and configure the geometry
614 struct iommu_domain *domain = &dma_domain->iommu_domain; handle_attach_device() local
631 * for the domain. If yes, set the geometry for handle_attach_device()
638 &domain->geometry, win_cnt); handle_attach_device()
657 static int fsl_pamu_attach_device(struct iommu_domain *domain, fsl_pamu_attach_device() argument
660 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_attach_device()
695 static void fsl_pamu_detach_device(struct iommu_domain *domain, fsl_pamu_detach_device() argument
698 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_detach_device()
727 static int configure_domain_geometry(struct iommu_domain *domain, void *data) configure_domain_geometry() argument
730 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); configure_domain_geometry()
747 pr_debug("Can't set geometry attributes as domain is active\n"); configure_domain_geometry()
752 /* Copy the domain geometry information */ configure_domain_geometry()
753 memcpy(&domain->geometry, geom_attr, configure_domain_geometry()
762 /* Set the domain stash attribute */ configure_domain_stash()
789 /* Configure domain dma state i.e. enable/disable DMA */ configure_domain_dma_state()
799 pr_debug("Can't enable DMA domain without valid mapping\n"); configure_domain_dma_state()
817 static int fsl_pamu_set_domain_attr(struct iommu_domain *domain, fsl_pamu_set_domain_attr() argument
820 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_set_domain_attr()
825 ret = configure_domain_geometry(domain, data); fsl_pamu_set_domain_attr()
842 static int fsl_pamu_get_domain_attr(struct iommu_domain *domain, fsl_pamu_get_domain_attr() argument
845 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_get_domain_attr()
994 static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count) fsl_pamu_set_windows() argument
996 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_set_windows()
1001 /* Ensure domain is inactive i.e. DMA should be disabled for the domain */ fsl_pamu_set_windows()
1003 pr_debug("Can't set geometry attributes as domain is active\n"); fsl_pamu_set_windows()
1008 /* Ensure that the geometry has been set for the domain */ fsl_pamu_set_windows()
1025 ret = pamu_set_domain_geometry(dma_domain, &domain->geometry, fsl_pamu_set_windows()
1043 static u32 fsl_pamu_get_windows(struct iommu_domain *domain) fsl_pamu_get_windows() argument
1045 struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain); fsl_pamu_get_windows()
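PAMU is a window-based IOMMU, driven through the generic attribute and window calls excerpted from iommu.c above. A hedged usage sketch, assuming the domain geometry attribute has already been configured as fsl_pamu_set_windows() requires:

#include <linux/iommu.h>
#include <linux/sizes.h>

static int demo_setup_window(struct iommu_domain *domain, phys_addr_t paddr)
{
	u32 windows = 1;
	int ret;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &windows);
	if (ret)
		return ret;

	/* Enable window 0 over one 1 MiB region, read/write. */
	return iommu_domain_window_enable(domain, 0, paddr, SZ_1M,
					  IOMMU_READ | IOMMU_WRITE);
}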
H A Dtegra-gart.c67 struct iommu_domain domain; /* generic domain handle */ member in struct:gart_domain
78 return container_of(dom, struct gart_domain, domain); to_gart_domain()
166 static int gart_iommu_attach_dev(struct iommu_domain *domain, gart_iommu_attach_dev() argument
169 struct gart_domain *gart_domain = to_gart_domain(domain); gart_iommu_attach_dev()
199 static void gart_iommu_detach_dev(struct iommu_domain *domain, gart_iommu_detach_dev() argument
202 struct gart_domain *gart_domain = to_gart_domain(domain); gart_iommu_detach_dev()
238 gart_domain->domain.geometry.aperture_start = gart->iovmm_base; gart_iommu_domain_alloc()
239 gart_domain->domain.geometry.aperture_end = gart->iovmm_base + gart_iommu_domain_alloc()
241 gart_domain->domain.geometry.force_aperture = true; gart_iommu_domain_alloc()
243 return &gart_domain->domain; gart_iommu_domain_alloc()
246 static void gart_iommu_domain_free(struct iommu_domain *domain) gart_iommu_domain_free() argument
248 struct gart_domain *gart_domain = to_gart_domain(domain); gart_iommu_domain_free()
257 gart_iommu_detach_dev(domain, c->dev); gart_iommu_domain_free()
265 static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova, gart_iommu_map() argument
268 struct gart_domain *gart_domain = to_gart_domain(domain); gart_iommu_map()
289 static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova, gart_iommu_unmap() argument
292 struct gart_domain *gart_domain = to_gart_domain(domain); gart_iommu_unmap()
306 static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain, gart_iommu_iova_to_phys() argument
309 struct gart_domain *gart_domain = to_gart_domain(domain); gart_iommu_iova_to_phys()
H A Dshmobile-iommu.c45 struct iommu_domain domain; member in struct:shmobile_iommu_domain
53 return container_of(dom, struct shmobile_iommu_domain, domain); to_sh_domain()
112 return &sh_domain->domain; shmobile_iommu_domain_alloc()
115 static void shmobile_iommu_domain_free(struct iommu_domain *domain) shmobile_iommu_domain_free() argument
117 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); shmobile_iommu_domain_free()
128 static int shmobile_iommu_attach_device(struct iommu_domain *domain, shmobile_iommu_attach_device() argument
132 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); shmobile_iommu_attach_device()
157 static void shmobile_iommu_detach_device(struct iommu_domain *domain, shmobile_iommu_detach_device() argument
161 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); shmobile_iommu_detach_device()
220 static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, shmobile_iommu_map() argument
224 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); shmobile_iommu_map()
264 static size_t shmobile_iommu_unmap(struct iommu_domain *domain, shmobile_iommu_unmap() argument
268 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); shmobile_iommu_unmap()
305 static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain, shmobile_iommu_iova_to_phys() argument
308 struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain); shmobile_iommu_iova_to_phys()
H A Dmsm_iommu.c55 struct iommu_domain domain; member in struct:msm_priv
60 return container_of(dom, struct msm_priv, domain); to_msm_priv()
86 static int __flush_iotlb(struct iommu_domain *domain) __flush_iotlb() argument
88 struct msm_priv *priv = to_msm_priv(domain); __flush_iotlb()
238 priv->domain.geometry.aperture_start = 0; msm_iommu_domain_alloc()
239 priv->domain.geometry.aperture_end = (1ULL << 32) - 1; msm_iommu_domain_alloc()
240 priv->domain.geometry.force_aperture = true; msm_iommu_domain_alloc()
242 return &priv->domain; msm_iommu_domain_alloc()
249 static void msm_iommu_domain_free(struct iommu_domain *domain) msm_iommu_domain_free() argument
257 priv = to_msm_priv(domain); msm_iommu_domain_free()
273 static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) msm_iommu_attach_dev() argument
285 priv = to_msm_priv(domain); msm_iommu_attach_dev()
321 ret = __flush_iotlb(domain); msm_iommu_attach_dev()
328 static void msm_iommu_detach_dev(struct iommu_domain *domain, msm_iommu_detach_dev() argument
339 priv = to_msm_priv(domain); msm_iommu_detach_dev()
351 ret = __flush_iotlb(domain); msm_iommu_detach_dev()
367 static int msm_iommu_map(struct iommu_domain *domain, unsigned long va, msm_iommu_map() argument
391 priv = to_msm_priv(domain); msm_iommu_map()
468 ret = __flush_iotlb(domain); msm_iommu_map()
474 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va, msm_iommu_unmap() argument
489 priv = to_msm_priv(domain); msm_iommu_unmap()
544 ret = __flush_iotlb(domain); msm_iommu_unmap()
554 static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain, msm_iommu_iova_to_phys() argument
568 priv = to_msm_priv(domain); msm_iommu_iova_to_phys()
H A Domap-iommu.c46 * struct omap_iommu_domain - omap iommu domain
48 * @iommu_dev: an omap iommu device attached to this domain. only a single
50 * @dev: Device using this domain.
51 * @lock: domain lock, should be taken when attaching/detaching
58 struct iommu_domain domain; member in struct:omap_iommu_domain
76 * @dom: generic iommu domain handle
80 return container_of(dom, struct omap_iommu_domain, domain); to_omap_domain()
787 struct iommu_domain *domain = obj->domain; iommu_fault_handler() local
788 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); iommu_fault_handler()
798 if (!report_iommu_fault(domain, obj->dev, da, 0)) iommu_fault_handler()
830 * omap_iommu_attach() - attach iommu device to an iommu domain
1036 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da, omap_iommu_map() argument
1039 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); omap_iommu_map()
1063 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, omap_iommu_unmap() argument
1066 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); omap_iommu_unmap()
1076 omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) omap_iommu_attach_dev() argument
1078 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); omap_iommu_attach_dev()
1090 /* only a single device is supported per domain for now */ omap_iommu_attach_dev()
1092 dev_err(dev, "iommu domain is already attached\n"); omap_iommu_attach_dev()
1107 oiommu->domain = domain; omap_iommu_attach_dev()
1120 /* only a single device is supported per domain for now */ _omap_iommu_detach_dev()
1132 oiommu->domain = NULL; _omap_iommu_detach_dev()
1135 static void omap_iommu_detach_dev(struct iommu_domain *domain, omap_iommu_detach_dev() argument
1138 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); omap_iommu_detach_dev()
1169 omap_domain->domain.geometry.aperture_start = 0; omap_iommu_domain_alloc()
1170 omap_domain->domain.geometry.aperture_end = (1ULL << 32) - 1; omap_iommu_domain_alloc()
1171 omap_domain->domain.geometry.force_aperture = true; omap_iommu_domain_alloc()
1173 return &omap_domain->domain; omap_iommu_domain_alloc()
1181 static void omap_iommu_domain_free(struct iommu_domain *domain) omap_iommu_domain_free() argument
1183 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); omap_iommu_domain_free()
1196 static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain, omap_iommu_iova_to_phys() argument
1199 struct omap_iommu_domain *omap_domain = to_omap_domain(domain); omap_iommu_iova_to_phys()
H A Darm-smmu.c349 struct iommu_domain domain; member in struct:arm_smmu_domain
369 return container_of(dom, struct arm_smmu_domain, domain); to_smmu_domain()
632 struct iommu_domain *domain = dev; arm_smmu_context_fault() local
633 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_context_fault()
659 if (!report_iommu_fault(domain, smmu->dev, iova, flags)) { arm_smmu_context_fault()
796 static int arm_smmu_init_domain_context(struct iommu_domain *domain, arm_smmu_init_domain_context() argument
804 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_init_domain_context()
901 * handler seeing a half-initialised domain state. arm_smmu_init_domain_context()
905 "arm-smmu-context-fault", domain); arm_smmu_init_domain_context()
925 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) arm_smmu_destroy_domain_context() argument
927 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_destroy_domain_context()
945 free_irq(irq, domain); arm_smmu_destroy_domain_context()
961 * Allocate the domain and initialise some of its data structures. arm_smmu_domain_alloc()
972 return &smmu_domain->domain; arm_smmu_domain_alloc()
975 static void arm_smmu_domain_free(struct iommu_domain *domain) arm_smmu_domain_free() argument
977 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_domain_free()
980 * Free the domain resources. We assume that all devices have arm_smmu_domain_free()
983 arm_smmu_destroy_domain_context(domain); arm_smmu_domain_free()
1111 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) arm_smmu_attach_dev() argument
1114 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_attach_dev()
1125 dev_err(dev, "already attached to IOMMU domain\n"); arm_smmu_attach_dev()
1129 /* Ensure that the domain is finalised */ arm_smmu_attach_dev()
1130 ret = arm_smmu_init_domain_context(domain, smmu); arm_smmu_attach_dev()
1135 * Sanity check the domain. We don't support domains across arm_smmu_attach_dev()
1140 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", arm_smmu_attach_dev()
1145 /* Looks ok, so add the device to the domain */ arm_smmu_attach_dev()
1152 dev->archdata.iommu = domain; arm_smmu_attach_dev()
1156 static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) arm_smmu_detach_dev() argument
1158 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_detach_dev()
1169 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, arm_smmu_map() argument
1174 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_map()
1186 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, arm_smmu_unmap() argument
1191 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_unmap()
1203 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, arm_smmu_iova_to_phys_hard() argument
1206 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_iova_to_phys_hard()
1245 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, arm_smmu_iova_to_phys() argument
1250 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_iova_to_phys()
1259 ret = arm_smmu_iova_to_phys_hard(domain, iova); arm_smmu_iova_to_phys()
1394 static int arm_smmu_domain_get_attr(struct iommu_domain *domain, arm_smmu_domain_get_attr() argument
1397 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_domain_get_attr()
1408 static int arm_smmu_domain_set_attr(struct iommu_domain *domain, arm_smmu_domain_set_attr() argument
1412 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); arm_smmu_domain_set_attr()
H A Damd_iommu_v2.c68 struct iommu_domain *domain; member in struct:device_state
138 * First detach device from domain - No more PRI requests will arrive free_device_state()
139 * from that device after it is unbound from the IOMMUv2 domain. free_device_state()
145 iommu_detach_group(dev_state->domain, group); free_device_state()
149 /* Everything is down now, free the IOMMUv2 domain */ free_device_state()
150 iommu_domain_free(dev_state->domain); free_device_state()
284 struct iommu_domain *domain; unbind_pasid() local
286 domain = pasid_state->device_state->domain; unbind_pasid()
298 amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid); unbind_pasid()
379 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address); __mn_flush_page()
411 amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, mn_invalidate_range()
414 amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid); mn_invalidate_range()
680 ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid, amd_iommu_bind_pasid()
801 dev_state->domain = iommu_domain_alloc(&pci_bus_type); amd_iommu_init_device()
802 if (dev_state->domain == NULL) amd_iommu_init_device()
805 amd_iommu_domain_direct_map(dev_state->domain); amd_iommu_init_device()
807 ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids); amd_iommu_init_device()
815 ret = iommu_attach_group(dev_state->domain, group); amd_iommu_init_device()
839 iommu_domain_free(dev_state->domain); amd_iommu_init_device()
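The amd_iommu_init_device() fragment above performs a fixed setup sequence: allocate an IOMMU domain on the PCI bus, switch it to direct (identity) mapping, enable the v2 (PASID/GCR3) features, then attach the device's group. Below is a condensed sketch of that sequence, assuming <linux/iommu.h> and <linux/amd-iommu.h>; the function name and error handling are illustrative, not the driver's actual code.

    /* Hypothetical condensed setup, mirroring the snippet above. */
    static int example_iommuv2_setup(struct pci_dev *pdev, int pasids)
    {
            struct iommu_domain *dom;
            struct iommu_group *group;
            int ret;

            dom = iommu_domain_alloc(&pci_bus_type);        /* v2 domains live on the PCI bus */
            if (dom == NULL)
                    return -ENOMEM;

            amd_iommu_domain_direct_map(dom);               /* identity-map ordinary DMA */
            ret = amd_iommu_domain_enable_v2(dom, pasids);  /* allocate the GCR3 table */
            if (ret)
                    goto out_free;

            group = iommu_group_get(&pdev->dev);
            if (!group) {
                    ret = -ENODEV;
                    goto out_free;
            }
            ret = iommu_attach_group(dom, group);
            iommu_group_put(group);
            if (ret)
                    goto out_free;
            return 0;

    out_free:
            iommu_domain_free(dom);
            return ret;
    }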
H A Damd_iommu_types.h334 /* Protection domain flags */
335 #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
336 #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
337 domain for an IOMMU */
338 #define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
340 #define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
410 struct list_head dev_list; /* List of all devices in this domain */
411 struct iommu_domain domain; /* generic domain handle used by member in struct:protection_domain
415 u16 id; /* the domain id written to the device table */
420 unsigned long flags; /* flags to find out type of domain */
421 bool updated; /* complete domain flush required */
422 unsigned dev_cnt; /* devices assigned to this domain */
438 * leaf pages of the domain page table used for the aperture. This way
448 * Data container for a dma_ops specific protection domain
451 /* generic protection domain information */
452 struct protection_domain domain; member in struct:dma_ops_domain
517 /* pci domain of this IOMMU */
662 /* allocation bitmap for domain ids */
H A Dtegra-smmu.c41 struct iommu_domain domain; member in struct:tegra_smmu_as
54 return container_of(dom, struct tegra_smmu_as, domain); to_smmu_as()
295 as->domain.geometry.aperture_start = 0; tegra_smmu_domain_alloc()
296 as->domain.geometry.aperture_end = 0xffffffff; tegra_smmu_domain_alloc()
297 as->domain.geometry.force_aperture = true; tegra_smmu_domain_alloc()
299 return &as->domain; tegra_smmu_domain_alloc()
302 static void tegra_smmu_domain_free(struct iommu_domain *domain) tegra_smmu_domain_free() argument
304 struct tegra_smmu_as *as = to_smmu_as(domain); tegra_smmu_domain_free()
440 static int tegra_smmu_attach_dev(struct iommu_domain *domain, tegra_smmu_attach_dev() argument
444 struct tegra_smmu_as *as = to_smmu_as(domain); tegra_smmu_attach_dev()
475 static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) tegra_smmu_detach_dev() argument
477 struct tegra_smmu_as *as = to_smmu_as(domain); tegra_smmu_detach_dev()
632 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova, tegra_smmu_map() argument
635 struct tegra_smmu_as *as = to_smmu_as(domain); tegra_smmu_map()
653 static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova, tegra_smmu_unmap() argument
656 struct tegra_smmu_as *as = to_smmu_as(domain); tegra_smmu_unmap()
670 static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, tegra_smmu_iova_to_phys() argument
673 struct tegra_smmu_as *as = to_smmu_as(domain); tegra_smmu_iova_to_phys()
H A Drockchip-iommu.c84 struct iommu_domain domain; member in struct:rk_iommu_domain
92 struct iommu_domain *domain; /* domain to which iommu is attached */ member in struct:rk_iommu
107 return container_of(dom, struct rk_iommu_domain, domain); to_rk_domain()
488 if (iommu->domain) rk_iommu_irq()
489 report_iommu_fault(iommu->domain, iommu->dev, iova, rk_iommu_irq()
492 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n"); rk_iommu_irq()
510 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain, rk_iommu_iova_to_phys() argument
513 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); rk_iommu_iova_to_phys()
544 /* shootdown these iova from all iommus using this domain */ rk_iommu_zap_iova()
657 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova, rk_iommu_map() argument
660 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); rk_iommu_map()
688 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, rk_iommu_unmap() argument
691 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); rk_iommu_unmap()
743 static int rk_iommu_attach_device(struct iommu_domain *domain, rk_iommu_attach_device() argument
747 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); rk_iommu_attach_device()
753 * Allow 'virtual devices' (e.g., drm) to attach to domain. rk_iommu_attach_device()
768 iommu->domain = domain; rk_iommu_attach_device()
788 dev_dbg(dev, "Attached to iommu domain\n"); rk_iommu_attach_device()
795 static void rk_iommu_detach_device(struct iommu_domain *domain, rk_iommu_detach_device() argument
799 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); rk_iommu_detach_device()
802 /* Allow 'virtual devices' (eg drm) to detach from domain */ rk_iommu_detach_device()
820 iommu->domain = NULL; rk_iommu_detach_device()
822 dev_dbg(dev, "Detached from iommu domain\n"); rk_iommu_detach_device()
851 return &rk_domain->domain; rk_iommu_domain_alloc()
858 static void rk_iommu_domain_free(struct iommu_domain *domain) rk_iommu_domain_free() argument
860 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); rk_iommu_domain_free()
/linux-4.4.14/drivers/gpu/drm/msm/
H A Dmsm_iommu.c23 struct iommu_domain *domain; member in struct:msm_iommu
37 return iommu_attach_device(iommu->domain, mmu->dev); msm_iommu_attach()
43 iommu_detach_device(iommu->domain, mmu->dev); msm_iommu_detach()
50 struct iommu_domain *domain = iommu->domain; msm_iommu_map() local
56 if (!domain || !sgt) msm_iommu_map()
65 ret = iommu_map(domain, da, pa, bytes, prot); msm_iommu_map()
79 iommu_unmap(domain, da, bytes); msm_iommu_map()
89 struct iommu_domain *domain = iommu->domain; msm_iommu_unmap() local
98 unmapped = iommu_unmap(domain, da, bytes); msm_iommu_unmap()
115 iommu_domain_free(iommu->domain); msm_iommu_destroy()
127 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain) msm_iommu_new() argument
135 iommu->domain = domain; msm_iommu_new()
137 iommu_set_fault_handler(domain, msm_fault_handler, dev); msm_iommu_new()
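msm_iommu_new() above installs a per-domain fault handler. A handler has the iommu_fault_handler_t signature from <linux/iommu.h>; the sketch below is a minimal illustration (the handler name and the priv token are hypothetical, standing in for whatever context the driver wants passed back).

    /* Illustrative fault handler (names hypothetical); returning 0 marks
     * the fault handled, non-zero lets the driver's default reporting run. */
    static int example_fault_handler(struct iommu_domain *domain,
                                     struct device *dev, unsigned long iova,
                                     int flags, void *token)
    {
            dev_err(dev, "iommu fault at iova %#lx (flags %#x)\n", iova, flags);
            return 0;
    }

    /* at setup time, before any faults can occur: */
    iommu_set_fault_handler(domain, example_fault_handler, priv);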
/linux-4.4.14/net/unix/
H A DMakefile2 # Makefile for the Linux unix domain socket layer.
/linux-4.4.14/arch/arm/mach-ux500/
H A Dpm_domains.c7 * Implements PM domains using the generic PM domain for ux500.
18 static int pd_power_off(struct generic_pm_domain *domain) pd_power_off() argument
21 * Handle the gating of the PM domain regulator here. pd_power_off()
23 * Drivers/subsystems handling devices in the PM domain need to perform pd_power_off()
25 * callbacks, to be able to enable PM domain gating/ungating. pd_power_off()
30 static int pd_power_on(struct generic_pm_domain *domain) pd_power_on() argument
33 * Handle the ungating of the PM domain regulator here. pd_power_on()
35 * Drivers/subsystems handling devices in the PM domain need to perform pd_power_on()
37 * callbacks, to be able to enable PM domain gating/ungating. pd_power_on()
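These two callbacks only take effect once they are wired into a generic PM domain and registered with the genpd core. A minimal sketch, assuming <linux/pm_domain.h>; the domain name and init function are hypothetical:

    static struct generic_pm_domain example_pd = {
            .name           = "example-pd",
            .power_off      = pd_power_off,
            .power_on       = pd_power_on,
    };

    static int __init example_pd_init(void)
    {
            /* last argument: whether the domain starts powered off */
            pm_genpd_init(&example_pd, NULL, false);
            return 0;
    }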
/linux-4.4.14/arch/arm/include/asm/
H A Ddomain.h2 * arch/arm/include/asm/domain.h
21 * DOMAIN_IO - domain 2 includes all IO only
22 * DOMAIN_USER - domain 1 includes all user memory only
23 * DOMAIN_KERNEL - domain 0 includes all kernel memory only
25 * The domain numbering depends on whether we support 36 physical
28 * be set for domain 0. We could just default to DOMAIN_IO as zero,
89 unsigned int domain; get_domain() local
92 "mrc p15, 0, %0, c3, c0 @ get domain" get_domain()
93 : "=r" (domain) get_domain()
96 return domain; get_domain()
102 "mcr p15, 0, %0, c3, c0 @ set domain" set_domain()
110 unsigned int domain = get_domain(); \
111 domain &= ~domain_mask(dom); \
112 domain = domain | domain_val(dom, type); \
113 set_domain(domain); \
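modify_domain() above is a plain read-modify-write of the DACR: fetch the current value with get_domain(), clear the field for one domain, OR in the new access type, and write it back with set_domain(). An illustrative use, built only from the DOMAIN_* constants defined in this header:

    /* Temporarily give the kernel "manager" rights over its own domain,
     * then drop back to "client" (permission-checked) access. */
    modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
    /* ... accesses that must bypass page-table permission checks ... */
    modify_domain(DOMAIN_KERNEL, DOMAIN_CLIENT);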
H A Ddma-iommu.h14 struct iommu_domain *domain; member in struct:dma_iommu_mapping
/linux-4.4.14/arch/arm/mach-keystone/
H A DMakefile8 # PM domain driver for Keystone SOCs
/linux-4.4.14/drivers/irqchip/
H A Dirq-atmel-aic5.c88 struct irq_domain *domain = d->domain; aic5_mask() local
89 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); aic5_mask()
105 struct irq_domain *domain = d->domain; aic5_unmask() local
106 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); aic5_unmask()
122 struct irq_domain *domain = d->domain; aic5_retrigger() local
123 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); aic5_retrigger()
136 struct irq_domain *domain = d->domain; aic5_set_type() local
137 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); aic5_set_type()
155 struct irq_domain *domain = d->domain; aic5_suspend() local
156 struct irq_domain_chip_generic *dgc = domain->gc; aic5_suspend()
157 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); aic5_suspend()
179 struct irq_domain *domain = d->domain; aic5_resume() local
180 struct irq_domain_chip_generic *dgc = domain->gc; aic5_resume()
181 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); aic5_resume()
203 struct irq_domain *domain = d->domain; aic5_pm_shutdown() local
204 struct irq_domain_chip_generic *dgc = domain->gc; aic5_pm_shutdown()
205 struct irq_chip_generic *bgc = irq_get_domain_generic_chip(domain, 0); aic5_pm_shutdown()
223 static void __init aic5_hw_init(struct irq_domain *domain) aic5_hw_init() argument
225 struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); aic5_hw_init()
246 for (i = 0; i < domain->revmap_size; i++) { aic5_hw_init()
304 struct irq_domain *domain; aic5_of_init() local
314 domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5", aic5_of_init()
316 if (IS_ERR(domain)) aic5_of_init()
317 return PTR_ERR(domain); aic5_of_init()
321 aic5_domain = domain; aic5_of_init()
324 gc = irq_get_domain_generic_chip(domain, i * 32); aic5_of_init()
336 aic5_hw_init(domain); aic5_of_init()
H A Dirq-mmp.c48 struct irq_domain *domain; member in struct:icu_chip_data
65 struct irq_domain *domain = d->domain; icu_mask_ack_irq() local
66 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; icu_mask_ack_irq()
89 struct irq_domain *domain = d->domain; icu_mask_irq() local
90 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; icu_mask_irq()
108 struct irq_domain *domain = d->domain; icu_unmask_irq() local
109 struct icu_chip_data *data = (struct icu_chip_data *)domain->host_data; icu_unmask_irq()
135 struct irq_domain *domain; icu_mux_irq_demux() local
142 domain = icu_data[i].domain; icu_mux_irq_demux()
143 data = (struct icu_chip_data *)domain->host_data; icu_mux_irq_demux()
204 handle_domain_irq(icu_data[0].domain, hwirq, regs); mmp_handle_irq()
215 handle_domain_irq(icu_data[0].domain, hwirq, regs); mmp2_handle_irq()
230 icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, icu_init_irq()
237 irq_set_default_host(icu_data[0].domain); icu_init_irq()
253 icu_data[0].domain = irq_domain_add_legacy(NULL, 64, 0, 0, mmp2_init_icu()
264 icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs, mmp2_init_icu()
273 icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs, mmp2_init_icu()
282 icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs, mmp2_init_icu()
291 icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs, mmp2_init_icu()
300 icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs, mmp2_init_icu()
309 icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs, mmp2_init_icu()
318 icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs, mmp2_init_icu()
339 irq_set_default_host(icu_data[0].domain); mmp2_init_icu()
361 icu_data[0].domain = irq_domain_add_linear(node, nr_irqs, mmp_init_bases()
365 ret = irq_create_mapping(icu_data[0].domain, irq); mmp_init_bases()
380 irq_domain_remove(icu_data[0].domain); mmp_init_bases()
397 irq_set_default_host(icu_data[0].domain); mmp_of_init()
416 irq_set_default_host(icu_data[0].domain); mmp2_of_init()
457 icu_data[i].domain = irq_domain_add_linear(node, nr_irqs, mmp2_mux_of_init()
461 ret = irq_create_mapping(icu_data[i].domain, irq); mmp2_mux_of_init()
484 irq_domain_remove(icu_data[i].domain); mmp2_mux_of_init()
H A Dirq-moxart.c40 struct irq_domain *domain; member in struct:moxart_irq_data
55 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); handle_irq()
74 intc.domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, moxart_of_intc_init()
76 if (!intc.domain) { moxart_of_intc_init()
77 pr_err("%s: unable to create IRQ domain\n", node->full_name); moxart_of_intc_init()
81 ret = irq_alloc_domain_generic_chips(intc.domain, 32, 1, moxart_of_intc_init()
87 irq_domain_remove(intc.domain); moxart_of_intc_init()
97 gc = irq_get_domain_generic_chip(intc.domain, 0); moxart_of_intc_init()
H A Dirq-gic-v3-its-platform-msi.c27 static int its_pmsi_prepare(struct irq_domain *domain, struct device *dev, its_pmsi_prepare() argument
34 msi_info = msi_get_domain_info(domain->parent); its_pmsi_prepare()
43 if (args.np == irq_domain_get_of_node(domain)) { its_pmsi_prepare()
57 return msi_info->ops->msi_prepare(domain->parent, its_pmsi_prepare()
88 pr_err("%s: unable to locate ITS domain\n", its_pmsi_init()
96 pr_err("%s: unable to create platform domain\n", its_pmsi_init()
101 pr_info("Platform MSI: %s domain created\n", np->full_name); its_pmsi_init()
H A Dirq-atmel-aic-common.c115 static void __init aic_common_ext_irq_of_init(struct irq_domain *domain) aic_common_ext_irq_of_init() argument
117 struct device_node *node = irq_domain_get_of_node(domain); aic_common_ext_irq_of_init()
124 gc = irq_get_domain_generic_chip(domain, 0); aic_common_ext_irq_of_init()
130 gc = irq_get_domain_generic_chip(domain, hwirq); aic_common_ext_irq_of_init()
133 hwirq, domain->revmap_size); aic_common_ext_irq_of_init()
220 struct irq_domain *domain; aic_common_of_init() local
239 domain = irq_domain_add_linear(node, nchips * 32, ops, aic); aic_common_of_init()
240 if (!domain) { aic_common_of_init()
245 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, aic_common_of_init()
253 gc = irq_get_domain_generic_chip(domain, i * 32); aic_common_of_init()
266 aic_common_ext_irq_of_init(domain); aic_common_of_init()
268 return domain; aic_common_of_init()
271 irq_domain_remove(domain); aic_common_of_init()
H A Dirq-mtk-sysirq.c79 /* No PPI should point to this domain */ mtk_sysirq_domain_translate()
91 static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq, mtk_sysirq_domain_alloc() argument
108 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, mtk_sysirq_domain_alloc()
110 domain->host_data); mtk_sysirq_domain_alloc()
112 gic_fwspec.fwnode = domain->parent->fwnode; mtk_sysirq_domain_alloc()
113 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_fwspec); mtk_sysirq_domain_alloc()
125 struct irq_domain *domain, *domain_parent; mtk_sysirq_of_init() local
153 domain = irq_domain_add_hierarchy(domain_parent, 0, intpol_num, node, mtk_sysirq_of_init()
155 if (!domain) { mtk_sysirq_of_init()
H A Dirq-imx-gpcv2.c162 /* No PPI should point to this domain */ imx_gpcv2_domain_translate()
174 static int imx_gpcv2_domain_alloc(struct irq_domain *domain, imx_gpcv2_domain_alloc() argument
185 err = imx_gpcv2_domain_translate(domain, fwspec, &hwirq, &type); imx_gpcv2_domain_alloc()
193 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, imx_gpcv2_domain_alloc()
194 &gpcv2_irqchip_data_chip, domain->host_data); imx_gpcv2_domain_alloc()
198 parent_fwspec.fwnode = domain->parent->fwnode; imx_gpcv2_domain_alloc()
199 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, imx_gpcv2_domain_alloc()
212 struct irq_domain *parent_domain, *domain; imx_gpcv2_irqchip_init() local
223 pr_err("%s: unable to get parent domain\n", node->full_name); imx_gpcv2_irqchip_init()
240 domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, imx_gpcv2_irqchip_init()
242 if (!domain) { imx_gpcv2_irqchip_init()
247 irq_set_default_host(domain); imx_gpcv2_irqchip_init()
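The mtk-sysirq, imx-gpcv2 and (below) vf610-mscm-ir fragments all implement the same hierarchical-domain .alloc pattern: translate the incoming fwspec, bind each virq to this domain's chip, then rewrite the fwspec's fwnode and pass the allocation up to the parent. A condensed sketch under those assumptions; the chip and translate helper are hypothetical stand-ins for a driver's own:

    static struct irq_chip example_chip;    /* .irq_mask etc. filled in elsewhere */

    static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                    unsigned int nr_irqs, void *data)
    {
            struct irq_fwspec *fwspec = data;
            struct irq_fwspec parent_fwspec;
            irq_hw_number_t hwirq;
            unsigned int type;
            int i, err;

            /* example_domain_translate(): the driver's own translate callback */
            err = example_domain_translate(domain, fwspec, &hwirq, &type);
            if (err)
                    return err;

            for (i = 0; i < nr_irqs; i++)
                    irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                                  &example_chip, domain->host_data);

            parent_fwspec = *fwspec;                /* same args, parent's fwnode */
            parent_fwspec.fwnode = domain->parent->fwnode;
            return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
                                                &parent_fwspec);
    }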
H A Dirq-dw-apb-ictl.c73 struct irq_domain *domain; dw_apb_ictl_init() local
123 domain = irq_domain_add_linear(np, nrirqs, dw_apb_ictl_init()
125 if (!domain) { dw_apb_ictl_init()
126 pr_err("%s: unable to add irq domain\n", np->full_name); dw_apb_ictl_init()
131 ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name, dw_apb_ictl_init()
135 pr_err("%s: unable to alloc irq domain gc\n", np->full_name); dw_apb_ictl_init()
140 gc = irq_get_domain_generic_chip(domain, i * 32); dw_apb_ictl_init()
149 irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain); dw_apb_ictl_init()
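The moxart, dw-apb-ictl, and (below) orion and sunxi-nmi fragments follow one generic-chip recipe: create a linear domain, allocate a generic chip over it, point the chip at the controller's registers, and chain the parent interrupt. A minimal sketch of that recipe; the handler, register offset, and mask-register helpers chosen here are hypothetical:

    static void example_handler(struct irq_desc *desc);  /* chained flow handler, elsewhere */

    static int __init example_intc_init(struct device_node *np, void __iomem *base,
                                        unsigned int parent_irq)
    {
            struct irq_domain *domain;
            struct irq_chip_generic *gc;
            int ret;

            domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, NULL);
            if (!domain)
                    return -ENOMEM;

            ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
                                                 handle_level_irq, 0, 0, 0);
            if (ret) {
                    irq_domain_remove(domain);
                    return ret;
            }

            gc = irq_get_domain_generic_chip(domain, 0);
            gc->reg_base = base;
            gc->chip_types[0].regs.mask = 0x10;     /* hypothetical register offset */
            gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
            gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

            irq_set_chained_handler_and_data(parent_irq, example_handler, domain);
            return 0;
    }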
H A Dirq-renesas-h8300h.c78 struct irq_domain *domain; h8300h_intc_of_init() local
87 domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL); h8300h_intc_of_init()
88 BUG_ON(!domain); h8300h_intc_of_init()
89 irq_set_default_host(domain); h8300h_intc_of_init()
H A Dirq-gic-v3-its-pci-msi.c68 static int its_pci_msi_prepare(struct irq_domain *domain, struct device *dev, its_pci_msi_prepare() argument
78 msi_info = msi_get_domain_info(domain->parent); its_pci_msi_prepare()
87 info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain, pdev); its_pci_msi_prepare()
89 return msi_info->ops->msi_prepare(domain->parent, its_pci_msi_prepare()
121 pr_err("%s: unable to locate ITS domain\n", its_pci_msi_init()
129 pr_err("%s: unable to create PCI domain\n", its_pci_msi_init()
134 pr_info("PCI/MSI: %s domain created\n", np->full_name); its_pci_msi_init()
H A Dirq-orion.c67 panic("%s: unable to add irq domain\n", np->name); orion_irq_init()
74 panic("%s: unable to alloc irq domain gc\n", np->name); orion_irq_init()
143 struct irq_domain *domain; orion_bridge_irq_init() local
150 domain = irq_domain_add_linear(np, nrirqs, orion_bridge_irq_init()
152 if (!domain) { orion_bridge_irq_init()
153 pr_err("%s: unable to add irq domain\n", np->name); orion_bridge_irq_init()
157 ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name, orion_bridge_irq_init()
160 pr_err("%s: unable to alloc irq domain gc\n", np->name); orion_bridge_irq_init()
182 gc = irq_get_domain_generic_chip(domain, 0); orion_bridge_irq_init()
201 domain); orion_bridge_irq_init()
H A Dirq-sunxi-nmi.c66 struct irq_domain *domain = irq_desc_get_handler_data(desc); sunxi_sc_nmi_handle_irq() local
68 unsigned int virq = irq_find_mapping(domain, 0); sunxi_sc_nmi_handle_irq()
127 struct irq_domain *domain; sunxi_sc_nmi_irq_init() local
134 domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL); sunxi_sc_nmi_irq_init()
135 if (!domain) { sunxi_sc_nmi_irq_init()
136 pr_err("Could not register interrupt domain.\n"); sunxi_sc_nmi_irq_init()
140 ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME, sunxi_sc_nmi_irq_init()
155 gc = irq_get_domain_generic_chip(domain, 0); sunxi_sc_nmi_irq_init()
187 irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain); sunxi_sc_nmi_irq_init()
192 irq_domain_remove(domain); sunxi_sc_nmi_irq_init()
H A Dirq-tb10x.c102 struct irq_domain *domain = irq_desc_get_handler_data(desc); tb10x_irq_cascade() local
105 generic_handle_irq(irq_find_mapping(domain, irq)); tb10x_irq_cascade()
114 struct irq_domain *domain; of_tb10x_init_irq() local
136 domain = irq_domain_add_linear(ictl, AB_IRQCTL_MAXIRQ, of_tb10x_init_irq()
138 if (!domain) { of_tb10x_init_irq()
140 pr_err("%s: Could not register interrupt domain.\n", of_tb10x_init_irq()
145 ret = irq_alloc_domain_generic_chips(domain, AB_IRQCTL_MAXIRQ, of_tb10x_init_irq()
155 gc = domain->gc->gc[0]; of_tb10x_init_irq()
178 domain); of_tb10x_init_irq()
189 irq_domain_remove(domain); of_tb10x_init_irq()
H A Dirq-vf610-mscm-ir.c128 static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int virq, vf610_mscm_ir_domain_alloc() argument
136 if (!irq_domain_get_of_node(domain->parent)) vf610_mscm_ir_domain_alloc()
144 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, vf610_mscm_ir_domain_alloc()
146 domain->host_data); vf610_mscm_ir_domain_alloc()
148 parent_fwspec.fwnode = domain->parent->fwnode; vf610_mscm_ir_domain_alloc()
160 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, vf610_mscm_ir_domain_alloc()
185 struct irq_domain *domain, *domain_parent; vf610_mscm_ir_of_init() local
216 domain = irq_domain_add_hierarchy(domain_parent, 0, vf610_mscm_ir_of_init()
219 if (!domain) { vf610_mscm_ir_of_init()
224 if (of_device_is_compatible(irq_domain_get_of_node(domain->parent), vf610_mscm_ir_of_init()
H A Dirq-crossbar.c78 static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, allocate_gic_irq() argument
85 if (!irq_domain_get_of_node(domain->parent)) allocate_gic_irq()
100 fwspec.fwnode = domain->parent->fwnode; allocate_gic_irq()
106 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); allocate_gic_irq()
125 return -EINVAL; /* No PPI should point to this domain */ crossbar_domain_alloc()
146 * @domain: domain of irq to unmap
156 static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq, crossbar_domain_free() argument
163 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); crossbar_domain_free()
181 /* No PPI should point to this domain */ crossbar_domain_translate()
339 struct irq_domain *parent_domain, *domain; irqcrossbar_init() local
349 pr_err("%s: unable to obtain parent domain\n", node->full_name); irqcrossbar_init()
357 domain = irq_domain_add_hierarchy(parent_domain, 0, irqcrossbar_init()
361 if (!domain) { irqcrossbar_init()
362 pr_err("%s: failed to allocated domain\n", node->full_name); irqcrossbar_init()
H A Dirq-tegra.c233 /* No PPI should point to this domain */ tegra_ictlr_domain_translate()
245 static int tegra_ictlr_domain_alloc(struct irq_domain *domain, tegra_ictlr_domain_alloc() argument
251 struct tegra_ictlr_info *info = domain->host_data; tegra_ictlr_domain_alloc()
258 return -EINVAL; /* No PPI should point to this domain */ tegra_ictlr_domain_alloc()
267 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, tegra_ictlr_domain_alloc()
273 parent_fwspec.fwnode = domain->parent->fwnode; tegra_ictlr_domain_alloc()
274 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, tegra_ictlr_domain_alloc()
278 static void tegra_ictlr_domain_free(struct irq_domain *domain, tegra_ictlr_domain_free() argument
285 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); tegra_ictlr_domain_free()
299 struct irq_domain *parent_domain, *domain; tegra_ictlr_init() local
312 pr_err("%s: unable to obtain parent domain\n", node->full_name); tegra_ictlr_init()
354 domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, tegra_ictlr_init()
357 if (!domain) { tegra_ictlr_init()
358 pr_err("%s: failed to allocated domain\n", node->full_name); tegra_ictlr_init()
H A Dirq-brcmstb-l2.c47 struct irq_domain *domain; member in struct:brcmstb_l2_intc_data
55 struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); brcmstb_l2_intc_irq_handle()
77 generic_handle_irq(irq_find_mapping(b->domain, irq)); brcmstb_l2_intc_irq_handle()
151 data->domain = irq_domain_add_linear(np, 32, brcmstb_l2_intc_of_init()
153 if (!data->domain) { brcmstb_l2_intc_of_init()
166 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, brcmstb_l2_intc_of_init()
177 gc = irq_get_domain_generic_chip(data->domain, 0); brcmstb_l2_intc_of_init()
208 irq_domain_remove(data->domain); brcmstb_l2_intc_of_init()
H A Dirq-renesas-h8s.c84 struct irq_domain *domain; h8s_intc_of_init() local
95 domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL); h8s_intc_of_init()
96 BUG_ON(!domain); h8s_intc_of_init()
97 irq_set_default_host(domain); h8s_intc_of_init()
H A Dirq-gic-v2m.c127 static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, gicv2m_irq_gic_domain_alloc() argument
135 if (is_of_node(domain->parent->fwnode)) { gicv2m_irq_gic_domain_alloc()
136 fwspec.fwnode = domain->parent->fwnode; gicv2m_irq_gic_domain_alloc()
145 err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); gicv2m_irq_gic_domain_alloc()
150 d = irq_domain_get_irq_data(domain->parent, virq); gicv2m_irq_gic_domain_alloc()
170 static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, gicv2m_irq_domain_alloc() argument
192 err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); gicv2m_irq_domain_alloc()
198 irq_domain_set_hwirq_and_chip(domain, virq, hwirq, gicv2m_irq_domain_alloc()
204 static void gicv2m_irq_domain_free(struct irq_domain *domain, gicv2m_irq_domain_free() argument
207 struct irq_data *d = irq_domain_get_irq_data(domain, virq); gicv2m_irq_domain_free()
212 irq_domain_free_irqs_parent(domain, virq, nr_irqs); gicv2m_irq_domain_free()
274 pr_err("Failed to create GICv2m domain\n"); gicv2m_allocate_domains()
H A Dirq-versatile-fpga.c36 * @domain: IRQ domain for this instance
44 struct irq_domain *domain; member in struct:fpga_irq_data
82 generic_handle_irq(irq_find_mapping(f->domain, irq)); fpga_irq_handle()
99 handle_domain_irq(f->domain, irq, regs); handle_one_fpga()
164 f->domain = irq_domain_add_simple(node, fls(valid), irq_start, fpga_irq_init()
171 irq_create_mapping(f->domain, i); fpga_irq_init()
H A Dirq-clps711x.c72 struct irq_domain *domain; member in struct:__anon5480
84 handle_domain_irq(clps711x_intc->domain, clps711x_irqh()
90 handle_domain_irq(clps711x_intc->domain, clps711x_irqh()
190 clps711x_intc->domain = _clps711x_intc_init()
193 if (!clps711x_intc->domain) { _clps711x_intc_init()
198 irq_set_default_host(clps711x_intc->domain); _clps711x_intc_init()
H A Dirq-atmel-aic.c141 static void __init aic_hw_init(struct irq_domain *domain) aic_hw_init() argument
143 struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); aic_hw_init()
245 struct irq_domain *domain; aic_of_init() local
250 domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic", aic_of_init()
252 if (IS_ERR(domain)) aic_of_init()
253 return PTR_ERR(domain); aic_of_init()
257 aic_domain = domain; aic_of_init()
258 gc = irq_get_domain_generic_chip(domain, 0); aic_of_init()
271 aic_hw_init(domain); aic_of_init()
H A Dirq-nvic.c62 static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, nvic_irq_domain_alloc() argument
70 ret = nvic_irq_domain_translate(domain, fwspec, &hwirq, &type); nvic_irq_domain_alloc()
75 irq_map_generic_chip(domain, virq + i, hwirq + i); nvic_irq_domain_alloc()
110 pr_warn("Failed to allocate irq domain\n"); nvic_of_init()
H A Dirq-imgpdc.c70 * @domain: IRQ domain for PDC peripheral and syswake IRQs.
81 struct irq_domain *domain; member in struct:pdc_intc_priv
121 return (struct pdc_intc_priv *)data->domain->host_data; irqd_to_priv()
239 irq_no = irq_linear_revmap(priv->domain, i); pdc_intc_perip_isr()
260 irq_no = irq_linear_revmap(priv->domain, pdc_intc_syswake_isr()
384 /* Set up an IRQ domain */ pdc_intc_probe()
385 priv->domain = irq_domain_add_linear(node, 16, &irq_generic_chip_ops, pdc_intc_probe()
387 if (unlikely(!priv->domain)) { pdc_intc_probe()
388 dev_err(&pdev->dev, "cannot add IRQ domain\n"); pdc_intc_probe()
397 ret = irq_alloc_domain_generic_chips(priv->domain, 8, 2, "pdc", pdc_intc_probe()
405 gc = irq_get_domain_generic_chip(priv->domain, 0); pdc_intc_probe()
419 gc = irq_get_domain_generic_chip(priv->domain, 8); pdc_intc_probe()
470 irq_domain_remove(priv->domain); pdc_intc_probe()
478 irq_domain_remove(priv->domain); pdc_intc_remove()
H A Dirq-metag.c25 * @domain: IRQ domain for all internal Meta IRQs (HWSTATMETA)
29 struct irq_domain *domain; member in struct:metag_internal_irq_priv
239 irq_no = irq_linear_revmap(priv->domain, hw); metag_internal_irq_demux()
270 if (!priv->domain) internal_irq_map()
272 return irq_create_mapping(priv->domain, hw); internal_irq_map()
295 * @d: irq domain of internal trigger block
328 /* Set up an IRQ domain */ init_internal_IRQ()
329 priv->domain = irq_domain_add_linear(NULL, 32, init_internal_IRQ()
332 if (unlikely(!priv->domain)) { init_internal_IRQ()
333 pr_err("meta-internal-intc: cannot add IRQ domain\n"); init_internal_IRQ()
H A Dirq-bcm7120-l2.c51 struct irq_domain *domain; member in struct:bcm7120_l2_intc_data
71 irq_get_domain_generic_chip(b->domain, base); bcm7120_l2_intc_irq_handle()
82 generic_handle_irq(irq_find_mapping(b->domain, bcm7120_l2_intc_irq_handle()
266 data->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * data->n_words, bcm7120_l2_intc_probe()
268 if (!data->domain) { bcm7120_l2_intc_probe()
280 ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1, bcm7120_l2_intc_probe()
292 gc = irq_get_domain_generic_chip(data->domain, irq); bcm7120_l2_intc_probe()
330 irq_domain_remove(data->domain); bcm7120_l2_intc_probe()
H A Dirq-vt8500.c76 struct irq_domain *domain; /* Domain for this controller */ member in struct:vt8500_irq_data
85 struct vt8500_irq_data *priv = d->domain->host_data; vt8500_irq_mask()
106 struct vt8500_irq_data *priv = d->domain->host_data; vt8500_irq_unmask()
117 struct vt8500_irq_data *priv = d->domain->host_data; vt8500_irq_set_type()
199 handle_domain_irq(intc[i].domain, irqnr, regs); vt8500_handle_irq()
216 intc[active_cnt].domain = irq_domain_add_linear(node, 64, vt8500_irq_init()
224 if (!intc[active_cnt].domain) { vt8500_irq_init()
225 pr_err("%s: Unable to add irq domain!\n", __func__); vt8500_irq_init()
H A Dirq-i8259.c334 struct irq_domain *domain; __init_i8259_irqs() local
341 domain = irq_domain_add_legacy(node, 16, I8259A_IRQ_BASE, 0, __init_i8259_irqs()
343 if (!domain) __init_i8259_irqs()
344 panic("Failed to add i8259 IRQ domain"); __init_i8259_irqs()
347 return domain; __init_i8259_irqs()
357 struct irq_domain *domain = irq_desc_get_handler_data(desc); i8259_irq_dispatch() local
364 irq = irq_linear_revmap(domain, hwirq); i8259_irq_dispatch()
370 struct irq_domain *domain; i8259_of_init() local
379 domain = __init_i8259_irqs(node); i8259_of_init()
381 domain); i8259_of_init()
H A Dirq-bcm2835.c93 struct irq_domain *domain; member in struct:armctrl_ic
154 intc.domain = irq_domain_add_linear(node, MAKE_HWIRQ(NR_BANKS, 0), armctrl_of_init()
156 if (!intc.domain) armctrl_of_init()
157 panic("%s: unable to create IRQ domain\n", node->full_name); armctrl_of_init()
165 irq = irq_create_mapping(intc.domain, MAKE_HWIRQ(b, i)); armctrl_of_init()
245 handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs); bcm2835_handle_irq()
253 generic_handle_irq(irq_linear_revmap(intc.domain, hwirq)); bcm2836_chained_handle_irq()
H A Dirq-ingenic.c93 struct irq_domain *domain; ingenic_intc_of_init() local
146 domain = irq_domain_add_legacy(node, num_chips * 32, JZ4740_IRQ_BASE, 0, ingenic_intc_of_init()
148 if (!domain) ingenic_intc_of_init()
149 pr_warn("unable to register IRQ domain\n"); ingenic_intc_of_init()
/linux-4.4.14/drivers/dca/
H A Ddca-core.c60 struct dca_domain *domain; dca_allocate_domain() local
62 domain = kzalloc(sizeof(*domain), GFP_NOWAIT); dca_allocate_domain()
63 if (!domain) dca_allocate_domain()
66 INIT_LIST_HEAD(&domain->dca_providers); dca_allocate_domain()
67 domain->pci_rc = rc; dca_allocate_domain()
69 return domain; dca_allocate_domain()
72 static void dca_free_domain(struct dca_domain *domain) dca_free_domain() argument
74 list_del(&domain->node); dca_free_domain()
75 kfree(domain); dca_free_domain()
97 struct dca_domain *domain; unregister_dca_providers() local
112 /* at this point only one domain in the list is expected */ unregister_dca_providers()
113 domain = list_first_entry(&dca_domains, struct dca_domain, node); unregister_dca_providers()
115 list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node) unregister_dca_providers()
118 dca_free_domain(domain); unregister_dca_providers()
130 struct dca_domain *domain; dca_find_domain() local
132 list_for_each_entry(domain, &dca_domains, node) dca_find_domain()
133 if (domain->pci_rc == rc) dca_find_domain()
134 return domain; dca_find_domain()
142 struct dca_domain *domain; dca_get_domain() local
145 domain = dca_find_domain(rc); dca_get_domain()
147 if (!domain) { dca_get_domain()
152 return domain; dca_get_domain()
159 struct dca_domain *domain; dca_find_provider_by_dev() local
163 domain = dca_find_domain(rc); dca_find_provider_by_dev()
164 if (!domain) dca_find_provider_by_dev()
168 domain = list_first_entry(&dca_domains, dca_find_provider_by_dev()
175 list_for_each_entry(dca, &domain->dca_providers, node) dca_find_provider_by_dev()
192 struct dca_domain *domain; dca_add_requester() local
207 domain = dca_find_domain(pci_rc); dca_add_requester()
208 if (!domain) { dca_add_requester()
213 list_for_each_entry(dca, &domain->dca_providers, node) { dca_add_requester()
359 struct dca_domain *domain, *newdomain = NULL; register_dca_provider() local
373 domain = dca_get_domain(dev); register_dca_provider()
374 if (!domain) { register_dca_provider()
391 domain = dca_get_domain(dev); register_dca_provider()
392 if (!domain) { register_dca_provider()
393 domain = newdomain; register_dca_provider()
395 list_add(&domain->node, &dca_domains); register_dca_provider()
398 list_add(&dca->node, &domain->dca_providers); register_dca_provider()
416 struct dca_domain *domain; unregister_dca_provider() local
431 domain = dca_find_domain(pci_rc); unregister_dca_provider()
432 if (list_empty(&domain->dca_providers)) unregister_dca_provider()
433 dca_free_domain(domain); unregister_dca_provider()
/linux-4.4.14/net/netlabel/
H A Dnetlabel_domainhash.c4 * This file manages the domain hash table that NetLabel uses to determine
5 * which network labeling protocol to use for a given domain. The NetLabel
66 * netlbl_domhsh_free_entry - Frees a domain hash table entry
100 kfree(ptr->domain); netlbl_domhsh_free_entry()
105 * netlbl_domhsh_hash - Hashing function for the domain hash table
106 * @domain: the domain name to hash
109 * This is the hashing function for the domain hash table; it returns the netlbl_domhsh_hash()
110 * correct bucket number for the domain. The caller is responsible for
130 * netlbl_domhsh_search - Search for a domain entry
131 * @domain: the domain
134 * Searches the domain hash table and returns a pointer to the hash table
140 static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain) netlbl_domhsh_search() argument
146 if (domain != NULL) { netlbl_domhsh_search()
147 bkt = netlbl_domhsh_hash(domain); netlbl_domhsh_search()
150 if (iter->valid && strcmp(iter->domain, domain) == 0) netlbl_domhsh_search()
158 * netlbl_domhsh_search_def - Search for a domain entry
159 * @domain: the domain
163 * Searches the domain hash table and returns a pointer to the hash table
170 static struct netlbl_dom_map *netlbl_domhsh_search_def(const char *domain) netlbl_domhsh_search_def() argument
174 entry = netlbl_domhsh_search(domain); netlbl_domhsh_search_def()
211 entry->domain ? entry->domain : "(default)"); netlbl_domhsh_audit_add()
248 * netlbl_domhsh_validate - Validate a new domain mapping entry
251 * This function validates the new domain mapping entry to ensure that it is
316 * netlbl_domhsh_init - Init for the domain hash
320 * Initializes the domain hash table, should be called only by
355 * netlbl_domhsh_add - Adds a entry to the domain hash table
360 * Adds a new entry to the domain hash table and handles any updates to the
387 if (entry->domain != NULL) netlbl_domhsh_add()
388 entry_old = netlbl_domhsh_search(entry->domain); netlbl_domhsh_add()
390 entry_old = netlbl_domhsh_search_def(entry->domain); netlbl_domhsh_add()
394 if (entry->domain != NULL) { netlbl_domhsh_add()
395 u32 bkt = netlbl_domhsh_hash(entry->domain); netlbl_domhsh_add()
426 * the selectors do not exist in the existing domain map */ netlbl_domhsh_add()
476 * netlbl_domhsh_add_default - Adds the default entry to the domain hash table
481 * Adds a new default entry to the domain hash table and handles any updates
493 * netlbl_domhsh_remove_entry - Removes a given entry from the domain table
498 * Removes an entry from the domain hash table and handles any updates to the
528 entry->domain ? entry->domain : "(default)", netlbl_domhsh_remove_entry()
559 * @domain: the domain
565 * Removes an individual address selector from a domain mapping and potentially
570 int netlbl_domhsh_remove_af4(const char *domain, netlbl_domhsh_remove_af4() argument
585 if (domain) netlbl_domhsh_remove_af4()
586 entry_map = netlbl_domhsh_search(domain); netlbl_domhsh_remove_af4()
588 entry_map = netlbl_domhsh_search_def(domain); netlbl_domhsh_remove_af4()
606 /* the domain mapping is empty so remove it from the mapping table */ netlbl_domhsh_remove_af4()
626 * netlbl_domhsh_remove - Removes an entry from the domain hash table
627 * @domain: the domain to remove
631 * Removes an entry from the domain hash table and handles any updates to the
636 int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info) netlbl_domhsh_remove() argument
642 if (domain) netlbl_domhsh_remove()
643 entry = netlbl_domhsh_search(domain); netlbl_domhsh_remove()
645 entry = netlbl_domhsh_search_def(domain); netlbl_domhsh_remove()
657 * Removes/resets the default entry for the domain hash table and handles any
668 * netlbl_domhsh_getentry - Get an entry from the domain hash table
669 * @domain: the domain name to search for
672 * Look through the domain hash table searching for an entry to match @domain,
677 struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain) netlbl_domhsh_getentry() argument
679 return netlbl_domhsh_search_def(domain); netlbl_domhsh_getentry()
683 * netlbl_domhsh_getentry_af4 - Get an entry from the domain hash table
684 * @domain: the domain name to search for
688 * Look through the domain hash table searching for an entry to match @domain
693 struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain, netlbl_domhsh_getentry_af4() argument
699 dom_iter = netlbl_domhsh_search_def(domain); netlbl_domhsh_getentry_af4()
713 * netlbl_domhsh_getentry_af6 - Get an entry from the domain hash table
714 * @domain: the domain name to search for
718 * Look through the domain hash table searching for an entry to match @domain
723 struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain, netlbl_domhsh_getentry_af6() argument
729 dom_iter = netlbl_domhsh_search_def(domain); netlbl_domhsh_getentry_af6()
743 * netlbl_domhsh_walk - Iterate through the domain mapping hash table
750 * Iterate over the domain mapping hash table, skipping the first @skip_bkt
H A Dnetlabel_domainhash.h4 * This file manages the domain hash table that NetLabel uses to determine
5 * which network labeling protocol to use for a given domain. The NetLabel
72 char *domain; member in struct:netlbl_dom_map
83 /* Manipulate the domain hash table */
90 int netlbl_domhsh_remove_af4(const char *domain,
94 int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
96 struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
97 struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
100 struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
H A Dnetlabel_kapi.c55 * netlbl_cfg_map_del - Remove a NetLabel/LSM domain mapping
56 * @domain: the domain mapping to remove
63 * Removes a NetLabel/LSM domain mapping. A @domain value of NULL causes the
64 * default domain mapping to be removed. Returns zero on success, negative
68 int netlbl_cfg_map_del(const char *domain, netlbl_cfg_map_del() argument
75 return netlbl_domhsh_remove(domain, audit_info); netlbl_cfg_map_del()
79 return netlbl_domhsh_remove_af4(domain, addr, mask, netlbl_cfg_map_del()
90 * @domain: the domain mapping to add
97 * Adds a new unlabeled NetLabel/LSM domain mapping. A @domain value of NULL
98 * causes a new default domain mapping to be added. Returns zero on success,
102 int netlbl_cfg_unlbl_map_add(const char *domain, netlbl_cfg_unlbl_map_add() argument
117 if (domain != NULL) { netlbl_cfg_unlbl_map_add()
118 entry->domain = kstrdup(domain, GFP_ATOMIC); netlbl_cfg_unlbl_map_add()
119 if (entry->domain == NULL) netlbl_cfg_unlbl_map_add()
189 kfree(entry->domain); netlbl_cfg_unlbl_map_add()
318 * @domain: the domain mapping to add
324 * Add a new NetLabel/LSM domain mapping for the given CIPSO DOI to the NetLabel
325 * subsystem. A @domain value of NULL adds a new default domain mapping.
330 const char *domain, netlbl_cfg_cipsov4_map_add()
348 if (domain != NULL) { netlbl_cfg_cipsov4_map_add()
349 entry->domain = kstrdup(domain, GFP_ATOMIC); netlbl_cfg_cipsov4_map_add()
350 if (entry->domain == NULL) netlbl_cfg_cipsov4_map_add()
394 kfree(entry->domain); netlbl_cfg_cipsov4_map_add()
764 * Returns zero on success, -EDESTADDRREQ if the domain is configured to use
777 dom_entry = netlbl_domhsh_getentry(secattr->domain); netlbl_sock_setattr()
887 entry = netlbl_domhsh_getentry_af4(secattr->domain, netlbl_conn_setattr()
943 entry = netlbl_domhsh_getentry_af4(secattr->domain, netlbl_req_setattr()
1016 entry = netlbl_domhsh_getentry_af4(secattr->domain,hdr4->daddr); netlbl_skbuff_setattr()
1181 printk(KERN_INFO "NetLabel: domain hash size = %u\n", netlbl_init()
329 netlbl_cfg_cipsov4_map_add(u32 doi, const char *domain, const struct in_addr *addr, const struct in_addr *mask, struct netlbl_audit *audit_info) netlbl_cfg_cipsov4_map_add() argument
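netlbl_cfg_unlbl_map_add() above is the entry point an LSM uses to map a domain onto unlabeled traffic. A minimal sketch for an IPv4 mapping with no per-address selector; the domain string and wrapper name are hypothetical (a real LSM passes whatever domain string it keys its policy on):

    /* Map one LSM domain to unlabeled networking (no address selector). */
    static int example_netlbl_setup(struct netlbl_audit *audit_info)
    {
            /* a NULL domain would instead replace the default mapping */
            return netlbl_cfg_unlbl_map_add("example_lsm_domain", AF_INET,
                                            NULL, NULL, audit_info);
    }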
/linux-4.4.14/kernel/
H A Dasync.c78 struct async_domain *domain; member in struct:async_entry
85 static async_cookie_t lowest_in_progress(struct async_domain *domain) lowest_in_progress() argument
93 if (domain) lowest_in_progress()
94 pending = &domain->pending; lowest_in_progress()
148 static async_cookie_t __async_schedule(async_func_t func, void *data, struct async_domain *domain) __async_schedule() argument
176 entry->domain = domain; __async_schedule()
183 list_add_tail(&entry->domain_list, &domain->pending); __async_schedule()
184 if (domain->registered) __async_schedule()
214 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
217 * @domain: the domain
220 * @domain may be used in the async_synchronize_*_domain() functions to
221 * wait within a certain synchronization domain rather than globally. A
222 * synchronization domain is specified via @domain. Note: This function
226 struct async_domain *domain) async_schedule_domain()
228 return __async_schedule(func, data, domain); async_schedule_domain()
244 * async_unregister_domain - ensure no more anonymous waiters on this domain
245 * @domain: idle domain to flush out of any async_synchronize_full instances
248 * of these routines should know the lifetime of @domain
252 void async_unregister_domain(struct async_domain *domain) async_unregister_domain() argument
255 WARN_ON(!domain->registered || !list_empty(&domain->pending)); async_unregister_domain()
256 domain->registered = 0; async_unregister_domain()
262 * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
263 * @domain: the domain to synchronize
266 * synchronization domain specified by @domain have been done.
268 void async_synchronize_full_domain(struct async_domain *domain) async_synchronize_full_domain() argument
270 async_synchronize_cookie_domain(ASYNC_COOKIE_MAX, domain); async_synchronize_full_domain()
275 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
277 * @domain: the domain to synchronize (%NULL for all registered domains)
280 * synchronization domain specified by @domain submitted prior to @cookie
283 void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *domain) async_synchronize_cookie_domain() argument
292 wait_event(async_done, lowest_in_progress(domain) >= cookie); async_synchronize_cookie_domain()
225 async_schedule_domain(async_func_t func, void *data, struct async_domain *domain) async_schedule_domain() argument
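Putting the pieces above together: a caller declares its own synchronization domain, schedules work into it, and then waits on that domain alone rather than on everything in flight globally. A minimal sketch, assuming <linux/async.h>; all names are hypothetical:

    static ASYNC_DOMAIN(example_domain);    /* registered: seen by async_synchronize_full */

    static void example_work(void *data, async_cookie_t cookie)
    {
            /* slow, order-independent initialisation */
    }

    static int example_probe(void)
    {
            async_schedule_domain(example_work, NULL, &example_domain);
            /* ... other setup runs concurrently ... */
            async_synchronize_full_domain(&example_domain); /* wait for our work only */
            return 0;
    }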
H A Dcpu_pm.c87 * cause some blocks in the same power domain as the cpu to reset.
121 * have caused some blocks in the same power domain as the cpu to reset.
144 * Notifies listeners that all cpus in a power domain are entering a low power
145 * state that may cause some blocks in the same power domain to reset.
148 * domain, and before cpu_pm_exit has been called on any cpu in the power
149 * domain. Notified drivers can include VFP co-processor, interrupt controller
179 * Notifies listeners that all cpus in a power domain are exiting from a
180 * low power state that may have caused some blocks in the same power domain
184 * domain, and before cpu_pm_exit has been called on any cpu in the power
185 * domain. Notified drivers can include VFP co-processor, interrupt controller
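The notifications described above are delivered through an ordinary notifier chain. A sketch of a listener that distinguishes the per-cpu events from the cluster (power domain) events, assuming <linux/cpu_pm.h>; the callback and variable names are hypothetical:

    static int example_cpu_pm_notify(struct notifier_block *nb,
                                     unsigned long action, void *data)
    {
            switch (action) {
            case CPU_PM_ENTER:              /* one cpu enters low power */
                    /* save per-cpu context (e.g. VFP, GIC cpu interface) */
                    break;
            case CPU_PM_EXIT:               /* that cpu is back */
                    /* restore per-cpu context */
                    break;
            case CPU_CLUSTER_PM_ENTER:      /* whole power domain going down */
                    /* save shared context (e.g. GIC distributor, L2) */
                    break;
            case CPU_CLUSTER_PM_EXIT:
                    /* restore shared context */
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_cpu_pm_nb = {
            .notifier_call = example_cpu_pm_notify,
    };
    /* registered once with cpu_pm_register_notifier(&example_cpu_pm_nb); */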
/linux-4.4.14/drivers/soc/dove/
H A Dpmu.c130 * This deals with the "old" Marvell sequence of bringing a power domain
142 static int pmu_domain_power_off(struct generic_pm_domain *domain) pmu_domain_power_off() argument
144 struct pmu_domain *pmu_dom = to_pmu_domain(domain); pmu_domain_power_off()
176 static int pmu_domain_power_on(struct generic_pm_domain *domain) pmu_domain_power_on() argument
178 struct pmu_domain *pmu_dom = to_pmu_domain(domain); pmu_domain_power_on()
210 static void __pmu_domain_register(struct pmu_domain *domain, __pmu_domain_register() argument
213 unsigned int val = readl_relaxed(domain->pmu->pmu_base + PMU_PWR); __pmu_domain_register()
215 domain->base.power_off = pmu_domain_power_off; __pmu_domain_register()
216 domain->base.power_on = pmu_domain_power_on; __pmu_domain_register()
218 pm_genpd_init(&domain->base, NULL, !(val & domain->pwr_mask)); __pmu_domain_register()
221 of_genpd_add_provider_simple(np, &domain->base); __pmu_domain_register()
229 struct irq_domain *domain = pmu->irq_domain; pmu_irq_handler() local
245 generic_handle_irq(irq_find_mapping(domain, hwirq)); pmu_irq_handler()
269 struct irq_domain *domain; dove_init_pmu_irq() local
276 domain = irq_domain_add_linear(pmu->of_node, NR_PMU_IRQS, dove_init_pmu_irq()
278 if (!domain) { dove_init_pmu_irq()
279 pr_err("%s: unable to add irq domain\n", name); dove_init_pmu_irq()
283 ret = irq_alloc_domain_generic_chips(domain, NR_PMU_IRQS, 1, name, dove_init_pmu_irq()
288 pr_err("%s: unable to alloc irq domain gc: %d\n", name, ret); dove_init_pmu_irq()
289 irq_domain_remove(domain); dove_init_pmu_irq()
293 gc = irq_get_domain_generic_chip(domain, 0); dove_init_pmu_irq()
299 pmu->irq_domain = domain; dove_init_pmu_irq()
315 * vpu_domain: vpu-domain {
316 * #power-domain-cells = <0>;
321 * gpu_domain: gpu-domain {
322 * #power-domain-cells = <0>;
366 struct pmu_domain *domain; for_each_available_child_of_node() local
368 domain = kzalloc(sizeof(*domain), GFP_KERNEL); for_each_available_child_of_node()
369 if (!domain) for_each_available_child_of_node()
372 domain->pmu = pmu; for_each_available_child_of_node()
373 domain->base.name = kstrdup(np->name, GFP_KERNEL); for_each_available_child_of_node()
374 if (!domain->base.name) { for_each_available_child_of_node()
375 kfree(domain); for_each_available_child_of_node()
380 &domain->pwr_mask); for_each_available_child_of_node()
382 &domain->iso_mask); for_each_available_child_of_node()
393 domain->rst_mask = BIT(args.args[0]); for_each_available_child_of_node()
397 __pmu_domain_register(domain, np); for_each_available_child_of_node()
/linux-4.4.14/drivers/base/
H A Dmap.c32 int kobj_map(struct kobj_map *domain, dev_t dev, unsigned long range, kobj_map() argument
56 mutex_lock(domain->lock); kobj_map()
58 struct probe **s = &domain->probes[index % 255]; kobj_map()
64 mutex_unlock(domain->lock); kobj_map()
68 void kobj_unmap(struct kobj_map *domain, dev_t dev, unsigned long range) kobj_unmap() argument
78 mutex_lock(domain->lock); kobj_unmap()
81 for (s = &domain->probes[index % 255]; *s; s = &(*s)->next) { kobj_unmap()
91 mutex_unlock(domain->lock); kobj_unmap()
95 struct kobject *kobj_lookup(struct kobj_map *domain, dev_t dev, int *index) kobj_lookup() argument
102 mutex_lock(domain->lock); kobj_lookup()
103 for (p = domain->probes[MAJOR(dev) % 255]; p; p = p->next) { kobj_lookup()
123 mutex_unlock(domain->lock); kobj_lookup()
131 mutex_unlock(domain->lock); kobj_lookup()
H A Dplatform-msi.c61 static int platform_msi_init(struct irq_domain *domain, platform_msi_init() argument
66 return irq_domain_set_hwirq_and_chip(domain, virq, hwirq, platform_msi_init()
154 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
156 * @info: MSI domain info
157 * @parent: Parent irq domain
159 * Updates the domain and chip ops and creates a platform MSI
160 * interrupt domain.
163 * A domain pointer or NULL in case of failure.
169 struct irq_domain *domain; platform_msi_create_irq_domain() local
176 domain = msi_create_irq_domain(fwnode, info, parent); platform_msi_create_irq_domain()
177 if (domain) platform_msi_create_irq_domain()
178 domain->bus_token = DOMAIN_BUS_PLATFORM_MSI; platform_msi_create_irq_domain()
180 return domain; platform_msi_create_irq_domain()
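Callers of platform_msi_create_irq_domain() supply an msi_domain_info whose gaps the function fills in. A rough sketch of such a caller, assuming the MSI_FLAG_USE_DEF_DOM_OPS / MSI_FLAG_USE_DEF_CHIP_OPS flags from <linux/msi.h>; the chip, its write-msg callback, and the surrounding variables are hypothetical:

    static struct irq_chip example_msi_chip = {
            .name              = "example-msi",
            .irq_mask          = irq_chip_mask_parent,
            .irq_unmask        = irq_chip_unmask_parent,
            .irq_write_msi_msg = example_write_msg,      /* hypothetical */
    };

    static struct msi_domain_info example_msi_info = {
            .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
            .chip  = &example_msi_chip,
    };

    /* in the controller's init path (fwnode/parent_domain from the driver): */
    msi_domain = platform_msi_create_irq_domain(fwnode, &example_msi_info,
                                                parent_domain);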
/linux-4.4.14/arch/arm/mach-davinci/
H A Dpsc.c30 /* Return nonzero iff the domain's clock is active */ davinci_psc_is_clk_active()
51 /* Control "reset" line associated with PSC domain */ davinci_psc_reset()
76 /* Enable or disable a PSC domain */ davinci_psc_config()
77 void davinci_psc_config(unsigned int domain, unsigned int ctlr, davinci_psc_config() argument
107 pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain); davinci_psc_config()
109 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); davinci_psc_config()
111 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); davinci_psc_config()
113 ptcmd = 1 << domain; davinci_psc_config()
118 } while ((((epcpr >> domain) & 1) == 0)); davinci_psc_config()
120 pdctl = __raw_readl(psc_base + PDCTL + 4 * domain); davinci_psc_config()
122 __raw_writel(pdctl, psc_base + PDCTL + 4 * domain); davinci_psc_config()
124 ptcmd = 1 << domain; davinci_psc_config()
130 } while (!(((ptstat >> domain) & 1) == 0)); davinci_psc_config()
/linux-4.4.14/include/linux/
H A Dasync.h26 * domain participates in global async_synchronize_full
33 * domain is free to go out of scope as soon as all pending work is
34 * complete, this domain does not participate in async_synchronize_full
42 struct async_domain *domain);
43 void async_unregister_domain(struct async_domain *domain);
45 extern void async_synchronize_full_domain(struct async_domain *domain);
48 struct async_domain *domain);
H A Diommu.h61 * This are the possible domain-types
136 * @domain_init: init iommu domain
137 * @domain_destroy: destroy iommu domain
138 * @attach_dev: attach device to an iommu domain
139 * @detach_dev: detach device from an iommu domain
140 * @map: map a physically contiguous memory region to an iommu domain
141 * @unmap: unmap a physically contiguous memory region from an iommu domain
143 * to an iommu domain
147 * @domain_get_attr: Query domain attributes
148 * @domain_set_attr: Change domain attributes
160 int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
161 void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
162 int (*map)(struct iommu_domain *domain, unsigned long iova,
164 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
166 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
168 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
172 int (*domain_get_attr)(struct iommu_domain *domain,
174 int (*domain_set_attr)(struct iommu_domain *domain,
182 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
184 void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
185 /* Set the number of windows per domain */
186 int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
187 /* Get the number of windows per domain */
188 u32 (*domain_get_windows)(struct iommu_domain *domain);
210 extern void iommu_domain_free(struct iommu_domain *domain);
211 extern int iommu_attach_device(struct iommu_domain *domain,
213 extern void iommu_detach_device(struct iommu_domain *domain,
216 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
218 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
220 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
223 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
224 extern void iommu_set_fault_handler(struct iommu_domain *domain,
231 extern int iommu_attach_group(struct iommu_domain *domain,
233 extern void iommu_detach_group(struct iommu_domain *domain,
256 extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr,
258 extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr,
268 extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
271 extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
274 * @domain: the iommu domain where the fault has happened
296 static inline int report_iommu_fault(struct iommu_domain *domain, report_iommu_fault() argument
305 if (domain->handler) report_iommu_fault()
306 ret = domain->handler(domain, dev, iova, flags, report_iommu_fault()
307 domain->handler_token); report_iommu_fault()
313 static inline size_t iommu_map_sg(struct iommu_domain *domain, iommu_map_sg() argument
317 return domain->ops->map_sg(domain, iova, sg, nents, prot); iommu_map_sg()
350 static inline void iommu_domain_free(struct iommu_domain *domain) iommu_domain_free() argument
354 static inline int iommu_attach_device(struct iommu_domain *domain, iommu_attach_device() argument
360 static inline void iommu_detach_device(struct iommu_domain *domain, iommu_detach_device() argument
370 static inline int iommu_map(struct iommu_domain *domain, unsigned long iova, iommu_map() argument
376 static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, iommu_unmap() argument
382 static inline size_t iommu_map_sg(struct iommu_domain *domain, iommu_map_sg() argument
389 static inline int iommu_domain_window_enable(struct iommu_domain *domain, iommu_domain_window_enable() argument
396 static inline void iommu_domain_window_disable(struct iommu_domain *domain, iommu_domain_window_disable() argument
401 static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) iommu_iova_to_phys() argument
406 static inline void iommu_set_fault_handler(struct iommu_domain *domain, iommu_set_fault_handler() argument
426 static inline int iommu_attach_group(struct iommu_domain *domain, iommu_attach_group() argument
432 static inline void iommu_detach_group(struct iommu_domain *domain, iommu_detach_group() argument
502 static inline int iommu_domain_get_attr(struct iommu_domain *domain, iommu_domain_get_attr() argument
508 static inline int iommu_domain_set_attr(struct iommu_domain *domain, iommu_domain_set_attr() argument
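The iommu_* wrappers declared above follow a simple lifecycle: allocate a domain for a bus, attach the device, then map, query and unmap IOVA ranges. A hedged sketch under the assumption that the caller owns the IOVA space; my_map_buffer and the fixed IOVA are hypothetical:

#include <linux/device.h>
#include <linux/iommu.h>

static int my_map_buffer(struct device *dev, phys_addr_t phys, size_t size)
{
        struct iommu_domain *domain;
        unsigned long iova = 0x100000;  /* assumed-free IOVA, illustration only */
        int ret;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, dev);
        if (ret)
                goto free_domain;

        ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto detach;

        /* The translation can be read back at any time. */
        WARN_ON(iommu_iova_to_phys(domain, iova) != phys);

        iommu_unmap(domain, iova, size);
detach:
        iommu_detach_device(domain, dev);
free_domain:
        iommu_domain_free(domain);
        return ret;
}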
H A Dirqdomain.h13 * Interrupt controller "domain" data structure. This could be defined as an
14 * irq domain controller. That is, it handles the mapping between hardware
15 * and virtual interrupt numbers for a given interrupt domain. The domain
17 * (though a domain can cover more than one PIC if they have a flat number
18 * model). It's the domain callbacks that are responsible for setting the
22 * identify the domain. In some cases, and in order to preserve source
66 * different purposes (for example one domain is for PCI/MSI, and the
122 * @name: Name of interrupt domain
166 /* Irq domain flags */
168 /* Irq domain is hierarchical */
171 /* Core calls alloc/free recursive through the domain hierarchy. */
228 * @size: Number of interrupts in the domain.
229 * @ops: map/unmap domain callbacks
278 extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
280 extern void irq_domain_associate_many(struct irq_domain *domain,
283 extern void irq_domain_disassociate(struct irq_domain *domain,
293 * @domain: domain owning this hardware interrupt
294 * @hwirq: hardware irq number in that domain space
301 static inline unsigned int irq_linear_revmap(struct irq_domain *domain, irq_linear_revmap() argument
304 return hwirq < domain->revmap_size ? domain->linear_revmap[hwirq] : 0; irq_linear_revmap()
309 extern int irq_create_strict_mappings(struct irq_domain *domain,
333 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
335 extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
357 extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
364 static inline int irq_domain_alloc_irqs(struct irq_domain *domain, irq_domain_alloc_irqs() argument
367 return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); irq_domain_alloc_irqs()
370 extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
376 extern void irq_domain_free_irqs_common(struct irq_domain *domain,
379 extern void irq_domain_free_irqs_top(struct irq_domain *domain,
382 extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
386 extern void irq_domain_free_irqs_parent(struct irq_domain *domain,
390 static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) irq_domain_is_hierarchy() argument
392 return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; irq_domain_is_hierarchy()
397 static inline int irq_domain_alloc_irqs(struct irq_domain *domain, irq_domain_alloc_irqs() argument
403 static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) irq_domain_is_hierarchy() argument
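The comments above describe how a domain maps hardware interrupt numbers to Linux virqs, with irq_linear_revmap() as the fast reverse path. A sketch of registering a small linear domain; the 32-line controller and my_* names are hypothetical, and dummy_irq_chip merely stands in for a real irq_chip:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int my_irq_map(struct irq_domain *d, unsigned int virq,
                      irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
        return 0;
}

static const struct irq_domain_ops my_domain_ops = {
        .map   = my_irq_map,
        .xlate = irq_domain_xlate_onecell,
};

static struct irq_domain *my_domain;

static int my_probe(struct device_node *np)
{
        my_domain = irq_domain_add_linear(np, 32, &my_domain_ops, NULL);
        if (!my_domain)
                return -ENOMEM;

        /* Later: unsigned int virq = irq_linear_revmap(my_domain, hwirq); */
        return 0;
}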
H A Dcpu_pm.h26 * power domain, the contents of some blocks (floating point coprocessors,
27 * interrupt controllers, caches, timers) in the same power domain can
40 * CPU cluster notifications apply to all CPUs in a single power domain. They
42 * after all the CPUs in the power domain have been notified of the low power
59 /* A cpu power domain is entering a low power state */
62 /* A cpu power domain failed to enter a low power state */
65 /* A cpu power domain is exiting a low power state */
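cpu_pm.h above notes that cluster notifications fire once per CPU power domain, after all CPUs in the domain have entered low power, so owners of shared blocks save and restore context from a notifier. A hedged sketch; my_cpu_pm_notify is hypothetical:

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

static int my_cpu_pm_notify(struct notifier_block *nb, unsigned long action,
                            void *data)
{
        switch (action) {
        case CPU_CLUSTER_PM_ENTER:
                /* save state of blocks shared across the power domain */
                break;
        case CPU_CLUSTER_PM_ENTER_FAILED:
        case CPU_CLUSTER_PM_EXIT:
                /* restore the shared state */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_cpu_pm_nb = {
        .notifier_call = my_cpu_pm_notify,
};

static int __init my_register(void)
{
        return cpu_pm_register_notifier(&my_cpu_pm_nb);
}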
H A Dpm_domain.h20 #define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
23 GPD_STATE_ACTIVE = 0, /* PM domain is active */
24 GPD_STATE_POWER_OFF, /* PM domain is off */
28 bool (*power_down_ok)(struct dev_pm_domain *domain);
41 struct dev_pm_domain domain; /* PM domain operations */ member in struct:generic_pm_domain
43 struct list_head master_links; /* Links with PM domain as a master */
44 struct list_head slave_links; /* Links with PM domain as a slave */
51 enum gpd_status status; /* Current state of the domain */
56 int (*power_off)(struct generic_pm_domain *domain);
58 int (*power_on)(struct generic_pm_domain *domain);
64 int (*attach_dev)(struct generic_pm_domain *domain,
66 void (*detach_dev)(struct generic_pm_domain *domain,
73 return container_of(pd, struct generic_pm_domain, domain); pd_to_genpd()
180 /* OF PM domain providers */ pm_genpd_syscore_poweron()
H A Dmsi.h183 * struct msi_domain_ops - MSI interrupt domain callbacks
187 * @msi_check: Callback for verification of the domain/info/dev data
188 * @msi_prepare: Prepare the allocation of the interrupts in the domain
203 int (*msi_init)(struct irq_domain *domain,
207 void (*msi_free)(struct irq_domain *domain,
210 int (*msi_check)(struct irq_domain *domain,
213 int (*msi_prepare)(struct irq_domain *domain,
219 int (*handle_error)(struct irq_domain *domain,
224 * struct msi_domain_info - MSI interrupt domain data
232 * @data: Optional: domain specific data
248 * Init non implemented ops callbacks with default MSI domain
271 int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
273 void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
274 struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
289 int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev,
291 void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev);
297 int pci_msi_domain_check_cap(struct irq_domain *domain,
299 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
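Most users of the callbacks listed above fill in only msi_domain_info and let the MSI_FLAG_USE_DEF_DOM_OPS/MSI_FLAG_USE_DEF_CHIP_OPS flags supply defaults before handing the info to a creator such as platform_msi_create_irq_domain() (shown earlier). A sketch with hypothetical my_msi_chip/my_write_msi_msg:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static void my_write_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        /* program the doorbell address/data into the hypothetical device */
}

static struct irq_chip my_msi_chip = {
        .name              = "my-msi",
        .irq_mask          = irq_chip_mask_parent,
        .irq_unmask        = irq_chip_unmask_parent,
        .irq_write_msi_msg = my_write_msi_msg,
};

static struct msi_domain_info my_msi_info = {
        .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
        .chip  = &my_msi_chip,
};

/* parent is the wired interrupt domain below the MSI layer. */
struct irq_domain *my_create_msi_domain(struct fwnode_handle *fwnode,
                                        struct irq_domain *parent)
{
        return platform_msi_create_irq_domain(fwnode, &my_msi_info, parent);
}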
H A Diova.h26 /* holds all the iova translations for a domain */
29 struct rb_root rbroot; /* iova domain rbtree root */
31 unsigned long granule; /* pfn granularity for this domain */
32 unsigned long start_pfn; /* Lower limit for this domain */
H A Ddma-iommu.h28 int iommu_get_dma_cookie(struct iommu_domain *domain);
29 void iommu_put_dma_cookie(struct iommu_domain *domain);
32 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size);
74 static inline int iommu_get_dma_cookie(struct iommu_domain *domain) iommu_get_dma_cookie() argument
79 static inline void iommu_put_dma_cookie(struct iommu_domain *domain) iommu_put_dma_cookie() argument
H A Dscpi_protocol.h54 * @dvfs_get_idx: gets the Operating Point of the given power domain.
56 * @dvfs_set_idx: sets the Operating Point of the given power domain.
59 * domain. It includes the OPP list and the latency information
/linux-4.4.14/include/xen/interface/
H A Dsched.h45 * Halt execution of this domain (all VCPUs) and notify the system controller.
68 * Declare a shutdown for another domain. The main use of this function is
70 * domains. A para-virtualized domain may use SCHEDOP_shutdown directly.
75 domid_t domain_id; /* Remote domain ID */
80 * Latch a shutdown code, so that when the domain later shuts down it
87 * Setup, poke and destroy a domain watchdog timer.
89 * With id == 0, setup a domain watchdog timer to cause domain shutdown
91 * With id != 0 and timeout == 0, destroy domain watchdog timer.
112 * reset internal Xen state for the domain returning it to the point where it
113 * was created but leaving the domain's memory contents and vCPU contexts
114 * intact. This will allow the domain to start over and set up all Xen specific
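The watchdog comments above encode a small protocol: id == 0 creates a timer and returns its id, a nonzero id with a nonzero timeout re-arms it, and a nonzero id with timeout == 0 destroys it. A sketch of that sequence from a guest, assuming the standard HYPERVISOR_sched_op() wrapper:

#include <xen/interface/sched.h>
#include <asm/xen/hypercall.h>

static int my_watchdog_cycle(void)
{
        struct sched_watchdog wd = { .id = 0, .timeout = 60 };
        int id;

        id = HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);  /* create */
        if (id <= 0)
                return id;

        wd.id = id;
        wd.timeout = 60;
        HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);       /* re-arm (kick) */

        wd.timeout = 0;
        return HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd); /* destroy */
}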
H A Devent_channel.h18 * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
19 * accepting interdomain bindings from domain <remote_dom>. A fresh port
35 * the calling domain and <remote_dom>. <remote_dom,remote_port> must identify
37 * domain. A fresh port is allocated in the calling domain and returned as
71 * 1. A physical IRQ may be bound to at most one event channel per domain.
72 * 2. Only a sufficiently-privileged domain may bind to a physical IRQ.
123 * 2. Only a sufficiently-privileged domain may obtain the status of an event
134 #define EVTCHNSTAT_interdomain 2 /* Channel is connected to remote domain. */
181 * EVTCHNOP_reset: Close all event channels associated with specified domain.
184 * 2. Only a sufficiently-privileged domain may specify other than DOMID_SELF.
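Per the EVTCHNOP_alloc_unbound description above, a domain allocates a fresh local port and advertises it (typically via XenStore) so that <remote_dom> can bind to it. A hedged sketch; my_alloc_unbound is a hypothetical helper:

#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <asm/xen/hypercall.h>

static int my_alloc_unbound(domid_t remote_dom, evtchn_port_t *port)
{
        struct evtchn_alloc_unbound alloc = {
                .dom        = DOMID_SELF,
                .remote_dom = remote_dom,
        };
        int err;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc);
        if (err)
                return err;

        *port = alloc.port;     /* fresh port in the calling domain */
        return 0;
}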
H A Dgrant_table.h61 * This cannot be done directly. Request assistance from the domain controller
88 * Reference to a grant entry in a specified domain's grant table.
106 /* The domain being granted foreign privileges. [GST] */
166 * Any given domain will have either a version 1 or a version 2 table,
170 * on the grant table version in use by the other domain.
219 * grant @gref in domain @trans_domid, as if it was the local
220 * domain. Obviously, the transitive access must be compatible
303 * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
304 * 3. Xen may not support more than a single grant-table page per domain.
331 * GNTTABOP_transfer_grant_ref: Transfer <frame> to a foreign domain. The
332 * foreign domain has previously registered its interest in the transfer via
336 * to the calling domain *unless* the error is GNTST_bad_page.
352 * grant references. the foreign domain has to grant read/write access
395 * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
433 * once in any given domain. It must be performed before any grants
434 * are activated; otherwise, the domain will be stuck with version 1.
454 * 2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
469 * effect for domain <dom>.
483 * page granted to the calling domain by a foreign domain.
540 #define GNTST_bad_domain (-2) /* Unrecognised domain id. */
555 "unrecognised domain id", \
H A Dxenpmu.h84 * XENPMU_MODE_ALL mode, domain ID of another domain.
H A Dnmi.h35 * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
/linux-4.4.14/drivers/vfio/
H A Dvfio_iommu_type1.c65 struct iommu_domain *domain; member in struct:vfio_domain
339 struct vfio_domain *domain, *d; vfio_unmap_unpin() local
351 domain = d = list_first_entry(&iommu->domain_list, vfio_unmap_unpin()
355 iommu_unmap(d->domain, dma->iova, dma->size); vfio_unmap_unpin()
363 phys = iommu_iova_to_phys(domain->domain, iova); vfio_unmap_unpin()
375 !domain->fgsp && iova + len < end; len += PAGE_SIZE) { vfio_unmap_unpin()
376 next = iommu_iova_to_phys(domain->domain, iova + len); vfio_unmap_unpin()
381 unmapped = iommu_unmap(domain->domain, iova, len); vfio_unmap_unpin()
405 struct vfio_domain *domain; vfio_pgsize_bitmap() local
409 list_for_each_entry(domain, &iommu->domain_list, next) vfio_pgsize_bitmap()
410 bitmap &= domain->domain->ops->pgsize_bitmap; vfio_pgsize_bitmap()
514 static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova, map_try_harder() argument
521 ret = iommu_map(domain->domain, iova, map_try_harder()
523 PAGE_SIZE, prot | domain->prot); map_try_harder()
529 iommu_unmap(domain->domain, iova, PAGE_SIZE); map_try_harder()
541 ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, vfio_iommu_map()
556 iommu_unmap(d->domain, iova, npage << PAGE_SHIFT); vfio_iommu_map()
655 struct vfio_domain *domain) vfio_iommu_replay()
661 /* Arbitrarily pick the first domain in the list for lookups */ vfio_iommu_replay()
665 /* If there's no domain, there had better not be any mappings */ vfio_iommu_replay()
677 phys_addr_t phys = iommu_iova_to_phys(d->domain, iova); vfio_iommu_replay()
688 phys + size == iommu_iova_to_phys(d->domain, vfio_iommu_replay()
692 ret = iommu_map(domain->domain, iova, phys, vfio_iommu_replay()
693 size, dma->prot | domain->prot); vfio_iommu_replay()
714 static void vfio_test_domain_fgsp(struct vfio_domain *domain) vfio_test_domain_fgsp() argument
723 ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, vfio_test_domain_fgsp()
724 IOMMU_READ | IOMMU_WRITE | domain->prot); vfio_test_domain_fgsp()
726 size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); vfio_test_domain_fgsp()
729 iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE); vfio_test_domain_fgsp()
731 domain->fgsp = true; vfio_test_domain_fgsp()
742 struct vfio_domain *domain, *d; vfio_iommu_type1_attach_group() local
759 domain = kzalloc(sizeof(*domain), GFP_KERNEL); vfio_iommu_type1_attach_group()
760 if (!group || !domain) { vfio_iommu_type1_attach_group()
767 /* Determine bus_type in order to allocate a domain */ vfio_iommu_type1_attach_group()
772 domain->domain = iommu_domain_alloc(bus); vfio_iommu_type1_attach_group()
773 if (!domain->domain) { vfio_iommu_type1_attach_group()
781 ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING, vfio_iommu_type1_attach_group()
787 ret = iommu_attach_group(domain->domain, iommu_group); vfio_iommu_type1_attach_group()
791 INIT_LIST_HEAD(&domain->group_list); vfio_iommu_type1_attach_group()
792 list_add(&group->next, &domain->group_list); vfio_iommu_type1_attach_group()
803 domain->prot |= IOMMU_CACHE; vfio_iommu_type1_attach_group()
806 * Try to match an existing compatible domain. We don't want to vfio_iommu_type1_attach_group()
808 * able to include different bus_types in the same IOMMU domain, so vfio_iommu_type1_attach_group()
813 if (d->domain->ops == domain->domain->ops && vfio_iommu_type1_attach_group()
814 d->prot == domain->prot) { vfio_iommu_type1_attach_group()
815 iommu_detach_group(domain->domain, iommu_group); vfio_iommu_type1_attach_group()
816 if (!iommu_attach_group(d->domain, iommu_group)) { vfio_iommu_type1_attach_group()
818 iommu_domain_free(domain->domain); vfio_iommu_type1_attach_group()
819 kfree(domain); vfio_iommu_type1_attach_group()
824 ret = iommu_attach_group(domain->domain, iommu_group); vfio_iommu_type1_attach_group()
830 vfio_test_domain_fgsp(domain); vfio_iommu_type1_attach_group()
833 ret = vfio_iommu_replay(iommu, domain); vfio_iommu_type1_attach_group()
837 list_add(&domain->next, &iommu->domain_list); vfio_iommu_type1_attach_group()
844 iommu_detach_group(domain->domain, iommu_group); vfio_iommu_type1_attach_group()
846 iommu_domain_free(domain->domain); vfio_iommu_type1_attach_group()
848 kfree(domain); vfio_iommu_type1_attach_group()
866 struct vfio_domain *domain; vfio_iommu_type1_detach_group() local
871 list_for_each_entry(domain, &iommu->domain_list, next) { vfio_iommu_type1_detach_group()
872 list_for_each_entry(group, &domain->group_list, next) { vfio_iommu_type1_detach_group()
876 iommu_detach_group(domain->domain, iommu_group); vfio_iommu_type1_detach_group()
881 * list is empty, the domain goes away. If it's the vfio_iommu_type1_detach_group()
882 * last domain, then all the mappings go away too. vfio_iommu_type1_detach_group()
884 if (list_empty(&domain->group_list)) { vfio_iommu_type1_detach_group()
887 iommu_domain_free(domain->domain); vfio_iommu_type1_detach_group()
888 list_del(&domain->next); vfio_iommu_type1_detach_group()
889 kfree(domain); vfio_iommu_type1_detach_group()
930 struct vfio_domain *domain, *domain_tmp; vfio_iommu_type1_release() local
935 list_for_each_entry_safe(domain, domain_tmp, vfio_iommu_type1_release()
938 &domain->group_list, next) { vfio_iommu_type1_release()
939 iommu_detach_group(domain->domain, group->iommu_group); vfio_iommu_type1_release()
943 iommu_domain_free(domain->domain); vfio_iommu_type1_release()
944 list_del(&domain->next); vfio_iommu_type1_release()
945 kfree(domain); vfio_iommu_type1_release()
953 struct vfio_domain *domain; vfio_domains_have_iommu_cache() local
957 list_for_each_entry(domain, &iommu->domain_list, next) { vfio_domains_have_iommu_cache()
958 if (!(domain->prot & IOMMU_CACHE)) { vfio_domains_have_iommu_cache()
654 vfio_iommu_replay(struct vfio_iommu *iommu, struct vfio_domain *domain) vfio_iommu_replay() argument
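map_try_harder() above retries a failed multi-page iommu_map() one PAGE_SIZE mapping at a time, unwinding whatever was mapped if a page fails. A self-contained sketch of the same pattern with the unwind written out explicitly; map_pages_fallback is hypothetical:

#include <linux/iommu.h>
#include <linux/mm.h>

static int map_pages_fallback(struct iommu_domain *domain, unsigned long iova,
                              unsigned long pfn, long npage, int prot)
{
        long i;
        int ret = 0;

        for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
                ret = iommu_map(domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
                                PAGE_SIZE, prot);
                if (ret)
                        break;
        }
        if (!ret)
                return 0;

        while (i-- > 0) {               /* unmap pages i-1 .. 0 */
                iova -= PAGE_SIZE;
                iommu_unmap(domain, iova, PAGE_SIZE);
        }
        return ret;
}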
/linux-4.4.14/arch/arm/mach-exynos/
H A Dpm_domains.c2 * Exynos Generic power domain support.
7 * Implementation of Exynos specific power domain control which is used in
9 * based power domain support is included.
30 * Exynos specific wrapper around the generic power domain
43 static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) exynos_pd_power() argument
51 pd = container_of(domain, struct exynos_pm_domain, pd); exynos_pd_power()
60 /* Set oscclk before powering off a domain */ exynos_pd_power()
81 pr_err("Power domain %s %s failed\n", domain->name, op); exynos_pd_power()
89 /* Restore clocks after powering on a domain */ exynos_pd_power()
112 static int exynos_pd_power_on(struct generic_pm_domain *domain) exynos_pd_power_on() argument
114 return exynos_pd_power(domain, true); exynos_pd_power_on()
117 static int exynos_pd_power_off(struct generic_pm_domain *domain) exynos_pd_power_off() argument
119 return exynos_pd_power(domain, false); exynos_pd_power_off()
132 pr_err("%s: failed to allocate memory for domain\n", exynos4_pm_init_power_domain()
206 "#power-domain-cells", 0, &args) != 0) exynos4_pm_init_power_domain()
/linux-4.4.14/tools/power/cpupower/utils/helpers/
H A Dpci.c13 * domain: domain
25 struct pci_dev *pci_acc_init(struct pci_access **pacc, int domain, int bus, pci_acc_init() argument
36 filter_nb_link.domain = domain; pci_acc_init()
54 /* Typically one wants to get a specific slot (device)/function of the root domain
/linux-4.4.14/net/tipc/
H A Daddr.c67 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
84 * tipc_addr_domain_valid - validates a network domain address
89 * Returns 1 if domain address is valid, otherwise 0
116 int tipc_in_scope(u32 domain, u32 addr) tipc_in_scope() argument
118 if (!domain || (domain == addr)) tipc_in_scope()
120 if (domain == tipc_cluster_mask(addr)) /* domain <Z.C.0> */ tipc_in_scope()
122 if (domain == tipc_zone_mask(addr)) /* domain <Z.0.0> */ tipc_in_scope()
128 * tipc_addr_scope - convert message lookup domain to a 2-bit scope value
130 int tipc_addr_scope(u32 domain) tipc_addr_scope() argument
132 if (likely(!domain)) tipc_addr_scope()
134 if (tipc_node(domain)) tipc_addr_scope()
136 if (tipc_cluster(domain)) tipc_addr_scope()
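tipc_in_scope() above matches a <Z.C.N> network address against a lookup domain: 0 matches anything, <Z.C.0> matches the whole cluster, <Z.0.0> the whole zone. A simplified standalone illustration; the mask values are assumed from net/tipc/addr.h:

#include <linux/types.h>

#define MY_ZONE_MASK    0xff000000u     /* keeps <Z.0.0> */
#define MY_CLUSTER_MASK 0xfffff000u     /* keeps <Z.C.0> */

static int my_in_scope(u32 domain, u32 addr)
{
        if (!domain || domain == addr)
                return 1;
        if (domain == (addr & MY_CLUSTER_MASK)) /* domain <Z.C.0> */
                return 1;
        if (domain == (addr & MY_ZONE_MASK))    /* domain <Z.0.0> */
                return 1;
        return 0;
}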
H A Ddiscover.c55 * @domain: network domain to which links can be established
66 u32 domain; member in struct:tipc_link_req
85 u32 dest_domain = b_ptr->domain; tipc_disc_init_msg()
159 if (!tipc_in_scope(bearer->domain, onode)) tipc_disc_rcv()
233 if (tipc_node(req->domain) && req->num_nodes) { disc_timeout()
266 * @dest_domain: network domain to which links can be established
289 req->domain = b_ptr->domain; tipc_disc_create()
317 * @dest_domain: network domain to which links can be established
328 req->domain = b_ptr->domain; tipc_disc_reset()
/linux-4.4.14/drivers/base/power/
H A Ddomain.c2 * drivers/base/power/domain.c - Common code related to device power domains.
41 * Get the generic PM domain for a particular struct device.
42 * This validates the struct device pointer, the PM domain pointer,
43 * and checks that the PM domain pointer is a real generic PM domain.
55 if (&gpd->domain == dev->pm_domain) { pm_genpd_lookup_dev()
67 * attached to the device is a genpd domain.
176 * __genpd_poweron - Restore power to a given PM domain and its masters.
177 * @genpd: PM domain to power up.
225 * genpd_poweron - Restore power to a given PM domain and its masters.
226 * @genpd: PM domain to power up.
290 * genpd_poweroff - Remove power from a given PM domain.
291 * @genpd: PM domain to power down.
292 * @is_async: PM domain is powered down from a scheduled work
304 * Do not try to power off the domain in the following situations: genpd_poweroff()
305 * (1) The domain is already in the "power off" state. genpd_poweroff()
332 if (!genpd->gov->power_down_ok(&genpd->domain)) genpd_poweroff()
366 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
381 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
385 * pm_domain field points to the domain member of an object of type
386 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
455 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
459 * pm_domain field points to the domain member of an object of type
460 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
478 /* If power.irq_safe, the PM domain is never powered off. */ pm_genpd_runtime_resume()
548 * pm_genpd_present - Check if the given PM domain has been initialized.
549 * @genpd: PM domain to check.
572 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
573 * @genpd: PM domain to power off, if possible.
576 * Check if the given PM domain can be powered off (during system suspend or
607 * pm_genpd_sync_poweron - Synchronously power on a PM domain and its masters.
608 * @genpd: PM domain to power on.
637 * @genpd: PM domain the device belongs to.
662 * pm_genpd_prepare - Start power transition of a device in a PM domain.
666 * under the assumption that its pm_domain field points to the domain member of
667 * an object of type struct generic_pm_domain representing a PM domain
713 * The PM domain must be in the GPD_STATE_ACTIVE state at this point, pm_genpd_prepare()
737 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
741 * domain member of an object of type struct generic_pm_domain representing
742 * a PM domain consisting of I/O devices.
758 * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
762 * pm_domain field points to the domain member of an object of type
763 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
779 * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
782 * Stop the device and remove power from the domain if all devices in it have
804 * the same PM domain, so it is not necessary to use locking here. pm_genpd_suspend_noirq()
813 * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
816 * Restore power to the device's PM domain, if necessary, and start the device.
835 * the same PM domain, so it is not necessary to use locking here. pm_genpd_resume_noirq()
844 * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
848 * pm_domain field points to the domain member of an object of type
849 * struct generic_pm_domain representing a power domain consisting of I/O
866 * pm_genpd_resume - Resume of device in an I/O PM domain.
870 * domain member of an object of type struct generic_pm_domain representing
871 * a power domain consisting of I/O devices.
887 * pm_genpd_freeze - Freezing a device in an I/O PM domain.
891 * domain member of an object of type struct generic_pm_domain representing
892 * a power domain consisting of I/O devices.
908 * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
912 * pm_domain field points to the domain member of an object of type
913 * struct generic_pm_domain representing a power domain consisting of I/O
930 * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
934 * pm_domain field points to the domain member of an object of type
935 * struct generic_pm_domain representing a power domain consisting of I/O
952 * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
955 * Start the device, unless power has been removed from the domain already
973 * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
977 * pm_domain field points to the domain member of an object of type
978 * struct generic_pm_domain representing a power domain consisting of I/O
995 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
999 * domain member of an object of type struct generic_pm_domain representing
1000 * a power domain consisting of I/O devices.
1016 * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
1019 * Make sure the domain will be in the same power state as before the
1035 * the same PM domain, so it is not necessary to use locking here. pm_genpd_restore_noirq()
1038 * first time for the given domain in the present cycle. pm_genpd_restore_noirq()
1042 * The boot kernel might put the domain into arbitrary state, pm_genpd_restore_noirq()
1049 * If the domain was off before the hibernation, make pm_genpd_restore_noirq()
1067 * pm_genpd_complete - Complete power transition of a device in a power domain.
1072 * domain member of an object of type struct generic_pm_domain representing
1073 * a power domain consisting of I/O devices.
1191 dev->pm_domain = &genpd->domain; genpd_alloc_dev_data()
1220 * __pm_genpd_add_device - Add a device to an I/O PM domain.
1221 * @genpd: PM domain to add the device to.
1268 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1269 * @genpd: PM domain to remove the device from.
1318 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1319 * @genpd: Master PM domain to add the subdomain to.
1369 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1370 * @genpd: Master PM domain to remove the subdomain from.
1465 * pm_genpd_init - Initialize a generic I/O PM domain object.
1466 * @genpd: PM domain object to initialize.
1467 * @gov: PM domain governor to associate with the domain (may be NULL).
1468 * @is_off: Initial value of the domain's power_is_off field.
1487 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; pm_genpd_init()
1488 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; pm_genpd_init()
1489 genpd->domain.ops.prepare = pm_genpd_prepare; pm_genpd_init()
1490 genpd->domain.ops.suspend = pm_genpd_suspend; pm_genpd_init()
1491 genpd->domain.ops.suspend_late = pm_genpd_suspend_late; pm_genpd_init()
1492 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; pm_genpd_init()
1493 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; pm_genpd_init()
1494 genpd->domain.ops.resume_early = pm_genpd_resume_early; pm_genpd_init()
1495 genpd->domain.ops.resume = pm_genpd_resume; pm_genpd_init()
1496 genpd->domain.ops.freeze = pm_genpd_freeze; pm_genpd_init()
1497 genpd->domain.ops.freeze_late = pm_genpd_freeze_late; pm_genpd_init()
1498 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; pm_genpd_init()
1499 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; pm_genpd_init()
1500 genpd->domain.ops.thaw_early = pm_genpd_thaw_early; pm_genpd_init()
1501 genpd->domain.ops.thaw = pm_genpd_thaw; pm_genpd_init()
1502 genpd->domain.ops.poweroff = pm_genpd_suspend; pm_genpd_init()
1503 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late; pm_genpd_init()
1504 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; pm_genpd_init()
1505 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; pm_genpd_init()
1506 genpd->domain.ops.restore_early = pm_genpd_resume_early; pm_genpd_init()
1507 genpd->domain.ops.restore = pm_genpd_resume; pm_genpd_init()
1508 genpd->domain.ops.complete = pm_genpd_complete; pm_genpd_init()
1525 * Device Tree based PM domain providers.
1527 * The code below implements generic device tree based PM domain providers that
1531 * devices to these domains is supposed to register a PM domain provider, which
1532 * maps a PM domain specifier retrieved from the device tree to a PM domain.
1535 * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
1541 * struct of_genpd_provider - PM domain provider registration structure
1542 * @link: Entry in global list of PM domain providers
1543 * @node: Pointer to device tree node of PM domain provider
1545 * into a PM domain.
1555 /* List of registered PM domain providers. */
1561 * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
1562 * @genpdspec: OF phandle args to map into a PM domain
1581 * @genpdspec: OF phandle args to map into a PM domain
1584 * This is a generic xlate function that can be used to model simple PM domain
1600 pr_err("%s: invalid domain index %u\n", __func__, idx); __of_genpd_xlate_onecell()
1612 * __of_genpd_add_provider() - Register a PM domain provider for a node
1613 * @np: Device node pointer associated with the PM domain provider.
1614 * @xlate: Callback for decoding PM domain from phandle arguments.
1633 pr_debug("Added domain provider from %s\n", np->full_name); __of_genpd_add_provider()
1640 * of_genpd_del_provider() - Remove a previously registered PM domain provider
1641 * @np: Device node pointer associated with the PM domain provider
1661 * of_genpd_get_from_provider() - Look-up PM domain
1664 * Looks for a PM domain provider under the node specified by @genpdspec and if
1666 * domain.
1694 * genpd_dev_pm_detach - Detach a device from its PM domain.
1698 * Try to locate a corresponding generic PM domain, which the device was
1711 dev_dbg(dev, "removing from PM domain %s\n", pd->name); genpd_dev_pm_detach()
1723 dev_err(dev, "failed to remove from PM domain %s: %d", genpd_dev_pm_detach()
1728 /* Check if PM domain can be powered off after removing this device. */ genpd_dev_pm_detach()
1744 * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
1747 * Parse device's OF node to find a PM domain specifier. If such is found,
1753 * Returns 0 on successfully attached PM domain or negative error code. Note
1754 * that if a power-domain exists for the device, but it cannot be found or
1772 "#power-domain-cells", 0, &pd_args); genpd_dev_pm_attach()
1783 "samsung,power-domain", 0); genpd_dev_pm_attach()
1791 dev_dbg(dev, "%s() failed to find PM domain: %ld\n", genpd_dev_pm_attach()
1796 dev_dbg(dev, "adding to PM domain %s\n", pd->name); genpd_dev_pm_attach()
1808 dev_err(dev, "failed to add to PM domain %s: %d", genpd_dev_pm_attach()
1914 seq_puts(s, "domain status slaves\n"); pm_genpd_summary_show()
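domain.c above routes the dev_pm_* callbacks through the generic_pm_domain embedded in each genpd, which pm_genpd_init() fills in. A sketch of a platform registering one power island; the my_island_* names are hypothetical:

#include <linux/pm_domain.h>

static int my_island_power_on(struct generic_pm_domain *domain)
{
        /* poke the SoC power controller to power the island up */
        return 0;
}

static int my_island_power_off(struct generic_pm_domain *domain)
{
        /* and to power it down */
        return 0;
}

static struct generic_pm_domain my_island_pd = {
        .name      = "my-island",
        .power_on  = my_island_power_on,
        .power_off = my_island_power_off,
};

static int __init my_pd_setup(void)
{
        /* Initially on, default governor; devices are added afterwards. */
        pm_genpd_init(&my_island_pd, NULL, false);
        return 0;
}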
H A Ddomain_governor.c40 * default_stop_ok - Default PM domain governor routine for stopping devices.
96 * default_power_down_ok - Default generic PM domain power off governor routine.
97 * @pd: PM domain to check.
99 * This routine must be executed under the PM domain's lock.
146 * the current domain to turn off and on (that's how much time default_power_down_ok()
157 * Check if the devices in the domain can be off enough time. default_power_down_ok()
165 * domain to turn off and on (that's how much time it will default_power_down_ok()
193 * latency constraints, so the domain can spend arbitrary time in the default_power_down_ok()
201 * time and the time needed to turn the domain on is the maximum default_power_down_ok()
202 * theoretical time this domain can spend in the "off" state. default_power_down_ok()
208 static bool always_on_power_down_ok(struct dev_pm_domain *domain) always_on_power_down_ok() argument
H A Dcommon.c83 * dev_pm_domain_attach - Attach a device to its PM domain.
87 * The @dev may only be attached to a single PM domain. By iterating through
88 * the available alternatives we try to find a valid PM domain for the device.
99 * Returns 0 on successfully attached PM domain or negative error code.
114 * dev_pm_domain_detach - Detach a device from its PM domain.
119 * try to detach the @dev from its PM domain. Typically it should be invoked
/linux-4.4.14/security/tomoyo/
H A Ddomain.c2 * security/tomoyo/domain.c
13 /* The initial domain. */
76 * tomoyo_update_domain - Update an entry for domain policy.
160 const struct tomoyo_domain_info *domain = r->domain; tomoyo_check_acl() local
163 const struct list_head *list = &domain->acl_info_list; tomoyo_check_acl()
179 list = &domain->ns->acl_group[domain->group];
275 * tomoyo_scan_transition - Try to find specific domain transition type.
278 * @domainname: The name of current domain.
317 * tomoyo_transition_type - Get domain transition type.
320 * @domainname: The name of current domain.
324 * domain transition across namespaces, TOMOYO_TRANSITION_CONTROL_INITIALIZE if
325 * executing @program reinitializes domain transition within that namespace,
482 * @domainname: Name of domain.
495 * tomoyo_assign_domain - Create a domain or a namespace.
497 * @domainname: The name of domain.
498 * @transit: True if transit to domain found or created.
515 * that domain. Do not perform domain transition if tomoyo_assign_domain()
516 * profile for that domain is not yet created. tomoyo_assign_domain()
524 /* Requested domain does not exist. */ tomoyo_assign_domain()
525 /* Don't create requested domain if domainname is invalid. */ tomoyo_assign_domain()
532 * by automatically creating requested domain upon domain transition. tomoyo_assign_domain()
541 * domains are inherited from current domain. These are 0 for manually tomoyo_assign_domain()
545 const struct tomoyo_domain_info *domain = tomoyo_domain(); tomoyo_assign_domain() local
546 e.profile = domain->profile; tomoyo_assign_domain()
547 e.group = domain->group; tomoyo_assign_domain()
603 ee->r.profile = r->domain->profile; tomoyo_environ()
604 ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile, tomoyo_environ()
668 * tomoyo_find_next_domain - Find a domain.
679 struct tomoyo_domain_info *domain = NULL; tomoyo_find_next_domain() local
740 * Check for domain transition preference if "file execute" matched.
741 * If preference is given, make do_execve() fail if domain transition
742 * has failed, for domain transition preference should be used with
743 * destination domain defined.
771 * No domain transition preference specified.
772 * Calculate domain to transit to.
782 * Make do_execve() fail if domain transition across namespaces
795 /* Keep current domain. */
796 domain = old_domain;
802 * No need to transit from the kernel domain before
803 * starting /sbin/init. But transit from the kernel domain
807 domain = old_domain;
811 /* Normal domain transition. */
817 if (!domain)
818 domain = tomoyo_assign_domain(ee->tmp, true);
819 if (domain)
839 if (!domain)
840 domain = old_domain;
842 atomic_inc(&domain->users);
843 bprm->cred->security = domain;
846 ee->r.domain = domain;
H A DMakefile1 obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
H A Dcommon.c974 struct tomoyo_domain_info *domain = NULL; tomoyo_select_domain() local
988 domain = tomoyo_real_domain(p); tomoyo_select_domain()
990 } else if (!strncmp(data, "domain=", 7)) { tomoyo_select_domain()
992 domain = tomoyo_find_domain(data + 7); tomoyo_select_domain()
994 domain = tomoyo_find_domain_by_qid(pid); tomoyo_select_domain()
997 head->w.domain = domain; tomoyo_select_domain()
1003 if (domain) tomoyo_select_domain()
1004 head->r.domain = &domain->list; tomoyo_select_domain()
1008 if (domain && domain->is_deleted) tomoyo_select_domain()
1009 tomoyo_io_printf(head, "# This is a deleted domain.\n"); tomoyo_select_domain()
1056 * tomoyo_delete_domain - Delete a domain.
1058 * @domainname: The name of domain.
1066 struct tomoyo_domain_info *domain; tomoyo_delete_domain() local
1073 /* Is there an active domain? */ tomoyo_delete_domain()
1074 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { tomoyo_delete_domain()
1076 if (domain == &tomoyo_kernel_domain) tomoyo_delete_domain()
1078 if (domain->is_deleted || tomoyo_delete_domain()
1079 tomoyo_pathcmp(domain->domainname, &name)) tomoyo_delete_domain()
1081 domain->is_deleted = true; tomoyo_delete_domain()
1089 * tomoyo_write_domain2 - Write domain policy.
1131 /* String table for domain flags. */
1138 * tomoyo_write_domain - Write domain policy.
1150 struct tomoyo_domain_info *domain = head->w.domain; tomoyo_write_domain() local
1156 domain = NULL; tomoyo_write_domain()
1160 domain = tomoyo_find_domain(data); tomoyo_write_domain()
1162 domain = tomoyo_assign_domain(data, false); tomoyo_write_domain()
1163 head->w.domain = domain; tomoyo_write_domain()
1166 if (!domain) tomoyo_write_domain()
1168 ns = domain->ns; tomoyo_write_domain()
1172 domain->profile = (u8) profile; tomoyo_write_domain()
1178 domain->group = (u8) profile; tomoyo_write_domain()
1185 domain->flags[profile] = !is_delete; tomoyo_write_domain()
1188 return tomoyo_write_domain2(ns, &domain->acl_info_list, data, tomoyo_write_domain()
1549 * tomoyo_read_domain2 - Read domain policy.
1572 * tomoyo_read_domain - Read domain policy.
1582 list_for_each_cookie(head->r.domain, &tomoyo_domain_list) { tomoyo_read_domain()
1583 struct tomoyo_domain_info *domain = tomoyo_read_domain() local
1584 list_entry(head->r.domain, typeof(*domain), list); tomoyo_read_domain()
1588 if (domain->is_deleted && tomoyo_read_domain()
1592 tomoyo_set_string(head, domain->domainname->name); tomoyo_read_domain()
1595 domain->profile); tomoyo_read_domain()
1597 domain->group); tomoyo_read_domain()
1599 if (domain->flags[i]) tomoyo_read_domain()
1605 if (!tomoyo_read_domain2(head, &domain->acl_info_list)) tomoyo_read_domain()
1650 struct tomoyo_domain_info *domain = NULL; tomoyo_read_pid() local
1669 domain = tomoyo_real_domain(p); tomoyo_read_pid()
1671 if (!domain) tomoyo_read_pid()
1673 tomoyo_io_printf(head, "%u %u ", pid, domain->profile); tomoyo_read_pid()
1674 tomoyo_set_string(head, domain->domainname->name); tomoyo_read_pid()
1677 /* String table for domain transition control keywords. */
1891 struct tomoyo_domain_info *domain; member in struct:tomoyo_query
1929 * tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode.
1931 * @domain: Pointer to "struct tomoyo_domain_info".
1936 static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header) tomoyo_add_entry() argument
1978 if (!tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer, tomoyo_add_entry()
2038 tomoyo_add_entry(r->domain, entry.query); tomoyo_supervisor()
2042 entry.domain = r->domain; tomoyo_supervisor()
2090 * tomoyo_find_domain_by_qid - Get domain by query id.
2100 struct tomoyo_domain_info *domain = NULL; tomoyo_find_domain_by_qid() local
2105 domain = ptr->domain; tomoyo_find_domain_by_qid()
2109 return domain; tomoyo_find_domain_by_qid()
2628 head->w.domain = NULL; tomoyo_write_control()
2700 struct tomoyo_domain_info *domain; tomoyo_check_profile() local
2704 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { tomoyo_check_profile()
2705 const u8 profile = domain->profile; tomoyo_check_profile()
2706 const struct tomoyo_policy_namespace *ns = domain->ns; tomoyo_check_profile()
2714 profile, domain->domainname->name); tomoyo_check_profile()
H A Dutil.c589 * tomoyo_find_domain - Find a domain by the given name.
599 struct tomoyo_domain_info *domain; tomoyo_find_domain() local
604 list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { tomoyo_find_domain()
605 if (!domain->is_deleted && tomoyo_find_domain()
606 !tomoyo_pathcmp(&name, domain->domainname)) tomoyo_find_domain()
607 return domain; tomoyo_find_domain()
997 * @domain: Pointer to "struct tomoyo_domain_info". NULL for tomoyo_domain().
1003 struct tomoyo_domain_info *domain, const u8 index) tomoyo_init_request_info()
1007 if (!domain) tomoyo_init_request_info()
1008 domain = tomoyo_domain(); tomoyo_init_request_info()
1009 r->domain = domain; tomoyo_init_request_info()
1010 profile = domain->profile; tomoyo_init_request_info()
1013 r->mode = tomoyo_get_mode(domain->ns, profile, index); tomoyo_init_request_info()
1018 * tomoyo_domain_quota_is_ok - Check for domain's quota.
1022 * Returns true if the domain has not exceeded its quota, false otherwise.
1029 struct tomoyo_domain_info *domain = r->domain; tomoyo_domain_quota_is_ok() local
1034 if (!domain) tomoyo_domain_quota_is_ok()
1036 list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { tomoyo_domain_quota_is_ok()
1076 if (count < tomoyo_profile(domain->ns, domain->profile)-> tomoyo_domain_quota_is_ok()
1079 if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) { tomoyo_domain_quota_is_ok()
1080 domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true; tomoyo_domain_quota_is_ok()
1085 "Stopped learning mode.\n", domain->domainname->name); tomoyo_domain_quota_is_ok()
1002 tomoyo_init_request_info(struct tomoyo_request_info *r, struct tomoyo_domain_info *domain, const u8 index) tomoyo_init_request_info() argument
H A Dgc.c49 if (head->r.domain == element || head->r.group == element || tomoyo_struct_used_by_io_buffer()
50 head->r.acl == element || &head->w.domain->list == element) tomoyo_struct_used_by_io_buffer()
245 struct tomoyo_domain_info *domain = tomoyo_del_domain() local
246 container_of(element, typeof(*domain), list); tomoyo_del_domain()
250 * Since this domain is referenced from neither tomoyo_del_domain()
254 list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) { tomoyo_del_domain()
258 tomoyo_put_name(domain->domainname); tomoyo_del_domain()
390 * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl} tomoyo_try_to_gc()
391 * and "struct tomoyo_io_buffer"->w.domain forget this element. tomoyo_try_to_gc()
517 struct tomoyo_domain_info *domain; tomoyo_collect_entry() local
519 list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list, tomoyo_collect_entry()
521 tomoyo_collect_acl(&domain->acl_info_list); tomoyo_collect_entry()
522 if (!domain->is_deleted || atomic_read(&domain->users)) tomoyo_collect_entry()
524 tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list); tomoyo_collect_entry()
H A Dtomoyo.c36 struct tomoyo_domain_info *domain = old->security; tomoyo_cred_prepare() local
37 new->security = domain; tomoyo_cred_prepare()
38 if (domain) tomoyo_cred_prepare()
39 atomic_inc(&domain->users); tomoyo_cred_prepare()
61 struct tomoyo_domain_info *domain = cred->security; tomoyo_cred_free() local
62 if (domain) tomoyo_cred_free()
63 atomic_dec(&domain->users); tomoyo_cred_free()
114 struct tomoyo_domain_info *domain = bprm->cred->security; tomoyo_bprm_check_security() local
118 * using current domain. tomoyo_bprm_check_security()
120 if (!domain) { tomoyo_bprm_check_security()
127 * Read permission is checked against interpreters using next domain. tomoyo_bprm_check_security()
129 return tomoyo_check_open_permission(domain, &bprm->file->f_path, tomoyo_bprm_check_security()
/linux-4.4.14/arch/x86/kernel/apic/
H A Dmsi.c72 struct irq_domain *domain; native_setup_msi_irqs() local
79 domain = irq_remapping_get_irq_domain(&info); native_setup_msi_irqs()
80 if (domain == NULL) native_setup_msi_irqs()
81 domain = msi_default_domain; native_setup_msi_irqs()
82 if (domain == NULL) native_setup_msi_irqs()
85 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); native_setup_msi_irqs()
99 static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, pci_msi_prepare() argument
198 static int dmar_msi_init(struct irq_domain *domain, dmar_msi_init() argument
202 irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL, dmar_msi_init()
234 struct irq_domain *domain = dmar_get_irq_domain(); dmar_alloc_hwirq() local
237 if (!domain) dmar_alloc_hwirq()
245 return irq_domain_alloc_irqs(domain, 1, node, &info); dmar_alloc_hwirq()
258 static inline int hpet_dev_id(struct irq_domain *domain) hpet_dev_id() argument
260 struct msi_domain_info *info = msi_get_domain_info(domain); hpet_dev_id()
288 static int hpet_msi_init(struct irq_domain *domain, hpet_msi_init() argument
293 irq_domain_set_info(domain, virq, arg->hpet_index, info->chip, NULL, hpet_msi_init()
299 static void hpet_msi_free(struct irq_domain *domain, hpet_msi_free() argument
344 int hpet_assign_irq(struct irq_domain *domain, struct hpet_dev *dev, hpet_assign_irq() argument
352 info.hpet_id = hpet_dev_id(domain); hpet_assign_irq()
355 return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info); hpet_assign_irq()
H A Dhtirq.c63 static int htirq_domain_alloc(struct irq_domain *domain, unsigned int virq, htirq_domain_alloc() argument
79 if (irq_find_mapping(domain, hwirq) > 0) htirq_domain_alloc()
86 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info); htirq_domain_alloc()
99 irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg, htirq_domain_alloc()
105 static void htirq_domain_free(struct irq_domain *domain, unsigned int virq, htirq_domain_free() argument
108 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); htirq_domain_free()
112 irq_domain_free_irqs_top(domain, virq, nr_irqs); htirq_domain_free()
115 static void htirq_domain_activate(struct irq_domain *domain, htirq_domain_activate() argument
137 static void htirq_domain_deactivate(struct irq_domain *domain, htirq_domain_deactivate() argument
/linux-4.4.14/arch/ia64/kernel/
H A Dirq_ia64.c80 .domain = CPU_MASK_NONE
106 static inline int find_unassigned_vector(cpumask_t domain) find_unassigned_vector() argument
111 cpumask_and(&mask, &domain, cpu_online_mask); find_unassigned_vector()
117 cpumask_and(&mask, &domain, &vector_table[vector]); find_unassigned_vector()
125 static int __bind_irq_vector(int irq, int vector, cpumask_t domain) __bind_irq_vector() argument
134 cpumask_and(&mask, &domain, cpu_online_mask); __bind_irq_vector()
137 if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain)) __bind_irq_vector()
144 cfg->domain = domain; __bind_irq_vector()
146 cpumask_or(&vector_table[vector], &vector_table[vector], &domain); __bind_irq_vector()
150 int bind_irq_vector(int irq, int vector, cpumask_t domain) bind_irq_vector() argument
156 ret = __bind_irq_vector(irq, vector, domain); bind_irq_vector()
164 cpumask_t domain; __clear_irq_vector() local
170 domain = cfg->domain; __clear_irq_vector()
171 for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask) __clear_irq_vector()
174 cfg->domain = CPU_MASK_NONE; __clear_irq_vector()
176 cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain); __clear_irq_vector()
193 cpumask_t domain = CPU_MASK_NONE; ia64_native_assign_irq_vector() local
199 domain = vector_allocation_domain(cpu); for_each_online_cpu()
200 vector = find_unassigned_vector(domain); for_each_online_cpu()
208 BUG_ON(__bind_irq_vector(irq, vector, domain));
245 if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain)) __setup_vector_irq()
270 cpumask_t domain; __irq_prepare_move() local
276 if (cpumask_test_cpu(cpu, &cfg->domain)) __irq_prepare_move()
278 domain = vector_allocation_domain(cpu); __irq_prepare_move()
279 vector = find_unassigned_vector(domain); __irq_prepare_move()
283 cfg->old_domain = cfg->domain; __irq_prepare_move()
285 cfg->domain = CPU_MASK_NONE; __irq_prepare_move()
286 BUG_ON(__bind_irq_vector(irq, vector, domain)); __irq_prepare_move()
397 cpumask_t domain = CPU_MASK_NONE; create_irq() local
402 domain = vector_allocation_domain(cpu); for_each_online_cpu()
403 vector = find_unassigned_vector(domain); for_each_online_cpu()
412 BUG_ON(__bind_irq_vector(irq, vector, domain));
/linux-4.4.14/arch/x86/platform/uv/
H A Duv_irq.c80 static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq, uv_domain_alloc() argument
85 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); uv_domain_alloc()
96 ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); uv_domain_alloc()
105 irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data, uv_domain_alloc()
114 static void uv_domain_free(struct irq_domain *domain, unsigned int virq, uv_domain_free() argument
117 struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); uv_domain_free()
123 irq_domain_free_irqs_top(domain, virq, nr_irqs); uv_domain_free()
130 static void uv_domain_activate(struct irq_domain *domain, uv_domain_activate() argument
140 static void uv_domain_deactivate(struct irq_domain *domain, uv_domain_deactivate() argument
184 struct irq_domain *domain = uv_get_irq_domain(); uv_setup_irq() local
186 if (!domain) uv_setup_irq()
196 return irq_domain_alloc_irqs(domain, 1, uv_setup_irq()
H A Dbios_uv.c170 * @domain: PCI domain number
175 * -EINVAL: Invalid domain or bus number
179 int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus) uv_bios_set_legacy_vga_target() argument
182 (u64)decode, (u64)domain, (u64)bus, 0, 0); uv_bios_set_legacy_vga_target()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/engine/device/
H A Dctrl.c73 const struct nvkm_domain *domain; nvkm_control_mthd_pstate_attr() local
93 domain = clk->domains; nvkm_control_mthd_pstate_attr()
95 while (domain->name != nv_clk_src_max) { nvkm_control_mthd_pstate_attr()
96 if (domain->mname && ++j == args->v0.index) nvkm_control_mthd_pstate_attr()
98 domain++; nvkm_control_mthd_pstate_attr()
101 if (domain->name == nv_clk_src_max) nvkm_control_mthd_pstate_attr()
110 lo = pstate->base.domain[domain->name]; nvkm_control_mthd_pstate_attr()
113 lo = min(lo, cstate->domain[domain->name]); nvkm_control_mthd_pstate_attr()
114 hi = max(hi, cstate->domain[domain->name]); nvkm_control_mthd_pstate_attr()
119 lo = max(nvkm_clk_read(clk, domain->name), 0); nvkm_control_mthd_pstate_attr()
123 snprintf(args->v0.name, sizeof(args->v0.name), "%s", domain->mname); nvkm_control_mthd_pstate_attr()
125 args->v0.min = lo / domain->mdiv; nvkm_control_mthd_pstate_attr()
126 args->v0.max = hi / domain->mdiv; nvkm_control_mthd_pstate_attr()
129 while ((++domain)->name != nv_clk_src_max) { nvkm_control_mthd_pstate_attr()
130 if (domain->mname) { nvkm_control_mthd_pstate_attr()
H A Dtegra.c94 tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); nvkm_device_tegra_probe_iommu()
95 if (IS_ERR(tdev->iommu.domain)) nvkm_device_tegra_probe_iommu()
103 pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap; nvkm_device_tegra_probe_iommu()
115 ret = iommu_attach_device(tdev->iommu.domain, dev); nvkm_device_tegra_probe_iommu()
129 iommu_detach_device(tdev->iommu.domain, dev); nvkm_device_tegra_probe_iommu()
132 iommu_domain_free(tdev->iommu.domain); nvkm_device_tegra_probe_iommu()
135 tdev->iommu.domain = NULL; nvkm_device_tegra_probe_iommu()
145 if (tdev->iommu.domain) { nvkm_device_tegra_remove_iommu()
147 iommu_detach_device(tdev->iommu.domain, tdev->device.dev); nvkm_device_tegra_remove_iommu()
148 iommu_domain_free(tdev->iommu.domain); nvkm_device_tegra_remove_iommu()
/linux-4.4.14/arch/x86/kvm/
H A Diommu.c78 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_iommu_map_pages() local
82 if (!domain) kvm_iommu_map_pages()
99 if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) { kvm_iommu_map_pages()
130 r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn), kvm_iommu_map_pages()
175 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_assign_device() local
180 if (!domain) kvm_assign_device()
186 r = iommu_attach_device(domain, &pdev->dev); kvm_assign_device()
216 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_deassign_device() local
219 if (!domain) kvm_deassign_device()
225 iommu_detach_device(domain, &pdev->dev); kvm_deassign_device()
276 struct iommu_domain *domain; kvm_iommu_put_pages() local
281 domain = kvm->arch.iommu_domain; kvm_iommu_put_pages()
286 if (!domain) kvm_iommu_put_pages()
294 phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn)); kvm_iommu_put_pages()
304 size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE); kvm_iommu_put_pages()
343 struct iommu_domain *domain = kvm->arch.iommu_domain; kvm_iommu_unmap_guest() local
346 if (!domain) kvm_iommu_unmap_guest()
355 iommu_domain_free(domain); kvm_iommu_unmap_guest()
/linux-4.4.14/drivers/clk/
H A Dclk-mb86s7x.c35 u32 domain; member in struct:mb86s7x_peri_clk
49 u8 cntrlr, domain, port; member in struct:crg_clk
60 cmd.domain = crgclk->domain; crg_gate_control()
70 cmd.domain, cmd.port, cmd.en); crg_gate_control()
81 cmd.domain, cmd.port, cmd.en); crg_gate_control()
111 cmd.domain = crgclk->domain; crg_rate_control()
119 cmd.domain, cmd.port, cmd.frequency); crg_rate_control()
124 cmd.domain, cmd.port); crg_rate_control()
136 cmd.domain, cmd.port, cmd.frequency); crg_rate_control()
140 cmd.domain, cmd.port, cmd.frequency); crg_rate_control()
186 u32 cntrlr, domain, port; crg11_get() local
195 domain = clkspec->args[1]; crg11_get()
199 snprintf(clkp, 20, "UngatedCLK%d_%X", cntrlr, domain); crg11_get()
201 snprintf(clkp, 20, "CLK%d_%X_%d", cntrlr, domain, port); crg11_get()
223 crgclk->domain = domain; crg11_get()
/linux-4.4.14/lib/xz/
H A Dxz_dec_syms.c6 * This file has been put into the public domain.
23 * This code is in the public domain, but in Linux it's simplest to just
H A Dxz_crc32.c7 * This file has been put into the public domain.
/linux-4.4.14/drivers/pci/pcie/aer/
H A Daer_inject.c44 u16 domain; member in struct:aer_error_inj
49 u16 domain; member in struct:aer_error
77 static void aer_error_init(struct aer_error *err, u16 domain, aer_error_init() argument
82 err->domain = domain; aer_error_init()
89 static struct aer_error *__find_aer_error(u16 domain, unsigned int bus, __find_aer_error() argument
95 if (domain == err->domain && __find_aer_error()
106 int domain = pci_domain_nr(dev->bus); __find_aer_error_by_dev() local
107 if (domain < 0) __find_aer_error_by_dev()
109 return __find_aer_error((u16)domain, dev->bus->number, dev->devfn); __find_aer_error_by_dev()
191 int domain; pci_read_aer() local
196 domain = pci_domain_nr(bus); pci_read_aer()
197 if (domain < 0) pci_read_aer()
199 err = __find_aer_error((u16)domain, bus->number, devfn); pci_read_aer()
223 int domain; pci_write_aer() local
228 domain = pci_domain_nr(bus); pci_write_aer()
229 if (domain < 0) pci_write_aer()
231 err = __find_aer_error((u16)domain, bus->number, devfn); pci_write_aer()
332 dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn); aer_inject()
386 aer_error_init(err, einj->domain, einj->bus, devfn, aer_inject()
482 if (usize < offsetof(struct aer_error_inj, domain) || aer_inject_write()
/linux-4.4.14/drivers/sh/intc/
H A Dirqdomain.c2 * IRQ domain support for SH INTC subsystem
62 d->domain = irq_domain_add_linear(NULL, hw->nr_vectors, intc_irq_domain_init()
65 d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL); intc_irq_domain_init()
67 BUG_ON(!d->domain); intc_irq_domain_init()
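A hedged sketch of the choice intc_irq_domain_init() makes above: a linear revmap when the hwirq space is dense, a radix-tree domain otherwise; the helper name is illustrative:

	static struct irq_domain *sketch_pick_domain(struct device_node *np,
						     unsigned int nr_vectors,
						     bool dense,
						     const struct irq_domain_ops *ops)
	{
		return dense ? irq_domain_add_linear(np, nr_vectors, ops, NULL)
			     : irq_domain_add_tree(np, ops, NULL);
	}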
/linux-4.4.14/arch/arm/mach-omap2/
H A Dpowerdomains44xx_data.c33 /* core_44xx_pwrdm: CORE power domain */
59 /* gfx_44xx_pwrdm: 3D accelerator power domain */
76 /* abe_44xx_pwrdm: Audio back end power domain */
96 /* dss_44xx_pwrdm: Display subsystem power domain */
114 /* tesla_44xx_pwrdm: Tesla processor power domain */
136 /* wkup_44xx_pwrdm: Wake-up power domain */
152 /* cpu0_44xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
169 /* cpu1_44xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
186 /* emu_44xx_pwrdm: Emulation power domain */
202 /* mpu_44xx_pwrdm: Modena processor and the Neon coprocessor power domain */
223 /* ivahd_44xx_pwrdm: IVA-HD power domain */
247 /* cam_44xx_pwrdm: Camera subsystem power domain */
264 /* l3init_44xx_pwrdm: L3 initiator peripherals power domain */
282 /* l4per_44xx_pwrdm: Target peripherals power domain */
304 * domain
314 /* cefuse_44xx_pwrdm: Customer efuse controller power domain */
H A Dpowerdomains7xx_data.c33 /* iva_7xx_pwrdm: IVA-HD power domain */
64 /* custefuse_7xx_pwrdm: Customer efuse controller power domain */
73 /* ipu_7xx_pwrdm: Image processing unit power domain */
92 /* dss_7xx_pwrdm: Display subsystem power domain */
109 /* l4per_7xx_pwrdm: Target peripherals power domain */
128 /* gpu_7xx_pwrdm: 3D accelerator power domain */
144 /* wkupaon_7xx_pwrdm: Wake-up power domain */
158 /* core_7xx_pwrdm: CORE power domain */
183 /* coreaon_7xx_pwrdm: Always ON logic that sits in VDD_CORE voltage domain */
191 /* cpu0_7xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
207 /* cpu1_7xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
240 /* mpu_7xx_pwrdm: Modena processor and the Neon coprocessor power domain */
258 /* l3init_7xx_pwrdm: L3 initiator peripherals power domain */
295 /* emu_7xx_pwrdm: Emulation power domain */
330 /* dsp1_7xx_pwrdm: Tesla processor power domain */
350 /* cam_7xx_pwrdm: Camera subsystem power domain */
H A Dpowerdomains54xx_data.c31 /* core_54xx_pwrdm: CORE power domain */
57 /* abe_54xx_pwrdm: Audio back end power domain */
77 /* coreaon_54xx_pwrdm: Always ON logic that sits in VDD_CORE voltage domain */
86 /* dss_54xx_pwrdm: Display subsystem power domain */
104 /* cpu0_54xx_pwrdm: MPU0 processor and Neon coprocessor power domain */
121 /* cpu1_54xx_pwrdm: MPU1 processor and Neon coprocessor power domain */
138 /* emu_54xx_pwrdm: Emulation power domain */
154 /* mpu_54xx_pwrdm: Modena processor and the Neon coprocessor power domain */
173 /* custefuse_54xx_pwrdm: Customer efuse controller power domain */
183 /* dsp_54xx_pwrdm: Tesla processor power domain */
205 /* cam_54xx_pwrdm: Camera subsystem power domain */
222 /* l3init_54xx_pwrdm: L3 initiator peripherals power domain */
242 /* gpu_54xx_pwrdm: 3D accelerator power domain */
259 /* wkupaon_54xx_pwrdm: Wake-up power domain */
274 /* iva_54xx_pwrdm: IVA-HD power domain */
H A Dvoltagedomains2xxx_data.c2 * OMAP2 voltage domain data
H A Dpowerdomains2xxx_3xxx_data.c20 * powerdomain is called the "DSP power domain." On the 2430, the
23 * the "DSP power domain." On the 3430, the DSP is a 'C64 DSP like the
25 * "IVA2 power domain."
H A Domap_opp_data.h34 * @hwmod_name: Name of the hwmod for this domain
40 * pairs that the device will support per voltage domain. This is called
43 * domain, you can have a set of {frequency, voltage} pairs and this is denoted
47 * which belongs to a voltage domain may define their own set of OPPs on top
H A Dvoltage.c52 * API to get the current non-auto-compensated voltage for a voltage domain.
66 * voltdm_scale() - API to scale voltage of a particular voltage domain.
67 * @voltdm: pointer to the voltage domain which is to be scaled.
68 * @target_volt: The target voltage of the voltage domain
71 * for a particular voltage domain during DVFS.
112 * voltdm_reset() - Resets the voltage of a particular voltage domain
114 * @voltdm: pointer to the voltage domain whose voltage is to be reset.
116 * This API finds out the correct voltage the voltage domain is supposed
141 * particular voltage domain.
169 * domain and tries to find a matching entry for the passed voltage volt.
175 * domain or if there is no matching entry.
H A Dcm2xxx.h28 * Module specific CM register offsets from CM_BASE + domain offset
45 /* Clock management domain register get/set */
H A Dvoltage.h50 * struct voltagedomain - omap voltage domain global structure.
51 * @name: Name of the voltage domain which can be used as a unique identifier.
52 * @scalable: Whether or not this voltage domain is scalable
61 * @nominal_volt: current nominal voltage for this voltage domain
63 * by the domain and other associated per voltage data.
H A Domap-mpuss-lowpower.c10 * CPU0, CPU1 and MPUSS each have their own power domain and
292 * power domain can transition to programmed low power omap4_enter_lowpower()
295 * domain transition omap4_enter_lowpower()
318 /* Use the achievable power state for the domain */ omap4_hotplug_cpu()
386 /* Clear CPU previous power domain state */ omap4_mpuss_init()
390 /* Initialise CPU0 power domain state to ON */ omap4_mpuss_init()
407 /* Clear CPU previous power domain state */ omap4_mpuss_init()
411 /* Initialise CPU1 power domain state to ON */ omap4_mpuss_init()
416 pr_err("Failed to lookup MPUSS power domain\n"); omap4_mpuss_init()
H A Dcm2xxx_3xxx.h22 * Module specific CM register offsets from CM_BASE + domain offset
78 static inline u32 omap2_cm_read_mod_bits_shift(s16 domain, s16 idx, u32 mask) omap2_cm_read_mod_bits_shift() argument
82 v = omap2_cm_read_mod_reg(domain, idx); omap2_cm_read_mod_bits_shift()
H A Domap-wakeupgen.c8 * implemented in MPU always ON power domain. During normal operation,
412 /* No PPI should point to this domain */ wakeupgen_domain_translate()
424 static int wakeupgen_domain_alloc(struct irq_domain *domain, wakeupgen_domain_alloc() argument
436 return -EINVAL; /* No PPI should point to this domain */ wakeupgen_domain_alloc()
443 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, wakeupgen_domain_alloc()
447 parent_fwspec.fwnode = domain->parent->fwnode; wakeupgen_domain_alloc()
448 return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, wakeupgen_domain_alloc()
464 struct irq_domain *parent_domain, *domain; wakeupgen_init() local
476 pr_err("%s: unable to obtain parent domain\n", node->full_name); wakeupgen_init()
499 domain = irq_domain_add_hierarchy(parent_domain, 0, max_irqs, wakeupgen_init()
502 if (!domain) { wakeupgen_init()
528 * This only needs to be set once, since the block lives in the always-ON domain. wakeupgen_init()
/linux-4.4.14/tools/testing/selftests/net/
H A Dsocket.c10 int domain; member in struct:socket_testcase
47 fd = socket(s->domain, s->type, s->protocol); run_tests()
62 s->domain, s->type, s->protocol, run_tests()
75 s->domain, s->type, s->protocol, run_tests()
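A hedged sketch of the table-driven pattern in the selftest above: each case feeds its (domain, type, protocol) triple straight to socket(2); sketch_try is illustrative:

	#include <sys/socket.h>
	#include <unistd.h>

	struct socket_testcase { int domain, type, protocol; };

	static int sketch_try(const struct socket_testcase *s)
	{
		int fd = socket(s->domain, s->type, s->protocol);

		if (fd < 0)
			return -1;	/* caller decides whether failure was expected */
		close(fd);
		return 0;
	}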
/linux-4.4.14/arch/mips/ralink/
H A Dirq.c104 struct irq_domain *domain = irq_desc_get_handler_data(desc); ralink_intc_irq_handler() local
105 generic_handle_irq(irq_find_mapping(domain, __ffs(pending))); ralink_intc_irq_handler()
152 struct irq_domain *domain; intc_of_init() local
181 domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT, intc_of_init()
183 if (!domain) intc_of_init()
188 irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain); intc_of_init()
191 rt_perfcount_irq = irq_create_mapping(domain, 9); intc_of_init()
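A hedged sketch of the cascade wiring used by intc_of_init() above: the parent irq gets a chained handler whose handler data is the child domain; the hardware read is stubbed out:

	static void sketch_cascade(struct irq_desc *desc)
	{
		struct irq_domain *domain = irq_desc_get_handler_data(desc);
		u32 pending = 1;	/* a real driver reads the controller here */

		generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
	}

	/* wired up once at init time, as in intc_of_init():
	 *	irq_set_chained_handler_and_data(parent_irq, sketch_cascade, domain);
	 */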
/linux-4.4.14/drivers/xen/xen-pciback/
H A Dpassthrough.c20 unsigned int domain, __xen_pcibk_get_pci_dev()
31 if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus) __xen_pcibk_get_pci_dev()
50 unsigned int domain, bus, devfn; __xen_pcibk_add_pci_dev() local
63 domain = (unsigned int)pci_domain_nr(dev->bus); __xen_pcibk_add_pci_dev()
66 err = publish_cb(pdev, domain, bus, devfn, devid); __xen_pcibk_add_pci_dev()
124 unsigned int domain, bus; __xen_pcibk_publish_pci_roots() local
143 domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus); __xen_pcibk_publish_pci_roots()
147 err = publish_root_cb(pdev, domain, bus); __xen_pcibk_publish_pci_roots()
178 unsigned int *domain, unsigned int *bus, __xen_pcibk_get_pcifront_dev()
181 *domain = pci_domain_nr(pcidev->bus); __xen_pcibk_get_pcifront_dev()
19 __xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain, unsigned int bus, unsigned int devfn) __xen_pcibk_get_pci_dev() argument
176 __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev, struct xen_pcibk_device *pdev, unsigned int *domain, unsigned int *bus, unsigned int *devfn) __xen_pcibk_get_pcifront_dev() argument
H A Dpciback.h64 int domain, int bus,
86 unsigned int domain, unsigned int bus,
89 unsigned int domain, unsigned int bus);
100 unsigned int *domain, unsigned int *bus,
108 unsigned int domain, unsigned int bus,
134 xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain, xen_pcibk_get_pci_dev() argument
138 return xen_pcibk_backend->get(pdev, domain, bus, devfn); xen_pcibk_get_pci_dev()
143 * Added for domain0 PCIe AER handling. Gets the guest domain/bus/devfn in xen_pcibk
150 unsigned int *domain, xen_pcibk_get_pcifront_dev()
155 return xen_pcibk_backend->find(pcidev, pdev, domain, bus, xen_pcibk_get_pcifront_dev()
193 /* Handles shared IRQs that can go to both the device domain and the control domain. */
148 xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev, struct xen_pcibk_device *pdev, unsigned int *domain, unsigned int *bus, unsigned int *devfn) xen_pcibk_get_pcifront_dev() argument
H A Dpci_stub.c38 int domain; member in struct:pcistub_device_id
152 static struct pcistub_device *pcistub_device_find(int domain, int bus, pcistub_device_find() argument
162 && domain == pci_domain_nr(psdev->dev->bus) pcistub_device_find()
201 int domain, int bus, pcistub_get_pci_dev_by_slot()
212 && domain == pci_domain_nr(psdev->dev->bus) pcistub_get_pci_dev_by_slot()
281 * (so it's ready for the next domain) pcistub_put_pci_dev()
317 /* Match the specified device by domain, bus, slot, func and also if pcistub_match_one()
321 if (pci_domain_nr(dev->bus) == pdev_id->domain pcistub_match_one()
574 pr_warn("****** removing device %s while still in-use by domain %d! ******\n", pcistub_remove()
576 pr_warn("****** driver domain may still access this device's i/o resources!\n"); pcistub_remove()
577 pr_warn("****** shutdown driver domain before binding device\n"); pcistub_remove()
614 snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0", kill_domain_by_device()
657 &aer_op->domain, &aer_op->bus, &aer_op->devfn); common_process()
667 aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn); common_process()
955 static inline int str_to_slot(const char *buf, int *domain, int *bus, str_to_slot() argument
960 switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func, str_to_slot()
964 sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed); str_to_slot()
968 sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed); str_to_slot()
974 /* try again without domain */ str_to_slot()
975 *domain = 0; str_to_slot()
992 static inline int str_to_quirk(const char *buf, int *domain, int *bus, int str_to_quirk() argument
997 sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func, str_to_quirk()
1002 /* try again without domain */ str_to_quirk()
1003 *domain = 0; str_to_quirk()
1012 static int pcistub_device_id_add(int domain, int bus, int slot, int func) pcistub_device_id_add() argument
1020 rc = pcistub_device_id_add(domain, bus, slot, func); pcistub_device_id_add()
1026 rc = pcistub_device_id_add(domain, bus, slot, func); pcistub_device_id_add()
1033 !pci_domains_supported ? domain : pcistub_device_id_add()
1035 domain < 0 || domain > 0xffff) pcistub_device_id_add()
1045 pci_dev_id->domain = domain; pcistub_device_id_add()
1050 domain, bus, slot, func); pcistub_device_id_add()
1059 static int pcistub_device_id_remove(int domain, int bus, int slot, int func) pcistub_device_id_remove() argument
1068 if (pci_dev_id->domain == domain && pci_dev_id->bus == bus pcistub_device_id_remove()
1080 domain, bus, slot, func); pcistub_device_id_remove()
1088 static int pcistub_reg_add(int domain, int bus, int slot, int func, pcistub_reg_add() argument
1100 psdev = pcistub_device_find(domain, bus, slot, func); pcistub_reg_add()
1133 int domain, bus, slot, func; pcistub_slot_add() local
1136 err = str_to_slot(buf, &domain, &bus, &slot, &func); pcistub_slot_add()
1140 err = pcistub_device_id_add(domain, bus, slot, func); pcistub_slot_add()
1152 int domain, bus, slot, func; pcistub_slot_remove() local
1155 err = str_to_slot(buf, &domain, &bus, &slot, &func); pcistub_slot_remove()
1159 err = pcistub_device_id_remove(domain, bus, slot, func); pcistub_slot_remove()
1181 pci_dev_id->domain, pci_dev_id->bus, pcistub_slot_show()
1226 int domain, bus, slot, func; pcistub_irq_handler_switch() local
1229 err = str_to_slot(buf, &domain, &bus, &slot, &func); pcistub_irq_handler_switch()
1233 psdev = pcistub_device_find(domain, bus, slot, func); pcistub_irq_handler_switch()
1265 int domain, bus, slot, func, reg, size, mask; pcistub_quirk_add() local
1268 err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size, pcistub_quirk_add()
1273 err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask); pcistub_quirk_add()
1330 int domain, bus, slot, func; permissive_add() local
1335 err = str_to_slot(buf, &domain, &bus, &slot, &func); permissive_add()
1339 psdev = pcistub_device_find(domain, bus, slot, func); permissive_add()
1412 int domain, bus, slot, func; pcistub_init() local
1421 &domain, &bus, &slot, &func, &parsed); pcistub_init()
1427 &domain, &bus, &slot, &parsed); pcistub_init()
1433 &domain, &bus, &parsed); pcistub_init()
1438 domain = 0; pcistub_init()
1461 err = pcistub_device_id_add(domain, bus, slot, func); pcistub_init()
200 pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev, int domain, int bus, int slot, int func) pcistub_get_pci_dev_by_slot() argument
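A hedged sketch of the "domain:bus:slot.func" parsing str_to_slot() performs above, including the fallback that assumes domain 0; sketch_parse_slot is illustrative:

	#include <linux/errno.h>

	static int sketch_parse_slot(const char *buf, int *domain, int *bus,
				     int *slot, int *func)
	{
		if (sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func) == 4)
			return 0;
		*domain = 0;	/* retry without a domain, as the code above does */
		if (sscanf(buf, " %x:%x.%x", bus, slot, func) == 3)
			return 0;
		return -EINVAL;
	}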
H A Dxenbus.c76 /* If the driver domain started an op, make sure we complete it xen_pcibk_disconnect()
121 "Error mapping other domain page in ours."); xen_pcibk_do_attach()
204 unsigned int domain, unsigned int bus, xen_pcibk_publish_pci_dev()
219 "%04x:%02x:%02x.%02x", domain, bus, xen_pcibk_publish_pci_dev()
227 int domain, int bus, int slot, int func, xen_pcibk_export_device()
234 domain, bus, slot, func); xen_pcibk_export_device()
236 dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func); xen_pcibk_export_device()
243 domain, bus, slot, func); xen_pcibk_export_device()
265 * driver domain, that all devices under that bridge are not given xen_pcibk_export_device()
274 int domain, int bus, int slot, int func) xen_pcibk_remove_device()
280 domain, bus, slot, func); xen_pcibk_remove_device()
282 dev = xen_pcibk_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func)); xen_pcibk_remove_device()
286 "(%04x:%02x:%02x.%d)! not owned by this domain\n", xen_pcibk_remove_device()
287 domain, bus, slot, func); xen_pcibk_remove_device()
303 unsigned int domain, unsigned int bus) xen_pcibk_publish_pci_root()
335 if (d == domain && b == bus) { xen_pcibk_publish_pci_root()
348 root_num, domain, bus); xen_pcibk_publish_pci_root()
351 "%04x:%02x", domain, bus); xen_pcibk_publish_pci_root()
366 int domain, bus, slot, func; xen_pcibk_reconfigure() local
419 &domain, &bus, &slot, &func); xen_pcibk_reconfigure()
434 err = xen_pcibk_export_device(pdev, domain, bus, slot, xen_pcibk_reconfigure()
473 &domain, &bus, &slot, &func); xen_pcibk_reconfigure()
488 err = xen_pcibk_remove_device(pdev, domain, bus, slot, xen_pcibk_reconfigure()
564 int domain, bus, slot, func; xen_pcibk_setup_backend() local
601 "%x:%x:%x.%x", &domain, &bus, &slot, &func); xen_pcibk_setup_backend()
615 err = xen_pcibk_export_device(pdev, domain, bus, slot, func, i); xen_pcibk_setup_backend()
203 xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain, unsigned int bus, unsigned int devfn, unsigned int devid) xen_pcibk_publish_pci_dev() argument
226 xen_pcibk_export_device(struct xen_pcibk_device *pdev, int domain, int bus, int slot, int func, int devid) xen_pcibk_export_device() argument
273 xen_pcibk_remove_device(struct xen_pcibk_device *pdev, int domain, int bus, int slot, int func) xen_pcibk_remove_device() argument
302 xen_pcibk_publish_pci_root(struct xen_pcibk_device *pdev, unsigned int domain, unsigned int bus) xen_pcibk_publish_pci_root() argument
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/clk/
H A Dbase.c41 u8 pstate, u8 domain, u32 input) nvkm_clk_adjust()
61 if (subd && boostS.domain == domain) { nvkm_clk_adjust()
141 const struct nvkm_domain *domain = clk->domains; nvkm_cstate_new() local
158 while (domain && domain->name != nv_clk_src_max) { nvkm_cstate_new()
159 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { nvkm_cstate_new()
161 domain->bios, cstepX.freq); nvkm_cstate_new()
162 cstate->domain[domain->name] = freq; nvkm_cstate_new()
164 domain++; nvkm_cstate_new()
191 int khz = pstate->base.domain[nv_clk_src_mem]; nvkm_pstate_prog()
264 u32 lo = pstate->base.domain[clock->name]; nvkm_pstate_info()
271 u32 freq = cstate->domain[clock->name]; nvkm_pstate_info()
310 const struct nvkm_domain *domain = clk->domains - 1; nvkm_pstate_new() local
334 cstate->domain[nv_clk_src_core] = perfE.core; nvkm_pstate_new()
335 cstate->domain[nv_clk_src_shader] = perfE.shader; nvkm_pstate_new()
336 cstate->domain[nv_clk_src_mem] = perfE.memory; nvkm_pstate_new()
337 cstate->domain[nv_clk_src_vdec] = perfE.vdec; nvkm_pstate_new()
338 cstate->domain[nv_clk_src_dom6] = perfE.disp; nvkm_pstate_new()
340 while (ver >= 0x40 && (++domain)->name != nv_clk_src_max) { nvkm_pstate_new()
343 u32 perfSe = nvbios_perfSp(bios, data, domain->bios, nvkm_pstate_new()
348 if (domain->flags & NVKM_CLK_DOM_FLAG_CORE) { nvkm_pstate_new()
351 domain->bios, nvkm_pstate_new()
355 cstate->domain[domain->name] = perfS.v40.freq; nvkm_pstate_new()
512 clk->bstate.base.domain[clock->name] = ret; nvkm_clk_init()
40 nvkm_clk_adjust(struct nvkm_clk *clk, bool adjust, u8 pstate, u8 domain, u32 input) nvkm_clk_adjust() argument
H A Dgk20a.c472 .domain[nv_clk_src_gpc] = 72000,
478 .domain[nv_clk_src_gpc] = 108000,
484 .domain[nv_clk_src_gpc] = 180000,
490 .domain[nv_clk_src_gpc] = 252000,
496 .domain[nv_clk_src_gpc] = 324000,
502 .domain[nv_clk_src_gpc] = 396000,
508 .domain[nv_clk_src_gpc] = 468000,
514 .domain[nv_clk_src_gpc] = 540000,
520 .domain[nv_clk_src_gpc] = 612000,
526 .domain[nv_clk_src_gpc] = 648000,
532 .domain[nv_clk_src_gpc] = 684000,
538 .domain[nv_clk_src_gpc] = 708000,
544 .domain[nv_clk_src_gpc] = 756000,
550 .domain[nv_clk_src_gpc] = 804000,
556 .domain[nv_clk_src_gpc] = 852000,
586 return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] * gk20a_clk_calc()
/linux-4.4.14/drivers/pci/
H A Dmsi.c43 struct irq_domain *domain; pci_msi_get_domain() local
45 domain = dev_get_msi_domain(&dev->dev); pci_msi_get_domain()
46 if (domain) pci_msi_get_domain()
47 return domain; pci_msi_get_domain()
54 struct irq_domain *domain; pci_msi_setup_msi_irqs() local
56 domain = pci_msi_get_domain(dev); pci_msi_setup_msi_irqs()
57 if (domain && irq_domain_is_hierarchy(domain)) pci_msi_setup_msi_irqs()
58 return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); pci_msi_setup_msi_irqs()
65 struct irq_domain *domain; pci_msi_teardown_msi_irqs() local
67 domain = pci_msi_get_domain(dev); pci_msi_teardown_msi_irqs()
68 if (domain && irq_domain_is_hierarchy(domain)) pci_msi_teardown_msi_irqs()
69 pci_msi_domain_free_irqs(domain, dev); pci_msi_teardown_msi_irqs()
1177 * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev
1178 * @domain: The interrupt domain to check
1179 * @info: The domain info for verification
1184 * 1 if Multi MSI is requested, but the domain does not support it
1187 int pci_msi_domain_check_cap(struct irq_domain *domain, pci_msi_domain_check_cap() argument
1202 static int pci_msi_domain_handle_error(struct irq_domain *domain, pci_msi_domain_handle_error() argument
1260 * pci_msi_create_irq_domain - Create a MSI interrupt domain
1262 * @info: MSI domain info
1263 * @parent: Parent irq domain
1265 * Updates the domain and chip ops and creates a MSI interrupt domain.
1268 * A domain pointer or NULL in case of failure.
1274 struct irq_domain *domain; pci_msi_create_irq_domain() local
1281 domain = msi_create_irq_domain(fwnode, info, parent); pci_msi_create_irq_domain()
1282 if (!domain) pci_msi_create_irq_domain()
1285 domain->bus_token = DOMAIN_BUS_PCI_MSI; pci_msi_create_irq_domain()
1286 return domain; pci_msi_create_irq_domain()
1290 * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain
1291 * @domain: The interrupt domain to allocate from
1299 int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, pci_msi_domain_alloc_irqs() argument
1302 return msi_domain_alloc_irqs(domain, &dev->dev, nvec); pci_msi_domain_alloc_irqs()
1306 * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain
1307 * @domain: The interrupt domain
1310 void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev) pci_msi_domain_free_irqs() argument
1312 msi_domain_free_irqs(domain, &dev->dev); pci_msi_domain_free_irqs()
1316 * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain
1318 * @info: MSI domain info
1319 * @parent: Parent irq domain
1321 * Returns: A domain pointer or NULL in case of failure. If successful
1327 struct irq_domain *domain; pci_msi_create_default_irq_domain() local
1331 pr_err("PCI: default irq domain for PCI MSI has already been created.\n"); pci_msi_create_default_irq_domain()
1332 domain = NULL; pci_msi_create_default_irq_domain()
1334 domain = pci_msi_create_irq_domain(fwnode, info, parent); pci_msi_create_default_irq_domain()
1335 pci_msi_default_domain = domain; pci_msi_create_default_irq_domain()
1339 return domain; pci_msi_create_default_irq_domain()
1351 * @domain: The interrupt domain
1359 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) pci_msi_domain_get_msi_rid() argument
1366 of_node = irq_domain_get_of_node(domain); pci_msi_domain_get_msi_rid()
1374 * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
1377 * Use the firmware data to find a device-specific MSI domain
1380 * Returns: The corresponding MSI domain or NULL if none has been found.
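A hedged sketch of how the helpers documented above hand MSI allocation to a hierarchical domain; sketch_alloc_dev_msi is illustrative and error handling is minimal:

	static int sketch_alloc_dev_msi(struct pci_dev *pdev, int nvec)
	{
		struct irq_domain *d = dev_get_msi_domain(&pdev->dev);

		if (!d || !irq_domain_is_hierarchy(d))
			return -ENODEV;	/* fall back to the arch MSI path */
		return pci_msi_domain_alloc_irqs(d, pdev, nvec, PCI_CAP_ID_MSI);
	}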
H A Dxen-pcifront.c67 unsigned int domain, unsigned int bus, pcifront_init_sd()
72 sd->sd.domain = domain; pcifront_init_sd()
183 .domain = pci_domain_nr(bus), pcifront_bus_read()
221 .domain = pci_domain_nr(bus), pcifront_bus_write()
254 .domain = pci_domain_nr(dev->bus), pci_frontend_enable_msix()
308 .domain = pci_domain_nr(dev->bus), pci_frontend_disable_msix()
327 .domain = pci_domain_nr(dev->bus), pci_frontend_enable_msi()
356 .domain = pci_domain_nr(dev->bus), pci_frontend_disable_msi()
417 unsigned int domain, unsigned int bus, pcifront_scan_bus()
438 "%04x:%02x:%02x.%d found.\n", domain, bus, pcifront_scan_bus()
446 unsigned int domain, unsigned int bus) pcifront_scan_root()
460 if (domain != 0) { pcifront_scan_root()
462 "PCI Root in non-zero PCI Domain! domain=%d\n", domain); pcifront_scan_root()
471 domain, bus); pcifront_scan_root()
482 pcifront_init_sd(sd, domain, bus, pdev); pcifront_scan_root()
503 err = pcifront_scan_bus(pdev, domain, bus, b); pcifront_scan_root()
522 unsigned int domain, unsigned int bus) pcifront_rescan_root()
528 if (domain != 0) { pcifront_rescan_root()
530 "PCI Root in non-zero PCI Domain! domain=%d\n", domain); pcifront_rescan_root()
538 domain, bus); pcifront_rescan_root()
540 b = pci_find_bus(domain, bus); pcifront_rescan_root()
543 return pcifront_scan_root(pdev, domain, bus); pcifront_rescan_root()
545 err = pcifront_scan_bus(pdev, domain, bus, b); pcifront_rescan_root()
858 unsigned int domain, bus; pcifront_try_connect() local
901 "%x:%x", &domain, &bus); pcifront_try_connect()
910 err = pcifront_scan_root(pdev, domain, bus); pcifront_try_connect()
914 domain, bus); pcifront_try_connect()
952 unsigned int domain, bus; pcifront_attach_devices() local
987 "%x:%x", &domain, &bus); pcifront_attach_devices()
996 err = pcifront_rescan_root(pdev, domain, bus); pcifront_attach_devices()
1000 domain, bus); pcifront_attach_devices()
1015 unsigned int domain, bus, slot, func; pcifront_detach_devices() local
1056 "%x:%x:%x.%x", &domain, &bus, &slot, &func); pcifront_detach_devices()
1065 pci_dev = pci_get_domain_bus_and_slot(domain, bus, pcifront_detach_devices()
1070 domain, bus, slot, func); pcifront_detach_devices()
1080 domain, bus, slot, func); pcifront_detach_devices()
66 pcifront_init_sd(struct pcifront_sd *sd, unsigned int domain, unsigned int bus, struct pcifront_device *pdev) pcifront_init_sd() argument
416 pcifront_scan_bus(struct pcifront_device *pdev, unsigned int domain, unsigned int bus, struct pci_bus *b) pcifront_scan_bus() argument
445 pcifront_scan_root(struct pcifront_device *pdev, unsigned int domain, unsigned int bus) pcifront_scan_root() argument
521 pcifront_rescan_root(struct pcifront_device *pdev, unsigned int domain, unsigned int bus) pcifront_rescan_root() argument
H A Dsearch.c123 * pci_find_bus - locate PCI bus from a given domain and bus number
124 * @domain: number of PCI domain to search
127 * Given a PCI bus number and domain number, the desired PCI bus is located
131 struct pci_bus *pci_find_bus(int domain, int busnr) pci_find_bus() argument
137 if (pci_domain_nr(bus) != domain) pci_find_bus()
206 * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
207 * @domain: PCI domain/segment on which the PCI device resides.
213 * Given a PCI domain, bus, and slot/function number, the desired PCI
220 struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, pci_get_domain_bus_and_slot() argument
226 if (pci_domain_nr(dev->bus) == domain && for_each_pci_dev()
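A hedged sketch using the lookup helpers documented above; note that pci_get_domain_bus_and_slot() returns a referenced device which must be dropped with pci_dev_put(). Names are illustrative:

	static void sketch_lookup(int domain, unsigned int bus, unsigned int devfn)
	{
		struct pci_bus *b = pci_find_bus(domain, bus);
		struct pci_dev *dev = pci_get_domain_bus_and_slot(domain, bus, devfn);

		if (b)
			pr_info("bus %04x:%02x exists\n", domain, bus);
		if (dev) {
			pr_info("found %s\n", pci_name(dev));
			pci_dev_put(dev);	/* balance the reference taken above */
		}
	}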
/linux-4.4.14/arch/arm/mach-zx/
H A Dzx296702-pm-domain.c38 static int normal_power_off(struct generic_pm_domain *domain) normal_power_off() argument
40 struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain; normal_power_off()
67 pr_err("Error: %s %s fail\n", __func__, domain->name); normal_power_off()
74 static int normal_power_on(struct generic_pm_domain *domain) normal_power_on() argument
76 struct zx_pm_domain *zpd = (struct zx_pm_domain *)domain; normal_power_on()
88 pr_err("Error: %s %s fail\n", __func__, domain->name); normal_power_on()
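A hedged sketch of the embedding pattern the zx296702 code (and the qcom gdsc code further down) relies on: the driver wraps generic_pm_domain and recovers its private state in the callbacks. All names here are illustrative:

	#include <linux/pm_domain.h>

	struct sketch_pm_domain {
		struct generic_pm_domain pd;
		void __iomem *ctrl;	/* illustrative private state */
	};

	static int sketch_power_on(struct generic_pm_domain *domain)
	{
		struct sketch_pm_domain *zpd =
			container_of(domain, struct sketch_pm_domain, pd);

		/* poke zpd->ctrl to ungate the domain here */
		(void)zpd;
		return 0;
	}

	/* registered once with:
	 *	zpd->pd.power_on = sketch_power_on;
	 *	pm_genpd_init(&zpd->pd, NULL, false);
	 */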
/linux-4.4.14/arch/arm/mach-shmobile/
H A Dpm-rmobile.c146 * This domain should not be turned off. rmobile_pd_suspend_busy()
154 * Serial consoles make use of SCIF hardware located in this domain, rmobile_pd_suspend_console()
155 * hence keep the power domain on if "no_console_suspend" is set. rmobile_pd_suspend_console()
206 pr_debug("Special PM domain %s type %d for %s\n", pd->name, type, add_special_pd()
223 /* PM domain containing console */ get_special_pds()
259 * This domain contains the CPU core and therefore it should rmobile_setup_pm_domain()
262 pr_debug("PM domain %s contains CPU\n", name); rmobile_setup_pm_domain()
268 pr_debug("PM domain %s contains serial console\n", name); rmobile_setup_pm_domain()
275 * This domain contains the Coresight-ETM hardware block and rmobile_setup_pm_domain()
279 pr_debug("PM domain %s contains Coresight-ETM\n", name); rmobile_setup_pm_domain()
286 * This domain contains a memory-controller and therefore it rmobile_setup_pm_domain()
289 pr_debug("PM domain %s contains MEMCTL\n", name); rmobile_setup_pm_domain()
312 /* always-on domain */ for_each_child_of_node()
/linux-4.4.14/drivers/regulator/
H A Ddbx500-prcmu.h22 * @epod_id: id for EPOD (power domain)
23 * @is_ramret: RAM retention switch for EPOD (power domain)
/linux-4.4.14/drivers/staging/board/
H A Dboard.c140 const char *domain) board_staging_add_dev_domain()
146 np = of_find_node_by_path(domain); board_staging_add_dev_domain()
148 pr_err("Cannot find domain node %s\n", domain); board_staging_add_dev_domain()
156 pr_err("Cannot find genpd %s (%ld)\n", domain, PTR_ERR(pd)); board_staging_add_dev_domain()
166 const char *domain) board_staging_add_dev_domain()
190 if (dev->domain) board_staging_register_device()
191 board_staging_add_dev_domain(pdev, dev->domain); board_staging_register_device()
139 board_staging_add_dev_domain(struct platform_device *pdev, const char *domain) board_staging_add_dev_domain() argument
165 board_staging_add_dev_domain(struct platform_device *pdev, const char *domain) board_staging_add_dev_domain() argument
H A Dboard.h20 const char *domain; member in struct:board_staging_dev
/linux-4.4.14/drivers/mfd/
H A Dlp8788-irq.c36 * @domain : IRQ domain for handling nested interrupt
42 struct irq_domain *domain; member in struct:lp8788_irq_data
127 handle_nested_irq(irq_find_mapping(irqd->domain, i)); lp8788_irq_handler()
168 irqd->domain = irq_domain_add_linear(lp->dev->of_node, LP8788_INT_MAX, lp8788_irq_init()
170 if (!irqd->domain) { lp8788_irq_init()
171 dev_err(lp->dev, "failed to add irq domain err\n"); lp8788_irq_init()
175 lp->irqdm = irqd->domain; lp8788_irq_init()
H A Dmax8998-irq.c219 struct irq_domain *domain; max8998_irq_init() local
239 domain = irq_domain_add_simple(NULL, MAX8998_IRQ_NR, max8998_irq_init()
241 if (!domain) { max8998_irq_init()
242 dev_err(max8998->dev, "could not create irq domain\n"); max8998_irq_init()
245 max8998->irq_domain = domain; max8998_irq_init()
/linux-4.4.14/drivers/clk/qcom/
H A Dgdsc.c43 #define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
124 static int gdsc_enable(struct generic_pm_domain *domain) gdsc_enable() argument
126 struct gdsc *sc = domain_to_gdsc(domain); gdsc_enable()
140 * If clocks to this power domain were already on, they will take an gdsc_enable()
141 * additional 4 clock cycles to re-enable after the power domain is gdsc_enable()
151 static int gdsc_disable(struct generic_pm_domain *domain) gdsc_disable() argument
153 struct gdsc *sc = domain_to_gdsc(domain); gdsc_disable()
/linux-4.4.14/security/apparmor/include/
H A Ddomain.h4 * This file contains AppArmor security domain transition function definitions.
31 void aa_free_domain_entries(struct aa_domain *domain);
/linux-4.4.14/include/xen/
H A Dxen.h6 XEN_PV_DOMAIN, /* running in a PV domain */
7 XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
/linux-4.4.14/include/linux/clk/
H A Dshmobile.h29 int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev);
30 void cpg_mstp_detach_dev(struct generic_pm_domain *domain, struct device *dev);
/linux-4.4.14/drivers/staging/skein/
H A Dskein_block.h7 ** This algorithm and source code is released to the public domain.
/linux-4.4.14/include/net/
H A Dregulatory.h48 * regulatory domain. We have a few special codes:
49 * 00 - World regulatory domain
52 * 97 - regulatory domain has not yet been configured
53 * @dfs_region: If CRDA responded with a regulatory domain that requires
62 * the requested regulatory domain with the presently set regulatory
63 * domain.
66 * currently regulatory domain set on cfg80211 is updated from
93 * has its own custom regulatory domain and cannot identify the
104 * has regulatory domain that it wishes to be considered as the
106 * domain programmed further regulatory hints shall only be considered
110 * device's regulatory domain no user specified regulatory hint which
112 * the device's original regulatory domain will be trusted as the
117 * domain is set, and all other regulatory hints will be ignored
118 * until their own regulatory domain gets programmed.
127 * derived from the regulatory domain. The regulatory domain used will be
/linux-4.4.14/include/asm-generic/
H A Dmsi.h15 * @hwirq: Associated hw interrupt number in the domain
H A Dpci-bridge.h29 /* Enable domain numbers in /proc */
31 /* ... except for domain 0 */
/linux-4.4.14/include/linux/decompress/
H A Dunxz.h6 * This file has been put into the public domain.
/linux-4.4.14/include/linux/regulator/
H A Ddb8500-prcmu.h8 * Interface to power domain regulators on DB8500
/linux-4.4.14/arch/nios2/kernel/
H A Dirq.c77 struct irq_domain *domain; init_IRQ() local
86 domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL); init_IRQ()
87 BUG_ON(!domain); init_IRQ()
89 irq_set_default_host(domain); init_IRQ()
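A hedged sketch of the single-domain setup init_IRQ() performs above: one linear domain for the CPU's internal interrupts, installed as the default host so mappings requested without a domain pointer still resolve against it. The ops pointer is assumed supplied by the caller:

	static void sketch_root_domain(struct device_node *node,
				       const struct irq_domain_ops *ops)
	{
		struct irq_domain *domain;

		domain = irq_domain_add_linear(node, 32, ops, NULL);
		BUG_ON(!domain);	/* no fallback possible for the root domain */
		irq_set_default_host(domain);
	}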
/linux-4.4.14/include/linux/platform_data/
H A Ddma-imx.h20 IMX_DMATYPE_SSI, /* MCU domain SSI */
24 IMX_DMATYPE_UART, /* MCU domain UART */
27 IMX_DMATYPE_CSPI, /* MCU domain CSPI */
/linux-4.4.14/drivers/gpio/
H A Dgpio-tb10x.c51 * @domain: IRQ domain of GPIO generated interrupts managed by this controller
58 struct irq_domain *domain; member in struct:tb10x_gpio
145 return irq_create_mapping(tb10x_gpio->domain, offset); tb10x_gpio_to_irq()
169 generic_handle_irq(irq_find_mapping(tb10x_gpio->domain, i)); tb10x_gpio_irq_cascade()
239 tb10x_gpio->domain = irq_domain_add_linear(dn, tb10x_gpio_probe()
242 if (!tb10x_gpio->domain) { tb10x_gpio_probe()
247 ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain, tb10x_gpio_probe()
254 gc = tb10x_gpio->domain->gc->gc[0]; tb10x_gpio_probe()
281 irq_remove_generic_chip(tb10x_gpio->domain->gc->gc[0], tb10x_gpio_remove()
283 kfree(tb10x_gpio->domain->gc); tb10x_gpio_remove()
284 irq_domain_remove(tb10x_gpio->domain); tb10x_gpio_remove()
H A Dgpio-dwapb.c83 struct irq_domain *domain; member in struct:dwapb_gpio
115 return irq_find_mapping(gpio->domain, offset); dwapb_gpio_to_irq()
137 int gpio_irq = irq_find_mapping(gpio->domain, hwirq); dwapb_do_irq()
305 gpio->domain = irq_domain_add_linear(node, ngpio, dwapb_configure_irqs()
307 if (!gpio->domain) dwapb_configure_irqs()
310 err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, dwapb_configure_irqs()
316 irq_domain_remove(gpio->domain); dwapb_configure_irqs()
317 gpio->domain = NULL; dwapb_configure_irqs()
321 irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); dwapb_configure_irqs()
323 irq_domain_remove(gpio->domain); dwapb_configure_irqs()
324 gpio->domain = NULL; dwapb_configure_irqs()
363 irq_domain_remove(gpio->domain); dwapb_configure_irqs()
364 gpio->domain = NULL; dwapb_configure_irqs()
370 irq_create_mapping(gpio->domain, hwirq); dwapb_configure_irqs()
382 if (!gpio->domain) dwapb_irq_teardown()
386 irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq)); dwapb_irq_teardown()
388 irq_domain_remove(gpio->domain); dwapb_irq_teardown()
389 gpio->domain = NULL; dwapb_irq_teardown()
H A Dgpio-grgpio.c75 * independently of each other. This driver sets up an irq domain and
78 struct irq_domain *domain; member in struct:grgpio_priv
125 return irq_create_mapping(priv->domain, offset); grgpio_to_irq()
404 priv->domain = irq_domain_add_linear(np, gc->ngpio, grgpio_probe()
407 if (!priv->domain) { grgpio_probe()
408 dev_err(&ofdev->dev, "Could not add irq domain\n"); grgpio_probe()
441 if (priv->domain) grgpio_probe()
442 irq_domain_remove(priv->domain); grgpio_probe()
447 priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off"); grgpio_probe()
461 if (priv->domain) { grgpio_remove()
472 if (priv->domain) grgpio_remove()
473 irq_domain_remove(priv->domain); grgpio_remove()
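A hedged sketch of the gpiolib glue the GPIO drivers above share: .to_irq translates a GPIO offset into a virq through the driver's own domain; struct and names are illustrative:

	#include <linux/gpio/driver.h>
	#include <linux/irqdomain.h>

	struct sketch_gpio {
		struct gpio_chip gc;
		struct irq_domain *domain;
	};

	static int sketch_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
	{
		struct sketch_gpio *priv = container_of(gc, struct sketch_gpio, gc);

		/* creates the mapping on demand, like grgpio_to_irq() above */
		return irq_create_mapping(priv->domain, offset);
	}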
/linux-4.4.14/fs/ocfs2/dlm/
H A Ddlmdomain.c6 * defines domain join / leave apis
128 * be used to determine what a running domain is actually using.
134 * - Message DLM_BEGIN_EXIT_DOMAIN_MSG added to mark start of exit domain
259 static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len) __dlm_lookup_domain_full() argument
266 * but domain may not be! */ __dlm_lookup_domain_full()
269 memcmp(tmp->name, domain, len)==0) __dlm_lookup_domain_full()
276 /* For null terminated domain strings ONLY */ __dlm_lookup_domain()
277 static struct dlm_ctxt * __dlm_lookup_domain(const char *domain) __dlm_lookup_domain() argument
281 return __dlm_lookup_domain_full(domain, strlen(domain)); __dlm_lookup_domain()
286 * 1) the domain does not exist
287 * 2) the domain exists and it's state is "joined" */ dlm_wait_on_domain_helper()
288 static int dlm_wait_on_domain_helper(const char *domain) dlm_wait_on_domain_helper() argument
295 tmp = __dlm_lookup_domain(domain); dlm_wait_on_domain_helper()
336 mlog(0, "freeing memory from domain %s\n", dlm->name); dlm_ctxt_release()
409 /* We've left the domain. Now we can take ourselves out of the dlm_complete_dlm_shutdown()
416 /* Wake up anyone waiting for us to remove this domain */ dlm_complete_dlm_shutdown()
428 mlog(0, "Migrating locks from domain %s\n", dlm->name); dlm_migrate_all_locks()
473 mlog(0, "DONE Migrating locks from domain %s\n", dlm->name); dlm_migrate_all_locks()
499 mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node); dlm_begin_exit_domain_handler()
565 printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name); dlm_exit_domain_handler()
584 mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name, dlm_send_one_domain_exit()
593 mlog(ML_ERROR, "Error %d sending domain exit message %u " dlm_send_one_domain_exit()
594 "to node %u on domain %s\n", status, msg_type, node, dlm_send_one_domain_exit()
604 /* Support for begin exit domain was added in 1.2 */ dlm_begin_exit_domain()
639 /* Clear ourselves from the domain map */ dlm_leave_domain()
658 mlog(ML_NOTICE, "Error %d sending domain exit message " dlm_leave_domain()
704 * domain. Don't use DLM_CTXT_LEAVING yet as we still dlm_unregister_domain()
705 * want new domain joins to communicate with us at dlm_unregister_domain()
714 mlog(0, "shutting down domain %s\n", dlm->name); dlm_unregister_domain()
736 printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name); dlm_unregister_domain()
820 mlog(0, "node %u wants to join domain %s\n", query->node_idx, dlm_query_join_handler()
821 query->domain); dlm_query_join_handler()
839 dlm = __dlm_lookup_domain_full(query->domain, query->name_len); dlm_query_join_handler()
863 * to be put in someone's domain map. dlm_query_join_handler()
889 "is still in the domain! needs recovery?\n", dlm_query_join_handler()
893 /* Alright we're fully a part of this domain dlm_query_join_handler()
935 mlog(0, "node %u asserts join on domain %s\n", assert->node_idx, dlm_assert_joined_handler()
936 assert->domain); dlm_assert_joined_handler()
939 dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len); dlm_assert_joined_handler()
945 * domain. Set him in the map and clean up our dlm_assert_joined_handler()
960 printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ", dlm_assert_joined_handler()
1123 mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node, dlm_query_region_handler()
1136 mlog(ML_ERROR, "Node %d queried hb regions on domain %s " dlm_query_region_handler()
1137 "before join domain\n", qr->qr_node, qr->qr_domain); dlm_query_region_handler()
1143 mlog(ML_ERROR, "Node %d queried hb regions on domain %s " dlm_query_region_handler()
1152 mlog(ML_ERROR, "Node %d queried hb regions on domain %s " dlm_query_region_handler()
1299 mlog(0, "Node %u queries nodes on domain %s\n", qn->qn_nodenum, dlm_query_nodeinfo_handler()
1305 mlog(ML_ERROR, "Node %d queried nodes on domain %s before " dlm_query_nodeinfo_handler()
1306 "join domain\n", qn->qn_nodenum, qn->qn_domain); dlm_query_nodeinfo_handler()
1313 mlog(ML_ERROR, "Node %d queried nodes on domain %s but " dlm_query_nodeinfo_handler()
1322 mlog(ML_ERROR, "Node %d queried nodes on domain %s " dlm_query_nodeinfo_handler()
1347 mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx, dlm_cancel_join_handler()
1348 cancel->domain); dlm_cancel_join_handler()
1351 dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len); dlm_cancel_join_handler()
1377 memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len); dlm_send_one_join_cancel()
1444 memcpy(join_msg.domain, dlm->name, join_msg.name_len); dlm_request_join()
1464 joined into the domain. */ dlm_request_join()
1529 memcpy(assert_msg.domain, dlm->name, assert_msg.name_len); dlm_send_one_join_assert()
1659 /* Yay, everyone agrees we can join the domain. My domain is dlm_try_to_join_domain()
1661 * yes_resp_map. Copy that into our domain map and send a join dlm_try_to_join_domain()
1689 * we're not in the domain. */ dlm_try_to_join_domain()
1699 printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name); dlm_try_to_join_domain()
1873 mlog(0, "Join domain %s\n", dlm->name); dlm_join_domain()
1922 mlog(ML_NOTICE, "Timed out joining dlm domain " dlm_join_domain()
1963 static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain, dlm_alloc_ctxt() argument
1977 dlm->name = kstrdup(domain, GFP_KERNEL); dlm_alloc_ctxt()
2111 * dlm_register_domain: one-time setup per "domain".
2117 struct dlm_ctxt * dlm_register_domain(const char *domain, dlm_register_domain() argument
2125 if (strlen(domain) >= O2NM_MAX_NAME_LEN) { dlm_register_domain()
2127 mlog(ML_ERROR, "domain name length too long\n"); dlm_register_domain()
2131 mlog(0, "register called for domain \"%s\"\n", domain); dlm_register_domain()
2143 dlm = __dlm_lookup_domain(domain); dlm_register_domain()
2151 domain)); dlm_register_domain()
2159 "compatible with already registered domain " dlm_register_domain()
2160 "\"%s\"\n", domain); dlm_register_domain()
2178 new_ctxt = dlm_alloc_ctxt(domain, key); dlm_register_domain()
2191 /* add the new domain */ dlm_register_domain()
2280 * evict a node from it's domain *before* heartbeat fires, a similar
2283 /* Eviction is not expected to happen often, so a per-domain lock is
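A hedged sketch of the lookup __dlm_lookup_domain_full() performs above: the stored name is NUL terminated but the queried one may not be, so both length and bytes are compared. The list-head parameter and member name are assumptions:

	static struct dlm_ctxt *sketch_lookup_domain(struct list_head *domains,
						     const char *domain, int len)
	{
		struct dlm_ctxt *tmp;

		list_for_each_entry(tmp, domains, list)
			if (strlen(tmp->name) == len &&
			    !memcmp(tmp->name, domain, len))
				return tmp;
		return NULL;
	}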
/linux-4.4.14/arch/powerpc/perf/
H A Dhv-24x7.c30 static const char *event_domain_suffix(unsigned domain) event_domain_suffix() argument
32 switch (domain) { event_domain_suffix()
39 WARN(1, "unknown domain %d\n", domain); event_domain_suffix()
44 static bool domain_is_valid(unsigned domain) domain_is_valid() argument
46 switch (domain) { domain_is_valid()
58 static bool is_physical_domain(unsigned domain) is_physical_domain() argument
60 switch (domain) { is_physical_domain()
71 static bool catalog_entry_domain_is_valid(unsigned domain) catalog_entry_domain_is_valid() argument
73 return is_physical_domain(domain); catalog_entry_domain_is_valid()
80 * - y = indexes in the domain (core, chip, vcpu, node, etc)
97 * perf stat -e 'hv_24x7/domain=2,offset=8,vcpu=0,lpar=0xffffffff/'
101 EVENT_DEFINE_RANGE_FORMAT(domain, config, 0, 3);
287 static char *event_fmt(struct hv_24x7_event_data *event, unsigned domain) event_fmt() argument
292 if (is_physical_domain(domain)) { event_fmt()
301 "domain=0x%x,offset=0x%x,%s=?,lpar=%s", event_fmt()
302 domain, event_fmt()
385 unsigned domain, event_to_attr()
393 if (!domain_is_valid(domain)) { event_to_attr()
394 pr_warn("catalog event %u has invalid domain %u\n", event_to_attr()
395 ix, domain); event_to_attr()
399 val = event_fmt(event, domain); event_to_attr()
403 ev_suffix = event_domain_suffix(domain); event_to_attr()
460 switch (event->domain) { event_data_to_attrs()
462 *attrs = event_to_attr(ix, event, event->domain, nonce); event_data_to_attrs()
478 pr_warn("catalog event %u: domain %u is not allowed in the " event_data_to_attrs()
479 "catalog\n", ix, event->domain); event_data_to_attrs()
486 switch (event->domain) { event_to_attr_ct()
510 unsigned domain; member in struct:event_uniq
538 unsigned domain) event_uniq_add()
549 result = ev_uniq_ord(name, nl, domain, it->name, it->nl, event_uniq_add()
550 it->domain); event_uniq_add()
573 .domain = domain, event_uniq_add()
797 if (!catalog_entry_domain_is_valid(event->domain)) { create_events_from_catalog()
798 pr_info("event %zu (%.*s) has invalid domain %d\n", create_events_from_catalog()
799 event_idx, nl, name, event->domain); create_events_from_catalog()
847 if (!catalog_entry_domain_is_valid(event->domain)) create_events_from_catalog()
851 nonce = event_uniq_add(&ev_uniq, name, nl, event->domain); create_events_from_catalog()
1148 unsigned domain; h_24x7_event_init() local
1190 domain = event_get_domain(event); h_24x7_event_init()
1191 if (domain > 6) { h_24x7_event_init()
1192 pr_devel("invalid domain %d\n", domain); h_24x7_event_init()
1203 if (!caps.collect_privileged && (is_physical_domain(domain) || h_24x7_event_init()
1206 is_physical_domain(domain), h_24x7_event_init()
383 event_to_attr(unsigned ix, struct hv_24x7_event_data *event, unsigned domain, int nonce) event_to_attr() argument
537 event_uniq_add(struct rb_root *root, const char *name, int nl, unsigned domain) event_uniq_add() argument
H A Dhv-24x7-catalog.h36 __u8 domain; /* Chip = 1, Core = 2 */ member in struct:hv_24x7_event_data
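A hedged sketch of the domain checks above; per the catalog header, chip = 1 and core = 2 are the physical domains, so catalog entries outside that set are rejected. Constant names are illustrative:

	enum { SKETCH_DOMAIN_CHIP = 1, SKETCH_DOMAIN_CORE = 2 };	/* from the header above */

	static bool sketch_entry_domain_is_valid(unsigned domain)
	{
		/* mirrors catalog_entry_domain_is_valid(): physical domains only */
		return domain == SKETCH_DOMAIN_CHIP || domain == SKETCH_DOMAIN_CORE;
	}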
/linux-4.4.14/drivers/soc/rockchip/
H A Dpm_domains.c2 * Rockchip Generic power domain support.
169 static int rockchip_pd_power_on(struct generic_pm_domain *domain) rockchip_pd_power_on() argument
171 struct rockchip_pm_domain *pd = to_rockchip_pd(domain); rockchip_pd_power_on()
176 static int rockchip_pd_power_off(struct generic_pm_domain *domain) rockchip_pd_power_off() argument
178 struct rockchip_pm_domain *pd = to_rockchip_pd(domain); rockchip_pd_power_off()
190 dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name); rockchip_pd_attach_dev()
216 dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name); rockchip_pd_detach_dev()
235 "%s: failed to retrieve domain id (reg): %d\n", rockchip_pm_add_one_domain()
241 dev_err(pmu->dev, "%s: invalid domain id %d\n", rockchip_pm_add_one_domain()
248 dev_err(pmu->dev, "%s: undefined domain id %d\n", rockchip_pm_add_one_domain()
284 dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n", rockchip_pm_add_one_domain()
291 "failed to power on domain '%s': %d\n", rockchip_pm_add_one_domain()
353 /* First configure domain power down transition count ... */ rockchip_configure_pd_cnt()
476 .name = "rockchip-pm-domain",
479 * We can't forcibly eject devices from the power domain,
/linux-4.4.14/arch/arm/mach-imx/
H A Dgpc.c195 /* No PPI should point to this domain */ imx_gpc_domain_translate()
207 static int imx_gpc_domain_alloc(struct irq_domain *domain, imx_gpc_domain_alloc() argument
219 return -EINVAL; /* No PPI should point to this domain */ imx_gpc_domain_alloc()
226 irq_domain_set_hwirq_and_chip(domain, irq + i, hwirq + i, imx_gpc_domain_alloc()
230 parent_fwspec.fwnode = domain->parent->fwnode; imx_gpc_domain_alloc()
231 return irq_domain_alloc_irqs_parent(domain, irq, nr_irqs, imx_gpc_domain_alloc()
244 struct irq_domain *parent_domain, *domain; imx_gpc_init() local
254 pr_err("%s: unable to obtain parent domain\n", node->full_name); imx_gpc_init()
262 domain = irq_domain_add_hierarchy(parent_domain, 0, GPC_MAX_IRQS, imx_gpc_init()
265 if (!domain) { imx_gpc_init()
304 /* Gate off the PU domain when GPU/VPU are powered down */ _imx6q_pm_pu_power_off()
341 /* Enable reset clocks for all devices in the PU domain */ imx6q_pm_pu_power_on()
345 /* Gate off the PU domain when GPU/VPU are powered down */ imx6q_pm_pu_power_on()
361 /* Disable reset clocks for all devices in the PU domain */ imx6q_pm_pu_power_on()
438 if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells")) imx_gpc_probe()
H A D3ds_debugboard.c63 static struct irq_domain *domain; variable in typeref:struct:irq_domain
104 generic_handle_irq(irq_find_mapping(domain, expio_irq)); mxc_expio_irq_handler()
192 domain = irq_domain_add_legacy(NULL, MXC_MAX_EXP_IO_LINES, irq_base, 0, mxc_expio_init()
194 WARN_ON(!domain); mxc_expio_init()
208 smsc911x_resources[1].start = irq_find_mapping(domain, EXPIO_INT_ENET); mxc_expio_init()
209 smsc911x_resources[1].end = irq_find_mapping(domain, EXPIO_INT_ENET); mxc_expio_init()
H A Davic.c55 static struct irq_domain *domain; variable in typeref:struct:irq_domain
147 handle_domain_irq(domain, nivector, regs); avic_handle_irq()
182 domain = irq_domain_add_legacy(np, AVIC_NUM_IRQS, irq_base, 0, mxc_init_irq()
184 WARN_ON(!domain); mxc_init_irq()
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
H A Damdgpu_object.c119 u32 domain, u64 flags) amdgpu_ttm_placement_init()
126 if (domain & AMDGPU_GEM_DOMAIN_VRAM) { amdgpu_ttm_placement_init()
141 if (domain & AMDGPU_GEM_DOMAIN_GTT) { amdgpu_ttm_placement_init()
152 if (domain & AMDGPU_GEM_DOMAIN_CPU) { amdgpu_ttm_placement_init()
163 if (domain & AMDGPU_GEM_DOMAIN_GDS) { amdgpu_ttm_placement_init()
168 if (domain & AMDGPU_GEM_DOMAIN_GWS) { amdgpu_ttm_placement_init()
173 if (domain & AMDGPU_GEM_DOMAIN_OA) { amdgpu_ttm_placement_init()
198 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain) amdgpu_ttm_placement_from_domain() argument
201 rbo->placements, domain, rbo->flags); amdgpu_ttm_placement_from_domain()
219 bool kernel, u32 domain, u64 flags, amdgpu_bo_create_restricted()
257 bo->initial_domain = domain & (AMDGPU_GEM_DOMAIN_VRAM | amdgpu_bo_create_restricted()
289 bool kernel, u32 domain, u64 flags, amdgpu_bo_create()
301 placements, domain, flags); amdgpu_bo_create()
304 domain, flags, sg, &placement, amdgpu_bo_create()
363 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, amdgpu_bo_pin_restricted() argument
383 if (domain == AMDGPU_GEM_DOMAIN_VRAM) amdgpu_bo_pin_restricted()
393 amdgpu_ttm_placement_from_domain(bo, domain); amdgpu_bo_pin_restricted()
421 if (domain == AMDGPU_GEM_DOMAIN_VRAM) amdgpu_bo_pin_restricted()
431 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr) amdgpu_bo_pin() argument
433 return amdgpu_bo_pin_restricted(bo, domain, 0, 0, gpu_addr); amdgpu_bo_pin()
116 amdgpu_ttm_placement_init(struct amdgpu_device *adev, struct ttm_placement *placement, struct ttm_place *placements, u32 domain, u64 flags) amdgpu_ttm_placement_init() argument
217 amdgpu_bo_create_restricted(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct ttm_placement *placement, struct reservation_object *resv, struct amdgpu_bo **bo_ptr) amdgpu_bo_create_restricted() argument
287 amdgpu_bo_create(struct amdgpu_device *adev, unsigned long size, int byte_align, bool kernel, u32 domain, u64 flags, struct sg_table *sg, struct reservation_object *resv, struct amdgpu_bo **bo_ptr) amdgpu_bo_create() argument
H A Damdgpu_object.h35 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
38 * Returns corresponding domain of the ttm mem_type
130 bool kernel, u32 domain, u64 flags,
136 bool kernel, u32 domain, u64 flags,
145 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr);
146 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
185 unsigned size, u32 align, u32 domain);
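A hedged sketch using the object API excerpted above: create a kernel-owned BO in VRAM, then pin it for a GPU address. Size and flags are illustrative, and the reservation locking a real caller needs is elided:

	static int sketch_vram_bo(struct amdgpu_device *adev,
				  struct amdgpu_bo **bo, u64 *gpu_addr)
	{
		int r;

		r = amdgpu_bo_create(adev, 4096, PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM, 0,
				     NULL /* sg */, NULL /* resv */, bo);
		if (r)
			return r;
		return amdgpu_bo_pin(*bo, AMDGPU_GEM_DOMAIN_VRAM, gpu_addr);
	}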
/linux-4.4.14/drivers/gpu/drm/qxl/
H A Dqxl_object.c53 void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) qxl_ttm_placement_from_domain() argument
61 if (domain == QXL_GEM_DOMAIN_VRAM) qxl_ttm_placement_from_domain()
63 if (domain == QXL_GEM_DOMAIN_SURFACE) qxl_ttm_placement_from_domain()
65 if (domain == QXL_GEM_DOMAIN_CPU) qxl_ttm_placement_from_domain()
79 unsigned long size, bool kernel, bool pinned, u32 domain, qxl_bo_create()
101 bo->type = domain; qxl_bo_create()
109 qxl_ttm_placement_from_domain(bo, domain, pinned); qxl_bo_create()
118 size, domain); qxl_bo_create()
224 int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) qxl_bo_pin() argument
235 qxl_ttm_placement_from_domain(bo, domain, true); qxl_bo_pin()
78 qxl_bo_create(struct qxl_device *qdev, unsigned long size, bool kernel, bool pinned, u32 domain, struct qxl_surface *surf, struct qxl_bo **bo_ptr) qxl_bo_create() argument
H A Dqxl_object.h89 bool kernel, bool pinned, u32 domain,
98 extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
100 extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
/linux-4.4.14/drivers/gpu/drm/i915/
H A Dintel_uncore.c248 struct intel_uncore_forcewake_domain *domain = (void *)arg; intel_uncore_fw_release_timer() local
251 assert_device_not_suspended(domain->i915); intel_uncore_fw_release_timer()
253 spin_lock_irqsave(&domain->i915->uncore.lock, irqflags); intel_uncore_fw_release_timer()
254 if (WARN_ON(domain->wake_count == 0)) intel_uncore_fw_release_timer()
255 domain->wake_count++; intel_uncore_fw_release_timer()
257 if (--domain->wake_count == 0) intel_uncore_fw_release_timer()
258 domain->i915->uncore.funcs.force_wake_put(domain->i915, intel_uncore_fw_release_timer()
259 1 << domain->id); intel_uncore_fw_release_timer()
261 spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags); intel_uncore_fw_release_timer()
268 struct intel_uncore_forcewake_domain *domain; intel_uncore_forcewake_reset() local
280 for_each_fw_domain(domain, dev_priv, id) { for_each_fw_domain()
281 if (del_timer_sync(&domain->timer) == 0) for_each_fw_domain()
284 intel_uncore_fw_release_timer((unsigned long)domain); for_each_fw_domain()
289 for_each_fw_domain(domain, dev_priv, id) { for_each_fw_domain()
290 if (timer_pending(&domain->timer)) for_each_fw_domain()
308 for_each_fw_domain(domain, dev_priv, id)
309 if (domain->wake_count)
388 struct intel_uncore_forcewake_domain *domain; __intel_uncore_forcewake_get() local
396 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { for_each_fw_domain_mask()
397 if (domain->wake_count++) for_each_fw_domain_mask()
406 * intel_uncore_forcewake_get - grab forcewake domain references
410 * This function can be used get GT's forcewake domain references.
434 * intel_uncore_forcewake_get__locked - grab forcewake domain references
455 struct intel_uncore_forcewake_domain *domain; __intel_uncore_forcewake_put() local
463 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { for_each_fw_domain_mask()
464 if (WARN_ON(domain->wake_count == 0)) for_each_fw_domain_mask()
467 if (--domain->wake_count) for_each_fw_domain_mask()
470 domain->wake_count++; for_each_fw_domain_mask()
471 fw_domain_arm_timer(domain); for_each_fw_domain_mask()
476 * intel_uncore_forcewake_put - release a forcewake domain reference
497 * intel_uncore_forcewake_put__locked - grab forcewake domain references
517 struct intel_uncore_forcewake_domain *domain; assert_forcewakes_inactive() local
523 for_each_fw_domain(domain, dev_priv, id) assert_forcewakes_inactive()
524 WARN_ON(domain->wake_count); assert_forcewakes_inactive()
696 struct intel_uncore_forcewake_domain *domain; __force_wake_get() local
703 for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { for_each_fw_domain_mask()
704 if (domain->wake_count) { for_each_fw_domain_mask()
709 domain->wake_count++; for_each_fw_domain_mask()
710 fw_domain_arm_timer(domain); for_each_fw_domain_mask()
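A hedged sketch of the reference discipline the uncore code above documents: take the forcewake domains around a register sequence and release them afterwards; the register accesses themselves are elided:

	static void sketch_fw_sequence(struct drm_i915_private *dev_priv)
	{
		intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
		/* ... registers that need the GT awake are touched here ... */
		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	}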
/linux-4.4.14/drivers/gpu/drm/msm/mdp/mdp5/
H A Dmdp5_irq.c110 mdp5_kms->irqcontroller.domain, hwirq)); mdp5_irq()
199 dev_err(dev, "mdp5 irq domain add failed\n"); mdp5_irq_domain_init()
204 mdp5_kms->irqcontroller.domain = d; mdp5_irq_domain_init()
211 if (mdp5_kms->irqcontroller.domain) { mdp5_irq_domain_fini()
212 irq_domain_remove(mdp5_kms->irqcontroller.domain); mdp5_irq_domain_fini()
213 mdp5_kms->irqcontroller.domain = NULL; mdp5_irq_domain_fini()
/linux-4.4.14/drivers/cpufreq/
H A Dscpi-cpufreq.c34 int domain = topology_physical_package_id(cpu_dev->id); scpi_get_dvfs_info() local
36 if (domain < 0) scpi_get_dvfs_info()
38 return scpi_ops->dvfs_get_info(domain); scpi_get_dvfs_info()
/linux-4.4.14/arch/x86/pci/
H A Dacpi.c191 seg = info->sd.domain; setup_mcfg_map()
220 pci_mmconfig_delete(info->sd.domain, teardown_mcfg_map()
319 int domain = root->segment; pci_acpi_scan_root() local
325 root->segment = domain = 0; pci_acpi_scan_root()
327 if (domain && !pci_domains_supported) { pci_acpi_scan_root()
330 domain, busnum); pci_acpi_scan_root()
334 bus = pci_find_bus(domain, busnum); pci_acpi_scan_root()
341 .domain = domain, pci_acpi_scan_root()
354 domain, busnum); pci_acpi_scan_root()
356 info->sd.domain = domain; pci_acpi_scan_root()
/linux-4.4.14/include/uapi/xen/
H A Dgntalloc.h8 * This file is in the public domain.
23 /* The ID of the domain to be given access to the grants. */
55 * cleanup if this side crashes. Required to implement cross-domain robust
H A Dprivcmd.h57 domid_t dom; /* target domain */
63 domid_t dom; /* target domain */
74 domid_t dom; /* target domain */
/linux-4.4.14/drivers/usb/gadget/function/
H A Dndis.h11 * This source code is offered for use in the public domain. You may
/linux-4.4.14/drivers/scsi/aic7xxx/
H A Dscsi_iu.h2 * This file is in the public domain.
/linux-4.4.14/drivers/infiniband/hw/usnic/
H A Dusnic_uiom.c68 static int usnic_uiom_dma_fault(struct iommu_domain *domain, usnic_uiom_dma_fault() argument
73 usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n", usnic_uiom_dma_fault()
75 domain, iova, flags); usnic_uiom_dma_fault()
209 iommu_unmap(pd->domain, va, PAGE_SIZE); list_for_each_entry_safe()
286 err = iommu_map(pd->domain, va_start, pa_start, list_for_each_entry()
303 err = iommu_map(pd->domain, va_start, pa_start, list_for_each_entry()
468 void *domain; usnic_uiom_alloc_pd() local
474 pd->domain = domain = iommu_domain_alloc(&pci_bus_type); usnic_uiom_alloc_pd()
475 if (!domain) { usnic_uiom_alloc_pd()
476 usnic_err("Failed to allocate IOMMU domain"); usnic_uiom_alloc_pd()
481 iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); usnic_uiom_alloc_pd()
491 iommu_domain_free(pd->domain); usnic_uiom_dealloc_pd()
505 err = iommu_attach_device(pd->domain, dev); usnic_uiom_attach_dev_to_pd()
524 iommu_detach_device(pd->domain, dev); usnic_uiom_attach_dev_to_pd()
554 return iommu_detach_device(pd->domain, dev); usnic_uiom_detach_dev_from_pd()
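
usnic_uiom.c exercises most of the IOMMU domain API in one file: allocate a domain, install a fault handler, attach a device, map and unmap, detach, free. A condensed sketch of that lifecycle, assuming a PCI device and a single-page mapping (demo_ names are illustrative):

#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/pci.h>

static int demo_fault(struct iommu_domain *dom, struct device *dev,
                      unsigned long iova, int flags, void *token)
{
        dev_err(dev, "iommu fault: iova 0x%lx flags 0x%x\n", iova, flags);
        return -ENOSYS;         /* unhandled: let the core report it */
}

static int demo_iommu_lifecycle(struct device *dev, phys_addr_t pa,
                                unsigned long iova)
{
        struct iommu_domain *dom = iommu_domain_alloc(&pci_bus_type);
        int err;

        if (!dom)
                return -ENOMEM;
        iommu_set_fault_handler(dom, demo_fault, NULL);

        err = iommu_attach_device(dom, dev);
        if (err)
                goto free_dom;

        err = iommu_map(dom, iova, pa, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
        if (!err)
                iommu_unmap(dom, iova, PAGE_SIZE);

        iommu_detach_device(dom, dev);
free_dom:
        iommu_domain_free(dom);
        return err;
}
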
/linux-4.4.14/drivers/pinctrl/
H A Dpinctrl-adi2.c52 Each IRQ domain is bound to a GPIO bank device. Two GPIO bank devices can map
57 domain pointer in domain[0]. The IRQ domain pointer of the other bank is set
58 to domain[1]. The PINT interrupt handler adi_gpio_handle_pint_irq() selects
59 the current domain pointer based on whether the interrupt request mask
60 is in the lower 16 bits (domain[0]) or the upper 16 bits (domain[1]).
113 * @domain: [0] irq domain of the gpio port, whose hardware interrupts are
115 * [1] irq domain of the gpio port, whose hardware interrupts are
128 struct irq_domain *domain[2]; member in struct:gpio_pint
135 u8 map, struct irq_domain *domain);
170 * @domain: The irq domain owned by the GPIO port.
188 struct irq_domain *domain; member in struct:gpio_port
541 struct irq_domain *domain; adi_gpio_handle_pint_irq() local
550 domain = pint->domain[0]; adi_gpio_handle_pint_irq()
552 /* domain pointer needs to be changed only once at IRQ 16 when adi_gpio_handle_pint_irq()
556 domain = pint->domain[1]; adi_gpio_handle_pint_irq()
563 generic_handle_irq(irq_find_mapping(domain, adi_gpio_handle_pint_irq()
792 return irq_find_mapping(port->domain, offset); adi_gpio_to_irq()
794 return irq_create_mapping(port->domain, offset); adi_gpio_to_irq()
798 struct irq_domain *domain) adi_pint_map_port()
821 pint->domain[assign] = domain; adi_pint_map_port()
901 port->domain = irq_domain_add_linear(node, port->width, adi_gpio_init_int()
903 if (!port->domain) { adi_gpio_init_int()
916 port->pint_map, port->domain); adi_gpio_init_int()
921 ret = irq_create_strict_mappings(port->domain, port->irq_base, adi_gpio_init_int()
924 dev_err(port->dev, "Couldn't associate to domain\n"); adi_gpio_init_int()
1023 irq_domain_remove(port->domain); adi_gpio_probe()
1037 irq_dispose_mapping(irq_find_mapping(port->domain, adi_gpio_remove()
1039 irq_domain_remove(port->domain); adi_gpio_remove()
797 adi_pint_map_port(struct gpio_pint *pint, bool assign, u8 map, struct irq_domain *domain) adi_pint_map_port() argument
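
The header comment above describes demultiplexing one 32-bit PINT status word across two irq domains, one per 16-bit half, switching the domain pointer once at bit 16. A hedged sketch of that dispatch (the demo_ name and the bit-to-hwirq mapping are illustrative):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static void demo_pint_demux(u32 status, struct irq_domain *domains[2])
{
        struct irq_domain *domain = domains[0];
        unsigned int bit;

        for (bit = 0; status; bit++, status >>= 1) {
                if (bit == 16)
                        domain = domains[1];    /* switch halves once */
                if (status & 1)
                        generic_handle_irq(irq_find_mapping(domain,
                                                            bit & 15));
        }
}
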
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/
H A Dboost.h18 u8 domain; member in struct:nvbios_boostS
/linux-4.4.14/arch/mips/ath25/
H A Dar2315.c79 struct irq_domain *domain = irq_desc_get_handler_data(desc); ar2315_misc_irq_handler() local
82 misc_irq = irq_find_mapping(domain, nr); ar2315_misc_irq_handler()
151 struct irq_domain *domain; ar2315_arch_init_irq() local
156 domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT, ar2315_arch_init_irq()
158 if (!domain) ar2315_arch_init_irq()
159 panic("Failed to add IRQ domain"); ar2315_arch_init_irq()
161 irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB); ar2315_arch_init_irq()
165 ar2315_misc_irq_handler, domain); ar2315_arch_init_irq()
167 ar2315_misc_irq_domain = domain; ar2315_arch_init_irq()
H A Dar5312.c83 struct irq_domain *domain = irq_desc_get_handler_data(desc); ar5312_misc_irq_handler() local
86 misc_irq = irq_find_mapping(domain, nr); ar5312_misc_irq_handler()
146 struct irq_domain *domain; ar5312_arch_init_irq() local
151 domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT, ar5312_arch_init_irq()
153 if (!domain) ar5312_arch_init_irq()
154 panic("Failed to add IRQ domain"); ar5312_arch_init_irq()
156 irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC); ar5312_arch_init_irq()
160 ar5312_misc_irq_handler, domain); ar5312_arch_init_irq()
162 ar5312_misc_irq_domain = domain; ar5312_arch_init_irq()
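
Both ath25 files follow the same recipe: create a linear domain, eagerly map the hwirqs that are needed, and install a chained handler that demuxes through irq_find_mapping(). A hedged sketch of the recipe (demo_ names and the count are illustrative; reading the controller's status register is elided):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>

#define DEMO_MISC_IRQ_COUNT 32

static void demo_misc_handler(struct irq_desc *desc)
{
        struct irq_domain *domain = irq_desc_get_handler_data(desc);
        unsigned int nr = 0;    /* would come from the status register */

        generic_handle_irq(irq_find_mapping(domain, nr));
}

static void demo_misc_init(unsigned int parent_irq,
                           const struct irq_domain_ops *ops)
{
        struct irq_domain *domain;

        domain = irq_domain_add_linear(NULL, DEMO_MISC_IRQ_COUNT, ops, NULL);
        if (!domain)
                panic("Failed to add IRQ domain");

        irq_create_mapping(domain, 0);  /* eager mapping, as above */
        irq_set_chained_handler_and_data(parent_irq, demo_misc_handler,
                                         domain);
}
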
/linux-4.4.14/arch/arm/mm/
H A Dfsr-2level.c15 { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
17 { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
55 { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
57 { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
/linux-4.4.14/include/xen/interface/io/
H A Dnetif.h82 * /local/domain/1/device/vif/0/multi-queue-num-queues = "2"
83 * /local/domain/1/device/vif/0/queue-0 = ""
84 * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
85 * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
86 * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>"
87 * /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>"
88 * /local/domain/1/device/vif/0/queue-1 = ""
89 * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
90 * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1>"
91 * /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
92 * /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>"
/linux-4.4.14/drivers/of/
H A Dof_pci.c97 * This function will try to obtain the host bridge domain number by
98 * finding a property called "linux,pci-domain" of the given device node.
100 * @node: device tree node with the domain information
102 * Returns the associated domain number from DT in the range [0-0xffff], or
109 u16 domain; of_get_pci_domain_nr() local
111 value = of_get_property(node, "linux,pci-domain", &len); of_get_pci_domain_nr()
115 domain = (u16)be32_to_cpup(value); of_get_pci_domain_nr()
117 return domain; of_get_pci_domain_nr()
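
of_get_pci_domain_nr() above returns the "linux,pci-domain" value or a negative error when the property is missing or malformed. A hedged caller-side sketch (the fallback-to-zero policy is illustrative, not mandated by the helper):

#include <linux/of_pci.h>

/* Use the DT-provided domain when present, else default to domain 0. */
static int demo_pci_domain(struct device_node *node)
{
        int domain = of_get_pci_domain_nr(node);

        return domain < 0 ? 0 : domain;
}
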
/linux-4.4.14/drivers/pci/host/
H A Dpcie-altera-msi.c124 static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, altera_irq_domain_alloc() argument
127 struct altera_msi *msi = domain->host_data; altera_irq_domain_alloc()
144 irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, altera_irq_domain_alloc()
145 domain->host_data, handle_simple_irq, altera_irq_domain_alloc()
155 static void altera_irq_domain_free(struct irq_domain *domain, altera_irq_domain_free() argument
158 struct irq_data *d = irq_domain_get_irq_data(domain, virq); altera_irq_domain_free()
189 dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); altera_allocate_domains()
196 dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); altera_allocate_domains()
/linux-4.4.14/arch/x86/include/asm/xen/
H A Dpci.h19 int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain);
31 uint16_t domain) xen_register_device_domain_owner()
30 xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain) xen_register_device_domain_owner() argument
/linux-4.4.14/arch/powerpc/platforms/powernv/
H A Dopal-irqchip.c37 struct irq_domain *domain; member in struct:opal_event_irqchip
65 virq = irq_find_mapping(opal_event_irqchip.domain, opal_handle_events()
195 /* If dn is NULL it means the domain won't be linked to a DT opal_event_init()
203 opal_event_irqchip.domain = irq_domain_add_linear(dn, MAX_NUM_EVENTS, opal_event_init()
206 if (!opal_event_irqchip.domain) { opal_event_init()
207 pr_warn("opal: Unable to create irq domain\n"); opal_event_init()
261 if (WARN_ON_ONCE(!opal_event_irqchip.domain)) opal_event_request()
264 return irq_create_mapping(opal_event_irqchip.domain, opal_event_nr); opal_event_request()
/linux-4.4.14/arch/arm64/kernel/
H A Dpci.c67 int raw_pci_read(unsigned int domain, unsigned int bus, raw_pci_read() argument
73 int raw_pci_write(unsigned int domain, unsigned int bus, raw_pci_write() argument
/linux-4.4.14/arch/ia64/include/asm/
H A Dhw_irq.h101 cpumask_t domain; member in struct:irq_cfg
108 #define irq_to_domain(x) irq_cfg[(x)].domain
120 extern int bind_irq_vector(int irq, int vector, cpumask_t domain);
163 * interrupt domain that a CPU belongs to. This API abstracts such platform-dependent
177 * done in the context of the interrupt domain that the currently executing CPU belongs
/linux-4.4.14/drivers/net/wireless/ath/
H A Ddfs_pattern_detector.c25 * struct radar_types - contains array of patterns defined for one DFS domain
26 * @domain: DFS regulatory domain
135 * get_dfs_domain_radar_types() - get radar types for a given DFS domain
136 * @param domain DFS domain
137 * @return radar_types ptr on success, NULL if DFS domain is not supported
278 * domain are treated as detected radars for fail-safety dpd_add_pulse()
330 /* delete all channel detectors for previous DFS domain */ dpd_set_domain()
369 ath_dbg(common, DFS,"Could not set DFS domain to %d", region); dfs_pattern_detector_init()
/linux-4.4.14/drivers/powercap/
H A Dintel_rapl.c101 /* per domain data, some are optional */
145 struct rapl_domain *domain; member in struct:rapl_power_limit
206 /* per domain data. used to describe individual knobs such that access function
294 /* prevent CPU hotplug, make sure the RAPL domain does not go get_energy_counter()
390 /* per RAPL domain ops, in the order of rapl_domain_type */
596 /* called after domain detection and package level data are set */ rapl_init_domains()
653 pr_info("DRAM domain energy unit %dpj\n", rapl_init_domains()
682 /* per domain unit takes precedence */ rapl_unit_xlate()
777 /* special-case package domain, which uses a different bit*/ rapl_read_data_raw()
1121 pr_debug("update package %d domain %s data\n", rp->id, rapl_update_domain_data()
1176 char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/ rapl_package_register_powercap()
1180 /* first we register package domain as the parent zone*/ rapl_package_register_powercap()
1184 pr_debug("register socket %d package domain %s\n", rapl_package_register_powercap()
1203 /* done, only one package domain per socket */ rapl_package_register_powercap()
1208 pr_err("no package domain found, unknown topology!\n"); rapl_package_register_powercap()
1216 /* number of power limits per domain varies */ rapl_package_register_powercap()
1236 * failed after the first domain setup. rapl_package_register_powercap()
1239 pr_debug("unregister package %d domain %s\n", rp->id, rd->name); rapl_package_register_powercap()
1269 pr_debug("unregister zone/package %d, %s domain\n", rapl_register_powercap()
1278 static int rapl_check_domain(int cpu, int domain) rapl_check_domain() argument
1283 switch (domain) { rapl_check_domain()
1297 pr_err("invalid domain id %d\n", domain); rapl_check_domain()
1300 /* make sure domain counters are available and contain non-zero rapl_check_domain()
1323 pr_info("Found RAPL domain %s\n", rapl_domain_names[i]); rapl_detect_domains()
1343 /* check if the domain is locked by BIOS */ rapl_detect_domains()
1348 pr_info("RAPL package %d domain %s locked by BIOS\n", rapl_detect_domains()
1373 /* RAPL interface can be made of a two-level hierarchy: package level and domain
1427 pr_debug("remove package %d, %s domain\n", rp->id, rd->name); rapl_remove_package()
1475 * per-domain level.
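
rapl_check_domain() above decides whether a RAPL domain exists by reading its status MSR and requiring a non-zero value. A hedged sketch of that probe, assuming the caller passes the domain's energy-status MSR number (the demo_ name is illustrative):

#include <linux/types.h>
#include <asm/msr.h>

/* A domain counts as present only if its energy-status MSR reads
 * successfully and holds a non-zero counter, per the excerpt. */
static bool demo_rapl_domain_present(int cpu, u32 status_msr)
{
        u64 val;

        if (rdmsrl_safe_on_cpu(cpu, status_msr, &val))
                return false;           /* MSR not implemented here */
        return val != 0;                /* zero counter => absent */
}
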
/linux-4.4.14/drivers/gpu/drm/radeon/
H A Dradeon_object.c96 void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) radeon_ttm_placement_from_domain() argument
102 if (domain & RADEON_GEM_DOMAIN_VRAM) { radeon_ttm_placement_from_domain()
121 if (domain & RADEON_GEM_DOMAIN_GTT) { radeon_ttm_placement_from_domain()
140 if (domain & RADEON_GEM_DOMAIN_CPU) { radeon_ttm_placement_from_domain()
180 u32 domain, u32 flags, struct sg_table *sg, radeon_bo_create()
216 bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM | radeon_bo_create()
257 radeon_ttm_placement_from_domain(bo, domain); radeon_bo_create()
329 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, radeon_bo_pin_restricted() argument
345 if (domain == RADEON_GEM_DOMAIN_VRAM) radeon_bo_pin_restricted()
355 radeon_ttm_placement_from_domain(bo, domain); radeon_bo_pin_restricted()
374 if (domain == RADEON_GEM_DOMAIN_VRAM) radeon_bo_pin_restricted()
384 int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) radeon_bo_pin() argument
386 return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); radeon_bo_pin()
539 u32 domain = lobj->prefered_domains; list_for_each_entry() local
553 (domain & current_domain) == 0 && /* will be moved */ list_for_each_entry()
556 domain = current_domain; list_for_each_entry()
560 radeon_ttm_placement_from_domain(bo, domain); list_for_each_entry()
571 domain != lobj->allowed_domains) { list_for_each_entry()
572 domain = lobj->allowed_domains; list_for_each_entry()
178 radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, struct sg_table *sg, struct reservation_object *resv, struct radeon_bo **bo_ptr) radeon_bo_create() argument
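
radeon_ttm_placement_from_domain() above walks the RADEON_GEM_DOMAIN_* bits in priority order and appends one placement per set bit. A hedged sketch of that shape with simplified types (the DEMO_ bit layout mirrors the uapi values but is illustrative):

#include <linux/types.h>

#define DEMO_DOMAIN_CPU  0x1
#define DEMO_DOMAIN_GTT  0x2
#define DEMO_DOMAIN_VRAM 0x4

/* Fill 'places' in VRAM > GTT > CPU priority order; returns count. */
static unsigned int demo_placements_from_domain(u32 domain, u32 places[3])
{
        unsigned int n = 0;

        if (domain & DEMO_DOMAIN_VRAM)
                places[n++] = DEMO_DOMAIN_VRAM;
        if (domain & DEMO_DOMAIN_GTT)
                places[n++] = DEMO_DOMAIN_GTT;
        if (domain & DEMO_DOMAIN_CPU)
                places[n++] = DEMO_DOMAIN_CPU;
        return n;
}
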
H A Dradeon_gem.c97 uint32_t domain; radeon_gem_set_domain() local
103 domain = wdomain; radeon_gem_set_domain()
104 if (!domain) { radeon_gem_set_domain()
105 domain = rdomain; radeon_gem_set_domain()
107 if (!domain) { radeon_gem_set_domain()
109 printk(KERN_WARNING "Set domain without domain!\n"); radeon_gem_set_domain()
112 if (domain == RADEON_GEM_DOMAIN_CPU) { radeon_gem_set_domain()
372 /* transition the BO to a domain - radeon_gem_set_domain_ioctl()
373 * just validate the BO into a certain domain */ radeon_gem_set_domain_ioctl()
380 /* for now if someone requests domain CPU - radeon_gem_set_domain_ioctl()
451 args->domain = radeon_mem_type_to_domain(cur_placement); radeon_gem_busy_ioctl()
546 unsigned domain; radeon_gem_va_update_vm() local
564 domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type); radeon_gem_va_update_vm()
567 if (domain == RADEON_GEM_DOMAIN_CPU) radeon_gem_va_update_vm()
778 unsigned domain; radeon_debugfs_gem_info() local
781 domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type); radeon_debugfs_gem_info()
782 switch (domain) { radeon_debugfs_gem_info()
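
radeon_gem_set_domain() above prefers the write domain, falls back to the read domain, and treats an empty request as an error, with CPU-only requests taking a special path. A hedged sketch of that selection rule (DEMO_ names are illustrative):

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_DOMAIN_CPU 0x1     /* illustrative, mirrors the CPU domain bit */

/* Returns 0 for a normal domain, 1 when the caller must take the CPU
 * path (handled specially in the real driver), -EINVAL if empty. */
static int demo_pick_domain(u32 wdomain, u32 rdomain, u32 *out)
{
        u32 domain = wdomain ? wdomain : rdomain;

        if (!domain)
                return -EINVAL;
        *out = domain;
        return domain == DEMO_DOMAIN_CPU ? 1 : 0;
}
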
/linux-4.4.14/drivers/thermal/ti-soc-thermal/
H A Domap4-thermal-data.c84 .domain = "cpu",
224 .domain = "cpu",
257 .domain = "cpu",
/linux-4.4.14/drivers/base/regmap/
H A Dregmap-irq.c32 struct irq_domain *domain; member in struct:regmap_irq_chip_data
308 handle_nested_irq(irq_find_mapping(data->domain, i)); regmap_irq_thread()
515 d->domain = irq_domain_add_legacy(map->dev->of_node, regmap_add_irq_chip()
519 d->domain = irq_domain_add_linear(map->dev->of_node, regmap_add_irq_chip()
522 if (!d->domain) { regmap_add_irq_chip()
523 dev_err(map->dev, "Failed to create IRQ domain\n"); regmap_add_irq_chip()
542 /* Should really dispose of the domain but... */ regmap_add_irq_chip()
566 irq_domain_remove(d->domain); regmap_del_irq_chip()
604 return irq_create_mapping(data->domain, irq); regmap_irq_get_virq()
613 * domain, allowing devices to just call this even if no domain is
621 return data->domain; regmap_irq_get_domain()
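
regmap_add_irq_chip() above picks a legacy domain when the caller supplies a pre-allocated Linux irq base and a linear domain otherwise. A hedged sketch of that choice (the demo_ wrapper name is illustrative; the irq_domain calls are the real API):

#include <linux/irqdomain.h>
#include <linux/of.h>

/* Pre-allocated virq range => legacy domain; otherwise linear. */
static struct irq_domain *demo_add_domain(struct device_node *np,
                                          int irq_base, unsigned int num_irqs,
                                          const struct irq_domain_ops *ops,
                                          void *host_data)
{
        if (irq_base > 0)
                return irq_domain_add_legacy(np, num_irqs, irq_base, 0,
                                             ops, host_data);
        return irq_domain_add_linear(np, num_irqs, ops, host_data);
}
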
/linux-4.4.14/security/apparmor/
H A Ddomain.c4 * This file contains AppArmor policy attachment and domain transitions
26 #include "include/domain.h"
34 * aa_free_domain_entries - free entries in a domain table
35 * @domain: the domain table to free (MAYBE NULL)
37 void aa_free_domain_entries(struct aa_domain *domain) aa_free_domain_entries() argument
40 if (domain) { aa_free_domain_entries()
41 if (!domain->table) aa_free_domain_entries()
44 for (i = 0; i < domain->size; i++) aa_free_domain_entries()
45 kzfree(domain->table[i]); aa_free_domain_entries()
46 kzfree(domain->table); aa_free_domain_entries()
47 domain->table = NULL; aa_free_domain_entries()
56 * to trace the new domain
458 * Policy has specified a domain transition, if no_new_privs then apparmor_bprm_set_creds()
616 * Fail explicitly requested domain transitions if no_new_privs. aa_change_hat()
769 * Fail explicitly requested domain transitions if no_new_privs aa_change_profile()
825 /* check if tracing task is allowed to trace target domain */ aa_change_profile()
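
aa_free_domain_entries() above frees each table entry, then the table itself, then clears the pointer so a repeated call is harmless. The same pattern with a simplified stand-in type (demo_ names are illustrative):

#include <linux/slab.h>

struct demo_domain {            /* simplified stand-in for struct aa_domain */
        int size;
        char **table;
};

/* Free entries, then the table, then poison the pointer. */
static void demo_free_domain_entries(struct demo_domain *domain)
{
        int i;

        if (!domain || !domain->table)
                return;
        for (i = 0; i < domain->size; i++)
                kzfree(domain->table[i]);
        kzfree(domain->table);
        domain->table = NULL;
}
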
/linux-4.4.14/drivers/sh/
H A Dpm_runtime.c42 of_find_node_with_property(NULL, "#power-domain-cells")) sh_pm_runtime_init()
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/
H A Dtegra.h25 struct iommu_domain *domain; member in struct:nvkm_device_tegra::__anon4315
/linux-4.4.14/drivers/gpu/drm/nouveau/
H A Dnouveau_gem.h20 uint32_t domain, uint32_t tile_mode,
/linux-4.4.14/include/uapi/linux/cifs/
H A Dcifs_mount.h20 #define CIFS_MAX_DOMAINNAME_LEN 256 /* max fully qualified domain name */
/linux-4.4.14/arch/blackfin/kernel/
H A Dentry.S38 * there is no need to unstall the root domain by ourselves
