1 /*
2  * Copyright (c) 2006, Intel Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Copyright (C) 2006-2008 Intel Corporation
18  * Author: Ashok Raj <ashok.raj@intel.com>
19  * Author: Shaohua Li <shaohua.li@intel.com>
20  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
21  *
22  * This file implements early detection/parsing of Remapping Devices
23  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
24  * tables.
25  *
26  * These routines are used by both DMA-remapping and Interrupt-remapping
27  */
28 
29 #define pr_fmt(fmt)     "DMAR: " fmt
30 
31 #include <linux/pci.h>
32 #include <linux/dmar.h>
33 #include <linux/iova.h>
34 #include <linux/intel-iommu.h>
35 #include <linux/timer.h>
36 #include <linux/irq.h>
37 #include <linux/interrupt.h>
38 #include <linux/tboot.h>
39 #include <linux/dmi.h>
40 #include <linux/slab.h>
41 #include <linux/iommu.h>
42 #include <asm/irq_remapping.h>
43 #include <asm/iommu_table.h>
44 
45 #include "irq_remapping.h"
46 
47 typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
48 struct dmar_res_callback {
49 	dmar_res_handler_t	cb[ACPI_DMAR_TYPE_RESERVED];
50 	void			*arg[ACPI_DMAR_TYPE_RESERVED];
51 	bool			ignore_unhandled;
52 	bool			print_entry;
53 };
54 
55 /*
56  * Assumptions:
57  * 1) The hotplug framework guarantees that a DMAR unit will be hot-added
58  *    before IO devices managed by that unit.
59  * 2) The hotplug framework guarantees that a DMAR unit will be hot-removed
60  *    after IO devices managed by that unit.
61  * 3) Hotplug events are rare.
62  *
63  * Locking rules for DMA and interrupt remapping related global data structures:
64  * 1) Use dmar_global_lock in process context
65  * 2) Use RCU in interrupt context
66  */
67 DECLARE_RWSEM(dmar_global_lock);
68 LIST_HEAD(dmar_drhd_units);
69 
70 struct acpi_table_header * __initdata dmar_tbl;
71 static acpi_size dmar_tbl_size;
72 static int dmar_dev_scope_status = 1;
73 static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
74 
75 static int alloc_iommu(struct dmar_drhd_unit *drhd);
76 static void free_iommu(struct intel_iommu *iommu);
77 
78 static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
79 {
80 	/*
81 	 * add INCLUDE_ALL at the tail, so a scan of the list will find it
82 	 * at the very end.
83 	 */
84 	if (drhd->include_all)
85 		list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
86 	else
87 		list_add_rcu(&drhd->list, &dmar_drhd_units);
88 }
89 
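/*
 * Count the device scope entries between @start and @end (endpoint, bridge
 * and ACPI namespace devices) and allocate a zeroed array of
 * struct dmar_dev_scope big enough to hold them.  *cnt is set to the number
 * of entries found; NULL is returned when there are none or when the
 * allocation fails.
 */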
90 void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
91 {
92 	struct acpi_dmar_device_scope *scope;
93 
94 	*cnt = 0;
95 	while (start < end) {
96 		scope = start;
97 		if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
98 		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
99 		    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
100 			(*cnt)++;
101 		else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
102 			scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
103 			pr_warn("Unsupported device scope\n");
104 		}
105 		start += scope->length;
106 	}
107 	if (*cnt == 0)
108 		return NULL;
109 
110 	return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
111 }
112 
113 void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
114 {
115 	int i;
116 	struct device *tmp_dev;
117 
118 	if (*devices && *cnt) {
119 		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
120 			put_device(tmp_dev);
121 		kfree(*devices);
122 	}
123 
124 	*devices = NULL;
125 	*cnt = 0;
126 }
127 
128 /* Optimize out kzalloc()/kfree() for normal cases */
129 static char dmar_pci_notify_info_buf[64];
130 
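/*
 * Build a notification record for @dev.  For BUS_NOTIFY_ADD_DEVICE the PCI
 * path from the root bus down to the device is recorded so that it can be
 * matched against ACPI device scope entries later.  Small records reuse the
 * static buffer above to avoid an allocation in the common case.
 */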
131 static struct dmar_pci_notify_info *
132 dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
133 {
134 	int level = 0;
135 	size_t size;
136 	struct pci_dev *tmp;
137 	struct dmar_pci_notify_info *info;
138 
139 	BUG_ON(dev->is_virtfn);
140 
141 	/* Only generate path[] for device addition event */
142 	if (event == BUS_NOTIFY_ADD_DEVICE)
143 		for (tmp = dev; tmp; tmp = tmp->bus->self)
144 			level++;
145 
146 	size = sizeof(*info) + level * sizeof(struct acpi_dmar_pci_path);
147 	if (size <= sizeof(dmar_pci_notify_info_buf)) {
148 		info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
149 	} else {
150 		info = kzalloc(size, GFP_KERNEL);
151 		if (!info) {
152 			pr_warn("Out of memory when allocating notify_info "
153 				"for %s.\n", pci_name(dev));
154 			if (dmar_dev_scope_status == 0)
155 				dmar_dev_scope_status = -ENOMEM;
156 			return NULL;
157 		}
158 	}
159 
160 	info->event = event;
161 	info->dev = dev;
162 	info->seg = pci_domain_nr(dev->bus);
163 	info->level = level;
164 	if (event == BUS_NOTIFY_ADD_DEVICE) {
165 		for (tmp = dev; tmp; tmp = tmp->bus->self) {
166 			level--;
167 			info->path[level].bus = tmp->bus->number;
168 			info->path[level].device = PCI_SLOT(tmp->devfn);
169 			info->path[level].function = PCI_FUNC(tmp->devfn);
170 			if (pci_is_root_bus(tmp->bus))
171 				info->bus = tmp->bus->number;
172 		}
173 	}
174 
175 	return info;
176 }
177 
178 static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
179 {
180 	if ((void *)info != dmar_pci_notify_info_buf)
181 		kfree(info);
182 }
183 
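/*
 * Check whether the PCI path recorded in @info matches the ACPI device
 * scope path starting at @bus.  As a workaround for broken RMRR entries,
 * a single-element path is also compared directly against the leaf device.
 */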
184 static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
185 				struct acpi_dmar_pci_path *path, int count)
186 {
187 	int i;
188 
189 	if (info->bus != bus)
190 		goto fallback;
191 	if (info->level != count)
192 		goto fallback;
193 
194 	for (i = 0; i < count; i++) {
195 		if (path[i].device != info->path[i].device ||
196 		    path[i].function != info->path[i].function)
197 			goto fallback;
198 	}
199 
200 	return true;
201 
202 fallback:
203 
204 	if (count != 1)
205 		return false;
206 
207 	i = info->level - 1;
208 	if (bus              == info->path[i].bus &&
209 	    path[0].device   == info->path[i].device &&
210 	    path[0].function == info->path[i].function) {
211 		pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
212 			bus, path[0].device, path[0].function);
213 		return true;
214 	}
215 
216 	return false;
217 }
218 
219 /* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
220 int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
221 			  void *start, void*end, u16 segment,
222 			  struct dmar_dev_scope *devices,
223 			  int devices_cnt)
224 {
225 	int i, level;
226 	struct device *tmp, *dev = &info->dev->dev;
227 	struct acpi_dmar_device_scope *scope;
228 	struct acpi_dmar_pci_path *path;
229 
230 	if (segment != info->seg)
231 		return 0;
232 
233 	for (; start < end; start += scope->length) {
234 		scope = start;
235 		if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
236 		    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
237 			continue;
238 
239 		path = (struct acpi_dmar_pci_path *)(scope + 1);
240 		level = (scope->length - sizeof(*scope)) / sizeof(*path);
241 		if (!dmar_match_pci_path(info, scope->bus, path, level))
242 			continue;
243 
244 		if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT) ^
245 		    (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL)) {
246 			pr_warn("Device scope type does not match for %s\n",
247 				pci_name(info->dev));
248 			return -EINVAL;
249 		}
250 
251 		for_each_dev_scope(devices, devices_cnt, i, tmp)
252 			if (tmp == NULL) {
253 				devices[i].bus = info->dev->bus->number;
254 				devices[i].devfn = info->dev->devfn;
255 				rcu_assign_pointer(devices[i].dev,
256 						   get_device(dev));
257 				return 1;
258 			}
259 		BUG_ON(i >= devices_cnt);
260 	}
261 
262 	return 0;
263 }
264 
265 int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
266 			  struct dmar_dev_scope *devices, int count)
267 {
268 	int index;
269 	struct device *tmp;
270 
271 	if (info->seg != segment)
272 		return 0;
273 
274 	for_each_active_dev_scope(devices, count, index, tmp)
275 		if (tmp == &info->dev->dev) {
276 			RCU_INIT_POINTER(devices[index].dev, NULL);
277 			synchronize_rcu();
278 			put_device(tmp);
279 			return 1;
280 		}
281 
282 	return 0;
283 }
284 
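/*
 * Register a newly added PCI device with the DRHD unit that explicitly
 * lists it in its device scope (INCLUDE_ALL units are skipped), then
 * notify the DMA-remapping code about the new device.
 */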
285 static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
286 {
287 	int ret = 0;
288 	struct dmar_drhd_unit *dmaru;
289 	struct acpi_dmar_hardware_unit *drhd;
290 
291 	for_each_drhd_unit(dmaru) {
292 		if (dmaru->include_all)
293 			continue;
294 
295 		drhd = container_of(dmaru->hdr,
296 				    struct acpi_dmar_hardware_unit, header);
297 		ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
298 				((void *)drhd) + drhd->header.length,
299 				dmaru->segment,
300 				dmaru->devices, dmaru->devices_cnt);
301 		if (ret != 0)
302 			break;
303 	}
304 	if (ret >= 0)
305 		ret = dmar_iommu_notify_scope_dev(info);
306 	if (ret < 0 && dmar_dev_scope_status == 0)
307 		dmar_dev_scope_status = ret;
308 
309 	return ret;
310 }
311 
312 static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
313 {
314 	struct dmar_drhd_unit *dmaru;
315 
316 	for_each_drhd_unit(dmaru)
317 		if (dmar_remove_dev_scope(info, dmaru->segment,
318 			dmaru->devices, dmaru->devices_cnt))
319 			break;
320 	dmar_iommu_notify_scope_dev(info);
321 }
322 
323 static int dmar_pci_bus_notifier(struct notifier_block *nb,
324 				 unsigned long action, void *data)
325 {
326 	struct pci_dev *pdev = to_pci_dev(data);
327 	struct dmar_pci_notify_info *info;
328 
329 	/* Only care about add/remove events for physical functions */
330 	if (pdev->is_virtfn)
331 		return NOTIFY_DONE;
332 	if (action != BUS_NOTIFY_ADD_DEVICE &&
333 	    action != BUS_NOTIFY_REMOVED_DEVICE)
334 		return NOTIFY_DONE;
335 
336 	info = dmar_alloc_pci_notify_info(pdev, action);
337 	if (!info)
338 		return NOTIFY_DONE;
339 
340 	down_write(&dmar_global_lock);
341 	if (action == BUS_NOTIFY_ADD_DEVICE)
342 		dmar_pci_bus_add_dev(info);
343 	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
344 		dmar_pci_bus_del_dev(info);
345 	up_write(&dmar_global_lock);
346 
347 	dmar_free_pci_notify_info(info);
348 
349 	return NOTIFY_OK;
350 }
351 
352 static struct notifier_block dmar_pci_bus_nb = {
353 	.notifier_call = dmar_pci_bus_notifier,
354 	.priority = INT_MIN,
355 };
356 
357 static struct dmar_drhd_unit *
358 dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
359 {
360 	struct dmar_drhd_unit *dmaru;
361 
362 	list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
363 		if (dmaru->segment == drhd->segment &&
364 		    dmaru->reg_base_addr == drhd->address)
365 			return dmaru;
366 
367 	return NULL;
368 }
369 
370 /**
371  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
372  * structure which uniquely represents one DMA remapping hardware unit
373  * present in the platform
374  */
375 static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
376 {
377 	struct acpi_dmar_hardware_unit *drhd;
378 	struct dmar_drhd_unit *dmaru;
379 	int ret = 0;
380 
381 	drhd = (struct acpi_dmar_hardware_unit *)header;
382 	dmaru = dmar_find_dmaru(drhd);
383 	if (dmaru)
384 		goto out;
385 
386 	dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
387 	if (!dmaru)
388 		return -ENOMEM;
389 
390 	/*
391 	 * If header is allocated from slab by ACPI _DSM method, we need to
392 	 * copy the content because the memory buffer will be freed on return.
393 	 */
394 	dmaru->hdr = (void *)(dmaru + 1);
395 	memcpy(dmaru->hdr, header, header->length);
396 	dmaru->reg_base_addr = drhd->address;
397 	dmaru->segment = drhd->segment;
398 	dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
399 	dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
400 					      ((void *)drhd) + drhd->header.length,
401 					      &dmaru->devices_cnt);
402 	if (dmaru->devices_cnt && dmaru->devices == NULL) {
403 		kfree(dmaru);
404 		return -ENOMEM;
405 	}
406 
407 	ret = alloc_iommu(dmaru);
408 	if (ret) {
409 		dmar_free_dev_scope(&dmaru->devices,
410 				    &dmaru->devices_cnt);
411 		kfree(dmaru);
412 		return ret;
413 	}
414 	dmar_register_drhd_unit(dmaru);
415 
416 out:
417 	if (arg)
418 		(*(int *)arg)++;
419 
420 	return 0;
421 }
422 
423 static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
424 {
425 	if (dmaru->devices && dmaru->devices_cnt)
426 		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
427 	if (dmaru->iommu)
428 		free_iommu(dmaru->iommu);
429 	kfree(dmaru);
430 }
431 
432 static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
433 				      void *arg)
434 {
435 	struct acpi_dmar_andd *andd = (void *)header;
436 
437 	/* Check for NUL termination within the designated length */
438 	if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
439 		WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
440 			   "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
441 			   "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
442 			   dmi_get_system_info(DMI_BIOS_VENDOR),
443 			   dmi_get_system_info(DMI_BIOS_VERSION),
444 			   dmi_get_system_info(DMI_PRODUCT_VERSION));
445 		return -EINVAL;
446 	}
447 	pr_info("ANDD device: %x name: %s\n", andd->device_number,
448 		andd->device_name);
449 
450 	return 0;
451 }
452 
453 #ifdef CONFIG_ACPI_NUMA
454 static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
455 {
456 	struct acpi_dmar_rhsa *rhsa;
457 	struct dmar_drhd_unit *drhd;
458 
459 	rhsa = (struct acpi_dmar_rhsa *)header;
460 	for_each_drhd_unit(drhd) {
461 		if (drhd->reg_base_addr == rhsa->base_address) {
462 			int node = acpi_map_pxm_to_node(rhsa->proximity_domain);
463 
464 			if (!node_online(node))
465 				node = -1;
466 			drhd->iommu->node = node;
467 			return 0;
468 		}
469 	}
470 	WARN_TAINT(
471 		1, TAINT_FIRMWARE_WORKAROUND,
472 		"Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
473 		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
474 		drhd->reg_base_addr,
475 		dmi_get_system_info(DMI_BIOS_VENDOR),
476 		dmi_get_system_info(DMI_BIOS_VERSION),
477 		dmi_get_system_info(DMI_PRODUCT_VERSION));
478 
479 	return 0;
480 }
481 #else
482 #define	dmar_parse_one_rhsa		dmar_res_noop
483 #endif
484 
485 static void __init
486 dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
487 {
488 	struct acpi_dmar_hardware_unit *drhd;
489 	struct acpi_dmar_reserved_memory *rmrr;
490 	struct acpi_dmar_atsr *atsr;
491 	struct acpi_dmar_rhsa *rhsa;
492 
493 	switch (header->type) {
494 	case ACPI_DMAR_TYPE_HARDWARE_UNIT:
495 		drhd = container_of(header, struct acpi_dmar_hardware_unit,
496 				    header);
497 		pr_info("DRHD base: %#016Lx flags: %#x\n",
498 			(unsigned long long)drhd->address, drhd->flags);
499 		break;
500 	case ACPI_DMAR_TYPE_RESERVED_MEMORY:
501 		rmrr = container_of(header, struct acpi_dmar_reserved_memory,
502 				    header);
503 		pr_info("RMRR base: %#016Lx end: %#016Lx\n",
504 			(unsigned long long)rmrr->base_address,
505 			(unsigned long long)rmrr->end_address);
506 		break;
507 	case ACPI_DMAR_TYPE_ROOT_ATS:
508 		atsr = container_of(header, struct acpi_dmar_atsr, header);
509 		pr_info("ATSR flags: %#x\n", atsr->flags);
510 		break;
511 	case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
512 		rhsa = container_of(header, struct acpi_dmar_rhsa, header);
513 		pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
514 		       (unsigned long long)rhsa->base_address,
515 		       rhsa->proximity_domain);
516 		break;
517 	case ACPI_DMAR_TYPE_NAMESPACE:
518 		/* We don't print this here because we need to sanity-check
519 		   it first. So print it in dmar_parse_one_andd() instead. */
520 		break;
521 	}
522 }
523 
524 /**
525  * dmar_table_detect - checks to see if the platform supports DMAR devices
526  */
527 static int __init dmar_table_detect(void)
528 {
529 	acpi_status status = AE_OK;
530 
531 	/* if we can find the DMAR table, then there are DMAR devices */
532 	status = acpi_get_table_with_size(ACPI_SIG_DMAR, 0,
533 				(struct acpi_table_header **)&dmar_tbl,
534 				&dmar_tbl_size);
535 
536 	if (ACPI_SUCCESS(status) && !dmar_tbl) {
537 		pr_warn("Unable to map DMAR\n");
538 		status = AE_NOT_FOUND;
539 	}
540 
541 	return (ACPI_SUCCESS(status) ? 1 : 0);
542 }
543 
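/*
 * Walk the DMAR remapping structures in [start, start + len) and dispatch
 * each entry to the matching callback in @cb.  Walking stops at the first
 * callback error, at a zero-length entry, or when an entry would run past
 * the end of the table.
 */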
544 static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
545 				       size_t len, struct dmar_res_callback *cb)
546 {
547 	int ret = 0;
548 	struct acpi_dmar_header *iter, *next;
549 	struct acpi_dmar_header *end = ((void *)start) + len;
550 
551 	for (iter = start; iter < end && ret == 0; iter = next) {
552 		next = (void *)iter + iter->length;
553 		if (iter->length == 0) {
554 			/* Avoid looping forever on bad ACPI tables */
555 			pr_debug(FW_BUG "Invalid 0-length structure\n");
556 			break;
557 		} else if (next > end) {
558 			/* Avoid passing table end */
559 			pr_warn(FW_BUG "Record passes table end\n");
560 			ret = -EINVAL;
561 			break;
562 		}
563 
564 		if (cb->print_entry)
565 			dmar_table_print_dmar_entry(iter);
566 
567 		if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
568 			/* continue for forward compatibility */
569 			pr_debug("Unknown DMAR structure type %d\n",
570 				 iter->type);
571 		} else if (cb->cb[iter->type]) {
572 			ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
573 		} else if (!cb->ignore_unhandled) {
574 			pr_warn("No handler for DMAR structure type %d\n",
575 				iter->type);
576 			ret = -EINVAL;
577 		}
578 	}
579 
580 	return ret;
581 }
582 
583 static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
584 				       struct dmar_res_callback *cb)
585 {
586 	return dmar_walk_remapping_entries((void *)(dmar + 1),
587 			dmar->header.length - sizeof(*dmar), cb);
588 }
589 
590 /**
591  * parse_dmar_table - parses the DMA reporting table
592  */
593 static int __init
594 parse_dmar_table(void)
595 {
596 	struct acpi_table_dmar *dmar;
597 	int ret = 0;
598 	int drhd_count = 0;
599 	struct dmar_res_callback cb = {
600 		.print_entry = true,
601 		.ignore_unhandled = true,
602 		.arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
603 		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
604 		.cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
605 		.cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
606 		.cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
607 		.cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
608 	};
609 
610 	/*
611 	 * Do it again, the earlier dmar_tbl mapping could have been done
612 	 * with a fixed map.
613 	 */
614 	dmar_table_detect();
615 
616 	/*
617 	 * ACPI tables may not be DMA protected by tboot, so use DMAR copy
618 	 * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
619 	 */
620 	dmar_tbl = tboot_get_dmar_table(dmar_tbl);
621 
622 	dmar = (struct acpi_table_dmar *)dmar_tbl;
623 	if (!dmar)
624 		return -ENODEV;
625 
626 	if (dmar->width < PAGE_SHIFT - 1) {
627 		pr_warn("Invalid DMAR haw\n");
628 		return -EINVAL;
629 	}
630 
631 	pr_info("Host address width %d\n", dmar->width + 1);
632 	ret = dmar_walk_dmar_table(dmar, &cb);
633 	if (ret == 0 && drhd_count == 0)
634 		pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
635 
636 	return ret;
637 }
638 
639 static int dmar_pci_device_match(struct dmar_dev_scope devices[],
640 				 int cnt, struct pci_dev *dev)
641 {
642 	int index;
643 	struct device *tmp;
644 
645 	while (dev) {
646 		for_each_active_dev_scope(devices, cnt, index, tmp)
647 			if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
648 				return 1;
649 
650 		/* Check our parent */
651 		dev = dev->bus->self;
652 	}
653 
654 	return 0;
655 }
656 
657 struct dmar_drhd_unit *
658 dmar_find_matched_drhd_unit(struct pci_dev *dev)
659 {
660 	struct dmar_drhd_unit *dmaru;
661 	struct acpi_dmar_hardware_unit *drhd;
662 
663 	dev = pci_physfn(dev);
664 
665 	rcu_read_lock();
666 	for_each_drhd_unit(dmaru) {
667 		drhd = container_of(dmaru->hdr,
668 				    struct acpi_dmar_hardware_unit,
669 				    header);
670 
671 		if (dmaru->include_all &&
672 		    drhd->segment == pci_domain_nr(dev->bus))
673 			goto out;
674 
675 		if (dmar_pci_device_match(dmaru->devices,
676 					  dmaru->devices_cnt, dev))
677 			goto out;
678 	}
679 	dmaru = NULL;
680 out:
681 	rcu_read_unlock();
682 
683 	return dmaru;
684 }
685 
686 static void __init dmar_acpi_insert_dev_scope(u8 device_number,
687 					      struct acpi_device *adev)
688 {
689 	struct dmar_drhd_unit *dmaru;
690 	struct acpi_dmar_hardware_unit *drhd;
691 	struct acpi_dmar_device_scope *scope;
692 	struct device *tmp;
693 	int i;
694 	struct acpi_dmar_pci_path *path;
695 
696 	for_each_drhd_unit(dmaru) {
697 		drhd = container_of(dmaru->hdr,
698 				    struct acpi_dmar_hardware_unit,
699 				    header);
700 
701 		for (scope = (void *)(drhd + 1);
702 		     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
703 		     scope = ((void *)scope) + scope->length) {
704 			if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
705 				continue;
706 			if (scope->enumeration_id != device_number)
707 				continue;
708 
709 			path = (void *)(scope + 1);
710 			pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
711 				dev_name(&adev->dev), dmaru->reg_base_addr,
712 				scope->bus, path->device, path->function);
713 			for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
714 				if (tmp == NULL) {
715 					dmaru->devices[i].bus = scope->bus;
716 					dmaru->devices[i].devfn = PCI_DEVFN(path->device,
717 									    path->function);
718 					rcu_assign_pointer(dmaru->devices[i].dev,
719 							   get_device(&adev->dev));
720 					return;
721 				}
722 			BUG_ON(i >= dmaru->devices_cnt);
723 		}
724 	}
725 	pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
726 		device_number, dev_name(&adev->dev));
727 }
728 
729 static int __init dmar_acpi_dev_scope_init(void)
730 {
731 	struct acpi_dmar_andd *andd;
732 
733 	if (dmar_tbl == NULL)
734 		return -ENODEV;
735 
736 	for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
737 	     ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
738 	     andd = ((void *)andd) + andd->header.length) {
739 		if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
740 			acpi_handle h;
741 			struct acpi_device *adev;
742 
743 			if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
744 							  andd->device_name,
745 							  &h))) {
746 				pr_err("Failed to find handle for ACPI object %s\n",
747 				       andd->device_name);
748 				continue;
749 			}
750 			if (acpi_bus_get_device(h, &adev)) {
751 				pr_err("Failed to get device for ACPI object %s\n",
752 				       andd->device_name);
753 				continue;
754 			}
755 			dmar_acpi_insert_dev_scope(andd->device_number, adev);
756 		}
757 	}
758 	return 0;
759 }
760 
761 int __init dmar_dev_scope_init(void)
762 {
763 	struct pci_dev *dev = NULL;
764 	struct dmar_pci_notify_info *info;
765 
766 	if (dmar_dev_scope_status != 1)
767 		return dmar_dev_scope_status;
768 
769 	if (list_empty(&dmar_drhd_units)) {
770 		dmar_dev_scope_status = -ENODEV;
771 	} else {
772 		dmar_dev_scope_status = 0;
773 
774 		dmar_acpi_dev_scope_init();
775 
776 		for_each_pci_dev(dev) {
777 			if (dev->is_virtfn)
778 				continue;
779 
780 			info = dmar_alloc_pci_notify_info(dev,
781 					BUS_NOTIFY_ADD_DEVICE);
782 			if (!info) {
783 				return dmar_dev_scope_status;
784 			} else {
785 				dmar_pci_bus_add_dev(info);
786 				dmar_free_pci_notify_info(info);
787 			}
788 		}
789 
790 		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
791 	}
792 
793 	return dmar_dev_scope_status;
794 }
795 
796 
797 int __init dmar_table_init(void)
798 {
799 	static int dmar_table_initialized;
800 	int ret;
801 
802 	if (dmar_table_initialized == 0) {
803 		ret = parse_dmar_table();
804 		if (ret < 0) {
805 			if (ret != -ENODEV)
806 				pr_info("Parse DMAR table failure.\n");
807 		} else  if (list_empty(&dmar_drhd_units)) {
808 			pr_info("No DMAR devices found\n");
809 			ret = -ENODEV;
810 		}
811 
812 		if (ret < 0)
813 			dmar_table_initialized = ret;
814 		else
815 			dmar_table_initialized = 1;
816 	}
817 
818 	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
819 }
820 
821 static void warn_invalid_dmar(u64 addr, const char *message)
822 {
823 	WARN_TAINT_ONCE(
824 		1, TAINT_FIRMWARE_WORKAROUND,
825 		"Your BIOS is broken; DMAR reported at address %llx%s!\n"
826 		"BIOS vendor: %s; Ver: %s; Product Version: %s\n",
827 		addr, message,
828 		dmi_get_system_info(DMI_BIOS_VENDOR),
829 		dmi_get_system_info(DMI_BIOS_VERSION),
830 		dmi_get_system_info(DMI_PRODUCT_VERSION));
831 }
832 
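/*
 * Sanity-check one DRHD entry: temporarily map its register page and verify
 * that the capability registers do not read back as all ones.  @arg selects
 * ioremap() (hotplug path) vs. early_ioremap() (boot-time detection).
 */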
833 static int __ref
834 dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
835 {
836 	struct acpi_dmar_hardware_unit *drhd;
837 	void __iomem *addr;
838 	u64 cap, ecap;
839 
840 	drhd = (void *)entry;
841 	if (!drhd->address) {
842 		warn_invalid_dmar(0, "");
843 		return -EINVAL;
844 	}
845 
846 	if (arg)
847 		addr = ioremap(drhd->address, VTD_PAGE_SIZE);
848 	else
849 		addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
850 	if (!addr) {
851 		pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
852 		return -EINVAL;
853 	}
854 
855 	cap = dmar_readq(addr + DMAR_CAP_REG);
856 	ecap = dmar_readq(addr + DMAR_ECAP_REG);
857 
858 	if (arg)
859 		iounmap(addr);
860 	else
861 		early_iounmap(addr, VTD_PAGE_SIZE);
862 
863 	if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
864 		warn_invalid_dmar(drhd->address, " returns all ones");
865 		return -EINVAL;
866 	}
867 
868 	return 0;
869 }
870 
871 int __init detect_intel_iommu(void)
872 {
873 	int ret;
874 	struct dmar_res_callback validate_drhd_cb = {
875 		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
876 		.ignore_unhandled = true,
877 	};
878 
879 	down_write(&dmar_global_lock);
880 	ret = dmar_table_detect();
881 	if (ret)
882 		ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
883 					    &validate_drhd_cb);
884 	if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
885 		iommu_detected = 1;
886 		/* Make sure ACS will be enabled */
887 		pci_request_acs();
888 	}
889 
890 #ifdef CONFIG_X86
891 	if (ret)
892 		x86_init.iommu.iommu_init = intel_iommu_init;
893 #endif
894 
895 	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
896 	dmar_tbl = NULL;
897 	up_write(&dmar_global_lock);
898 
899 	return ret ? 1 : -ENODEV;
900 }
901 
902 
903 static void unmap_iommu(struct intel_iommu *iommu)
904 {
905 	iounmap(iommu->reg);
906 	release_mem_region(iommu->reg_phys, iommu->reg_size);
907 }
908 
909 /**
910  * map_iommu: map the iommu's registers
911  * @iommu: the iommu to map
912  * @phys_addr: the physical address of the base register
913  *
914  * Memory map the iommu's registers.  Start w/ a single page, and
915  * possibly expand if that turns out to be insufficient.
916  */
917 static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
918 {
919 	int map_size, err=0;
920 
921 	iommu->reg_phys = phys_addr;
922 	iommu->reg_size = VTD_PAGE_SIZE;
923 
924 	if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
925 		pr_err("Can't reserve memory\n");
926 		err = -EBUSY;
927 		goto out;
928 	}
929 
930 	iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
931 	if (!iommu->reg) {
932 		pr_err("Can't map the region\n");
933 		err = -ENOMEM;
934 		goto release;
935 	}
936 
937 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
938 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
939 
940 	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
941 		err = -EINVAL;
942 		warn_invalid_dmar(phys_addr, " returns all ones");
943 		goto unmap;
944 	}
945 
946 	/* the registers might be more than one page */
947 	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
948 			 cap_max_fault_reg_offset(iommu->cap));
949 	map_size = VTD_PAGE_ALIGN(map_size);
950 	if (map_size > iommu->reg_size) {
951 		iounmap(iommu->reg);
952 		release_mem_region(iommu->reg_phys, iommu->reg_size);
953 		iommu->reg_size = map_size;
954 		if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
955 					iommu->name)) {
956 			pr_err("Can't reserve memory\n");
957 			err = -EBUSY;
958 			goto out;
959 		}
960 		iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
961 		if (!iommu->reg) {
962 			pr_err("Can't map the region\n");
963 			err = -ENOMEM;
964 			goto release;
965 		}
966 	}
967 	err = 0;
968 	goto out;
969 
970 unmap:
971 	iounmap(iommu->reg);
972 release:
973 	release_mem_region(iommu->reg_phys, iommu->reg_size);
974 out:
975 	return err;
976 }
977 
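/*
 * Allocate a unique sequence id for this IOMMU from the global bitmap and
 * derive its "dmarN" name from it.  Returns -1 when all
 * DMAR_UNITS_SUPPORTED ids are already in use.
 */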
978 static int dmar_alloc_seq_id(struct intel_iommu *iommu)
979 {
980 	iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
981 					    DMAR_UNITS_SUPPORTED);
982 	if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
983 		iommu->seq_id = -1;
984 	} else {
985 		set_bit(iommu->seq_id, dmar_seq_ids);
986 		sprintf(iommu->name, "dmar%d", iommu->seq_id);
987 	}
988 
989 	return iommu->seq_id;
990 }
991 
992 static void dmar_free_seq_id(struct intel_iommu *iommu)
993 {
994 	if (iommu->seq_id >= 0) {
995 		clear_bit(iommu->seq_id, dmar_seq_ids);
996 		iommu->seq_id = -1;
997 	}
998 }
999 
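/*
 * Allocate and initialize the intel_iommu structure for one DRHD unit:
 * assign a sequence id, map the register set, read the capability registers
 * and mirror any translation/remapping state already enabled by firmware
 * into iommu->gcmd.
 */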
1000 static int alloc_iommu(struct dmar_drhd_unit *drhd)
1001 {
1002 	struct intel_iommu *iommu;
1003 	u32 ver, sts;
1004 	int agaw = 0;
1005 	int msagaw = 0;
1006 	int err;
1007 
1008 	if (!drhd->reg_base_addr) {
1009 		warn_invalid_dmar(0, "");
1010 		return -EINVAL;
1011 	}
1012 
1013 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1014 	if (!iommu)
1015 		return -ENOMEM;
1016 
1017 	if (dmar_alloc_seq_id(iommu) < 0) {
1018 		pr_err("Failed to allocate seq_id\n");
1019 		err = -ENOSPC;
1020 		goto error;
1021 	}
1022 
1023 	err = map_iommu(iommu, drhd->reg_base_addr);
1024 	if (err) {
1025 		pr_err("Failed to map %s\n", iommu->name);
1026 		goto error_free_seq_id;
1027 	}
1028 
1029 	err = -EINVAL;
1030 	agaw = iommu_calculate_agaw(iommu);
1031 	if (agaw < 0) {
1032 		pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
1033 			iommu->seq_id);
1034 		goto err_unmap;
1035 	}
1036 	msagaw = iommu_calculate_max_sagaw(iommu);
1037 	if (msagaw < 0) {
1038 		pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
1039 			iommu->seq_id);
1040 		goto err_unmap;
1041 	}
1042 	iommu->agaw = agaw;
1043 	iommu->msagaw = msagaw;
1044 	iommu->segment = drhd->segment;
1045 
1046 	iommu->node = -1;
1047 
1048 	ver = readl(iommu->reg + DMAR_VER_REG);
1049 	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
1050 		iommu->name,
1051 		(unsigned long long)drhd->reg_base_addr,
1052 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
1053 		(unsigned long long)iommu->cap,
1054 		(unsigned long long)iommu->ecap);
1055 
1056 	/* Reflect status in gcmd */
1057 	sts = readl(iommu->reg + DMAR_GSTS_REG);
1058 	if (sts & DMA_GSTS_IRES)
1059 		iommu->gcmd |= DMA_GCMD_IRE;
1060 	if (sts & DMA_GSTS_TES)
1061 		iommu->gcmd |= DMA_GCMD_TE;
1062 	if (sts & DMA_GSTS_QIES)
1063 		iommu->gcmd |= DMA_GCMD_QIE;
1064 
1065 	raw_spin_lock_init(&iommu->register_lock);
1066 
1067 	drhd->iommu = iommu;
1068 
1069 	if (intel_iommu_enabled)
1070 		iommu->iommu_dev = iommu_device_create(NULL, iommu,
1071 						       intel_iommu_groups,
1072 						       "%s", iommu->name);
1073 
1074 	return 0;
1075 
1076 err_unmap:
1077 	unmap_iommu(iommu);
1078 error_free_seq_id:
1079 	dmar_free_seq_id(iommu);
1080 error:
1081 	kfree(iommu);
1082 	return err;
1083 }
1084 
1085 static void free_iommu(struct intel_iommu *iommu)
1086 {
1087 	iommu_device_destroy(iommu->iommu_dev);
1088 
1089 	if (iommu->irq) {
1090 		if (iommu->pr_irq) {
1091 			free_irq(iommu->pr_irq, iommu);
1092 			dmar_free_hwirq(iommu->pr_irq);
1093 			iommu->pr_irq = 0;
1094 		}
1095 		free_irq(iommu->irq, iommu);
1096 		dmar_free_hwirq(iommu->irq);
1097 		iommu->irq = 0;
1098 	}
1099 
1100 	if (iommu->qi) {
1101 		free_page((unsigned long)iommu->qi->desc);
1102 		kfree(iommu->qi->desc_status);
1103 		kfree(iommu->qi);
1104 	}
1105 
1106 	if (iommu->reg)
1107 		unmap_iommu(iommu);
1108 
1109 	dmar_free_seq_id(iommu);
1110 	kfree(iommu);
1111 }
1112 
1113 /*
1114  * Reclaim all the submitted descriptors which have completed their work.
1115  */
1116 static inline void reclaim_free_desc(struct q_inval *qi)
1117 {
1118 	while (qi->desc_status[qi->free_tail] == QI_DONE ||
1119 	       qi->desc_status[qi->free_tail] == QI_ABORT) {
1120 		qi->desc_status[qi->free_tail] = QI_FREE;
1121 		qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
1122 		qi->free_cnt++;
1123 	}
1124 }
1125 
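/*
 * Check the fault status register for queued invalidation errors hitting
 * the descriptor at @index.  An IQE is cleared by overwriting the offending
 * descriptor with the wait descriptor; an ITE aborts all pending wait
 * descriptors, in which case -EAGAIN tells the caller to resubmit.
 */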
1126 static int qi_check_fault(struct intel_iommu *iommu, int index)
1127 {
1128 	u32 fault;
1129 	int head, tail;
1130 	struct q_inval *qi = iommu->qi;
1131 	int wait_index = (index + 1) % QI_LENGTH;
1132 
1133 	if (qi->desc_status[wait_index] == QI_ABORT)
1134 		return -EAGAIN;
1135 
1136 	fault = readl(iommu->reg + DMAR_FSTS_REG);
1137 
1138 	/*
1139 	 * If IQE happens, the head points to the descriptor associated
1140 	 * with the error. No new descriptors are fetched until the IQE
1141 	 * is cleared.
1142 	 */
1143 	if (fault & DMA_FSTS_IQE) {
1144 		head = readl(iommu->reg + DMAR_IQH_REG);
1145 		if ((head >> DMAR_IQ_SHIFT) == index) {
1146 			pr_err("VT-d detected invalid descriptor: "
1147 				"low=%llx, high=%llx\n",
1148 				(unsigned long long)qi->desc[index].low,
1149 				(unsigned long long)qi->desc[index].high);
1150 			memcpy(&qi->desc[index], &qi->desc[wait_index],
1151 					sizeof(struct qi_desc));
1152 			__iommu_flush_cache(iommu, &qi->desc[index],
1153 					sizeof(struct qi_desc));
1154 			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
1155 			return -EINVAL;
1156 		}
1157 	}
1158 
1159 	/*
1160 	 * If ITE happens, all pending wait_desc commands are aborted.
1161 	 * No new descriptors are fetched until the ITE is cleared.
1162 	 */
1163 	if (fault & DMA_FSTS_ITE) {
1164 		head = readl(iommu->reg + DMAR_IQH_REG);
1165 		head = ((head >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1166 		head |= 1;
1167 		tail = readl(iommu->reg + DMAR_IQT_REG);
1168 		tail = ((tail >> DMAR_IQ_SHIFT) - 1 + QI_LENGTH) % QI_LENGTH;
1169 
1170 		writel(DMA_FSTS_ITE, iommu->reg + DMAR_FSTS_REG);
1171 
1172 		do {
1173 			if (qi->desc_status[head] == QI_IN_USE)
1174 				qi->desc_status[head] = QI_ABORT;
1175 			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
1176 		} while (head != tail);
1177 
1178 		if (qi->desc_status[wait_index] == QI_ABORT)
1179 			return -EAGAIN;
1180 	}
1181 
1182 	if (fault & DMA_FSTS_ICE)
1183 		writel(DMA_FSTS_ICE, iommu->reg + DMAR_FSTS_REG);
1184 
1185 	return 0;
1186 }
1187 
1188 /*
1189  * Submit the queued invalidation descriptor to the remapping
1190  * hardware unit and wait for its completion.
1191  */
1192 int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
1193 {
1194 	int rc;
1195 	struct q_inval *qi = iommu->qi;
1196 	struct qi_desc *hw, wait_desc;
1197 	int wait_index, index;
1198 	unsigned long flags;
1199 
1200 	if (!qi)
1201 		return 0;
1202 
1203 	hw = qi->desc;
1204 
1205 restart:
1206 	rc = 0;
1207 
1208 	raw_spin_lock_irqsave(&qi->q_lock, flags);
1209 	while (qi->free_cnt < 3) {
1210 		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1211 		cpu_relax();
1212 		raw_spin_lock_irqsave(&qi->q_lock, flags);
1213 	}
1214 
1215 	index = qi->free_head;
1216 	wait_index = (index + 1) % QI_LENGTH;
1217 
1218 	qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE;
1219 
1220 	hw[index] = *desc;
1221 
1222 	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
1223 			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
1224 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
1225 
1226 	hw[wait_index] = wait_desc;
1227 
1228 	__iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc));
1229 	__iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc));
1230 
1231 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
1232 	qi->free_cnt -= 2;
1233 
1234 	/*
1235 	 * update the HW tail register indicating the presence of
1236 	 * new descriptors.
1237 	 */
1238 	writel(qi->free_head << DMAR_IQ_SHIFT, iommu->reg + DMAR_IQT_REG);
1239 
1240 	while (qi->desc_status[wait_index] != QI_DONE) {
1241 		/*
1242 		 * We will leave the interrupts disabled, to prevent the interrupt
1243 		 * context from queueing another cmd while a cmd is already submitted
1244 		 * and waiting for completion on this cpu. This is to avoid
1245 		 * a deadlock where the interrupt context can wait indefinitely
1246 		 * for free slots in the queue.
1247 		 */
1248 		rc = qi_check_fault(iommu, index);
1249 		if (rc)
1250 			break;
1251 
1252 		raw_spin_unlock(&qi->q_lock);
1253 		cpu_relax();
1254 		raw_spin_lock(&qi->q_lock);
1255 	}
1256 
1257 	qi->desc_status[index] = QI_DONE;
1258 
1259 	reclaim_free_desc(qi);
1260 	raw_spin_unlock_irqrestore(&qi->q_lock, flags);
1261 
1262 	if (rc == -EAGAIN)
1263 		goto restart;
1264 
1265 	return rc;
1266 }
1267 
1268 /*
1269  * Flush the global interrupt entry cache.
1270  */
1271 void qi_global_iec(struct intel_iommu *iommu)
1272 {
1273 	struct qi_desc desc;
1274 
1275 	desc.low = QI_IEC_TYPE;
1276 	desc.high = 0;
1277 
1278 	/* should never fail */
1279 	qi_submit_sync(&desc, iommu);
1280 }
1281 
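/*
 * Submit a context-cache invalidation for the given domain id, source id
 * and function mask, with the granularity selected by @type.
 */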
1282 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
1283 		      u64 type)
1284 {
1285 	struct qi_desc desc;
1286 
1287 	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
1288 			| QI_CC_GRAN(type) | QI_CC_TYPE;
1289 	desc.high = 0;
1290 
1291 	qi_submit_sync(&desc, iommu);
1292 }
1293 
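/*
 * Submit an IOTLB invalidation for domain @did covering 2^size_order pages
 * at @addr, requesting read/write draining when the hardware supports it.
 */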
1294 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1295 		    unsigned int size_order, u64 type)
1296 {
1297 	u8 dw = 0, dr = 0;
1298 
1299 	struct qi_desc desc;
1300 	int ih = 0;
1301 
1302 	if (cap_write_drain(iommu->cap))
1303 		dw = 1;
1304 
1305 	if (cap_read_drain(iommu->cap))
1306 		dr = 1;
1307 
1308 	desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1309 		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1310 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1311 		| QI_IOTLB_AM(size_order);
1312 
1313 	qi_submit_sync(&desc, iommu);
1314 }
1315 
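/*
 * Submit a device-IOTLB (ATS) invalidation for source id @sid covering
 * 2^mask pages at @addr, honouring the device's invalidation queue depth.
 */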
1316 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
1317 			u64 addr, unsigned mask)
1318 {
1319 	struct qi_desc desc;
1320 
1321 	if (mask) {
1322 		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
1323 		addr |= (1 << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1324 		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1325 	} else
1326 		desc.high = QI_DEV_IOTLB_ADDR(addr);
1327 
1328 	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1329 		qdep = 0;
1330 
1331 	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1332 		   QI_DIOTLB_TYPE;
1333 
1334 	qi_submit_sync(&desc, iommu);
1335 }
1336 
1337 /*
1338  * Disable Queued Invalidation interface.
1339  */
1340 void dmar_disable_qi(struct intel_iommu *iommu)
1341 {
1342 	unsigned long flags;
1343 	u32 sts;
1344 	cycles_t start_time = get_cycles();
1345 
1346 	if (!ecap_qis(iommu->ecap))
1347 		return;
1348 
1349 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
1350 
1351 	sts =  readl(iommu->reg + DMAR_GSTS_REG);
1352 	if (!(sts & DMA_GSTS_QIES))
1353 		goto end;
1354 
1355 	/*
1356 	 * Give the HW a chance to complete the pending invalidation requests.
1357 	 */
1358 	while ((readl(iommu->reg + DMAR_IQT_REG) !=
1359 		readl(iommu->reg + DMAR_IQH_REG)) &&
1360 		(DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time)))
1361 		cpu_relax();
1362 
1363 	iommu->gcmd &= ~DMA_GCMD_QIE;
1364 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1365 
1366 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
1367 		      !(sts & DMA_GSTS_QIES), sts);
1368 end:
1369 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1370 }
1371 
1372 /*
1373  * Enable queued invalidation.
1374  */
1375 static void __dmar_enable_qi(struct intel_iommu *iommu)
1376 {
1377 	u32 sts;
1378 	unsigned long flags;
1379 	struct q_inval *qi = iommu->qi;
1380 
1381 	qi->free_head = qi->free_tail = 0;
1382 	qi->free_cnt = QI_LENGTH;
1383 
1384 	raw_spin_lock_irqsave(&iommu->register_lock, flags);
1385 
1386 	/* write zero to the tail reg */
1387 	writel(0, iommu->reg + DMAR_IQT_REG);
1388 
1389 	dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc));
1390 
1391 	iommu->gcmd |= DMA_GCMD_QIE;
1392 	writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG);
1393 
1394 	/* Make sure hardware complete it */
1395 	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts);
1396 
1397 	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
1398 }
1399 
1400 /*
1401  * Enable Queued Invalidation interface. This is a must to support
1402  * interrupt-remapping. Also used by DMA-remapping, which replaces
1403  * register based IOTLB invalidation.
1404  */
1405 int dmar_enable_qi(struct intel_iommu *iommu)
1406 {
1407 	struct q_inval *qi;
1408 	struct page *desc_page;
1409 
1410 	if (!ecap_qis(iommu->ecap))
1411 		return -ENOENT;
1412 
1413 	/*
1414 	 * queued invalidation is already setup and enabled.
1415 	 */
1416 	if (iommu->qi)
1417 		return 0;
1418 
1419 	iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC);
1420 	if (!iommu->qi)
1421 		return -ENOMEM;
1422 
1423 	qi = iommu->qi;
1424 
1425 
1426 	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
1427 	if (!desc_page) {
1428 		kfree(qi);
1429 		iommu->qi = NULL;
1430 		return -ENOMEM;
1431 	}
1432 
1433 	qi->desc = page_address(desc_page);
1434 
1435 	qi->desc_status = kzalloc(QI_LENGTH * sizeof(int), GFP_ATOMIC);
1436 	if (!qi->desc_status) {
1437 		free_page((unsigned long) qi->desc);
1438 		kfree(qi);
1439 		iommu->qi = NULL;
1440 		return -ENOMEM;
1441 	}
1442 
1443 	raw_spin_lock_init(&qi->q_lock);
1444 
1445 	__dmar_enable_qi(iommu);
1446 
1447 	return 0;
1448 }
1449 
1450 /* iommu interrupt handling. Most stuff are MSI-like. */
1451 
1452 enum faulttype {
1453 	DMA_REMAP,
1454 	INTR_REMAP,
1455 	UNKNOWN,
1456 };
1457 
1458 static const char *dma_remap_fault_reasons[] =
1459 {
1460 	"Software",
1461 	"Present bit in root entry is clear",
1462 	"Present bit in context entry is clear",
1463 	"Invalid context entry",
1464 	"Access beyond MGAW",
1465 	"PTE Write access is not set",
1466 	"PTE Read access is not set",
1467 	"Next page table ptr is invalid",
1468 	"Root table address invalid",
1469 	"Context table ptr is invalid",
1470 	"non-zero reserved fields in RTP",
1471 	"non-zero reserved fields in CTP",
1472 	"non-zero reserved fields in PTE",
1473 	"PCE for translation request specifies blocking",
1474 };
1475 
1476 static const char *irq_remap_fault_reasons[] =
1477 {
1478 	"Detected reserved fields in the decoded interrupt-remapped request",
1479 	"Interrupt index exceeded the interrupt-remapping table size",
1480 	"Present field in the IRTE entry is clear",
1481 	"Error accessing interrupt-remapping table pointed by IRTA_REG",
1482 	"Detected reserved fields in the IRTE entry",
1483 	"Blocked a compatibility format interrupt request",
1484 	"Blocked an interrupt request due to source-id verification failure",
1485 };
1486 
1487 static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1488 {
1489 	if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1490 					ARRAY_SIZE(irq_remap_fault_reasons))) {
1491 		*fault_type = INTR_REMAP;
1492 		return irq_remap_fault_reasons[fault_reason - 0x20];
1493 	} else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) {
1494 		*fault_type = DMA_REMAP;
1495 		return dma_remap_fault_reasons[fault_reason];
1496 	} else {
1497 		*fault_type = UNKNOWN;
1498 		return "Unknown";
1499 	}
1500 }
1501 
1502 
1503 static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq)
1504 {
1505 	if (iommu->irq == irq)
1506 		return DMAR_FECTL_REG;
1507 	else if (iommu->pr_irq == irq)
1508 		return DMAR_PECTL_REG;
1509 	else
1510 		BUG();
1511 }
1512 
1513 void dmar_msi_unmask(struct irq_data *data)
1514 {
1515 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1516 	int reg = dmar_msi_reg(iommu, data->irq);
1517 	unsigned long flag;
1518 
1519 	/* unmask it */
1520 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1521 	writel(0, iommu->reg + reg);
1522 	/* Read a reg to force flush the post write */
1523 	readl(iommu->reg + reg);
1524 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1525 }
1526 
1527 void dmar_msi_mask(struct irq_data *data)
1528 {
1529 	struct intel_iommu *iommu = irq_data_get_irq_handler_data(data);
1530 	int reg = dmar_msi_reg(iommu, data->irq);
1531 	unsigned long flag;
1532 
1533 	/* mask it */
1534 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1535 	writel(DMA_FECTL_IM, iommu->reg + reg);
1536 	/* Read a reg to force flush the post write */
1537 	readl(iommu->reg + reg);
1538 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1539 }
1540 
1541 void dmar_msi_write(int irq, struct msi_msg *msg)
1542 {
1543 	struct intel_iommu *iommu = irq_get_handler_data(irq);
1544 	int reg = dmar_msi_reg(iommu, irq);
1545 	unsigned long flag;
1546 
1547 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1548 	writel(msg->data, iommu->reg + reg + 4);
1549 	writel(msg->address_lo, iommu->reg + reg + 8);
1550 	writel(msg->address_hi, iommu->reg + reg + 12);
1551 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1552 }
1553 
1554 void dmar_msi_read(int irq, struct msi_msg *msg)
1555 {
1556 	struct intel_iommu *iommu = irq_get_handler_data(irq);
1557 	int reg = dmar_msi_reg(iommu, irq);
1558 	unsigned long flag;
1559 
1560 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1561 	msg->data = readl(iommu->reg + reg + 4);
1562 	msg->address_lo = readl(iommu->reg + reg + 8);
1563 	msg->address_hi = readl(iommu->reg + reg + 12);
1564 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1565 }
1566 
1567 static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
1568 		u8 fault_reason, u16 source_id, unsigned long long addr)
1569 {
1570 	const char *reason;
1571 	int fault_type;
1572 
1573 	reason = dmar_get_fault_reason(fault_reason, &fault_type);
1574 
1575 	if (fault_type == INTR_REMAP)
1576 		pr_err("INTR-REMAP: Request device [%02x:%02x.%d] "
1577 		       "fault index %llx\n"
1578 			"INTR-REMAP:[fault reason %02d] %s\n",
1579 			(source_id >> 8), PCI_SLOT(source_id & 0xFF),
1580 			PCI_FUNC(source_id & 0xFF), addr >> 48,
1581 			fault_reason, reason);
1582 	else
1583 		pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
1584 		       "fault addr %llx \n"
1585 		       "DMAR:[fault reason %02d] %s\n",
1586 		       (type ? "DMA Read" : "DMA Write"),
1587 		       (source_id >> 8), PCI_SLOT(source_id & 0xFF),
1588 		       PCI_FUNC(source_id & 0xFF), addr, fault_reason, reason);
1589 	return 0;
1590 }
1591 
1592 #define PRIMARY_FAULT_REG_LEN (16)
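/*
 * Primary fault interrupt handler: walk the fault recording registers, log
 * and clear each pending fault, then clear the fault overflow and pending
 * bits in the fault status register.
 */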
1593 irqreturn_t dmar_fault(int irq, void *dev_id)
1594 {
1595 	struct intel_iommu *iommu = dev_id;
1596 	int reg, fault_index;
1597 	u32 fault_status;
1598 	unsigned long flag;
1599 
1600 	raw_spin_lock_irqsave(&iommu->register_lock, flag);
1601 	fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1602 	if (fault_status)
1603 		pr_err("DRHD: handling fault status reg %x\n", fault_status);
1604 
1605 	/* TBD: ignore advanced fault log currently */
1606 	if (!(fault_status & DMA_FSTS_PPF))
1607 		goto unlock_exit;
1608 
1609 	fault_index = dma_fsts_fault_record_index(fault_status);
1610 	reg = cap_fault_reg_offset(iommu->cap);
1611 	while (1) {
1612 		u8 fault_reason;
1613 		u16 source_id;
1614 		u64 guest_addr;
1615 		int type;
1616 		u32 data;
1617 
1618 		/* highest 32 bits */
1619 		data = readl(iommu->reg + reg +
1620 				fault_index * PRIMARY_FAULT_REG_LEN + 12);
1621 		if (!(data & DMA_FRCD_F))
1622 			break;
1623 
1624 		fault_reason = dma_frcd_fault_reason(data);
1625 		type = dma_frcd_type(data);
1626 
1627 		data = readl(iommu->reg + reg +
1628 				fault_index * PRIMARY_FAULT_REG_LEN + 8);
1629 		source_id = dma_frcd_source_id(data);
1630 
1631 		guest_addr = dmar_readq(iommu->reg + reg +
1632 				fault_index * PRIMARY_FAULT_REG_LEN);
1633 		guest_addr = dma_frcd_page_addr(guest_addr);
1634 		/* clear the fault */
1635 		writel(DMA_FRCD_F, iommu->reg + reg +
1636 			fault_index * PRIMARY_FAULT_REG_LEN + 12);
1637 
1638 		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1639 
1640 		dmar_fault_do_one(iommu, type, fault_reason,
1641 				source_id, guest_addr);
1642 
1643 		fault_index++;
1644 		if (fault_index >= cap_num_fault_regs(iommu->cap))
1645 			fault_index = 0;
1646 		raw_spin_lock_irqsave(&iommu->register_lock, flag);
1647 	}
1648 
1649 	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
1650 
1651 unlock_exit:
1652 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1653 	return IRQ_HANDLED;
1654 }
1655 
1656 int dmar_set_interrupt(struct intel_iommu *iommu)
1657 {
1658 	int irq, ret;
1659 
1660 	/*
1661 	 * Check if the fault interrupt is already initialized.
1662 	 */
1663 	if (iommu->irq)
1664 		return 0;
1665 
1666 	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
1667 	if (irq > 0) {
1668 		iommu->irq = irq;
1669 	} else {
1670 		pr_err("No free IRQ vectors\n");
1671 		return -EINVAL;
1672 	}
1673 
1674 	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
1675 	if (ret)
1676 		pr_err("Can't request irq\n");
1677 	return ret;
1678 }
1679 
1680 int __init enable_drhd_fault_handling(void)
1681 {
1682 	struct dmar_drhd_unit *drhd;
1683 	struct intel_iommu *iommu;
1684 
1685 	/*
1686 	 * Enable fault control interrupt.
1687 	 */
1688 	for_each_iommu(iommu, drhd) {
1689 		u32 fault_status;
1690 		int ret = dmar_set_interrupt(iommu);
1691 
1692 		if (ret) {
1693 			pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
1694 			       (unsigned long long)drhd->reg_base_addr, ret);
1695 			return -1;
1696 		}
1697 
1698 		/*
1699 		 * Clear any previous faults.
1700 		 */
1701 		dmar_fault(iommu->irq, iommu);
1702 		fault_status = readl(iommu->reg + DMAR_FSTS_REG);
1703 		writel(fault_status, iommu->reg + DMAR_FSTS_REG);
1704 	}
1705 
1706 	return 0;
1707 }
1708 
1709 /*
1710  * Re-enable Queued Invalidation interface.
1711  */
1712 int dmar_reenable_qi(struct intel_iommu *iommu)
1713 {
1714 	if (!ecap_qis(iommu->ecap))
1715 		return -ENOENT;
1716 
1717 	if (!iommu->qi)
1718 		return -ENOENT;
1719 
1720 	/*
1721 	 * First disable queued invalidation.
1722 	 */
1723 	dmar_disable_qi(iommu);
1724 	/*
1725 	 * Then enable queued invalidation again. Since there is no pending
1726 	 * invalidation requests now, it's safe to re-enable queued
1727 	 * invalidation.
1728 	 */
1729 	__dmar_enable_qi(iommu);
1730 
1731 	return 0;
1732 }
1733 
1734 /*
1735  * Check interrupt remapping support in DMAR table description.
1736  */
1737 int __init dmar_ir_support(void)
1738 {
1739 	struct acpi_table_dmar *dmar;
1740 	dmar = (struct acpi_table_dmar *)dmar_tbl;
1741 	if (!dmar)
1742 		return 0;
1743 	return dmar->flags & 0x1;
1744 }
1745 
1746 /* Check whether DMAR units are in use */
1747 static inline bool dmar_in_use(void)
1748 {
1749 	return irq_remapping_enabled || intel_iommu_enabled;
1750 }
1751 
1752 static int __init dmar_free_unused_resources(void)
1753 {
1754 	struct dmar_drhd_unit *dmaru, *dmaru_n;
1755 
1756 	if (dmar_in_use())
1757 		return 0;
1758 
1759 	if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
1760 		bus_unregister_notifier(&pci_bus_type, &dmar_pci_bus_nb);
1761 
1762 	down_write(&dmar_global_lock);
1763 	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
1764 		list_del(&dmaru->list);
1765 		dmar_free_drhd(dmaru);
1766 	}
1767 	up_write(&dmar_global_lock);
1768 
1769 	return 0;
1770 }
1771 
1772 late_initcall(dmar_free_unused_resources);
1773 IOMMU_INIT_POST(detect_intel_iommu);
1774 
1775 /*
1776  * DMAR Hotplug Support
1777  * For more details, please refer to Intel(R) Virtualization Technology
1778  * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
1779  * "Remapping Hardware Unit Hot Plug".
1780  */
1781 static u8 dmar_hp_uuid[] = {
1782 	/* 0000 */    0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
1783 	/* 0008 */    0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
1784 };
1785 
1786 /*
1787  * Currently there's only one revision and BIOS will not check the revision id,
1788  * so use 0 for safety.
1789  */
1790 #define	DMAR_DSM_REV_ID			0
1791 #define	DMAR_DSM_FUNC_DRHD		1
1792 #define	DMAR_DSM_FUNC_ATSR		2
1793 #define	DMAR_DSM_FUNC_RHSA		3
1794 
1795 static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1796 {
1797 	return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
1798 }
1799 
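/*
 * Evaluate DMAR hotplug _DSM function @func on @handle and walk the buffer
 * of remapping structures it returns with @handler.  Returns 0 when the
 * handle does not implement that _DSM function.
 */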
1800 static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1801 				  dmar_res_handler_t handler, void *arg)
1802 {
1803 	int ret = -ENODEV;
1804 	union acpi_object *obj;
1805 	struct acpi_dmar_header *start;
1806 	struct dmar_res_callback callback;
1807 	static int res_type[] = {
1808 		[DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1809 		[DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1810 		[DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1811 	};
1812 
1813 	if (!dmar_detect_dsm(handle, func))
1814 		return 0;
1815 
1816 	obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
1817 				      func, NULL, ACPI_TYPE_BUFFER);
1818 	if (!obj)
1819 		return -ENODEV;
1820 
1821 	memset(&callback, 0, sizeof(callback));
1822 	callback.cb[res_type[func]] = handler;
1823 	callback.arg[res_type[func]] = arg;
1824 	start = (struct acpi_dmar_header *)obj->buffer.pointer;
1825 	ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1826 
1827 	ACPI_FREE(obj);
1828 
1829 	return ret;
1830 }
1831 
1832 static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1833 {
1834 	int ret;
1835 	struct dmar_drhd_unit *dmaru;
1836 
1837 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1838 	if (!dmaru)
1839 		return -ENODEV;
1840 
1841 	ret = dmar_ir_hotplug(dmaru, true);
1842 	if (ret == 0)
1843 		ret = dmar_iommu_hotplug(dmaru, true);
1844 
1845 	return ret;
1846 }
1847 
1848 static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1849 {
1850 	int i, ret;
1851 	struct device *dev;
1852 	struct dmar_drhd_unit *dmaru;
1853 
1854 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1855 	if (!dmaru)
1856 		return 0;
1857 
1858 	/*
1859 	 * All PCI devices managed by this unit should have been destroyed.
1860 	 */
1861 	if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
1862 		for_each_active_dev_scope(dmaru->devices,
1863 					  dmaru->devices_cnt, i, dev)
1864 			return -EBUSY;
1865 
1866 	ret = dmar_ir_hotplug(dmaru, false);
1867 	if (ret == 0)
1868 		ret = dmar_iommu_hotplug(dmaru, false);
1869 
1870 	return ret;
1871 }
1872 
1873 static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1874 {
1875 	struct dmar_drhd_unit *dmaru;
1876 
1877 	dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1878 	if (dmaru) {
1879 		list_del_rcu(&dmaru->list);
1880 		synchronize_rcu();
1881 		dmar_free_drhd(dmaru);
1882 	}
1883 
1884 	return 0;
1885 }
1886 
1887 static int dmar_hotplug_insert(acpi_handle handle)
1888 {
1889 	int ret;
1890 	int drhd_count = 0;
1891 
1892 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1893 				     &dmar_validate_one_drhd, (void *)1);
1894 	if (ret)
1895 		goto out;
1896 
1897 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1898 				     &dmar_parse_one_drhd, (void *)&drhd_count);
1899 	if (ret == 0 && drhd_count == 0) {
1900 		pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1901 		goto out;
1902 	} else if (ret) {
1903 		goto release_drhd;
1904 	}
1905 
1906 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1907 				     &dmar_parse_one_rhsa, NULL);
1908 	if (ret)
1909 		goto release_drhd;
1910 
1911 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1912 				     &dmar_parse_one_atsr, NULL);
1913 	if (ret)
1914 		goto release_atsr;
1915 
1916 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1917 				     &dmar_hp_add_drhd, NULL);
1918 	if (!ret)
1919 		return 0;
1920 
1921 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1922 			       &dmar_hp_remove_drhd, NULL);
1923 release_atsr:
1924 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1925 			       &dmar_release_one_atsr, NULL);
1926 release_drhd:
1927 	dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1928 			       &dmar_hp_release_drhd, NULL);
1929 out:
1930 	return ret;
1931 }
1932 
1933 static int dmar_hotplug_remove(acpi_handle handle)
1934 {
1935 	int ret;
1936 
1937 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1938 				     &dmar_check_one_atsr, NULL);
1939 	if (ret)
1940 		return ret;
1941 
1942 	ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1943 				     &dmar_hp_remove_drhd, NULL);
1944 	if (ret == 0) {
1945 		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1946 					       &dmar_release_one_atsr, NULL));
1947 		WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1948 					       &dmar_hp_release_drhd, NULL));
1949 	} else {
1950 		dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1951 				       &dmar_hp_add_drhd, NULL);
1952 	}
1953 
1954 	return ret;
1955 }
1956 
1957 static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
1958 				       void *context, void **retval)
1959 {
1960 	acpi_handle *phdl = retval;
1961 
1962 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1963 		*phdl = handle;
1964 		return AE_CTRL_TERMINATE;
1965 	}
1966 
1967 	return AE_OK;
1968 }
1969 
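/*
 * Common entry point for DMAR unit hot-add and hot-removal: locate the ACPI
 * object implementing the DMAR _DSM interface at or below @handle and
 * insert or remove the corresponding remapping hardware units.
 */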
1970 static int dmar_device_hotplug(acpi_handle handle, bool insert)
1971 {
1972 	int ret;
1973 	acpi_handle tmp = NULL;
1974 	acpi_status status;
1975 
1976 	if (!dmar_in_use())
1977 		return 0;
1978 
1979 	if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1980 		tmp = handle;
1981 	} else {
1982 		status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
1983 					     ACPI_UINT32_MAX,
1984 					     dmar_get_dsm_handle,
1985 					     NULL, NULL, &tmp);
1986 		if (ACPI_FAILURE(status)) {
1987 			pr_warn("Failed to locate _DSM method.\n");
1988 			return -ENXIO;
1989 		}
1990 	}
1991 	if (tmp == NULL)
1992 		return 0;
1993 
1994 	down_write(&dmar_global_lock);
1995 	if (insert)
1996 		ret = dmar_hotplug_insert(tmp);
1997 	else
1998 		ret = dmar_hotplug_remove(tmp);
1999 	up_write(&dmar_global_lock);
2000 
2001 	return ret;
2002 }
2003 
2004 int dmar_device_add(acpi_handle handle)
2005 {
2006 	return dmar_device_hotplug(handle, true);
2007 }
2008 
2009 int dmar_device_remove(acpi_handle handle)
2010 {
2011 	return dmar_device_hotplug(handle, false);
2012 }
2013