This source file includes the following definitions:
- translation_pre_enabled
- clear_translation_pre_enabled
- init_translation_status
- update_last_devid
- tbl_size
- amd_iommu_get_num_iommus
- iommu_read_l1
- iommu_write_l1
- iommu_read_l2
- iommu_write_l2
- iommu_set_exclusion_range
- iommu_set_device_table
- iommu_feature_enable
- iommu_feature_disable
- iommu_set_inv_tlb_timeout
- iommu_enable
- iommu_disable
- iommu_map_mmio_space
- iommu_unmap_mmio_space
- get_ivhd_header_size
- ivhd_entry_length
- find_last_devid_from_ivhd
- check_ivrs_checksum
- find_last_devid_acpi
- alloc_command_buffer
- amd_iommu_reset_cmd_buffer
- iommu_enable_command_buffer
- iommu_disable_command_buffer
- free_command_buffer
- alloc_event_buffer
- iommu_enable_event_buffer
- iommu_disable_event_buffer
- free_event_buffer
- alloc_ppr_log
- iommu_enable_ppr_log
- free_ppr_log
- free_ga_log
- iommu_ga_log_enable
- iommu_init_ga_log
- iommu_init_ga
- iommu_enable_xt
- iommu_enable_gt
- set_dev_entry_bit
- get_dev_entry_bit
- copy_device_table
- amd_iommu_apply_erratum_63
- set_iommu_for_device
- set_dev_entry_from_acpi
- add_special_device
- add_acpi_hid_device
- add_early_maps
- set_device_exclusion_range
- init_iommu_from_acpi
- free_iommu_one
- free_iommu_all
- amd_iommu_erratum_746_workaround
- amd_iommu_ats_write_check_workaround
- init_iommu_one
- get_highest_supported_ivhd_type
- init_iommu_all
- init_iommu_perf_ctr
- amd_iommu_show_cap
- amd_iommu_show_features
- iommu_init_pci
- print_iommu_info
- amd_iommu_init_pci
- iommu_setup_msi
- iommu_update_intcapxt
- _irq_notifier_notify
- _irq_notifier_release
- iommu_init_intcapxt
- iommu_init_msi
- free_unity_maps
- init_exclusion_range
- init_unity_map_range
- init_memory_definitions
- init_device_table_dma
- uninit_device_table_dma
- init_device_table
- iommu_init_flags
- iommu_apply_resume_quirks
- iommu_enable_ga
- early_enable_iommu
- early_enable_iommus
- enable_iommus_v2
- enable_iommus
- disable_iommus
- amd_iommu_resume
- amd_iommu_suspend
- free_iommu_resources
- check_ioapic_information
- free_dma_resources
- early_amd_iommu_init
- amd_iommu_enable_interrupts
- detect_ivrs
- state_next
- iommu_go_to_state
- amd_iommu_prepare
- amd_iommu_enable
- amd_iommu_disable
- amd_iommu_reenable
- amd_iommu_enable_faulting
- amd_iommu_init
- amd_iommu_sme_check
- amd_iommu_detect
- parse_amd_iommu_dump
- parse_amd_iommu_intr
- parse_amd_iommu_options
- parse_ivrs_ioapic
- parse_ivrs_hpet
- parse_ivrs_acpihid
- amd_iommu_v2_supported
- get_amd_iommu
- amd_iommu_pc_get_max_banks
- amd_iommu_pc_supported
- amd_iommu_pc_get_max_counters
- iommu_pc_get_set_reg
- amd_iommu_pc_get_reg
- amd_iommu_pc_set_reg
1
2
3
4
5
6
7
8 #define pr_fmt(fmt) "AMD-Vi: " fmt
9 #define dev_fmt(fmt) pr_fmt(fmt)
10
11 #include <linux/pci.h>
12 #include <linux/acpi.h>
13 #include <linux/list.h>
14 #include <linux/bitmap.h>
15 #include <linux/slab.h>
16 #include <linux/syscore_ops.h>
17 #include <linux/interrupt.h>
18 #include <linux/msi.h>
19 #include <linux/amd-iommu.h>
20 #include <linux/export.h>
21 #include <linux/iommu.h>
22 #include <linux/kmemleak.h>
23 #include <linux/mem_encrypt.h>
24 #include <asm/pci-direct.h>
25 #include <asm/iommu.h>
26 #include <asm/apic.h>
27 #include <asm/msidef.h>
28 #include <asm/gart.h>
29 #include <asm/x86_init.h>
30 #include <asm/iommu_table.h>
31 #include <asm/io_apic.h>
32 #include <asm/irq_remapping.h>
33
34 #include <linux/crash_dump.h>
35 #include "amd_iommu.h"
36 #include "amd_iommu_proto.h"
37 #include "amd_iommu_types.h"
38 #include "irq_remapping.h"
39
40
41
42
43 #define IVRS_HEADER_LENGTH 48
44
45 #define ACPI_IVHD_TYPE_MAX_SUPPORTED 0x40
46 #define ACPI_IVMD_TYPE_ALL 0x20
47 #define ACPI_IVMD_TYPE 0x21
48 #define ACPI_IVMD_TYPE_RANGE 0x22
49
50 #define IVHD_DEV_ALL 0x01
51 #define IVHD_DEV_SELECT 0x02
52 #define IVHD_DEV_SELECT_RANGE_START 0x03
53 #define IVHD_DEV_RANGE_END 0x04
54 #define IVHD_DEV_ALIAS 0x42
55 #define IVHD_DEV_ALIAS_RANGE 0x43
56 #define IVHD_DEV_EXT_SELECT 0x46
57 #define IVHD_DEV_EXT_SELECT_RANGE 0x47
58 #define IVHD_DEV_SPECIAL 0x48
59 #define IVHD_DEV_ACPI_HID 0xf0
60
61 #define UID_NOT_PRESENT 0
62 #define UID_IS_INTEGER 1
63 #define UID_IS_CHARACTER 2
64
65 #define IVHD_SPECIAL_IOAPIC 1
66 #define IVHD_SPECIAL_HPET 2
67
68 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01
69 #define IVHD_FLAG_PASSPW_EN_MASK 0x02
70 #define IVHD_FLAG_RESPASSPW_EN_MASK 0x04
71 #define IVHD_FLAG_ISOC_EN_MASK 0x08
72
73 #define IVMD_FLAG_EXCL_RANGE 0x08
74 #define IVMD_FLAG_UNITY_MAP 0x01
75
76 #define ACPI_DEVFLAG_INITPASS 0x01
77 #define ACPI_DEVFLAG_EXTINT 0x02
78 #define ACPI_DEVFLAG_NMI 0x04
79 #define ACPI_DEVFLAG_SYSMGT1 0x10
80 #define ACPI_DEVFLAG_SYSMGT2 0x20
81 #define ACPI_DEVFLAG_LINT0 0x40
82 #define ACPI_DEVFLAG_LINT1 0x80
83 #define ACPI_DEVFLAG_ATSDIS 0x10000000
84
85 #define LOOP_TIMEOUT 100000
86
87
88
89
90
91
92
93 extern const struct iommu_ops amd_iommu_ops;
94
95
96
97
98
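/*
 * In-memory layout of one IVHD (I/O Virtualization Hardware Definition)
 * block header from the ACPI IVRS table. The efr_reg/res fields are only
 * meaningful for IVHD types 11h and 40h; type 10h reports its feature
 * bits in efr_attr instead (see init_iommu_one() below).
 */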
99 struct ivhd_header {
100 u8 type;
101 u8 flags;
102 u16 length;
103 u16 devid;
104 u16 cap_ptr;
105 u64 mmio_phys;
106 u16 pci_seg;
107 u16 info;
108 u32 efr_attr;
109
110
111 u64 efr_reg;
112 u64 res;
113 } __attribute__((packed));
114
115
116
117
118
119 struct ivhd_entry {
120 u8 type;
121 u16 devid;
122 u8 flags;
123 u32 ext;
124 u32 hidh;
125 u64 cid;
126 u8 uidf;
127 u8 uidl;
128 u8 uid;
129 } __attribute__((packed));
130
131
132
133
134
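/*
 * Layout of one IVMD (I/O Virtualization Memory Definition) block from
 * the IVRS table. These blocks describe exclusion ranges and unity-mapped
 * ranges and are consumed by init_memory_definitions() below.
 */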
135 struct ivmd_header {
136 u8 type;
137 u8 flags;
138 u16 length;
139 u16 devid;
140 u16 aux;
141 u64 resv;
142 u64 range_start;
143 u64 range_length;
144 } __attribute__((packed));
145
146 bool amd_iommu_dump;
147 bool amd_iommu_irq_remap __read_mostly;
148
149 int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
150 static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
151
152 static bool amd_iommu_detected;
153 static bool __initdata amd_iommu_disabled;
154 static int amd_iommu_target_ivhd_type;
155
156 u16 amd_iommu_last_bdf;
157
158 LIST_HEAD(amd_iommu_unity_map);
159
160 bool amd_iommu_unmap_flush;
161
162 LIST_HEAD(amd_iommu_list);
163
164
165
166 struct amd_iommu *amd_iommus[MAX_IOMMUS];
167
168
169 static int amd_iommus_present;
170
171
172 bool amd_iommu_np_cache __read_mostly;
173 bool amd_iommu_iotlb_sup __read_mostly = true;
174
175 u32 amd_iommu_max_pasid __read_mostly = ~0;
176
177 bool amd_iommu_v2_present __read_mostly;
178 static bool amd_iommu_pc_present __read_mostly;
179
180 bool amd_iommu_force_isolation __read_mostly;
181
182
183
184
185
186
187
188 struct dev_table_entry *amd_iommu_dev_table;
189
190
191
192
193 static struct dev_table_entry *old_dev_tbl_cpy;
194
195
196
197
198
199
200 u16 *amd_iommu_alias_table;
201
202
203
204
205
206 struct amd_iommu **amd_iommu_rlookup_table;
207 EXPORT_SYMBOL(amd_iommu_rlookup_table);
208
209
210
211
212
213 struct irq_remap_table **irq_lookup_table;
214
215
216
217
218
219 unsigned long *amd_iommu_pd_alloc_bitmap;
220
221 static u32 dev_table_size;
222 static u32 alias_table_size;
223 static u32 rlookup_table_size;
224
225 enum iommu_init_state {
226 IOMMU_START_STATE,
227 IOMMU_IVRS_DETECTED,
228 IOMMU_ACPI_FINISHED,
229 IOMMU_ENABLED,
230 IOMMU_PCI_INIT,
231 IOMMU_INTERRUPTS_EN,
232 IOMMU_DMA_OPS,
233 IOMMU_INITIALIZED,
234 IOMMU_NOT_FOUND,
235 IOMMU_INIT_ERROR,
236 IOMMU_CMDLINE_DISABLED,
237 };
238
239
240 #define EARLY_MAP_SIZE 4
241 static struct devid_map __initdata early_ioapic_map[EARLY_MAP_SIZE];
242 static struct devid_map __initdata early_hpet_map[EARLY_MAP_SIZE];
243 static struct acpihid_map_entry __initdata early_acpihid_map[EARLY_MAP_SIZE];
244
245 static int __initdata early_ioapic_map_size;
246 static int __initdata early_hpet_map_size;
247 static int __initdata early_acpihid_map_size;
248
249 static bool __initdata cmdline_maps;
250
251 static enum iommu_init_state init_state = IOMMU_START_STATE;
252
253 static int amd_iommu_enable_interrupts(void);
254 static int __init iommu_go_to_state(enum iommu_init_state state);
255 static void init_device_table_dma(void);
256
257 static bool amd_iommu_pre_enabled = true;
258
259 bool translation_pre_enabled(struct amd_iommu *iommu)
260 {
261 return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED);
262 }
263 EXPORT_SYMBOL(translation_pre_enabled);
264
265 static void clear_translation_pre_enabled(struct amd_iommu *iommu)
266 {
267 iommu->flags &= ~AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
268 }
269
270 static void init_translation_status(struct amd_iommu *iommu)
271 {
272 u64 ctrl;
273
274 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
275 if (ctrl & (1<<CONTROL_IOMMU_EN))
276 iommu->flags |= AMD_IOMMU_FLAG_TRANS_PRE_ENABLED;
277 }
278
279 static inline void update_last_devid(u16 devid)
280 {
281 if (devid > amd_iommu_last_bdf)
282 amd_iommu_last_bdf = devid;
283 }
284
285 static inline unsigned long tbl_size(int entry_size)
286 {
287 unsigned shift = PAGE_SHIFT +
288 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);
289
290 return 1UL << shift;
291 }
292
293 int amd_iommu_get_num_iommus(void)
294 {
295 return amd_iommus_present;
296 }
297
298
299
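/*
 * The IOMMU exposes its L1/L2 configuration registers indirectly through
 * PCI config space: an index is written to offset 0xf8 (L1) or 0xf0 (L2)
 * and the data is then accessed at 0xfc (L1) or 0xf4 (L2). A write-enable
 * bit in the index register (bit 31 for L1, bit 8 for L2) turns the
 * following data access into a write.
 */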
300 static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address)
301 {
302 u32 val;
303
304 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
305 pci_read_config_dword(iommu->dev, 0xfc, &val);
306 return val;
307 }
308
309 static void iommu_write_l1(struct amd_iommu *iommu, u16 l1, u8 address, u32 val)
310 {
311 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16 | 1 << 31));
312 pci_write_config_dword(iommu->dev, 0xfc, val);
313 pci_write_config_dword(iommu->dev, 0xf8, (address | l1 << 16));
314 }
315
316 static u32 iommu_read_l2(struct amd_iommu *iommu, u8 address)
317 {
318 u32 val;
319
320 pci_write_config_dword(iommu->dev, 0xf0, address);
321 pci_read_config_dword(iommu->dev, 0xf4, &val);
322 return val;
323 }
324
325 static void iommu_write_l2(struct amd_iommu *iommu, u8 address, u32 val)
326 {
327 pci_write_config_dword(iommu->dev, 0xf0, (address | 1 << 8));
328 pci_write_config_dword(iommu->dev, 0xf4, val);
329 }
330
331
332
333
334
335
336
337
338
339
340
341
342
343
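/*
 * Program the exclusion-range base/limit registers in MMIO space. DMA
 * transactions that hit this range are passed through by the IOMMU
 * without translation.
 */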
344 static void iommu_set_exclusion_range(struct amd_iommu *iommu)
345 {
346 u64 start = iommu->exclusion_start & PAGE_MASK;
347 u64 limit = (start + iommu->exclusion_length - 1) & PAGE_MASK;
348 u64 entry;
349
350 if (!iommu->exclusion_start)
351 return;
352
353 entry = start | MMIO_EXCL_ENABLE_MASK;
354 memcpy_toio(iommu->mmio_base + MMIO_EXCL_BASE_OFFSET,
355 &entry, sizeof(entry));
356
357 entry = limit;
358 memcpy_toio(iommu->mmio_base + MMIO_EXCL_LIMIT_OFFSET,
359 &entry, sizeof(entry));
360 }
361
362
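/*
 * Write the physical address of the device table into the Device Table
 * Base Address register; the table size (in 4K pages, minus one) is
 * encoded in the low bits of the same register.
 */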
363 static void iommu_set_device_table(struct amd_iommu *iommu)
364 {
365 u64 entry;
366
367 BUG_ON(iommu->mmio_base == NULL);
368
369 entry = iommu_virt_to_phys(amd_iommu_dev_table);
370 entry |= (dev_table_size >> 12) - 1;
371 memcpy_toio(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET,
372 &entry, sizeof(entry));
373 }
374
375
376 static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit)
377 {
378 u64 ctrl;
379
380 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
381 ctrl |= (1ULL << bit);
382 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
383 }
384
385 static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
386 {
387 u64 ctrl;
388
389 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
390 ctrl &= ~(1ULL << bit);
391 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
392 }
393
394 static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
395 {
396 u64 ctrl;
397
398 ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
399 ctrl &= ~CTRL_INV_TO_MASK;
400 ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
401 writeq(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
402 }
403
404
405 static void iommu_enable(struct amd_iommu *iommu)
406 {
407 iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
408 }
409
410 static void iommu_disable(struct amd_iommu *iommu)
411 {
412 if (!iommu->mmio_base)
413 return;
414
415
416 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
417
418
419 iommu_feature_disable(iommu, CONTROL_EVT_INT_EN);
420 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
421
422
423 iommu_feature_disable(iommu, CONTROL_GALOG_EN);
424 iommu_feature_disable(iommu, CONTROL_GAINT_EN);
425
426
427 iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
428 }
429
430
431
432
433
434 static u8 __iomem * __init iommu_map_mmio_space(u64 address, u64 end)
435 {
436 if (!request_mem_region(address, end, "amd_iommu")) {
437 pr_err("Can not reserve memory region %llx-%llx for mmio\n",
438 address, end);
439 pr_err("This is a BIOS bug. Please contact your hardware vendor\n");
440 return NULL;
441 }
442
443 return (u8 __iomem *)ioremap_nocache(address, end);
444 }
445
446 static void __init iommu_unmap_mmio_space(struct amd_iommu *iommu)
447 {
448 if (iommu->mmio_base)
449 iounmap(iommu->mmio_base);
450 release_mem_region(iommu->mmio_phys, iommu->mmio_phys_end);
451 }
452
453 static inline u32 get_ivhd_header_size(struct ivhd_header *h)
454 {
455 u32 size = 0;
456
457 switch (h->type) {
458 case 0x10:
459 size = 24;
460 break;
461 case 0x11:
462 case 0x40:
463 size = 40;
464 break;
465 }
466 return size;
467 }
468
469
470
471
472
473
474
475
476
477
478
479
480
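/*
 * IVHD entries with a type below 0x80 have a fixed length of 4, 8, 16 or
 * 32 bytes, encoded in the two uppermost bits of the type byte. ACPI_HID
 * entries are variable-length: byte 21 holds the UID length and the fixed
 * part is 22 bytes.
 */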
481 static inline int ivhd_entry_length(u8 *ivhd)
482 {
483 u32 type = ((struct ivhd_entry *)ivhd)->type;
484
485 if (type < 0x80) {
486 return 0x04 << (*ivhd >> 6);
487 } else if (type == IVHD_DEV_ACPI_HID) {
488
489 return *((u8 *)ivhd + 21) + 22;
490 }
491 return 0;
492 }
493
494
495
496
497
498 static int __init find_last_devid_from_ivhd(struct ivhd_header *h)
499 {
500 u8 *p = (void *)h, *end = (void *)h;
501 struct ivhd_entry *dev;
502
503 u32 ivhd_size = get_ivhd_header_size(h);
504
505 if (!ivhd_size) {
506 pr_err("Unsupported IVHD type %#x\n", h->type);
507 return -EINVAL;
508 }
509
510 p += ivhd_size;
511 end += h->length;
512
513 while (p < end) {
514 dev = (struct ivhd_entry *)p;
515 switch (dev->type) {
516 case IVHD_DEV_ALL:
517
518 update_last_devid(0xffff);
519 break;
520 case IVHD_DEV_SELECT:
521 case IVHD_DEV_RANGE_END:
522 case IVHD_DEV_ALIAS:
523 case IVHD_DEV_EXT_SELECT:
524
525 update_last_devid(dev->devid);
526 break;
527 default:
528 break;
529 }
530 p += ivhd_entry_length(p);
531 }
532
533 WARN_ON(p != end);
534
535 return 0;
536 }
537
538 static int __init check_ivrs_checksum(struct acpi_table_header *table)
539 {
540 int i;
541 u8 checksum = 0, *p = (u8 *)table;
542
543 for (i = 0; i < table->length; ++i)
544 checksum += p[i];
545 if (checksum != 0) {
546
547 pr_err(FW_BUG "IVRS invalid checksum\n");
548 return -ENODEV;
549 }
550
551 return 0;
552 }
553
554
555
556
557
558
559 static int __init find_last_devid_acpi(struct acpi_table_header *table)
560 {
561 u8 *p = (u8 *)table, *end = (u8 *)table;
562 struct ivhd_header *h;
563
564 p += IVRS_HEADER_LENGTH;
565
566 end += table->length;
567 while (p < end) {
568 h = (struct ivhd_header *)p;
569 if (h->type == amd_iommu_target_ivhd_type) {
570 int ret = find_last_devid_from_ivhd(h);
571
572 if (ret)
573 return ret;
574 }
575 p += h->length;
576 }
577 WARN_ON(p != end);
578
579 return 0;
580 }
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
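/*
 * Allocate the ring buffer through which the driver sends commands
 * (invalidations, completion waits, ...) to the IOMMU hardware.
 */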
596 static int __init alloc_command_buffer(struct amd_iommu *iommu)
597 {
598 iommu->cmd_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
599 get_order(CMD_BUFFER_SIZE));
600
601 return iommu->cmd_buf ? 0 : -ENOMEM;
602 }
603
604
605
606
607
608 void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
609 {
610 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
611
612 writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
613 writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
614 iommu->cmd_buf_head = 0;
615 iommu->cmd_buf_tail = 0;
616
617 iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
618 }
619
620
621
622
623
624 static void iommu_enable_command_buffer(struct amd_iommu *iommu)
625 {
626 u64 entry;
627
628 BUG_ON(iommu->cmd_buf == NULL);
629
630 entry = iommu_virt_to_phys(iommu->cmd_buf);
631 entry |= MMIO_CMD_SIZE_512;
632
633 memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
634 &entry, sizeof(entry));
635
636 amd_iommu_reset_cmd_buffer(iommu);
637 }
638
639
640
641
642 static void iommu_disable_command_buffer(struct amd_iommu *iommu)
643 {
644 iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
645 }
646
647 static void __init free_command_buffer(struct amd_iommu *iommu)
648 {
649 free_pages((unsigned long)iommu->cmd_buf, get_order(CMD_BUFFER_SIZE));
650 }
651
652
653 static int __init alloc_event_buffer(struct amd_iommu *iommu)
654 {
655 iommu->evt_buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
656 get_order(EVT_BUFFER_SIZE));
657
658 return iommu->evt_buf ? 0 : -ENOMEM;
659 }
660
661 static void iommu_enable_event_buffer(struct amd_iommu *iommu)
662 {
663 u64 entry;
664
665 BUG_ON(iommu->evt_buf == NULL);
666
667 entry = iommu_virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
668
669 memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
670 &entry, sizeof(entry));
671
672
673 writel(0x00, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
674 writel(0x00, iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
675
676 iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
677 }
678
679
680
681
682 static void iommu_disable_event_buffer(struct amd_iommu *iommu)
683 {
684 iommu_feature_disable(iommu, CONTROL_EVT_LOG_EN);
685 }
686
687 static void __init free_event_buffer(struct amd_iommu *iommu)
688 {
689 free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
690 }
691
692
693 static int __init alloc_ppr_log(struct amd_iommu *iommu)
694 {
695 iommu->ppr_log = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
696 get_order(PPR_LOG_SIZE));
697
698 return iommu->ppr_log ? 0 : -ENOMEM;
699 }
700
701 static void iommu_enable_ppr_log(struct amd_iommu *iommu)
702 {
703 u64 entry;
704
705 if (iommu->ppr_log == NULL)
706 return;
707
708 entry = iommu_virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
709
710 memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
711 &entry, sizeof(entry));
712
713
714 writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
715 writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
716
717 iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
718 iommu_feature_enable(iommu, CONTROL_PPR_EN);
719 }
720
721 static void __init free_ppr_log(struct amd_iommu *iommu)
722 {
723 if (iommu->ppr_log == NULL)
724 return;
725
726 free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
727 }
728
729 static void free_ga_log(struct amd_iommu *iommu)
730 {
731 #ifdef CONFIG_IRQ_REMAP
732 if (iommu->ga_log)
733 free_pages((unsigned long)iommu->ga_log,
734 get_order(GA_LOG_SIZE));
735 if (iommu->ga_log_tail)
736 free_pages((unsigned long)iommu->ga_log_tail,
737 get_order(8));
738 #endif
739 }
740
741 static int iommu_ga_log_enable(struct amd_iommu *iommu)
742 {
743 #ifdef CONFIG_IRQ_REMAP
744 u32 status, i;
745
746 if (!iommu->ga_log)
747 return -EINVAL;
748
749 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
750
751
752 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
753 return 0;
754
755 iommu_feature_enable(iommu, CONTROL_GAINT_EN);
756 iommu_feature_enable(iommu, CONTROL_GALOG_EN);
757
758 for (i = 0; i < LOOP_TIMEOUT; ++i) {
759 status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
760 if (status & (MMIO_STATUS_GALOG_RUN_MASK))
761 break;
762 }
763
764 if (i >= LOOP_TIMEOUT)
765 return -EINVAL;
766 #endif
767 return 0;
768 }
769
770 #ifdef CONFIG_IRQ_REMAP
771 static int iommu_init_ga_log(struct amd_iommu *iommu)
772 {
773 u64 entry;
774
775 if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
776 return 0;
777
778 iommu->ga_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
779 get_order(GA_LOG_SIZE));
780 if (!iommu->ga_log)
781 goto err_out;
782
783 iommu->ga_log_tail = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
784 get_order(8));
785 if (!iommu->ga_log_tail)
786 goto err_out;
787
788 entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
789 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
790 &entry, sizeof(entry));
791 entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
792 (BIT_ULL(52)-1)) & ~7ULL;
793 memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
794 &entry, sizeof(entry));
795 writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
796 writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
797
798 return 0;
799 err_out:
800 free_ga_log(iommu);
801 return -EINVAL;
802 }
803 #endif
804
805 static int iommu_init_ga(struct amd_iommu *iommu)
806 {
807 int ret = 0;
808
809 #ifdef CONFIG_IRQ_REMAP
810
811
812
813 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
814 !iommu_feature(iommu, FEATURE_GAM_VAPIC))
815 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
816
817 ret = iommu_init_ga_log(iommu);
818 #endif
819
820 return ret;
821 }
822
823 static void iommu_enable_xt(struct amd_iommu *iommu)
824 {
825 #ifdef CONFIG_IRQ_REMAP
826
827
828
829
830 if (AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir) &&
831 amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
832 iommu_feature_enable(iommu, CONTROL_XT_EN);
833 #endif
834 }
835
836 static void iommu_enable_gt(struct amd_iommu *iommu)
837 {
838 if (!iommu_feature(iommu, FEATURE_GT))
839 return;
840
841 iommu_feature_enable(iommu, CONTROL_GT_EN);
842 }
843
844
845 static void set_dev_entry_bit(u16 devid, u8 bit)
846 {
847 int i = (bit >> 6) & 0x03;
848 int _bit = bit & 0x3f;
849
850 amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
851 }
852
853 static int get_dev_entry_bit(u16 devid, u8 bit)
854 {
855 int i = (bit >> 6) & 0x03;
856 int _bit = bit & 0x3f;
857
858 return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
859 }
860
861
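/*
 * When the IOMMU was left enabled by a previous kernel (kdump case), copy
 * the old device table so that ongoing DMA and interrupt remapping keep
 * working until the new table takes over. Returns false if the old table
 * cannot be trusted or copied.
 */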
862 static bool copy_device_table(void)
863 {
864 u64 int_ctl, int_tab_len, entry = 0, last_entry = 0;
865 struct dev_table_entry *old_devtb = NULL;
866 u32 lo, hi, devid, old_devtb_size;
867 phys_addr_t old_devtb_phys;
868 struct amd_iommu *iommu;
869 u16 dom_id, dte_v, irq_v;
870 gfp_t gfp_flag;
871 u64 tmp;
872
873 if (!amd_iommu_pre_enabled)
874 return false;
875
876 pr_warn("Translation is already enabled - trying to copy translation structures\n");
877 for_each_iommu(iommu) {
878
879 lo = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET);
880 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
881 entry = (((u64) hi) << 32) + lo;
882 if (last_entry && last_entry != entry) {
883 pr_err("IOMMU:%d should use the same dev table as others!\n",
884 iommu->index);
885 return false;
886 }
887 last_entry = entry;
888
889 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
890 if (old_devtb_size != dev_table_size) {
891 pr_err("The device table size of IOMMU:%d does not match the expected size!\n",
892 iommu->index);
893 return false;
894 }
895 }
896
897
898
899
900
901
902 old_devtb_phys = __sme_clr(entry) & PAGE_MASK;
903
904 if (old_devtb_phys >= 0x100000000ULL) {
905 pr_err("The address of the old device table is above 4G, not trustworthy!\n");
906 return false;
907 }
908 old_devtb = (sme_active() && is_kdump_kernel())
909 ? (__force void *)ioremap_encrypted(old_devtb_phys,
910 dev_table_size)
911 : memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
912
913 if (!old_devtb)
914 return false;
915
916 gfp_flag = GFP_KERNEL | __GFP_ZERO | GFP_DMA32;
917 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
918 get_order(dev_table_size));
919 if (old_dev_tbl_cpy == NULL) {
920 pr_err("Failed to allocate memory for copying old device table!\n");
921 return false;
922 }
923
924 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
925 old_dev_tbl_cpy[devid] = old_devtb[devid];
926 dom_id = old_devtb[devid].data[1] & DEV_DOMID_MASK;
927 dte_v = old_devtb[devid].data[0] & DTE_FLAG_V;
928
929 if (dte_v && dom_id) {
930 old_dev_tbl_cpy[devid].data[0] = old_devtb[devid].data[0];
931 old_dev_tbl_cpy[devid].data[1] = old_devtb[devid].data[1];
932 __set_bit(dom_id, amd_iommu_pd_alloc_bitmap);
933
934 if (old_devtb[devid].data[0] & DTE_FLAG_GV) {
935 tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
936 tmp |= DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
937 old_dev_tbl_cpy[devid].data[1] &= ~tmp;
938 tmp = DTE_GCR3_VAL_A(~0ULL) << DTE_GCR3_SHIFT_A;
939 tmp |= DTE_FLAG_GV;
940 old_dev_tbl_cpy[devid].data[0] &= ~tmp;
941 }
942 }
943
944 irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE;
945 int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK;
946 int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK;
947 if (irq_v && (int_ctl || int_tab_len)) {
948 if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
949 (int_tab_len != DTE_IRQ_TABLE_LEN)) {
950 pr_err("Wrong old irq remapping flag for devid %#x\n", devid);
951 return false;
952 }
953
954 old_dev_tbl_cpy[devid].data[2] = old_devtb[devid].data[2];
955 }
956 }
957 memunmap(old_devtb);
958
959 return true;
960 }
961
962 void amd_iommu_apply_erratum_63(u16 devid)
963 {
964 int sysmgt;
965
966 sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) |
967 (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1);
968
969 if (sysmgt == 0x01)
970 set_dev_entry_bit(devid, DEV_ENTRY_IW);
971 }
972
973
974 static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid)
975 {
976 amd_iommu_rlookup_table[devid] = iommu;
977 }
978
979
980
981
982
983 static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
984 u16 devid, u32 flags, u32 ext_flags)
985 {
986 if (flags & ACPI_DEVFLAG_INITPASS)
987 set_dev_entry_bit(devid, DEV_ENTRY_INIT_PASS);
988 if (flags & ACPI_DEVFLAG_EXTINT)
989 set_dev_entry_bit(devid, DEV_ENTRY_EINT_PASS);
990 if (flags & ACPI_DEVFLAG_NMI)
991 set_dev_entry_bit(devid, DEV_ENTRY_NMI_PASS);
992 if (flags & ACPI_DEVFLAG_SYSMGT1)
993 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1);
994 if (flags & ACPI_DEVFLAG_SYSMGT2)
995 set_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2);
996 if (flags & ACPI_DEVFLAG_LINT0)
997 set_dev_entry_bit(devid, DEV_ENTRY_LINT0_PASS);
998 if (flags & ACPI_DEVFLAG_LINT1)
999 set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS);
1000
1001 amd_iommu_apply_erratum_63(devid);
1002
1003 set_iommu_for_device(iommu, devid);
1004 }
1005
1006 int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1007 {
1008 struct devid_map *entry;
1009 struct list_head *list;
1010
1011 if (type == IVHD_SPECIAL_IOAPIC)
1012 list = &ioapic_map;
1013 else if (type == IVHD_SPECIAL_HPET)
1014 list = &hpet_map;
1015 else
1016 return -EINVAL;
1017
1018 list_for_each_entry(entry, list, list) {
1019 if (!(entry->id == id && entry->cmd_line))
1020 continue;
1021
1022 pr_info("Command-line override present for %s id %d - ignoring\n",
1023 type == IVHD_SPECIAL_IOAPIC ? "IOAPIC" : "HPET", id);
1024
1025 *devid = entry->devid;
1026
1027 return 0;
1028 }
1029
1030 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1031 if (!entry)
1032 return -ENOMEM;
1033
1034 entry->id = id;
1035 entry->devid = *devid;
1036 entry->cmd_line = cmd_line;
1037
1038 list_add_tail(&entry->list, list);
1039
1040 return 0;
1041 }
1042
1043 static int __init add_acpi_hid_device(u8 *hid, u8 *uid, u16 *devid,
1044 bool cmd_line)
1045 {
1046 struct acpihid_map_entry *entry;
1047 struct list_head *list = &acpihid_map;
1048
1049 list_for_each_entry(entry, list, list) {
1050 if (strcmp(entry->hid, hid) ||
1051 (*uid && *entry->uid && strcmp(entry->uid, uid)) ||
1052 !entry->cmd_line)
1053 continue;
1054
1055 pr_info("Command-line override for hid:%s uid:%s\n",
1056 hid, uid);
1057 *devid = entry->devid;
1058 return 0;
1059 }
1060
1061 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1062 if (!entry)
1063 return -ENOMEM;
1064
1065 memcpy(entry->uid, uid, strlen(uid));
1066 memcpy(entry->hid, hid, strlen(hid));
1067 entry->devid = *devid;
1068 entry->cmd_line = cmd_line;
1069 entry->root_devid = (entry->devid & (~0x7));
1070
1071 pr_info("%s, add hid:%s, uid:%s, rdevid:%d\n",
1072 entry->cmd_line ? "cmd" : "ivrs",
1073 entry->hid, entry->uid, entry->root_devid);
1074
1075 list_add_tail(&entry->list, list);
1076 return 0;
1077 }
1078
1079 static int __init add_early_maps(void)
1080 {
1081 int i, ret;
1082
1083 for (i = 0; i < early_ioapic_map_size; ++i) {
1084 ret = add_special_device(IVHD_SPECIAL_IOAPIC,
1085 early_ioapic_map[i].id,
1086 &early_ioapic_map[i].devid,
1087 early_ioapic_map[i].cmd_line);
1088 if (ret)
1089 return ret;
1090 }
1091
1092 for (i = 0; i < early_hpet_map_size; ++i) {
1093 ret = add_special_device(IVHD_SPECIAL_HPET,
1094 early_hpet_map[i].id,
1095 &early_hpet_map[i].devid,
1096 early_hpet_map[i].cmd_line);
1097 if (ret)
1098 return ret;
1099 }
1100
1101 for (i = 0; i < early_acpihid_map_size; ++i) {
1102 ret = add_acpi_hid_device(early_acpihid_map[i].hid,
1103 early_acpihid_map[i].uid,
1104 &early_acpihid_map[i].devid,
1105 early_acpihid_map[i].cmd_line);
1106 if (ret)
1107 return ret;
1108 }
1109
1110 return 0;
1111 }
1112
1113
1114
1115
1116
1117 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
1118 {
1119 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
1120
1121 if (!(m->flags & IVMD_FLAG_EXCL_RANGE))
1122 return;
1123
1124 if (iommu) {
1125
1126
1127
1128
1129
1130 set_dev_entry_bit(devid, DEV_ENTRY_EX);
1131 iommu->exclusion_start = m->range_start;
1132 iommu->exclusion_length = m->range_length;
1133 }
1134 }
1135
1136
1137
1138
1139
1140 static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1141 struct ivhd_header *h)
1142 {
1143 u8 *p = (u8 *)h;
1144 u8 *end = p, flags = 0;
1145 u16 devid = 0, devid_start = 0, devid_to = 0;
1146 u32 dev_i, ext_flags = 0;
1147 bool alias = false;
1148 struct ivhd_entry *e;
1149 u32 ivhd_size;
1150 int ret;
1151
1152
1153 ret = add_early_maps();
1154 if (ret)
1155 return ret;
1156
1157 amd_iommu_apply_ivrs_quirks();
1158
1159
1160
1161
1162 iommu->acpi_flags = h->flags;
1163
1164
1165
1166
1167 ivhd_size = get_ivhd_header_size(h);
1168 if (!ivhd_size) {
1169 pr_err("Unsupported IVHD type %#x\n", h->type);
1170 return -EINVAL;
1171 }
1172
1173 p += ivhd_size;
1174
1175 end += h->length;
1176
1177
1178 while (p < end) {
1179 e = (struct ivhd_entry *)p;
1180 switch (e->type) {
1181 case IVHD_DEV_ALL:
1182
1183 DUMP_printk(" DEV_ALL\t\t\tflags: %02x\n", e->flags);
1184
1185 for (dev_i = 0; dev_i <= amd_iommu_last_bdf; ++dev_i)
1186 set_dev_entry_from_acpi(iommu, dev_i, e->flags, 0);
1187 break;
1188 case IVHD_DEV_SELECT:
1189
1190 DUMP_printk(" DEV_SELECT\t\t\t devid: %02x:%02x.%x "
1191 "flags: %02x\n",
1192 PCI_BUS_NUM(e->devid),
1193 PCI_SLOT(e->devid),
1194 PCI_FUNC(e->devid),
1195 e->flags);
1196
1197 devid = e->devid;
1198 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1199 break;
1200 case IVHD_DEV_SELECT_RANGE_START:
1201
1202 DUMP_printk(" DEV_SELECT_RANGE_START\t "
1203 "devid: %02x:%02x.%x flags: %02x\n",
1204 PCI_BUS_NUM(e->devid),
1205 PCI_SLOT(e->devid),
1206 PCI_FUNC(e->devid),
1207 e->flags);
1208
1209 devid_start = e->devid;
1210 flags = e->flags;
1211 ext_flags = 0;
1212 alias = false;
1213 break;
1214 case IVHD_DEV_ALIAS:
1215
1216 DUMP_printk(" DEV_ALIAS\t\t\t devid: %02x:%02x.%x "
1217 "flags: %02x devid_to: %02x:%02x.%x\n",
1218 PCI_BUS_NUM(e->devid),
1219 PCI_SLOT(e->devid),
1220 PCI_FUNC(e->devid),
1221 e->flags,
1222 PCI_BUS_NUM(e->ext >> 8),
1223 PCI_SLOT(e->ext >> 8),
1224 PCI_FUNC(e->ext >> 8));
1225
1226 devid = e->devid;
1227 devid_to = e->ext >> 8;
1228 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1229 set_dev_entry_from_acpi(iommu, devid_to, e->flags, 0);
1230 amd_iommu_alias_table[devid] = devid_to;
1231 break;
1232 case IVHD_DEV_ALIAS_RANGE:
1233
1234 DUMP_printk(" DEV_ALIAS_RANGE\t\t "
1235 "devid: %02x:%02x.%x flags: %02x "
1236 "devid_to: %02x:%02x.%x\n",
1237 PCI_BUS_NUM(e->devid),
1238 PCI_SLOT(e->devid),
1239 PCI_FUNC(e->devid),
1240 e->flags,
1241 PCI_BUS_NUM(e->ext >> 8),
1242 PCI_SLOT(e->ext >> 8),
1243 PCI_FUNC(e->ext >> 8));
1244
1245 devid_start = e->devid;
1246 flags = e->flags;
1247 devid_to = e->ext >> 8;
1248 ext_flags = 0;
1249 alias = true;
1250 break;
1251 case IVHD_DEV_EXT_SELECT:
1252
1253 DUMP_printk(" DEV_EXT_SELECT\t\t devid: %02x:%02x.%x "
1254 "flags: %02x ext: %08x\n",
1255 PCI_BUS_NUM(e->devid),
1256 PCI_SLOT(e->devid),
1257 PCI_FUNC(e->devid),
1258 e->flags, e->ext);
1259
1260 devid = e->devid;
1261 set_dev_entry_from_acpi(iommu, devid, e->flags,
1262 e->ext);
1263 break;
1264 case IVHD_DEV_EXT_SELECT_RANGE:
1265
1266 DUMP_printk(" DEV_EXT_SELECT_RANGE\t devid: "
1267 "%02x:%02x.%x flags: %02x ext: %08x\n",
1268 PCI_BUS_NUM(e->devid),
1269 PCI_SLOT(e->devid),
1270 PCI_FUNC(e->devid),
1271 e->flags, e->ext);
1272
1273 devid_start = e->devid;
1274 flags = e->flags;
1275 ext_flags = e->ext;
1276 alias = false;
1277 break;
1278 case IVHD_DEV_RANGE_END:
1279
1280 DUMP_printk(" DEV_RANGE_END\t\t devid: %02x:%02x.%x\n",
1281 PCI_BUS_NUM(e->devid),
1282 PCI_SLOT(e->devid),
1283 PCI_FUNC(e->devid));
1284
1285 devid = e->devid;
1286 for (dev_i = devid_start; dev_i <= devid; ++dev_i) {
1287 if (alias) {
1288 amd_iommu_alias_table[dev_i] = devid_to;
1289 set_dev_entry_from_acpi(iommu,
1290 devid_to, flags, ext_flags);
1291 }
1292 set_dev_entry_from_acpi(iommu, dev_i,
1293 flags, ext_flags);
1294 }
1295 break;
1296 case IVHD_DEV_SPECIAL: {
1297 u8 handle, type;
1298 const char *var;
1299 u16 devid;
1300 int ret;
1301
1302 handle = e->ext & 0xff;
1303 devid = (e->ext >> 8) & 0xffff;
1304 type = (e->ext >> 24) & 0xff;
1305
1306 if (type == IVHD_SPECIAL_IOAPIC)
1307 var = "IOAPIC";
1308 else if (type == IVHD_SPECIAL_HPET)
1309 var = "HPET";
1310 else
1311 var = "UNKNOWN";
1312
1313 DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n",
1314 var, (int)handle,
1315 PCI_BUS_NUM(devid),
1316 PCI_SLOT(devid),
1317 PCI_FUNC(devid));
1318
1319 ret = add_special_device(type, handle, &devid, false);
1320 if (ret)
1321 return ret;
1322
1323
1324
1325
1326
1327
1328 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1329
1330 break;
1331 }
1332 case IVHD_DEV_ACPI_HID: {
1333 u16 devid;
1334 u8 hid[ACPIHID_HID_LEN];
1335 u8 uid[ACPIHID_UID_LEN];
1336 int ret;
1337
1338 if (h->type != 0x40) {
1339 pr_err(FW_BUG "Invalid IVHD device type %#x\n",
1340 e->type);
1341 break;
1342 }
1343
1344 memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
1345 hid[ACPIHID_HID_LEN - 1] = '\0';
1346
1347 if (!(*hid)) {
1348 pr_err(FW_BUG "Invalid HID.\n");
1349 break;
1350 }
1351
1352 uid[0] = '\0';
1353 switch (e->uidf) {
1354 case UID_NOT_PRESENT:
1355
1356 if (e->uidl != 0)
1357 pr_warn(FW_BUG "Invalid UID length.\n");
1358
1359 break;
1360 case UID_IS_INTEGER:
1361
1362 sprintf(uid, "%d", e->uid);
1363
1364 break;
1365 case UID_IS_CHARACTER:
1366
1367 memcpy(uid, &e->uid, e->uidl);
1368 uid[e->uidl] = '\0';
1369
1370 break;
1371 default:
1372 break;
1373 }
1374
1375 devid = e->devid;
1376 DUMP_printk(" DEV_ACPI_HID(%s[%s])\t\tdevid: %02x:%02x.%x\n",
1377 hid, uid,
1378 PCI_BUS_NUM(devid),
1379 PCI_SLOT(devid),
1380 PCI_FUNC(devid));
1381
1382 flags = e->flags;
1383
1384 ret = add_acpi_hid_device(hid, uid, &devid, false);
1385 if (ret)
1386 return ret;
1387
1388
1389
1390
1391
1392
1393 set_dev_entry_from_acpi(iommu, devid, e->flags, 0);
1394
1395 break;
1396 }
1397 default:
1398 break;
1399 }
1400
1401 p += ivhd_entry_length(p);
1402 }
1403
1404 return 0;
1405 }
1406
1407 static void __init free_iommu_one(struct amd_iommu *iommu)
1408 {
1409 free_command_buffer(iommu);
1410 free_event_buffer(iommu);
1411 free_ppr_log(iommu);
1412 free_ga_log(iommu);
1413 iommu_unmap_mmio_space(iommu);
1414 }
1415
1416 static void __init free_iommu_all(void)
1417 {
1418 struct amd_iommu *iommu, *next;
1419
1420 for_each_iommu_safe(iommu, next) {
1421 list_del(&iommu->list);
1422 free_iommu_one(iommu);
1423 kfree(iommu);
1424 }
1425 }
1426
1427
1428
1429
1430
1431
1432
1433 static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1434 {
1435 u32 value;
1436
1437 if ((boot_cpu_data.x86 != 0x15) ||
1438 (boot_cpu_data.x86_model < 0x10) ||
1439 (boot_cpu_data.x86_model > 0x1f))
1440 return;
1441
1442 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1443 pci_read_config_dword(iommu->dev, 0xf4, &value);
1444
1445 if (value & BIT(2))
1446 return;
1447
1448
1449 pci_write_config_dword(iommu->dev, 0xf0, 0x90 | (1 << 8));
1450
1451 pci_write_config_dword(iommu->dev, 0xf4, value | 0x4);
1452 pci_info(iommu->dev, "Applying erratum 746 workaround\n");
1453
1454
1455 pci_write_config_dword(iommu->dev, 0xf0, 0x90);
1456 }
1457
1458
1459
1460
1461
1462
1463
1464 static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1465 {
1466 u32 value;
1467
1468 if ((boot_cpu_data.x86 != 0x15) ||
1469 (boot_cpu_data.x86_model < 0x30) ||
1470 (boot_cpu_data.x86_model > 0x3f))
1471 return;
1472
1473
1474 value = iommu_read_l2(iommu, 0x47);
1475
1476 if (value & BIT(0))
1477 return;
1478
1479
1480 iommu_write_l2(iommu, 0x47, value | BIT(0));
1481
1482 pci_info(iommu->dev, "Applying ATS write check workaround\n");
1483 }
1484
1485
1486
1487
1488
1489
1490 static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1491 {
1492 int ret;
1493
1494 raw_spin_lock_init(&iommu->lock);
1495
1496
1497 list_add_tail(&iommu->list, &amd_iommu_list);
1498 iommu->index = amd_iommus_present++;
1499
1500 if (unlikely(iommu->index >= MAX_IOMMUS)) {
1501 WARN(1, "System has more IOMMUs than supported by this driver\n");
1502 return -ENOSYS;
1503 }
1504
1505
1506 amd_iommus[iommu->index] = iommu;
1507
1508
1509
1510
1511 iommu->devid = h->devid;
1512 iommu->cap_ptr = h->cap_ptr;
1513 iommu->pci_seg = h->pci_seg;
1514 iommu->mmio_phys = h->mmio_phys;
1515
1516 switch (h->type) {
1517 case 0x10:
1518
1519 if ((h->efr_attr != 0) &&
1520 ((h->efr_attr & (0xF << 13)) != 0) &&
1521 ((h->efr_attr & (0x3F << 17)) != 0))
1522 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1523 else
1524 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1525 if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
1526 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1527 break;
1528 case 0x11:
1529 case 0x40:
1530 if (h->efr_reg & (1 << 9))
1531 iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
1532 else
1533 iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;
1534 if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0))
1535 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
1536
1537
1538
1539
1540
1541
1542 if ((h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) &&
1543 (h->efr_reg & BIT(IOMMU_EFR_MSICAPMMIOSUP_SHIFT)))
1544 amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;
1545 break;
1546 default:
1547 return -EINVAL;
1548 }
1549
1550 iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
1551 iommu->mmio_phys_end);
1552 if (!iommu->mmio_base)
1553 return -ENOMEM;
1554
1555 if (alloc_command_buffer(iommu))
1556 return -ENOMEM;
1557
1558 if (alloc_event_buffer(iommu))
1559 return -ENOMEM;
1560
1561 iommu->int_enabled = false;
1562
1563 init_translation_status(iommu);
1564 if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
1565 iommu_disable(iommu);
1566 clear_translation_pre_enabled(iommu);
1567 pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
1568 iommu->index);
1569 }
1570 if (amd_iommu_pre_enabled)
1571 amd_iommu_pre_enabled = translation_pre_enabled(iommu);
1572
1573 ret = init_iommu_from_acpi(iommu, h);
1574 if (ret)
1575 return ret;
1576
1577 ret = amd_iommu_create_irq_domain(iommu);
1578 if (ret)
1579 return ret;
1580
1581
1582
1583
1584
1585 amd_iommu_rlookup_table[iommu->devid] = NULL;
1586
1587 return 0;
1588 }
1589
1590
1591
1592
1593
1594
1595
1596 static u8 get_highest_supported_ivhd_type(struct acpi_table_header *ivrs)
1597 {
1598 u8 *base = (u8 *)ivrs;
1599 struct ivhd_header *ivhd = (struct ivhd_header *)
1600 (base + IVRS_HEADER_LENGTH);
1601 u8 last_type = ivhd->type;
1602 u16 devid = ivhd->devid;
1603
1604 while (((u8 *)ivhd - base < ivrs->length) &&
1605 (ivhd->type <= ACPI_IVHD_TYPE_MAX_SUPPORTED)) {
1606 u8 *p = (u8 *) ivhd;
1607
1608 if (ivhd->devid == devid)
1609 last_type = ivhd->type;
1610 ivhd = (struct ivhd_header *)(p + ivhd->length);
1611 }
1612
1613 return last_type;
1614 }
1615
1616
1617
1618
1619
1620 static int __init init_iommu_all(struct acpi_table_header *table)
1621 {
1622 u8 *p = (u8 *)table, *end = (u8 *)table;
1623 struct ivhd_header *h;
1624 struct amd_iommu *iommu;
1625 int ret;
1626
1627 end += table->length;
1628 p += IVRS_HEADER_LENGTH;
1629
1630 while (p < end) {
1631 h = (struct ivhd_header *)p;
1632 if (*p == amd_iommu_target_ivhd_type) {
1633
1634 DUMP_printk("device: %02x:%02x.%01x cap: %04x "
1635 "seg: %d flags: %01x info %04x\n",
1636 PCI_BUS_NUM(h->devid), PCI_SLOT(h->devid),
1637 PCI_FUNC(h->devid), h->cap_ptr,
1638 h->pci_seg, h->flags, h->info);
1639 DUMP_printk(" mmio-addr: %016llx\n",
1640 h->mmio_phys);
1641
1642 iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
1643 if (iommu == NULL)
1644 return -ENOMEM;
1645
1646 ret = init_iommu_one(iommu, h);
1647 if (ret)
1648 return ret;
1649 }
1650 p += h->length;
1651
1652 }
1653 WARN_ON(p != end);
1654
1655 return 0;
1656 }
1657
1658 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
1659 u8 fxn, u64 *value, bool is_write);
1660
1661 static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1662 {
1663 struct pci_dev *pdev = iommu->dev;
1664 u64 val = 0xabcd, val2 = 0, save_reg = 0;
1665
1666 if (!iommu_feature(iommu, FEATURE_PC))
1667 return;
1668
1669 amd_iommu_pc_present = true;
1670
1671
1672 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false))
1673 goto pc_false;
1674
1675
1676 if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) ||
1677 (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) ||
1678 (val != val2))
1679 goto pc_false;
1680
1681
1682 if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true))
1683 goto pc_false;
1684
1685 pci_info(pdev, "IOMMU performance counters supported\n");
1686
1687 val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET);
1688 iommu->max_banks = (u8) ((val >> 12) & 0x3f);
1689 iommu->max_counters = (u8) ((val >> 7) & 0xf);
1690
1691 return;
1692
1693 pc_false:
1694 pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n");
1695 amd_iommu_pc_present = false;
1696 return;
1697 }
1698
1699 static ssize_t amd_iommu_show_cap(struct device *dev,
1700 struct device_attribute *attr,
1701 char *buf)
1702 {
1703 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1704 return sprintf(buf, "%x\n", iommu->cap);
1705 }
1706 static DEVICE_ATTR(cap, S_IRUGO, amd_iommu_show_cap, NULL);
1707
1708 static ssize_t amd_iommu_show_features(struct device *dev,
1709 struct device_attribute *attr,
1710 char *buf)
1711 {
1712 struct amd_iommu *iommu = dev_to_amd_iommu(dev);
1713 return sprintf(buf, "%llx\n", iommu->features);
1714 }
1715 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
1716
1717 static struct attribute *amd_iommu_attrs[] = {
1718 &dev_attr_cap.attr,
1719 &dev_attr_features.attr,
1720 NULL,
1721 };
1722
1723 static struct attribute_group amd_iommu_group = {
1724 .name = "amd-iommu",
1725 .attrs = amd_iommu_attrs,
1726 };
1727
1728 static const struct attribute_group *amd_iommu_groups[] = {
1729 &amd_iommu_group,
1730 NULL,
1731 };
1732
1733 static int __init iommu_init_pci(struct amd_iommu *iommu)
1734 {
1735 int cap_ptr = iommu->cap_ptr;
1736 u32 range, misc, low, high;
1737 int ret;
1738
1739 iommu->dev = pci_get_domain_bus_and_slot(0, PCI_BUS_NUM(iommu->devid),
1740 iommu->devid & 0xff);
1741 if (!iommu->dev)
1742 return -ENODEV;
1743
1744
1745 iommu->dev->match_driver = false;
1746
1747 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
1748 &iommu->cap);
1749 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
1750 &range);
1751 pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
1752 &misc);
1753
1754 if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
1755 amd_iommu_iotlb_sup = false;
1756
1757
1758 low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
1759 high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
1760
1761 iommu->features = ((u64)high << 32) | low;
1762
1763 if (iommu_feature(iommu, FEATURE_GT)) {
1764 int glxval;
1765 u32 max_pasid;
1766 u64 pasmax;
1767
1768 pasmax = iommu->features & FEATURE_PASID_MASK;
1769 pasmax >>= FEATURE_PASID_SHIFT;
1770 max_pasid = (1 << (pasmax + 1)) - 1;
1771
1772 amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
1773
1774 BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
1775
1776 glxval = iommu->features & FEATURE_GLXVAL_MASK;
1777 glxval >>= FEATURE_GLXVAL_SHIFT;
1778
1779 if (amd_iommu_max_glx_val == -1)
1780 amd_iommu_max_glx_val = glxval;
1781 else
1782 amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
1783 }
1784
1785 if (iommu_feature(iommu, FEATURE_GT) &&
1786 iommu_feature(iommu, FEATURE_PPR)) {
1787 iommu->is_iommu_v2 = true;
1788 amd_iommu_v2_present = true;
1789 }
1790
1791 if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
1792 return -ENOMEM;
1793
1794 ret = iommu_init_ga(iommu);
1795 if (ret)
1796 return ret;
1797
1798 if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
1799 amd_iommu_np_cache = true;
1800
1801 init_iommu_perf_ctr(iommu);
1802
1803 if (is_rd890_iommu(iommu->dev)) {
1804 int i, j;
1805
1806 iommu->root_pdev =
1807 pci_get_domain_bus_and_slot(0, iommu->dev->bus->number,
1808 PCI_DEVFN(0, 0));
1809
1810
1811
1812
1813
1814
1815 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
1816 &iommu->stored_addr_lo);
1817 pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
1818 &iommu->stored_addr_hi);
1819
1820
1821 iommu->stored_addr_lo &= ~1;
1822
1823 for (i = 0; i < 6; i++)
1824 for (j = 0; j < 0x12; j++)
1825 iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
1826
1827 for (i = 0; i < 0x83; i++)
1828 iommu->stored_l2[i] = iommu_read_l2(iommu, i);
1829 }
1830
1831 amd_iommu_erratum_746_workaround(iommu);
1832 amd_iommu_ats_write_check_workaround(iommu);
1833
1834 iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
1835 amd_iommu_groups, "ivhd%d", iommu->index);
1836 iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
1837 iommu_device_register(&iommu->iommu);
1838
1839 return pci_enable_device(iommu->dev);
1840 }
1841
1842 static void print_iommu_info(void)
1843 {
1844 static const char * const feat_str[] = {
1845 "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
1846 "IA", "GA", "HE", "PC"
1847 };
1848 struct amd_iommu *iommu;
1849
1850 for_each_iommu(iommu) {
1851 struct pci_dev *pdev = iommu->dev;
1852 int i;
1853
1854 pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr);
1855
1856 if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
1857 pci_info(pdev, "Extended features (%#llx):\n",
1858 iommu->features);
1859 for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
1860 if (iommu_feature(iommu, (1ULL << i)))
1861 pr_cont(" %s", feat_str[i]);
1862 }
1863
1864 if (iommu->features & FEATURE_GAM_VAPIC)
1865 pr_cont(" GA_vAPIC");
1866
1867 pr_cont("\n");
1868 }
1869 }
1870 if (irq_remapping_enabled) {
1871 pr_info("Interrupt remapping enabled\n");
1872 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
1873 pr_info("Virtual APIC enabled\n");
1874 if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
1875 pr_info("X2APIC enabled\n");
1876 }
1877 }
1878
1879 static int __init amd_iommu_init_pci(void)
1880 {
1881 struct amd_iommu *iommu;
1882 int ret = 0;
1883
1884 for_each_iommu(iommu) {
1885 ret = iommu_init_pci(iommu);
1886 if (ret)
1887 break;
1888 }
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900 ret = amd_iommu_init_api();
1901
1902 init_device_table_dma();
1903
1904 for_each_iommu(iommu)
1905 iommu_flush_all_caches(iommu);
1906
1907 if (!ret)
1908 print_iommu_info();
1909
1910 return ret;
1911 }
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922 static int iommu_setup_msi(struct amd_iommu *iommu)
1923 {
1924 int r;
1925
1926 r = pci_enable_msi(iommu->dev);
1927 if (r)
1928 return r;
1929
1930 r = request_threaded_irq(iommu->dev->irq,
1931 amd_iommu_int_handler,
1932 amd_iommu_int_thread,
1933 0, "AMD-Vi",
1934 iommu);
1935
1936 if (r) {
1937 pci_disable_msi(iommu->dev);
1938 return r;
1939 }
1940
1941 iommu->int_enabled = true;
1942
1943 return 0;
1944 }
1945
1946 #define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
1947 #define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
1948 #define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
1949 #define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
1950
1951
1952
1953
1954
1955
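/*
 * Mirror the current MSI address/data programming into the IOMMU's
 * x2APIC (XT) interrupt capability registers so that event, PPR and GA
 * log interrupts follow the same destination, e.g. after an affinity
 * change.
 */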
1956 static void iommu_update_intcapxt(struct amd_iommu *iommu)
1957 {
1958 u64 val;
1959 u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
1960 u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
1961 u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
1962 bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
1963 u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
1964
1965 if (x2apic_enabled())
1966 dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
1967
1968 val = XT_INT_VEC(data & 0xFF) |
1969 XT_INT_DEST_MODE(dm) |
1970 XT_INT_DEST_LO(dest) |
1971 XT_INT_DEST_HI(dest);
1972
1973
1974
1975
1976
1977 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
1978 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
1979 writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
1980 }
1981
1982 static void _irq_notifier_notify(struct irq_affinity_notify *notify,
1983 const cpumask_t *mask)
1984 {
1985 struct amd_iommu *iommu;
1986
1987 for_each_iommu(iommu) {
1988 if (iommu->dev->irq == notify->irq) {
1989 iommu_update_intcapxt(iommu);
1990 break;
1991 }
1992 }
1993 }
1994
1995 static void _irq_notifier_release(struct kref *ref)
1996 {
1997 }
1998
1999 static int iommu_init_intcapxt(struct amd_iommu *iommu)
2000 {
2001 int ret;
2002 struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
2003
2004
2005
2006
2007
2008 if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
2009 return 0;
2010
2011
2012
2013
2014
2015 notify->irq = iommu->dev->irq;
2016 notify->notify = _irq_notifier_notify;
2017 notify->release = _irq_notifier_release;
2018 ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
2019 if (ret) {
2020 pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
2021 iommu->devid, iommu->dev->irq);
2022 return ret;
2023 }
2024
2025 iommu_update_intcapxt(iommu);
2026 iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
2027 return ret;
2028 }
2029
2030 static int iommu_init_msi(struct amd_iommu *iommu)
2031 {
2032 int ret;
2033
2034 if (iommu->int_enabled)
2035 goto enable_faults;
2036
2037 if (iommu->dev->msi_cap)
2038 ret = iommu_setup_msi(iommu);
2039 else
2040 ret = -ENODEV;
2041
2042 if (ret)
2043 return ret;
2044
2045 enable_faults:
2046 ret = iommu_init_intcapxt(iommu);
2047 if (ret)
2048 return ret;
2049
2050 iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
2051
2052 if (iommu->ppr_log != NULL)
2053 iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
2054
2055 iommu_ga_log_enable(iommu);
2056
2057 return 0;
2058 }
2059
2060
2061
2062
2063
2064
2065
2066
2067
2068 static void __init free_unity_maps(void)
2069 {
2070 struct unity_map_entry *entry, *next;
2071
2072 list_for_each_entry_safe(entry, next, &amd_iommu_unity_map, list) {
2073 list_del(&entry->list);
2074 kfree(entry);
2075 }
2076 }
2077
2078
2079 static int __init init_exclusion_range(struct ivmd_header *m)
2080 {
2081 int i;
2082
2083 switch (m->type) {
2084 case ACPI_IVMD_TYPE:
2085 set_device_exclusion_range(m->devid, m);
2086 break;
2087 case ACPI_IVMD_TYPE_ALL:
2088 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2089 set_device_exclusion_range(i, m);
2090 break;
2091 case ACPI_IVMD_TYPE_RANGE:
2092 for (i = m->devid; i <= m->aux; ++i)
2093 set_device_exclusion_range(i, m);
2094 break;
2095 default:
2096 break;
2097 }
2098
2099 return 0;
2100 }
2101
2102
2103 static int __init init_unity_map_range(struct ivmd_header *m)
2104 {
2105 struct unity_map_entry *e = NULL;
2106 char *s;
2107
2108 e = kzalloc(sizeof(*e), GFP_KERNEL);
2109 if (e == NULL)
2110 return -ENOMEM;
2111
2112 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2113 init_exclusion_range(m);
2114
2115 switch (m->type) {
2116 default:
2117 kfree(e);
2118 return 0;
2119 case ACPI_IVMD_TYPE:
2120 s = "IVMD_TYPEi\t\t\t";
2121 e->devid_start = e->devid_end = m->devid;
2122 break;
2123 case ACPI_IVMD_TYPE_ALL:
2124 s = "IVMD_TYPE_ALL\t\t";
2125 e->devid_start = 0;
2126 e->devid_end = amd_iommu_last_bdf;
2127 break;
2128 case ACPI_IVMD_TYPE_RANGE:
2129 s = "IVMD_TYPE_RANGE\t\t";
2130 e->devid_start = m->devid;
2131 e->devid_end = m->aux;
2132 break;
2133 }
2134 e->address_start = PAGE_ALIGN(m->range_start);
2135 e->address_end = e->address_start + PAGE_ALIGN(m->range_length);
2136 e->prot = m->flags >> 1;
2137
2138 DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x"
2139 " range_start: %016llx range_end: %016llx flags: %x\n", s,
2140 PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start),
2141 PCI_FUNC(e->devid_start), PCI_BUS_NUM(e->devid_end),
2142 PCI_SLOT(e->devid_end), PCI_FUNC(e->devid_end),
2143 e->address_start, e->address_end, m->flags);
2144
2145 list_add_tail(&e->list, &amd_iommu_unity_map);
2146
2147 return 0;
2148 }
2149
2150
2151 static int __init init_memory_definitions(struct acpi_table_header *table)
2152 {
2153 u8 *p = (u8 *)table, *end = (u8 *)table;
2154 struct ivmd_header *m;
2155
2156 end += table->length;
2157 p += IVRS_HEADER_LENGTH;
2158
2159 while (p < end) {
2160 m = (struct ivmd_header *)p;
2161 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2162 init_unity_map_range(m);
2163
2164 p += m->length;
2165 }
2166
2167 return 0;
2168 }
2169
2170
2171
2172
2173 static void init_device_table_dma(void)
2174 {
2175 u32 devid;
2176
2177 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2178 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
2179 set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION);
2180 }
2181 }
2182
2183 static void __init uninit_device_table_dma(void)
2184 {
2185 u32 devid;
2186
2187 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
2188 amd_iommu_dev_table[devid].data[0] = 0ULL;
2189 amd_iommu_dev_table[devid].data[1] = 0ULL;
2190 }
2191 }
2192
2193 static void init_device_table(void)
2194 {
2195 u32 devid;
2196
2197 if (!amd_iommu_irq_remap)
2198 return;
2199
2200 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid)
2201 set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN);
2202 }
2203
2204 static void iommu_init_flags(struct amd_iommu *iommu)
2205 {
2206 iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ?
2207 iommu_feature_enable(iommu, CONTROL_HT_TUN_EN) :
2208 iommu_feature_disable(iommu, CONTROL_HT_TUN_EN);
2209
2210 iommu->acpi_flags & IVHD_FLAG_PASSPW_EN_MASK ?
2211 iommu_feature_enable(iommu, CONTROL_PASSPW_EN) :
2212 iommu_feature_disable(iommu, CONTROL_PASSPW_EN);
2213
2214 iommu->acpi_flags & IVHD_FLAG_RESPASSPW_EN_MASK ?
2215 iommu_feature_enable(iommu, CONTROL_RESPASSPW_EN) :
2216 iommu_feature_disable(iommu, CONTROL_RESPASSPW_EN);
2217
2218 iommu->acpi_flags & IVHD_FLAG_ISOC_EN_MASK ?
2219 iommu_feature_enable(iommu, CONTROL_ISOC_EN) :
2220 iommu_feature_disable(iommu, CONTROL_ISOC_EN);
2221
2222
2223
2224
2225 iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
2226
2227
2228 iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
2229 }
2230
2231 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
2232 {
2233 int i, j;
2234 u32 ioc_feature_control;
2235 struct pci_dev *pdev = iommu->root_pdev;
2236
2237
2238 if (!is_rd890_iommu(iommu->dev) || !pdev)
2239 return;
2240
2241
2242
2243
2244
2245
2246
2247 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
2248 pci_read_config_dword(pdev, 0x64, &ioc_feature_control);
2249
2250
2251 if (!(ioc_feature_control & 0x1))
2252 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
2253
2254
2255 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2256 iommu->stored_addr_lo);
2257 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 8,
2258 iommu->stored_addr_hi);
2259
2260
2261 for (i = 0; i < 6; i++)
2262 for (j = 0; j < 0x12; j++)
2263 iommu_write_l1(iommu, i, j, iommu->stored_l1[i][j]);
2264
2265
2266 for (i = 0; i < 0x83; i++)
2267 iommu_write_l2(iommu, i, iommu->stored_l2[i]);
2268
2269
2270 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
2271 iommu->stored_addr_lo | 1);
2272 }
2273
2274 static void iommu_enable_ga(struct amd_iommu *iommu)
2275 {
2276 #ifdef CONFIG_IRQ_REMAP
2277 switch (amd_iommu_guest_ir) {
2278 case AMD_IOMMU_GUEST_IR_VAPIC:
2279 iommu_feature_enable(iommu, CONTROL_GAM_EN);
2280
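/* Fall through: vAPIC mode also needs CONTROL_GA_EN and the 128-bit IRTE ops */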
2281 case AMD_IOMMU_GUEST_IR_LEGACY_GA:
2282 iommu_feature_enable(iommu, CONTROL_GA_EN);
2283 iommu->irte_ops = &irte_128_ops;
2284 break;
2285 default:
2286 iommu->irte_ops = &irte_32_ops;
2287 break;
2288 }
2289 #endif
2290 }
2291
2292 static void early_enable_iommu(struct amd_iommu *iommu)
2293 {
2294 iommu_disable(iommu);
2295 iommu_init_flags(iommu);
2296 iommu_set_device_table(iommu);
2297 iommu_enable_command_buffer(iommu);
2298 iommu_enable_event_buffer(iommu);
2299 iommu_set_exclusion_range(iommu);
2300 iommu_enable_ga(iommu);
2301 iommu_enable_xt(iommu);
2302 iommu_enable(iommu);
2303 iommu_flush_all_caches(iommu);
2304 }
2305
2306
2307
2308
2309
2310
2311
2312
2313
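/*
 * Bring all IOMMUs up early. If a pre-enabled device table from the
 * previous (crashed) kernel could be copied, re-use it instead of the
 * freshly allocated one so running devices are not disturbed; otherwise
 * enable each IOMMU from scratch.
 */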
2314 static void early_enable_iommus(void)
2315 {
2316 struct amd_iommu *iommu;
2317
2318
2319 if (!copy_device_table()) {
2320 /*
2321  * Copying the device table failed: either the old kernel did not
2322  * leave pre-enabled IOMMUs behind or the copy itself went wrong.
2323  * Report the error where relevant and fall back to a fresh table.
2324  */
2325 if (amd_iommu_pre_enabled)
2326 pr_err("Failed to copy DEV table from previous kernel.\n");
2327 if (old_dev_tbl_cpy != NULL)
2328 free_pages((unsigned long)old_dev_tbl_cpy,
2329 get_order(dev_table_size));
2330
2331 for_each_iommu(iommu) {
2332 clear_translation_pre_enabled(iommu);
2333 early_enable_iommu(iommu);
2334 }
2335 } else {
2336 pr_info("Copied DEV table from previous kernel.\n");
2337 free_pages((unsigned long)amd_iommu_dev_table,
2338 get_order(dev_table_size));
2339 amd_iommu_dev_table = old_dev_tbl_cpy;
2340 for_each_iommu(iommu) {
2341 iommu_disable_command_buffer(iommu);
2342 iommu_disable_event_buffer(iommu);
2343 iommu_enable_command_buffer(iommu);
2344 iommu_enable_event_buffer(iommu);
2345 iommu_enable_ga(iommu);
2346 iommu_enable_xt(iommu);
2347 iommu_set_device_table(iommu);
2348 iommu_flush_all_caches(iommu);
2349 }
2350 }
2351
2352 #ifdef CONFIG_IRQ_REMAP
2353 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2354 amd_iommu_irq_ops.capability |= (1 << IRQ_POSTING_CAP);
2355 #endif
2356 }
2357
2358 static void enable_iommus_v2(void)
2359 {
2360 struct amd_iommu *iommu;
2361
2362 for_each_iommu(iommu) {
2363 iommu_enable_ppr_log(iommu);
2364 iommu_enable_gt(iommu);
2365 }
2366 }
2367
2368 static void enable_iommus(void)
2369 {
2370 early_enable_iommus();
2371
2372 enable_iommus_v2();
2373 }
2374
2375 static void disable_iommus(void)
2376 {
2377 struct amd_iommu *iommu;
2378
2379 for_each_iommu(iommu)
2380 iommu_disable(iommu);
2381
2382 #ifdef CONFIG_IRQ_REMAP
2383 if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
2384 amd_iommu_irq_ops.capability &= ~(1 << IRQ_POSTING_CAP);
2385 #endif
2386 }
2387
2388 /*
2389  * Suspend/Resume support: IOMMUs are disabled on suspend and fully
2390  * re-configured and re-enabled on resume.
2391  */
2392
2393 static void amd_iommu_resume(void)
2394 {
2395 struct amd_iommu *iommu;
2396
2397 for_each_iommu(iommu)
2398 iommu_apply_resume_quirks(iommu);
2399
2400 /* re-enable the hardware */
2401 enable_iommus();
2402
2403 amd_iommu_enable_interrupts();
2404 }
2405
2406 static int amd_iommu_suspend(void)
2407 {
2408 /* disable IOMMUs to go out of the way for BIOS */
2409 disable_iommus();
2410
2411 return 0;
2412 }
2413
2414 static struct syscore_ops amd_iommu_syscore_ops = {
2415 .suspend = amd_iommu_suspend,
2416 .resume = amd_iommu_resume,
2417 };
2418
2419 static void __init free_iommu_resources(void)
2420 {
2421 kmemleak_free(irq_lookup_table);
2422 free_pages((unsigned long)irq_lookup_table,
2423 get_order(rlookup_table_size));
2424 irq_lookup_table = NULL;
2425
2426 kmem_cache_destroy(amd_iommu_irq_cache);
2427 amd_iommu_irq_cache = NULL;
2428
2429 free_pages((unsigned long)amd_iommu_rlookup_table,
2430 get_order(rlookup_table_size));
2431 amd_iommu_rlookup_table = NULL;
2432
2433 free_pages((unsigned long)amd_iommu_alias_table,
2434 get_order(alias_table_size));
2435 amd_iommu_alias_table = NULL;
2436
2437 free_pages((unsigned long)amd_iommu_dev_table,
2438 get_order(dev_table_size));
2439 amd_iommu_dev_table = NULL;
2440
2441 free_iommu_all();
2442 }
2443
2444 /* SB IOAPIC is always on this device in AMD systems */
2445 #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0))
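/*
 * For illustration, assuming the usual PCI_DEVFN(slot, func) encoding of
 * ((slot) << 3) | (func): bus 0x00, device 0x14, function 0 expands to
 * IVRS device id (0x00 << 8) | (0x14 << 3) | 0 == 0x00a0, i.e. the
 * southbridge IOAPIC is expected at PCI address 00:14.0.
 */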
2446
2447 static bool __init check_ioapic_information(void)
2448 {
2449 const char *fw_bug = FW_BUG;
2450 bool ret, has_sb_ioapic;
2451 int idx;
2452
2453 has_sb_ioapic = false;
2454 ret = false;
2455
2456 /*
2457  * If we have map overrides on the kernel command line the
2458  * messages in this function might not describe firmware bugs
2459  * anymore - so be careful
2460  */
2461 if (cmdline_maps)
2462 fw_bug = "";
2463
2464 for (idx = 0; idx < nr_ioapics; idx++) {
2465 int devid, id = mpc_ioapic_id(idx);
2466
2467 devid = get_ioapic_devid(id);
2468 if (devid < 0) {
2469 pr_err("%s: IOAPIC[%d] not in IVRS table\n",
2470 fw_bug, id);
2471 ret = false;
2472 } else if (devid == IOAPIC_SB_DEVID) {
2473 has_sb_ioapic = true;
2474 ret = true;
2475 }
2476 }
2477
2478 if (!has_sb_ioapic) {
2479 /*
2480  * We expect the SB IOAPIC to be listed in the IVRS
2481  * table. The system timer is connected to the SB IOAPIC
2482  * and if we don't have it in the list the system will
2483  * panic at boot time. This situation usually happens
2484  * when the BIOS is buggy and provides us the wrong
2485  * device id for the IO-APIC in the system.
2486  */
2487 pr_err("%s: No southbridge IOAPIC found\n", fw_bug);
2488 }
2489
2490 if (!ret)
2491 pr_err("Disabling interrupt remapping\n");
2492
2493 return ret;
2494 }
2495
2496 static void __init free_dma_resources(void)
2497 {
2498 free_pages((unsigned long)amd_iommu_pd_alloc_bitmap,
2499 get_order(MAX_DOMAIN_ID/8));
2500 amd_iommu_pd_alloc_bitmap = NULL;
2501
2502 free_unity_maps();
2503 }
2504
2505 /*
2506  * This is the hardware init function for AMD IOMMU in the system.
2507  * It is called either from amd_iommu_init() or from the interrupt
2508  * remapping setup code.
2509  *
2510  * This function basically parses the ACPI table for the AMD IOMMU (IVRS)
2511  * four times:
2512  *
2513  *   1 pass) Discover the most comprehensive IVHD type to use.
2514  *
2515  *   2 pass) Find the highest PCI device id the driver has to handle.
2516  *           Upon this information the size of the shared data
2517  *           structures is determined and they are allocated.
2518  *
2519  *   3 pass) Initialize the data structures just allocated with the
2520  *           information in the ACPI table about available AMD IOMMUs
2521  *           in the system. It also maps the PCI devices in the
2522  *           system to specific IOMMUs.
2523  *
2524  *   4 pass) After the basic data structures are allocated and
2525  *           initialized we update them with information about memory
2526  *           remapping requirements parsed out of the ACPI table in
2527  *           this last pass.
2528  *
2529  * After everything is set up the IOMMUs are enabled and the necessary
2530  * suspend and resume callbacks are registered.
2531  */
2532 static int __init early_amd_iommu_init(void)
2533 {
2534 struct acpi_table_header *ivrs_base;
2535 acpi_status status;
2536 int i, remap_cache_sz, ret = 0;
2537 u32 pci_id;
2538
2539 if (!amd_iommu_detected)
2540 return -ENODEV;
2541
2542 status = acpi_get_table("IVRS", 0, &ivrs_base);
2543 if (status == AE_NOT_FOUND)
2544 return -ENODEV;
2545 else if (ACPI_FAILURE(status)) {
2546 const char *err = acpi_format_exception(status);
2547 pr_err("IVRS table error: %s\n", err);
2548 return -EINVAL;
2549 }
2550
2551 /*
2552  * Validate the IVRS checksum here so we do not need to revalidate
2553  * the table while actually parsing it
2554  */
2555 ret = check_ivrs_checksum(ivrs_base);
2556 if (ret)
2557 goto out;
2558
2559 amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
2560 DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
2561
2562 /*
2563  * First parse ACPI tables to find the largest Bus/Dev/Func
2564  * we need to handle. Upon this information the shared data
2565  * structures for the IOMMUs in the system will be allocated.
2566  */
2567 ret = find_last_devid_acpi(ivrs_base);
2568 if (ret)
2569 goto out;
2570
2571 dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
2572 alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
2573 rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
2574
2575 /* Device table - directly used by all IOMMUs */
2576 ret = -ENOMEM;
2577 amd_iommu_dev_table = (void *)__get_free_pages(
2578 GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
2579 get_order(dev_table_size));
2580 if (amd_iommu_dev_table == NULL)
2581 goto out;
2582
2583 /*
2584  * Alias table - map PCI Bus/Dev/Func to Bus/Dev/Func the
2585  * IOMMU sees for that device
2586  */
2587 amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
2588 get_order(alias_table_size));
2589 if (amd_iommu_alias_table == NULL)
2590 goto out;
2591
2592 /* IOMMU rlookup table - find the IOMMU for a specific device */
2593 amd_iommu_rlookup_table = (void *)__get_free_pages(
2594 GFP_KERNEL | __GFP_ZERO,
2595 get_order(rlookup_table_size));
2596 if (amd_iommu_rlookup_table == NULL)
2597 goto out;
2598
2599 amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
2600 GFP_KERNEL | __GFP_ZERO,
2601 get_order(MAX_DOMAIN_ID/8));
2602 if (amd_iommu_pd_alloc_bitmap == NULL)
2603 goto out;
2604
2605 /*
2606  * let all alias entries point to themselves
2607  */
2608 for (i = 0; i <= amd_iommu_last_bdf; ++i)
2609 amd_iommu_alias_table[i] = i;
2610
2611 /*
2612  * never allocate domain 0 because it is used as the non-allocated
2613  * and error value placeholder
2614  */
2615 __set_bit(0, amd_iommu_pd_alloc_bitmap);
2616
2617 /*
2618  * now the data structures are allocated and basically initialized,
2619  * start the real ACPI table scan
2620  */
2621 ret = init_iommu_all(ivrs_base);
2622 if (ret)
2623 goto out;
2624
2625 /* Disable the IOMMU if Stoney Ridge internal graphics is present */
2626 for (i = 0; i < 32; i++) {
2627 pci_id = read_pci_config(0, i, 0, 0);
2628 if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
2629 pr_info("Disable IOMMU on Stoney Ridge\n");
2630 amd_iommu_disabled = true;
2631 break;
2632 }
2633 }
2634
2635 /* Disable any previously enabled IOMMUs */
2636 if (!is_kdump_kernel() || amd_iommu_disabled)
2637 disable_iommus();
2638
2639 if (amd_iommu_irq_remap)
2640 amd_iommu_irq_remap = check_ioapic_information();
2641
2642 if (amd_iommu_irq_remap) {
2643 /*
2644  * Interrupt remapping enabled, create kmem_cache for the
2645  * remapping tables.
2646  */
2647 ret = -ENOMEM;
2648 if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
2649 remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
2650 else
2651 remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
2652 amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
2653 remap_cache_sz,
2654 IRQ_TABLE_ALIGNMENT,
2655 0, NULL);
2656 if (!amd_iommu_irq_cache)
2657 goto out;
2658
2659 irq_lookup_table = (void *)__get_free_pages(
2660 GFP_KERNEL | __GFP_ZERO,
2661 get_order(rlookup_table_size));
2662 kmemleak_alloc(irq_lookup_table, rlookup_table_size,
2663 1, GFP_KERNEL);
2664 if (!irq_lookup_table)
2665 goto out;
2666 }
2667
2668 ret = init_memory_definitions(ivrs_base);
2669 if (ret)
2670 goto out;
2671
2672 /* init the device table */
2673 init_device_table();
2674
2675 out:
2676 /* Don't leak any ACPI memory */
2677 acpi_put_table(ivrs_base);
2678 ivrs_base = NULL;
2679
2680 return ret;
2681 }
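/*
 * Rough sizing example for the tables allocated above, assuming 32-byte
 * device table entries, 2-byte alias entries, pointer-sized rlookup
 * entries and a fully populated 16-bit device id space
 * (amd_iommu_last_bdf == 0xffff): the device table then needs
 * 65536 * 32 bytes == 2 MiB, the alias table 128 KiB and the rlookup
 * table 512 KiB on 64-bit kernels.
 */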
2682
2683 static int amd_iommu_enable_interrupts(void)
2684 {
2685 struct amd_iommu *iommu;
2686 int ret = 0;
2687
2688 for_each_iommu(iommu) {
2689 ret = iommu_init_msi(iommu);
2690 if (ret)
2691 goto out;
2692 }
2693
2694 out:
2695 return ret;
2696 }
2697
2698 static bool detect_ivrs(void)
2699 {
2700 struct acpi_table_header *ivrs_base;
2701 acpi_status status;
2702
2703 status = acpi_get_table("IVRS", 0, &ivrs_base);
2704 if (status == AE_NOT_FOUND)
2705 return false;
2706 else if (ACPI_FAILURE(status)) {
2707 const char *err = acpi_format_exception(status);
2708 pr_err("IVRS table error: %s\n", err);
2709 return false;
2710 }
2711
2712 acpi_put_table(ivrs_base);
2713
2714 /* Make sure ACS will be enabled during PCI probe */
2715 pci_request_acs();
2716
2717 return true;
2718 }
2719
2720 /*
2721  * State machine for the overall AMD IOMMU initialization. Each call to
2722  * state_next() advances init_state by one step and performs the work
2723  * needed to reach the new state, or moves to an error state on failure.
2724  */
2725
2726 static int __init state_next(void)
2727 {
2728 int ret = 0;
2729
2730 switch (init_state) {
2731 case IOMMU_START_STATE:
2732 if (!detect_ivrs()) {
2733 init_state = IOMMU_NOT_FOUND;
2734 ret = -ENODEV;
2735 } else {
2736 init_state = IOMMU_IVRS_DETECTED;
2737 }
2738 break;
2739 case IOMMU_IVRS_DETECTED:
2740 ret = early_amd_iommu_init();
2741 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
2742 if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
2743 pr_info("AMD IOMMU disabled\n");
2744 init_state = IOMMU_CMDLINE_DISABLED;
2745 ret = -EINVAL;
2746 }
2747 break;
2748 case IOMMU_ACPI_FINISHED:
2749 early_enable_iommus();
2750 x86_platform.iommu_shutdown = disable_iommus;
2751 init_state = IOMMU_ENABLED;
2752 break;
2753 case IOMMU_ENABLED:
2754 register_syscore_ops(&amd_iommu_syscore_ops);
2755 ret = amd_iommu_init_pci();
2756 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_PCI_INIT;
2757 enable_iommus_v2();
2758 break;
2759 case IOMMU_PCI_INIT:
2760 ret = amd_iommu_enable_interrupts();
2761 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2762 break;
2763 case IOMMU_INTERRUPTS_EN:
2764 ret = amd_iommu_init_dma_ops();
2765 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2766 break;
2767 case IOMMU_DMA_OPS:
2768 init_state = IOMMU_INITIALIZED;
2769 break;
2770 case IOMMU_INITIALIZED:
2771 /* Nothing to do */
2772 break;
2773 case IOMMU_NOT_FOUND:
2774 case IOMMU_INIT_ERROR:
2775 case IOMMU_CMDLINE_DISABLED:
2776 /* Error states => do nothing */
2777 ret = -EINVAL;
2778 break;
2779 default:
2780 /* Unknown state */
2781 BUG();
2782 }
2783
2784 if (ret) {
2785 free_dma_resources();
2786 if (!irq_remapping_enabled) {
2787 disable_iommus();
2788 free_iommu_resources();
2789 } else {
2790 struct amd_iommu *iommu;
2791
2792 uninit_device_table_dma();
2793 for_each_iommu(iommu)
2794 iommu_flush_all_caches(iommu);
2795 }
2796 }
2797 return ret;
2798 }
2799
2800 static int __init iommu_go_to_state(enum iommu_init_state state)
2801 {
2802 int ret = -EINVAL;
2803
2804 while (init_state != state) {
2805 if (init_state == IOMMU_NOT_FOUND ||
2806 init_state == IOMMU_INIT_ERROR ||
2807 init_state == IOMMU_CMDLINE_DISABLED)
2808 break;
2809 ret = state_next();
2810 }
2811
2812 return ret;
2813 }
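/*
 * For reference, a fully successful initialization as driven by
 * state_next() walks the states in this order:
 *
 *   IOMMU_START_STATE -> IOMMU_IVRS_DETECTED -> IOMMU_ACPI_FINISHED ->
 *   IOMMU_ENABLED -> IOMMU_PCI_INIT -> IOMMU_INTERRUPTS_EN ->
 *   IOMMU_DMA_OPS -> IOMMU_INITIALIZED
 *
 * IOMMU_NOT_FOUND, IOMMU_INIT_ERROR and IOMMU_CMDLINE_DISABLED are
 * terminal error states; iommu_go_to_state() stops as soon as one of
 * them is reached.
 */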
2814
2815 #ifdef CONFIG_IRQ_REMAP
2816 int __init amd_iommu_prepare(void)
2817 {
2818 int ret;
2819
2820 amd_iommu_irq_remap = true;
2821
2822 ret = iommu_go_to_state(IOMMU_ACPI_FINISHED);
2823 if (ret)
2824 return ret;
2825 return amd_iommu_irq_remap ? 0 : -ENODEV;
2826 }
2827
2828 int __init amd_iommu_enable(void)
2829 {
2830 int ret;
2831
2832 ret = iommu_go_to_state(IOMMU_ENABLED);
2833 if (ret)
2834 return ret;
2835
2836 irq_remapping_enabled = 1;
2837 return amd_iommu_xt_mode;
2838 }
2839
2840 void amd_iommu_disable(void)
2841 {
2842 amd_iommu_suspend();
2843 }
2844
2845 int amd_iommu_reenable(int mode)
2846 {
2847 amd_iommu_resume();
2848
2849 return 0;
2850 }
2851
2852 int __init amd_iommu_enable_faulting(void)
2853 {
2854 /* We enable MSI later when PCI is initialized */
2855 return 0;
2856 }
2857 #endif
2858
2859 /*
2860  * This is the core init function for AMD IOMMU hardware in the system.
2861  * This function is called from the generic x86 DMA layer initialization
2862  * code.
2863  */
2864 static int __init amd_iommu_init(void)
2865 {
2866 struct amd_iommu *iommu;
2867 int ret;
2868
2869 ret = iommu_go_to_state(IOMMU_INITIALIZED);
2870 #ifdef CONFIG_GART_IOMMU
2871 if (ret && list_empty(&amd_iommu_list)) {
2872 /*
2873  * We failed to initialize the AMD IOMMU - try fallback
2874  * to GART if possible.
2875  */
2876 gart_iommu_init();
2877 }
2878 #endif
2879
2880 for_each_iommu(iommu)
2881 amd_iommu_debugfs_setup(iommu);
2882
2883 return ret;
2884 }
2885
2886 static bool amd_iommu_sme_check(void)
2887 {
2888 if (!sme_active() || (boot_cpu_data.x86 != 0x17))
2889 return true;
2890
2891 /* For Fam17h, a specific level of support is required */
2892 if (boot_cpu_data.microcode >= 0x08001205)
2893 return true;
2894
2895 if ((boot_cpu_data.microcode >= 0x08001126) &&
2896 (boot_cpu_data.microcode <= 0x080011ff))
2897 return true;
2898
2899 pr_notice("IOMMU not currently supported when SME is active\n");
2900
2901 return false;
2902 }
2903
2904 /****************************************************************************
2905  *
2906  * Early detect code. This code runs at IOMMU detection time in the DMA
2907  * layer. It just looks if there is an IVRS ACPI table to detect AMD
2908  * IOMMUs.
2909  *
2910  ****************************************************************************/
2911 int __init amd_iommu_detect(void)
2912 {
2913 int ret;
2914
2915 if (no_iommu || (iommu_detected && !gart_iommu_aperture))
2916 return -ENODEV;
2917
2918 if (!amd_iommu_sme_check())
2919 return -ENODEV;
2920
2921 ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
2922 if (ret)
2923 return ret;
2924
2925 amd_iommu_detected = true;
2926 iommu_detected = 1;
2927 x86_init.iommu.iommu_init = amd_iommu_init;
2928
2929 return 1;
2930 }
2931
2932 /****************************************************************************
2933  *
2934  * Parsing functions for the AMD IOMMU specific kernel command line
2935  * options.
2936  *
2937  ****************************************************************************/
2938
2939 static int __init parse_amd_iommu_dump(char *str)
2940 {
2941 amd_iommu_dump = true;
2942
2943 return 1;
2944 }
2945
2946 static int __init parse_amd_iommu_intr(char *str)
2947 {
2948 for (; *str; ++str) {
2949 if (strncmp(str, "legacy", 6) == 0) {
2950 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
2951 break;
2952 }
2953 if (strncmp(str, "vapic", 5) == 0) {
2954 amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
2955 break;
2956 }
2957 }
2958 return 1;
2959 }
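/*
 * Example usage, matching the parser above: "amd_iommu_intr=vapic"
 * selects AMD_IOMMU_GUEST_IR_VAPIC, "amd_iommu_intr=legacy" selects
 * AMD_IOMMU_GUEST_IR_LEGACY_GA.
 */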
2960
2961 static int __init parse_amd_iommu_options(char *str)
2962 {
2963 for (; *str; ++str) {
2964 if (strncmp(str, "fullflush", 9) == 0)
2965 amd_iommu_unmap_flush = true;
2966 if (strncmp(str, "off", 3) == 0)
2967 amd_iommu_disabled = true;
2968 if (strncmp(str, "force_isolation", 15) == 0)
2969 amd_iommu_force_isolation = true;
2970 }
2971
2972 return 1;
2973 }
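/*
 * Example usage, matching the parser above: "amd_iommu=off" sets
 * amd_iommu_disabled, "amd_iommu=fullflush" sets amd_iommu_unmap_flush
 * and "amd_iommu=force_isolation" sets amd_iommu_force_isolation.
 */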
2974
2975 static int __init parse_ivrs_ioapic(char *str)
2976 {
2977 unsigned int bus, dev, fn;
2978 int ret, id, i;
2979 u16 devid;
2980
2981 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
2982
2983 if (ret != 4) {
2984 pr_err("Invalid command line: ivrs_ioapic%s\n", str);
2985 return 1;
2986 }
2987
2988 if (early_ioapic_map_size == EARLY_MAP_SIZE) {
2989 pr_err("Early IOAPIC map overflow - ignoring ivrs_ioapic%s\n",
2990 str);
2991 return 1;
2992 }
2993
2994 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
2995
2996 cmdline_maps = true;
2997 i = early_ioapic_map_size++;
2998 early_ioapic_map[i].id = id;
2999 early_ioapic_map[i].devid = devid;
3000 early_ioapic_map[i].cmd_line = true;
3001
3002 return 1;
3003 }
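/*
 * Example, matching the sscanf() format above: "ivrs_ioapic[32]=00:14.0"
 * overrides the IVRS entry for IOAPIC id 32 and maps it to PCI device
 * 00:14.0, i.e. devid (0x00 << 8) | (0x14 << 3) | 0x0 == 0x00a0.
 */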
3004
3005 static int __init parse_ivrs_hpet(char *str)
3006 {
3007 unsigned int bus, dev, fn;
3008 int ret, id, i;
3009 u16 devid;
3010
3011 ret = sscanf(str, "[%d]=%x:%x.%x", &id, &bus, &dev, &fn);
3012
3013 if (ret != 4) {
3014 pr_err("Invalid command line: ivrs_hpet%s\n", str);
3015 return 1;
3016 }
3017
3018 if (early_hpet_map_size == EARLY_MAP_SIZE) {
3019 pr_err("Early HPET map overflow - ignoring ivrs_hpet%s\n",
3020 str);
3021 return 1;
3022 }
3023
3024 devid = ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3025
3026 cmdline_maps = true;
3027 i = early_hpet_map_size++;
3028 early_hpet_map[i].id = id;
3029 early_hpet_map[i].devid = devid;
3030 early_hpet_map[i].cmd_line = true;
3031
3032 return 1;
3033 }
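/*
 * Example, matching the sscanf() format above: "ivrs_hpet[0]=00:14.0"
 * maps HPET id 0 to PCI device 00:14.0 (devid 0x00a0), overriding a
 * missing or wrong IVRS entry.
 */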
3034
3035 static int __init parse_ivrs_acpihid(char *str)
3036 {
3037 u32 bus, dev, fn;
3038 char *hid, *uid, *p;
3039 char acpiid[ACPIHID_UID_LEN + ACPIHID_HID_LEN] = {0};
3040 int ret, i;
3041
3042 ret = sscanf(str, "[%x:%x.%x]=%s", &bus, &dev, &fn, acpiid);
3043 if (ret != 4) {
3044 pr_err("Invalid command line: ivrs_acpihid(%s)\n", str);
3045 return 1;
3046 }
3047
3048 p = acpiid;
3049 hid = strsep(&p, ":");
3050 uid = p;
3051
3052 if (!hid || !(*hid) || !uid) {
3053 pr_err("Invalid command line: hid or uid\n");
3054 return 1;
3055 }
3056
3057 i = early_acpihid_map_size++;
3058 memcpy(early_acpihid_map[i].hid, hid, strlen(hid));
3059 memcpy(early_acpihid_map[i].uid, uid, strlen(uid));
3060 early_acpihid_map[i].devid =
3061 ((bus & 0xff) << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
3062 early_acpihid_map[i].cmd_line = true;
3063
3064 return 1;
3065 }
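/*
 * Example, matching the sscanf() format above: booting with
 * "ivrs_acpihid[00:14.5]=AMD0020:0" (an illustrative HID:UID pair) maps
 * ACPI HID "AMD0020", UID "0" to PCI device 00:14.5, i.e. devid 0x00a5.
 */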
3066
3067 __setup("amd_iommu_dump", parse_amd_iommu_dump);
3068 __setup("amd_iommu=", parse_amd_iommu_options);
3069 __setup("amd_iommu_intr=", parse_amd_iommu_intr);
3070 __setup("ivrs_ioapic", parse_ivrs_ioapic);
3071 __setup("ivrs_hpet", parse_ivrs_hpet);
3072 __setup("ivrs_acpihid", parse_ivrs_acpihid);
3073
3074 IOMMU_INIT_FINISH(amd_iommu_detect,
3075 gart_iommu_hole_init,
3076 NULL,
3077 NULL);
3078
3079 bool amd_iommu_v2_supported(void)
3080 {
3081 return amd_iommu_v2_present;
3082 }
3083 EXPORT_SYMBOL(amd_iommu_v2_supported);
3084
3085 struct amd_iommu *get_amd_iommu(unsigned int idx)
3086 {
3087 unsigned int i = 0;
3088 struct amd_iommu *iommu;
3089
3090 for_each_iommu(iommu)
3091 if (i++ == idx)
3092 return iommu;
3093 return NULL;
3094 }
3095 EXPORT_SYMBOL(get_amd_iommu);
3096
3097 /****************************************************************************
3098  *
3099  * IOMMU EFR Performance Counter support functionality. This code allows
3100  * access to the IOMMU PC functionality.
3101  *
3102  ****************************************************************************/
3103
3104 u8 amd_iommu_pc_get_max_banks(unsigned int idx)
3105 {
3106 struct amd_iommu *iommu = get_amd_iommu(idx);
3107
3108 if (iommu)
3109 return iommu->max_banks;
3110
3111 return 0;
3112 }
3113 EXPORT_SYMBOL(amd_iommu_pc_get_max_banks);
3114
3115 bool amd_iommu_pc_supported(void)
3116 {
3117 return amd_iommu_pc_present;
3118 }
3119 EXPORT_SYMBOL(amd_iommu_pc_supported);
3120
3121 u8 amd_iommu_pc_get_max_counters(unsigned int idx)
3122 {
3123 struct amd_iommu *iommu = get_amd_iommu(idx);
3124
3125 if (iommu)
3126 return iommu->max_counters;
3127
3128 return 0;
3129 }
3130 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
3131
3132 static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
3133 u8 fxn, u64 *value, bool is_write)
3134 {
3135 u32 offset;
3136 u32 max_offset_lim;
3137
3138 /* Make sure the IOMMU PC resource is available */
3139 if (!amd_iommu_pc_present)
3140 return -ENODEV;
3141
3142 /* Check for valid iommu and pc register indexing */
3143 if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7)))
3144 return -ENODEV;
3145
3146 offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn);
3147
3148 /* Limit the offset to the hw defined mmio region aperture */
3149 max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) |
3150 (iommu->max_counters << 8) | 0x28);
3151 if ((offset < MMIO_CNTR_REG_OFFSET) ||
3152 (offset > max_offset_lim))
3153 return -EINVAL;
3154
3155 if (is_write) {
3156 u64 val = *value & GENMASK_ULL(47, 0);
3157
3158 writel((u32)val, iommu->mmio_base + offset);
3159 writel((val >> 32), iommu->mmio_base + offset + 4);
3160 } else {
3161 *value = readl(iommu->mmio_base + offset + 4);
3162 *value <<= 32;
3163 *value |= readl(iommu->mmio_base + offset);
3164 *value &= GENMASK_ULL(47, 0);
3165 }
3166
3167 return 0;
3168 }
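/*
 * Worked example for the offset encoding above: bank 0, counter 0,
 * function 0 gives ((0x40 | 0) << 12) | (0 << 8) | 0 == 0x40000, the
 * start of the counter register aperture (anything below
 * MMIO_CNTR_REG_OFFSET is rejected). Each bank is 4 KiB apart, each
 * counter uses a 256-byte stride, and fxn selects the 8-byte register
 * within a counter (it must be 8-byte aligned and at most 0x28).
 */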
3169
3170 int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3171 {
3172 if (!iommu)
3173 return -EINVAL;
3174
3175 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false);
3176 }
3177 EXPORT_SYMBOL(amd_iommu_pc_get_reg);
3178
3179 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value)
3180 {
3181 if (!iommu)
3182 return -EINVAL;
3183
3184 return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true);
3185 }
3186 EXPORT_SYMBOL(amd_iommu_pc_set_reg);