1#ifndef _ASM_X86_IOMMU_TABLE_H
2#define _ASM_X86_IOMMU_TABLE_H
3
4#include <asm/swiotlb.h>
5
6/*
7 * History lesson:
8 * The execution chain of IOMMUs in 2.6.36 looks as so:
9 *
10 *            [xen-swiotlb]
11 *                 |
12 *         +----[swiotlb *]--+
13 *        /         |         \
14 *       /          |          \
15 *    [GART]     [Calgary]  [Intel VT-d]
16 *     /
17 *    /
18 * [AMD-Vi]
19 *
 * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip
 * over the rest of IOMMUs and unconditionally initialize the SWIOTLB.
 * Also it would surreptitiously set swiotlb=1 if there were more
 * than 4GB of memory and the user did not pass in 'iommu=off'. The swiotlb
 * flag would be turned off by all IOMMUs except the Calgary one.
25 *
26 * The IOMMU_INIT* macros allow a similar tree (or more complex if desired)
27 * to be built by defining who we depend on.
28 *
29 * And all that needs to be done is to use one of the macros in the IOMMU
30 * and the pci-dma.c will take care of the rest.
31 */
32
/*
 * One entry per IOMMU driver. Entries live in the .iommu_table section
 * in arbitrary link order and are sorted at boot time by dependency
 * (see sort_iommu_table below) before their routines are invoked.
 */
struct iommu_table_entry {
	initcall_t	detect;		/* Probe routine; positive return means present. */
	initcall_t	depend;		/* The detect routine that must run before ours. */
	void		(*early_init)(void); /* No memory allocate available. */
	void		(*late_init)(void); /* Yes, can allocate memory. */
/* Stop checking the remaining IOMMUs once this one has been detected. */
#define IOMMU_FINISH_IF_DETECTED (1<<0)
/* NOTE(review): presumably set at runtime once 'detect' returned a
 * positive value — confirm against the pci-dma.c consumer. */
#define IOMMU_DETECTED		 (1<<1)
	int		flags;
};
/*
 * Macro fills out an entry in the .iommu_table that is equivalent
 * to the fields that 'struct iommu_table_entry' has. The entries
 * that are put in the .iommu_table section are not put in any order,
 * hence during boot-time we will have to sort them based on
 * dependency.
 */
48
49
/*
 * Emit one 'struct iommu_table_entry' into the .iommu_table section.
 * Each entry is pointer-aligned so the section can be walked as a
 * plain array between its start/end markers. The symbol itself is
 * never referenced by name, hence the 'unused' attribute alongside
 * __used (which keeps the optimizer from discarding the definition).
 */
#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
	static const struct iommu_table_entry				\
		__iommu_entry_##_detect __used				\
	__attribute__ ((unused, __section__(".iommu_table"),		\
			aligned((sizeof(void *)))))	\
	= {_detect, _depend, _early_init, _late_init,			\
	   _finish ? IOMMU_FINISH_IF_DETECTED : 0}
57/*
58 * The simplest IOMMU definition. Provide the detection routine
59 * and it will be run after the SWIOTLB and the other IOMMUs
60 * that utilize this macro. If the IOMMU is detected (ie, the
61 * detect routine returns a positive value), the other IOMMUs
62 * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer
63 * to stop detecting the other IOMMUs after yours has been detected.
64 */
/* Run after pci_swiotlb_detect_4gb; keep scanning other IOMMUs afterwards. */
#define IOMMU_INIT_POST(_detect)					\
	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 0)

/*
 * Same, but stop scanning the remaining IOMMUs once this one has
 * been detected.
 *
 * Fix: the parameter must be named _detect to match the expansion
 * below. The previous name 'detect' was never substituted, so the
 * macro pasted the literal token _detect — producing one bogus
 * __iommu_entry__detect symbol referencing a nonexistent _detect
 * function (and multiple-definition errors with >1 user).
 */
#define IOMMU_INIT_POST_FINISH(_detect)					\
	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 1)
70
71/*
72 * A more sophisticated version of IOMMU_INIT. This variant requires:
73 *  a). A detection routine function.
74 *  b). The name of the detection routine we depend on to get called
75 *      before us.
76 *  c). The init routine which gets called if the detection routine
77 *      returns a positive value from the pci_iommu_alloc. This means
78 *      no presence of a memory allocator.
79 *  d). Similar to the 'init', except that this gets called from pci_iommu_init
80 *      where we do have a memory allocator.
81 *
82 * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
83 * in that the former will continue detecting other IOMMUs in the call
84 * list after the detection routine returns a positive number, while the
85 * latter will stop the execution chain upon first successful detection.
86 * Both variants will still call the 'init' and 'late_init' functions if
87 * they are set.
88 */
/* Full variant: stop scanning the chain once this IOMMU is detected. */
#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)		\
	__IOMMU_INIT(_detect, _depend, _init, _late_init, 1)

/* Full variant: keep scanning the chain even after this IOMMU is detected. */
#define IOMMU_INIT(_detect, _depend, _init, _late_init)			\
	__IOMMU_INIT(_detect, _depend, _init, _late_init, 0)
94
/*
 * Sort the [start, finish) entries of the .iommu_table section so that
 * every entry runs after the entry its 'depend' field names.
 */
void sort_iommu_table(struct iommu_table_entry *start,
		      struct iommu_table_entry *finish);

/*
 * Sanity-check the [start, finish) entries; presumably flags broken
 * dependencies (e.g. cycles) — confirm against the implementation in
 * the corresponding .c file.
 */
void check_iommu_entries(struct iommu_table_entry *start,
			 struct iommu_table_entry *finish);
100
101#endif /* _ASM_X86_IOMMU_TABLE_H */
102