arch/x86/include/asm/iommu_table.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IOMMU_TABLE_H
#define _ASM_X86_IOMMU_TABLE_H

#include <asm/swiotlb.h>

/*
 * History lesson:
 * The execution chain of IOMMUs in 2.6.36 looked like this:
 *
 *            [xen-swiotlb]
 *                 |
 *         +----[swiotlb *]--+
 *        /         |         \
 *       /          |          \
 *    [GART]     [Calgary]  [Intel VT-d]
 *     /
 *    /
 * [AMD-Vi]
 *
 * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip
 * over the rest of the IOMMUs and unconditionally initialize the SWIOTLB.
 * It would also surreptitiously set swiotlb=1 if there was more than 4GB
 * of memory and the user did not pass in 'iommu=off'. The swiotlb flag
 * would then be turned off by all IOMMUs except the Calgary one.
 *
 * The IOMMU_INIT* macros allow a similar tree (or a more complex one if
 * desired) to be built by defining which detection routine we depend on.
 *
 * All that needs to be done is to use one of the macros in the IOMMU
 * driver and pci-dma.c will take care of the rest.
 */

struct iommu_table_entry {
	initcall_t	detect;
	initcall_t	depend;
	void		(*early_init)(void); /* No memory allocator available. */
	void		(*late_init)(void);  /* Yes, can allocate memory. */
#define IOMMU_FINISH_IF_DETECTED (1<<0)
#define IOMMU_DETECTED		 (1<<1)
	int		flags;
};
/*
 * This macro fills out an entry in the .iommu_table that is equivalent
 * to the fields of 'struct iommu_table_entry'. The entries placed in the
 * .iommu_table section are in no particular order, hence during boot-time
 * we have to sort them based on dependency.
 */

#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
	static const struct iommu_table_entry				\
		__iommu_entry_##_detect __used				\
	__attribute__ ((unused, __section__(".iommu_table"),		\
			aligned((sizeof(void *)))))			\
	= {_detect, _depend, _early_init, _late_init,			\
	   _finish ? IOMMU_FINISH_IF_DETECTED : 0}
/*
 * The simplest IOMMU definition. Provide the detection routine
 * and it will be run after the SWIOTLB and the other IOMMUs
 * that utilize this macro. If the IOMMU is detected (i.e. the
 * detect routine returns a positive value), the other IOMMUs
 * are also checked. You can use IOMMU_INIT_POST_FINISH if you prefer
 * to stop detecting the other IOMMUs after yours has been detected.
 */
#define IOMMU_INIT_POST(_detect)					\
	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 0)

#define IOMMU_INIT_POST_FINISH(_detect)					\
	__IOMMU_INIT(_detect, pci_swiotlb_detect_4gb,  NULL, NULL, 1)

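/*
 * Illustrative sketch, not part of the original header: how a driver
 * would typically hook into the simple variants above. The names
 * detect_my_iommu() and my_iommu_present() are invented for this
 * example; a real detection routine is an initcall_t that returns a
 * positive value when its hardware is found.
 *
 *	static int __init detect_my_iommu(void)
 *	{
 *		return my_iommu_present() ? 1 : 0;
 *	}
 *
 *	IOMMU_INIT_POST(detect_my_iommu);	   (keep probing the others)
 *	IOMMU_INIT_POST_FINISH(detect_my_iommu);   (stop once detected)
 *
 * Only one of the two registrations would be used in practice.
 */
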
/*
 * A more sophisticated version of IOMMU_INIT. This variant requires:
 *  a) A detection routine.
 *  b) The name of the detection routine we depend on, so that it gets
 *     called before us.
 *  c) The early init routine, which gets called from pci_iommu_alloc
 *     if the detection routine returns a positive value. At that point
 *     no memory allocator is available yet.
 *  d) Similar to c), except that this one gets called from
 *     pci_iommu_init, where we do have a memory allocator.
 *
 * The standard IOMMU_INIT differs from the IOMMU_INIT_FINISH variant
 * in that the former will continue detecting other IOMMUs in the call
 * list after the detection routine returns a positive number, while the
 * latter will stop the execution chain upon the first successful detection.
 * Both variants will still call the 'init' and 'late_init' functions if
 * they are set.
 */
#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)		\
	__IOMMU_INIT(_detect, _depend, _init, _late_init, 1)

#define IOMMU_INIT(_detect, _depend, _init, _late_init)			\
	__IOMMU_INIT(_detect, _depend, _init, _late_init, 0)

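/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * driver wiring up all four stages. detect_my_iommu(),
 * detect_other_iommu(), my_iommu_early_init() and my_iommu_late_init()
 * are invented names standing in for a real driver's routines.
 *
 *	IOMMU_INIT(detect_my_iommu,	   (our detection routine)
 *		   detect_other_iommu,	   (runs before us)
 *		   my_iommu_early_init,	   (pci_iommu_alloc, no allocator)
 *		   my_iommu_late_init);	   (pci_iommu_init, allocator OK)
 *
 * Using IOMMU_INIT_FINISH with the same arguments would additionally
 * stop the detection scan once detect_my_iommu() returns a positive
 * value.
 */
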
void sort_iommu_table(struct iommu_table_entry *start,
		      struct iommu_table_entry *finish);

void check_iommu_entries(struct iommu_table_entry *start,
			 struct iommu_table_entry *finish);

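/*
 * For orientation, a simplified sketch (an assumption about the
 * consumer, not a copy of the real code) of how pci-dma.c is expected
 * to walk the table: sort by dependency, sanity-check, then run the
 * detection routines in order. __iommu_table/__iommu_table_end stand
 * for the linker-provided bounds of the .iommu_table section.
 *
 *	struct iommu_table_entry *p;
 *
 *	sort_iommu_table(__iommu_table, __iommu_table_end);
 *	check_iommu_entries(__iommu_table, __iommu_table_end);
 *
 *	for (p = __iommu_table; p < __iommu_table_end; p++) {
 *		if (p->detect && p->detect() > 0) {
 *			p->flags |= IOMMU_DETECTED;
 *			if (p->early_init)
 *				p->early_init();
 *			if (p->flags & IOMMU_FINISH_IF_DETECTED)
 *				break;
 *		}
 *	}
 */
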
#endif /* _ASM_X86_IOMMU_TABLE_H */
