This source file includes the following definitions:
- flush_tce
- tce_build
- tce_free
- table_size_to_number_of_entries
- tce_table_setparms
- build_tce_table
- alloc_tce_table
- free_tce_table

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/memblock.h>
#include <asm/tce.h>
#include <asm/calgary.h>
#include <asm/proto.h>
#include <asm/cacheflush.h>

/* Flush a single TCE at 'tceaddr' out to main memory. */
static inline void flush_tce(void* tceaddr)
{
	/* A single 8-byte TCE cannot cross a cache line. */
	if (boot_cpu_has(X86_FEATURE_CLFLUSH))
		clflush(tceaddr);
	else
		wbinvd();
}

void tce_build(struct iommu_table *tbl, unsigned long index,
	unsigned int npages, unsigned long uaddr, int direction)
{
	u64* tp;
	u64 t;
	u64 rpn;

	/* Every entry grants read access; grant write access too unless
	 * the mapping is DMA_TO_DEVICE only. */
	t = (1 << TCE_READ_SHIFT);
	if (direction != DMA_TO_DEVICE)
		t |= (1 << TCE_WRITE_SHIFT);

	tp = ((u64*)tbl->it_base) + index;

	while (npages--) {
		/* Translate the kernel virtual address into a real page number. */
		rpn = (virt_to_bus((void*)uaddr)) >> PAGE_SHIFT;
		t &= ~TCE_RPN_MASK;
		t |= (rpn << TCE_RPN_SHIFT);

		/* TCEs are big-endian; flush each entry as it is written. */
		*tp = cpu_to_be64(t);
		flush_tce(tp);

		uaddr += PAGE_SIZE;
		tp++;
	}
}
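
/*
 * Usage sketch (hypothetical, not part of this file): a caller that has
 * reserved npages consecutive entries starting at 'entry' in 'tbl' could
 * map a kernel buffer 'vaddr' for bidirectional DMA with
 *
 *	tce_build(tbl, entry, npages, (unsigned long)vaddr, DMA_BIDIRECTIONAL);
 *
 * and undo the mapping with tce_free(tbl, entry, npages). In the Calgary
 * driver proper, the DMA mapping callbacks issue these calls and also
 * manage the it_map allocation bitmap.
 */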

void tce_free(struct iommu_table *tbl, long index, unsigned int npages)
{
	u64* tp;

	tp = ((u64*)tbl->it_base) + index;

	/* Clear each entry and flush it so the IOMMU stops translating it. */
	while (npages--) {
		*tp = cpu_to_be64(0);
		flush_tce(tp);
		tp++;
	}
}

static inline unsigned int table_size_to_number_of_entries(unsigned char size)
{
	/*
	 * 'size' is the order of the table; the smallest table holds
	 * 8K entries, so shift left by 13 to multiply by 8K.
	 */
	return (1 << size) << 13;
}
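
/*
 * Worked example: with 4 KiB pages, size 0 yields (1 << 0) << 13 = 8192
 * entries, i.e. 32 MiB of DMA-mappable space (one page per entry), while
 * size 7 yields (1 << 7) << 13 = 1048576 entries, i.e. 4 GiB.
 */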

static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
{
	unsigned int bitmapsz;
	unsigned long bmppages;
	int ret;

	tbl->it_busno = dev->bus->number;

	/* Set the TCE table size, measured in entries. */
	tbl->it_size = table_size_to_number_of_entries(specified_table_size);

	/*
	 * Allocate the allocation bitmap: one bit per table entry;
	 * __get_free_pages() rounds this up to a power-of-two number
	 * of pages.
	 */
	bitmapsz = tbl->it_size / BITS_PER_BYTE;
	bmppages = __get_free_pages(GFP_KERNEL, get_order(bitmapsz));
	if (!bmppages) {
		printk(KERN_ERR "Calgary: cannot allocate bitmap\n");
		ret = -ENOMEM;
		goto done;
	}

	tbl->it_map = (unsigned long*)bmppages;

	memset(tbl->it_map, 0, bitmapsz);

	tbl->it_hint = 0;

	spin_lock_init(&tbl->it_lock);

	return 0;

done:
	return ret;
}

int __init build_tce_table(struct pci_dev *dev, void __iomem *bbar)
{
	struct iommu_table *tbl;
	int ret;

	/* A bus must not already have an IOMMU table attached. */
	if (pci_iommu(dev->bus)) {
		printk(KERN_ERR "Calgary: dev %p has sysdata->iommu %p\n",
		       dev, pci_iommu(dev->bus));
		BUG();
	}

	tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
	if (!tbl) {
		printk(KERN_ERR "Calgary: error allocating iommu_table\n");
		ret = -ENOMEM;
		goto done;
	}

	ret = tce_table_setparms(dev, tbl);
	if (ret)
		goto free_tbl;

	tbl->bbar = bbar;

	set_pci_iommu(dev->bus, tbl);

	return 0;

free_tbl:
	kfree(tbl);
done:
	return ret;
}
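
/*
 * Plausible call sequence, inferred from this file alone rather than quoted
 * from the Calgary setup code: alloc_tce_table() reserves the raw table
 * memory at early boot, build_tce_table() later creates the iommu_table
 * descriptor for a bus once its BBAR has been ioremapped, tce_build() and
 * tce_free() then populate and clear individual entries at runtime, and
 * free_tce_table() returns table memory that turns out not to be needed.
 */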

void * __init alloc_tce_table(void)
{
	unsigned int size;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	/* Allocate the table low in memory, aligned to its own size. */
	return memblock_alloc_low(size, size);
}

/* Give an unused boot-time table back to memblock. */
void __init free_tce_table(void *tbl)
{
	unsigned int size;

	if (!tbl)
		return;

	size = table_size_to_number_of_entries(specified_table_size);
	size *= TCE_ENTRY_SIZE;

	memblock_free(__pa(tbl), size);
}
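
/*
 * Size check (assuming TCE_ENTRY_SIZE is 8 bytes, one u64 per entry): the
 * smallest table is 8192 entries * 8 bytes = 64 KiB and the largest (size
 * order 7) is 1048576 entries * 8 bytes = 8 MiB, which is also the alignment
 * requested from memblock in alloc_tce_table() above.
 */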