This source file includes the following definitions:
- zpci_err_insn
- __mpcifc
- zpci_mod_fc
- __rpcit
- zpci_refresh_trans
- __zpci_set_irq_ctrl
- ____pcilg
- __pcilg
- __zpci_load
- zpci_load_fh
- __pcilg_mio
- zpci_load
- __pcistg
- __zpci_store
- zpci_store_fh
- __pcistg_mio
- zpci_store
- __pcistb
- __zpci_store_block
- zpci_write_block_fh
- __pcistb_mio
- zpci_write_block
- __pciwb_mio
- zpci_barrier
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>

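/* retry delay used when a zPCI instruction reports a busy condition (cc == 2) */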
#define ZPCI_INSN_BUSY_DELAY	1

static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
{
	struct {
		u64 req;
		u64 offset;
		u8 cc;
		u8 status;
	} __packed data = {req, offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

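/* Modify PCI Function Controls */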
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	asm volatile (
		" .insn rxy,0xe300000000d0,%[req],%[fib]\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
		: : "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
	u8 cc;

	do {
		cc = __mpcifc(req, fib, status);
		if (cc == 2)
			msleep(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, *status, req, 0);

	return cc;
}

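/* Refresh PCI Translations */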
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	register u64 __addr asm("2") = addr;
	register u64 __range asm("3") = range;
	u8 cc;

	asm volatile (
		" .insn rre,0xb9d30000,%[fn],%[addr]\n"
		" ipm %[cc]\n"
		" srl %[cc],28\n"
		: [cc] "=d" (cc), [fn] "+d" (fn)
		: [addr] "d" (__addr), "d" (__range)
		: "cc");
	*status = fn >> 24 & 0xff;
	return cc;
}

int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
	u8 cc, status;

	do {
		cc = __rpcit(fn, addr, range, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, addr, range);

	if (cc == 1 && (status == 4 || status == 16))
		return -ENOMEM;

	return (cc) ? -EIO : 0;
}

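/* Set Interruption Controls */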
int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	if (!test_facility(72))
		return -EIO;

	asm volatile(
		".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

	return 0;
}

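/* PCI Load */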
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	register u64 __req asm("2") = req;
	register u64 __offset asm("3") = offset;
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		" .insn rre,0xb9d20000,%[data],%[req]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
		: "d" (__offset)
		: "cc");
	*status = __req >> 24 & 0xff;
	*data = __data;
	return cc;
}

static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	u64 __data;
	int cc;

	cc = ____pcilg(&__data, req, offset, status);
	if (!cc)
		*data = __data;

	return cc;
}

int __zpci_load(u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcilg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);

static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}

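/* PCI Load, MIO form: reads through a mapped I/O address instead of a function handle request */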
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	register u64 addr asm("2") = ioaddr;
	register u64 r3 asm("3") = len;
	int cc = -ENXIO;
	u64 __data;

	asm volatile (
		" .insn rre,0xb9d60000,%[data],%[ioaddr]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
		: [ioaddr] "d" (addr)
		: "cc");
	*status = r3 >> 24 & 0xff;
	*data = __data;
	return cc;
}

int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_load_fh(data, addr, len);

	cc = __pcilg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);

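/* PCI Store */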
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	register u64 __req asm("2") = req;
	register u64 __offset asm("3") = offset;
	int cc = -ENXIO;

	asm volatile (
		" .insn rre,0xb9d00000,%[data],%[req]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (__req)
		: "d" (__offset), [data] "d" (data)
		: "cc");
	*status = __req >> 24 & 0xff;
	return cc;
}

int __zpci_store(u64 data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistg(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);

static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}

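/* PCI Store, MIO form: writes through a mapped I/O address instead of a function handle request */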
static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	register u64 addr asm("2") = ioaddr;
	register u64 r3 asm("3") = len;
	int cc = -ENXIO;

	asm volatile (
		" .insn rre,0xb9d40000,%[data],%[ioaddr]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), "+d" (r3)
		: [data] "d" (data), [ioaddr] "d" (addr)
		: "cc");
	*status = r3 >> 24 & 0xff;
	return cc;
}

int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_store_fh(addr, data, len);

	cc = __pcistg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) addr);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);

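/* PCI Store Block */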
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		" .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [req] "+d" (req)
		: [offset] "d" (offset), [data] "Q" (*data)
		: "cc");
	*status = req >> 24 & 0xff;
	return cc;
}

int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
	u8 status;
	int cc;

	do {
		cc = __pcistb(data, req, offset, &status);
		if (cc == 2)
			udelay(ZPCI_INSN_BUSY_DELAY);
	} while (cc == 2);

	if (cc)
		zpci_err_insn(cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);

static inline int zpci_write_block_fh(volatile void __iomem *dst,
				      const void *src, unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
	u64 offset = ZPCI_OFFSET(dst);

	return __zpci_store_block(src, req, offset);
}

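/* PCI Store Block, MIO form: block write through a mapped I/O address */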
static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		" .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0: ipm %[cc]\n"
		" srl %[cc],28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: "cc");
	*status = len >> 24 & 0xff;
	return cc;
}

int zpci_write_block(volatile void __iomem *dst,
		     const void *src, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_write_block_fh(dst, src, len);

	cc = __pcistb_mio(src, (__force u64) dst, len, &status);
	if (cc)
		zpci_err_insn(cc, status, 0, (__force u64) dst);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);

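/* PCI write barrier; zpci_barrier() issues it only when MIO support is available */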
static inline void __pciwb_mio(void)
{
	unsigned long unused = 0;

	asm volatile (".insn rre,0xb9d50000,%[op],%[op]\n"
		      : [op] "+d" (unused));
}

void zpci_barrier(void)
{
	if (static_branch_likely(&have_mio))
		__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);