This source file includes the following definitions:
- p1_base
- p1_size
- p2_base
- p2_size
- find_cxl_vsec
- dump_cxl_config_space
- dump_afu_descriptor
- get_phb_index
- get_capp_unit_id
- cxl_calc_capp_routing
- get_phb_indications
- cxl_get_xsl9_dsnctl
- init_implementation_adapter_regs_psl9
- init_implementation_adapter_regs_psl8
- write_timebase_ctrl_psl8
- timebase_read_psl9
- timebase_read_psl8
- cxl_setup_psl_timebase
- init_implementation_afu_regs_psl9
- init_implementation_afu_regs_psl8
- cxl_pci_setup_irq
- cxl_update_image_control
- cxl_pci_alloc_one_irq
- cxl_pci_release_one_irq
- cxl_pci_alloc_irq_ranges
- cxl_pci_release_irq_ranges
- setup_cxl_bars
- switch_card_to_cxl
- pci_map_slice_regs
- pci_unmap_slice_regs
- cxl_pci_release_afu
- cxl_read_afu_descriptor
- cxl_afu_descriptor_looks_ok
- sanitise_afu_regs_psl9
- sanitise_afu_regs_psl8
- cxl_pci_afu_read_err_buffer
- pci_configure_afu
- pci_deconfigure_afu
- pci_init_afu
- cxl_pci_remove_afu
- cxl_pci_reset
- cxl_map_adapter_regs
- cxl_unmap_adapter_regs
- cxl_read_vsec
- cxl_fixup_malformed_tlp
- cxl_compatible_caia_version
- cxl_vsec_looks_ok
- cxl_pci_read_adapter_vpd
- cxl_release_adapter
- sanitise_adapter_regs
- cxl_configure_adapter
- cxl_deconfigure_adapter
- cxl_stop_trace_psl9
- cxl_stop_trace_psl8
- set_sl_ops
- cxl_pci_init_adapter
- cxl_pci_remove_adapter
- cxl_slot_is_switched
- cxl_probe
- cxl_remove
- cxl_vphb_error_detected
- cxl_pci_error_detected
- cxl_pci_slot_reset
- cxl_pci_resume
1 /*
2  * Copyright 2014 IBM Corp.
3  */
4
5
6 #include <linux/pci_regs.h>
7 #include <linux/pci_ids.h>
8 #include <linux/device.h>
9 #include <linux/module.h>
10 #include <linux/kernel.h>
11 #include <linux/slab.h>
12 #include <linux/sort.h>
13 #include <linux/pci.h>
14 #include <linux/of.h>
15 #include <linux/delay.h>
16 #include <asm/opal.h>
17 #include <asm/msi_bitmap.h>
18 #include <asm/pnv-pci.h>
19 #include <asm/io.h>
20 #include <asm/reg.h>
21
22 #include "cxl.h"
23 #include <misc/cxl.h>
24
25
26 #define CXL_PCI_VSEC_ID 0x1280
27 #define CXL_VSEC_MIN_SIZE 0x80
28
29 #define CXL_READ_VSEC_LENGTH(dev, vsec, dest) \
30 { \
31 pci_read_config_word(dev, vsec + 0x6, dest); \
32 *dest >>= 4; \
33 }
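/*
 * Illustrative example (hypothetical value): the VSEC length occupies the
 * upper 12 bits of the 16-bit word at vsec + 0x6, which is why the macro
 * above shifts right by 4. A raw word of 0x0810 yields a VSEC length of
 * 0x081 bytes:
 *
 *   u16 raw = 0x0810;
 *   u16 len = raw >> 4;   // 0x081
 */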
34 #define CXL_READ_VSEC_NAFUS(dev, vsec, dest) \
35 pci_read_config_byte(dev, vsec + 0x8, dest)
36
37 #define CXL_READ_VSEC_STATUS(dev, vsec, dest) \
38 pci_read_config_byte(dev, vsec + 0x9, dest)
39 #define CXL_STATUS_SECOND_PORT 0x80
40 #define CXL_STATUS_MSI_X_FULL 0x40
41 #define CXL_STATUS_MSI_X_SINGLE 0x20
42 #define CXL_STATUS_FLASH_RW 0x08
43 #define CXL_STATUS_FLASH_RO 0x04
44 #define CXL_STATUS_LOADABLE_AFU 0x02
45 #define CXL_STATUS_LOADABLE_PSL 0x01
46
47 #define CXL_UNSUPPORTED_FEATURES \
48 (CXL_STATUS_MSI_X_FULL | CXL_STATUS_MSI_X_SINGLE)
49
50 #define CXL_READ_VSEC_MODE_CONTROL(dev, vsec, dest) \
51 pci_read_config_byte(dev, vsec + 0xa, dest)
52 #define CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val) \
53 pci_write_config_byte(dev, vsec + 0xa, val)
54 #define CXL_VSEC_PROTOCOL_MASK 0xe0
55 #define CXL_VSEC_PROTOCOL_1024TB 0x80
56 #define CXL_VSEC_PROTOCOL_512TB 0x40
57 #define CXL_VSEC_PROTOCOL_256TB 0x20
58 #define CXL_VSEC_PROTOCOL_ENABLE 0x01
59
60 #define CXL_READ_VSEC_PSL_REVISION(dev, vsec, dest) \
61 pci_read_config_word(dev, vsec + 0xc, dest)
62 #define CXL_READ_VSEC_CAIA_MINOR(dev, vsec, dest) \
63 pci_read_config_byte(dev, vsec + 0xe, dest)
64 #define CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, dest) \
65 pci_read_config_byte(dev, vsec + 0xf, dest)
66 #define CXL_READ_VSEC_BASE_IMAGE(dev, vsec, dest) \
67 pci_read_config_word(dev, vsec + 0x10, dest)
68
69 #define CXL_READ_VSEC_IMAGE_STATE(dev, vsec, dest) \
70 pci_read_config_byte(dev, vsec + 0x13, dest)
71 #define CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, val) \
72 pci_write_config_byte(dev, vsec + 0x13, val)
73 #define CXL_VSEC_USER_IMAGE_LOADED 0x80
74 #define CXL_VSEC_PERST_LOADS_IMAGE 0x20
75 #define CXL_VSEC_PERST_SELECT_USER 0x10
76
77 #define CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, dest) \
78 pci_read_config_dword(dev, vsec + 0x20, dest)
79 #define CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, dest) \
80 pci_read_config_dword(dev, vsec + 0x24, dest)
81 #define CXL_READ_VSEC_PS_OFF(dev, vsec, dest) \
82 pci_read_config_dword(dev, vsec + 0x28, dest)
83 #define CXL_READ_VSEC_PS_SIZE(dev, vsec, dest) \
84 pci_read_config_dword(dev, vsec + 0x2c, dest)
85
86
87 /* The AFU descriptor works a little differently than the p1/p2 register
88  * accesses below, to make it easier to pull out individual fields */
89 #define AFUD_READ(afu, off) in_be64(afu->native->afu_desc_mmio + off)
90 #define AFUD_READ_LE(afu, off) in_le64(afu->native->afu_desc_mmio + off)
91 #define EXTRACT_PPC_BIT(val, bit) (!!(val & PPC_BIT(bit)))
92 #define EXTRACT_PPC_BITS(val, bs, be) ((val & PPC_BITMASK(bs, be)) >> PPC_BITLSHIFT(be))
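/*
 * Worked example of the PPC_BIT helpers above. IBM bit numbering is
 * big-endian: bit 0 is the most-significant bit of the 64-bit value and
 * bit 63 the least-significant. For a hypothetical val of
 * 0x00FF000000000000ULL:
 *
 *   EXTRACT_PPC_BITS(val, 0, 15) == 0x00FF   (the top 16 bits)
 *   EXTRACT_PPC_BIT(val, 8)      == 1        (bit 8, counted from the MSB)
 */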
93
94 #define AFUD_READ_INFO(afu) AFUD_READ(afu, 0x0)
95 #define AFUD_NUM_INTS_PER_PROC(val) EXTRACT_PPC_BITS(val, 0, 15)
96 #define AFUD_NUM_PROCS(val) EXTRACT_PPC_BITS(val, 16, 31)
97 #define AFUD_NUM_CRS(val) EXTRACT_PPC_BITS(val, 32, 47)
98 #define AFUD_MULTIMODE(val) EXTRACT_PPC_BIT(val, 48)
99 #define AFUD_PUSH_BLOCK_TRANSFER(val) EXTRACT_PPC_BIT(val, 55)
100 #define AFUD_DEDICATED_PROCESS(val) EXTRACT_PPC_BIT(val, 59)
101 #define AFUD_AFU_DIRECTED(val) EXTRACT_PPC_BIT(val, 61)
102 #define AFUD_TIME_SLICED(val) EXTRACT_PPC_BIT(val, 63)
103 #define AFUD_READ_CR(afu) AFUD_READ(afu, 0x20)
104 #define AFUD_CR_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
105 #define AFUD_READ_CR_OFF(afu) AFUD_READ(afu, 0x28)
106 #define AFUD_READ_PPPSA(afu) AFUD_READ(afu, 0x30)
107 #define AFUD_PPPSA_PP(val) EXTRACT_PPC_BIT(val, 6)
108 #define AFUD_PPPSA_PSA(val) EXTRACT_PPC_BIT(val, 7)
109 #define AFUD_PPPSA_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
110 #define AFUD_READ_PPPSA_OFF(afu) AFUD_READ(afu, 0x38)
111 #define AFUD_READ_EB(afu) AFUD_READ(afu, 0x40)
112 #define AFUD_EB_LEN(val) EXTRACT_PPC_BITS(val, 8, 63)
113 #define AFUD_READ_EB_OFF(afu) AFUD_READ(afu, 0x48)
114
115 static const struct pci_device_id cxl_pci_tbl[] = {
116 { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0477), },
117 { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x044b), },
118 { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x04cf), },
119 { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0601), },
120 { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0623), },
121 { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x0628), },
122 { }
123 };
124 MODULE_DEVICE_TABLE(pci, cxl_pci_tbl);
125
126
127 /*
128  * Mostly using these wrappers to avoid confusion:
129  * priv 1 is BAR2, while priv 2 is BAR0
130  */
131 static inline resource_size_t p1_base(struct pci_dev *dev)
132 {
133 return pci_resource_start(dev, 2);
134 }
135
136 static inline resource_size_t p1_size(struct pci_dev *dev)
137 {
138 return pci_resource_len(dev, 2);
139 }
140
141 static inline resource_size_t p2_base(struct pci_dev *dev)
142 {
143 return pci_resource_start(dev, 0);
144 }
145
146 static inline resource_size_t p2_size(struct pci_dev *dev)
147 {
148 return pci_resource_len(dev, 0);
149 }
150
151 static int find_cxl_vsec(struct pci_dev *dev)
152 {
153 int vsec = 0;
154 u16 val;
155
156 while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) {
157 pci_read_config_word(dev, vsec + 0x4, &val);
158 if (val == CXL_PCI_VSEC_ID)
159 return vsec;
160 }
161 return 0;
162
163 }
164
165 static void dump_cxl_config_space(struct pci_dev *dev)
166 {
167 int vsec;
168 u32 val;
169
170 dev_info(&dev->dev, "dump_cxl_config_space\n");
171
172 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &val);
173 dev_info(&dev->dev, "BAR0: %#.8x\n", val);
174 pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val);
175 dev_info(&dev->dev, "BAR1: %#.8x\n", val);
176 pci_read_config_dword(dev, PCI_BASE_ADDRESS_2, &val);
177 dev_info(&dev->dev, "BAR2: %#.8x\n", val);
178 pci_read_config_dword(dev, PCI_BASE_ADDRESS_3, &val);
179 dev_info(&dev->dev, "BAR3: %#.8x\n", val);
180 pci_read_config_dword(dev, PCI_BASE_ADDRESS_4, &val);
181 dev_info(&dev->dev, "BAR4: %#.8x\n", val);
182 pci_read_config_dword(dev, PCI_BASE_ADDRESS_5, &val);
183 dev_info(&dev->dev, "BAR5: %#.8x\n", val);
184
185 dev_info(&dev->dev, "p1 regs: %#llx, len: %#llx\n",
186 p1_base(dev), p1_size(dev));
187 dev_info(&dev->dev, "p2 regs: %#llx, len: %#llx\n",
188 p2_base(dev), p2_size(dev));
189 dev_info(&dev->dev, "BAR 4/5: %#llx, len: %#llx\n",
190 pci_resource_start(dev, 4), pci_resource_len(dev, 4));
191
192 if (!(vsec = find_cxl_vsec(dev)))
193 return;
194
195 #define show_reg(name, what) \
196 dev_info(&dev->dev, "cxl vsec: %30s: %#x\n", name, what)
197
198 pci_read_config_dword(dev, vsec + 0x0, &val);
199 show_reg("Cap ID", (val >> 0) & 0xffff);
200 show_reg("Cap Ver", (val >> 16) & 0xf);
201 show_reg("Next Cap Ptr", (val >> 20) & 0xfff);
202 pci_read_config_dword(dev, vsec + 0x4, &val);
203 show_reg("VSEC ID", (val >> 0) & 0xffff);
204 show_reg("VSEC Rev", (val >> 16) & 0xf);
205 show_reg("VSEC Length", (val >> 20) & 0xfff);
206 pci_read_config_dword(dev, vsec + 0x8, &val);
207 show_reg("Num AFUs", (val >> 0) & 0xff);
208 show_reg("Status", (val >> 8) & 0xff);
209 show_reg("Mode Control", (val >> 16) & 0xff);
210 show_reg("Reserved", (val >> 24) & 0xff);
211 pci_read_config_dword(dev, vsec + 0xc, &val);
212 show_reg("PSL Rev", (val >> 0) & 0xffff);
213 show_reg("CAIA Ver", (val >> 16) & 0xffff);
214 pci_read_config_dword(dev, vsec + 0x10, &val);
215 show_reg("Base Image Rev", (val >> 0) & 0xffff);
216 show_reg("Reserved", (val >> 16) & 0x0fff);
217 show_reg("Image Control", (val >> 28) & 0x3);
218 show_reg("Reserved", (val >> 30) & 0x1);
219 show_reg("Image Loaded", (val >> 31) & 0x1);
220
221 pci_read_config_dword(dev, vsec + 0x14, &val);
222 show_reg("Reserved", val);
223 pci_read_config_dword(dev, vsec + 0x18, &val);
224 show_reg("Reserved", val);
225 pci_read_config_dword(dev, vsec + 0x1c, &val);
226 show_reg("Reserved", val);
227
228 pci_read_config_dword(dev, vsec + 0x20, &val);
229 show_reg("AFU Descriptor Offset", val);
230 pci_read_config_dword(dev, vsec + 0x24, &val);
231 show_reg("AFU Descriptor Size", val);
232 pci_read_config_dword(dev, vsec + 0x28, &val);
233 show_reg("Problem State Offset", val);
234 pci_read_config_dword(dev, vsec + 0x2c, &val);
235 show_reg("Problem State Size", val);
236
237 pci_read_config_dword(dev, vsec + 0x30, &val);
238 show_reg("Reserved", val);
239 pci_read_config_dword(dev, vsec + 0x34, &val);
240 show_reg("Reserved", val);
241 pci_read_config_dword(dev, vsec + 0x38, &val);
242 show_reg("Reserved", val);
243 pci_read_config_dword(dev, vsec + 0x3c, &val);
244 show_reg("Reserved", val);
245
246 pci_read_config_dword(dev, vsec + 0x40, &val);
247 show_reg("PSL Programming Port", val);
248 pci_read_config_dword(dev, vsec + 0x44, &val);
249 show_reg("PSL Programming Control", val);
250
251 pci_read_config_dword(dev, vsec + 0x48, &val);
252 show_reg("Reserved", val);
253 pci_read_config_dword(dev, vsec + 0x4c, &val);
254 show_reg("Reserved", val);
255
256 pci_read_config_dword(dev, vsec + 0x50, &val);
257 show_reg("Flash Address Register", val);
258 pci_read_config_dword(dev, vsec + 0x54, &val);
259 show_reg("Flash Size Register", val);
260 pci_read_config_dword(dev, vsec + 0x58, &val);
261 show_reg("Flash Status/Control Register", val);
262 pci_read_config_dword(dev, vsec + 0x5c, &val);
263 show_reg("Flash Data Port", val);
264
265 #undef show_reg
266 }
267
268 static void dump_afu_descriptor(struct cxl_afu *afu)
269 {
270 u64 val, afu_cr_num, afu_cr_off, afu_cr_len;
271 int i;
272
273 #define show_reg(name, what) \
274 dev_info(&afu->dev, "afu desc: %30s: %#llx\n", name, what)
275
276 val = AFUD_READ_INFO(afu);
277 show_reg("num_ints_per_process", AFUD_NUM_INTS_PER_PROC(val));
278 show_reg("num_of_processes", AFUD_NUM_PROCS(val));
279 show_reg("num_of_afu_CRs", AFUD_NUM_CRS(val));
280 show_reg("req_prog_mode", val & 0xffffULL);
281 afu_cr_num = AFUD_NUM_CRS(val);
282
283 val = AFUD_READ(afu, 0x8);
284 show_reg("Reserved", val);
285 val = AFUD_READ(afu, 0x10);
286 show_reg("Reserved", val);
287 val = AFUD_READ(afu, 0x18);
288 show_reg("Reserved", val);
289
290 val = AFUD_READ_CR(afu);
291 show_reg("Reserved", (val >> (63-7)) & 0xff);
292 show_reg("AFU_CR_len", AFUD_CR_LEN(val));
293 afu_cr_len = AFUD_CR_LEN(val) * 256;
294
295 val = AFUD_READ_CR_OFF(afu);
296 afu_cr_off = val;
297 show_reg("AFU_CR_offset", val);
298
299 val = AFUD_READ_PPPSA(afu);
300 show_reg("PerProcessPSA_control", (val >> (63-7)) & 0xff);
301 show_reg("PerProcessPSA Length", AFUD_PPPSA_LEN(val));
302
303 val = AFUD_READ_PPPSA_OFF(afu);
304 show_reg("PerProcessPSA_offset", val);
305
306 val = AFUD_READ_EB(afu);
307 show_reg("Reserved", (val >> (63-7)) & 0xff);
308 show_reg("AFU_EB_len", AFUD_EB_LEN(val));
309
310 val = AFUD_READ_EB_OFF(afu);
311 show_reg("AFU_EB_offset", val);
312
313 for (i = 0; i < afu_cr_num; i++) {
314 val = AFUD_READ_LE(afu, afu_cr_off + i * afu_cr_len);
315 show_reg("CR Vendor", val & 0xffff);
316 show_reg("CR Device", (val >> 16) & 0xffff);
317 }
318 #undef show_reg
319 }
320
321 #define P8_CAPP_UNIT0_ID 0xBA
322 #define P8_CAPP_UNIT1_ID 0xBE
323 #define P9_CAPP_UNIT0_ID 0xC0
324 #define P9_CAPP_UNIT1_ID 0xE0
325
326 static int get_phb_index(struct device_node *np, u32 *phb_index)
327 {
328 if (of_property_read_u32(np, "ibm,phb-index", phb_index))
329 return -ENODEV;
330 return 0;
331 }
332
333 static u64 get_capp_unit_id(struct device_node *np, u32 phb_index)
334 {
335 /*
336  * POWER 8:
337  *  - For chips other than POWER8NVL, we only have CAPP 0,
338  *    irrespective of which PHB is used.
339  *  - For POWER8NVL, assume CAPP 0 is attached to PHB0 and
340  *    CAPP 1 is attached to PHB1.
341  */
342 if (cxl_is_power8()) {
343 if (!pvr_version_is(PVR_POWER8NVL))
344 return P8_CAPP_UNIT0_ID;
345
346 if (phb_index == 0)
347 return P8_CAPP_UNIT0_ID;
348
349 if (phb_index == 1)
350 return P8_CAPP_UNIT1_ID;
351 }
352
353 /*
354  * POWER 9:
355  *   PEC0 (PHB0): CAPP 0 (capp id 0b1100_0000)
356  *   PEC1 (PHB1 - PHB2): no CAPI mode
357  *   PEC2 (PHB3 - PHB4 - PHB5): CAPI mode on PHB3 only, CAPP 1 (0b1110_0000)
358  */
359 if (cxl_is_power9()) {
360 if (phb_index == 0)
361 return P9_CAPP_UNIT0_ID;
362
363 if (phb_index == 3)
364 return P9_CAPP_UNIT1_ID;
365 }
366
367 return 0;
368 }
369
370 int cxl_calc_capp_routing(struct pci_dev *dev, u64 *chipid,
371 u32 *phb_index, u64 *capp_unit_id)
372 {
373 int rc;
374 struct device_node *np;
375 const __be32 *prop;
376
377 if (!(np = pnv_pci_get_phb_node(dev)))
378 return -ENODEV;
379
380 while (np && !(prop = of_get_property(np, "ibm,chip-id", NULL)))
381 np = of_get_next_parent(np);
382 if (!np)
383 return -ENODEV;
384
385 *chipid = be32_to_cpup(prop);
386
387 rc = get_phb_index(np, phb_index);
388 if (rc) {
389 pr_err("cxl: invalid phb index\n");
390 return rc;
391 }
392
393 *capp_unit_id = get_capp_unit_id(np, *phb_index);
394 of_node_put(np);
395 if (!*capp_unit_id) {
396 pr_err("cxl: invalid capp unit id (phb_index: %d)\n",
397 *phb_index);
398 return -ENODEV;
399 }
400
401 return 0;
402 }
403
404 static DEFINE_MUTEX(indications_mutex);
405
406 static int get_phb_indications(struct pci_dev *dev, u64 *capiind, u64 *asnind,
407 u64 *nbwind)
408 {
409 static u64 nbw, asn, capi = 0;
410 struct device_node *np;
411 const __be32 *prop;
412
413 mutex_lock(&indications_mutex);
414 if (!capi) {
415 if (!(np = pnv_pci_get_phb_node(dev))) {
416 mutex_unlock(&indications_mutex);
417 return -ENODEV;
418 }
419
420 prop = of_get_property(np, "ibm,phb-indications", NULL);
421 if (!prop) {
422 nbw = 0x0300UL;
423 asn = 0x0400UL;
424 capi = 0x0200UL;
425 } else {
426 nbw = (u64)be32_to_cpu(prop[2]);
427 asn = (u64)be32_to_cpu(prop[1]);
428 capi = (u64)be32_to_cpu(prop[0]);
429 }
430 of_node_put(np);
431 }
432 *capiind = capi;
433 *asnind = asn;
434 *nbwind = nbw;
435 mutex_unlock(&indications_mutex);
436 return 0;
437 }
438
439 int cxl_get_xsl9_dsnctl(struct pci_dev *dev, u64 capp_unit_id, u64 *reg)
440 {
441 u64 xsl_dsnctl;
442 u64 capiind, asnind, nbwind;
443
444 /*
445  * CAPI Identifier bits [0:7]
446  * bit 61:60 MSI bits --> 0
447  * bit 59 TVT selector --> 0
448  */
449 if (get_phb_indications(dev, &capiind, &asnind, &nbwind))
450 return -ENODEV;
451
452 /*
453  * Tell XSL where to route data to.
454  * The field chipid should match the PHB CAPI_CMPM register.
455  */
456 xsl_dsnctl = (capiind << (63-15));
457 xsl_dsnctl |= (capp_unit_id << (63-15));
458
459 /* nMMU_ID defaults to 0b000001001 */
460 xsl_dsnctl |= ((u64)0x09 << (63-28));
461
462 /*
463  * Used to identify CAPI packets which should be sorted into
464  * the Non-Blocking queues by the PHB. This field should match
465  * the PHB PBL_NBW_CMPM register.
466  * nbwind = 0x03, bits [57:58], must include the capi indicator.
467  * Not supported on P9 DD1.
468  */
469 xsl_dsnctl |= (nbwind << (63-55));
470
471
472 /*
473  * Upper 16 address bits of ASB_Notify messages sent to the
474  * system; must match the PHB's ASN Compare/Mask Register.
475  */
476 xsl_dsnctl |= asnind;
477
478 *reg = xsl_dsnctl;
479 return 0;
480 }
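/*
 * Worked example of the register assembly above, using the fallback
 * indications from get_phb_indications() (capi=0x0200, nbw=0x0300,
 * asn=0x0400) and a hypothetical capp_unit_id of 0xC0:
 *
 *   capiind << (63-15)      = 0x0200ULL << 48 = 0x0002000000000000
 *   capp_unit_id << (63-15) = 0x00C0ULL << 48 = 0x00C0000000000000
 *   0x09 << (63-28)         = 0x09ULL   << 35 = 0x0000000480000000
 *   nbwind << (63-55)       = 0x0300ULL << 8  = 0x0000000000030000
 *   asnind                  =                   0x0000000000000400
 *   ----------------------------------------------------------------
 *   xsl_dsnctl              =                   0x00C2000480030400
 */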
481
482 static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
483 struct pci_dev *dev)
484 {
485 u64 xsl_dsnctl, psl_fircntl;
486 u64 chipid;
487 u32 phb_index;
488 u64 capp_unit_id;
489 u64 psl_debug;
490 int rc;
491
492 rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
493 if (rc)
494 return rc;
495
496 rc = cxl_get_xsl9_dsnctl(dev, capp_unit_id, &xsl_dsnctl);
497 if (rc)
498 return rc;
499
500 cxl_p1_write(adapter, CXL_XSL9_DSNCTL, xsl_dsnctl);
501
502 /* Set fir_cntl to recommended value for production env */
503 psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
504 psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
505 psl_fircntl |= 0x1ULL; /* ce_thresh */
506 cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
507
508 /* Setup the PSL to transmit packets on the PCIe before the
509  * CAPP is enabled. Make sure that CAPP virtual machines are disabled.
510  */
511 cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL);
512
513 /*
514  * A response to an ASB_Notify request is returned by the system as an
515  * MMIO write to the address defined in the PSL_TNR_ADDR register;
516  * keep the reset value 0x00020000E0000000.
517  */
518
519
520 /* Enable XSL rty limit */
521 cxl_p1_write(adapter, CXL_XSL9_DEF, 0x51F8000000000005ULL);
522
523 /* Change XSL_INV dummy read threshold */
524 cxl_p1_write(adapter, CXL_XSL9_INV, 0x0000040007FFC200ULL);
525
526 if (phb_index == 3) {
527 /* disable machines 31-47 and 20-27 for DMA */
528 cxl_p1_write(adapter, CXL_PSL9_APCDEDTYPE, 0x40000FF3FFFF0000ULL);
529 }
530
531 /* Snoop machines */
532 cxl_p1_write(adapter, CXL_PSL9_APCDEDALLOC, 0x800F000200000000ULL);
533
534 /* Enable NORST and DD2 features */
535 cxl_p1_write(adapter, CXL_PSL9_DEBUG, 0xC000000000000000ULL);
536
537 /*
538  * Check whether the PSL has a data-cache; if present it must be
539  * flushed when the adapter is about to be removed.
540  */
541 psl_debug = cxl_p1_read(adapter, CXL_PSL9_DEBUG);
542 if (psl_debug & CXL_PSL_DEBUG_CDC) {
543 dev_dbg(&dev->dev, "No data-cache present\n");
544 adapter->native->no_data_cache = true;
545 }
546
547 return 0;
548 }
549
550 static int init_implementation_adapter_regs_psl8(struct cxl *adapter, struct pci_dev *dev)
551 {
552 u64 psl_dsnctl, psl_fircntl;
553 u64 chipid;
554 u32 phb_index;
555 u64 capp_unit_id;
556 int rc;
557
558 rc = cxl_calc_capp_routing(dev, &chipid, &phb_index, &capp_unit_id);
559 if (rc)
560 return rc;
561
562 psl_dsnctl = 0x0000900000000000ULL; /* pteupd ttype, scdone */
563 psl_dsnctl |= (0x2ULL << (63-38)); /* MMIO hang pulse: 256 us */
564 /* Tell PSL where to route data to */
565 psl_dsnctl |= (chipid << (63-5));
566 psl_dsnctl |= (capp_unit_id << (63-13));
567
568 cxl_p1_write(adapter, CXL_PSL_DSNDCTL, psl_dsnctl);
569 cxl_p1_write(adapter, CXL_PSL_RESLCKTO, 0x20000000200ULL);
570 /* snoop write mask */
571 cxl_p1_write(adapter, CXL_PSL_SNWRALLOC, 0x00000000FFFFFFFFULL);
572 /* set fir_cntl to recommended value for production env */
573 psl_fircntl = (0x2ULL << (63-3)); /* ce_report */
574 psl_fircntl |= (0x1ULL << (63-6)); /* FIR_report */
575 psl_fircntl |= 0x1ULL; /* ce_thresh */
576 cxl_p1_write(adapter, CXL_PSL_FIR_CNTL, psl_fircntl);
577 /* for debugging with trace arrays */
578 cxl_p1_write(adapter, CXL_PSL_TRACE, 0x0000FF7C00000000ULL);
579
580 return 0;
581 }
582
583
584 #define TBSYNC_CAL(n) (((u64)n & 0x7) << (63-3))
585 #define TBSYNC_CNT(n) (((u64)n & 0x7) << (63-6))
586 /* For the PSL this is a multiple for 0 < n <= 7 */
587 #define PSL_2048_250MHZ_CYCLES 1
588
589 static void write_timebase_ctrl_psl8(struct cxl *adapter)
590 {
591 cxl_p1_write(adapter, CXL_PSL_TB_CTLSTAT,
592 TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES));
593 }
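/*
 * Sketch of the value written above, following the macro definitions
 * (the interpretation of the count units is per the macro names):
 *
 *   TBSYNC_CNT(2 * PSL_2048_250MHZ_CYCLES)
 *     = TBSYNC_CNT(2)
 *     = (2ULL & 0x7) << (63-6)
 *     = 0x0400000000000000
 *
 * i.e. a timebase sync count of two units of 2048 cycles at 250MHz.
 */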
594
595 static u64 timebase_read_psl9(struct cxl *adapter)
596 {
597 return cxl_p1_read(adapter, CXL_PSL9_Timebase);
598 }
599
600 static u64 timebase_read_psl8(struct cxl *adapter)
601 {
602 return cxl_p1_read(adapter, CXL_PSL_Timebase);
603 }
604
605 static void cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
606 {
607 struct device_node *np;
608
609 adapter->psl_timebase_synced = false;
610
611 if (!(np = pnv_pci_get_phb_node(dev)))
612 return;
613
614 /* Do not fail when CAPP timebase sync is not supported by OPAL */
615 of_node_get(np);
616 if (!of_get_property(np, "ibm,capp-timebase-sync", NULL)) {
617 of_node_put(np);
618 dev_info(&dev->dev, "PSL timebase inactive: OPAL support missing\n");
619 return;
620 }
621 of_node_put(np);
622
623 /*
624  * Setup PSL Timebase Control and Status register
625  * with the recommended Timebase Sync Count value
626  */
627 if (adapter->native->sl_ops->write_timebase_ctrl)
628 adapter->native->sl_ops->write_timebase_ctrl(adapter);
629
630 /* Enable PSL Timebase */
631 cxl_p1_write(adapter, CXL_PSL_Control, 0x0000000000000000);
632 cxl_p1_write(adapter, CXL_PSL_Control, CXL_PSL_Control_tb);
633
634 return;
635 }
636
637 static int init_implementation_afu_regs_psl9(struct cxl_afu *afu)
638 {
639 return 0;
640 }
641
642 static int init_implementation_afu_regs_psl8(struct cxl_afu *afu)
643 {
644 /* read/write masks for this slice */
645 cxl_p1n_write(afu, CXL_PSL_APCALLOC_A, 0xFFFFFFFEFEFEFEFEULL);
646 /* APC read/write masks for this slice */
647 cxl_p1n_write(afu, CXL_PSL_COALLOC_A, 0xFF000000FEFEFEFEULL);
648 /* for debugging with trace arrays */
649 cxl_p1n_write(afu, CXL_PSL_SLICE_TRACE, 0x0000FFFF00000000ULL);
650 cxl_p1n_write(afu, CXL_PSL_RXCTL_A, CXL_PSL_RXCTL_AFUHP_4S);
651
652 return 0;
653 }
654
655 int cxl_pci_setup_irq(struct cxl *adapter, unsigned int hwirq,
656 unsigned int virq)
657 {
658 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
659
660 return pnv_cxl_ioda_msi_setup(dev, hwirq, virq);
661 }
662
663 int cxl_update_image_control(struct cxl *adapter)
664 {
665 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
666 int rc;
667 int vsec;
668 u8 image_state;
669
670 if (!(vsec = find_cxl_vsec(dev))) {
671 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
672 return -ENODEV;
673 }
674
675 if ((rc = CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state))) {
676 dev_err(&dev->dev, "failed to read image state: %i\n", rc);
677 return rc;
678 }
679
680 if (adapter->perst_loads_image)
681 image_state |= CXL_VSEC_PERST_LOADS_IMAGE;
682 else
683 image_state &= ~CXL_VSEC_PERST_LOADS_IMAGE;
684
685 if (adapter->perst_select_user)
686 image_state |= CXL_VSEC_PERST_SELECT_USER;
687 else
688 image_state &= ~CXL_VSEC_PERST_SELECT_USER;
689
690 if ((rc = CXL_WRITE_VSEC_IMAGE_STATE(dev, vsec, image_state))) {
691 dev_err(&dev->dev, "failed to update image control: %i\n", rc);
692 return rc;
693 }
694
695 return 0;
696 }
697
698 int cxl_pci_alloc_one_irq(struct cxl *adapter)
699 {
700 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
701
702 return pnv_cxl_alloc_hwirqs(dev, 1);
703 }
704
705 void cxl_pci_release_one_irq(struct cxl *adapter, int hwirq)
706 {
707 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
708
709 return pnv_cxl_release_hwirqs(dev, hwirq, 1);
710 }
711
712 int cxl_pci_alloc_irq_ranges(struct cxl_irq_ranges *irqs,
713 struct cxl *adapter, unsigned int num)
714 {
715 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
716
717 return pnv_cxl_alloc_hwirq_ranges(irqs, dev, num);
718 }
719
720 void cxl_pci_release_irq_ranges(struct cxl_irq_ranges *irqs,
721 struct cxl *adapter)
722 {
723 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
724
725 pnv_cxl_release_hwirq_ranges(irqs, dev);
726 }
727
728 static int setup_cxl_bars(struct pci_dev *dev)
729 {
730 /* Safety check in case we get backported to < 3.17 without M64 */
731 if ((p1_base(dev) < 0x100000000ULL) ||
732 (p2_base(dev) < 0x100000000ULL)) {
733 dev_err(&dev->dev, "ABORTING: M32 BAR assignment incompatible with CXL\n");
734 return -ENODEV;
735 }
736
737
738 /*
739  * BAR 4/5 has a special meaning for CXL and must be programmed with a
740  * special value corresponding to the CXL protocol address range.
741  */
742 pci_write_config_dword(dev, PCI_BASE_ADDRESS_4, 0x00000000);
743 pci_write_config_dword(dev, PCI_BASE_ADDRESS_5, 0x00020000);
744
745 return 0;
746 }
747
748
749 static int switch_card_to_cxl(struct pci_dev *dev)
750 {
751 int vsec;
752 u8 val;
753 int rc;
754
755 dev_info(&dev->dev, "switch card to CXL\n");
756
757 if (!(vsec = find_cxl_vsec(dev))) {
758 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
759 return -ENODEV;
760 }
761
762 if ((rc = CXL_READ_VSEC_MODE_CONTROL(dev, vsec, &val))) {
763 dev_err(&dev->dev, "failed to read current mode control: %i", rc);
764 return rc;
765 }
766 val &= ~CXL_VSEC_PROTOCOL_MASK;
767 val |= CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE;
768 if ((rc = CXL_WRITE_VSEC_MODE_CONTROL(dev, vsec, val))) {
769 dev_err(&dev->dev, "failed to enable CXL protocol: %i", rc);
770 return rc;
771 }
772
773 /*
774  * The CAIA spec states we must wait 100ms after this mode switch
775  * before touching PCIe config space.
776  */
777 msleep(100);
778
779 return 0;
780 }
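/*
 * Illustrative example (hypothetical starting value): if the current
 * mode-control byte were 0x81 (1024TB range + enable), the sequence
 * above would clear the protocol mask and select 256TB + enable:
 *
 *   val = (0x81 & ~CXL_VSEC_PROTOCOL_MASK)
 *         | CXL_VSEC_PROTOCOL_256TB | CXL_VSEC_PROTOCOL_ENABLE
 *       = 0x01 | 0x20 | 0x01 = 0x21
 */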
781
782 static int pci_map_slice_regs(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
783 {
784 u64 p1n_base, p2n_base, afu_desc;
785 const u64 p1n_size = 0x100;
786 const u64 p2n_size = 0x1000;
787
788 p1n_base = p1_base(dev) + 0x10000 + (afu->slice * p1n_size);
789 p2n_base = p2_base(dev) + (afu->slice * p2n_size);
790 afu->psn_phys = p2_base(dev) + (adapter->native->ps_off + (afu->slice * adapter->ps_size));
791 afu_desc = p2_base(dev) + adapter->native->afu_desc_off + (afu->slice * adapter->native->afu_desc_size);
792
793 if (!(afu->native->p1n_mmio = ioremap(p1n_base, p1n_size)))
794 goto err;
795 if (!(afu->p2n_mmio = ioremap(p2n_base, p2n_size)))
796 goto err1;
797 if (afu_desc) {
798 if (!(afu->native->afu_desc_mmio = ioremap(afu_desc, adapter->native->afu_desc_size)))
799 goto err2;
800 }
801
802 return 0;
803 err2:
804 iounmap(afu->p2n_mmio);
805 err1:
806 iounmap(afu->native->p1n_mmio);
807 err:
808 dev_err(&afu->dev, "Error mapping AFU MMIO regions\n");
809 return -ENOMEM;
810 }
811
812 static void pci_unmap_slice_regs(struct cxl_afu *afu)
813 {
814 if (afu->p2n_mmio) {
815 iounmap(afu->p2n_mmio);
816 afu->p2n_mmio = NULL;
817 }
818 if (afu->native->p1n_mmio) {
819 iounmap(afu->native->p1n_mmio);
820 afu->native->p1n_mmio = NULL;
821 }
822 if (afu->native->afu_desc_mmio) {
823 iounmap(afu->native->afu_desc_mmio);
824 afu->native->afu_desc_mmio = NULL;
825 }
826 }
827
828 void cxl_pci_release_afu(struct device *dev)
829 {
830 struct cxl_afu *afu = to_cxl_afu(dev);
831
832 pr_devel("%s\n", __func__);
833
834 idr_destroy(&afu->contexts_idr);
835 cxl_release_spa(afu);
836
837 kfree(afu->native);
838 kfree(afu);
839 }
840
841
842 static int cxl_read_afu_descriptor(struct cxl_afu *afu)
843 {
844 u64 val;
845
846 val = AFUD_READ_INFO(afu);
847 afu->pp_irqs = AFUD_NUM_INTS_PER_PROC(val);
848 afu->max_procs_virtualised = AFUD_NUM_PROCS(val);
849 afu->crs_num = AFUD_NUM_CRS(val);
850
851 if (AFUD_AFU_DIRECTED(val))
852 afu->modes_supported |= CXL_MODE_DIRECTED;
853 if (AFUD_DEDICATED_PROCESS(val))
854 afu->modes_supported |= CXL_MODE_DEDICATED;
855 if (AFUD_TIME_SLICED(val))
856 afu->modes_supported |= CXL_MODE_TIME_SLICED;
857
858 val = AFUD_READ_PPPSA(afu);
859 afu->pp_size = AFUD_PPPSA_LEN(val) * 4096;
860 afu->psa = AFUD_PPPSA_PSA(val);
861 if ((afu->pp_psa = AFUD_PPPSA_PP(val)))
862 afu->native->pp_offset = AFUD_READ_PPPSA_OFF(afu);
863
864 val = AFUD_READ_CR(afu);
865 afu->crs_len = AFUD_CR_LEN(val) * 256;
866 afu->crs_offset = AFUD_READ_CR_OFF(afu);
867
868
869 /* eb_len is in multiples of 4K */
870 afu->eb_len = AFUD_EB_LEN(AFUD_READ_EB(afu)) * 4096;
871 afu->eb_offset = AFUD_READ_EB_OFF(afu);
872
873 /* eb_off is 4K aligned so lower 12 bits are always zero */
874 if (EXTRACT_PPC_BITS(afu->eb_offset, 0, 11) != 0) {
875 dev_warn(&afu->dev,
876 "Invalid AFU error buffer offset %Lx\n",
877 afu->eb_offset);
878 dev_info(&afu->dev,
879 "Ignoring AFU error buffer in the descriptor\n");
880
881 afu->eb_len = 0;
882 }
883
884 return 0;
885 }
886
887 static int cxl_afu_descriptor_looks_ok(struct cxl_afu *afu)
888 {
889 int i, rc;
890 u32 val;
891
892 if (afu->psa && afu->adapter->ps_size <
893 (afu->native->pp_offset + afu->pp_size*afu->max_procs_virtualised)) {
894 dev_err(&afu->dev, "per-process PSA can't fit inside the PSA!\n");
895 return -ENODEV;
896 }
897
898 if (afu->pp_psa && (afu->pp_size < PAGE_SIZE))
899 dev_warn(&afu->dev, "AFU uses pp_size(%#016llx) < PAGE_SIZE per-process PSA!\n", afu->pp_size);
900
901 for (i = 0; i < afu->crs_num; i++) {
902 rc = cxl_ops->afu_cr_read32(afu, i, 0, &val);
903 if (rc || val == 0) {
904 dev_err(&afu->dev, "ABORTING: AFU configuration record %i is invalid\n", i);
905 return -EINVAL;
906 }
907 }
908
909 if ((afu->modes_supported & ~CXL_MODE_DEDICATED) && afu->max_procs_virtualised == 0) {
910 /*
911  * We could also check this for the dedicated process model
912  * since the architecture indicates it should be set to 1, but
913  * in that case we ignore the value and I'd rather not risk
914  * breaking any existing dedicated process AFUs that left it as
915  * 0 (not that I'm aware of any). It is clearly an error for an
916  * AFU directed AFU to set this to 0, and would have previously
917  * triggered a bug resulting in the maximum not being enforced
918  * at all since idr_alloc treats 0 as no maximum.
919  */
920 dev_err(&afu->dev, "AFU does not support any processes\n");
921 return -EINVAL;
922 }
923
924 return 0;
925 }
926
927 static int sanitise_afu_regs_psl9(struct cxl_afu *afu)
928 {
929 u64 reg;
930
931 /*
932  * Clear out any regs that contain either an IVTE or address or may be
933  * waiting on an acknowledgment to try to be a bit safer as we bring
934  * it online
935  */
936 reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
937 if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
938 dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
939 if (cxl_ops->afu_reset(afu))
940 return -EIO;
941 if (cxl_afu_disable(afu))
942 return -EIO;
943 if (cxl_psl_purge(afu))
944 return -EIO;
945 }
946 cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
947 cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
948 reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
949 if (reg) {
950 dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
951 if (reg & CXL_PSL9_DSISR_An_TF)
952 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
953 else
954 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
955 }
956 if (afu->adapter->native->sl_ops->register_serr_irq) {
957 reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
958 if (reg) {
959 if (reg & ~0x000000007fffffff)
960 dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
961 cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
962 }
963 }
964 reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
965 if (reg) {
966 dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
967 cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
968 }
969
970 return 0;
971 }
972
973 static int sanitise_afu_regs_psl8(struct cxl_afu *afu)
974 {
975 u64 reg;
976
977 /*
978  * Clear out any regs that contain either an IVTE or address or may be
979  * waiting on an acknowledgment to try to be a bit safer as we bring
980  * it online
981  */
982 reg = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
983 if ((reg & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
984 dev_warn(&afu->dev, "WARNING: AFU was not disabled: %#016llx\n", reg);
985 if (cxl_ops->afu_reset(afu))
986 return -EIO;
987 if (cxl_afu_disable(afu))
988 return -EIO;
989 if (cxl_psl_purge(afu))
990 return -EIO;
991 }
992 cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0x0000000000000000);
993 cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, 0x0000000000000000);
994 cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An, 0x0000000000000000);
995 cxl_p1n_write(afu, CXL_PSL_AMBAR_An, 0x0000000000000000);
996 cxl_p1n_write(afu, CXL_PSL_SPOffset_An, 0x0000000000000000);
997 cxl_p1n_write(afu, CXL_HAURP_An, 0x0000000000000000);
998 cxl_p2n_write(afu, CXL_CSRP_An, 0x0000000000000000);
999 cxl_p2n_write(afu, CXL_AURP1_An, 0x0000000000000000);
1000 cxl_p2n_write(afu, CXL_AURP0_An, 0x0000000000000000);
1001 cxl_p2n_write(afu, CXL_SSTP1_An, 0x0000000000000000);
1002 cxl_p2n_write(afu, CXL_SSTP0_An, 0x0000000000000000);
1003 reg = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
1004 if (reg) {
1005 dev_warn(&afu->dev, "AFU had pending DSISR: %#016llx\n", reg);
1006 if (reg & CXL_PSL_DSISR_TRANS)
1007 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
1008 else
1009 cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
1010 }
1011 if (afu->adapter->native->sl_ops->register_serr_irq) {
1012 reg = cxl_p1n_read(afu, CXL_PSL_SERR_An);
1013 if (reg) {
1014 if (reg & ~0xffff)
1015 dev_warn(&afu->dev, "AFU had pending SERR: %#016llx\n", reg);
1016 cxl_p1n_write(afu, CXL_PSL_SERR_An, reg & ~0xffff);
1017 }
1018 }
1019 reg = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
1020 if (reg) {
1021 dev_warn(&afu->dev, "AFU had pending error status: %#016llx\n", reg);
1022 cxl_p2n_write(afu, CXL_PSL_ErrStat_An, reg);
1023 }
1024
1025 return 0;
1026 }
1027
1028 #define ERR_BUFF_MAX_COPY_SIZE PAGE_SIZE
1029
1030 /*
1031  * Reads the AFU error info buffer (called from sysfs). The hardware
1032  * only supports 4/8 byte aligned access, so if the requested
1033  * offset/count aren't 8 byte aligned we use a bounce buffer (max PAGE_SIZE).
1034  */
1035 ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
1036 loff_t off, size_t count)
1037 {
1038 loff_t aligned_start, aligned_end;
1039 size_t aligned_length;
1040 void *tbuf;
1041 const void __iomem *ebuf = afu->native->afu_desc_mmio + afu->eb_offset;
1042
1043 if (count == 0 || off < 0 || (size_t)off >= afu->eb_len)
1044 return 0;
1045
1046 /* calculate aligned read window */
1047 count = min((size_t)(afu->eb_len - off), count);
1048 aligned_start = round_down(off, 8);
1049 aligned_end = round_up(off + count, 8);
1050 aligned_length = aligned_end - aligned_start;
1051
1052 /* max we can copy in one read is PAGE_SIZE */
1053 if (aligned_length > ERR_BUFF_MAX_COPY_SIZE) {
1054 aligned_length = ERR_BUFF_MAX_COPY_SIZE;
1055 count = ERR_BUFF_MAX_COPY_SIZE - (off & 0x7);
1056 }
1057
1058 /* use a page-sized bounce buffer for the aligned copy */
1059 tbuf = (void *)__get_free_page(GFP_KERNEL);
1060 if (!tbuf)
1061 return -ENOMEM;
1062
1063 /* perform an aligned read from the mmio region */
1064 memcpy_fromio(tbuf, ebuf + aligned_start, aligned_length);
1065 memcpy(buf, tbuf + (off & 0x7), count);
1066
1067 free_page((unsigned long)tbuf);
1068
1069 return count;
1070 }
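/*
 * Worked example of the alignment logic above (hypothetical request):
 * reading count=0x10 bytes at off=0x5 of the error buffer gives
 *
 *   aligned_start  = round_down(0x5, 8)      = 0x00
 *   aligned_end    = round_up(0x5 + 0x10, 8) = 0x18
 *   aligned_length = 0x18
 *
 * so 0x18 bytes are copied from MMIO into the bounce buffer, and the
 * caller's 0x10 bytes are taken from tbuf + (0x5 & 0x7) = tbuf + 5.
 */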
1071
1072 static int pci_configure_afu(struct cxl_afu *afu, struct cxl *adapter, struct pci_dev *dev)
1073 {
1074 int rc;
1075
1076 if ((rc = pci_map_slice_regs(afu, adapter, dev)))
1077 return rc;
1078
1079 if (adapter->native->sl_ops->sanitise_afu_regs) {
1080 rc = adapter->native->sl_ops->sanitise_afu_regs(afu);
1081 if (rc)
1082 goto err1;
1083 }
1084
1085 /* We need to reset the AFU before we can read the AFU descriptor */
1086 if ((rc = cxl_ops->afu_reset(afu)))
1087 goto err1;
1088
1089 if (cxl_verbose)
1090 dump_afu_descriptor(afu);
1091
1092 if ((rc = cxl_read_afu_descriptor(afu)))
1093 goto err1;
1094
1095 if ((rc = cxl_afu_descriptor_looks_ok(afu)))
1096 goto err1;
1097
1098 if (adapter->native->sl_ops->afu_regs_init)
1099 if ((rc = adapter->native->sl_ops->afu_regs_init(afu)))
1100 goto err1;
1101
1102 if (adapter->native->sl_ops->register_serr_irq)
1103 if ((rc = adapter->native->sl_ops->register_serr_irq(afu)))
1104 goto err1;
1105
1106 if ((rc = cxl_native_register_psl_irq(afu)))
1107 goto err2;
1108
1109 atomic_set(&afu->configured_state, 0);
1110 return 0;
1111
1112 err2:
1113 if (adapter->native->sl_ops->release_serr_irq)
1114 adapter->native->sl_ops->release_serr_irq(afu);
1115 err1:
1116 pci_unmap_slice_regs(afu);
1117 return rc;
1118 }
1119
1120 static void pci_deconfigure_afu(struct cxl_afu *afu)
1121 {
1122 /*
1123  * It's okay to deconfigure when AFU is already locked, otherwise wait
1124  * until there are no readers
1125  */
1126 if (atomic_read(&afu->configured_state) != -1) {
1127 while (atomic_cmpxchg(&afu->configured_state, 0, -1) != -1)
1128 schedule();
1129 }
1130 cxl_native_release_psl_irq(afu);
1131 if (afu->adapter->native->sl_ops->release_serr_irq)
1132 afu->adapter->native->sl_ops->release_serr_irq(afu);
1133 pci_unmap_slice_regs(afu);
1134 }
1135
1136 static int pci_init_afu(struct cxl *adapter, int slice, struct pci_dev *dev)
1137 {
1138 struct cxl_afu *afu;
1139 int rc = -ENOMEM;
1140
1141 afu = cxl_alloc_afu(adapter, slice);
1142 if (!afu)
1143 return -ENOMEM;
1144
1145 afu->native = kzalloc(sizeof(struct cxl_afu_native), GFP_KERNEL);
1146 if (!afu->native)
1147 goto err_free_afu;
1148
1149 mutex_init(&afu->native->spa_mutex);
1150
1151 rc = dev_set_name(&afu->dev, "afu%i.%i", adapter->adapter_num, slice);
1152 if (rc)
1153 goto err_free_native;
1154
1155 rc = pci_configure_afu(afu, adapter, dev);
1156 if (rc)
1157 goto err_free_native;
1158
1159 /* Don't care if this fails */
1160 cxl_debugfs_afu_add(afu);
1161
1162 /*
1163  * After we call this function we must not free the afu directly, even
1164  * if it returns an error!
1165  */
1166 if ((rc = cxl_register_afu(afu)))
1167 goto err_put1;
1168
1169 if ((rc = cxl_sysfs_afu_add(afu)))
1170 goto err_put1;
1171
1172 adapter->afu[afu->slice] = afu;
1173
1174 if ((rc = cxl_pci_vphb_add(afu)))
1175 dev_info(&afu->dev, "Can't register vPHB\n");
1176
1177 return 0;
1178
1179 err_put1:
1180 pci_deconfigure_afu(afu);
1181 cxl_debugfs_afu_remove(afu);
1182 device_unregister(&afu->dev);
1183 return rc;
1184
1185 err_free_native:
1186 kfree(afu->native);
1187 err_free_afu:
1188 kfree(afu);
1189 return rc;
1190
1191 }
1192
1193 static void cxl_pci_remove_afu(struct cxl_afu *afu)
1194 {
1195 pr_devel("%s\n", __func__);
1196
1197 if (!afu)
1198 return;
1199
1200 cxl_pci_vphb_remove(afu);
1201 cxl_sysfs_afu_remove(afu);
1202 cxl_debugfs_afu_remove(afu);
1203
1204 spin_lock(&afu->adapter->afu_list_lock);
1205 afu->adapter->afu[afu->slice] = NULL;
1206 spin_unlock(&afu->adapter->afu_list_lock);
1207
1208 cxl_context_detach_all(afu);
1209 cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
1210
1211 pci_deconfigure_afu(afu);
1212 device_unregister(&afu->dev);
1213 }
1214
1215 int cxl_pci_reset(struct cxl *adapter)
1216 {
1217 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
1218 int rc;
1219
1220 if (adapter->perst_same_image) {
1221 dev_warn(&dev->dev,
1222 "cxl: refusing to reset/reflash when perst_reloads_same_image is set.\n");
1223 return -EINVAL;
1224 }
1225
1226 dev_info(&dev->dev, "CXL reset\n");
1227
1228 /*
1229  * The adapter is about to be reset, so ignore errors.
1230  */
1231 cxl_data_cache_flush(adapter);
1232
1233 /* pcie_warm_reset requests a fundamental pci reset which includes a
1234  * PERST assert/deassert. PERST triggers a loading of the image
1235  * if "user" or "factory" is selected in sysfs */
1236 if ((rc = pci_set_pcie_reset_state(dev, pcie_warm_reset))) {
1237 dev_err(&dev->dev, "cxl: pcie_warm_reset failed\n");
1238 return rc;
1239 }
1240
1241 return rc;
1242 }
1243
1244 static int cxl_map_adapter_regs(struct cxl *adapter, struct pci_dev *dev)
1245 {
1246 if (pci_request_region(dev, 2, "priv 2 regs"))
1247 goto err1;
1248 if (pci_request_region(dev, 0, "priv 1 regs"))
1249 goto err2;
1250
1251 pr_devel("cxl_map_adapter_regs: p1: %#016llx %#llx, p2: %#016llx %#llx",
1252 p1_base(dev), p1_size(dev), p2_base(dev), p2_size(dev));
1253
1254 if (!(adapter->native->p1_mmio = ioremap(p1_base(dev), p1_size(dev))))
1255 goto err3;
1256
1257 if (!(adapter->native->p2_mmio = ioremap(p2_base(dev), p2_size(dev))))
1258 goto err4;
1259
1260 return 0;
1261
1262 err4:
1263 iounmap(adapter->native->p1_mmio);
1264 adapter->native->p1_mmio = NULL;
1265 err3:
1266 pci_release_region(dev, 0);
1267 err2:
1268 pci_release_region(dev, 2);
1269 err1:
1270 return -ENOMEM;
1271 }
1272
1273 static void cxl_unmap_adapter_regs(struct cxl *adapter)
1274 {
1275 if (adapter->native->p1_mmio) {
1276 iounmap(adapter->native->p1_mmio);
1277 adapter->native->p1_mmio = NULL;
1278 pci_release_region(to_pci_dev(adapter->dev.parent), 2);
1279 }
1280 if (adapter->native->p2_mmio) {
1281 iounmap(adapter->native->p2_mmio);
1282 adapter->native->p2_mmio = NULL;
1283 pci_release_region(to_pci_dev(adapter->dev.parent), 0);
1284 }
1285 }
1286
1287 static int cxl_read_vsec(struct cxl *adapter, struct pci_dev *dev)
1288 {
1289 int vsec;
1290 u32 afu_desc_off, afu_desc_size;
1291 u32 ps_off, ps_size;
1292 u16 vseclen;
1293 u8 image_state;
1294
1295 if (!(vsec = find_cxl_vsec(dev))) {
1296 dev_err(&dev->dev, "ABORTING: CXL VSEC not found!\n");
1297 return -ENODEV;
1298 }
1299
1300 CXL_READ_VSEC_LENGTH(dev, vsec, &vseclen);
1301 if (vseclen < CXL_VSEC_MIN_SIZE) {
1302 dev_err(&dev->dev, "ABORTING: CXL VSEC too short\n");
1303 return -EINVAL;
1304 }
1305
1306 CXL_READ_VSEC_STATUS(dev, vsec, &adapter->vsec_status);
1307 CXL_READ_VSEC_PSL_REVISION(dev, vsec, &adapter->psl_rev);
1308 CXL_READ_VSEC_CAIA_MAJOR(dev, vsec, &adapter->caia_major);
1309 CXL_READ_VSEC_CAIA_MINOR(dev, vsec, &adapter->caia_minor);
1310 CXL_READ_VSEC_BASE_IMAGE(dev, vsec, &adapter->base_image);
1311 CXL_READ_VSEC_IMAGE_STATE(dev, vsec, &image_state);
1312 adapter->user_image_loaded = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
1313 adapter->perst_select_user = !!(image_state & CXL_VSEC_USER_IMAGE_LOADED);
1314 adapter->perst_loads_image = !!(image_state & CXL_VSEC_PERST_LOADS_IMAGE);
1315
1316 CXL_READ_VSEC_NAFUS(dev, vsec, &adapter->slices);
1317 CXL_READ_VSEC_AFU_DESC_OFF(dev, vsec, &afu_desc_off);
1318 CXL_READ_VSEC_AFU_DESC_SIZE(dev, vsec, &afu_desc_size);
1319 CXL_READ_VSEC_PS_OFF(dev, vsec, &ps_off);
1320 CXL_READ_VSEC_PS_SIZE(dev, vsec, &ps_size);
1321
1322 /* Convert everything to bytes, because there is NO WAY I'd look at the
1323  * code a month later and forget what units these are in ;-) */
1324 adapter->native->ps_off = ps_off * 64 * 1024;
1325 adapter->ps_size = ps_size * 64 * 1024;
1326 adapter->native->afu_desc_off = afu_desc_off * 64 * 1024;
1327 adapter->native->afu_desc_size = afu_desc_size * 64 * 1024;
1328
1329 /* Total IRQs - 1 PSL ERROR - #AFU*(1 slice error + 1 DSI) */
1330 adapter->user_irqs = pnv_cxl_get_irq_count(dev) - 1 - 2*adapter->slices;
1331
1332 return 0;
1333 }
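/*
 * Worked example of the IRQ budget above (hypothetical card): with
 * pnv_cxl_get_irq_count() returning 2048 and adapter->slices = 2,
 *
 *   user_irqs = 2048 - 1 - 2*2 = 2043
 *
 * i.e. one IRQ reserved for the PSL error interrupt plus two per AFU
 * (slice error + DSI).
 */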
1334
1335 /*
1336  * Workaround a PCIe Host Bridge defect on some cards, that can cause
1337  * malformed Transaction Layer Packet (TLP) errors to be erroneously
1338  * reported. Mask this error in the Uncorrectable Error Mask Register.
1339  *
1340  * The upper nibble of the PSL revision is used to distinguish between
1341  * different cards. The affected ones have it set to 0.
1342  */
1343 static void cxl_fixup_malformed_tlp(struct cxl *adapter, struct pci_dev *dev)
1344 {
1345 int aer;
1346 u32 data;
1347
1348 if (adapter->psl_rev & 0xf000)
1349 return;
1350 if (!(aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)))
1351 return;
1352 pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &data);
1353 if (data & PCI_ERR_UNC_MALF_TLP)
1354 if (data & PCI_ERR_UNC_INTN)
1355 return;
1356 data |= PCI_ERR_UNC_MALF_TLP;
1357 data |= PCI_ERR_UNC_INTN;
1358 pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, data);
1359 }
1360
1361 static bool cxl_compatible_caia_version(struct cxl *adapter)
1362 {
1363 if (cxl_is_power8() && (adapter->caia_major == 1))
1364 return true;
1365
1366 if (cxl_is_power9() && (adapter->caia_major == 2))
1367 return true;
1368
1369 return false;
1370 }
1371
1372 static int cxl_vsec_looks_ok(struct cxl *adapter, struct pci_dev *dev)
1373 {
1374 if (adapter->vsec_status & CXL_STATUS_SECOND_PORT)
1375 return -EBUSY;
1376
1377 if (adapter->vsec_status & CXL_UNSUPPORTED_FEATURES) {
1378 dev_err(&dev->dev, "ABORTING: CXL requires unsupported features\n");
1379 return -EINVAL;
1380 }
1381
1382 if (!cxl_compatible_caia_version(adapter)) {
1383 dev_info(&dev->dev, "Ignoring card. PSL type is not supported (caia version: %d)\n",
1384 adapter->caia_major);
1385 return -ENODEV;
1386 }
1387
1388 if (!adapter->slices) {
1389 /* Once we support dynamic reprogramming we can use the card if
1390  * it supports loadable AFUs */
1391 dev_err(&dev->dev, "ABORTING: Device has no AFUs\n");
1392 return -EINVAL;
1393 }
1394
1395 if (!adapter->native->afu_desc_off || !adapter->native->afu_desc_size) {
1396 dev_err(&dev->dev, "ABORTING: VSEC shows no AFU descriptors\n");
1397 return -EINVAL;
1398 }
1399
1400 if (adapter->ps_size > p2_size(dev) - adapter->native->ps_off) {
1401 dev_err(&dev->dev, "ABORTING: Problem state size larger than "
1402 "available in BAR2: 0x%llx > 0x%llx\n",
1403 adapter->ps_size, p2_size(dev) - adapter->native->ps_off);
1404 return -EINVAL;
1405 }
1406
1407 return 0;
1408 }
1409
1410 ssize_t cxl_pci_read_adapter_vpd(struct cxl *adapter, void *buf, size_t len)
1411 {
1412 return pci_read_vpd(to_pci_dev(adapter->dev.parent), 0, len, buf);
1413 }
1414
1415 static void cxl_release_adapter(struct device *dev)
1416 {
1417 struct cxl *adapter = to_cxl_adapter(dev);
1418
1419 pr_devel("cxl_release_adapter\n");
1420
1421 cxl_remove_adapter_nr(adapter);
1422
1423 kfree(adapter->native);
1424 kfree(adapter);
1425 }
1426
1427 #define CXL_PSL_ErrIVTE_tberror (0x1ull << (63-31))
1428
1429 static int sanitise_adapter_regs(struct cxl *adapter)
1430 {
1431 int rc = 0;
1432
1433 /* Clear PSL tberror bit by writing 1 to it */
1434 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, CXL_PSL_ErrIVTE_tberror);
1435
1436 if (adapter->native->sl_ops->invalidate_all) {
1437 /* do not invalidate ERAT entries when not reloading on PERST */
1438 if (cxl_is_power9() && (adapter->perst_loads_image))
1439 return 0;
1440 rc = adapter->native->sl_ops->invalidate_all(adapter);
1441 }
1442
1443 return rc;
1444 }
1445
1446 /* This should contain *only* operations that can safely be done in
1447  * both creation and recovery.
1448  */
1449 static int cxl_configure_adapter(struct cxl *adapter, struct pci_dev *dev)
1450 {
1451 int rc;
1452
1453 adapter->dev.parent = &dev->dev;
1454 adapter->dev.release = cxl_release_adapter;
1455 pci_set_drvdata(dev, adapter);
1456
1457 rc = pci_enable_device(dev);
1458 if (rc) {
1459 dev_err(&dev->dev, "pci_enable_device failed: %i\n", rc);
1460 return rc;
1461 }
1462
1463 if ((rc = cxl_read_vsec(adapter, dev)))
1464 return rc;
1465
1466 if ((rc = cxl_vsec_looks_ok(adapter, dev)))
1467 return rc;
1468
1469 cxl_fixup_malformed_tlp(adapter, dev);
1470
1471 if ((rc = setup_cxl_bars(dev)))
1472 return rc;
1473
1474 if ((rc = switch_card_to_cxl(dev)))
1475 return rc;
1476
1477 if ((rc = cxl_update_image_control(adapter)))
1478 return rc;
1479
1480 if ((rc = cxl_map_adapter_regs(adapter, dev)))
1481 return rc;
1482
1483 if ((rc = sanitise_adapter_regs(adapter)))
1484 goto err;
1485
1486 if ((rc = adapter->native->sl_ops->adapter_regs_init(adapter, dev)))
1487 goto err;
1488
1489 /* Required for devices using CAPP DMA mode, harmless for others */
1490 pci_set_master(dev);
1491
1492 adapter->tunneled_ops_supported = false;
1493
1494 if (cxl_is_power9()) {
1495 if (pnv_pci_set_tunnel_bar(dev, 0x00020000E0000000ull, 1))
1496 dev_info(&dev->dev, "Tunneled operations unsupported\n");
1497 else
1498 adapter->tunneled_ops_supported = true;
1499 }
1500
1501 if ((rc = pnv_phb_to_cxl_mode(dev, adapter->native->sl_ops->capi_mode)))
1502 goto err;
1503
1504 /* If recovery happened, the last step is to turn on snooping.
1505  * In the non-recovery case this has no effect */
1506 if ((rc = pnv_phb_to_cxl_mode(dev, OPAL_PHB_CAPI_MODE_SNOOP_ON)))
1507 goto err;
1508
1509 /* Ignore error, adapter init is not dependent on timebase sync */
1510 cxl_setup_psl_timebase(adapter, dev);
1511
1512 if ((rc = cxl_native_register_psl_err_irq(adapter)))
1513 goto err;
1514
1515 return 0;
1516
1517 err:
1518 cxl_unmap_adapter_regs(adapter);
1519 return rc;
1520
1521 }
1522
1523 static void cxl_deconfigure_adapter(struct cxl *adapter)
1524 {
1525 struct pci_dev *pdev = to_pci_dev(adapter->dev.parent);
1526
1527 if (cxl_is_power9())
1528 pnv_pci_set_tunnel_bar(pdev, 0x00020000E0000000ull, 0);
1529
1530 cxl_native_release_psl_err_irq(adapter);
1531 cxl_unmap_adapter_regs(adapter);
1532
1533 pci_disable_device(pdev);
1534 }
1535
1536 static void cxl_stop_trace_psl9(struct cxl *adapter)
1537 {
1538 int traceid;
1539 u64 trace_state, trace_mask;
1540 struct pci_dev *dev = to_pci_dev(adapter->dev.parent);
1541
1542 /* read each trace array state and issue an mmio to stop it if needed */
1543 for (traceid = 0; traceid <= CXL_PSL9_TRACEID_MAX; ++traceid) {
1544 trace_state = cxl_p1_read(adapter, CXL_PSL9_CTCCFG);
1545 trace_mask = (0x3ULL << (62 - traceid * 2));
1546 trace_state = (trace_state & trace_mask) >> (62 - traceid * 2);
1547 dev_dbg(&dev->dev, "cxl: Traceid-%d trace_state=0x%0llX\n",
1548 traceid, trace_state);
1549
1550 /* issue mmio if the trace array isn't in FIN state */
1551 if (trace_state != CXL_PSL9_TRACESTATE_FIN)
1552 cxl_p1_write(adapter, CXL_PSL9_TRACECFG,
1553 0x8400000000000000ULL | traceid);
1554 }
1555 }
1556
1557 static void cxl_stop_trace_psl8(struct cxl *adapter)
1558 {
1559 int slice;
1560
1561 /* Stop the trace */
1562 cxl_p1_write(adapter, CXL_PSL_TRACE, 0x8000000000000017LL);
1563
1564 /* Stop the slice traces */
1565 spin_lock(&adapter->afu_list_lock);
1566 for (slice = 0; slice < adapter->slices; slice++) {
1567 if (adapter->afu[slice])
1568 cxl_p1n_write(adapter->afu[slice], CXL_PSL_SLICE_TRACE,
1569 0x8000000000000000LL);
1570 }
1571 spin_unlock(&adapter->afu_list_lock);
1572 }
1573
1574 static const struct cxl_service_layer_ops psl9_ops = {
1575 .adapter_regs_init = init_implementation_adapter_regs_psl9,
1576 .invalidate_all = cxl_invalidate_all_psl9,
1577 .afu_regs_init = init_implementation_afu_regs_psl9,
1578 .sanitise_afu_regs = sanitise_afu_regs_psl9,
1579 .register_serr_irq = cxl_native_register_serr_irq,
1580 .release_serr_irq = cxl_native_release_serr_irq,
1581 .handle_interrupt = cxl_irq_psl9,
1582 .fail_irq = cxl_fail_irq_psl,
1583 .activate_dedicated_process = cxl_activate_dedicated_process_psl9,
1584 .attach_afu_directed = cxl_attach_afu_directed_psl9,
1585 .attach_dedicated_process = cxl_attach_dedicated_process_psl9,
1586 .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl9,
1587 .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl9,
1588 .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl9,
1589 .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl9,
1590 .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl9,
1591 .debugfs_stop_trace = cxl_stop_trace_psl9,
1592 .timebase_read = timebase_read_psl9,
1593 .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
1594 .needs_reset_before_disable = true,
1595 };
1596
1597 static const struct cxl_service_layer_ops psl8_ops = {
1598 .adapter_regs_init = init_implementation_adapter_regs_psl8,
1599 .invalidate_all = cxl_invalidate_all_psl8,
1600 .afu_regs_init = init_implementation_afu_regs_psl8,
1601 .sanitise_afu_regs = sanitise_afu_regs_psl8,
1602 .register_serr_irq = cxl_native_register_serr_irq,
1603 .release_serr_irq = cxl_native_release_serr_irq,
1604 .handle_interrupt = cxl_irq_psl8,
1605 .fail_irq = cxl_fail_irq_psl,
1606 .activate_dedicated_process = cxl_activate_dedicated_process_psl8,
1607 .attach_afu_directed = cxl_attach_afu_directed_psl8,
1608 .attach_dedicated_process = cxl_attach_dedicated_process_psl8,
1609 .update_dedicated_ivtes = cxl_update_dedicated_ivtes_psl8,
1610 .debugfs_add_adapter_regs = cxl_debugfs_add_adapter_regs_psl8,
1611 .debugfs_add_afu_regs = cxl_debugfs_add_afu_regs_psl8,
1612 .psl_irq_dump_registers = cxl_native_irq_dump_regs_psl8,
1613 .err_irq_dump_registers = cxl_native_err_irq_dump_regs_psl8,
1614 .debugfs_stop_trace = cxl_stop_trace_psl8,
1615 .write_timebase_ctrl = write_timebase_ctrl_psl8,
1616 .timebase_read = timebase_read_psl8,
1617 .capi_mode = OPAL_PHB_CAPI_MODE_CAPI,
1618 .needs_reset_before_disable = true,
1619 };
1620
1621 static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev)
1622 {
1623 if (cxl_is_power8()) {
1624 dev_info(&dev->dev, "Device uses a PSL8\n");
1625 adapter->native->sl_ops = &psl8_ops;
1626 } else {
1627 dev_info(&dev->dev, "Device uses a PSL9\n");
1628 adapter->native->sl_ops = &psl9_ops;
1629 }
1630 }
1631
1632
1633 static struct cxl *cxl_pci_init_adapter(struct pci_dev *dev)
1634 {
1635 struct cxl *adapter;
1636 int rc;
1637
1638 adapter = cxl_alloc_adapter();
1639 if (!adapter)
1640 return ERR_PTR(-ENOMEM);
1641
1642 adapter->native = kzalloc(sizeof(struct cxl_native), GFP_KERNEL);
1643 if (!adapter->native) {
1644 rc = -ENOMEM;
1645 goto err_release;
1646 }
1647
1648 set_sl_ops(adapter, dev);
1649
1650 /* Set defaults for parameters which need to persist over
1651  * configure/reconfigure
1652  */
1653 adapter->perst_loads_image = true;
1654 adapter->perst_same_image = false;
1655
1656 rc = cxl_configure_adapter(adapter, dev);
1657 if (rc) {
1658 pci_disable_device(dev);
1659 goto err_release;
1660 }
1661
1662
1663 /* Don't care if this one fails: */
1664
1665 /*
1666  * After we call this function we must not free the adapter directly,
1667  * even if it returns an error!
1668  */
1669 if ((rc = cxl_register_adapter(adapter)))
1670 goto err_put1;
1671
1672 if ((rc = cxl_sysfs_adapter_add(adapter)))
1673 goto err_put1;
1674
1675 /* Release the context lock as adapter is configured */
1676 cxl_adapter_context_unlock(adapter);
1677
1678 return adapter;
1679
1680 err_put1:
1681 /* This should mirror cxl_remove_adapter, except without the
1682  * sysfs parts
1683  */
1684 cxl_debugfs_adapter_remove(adapter);
1685 cxl_deconfigure_adapter(adapter);
1686 device_unregister(&adapter->dev);
1687 return ERR_PTR(rc);
1688
1689 err_release:
1690 cxl_release_adapter(&adapter->dev);
1691 return ERR_PTR(rc);
1692 }
1693
1694 static void cxl_pci_remove_adapter(struct cxl *adapter)
1695 {
1696 pr_devel("cxl_remove_adapter\n");
1697
1698 cxl_sysfs_adapter_remove(adapter);
1699 cxl_debugfs_adapter_remove(adapter);
1700
1701 /*
1702  * Flush the adapter data cache as it is about to be removed.
1703  */
1704 cxl_data_cache_flush(adapter);
1705
1706 cxl_deconfigure_adapter(adapter);
1707
1708 device_unregister(&adapter->dev);
1709 }
1710
1711 #define CXL_MAX_PCIEX_PARENT 2
1712
1713 int cxl_slot_is_switched(struct pci_dev *dev)
1714 {
1715 struct device_node *np;
1716 int depth = 0;
1717
1718 if (!(np = pci_device_to_OF_node(dev))) {
1719 pr_err("cxl: np = NULL\n");
1720 return -ENODEV;
1721 }
1722 of_node_get(np);
1723 while (np) {
1724 np = of_get_next_parent(np);
1725 if (!of_node_is_type(np, "pciex"))
1726 break;
1727 depth++;
1728 }
1729 of_node_put(np);
1730 return (depth > CXL_MAX_PCIEX_PARENT);
1731 }
1732
1733 static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
1734 {
1735 struct cxl *adapter;
1736 int slice;
1737 int rc;
1738
1739 if (cxl_pci_is_vphb_device(dev)) {
1740 dev_dbg(&dev->dev, "cxl_init_adapter: Ignoring cxl vphb device\n");
1741 return -ENODEV;
1742 }
1743
1744 if (cxl_slot_is_switched(dev)) {
1745 dev_info(&dev->dev, "Ignoring card on incompatible PCI slot\n");
1746 return -ENODEV;
1747 }
1748
1749 if (cxl_is_power9() && !radix_enabled()) {
1750 dev_info(&dev->dev, "Only Radix mode supported\n");
1751 return -ENODEV;
1752 }
1753
1754 if (cxl_verbose)
1755 dump_cxl_config_space(dev);
1756
1757 adapter = cxl_pci_init_adapter(dev);
1758 if (IS_ERR(adapter)) {
1759 dev_err(&dev->dev, "cxl_init_adapter failed: %li\n", PTR_ERR(adapter));
1760 return PTR_ERR(adapter);
1761 }
1762
1763 for (slice = 0; slice < adapter->slices; slice++) {
1764 if ((rc = pci_init_afu(adapter, slice, dev))) {
1765 dev_err(&dev->dev, "AFU %i failed to initialise: %i\n", slice, rc);
1766 continue;
1767 }
1768
1769 rc = cxl_afu_select_best_mode(adapter->afu[slice]);
1770 if (rc)
1771 dev_err(&dev->dev, "AFU %i failed to start: %i\n", slice, rc);
1772 }
1773
1774 return 0;
1775 }
1776
1777 static void cxl_remove(struct pci_dev *dev)
1778 {
1779 struct cxl *adapter = pci_get_drvdata(dev);
1780 struct cxl_afu *afu;
1781 int i;
1782
1783
1784 /* Lock to prevent someone grabbing a ref through the adapter list as
1785  * we are removing it
1786  */
1787 for (i = 0; i < adapter->slices; i++) {
1788 afu = adapter->afu[i];
1789 cxl_pci_remove_afu(afu);
1790 }
1791 cxl_pci_remove_adapter(adapter);
1792 }
1793
1794 static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
1795 pci_channel_state_t state)
1796 {
1797 struct pci_dev *afu_dev;
1798 pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
1799 pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
1800
1801 /* There should only be one entry, but go through the list
1802  * anyway
1803  */
1804 if (afu == NULL || afu->phb == NULL)
1805 return result;
1806
1807 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
1808 if (!afu_dev->driver)
1809 continue;
1810
1811 afu_dev->error_state = state;
1812
1813 if (afu_dev->driver->err_handler)
1814 afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
1815 state);
1816
1817 if (afu_result == PCI_ERS_RESULT_DISCONNECT)
1818 result = PCI_ERS_RESULT_DISCONNECT;
1819 else if ((afu_result == PCI_ERS_RESULT_NONE) &&
1820 (result == PCI_ERS_RESULT_NEED_RESET))
1821 result = PCI_ERS_RESULT_NONE;
1822 }
1823 return result;
1824 }
1825
1826 static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev,
1827 pci_channel_state_t state)
1828 {
1829 struct cxl *adapter = pci_get_drvdata(pdev);
1830 struct cxl_afu *afu;
1831 pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
1832 pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
1833 int i;
1834
1835 /* At this point, we could still have an interrupt pending.
1836  * Let's try to get them out of the way before they do
1837  * anything we don't like.
1838  */
1839 schedule();
1840
1841 /* If we're permanently dead, give up. */
1842 if (state == pci_channel_io_perm_failure) {
1843 spin_lock(&adapter->afu_list_lock);
1844 for (i = 0; i < adapter->slices; i++) {
1845 afu = adapter->afu[i];
1846 /*
1847  * Tell the AFU drivers; but we don't care what they
1848  * say, we're going away.
1849  */
1850 cxl_vphb_error_detected(afu, state);
1851 }
1852 spin_unlock(&adapter->afu_list_lock);
1853 return PCI_ERS_RESULT_DISCONNECT;
1854 }
1855
1856 /* Are we reflashing?
1857  *
1858  * If we reflash, we could come back as something entirely
1859  * different, including a non-CAPI card. As such, by default
1860  * we don't participate in the process. We'll be unbound and
1861  * the slot re-probed. (TODO: check EEH doesn't blindly rebind
1862  * us!)
1863  *
1864  * However, this isn't the entire story: for reliability
1865  * reasons, we usually want to reflash the FPGA on PERST in
1866  * order to get back to a more reliable known-good state.
1867  *
1868  * This causes us a bit of a problem: if we reflash we can't
1869  * trust that we'll come back the same - we could have a new
1870  * image and been PERSTed in order to load that
1871  * image. However, most of the time we actually *will* come
1872  * back the same - for example a regular EEH event.
1873  *
1874  * Therefore, we allow the user to assert that the image is
1875  * indeed the same and that we should continue on into EEH
1876  * anyway.
1877  */
1878 if (adapter->perst_loads_image && !adapter->perst_same_image) {
1879 /* TODO take the PHB out of CXL mode */
1880 dev_info(&pdev->dev, "reflashing, so opting out of EEH!\n");
1881 return PCI_ERS_RESULT_NONE;
1882 }
1883
1884 /*
1885  * At this point, we want to try to recover: we'll always need a
1886  * complete slot reset - we don't trust any other reset.
1887  *
1888  * For each AFU we:
1889  *  - send the bound driver an error_detected callback; it can ask us
1890  *    to permanently detach the card, and if any bound AFU driver
1891  *    doesn't support EEH we give up on EEH entirely.
1892  *  - detach all contexts associated with the AFU. This does not free
1893  *    them, but puts them into a CLOSED state so the associated files
1894  *    return useful errors to userland; it also unmaps, but does not
1895  *    free, any IRQs.
1896  *  - clean up our side, releasing and unmapping resources we hold so
1897  *    we can wire them up again when the hardware comes back up.
1898  */
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
1934
1935
1936 spin_lock(&adapter->afu_list_lock);
1937
1938 for (i = 0; i < adapter->slices; i++) {
1939 afu = adapter->afu[i];
1940
1941 if (afu == NULL)
1942 continue;
1943
1944 afu_result = cxl_vphb_error_detected(afu, state);
1945 cxl_context_detach_all(afu);
1946 cxl_ops->afu_deactivate_mode(afu, afu->current_mode);
1947 pci_deconfigure_afu(afu);
1948
1949 /* Disconnect trumps all, NONE trumps NEED_RESET */
1950 if (afu_result == PCI_ERS_RESULT_DISCONNECT)
1951 result = PCI_ERS_RESULT_DISCONNECT;
1952 else if ((afu_result == PCI_ERS_RESULT_NONE) &&
1953 (result == PCI_ERS_RESULT_NEED_RESET))
1954 result = PCI_ERS_RESULT_NONE;
1955 }
1956 spin_unlock(&adapter->afu_list_lock);
1957
1958 /* Stop new contexts from being attached while we recover */
1959 if (cxl_adapter_context_lock(adapter) != 0)
1960 dev_warn(&adapter->dev,
1961 "Couldn't take context lock with %d active-contexts\n",
1962 atomic_read(&adapter->contexts_num));
1963
1964 cxl_deconfigure_adapter(adapter);
1965
1966 return result;
1967 }
1968
1969 static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
1970 {
1971 struct cxl *adapter = pci_get_drvdata(pdev);
1972 struct cxl_afu *afu;
1973 struct cxl_context *ctx;
1974 struct pci_dev *afu_dev;
1975 pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
1976 pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
1977 int i;
1978
1979 if (cxl_configure_adapter(adapter, pdev))
1980 goto err;
1981
1982 /*
1983  * Unlock context activation for the adapter. Ideally this would be
1984  * done in cxl_pci_resume, but the adapter must be able to accept
1985  * contexts again before the AFU drivers' slot_reset callbacks run.
1986  */
1987 cxl_adapter_context_unlock(adapter);
1988
1989 spin_lock(&adapter->afu_list_lock);
1990 for (i = 0; i < adapter->slices; i++) {
1991 afu = adapter->afu[i];
1992
1993 if (afu == NULL)
1994 continue;
1995
1996 if (pci_configure_afu(afu, adapter, pdev))
1997 goto err_unlock;
1998
1999 if (cxl_afu_select_best_mode(afu))
2000 goto err_unlock;
2001
2002 if (afu->phb == NULL)
2003 continue;
2004
2005 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2006 /* Reset the device context.
2007  * TODO: make this less disruptive
2008  */
2009 ctx = cxl_get_context(afu_dev);
2010
2011 if (ctx && cxl_release_context(ctx))
2012 goto err_unlock;
2013
2014 ctx = cxl_dev_context_init(afu_dev);
2015 if (IS_ERR(ctx))
2016 goto err_unlock;
2017
2018 afu_dev->dev.archdata.cxl_ctx = ctx;
2019
2020 if (cxl_ops->afu_check_and_enable(afu))
2021 goto err_unlock;
2022
2023 afu_dev->error_state = pci_channel_io_normal;
2024
2025 /* If there's a driver attached, allow it to
2026  * chime in on recovery. Drivers should check
2027  * if everything has come back OK, but
2028  * shouldn't start new work until we call
2029  * their resume function.
2030  */
2031 if (!afu_dev->driver)
2032 continue;
2033
2034 if (afu_dev->driver->err_handler &&
2035 afu_dev->driver->err_handler->slot_reset)
2036 afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);
2037
2038 if (afu_result == PCI_ERS_RESULT_DISCONNECT)
2039 result = PCI_ERS_RESULT_DISCONNECT;
2040 }
2041 }
2042
2043 spin_unlock(&adapter->afu_list_lock);
2044 return result;
2045
2046 err_unlock:
2047 spin_unlock(&adapter->afu_list_lock);
2048
2049 err:
2050 /* All the bits that happen in both error_detected and cxl_remove
2051  * should be idempotent, so we don't need to worry about leaving a mix
2052  * of unconfigured and reconfigured resources.
2053  */
2054 dev_err(&pdev->dev, "EEH recovery failed. Asking to be disconnected.\n");
2055 return PCI_ERS_RESULT_DISCONNECT;
2056 }
2057
2058 static void cxl_pci_resume(struct pci_dev *pdev)
2059 {
2060 struct cxl *adapter = pci_get_drvdata(pdev);
2061 struct cxl_afu *afu;
2062 struct pci_dev *afu_dev;
2063 int i;
2064
2065 /* Everything is back now. Drivers should restart work now.
2066  * This is not the place to be checking if everything came back up
2067  * properly, because there's no return value: do that in slot_reset.
2068  */
2069 spin_lock(&adapter->afu_list_lock);
2070 for (i = 0; i < adapter->slices; i++) {
2071 afu = adapter->afu[i];
2072
2073 if (afu == NULL || afu->phb == NULL)
2074 continue;
2075
2076 list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
2077 if (afu_dev->driver && afu_dev->driver->err_handler &&
2078 afu_dev->driver->err_handler->resume)
2079 afu_dev->driver->err_handler->resume(afu_dev);
2080 }
2081 }
2082 spin_unlock(&adapter->afu_list_lock);
2083 }
2084
2085 static const struct pci_error_handlers cxl_err_handler = {
2086 .error_detected = cxl_pci_error_detected,
2087 .slot_reset = cxl_pci_slot_reset,
2088 .resume = cxl_pci_resume,
2089 };
2090
2091 struct pci_driver cxl_pci_driver = {
2092 .name = "cxl-pci",
2093 .id_table = cxl_pci_tbl,
2094 .probe = cxl_probe,
2095 .remove = cxl_remove,
2096 .shutdown = cxl_remove,
2097 .err_handler = &cxl_err_handler,
2098 };