This source file includes following definitions.
- get_skx_dev
- get_all_munits
- skx_check_ecc
- skx_get_dimm_config
- skx_sad_decode
- skx_do_interleave
- skx_tad_decode
- skx_rir_decode
- skx_bits
- skx_bank_bits
- skx_mad_decode
- skx_decode
- debugfs_u64_set
- setup_skx_debug
- teardown_skx_debug
- setup_skx_debug
- teardown_skx_debug
- skx_init
- skx_exit
1
2
3
4
5
6
7 #include <linux/kernel.h>
8 #include <linux/processor.h>
9 #include <asm/cpu_device_id.h>
10 #include <asm/intel-family.h>
11 #include <asm/mce.h>
12
13 #include "edac_module.h"
14 #include "skx_common.h"
15
16 #define EDAC_MOD_STR "skx_edac"
17
18
19
20
21 #define skx_printk(level, fmt, arg...) \
22 edac_printk(level, "skx", fmt, ##arg)
23
24 #define skx_mc_printk(mci, level, fmt, arg...) \
25 edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
26
27 static struct list_head *skx_edac_list;
28
29 static u64 skx_tolm, skx_tohm;
30 static int skx_num_sockets;
31 static unsigned int nvdimm_count;
32
33 #define MASK26 0x3FFFFFF
34 #define MASK29 0x1FFFFFFF
35
36 static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
37 {
38 struct skx_dev *d;
39
40 list_for_each_entry(d, skx_edac_list, list) {
41 if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number)
42 return d;
43 }
44
45 return NULL;
46 }
47
/* Role a discovered PCI device plays in the decode chain */
enum munittype {
	CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD
};

/* Description of one PCI device type this driver must locate */
struct munit {
	u16 did;			/* PCI device ID to search for */
	u16 devfn[SKX_NUM_IMC];		/* expected devfn, one per IMC (channel devices only) */
	u8 busidx;			/* index into skx_dev->bus[] used to match the socket */
	u8 per_socket;			/* expected instances per socket (0 = don't check count) */
	enum munittype mtype;		/* how get_all_munits() should record the device */
};
59
/*
 * List of devices we need to find to decode memory errors.
 *
 * 0x2054/0x2055 appear once per socket (SAD and utility registers).
 * 0x2040/0x2044/0x2048 appear once per memory controller (two IMCs per
 * socket) and provide channels 0/1/2. 0x208e carries the mcroute
 * register; its per_socket is 0 because get_all_munits() does not count
 * these devices (ndev--), so the expected total is zero.
 */
static const struct munit skx_all_munits[] = {
	{ 0x2054, { }, 1, 1, SAD_ALL },
	{ 0x2055, { }, 1, 1, UTIL_ALL },
	{ 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
	{ 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
	{ 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
	{ 0x208e, { }, 1, 0, SAD },
	{ }
};
74
75 static int get_all_munits(const struct munit *m)
76 {
77 struct pci_dev *pdev, *prev;
78 struct skx_dev *d;
79 u32 reg;
80 int i = 0, ndev = 0;
81
82 prev = NULL;
83 for (;;) {
84 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
85 if (!pdev)
86 break;
87 ndev++;
88 if (m->per_socket == SKX_NUM_IMC) {
89 for (i = 0; i < SKX_NUM_IMC; i++)
90 if (m->devfn[i] == pdev->devfn)
91 break;
92 if (i == SKX_NUM_IMC)
93 goto fail;
94 }
95 d = get_skx_dev(pdev->bus, m->busidx);
96 if (!d)
97 goto fail;
98
99
100 if (unlikely(pci_enable_device(pdev) < 0)) {
101 skx_printk(KERN_ERR, "Couldn't enable device %04x:%04x\n",
102 PCI_VENDOR_ID_INTEL, m->did);
103 goto fail;
104 }
105
106 switch (m->mtype) {
107 case CHAN0: case CHAN1: case CHAN2:
108 pci_dev_get(pdev);
109 d->imc[i].chan[m->mtype].cdev = pdev;
110 break;
111 case SAD_ALL:
112 pci_dev_get(pdev);
113 d->sad_all = pdev;
114 break;
115 case UTIL_ALL:
116 pci_dev_get(pdev);
117 d->util_all = pdev;
118 break;
119 case SAD:
120
121
122
123
124
125
126 pci_read_config_dword(pdev, 0xB4, ®);
127 if (reg != 0) {
128 if (d->mcroute == 0) {
129 d->mcroute = reg;
130 } else if (d->mcroute != reg) {
131 skx_printk(KERN_ERR, "mcroute mismatch\n");
132 goto fail;
133 }
134 }
135 ndev--;
136 break;
137 }
138
139 prev = pdev;
140 }
141
142 return ndev;
143 fail:
144 pci_dev_put(pdev);
145 return -ENODEV;
146 }
147
/* This driver binds only to Skylake-X server (Xeon Scalable) CPUs */
static const struct x86_cpu_id skx_cpuids[] = {
	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
153
154 static bool skx_check_ecc(u32 mcmtr)
155 {
156 return !!GET_BITFIELD(mcmtr, 2, 2);
157 }
158
159 static int skx_get_dimm_config(struct mem_ctl_info *mci)
160 {
161 struct skx_pvt *pvt = mci->pvt_info;
162 u32 mtr, mcmtr, amap, mcddrtcfg;
163 struct skx_imc *imc = pvt->imc;
164 struct dimm_info *dimm;
165 int i, j;
166 int ndimms;
167
168
169 pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
170
171 for (i = 0; i < SKX_NUM_CHANNELS; i++) {
172 ndimms = 0;
173 pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
174 pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
175 for (j = 0; j < SKX_NUM_DIMMS; j++) {
176 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
177 mci->n_layers, i, j, 0);
178 pci_read_config_dword(imc->chan[i].cdev,
179 0x80 + 4 * j, &mtr);
180 if (IS_DIMM_PRESENT(mtr)) {
181 ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j);
182 } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
183 ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
184 EDAC_MOD_STR);
185 nvdimm_count++;
186 }
187 }
188 if (ndimms && !skx_check_ecc(mcmtr)) {
189 skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
190 return -ENODEV;
191 }
192 }
193
194 return 0;
195 }
196
#define SKX_MAX_SAD 24

/* SAD (System Address Decoder) entry/interleave-list registers, read
 * from the per-socket sad_all device. Entry i: DRAM rule at 0x60+8*i,
 * interleave list at 0x64+8*i.
 */
#define SKX_GET_SAD(d, i, reg) \
	pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &(reg))
#define SKX_GET_ILV(d, i, reg) \
	pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &(reg))

/* Field extractors for a SAD DRAM rule */
#define SKX_SAD_MOD3MODE(sad)	GET_BITFIELD((sad), 30, 31)
#define SKX_SAD_MOD3(sad)	GET_BITFIELD((sad), 27, 27)
#define SKX_SAD_LIMIT(sad)	(((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
#define SKX_SAD_MOD3ASMOD2(sad)	GET_BITFIELD((sad), 5, 6)
#define SKX_SAD_ATTR(sad)	GET_BITFIELD((sad), 3, 4)
#define SKX_SAD_INTERLEAVE(sad)	GET_BITFIELD((sad), 1, 2)
#define SKX_SAD_ENABLE(sad)	GET_BITFIELD((sad), 0, 0)

/* Interleave-list target: bit 3 clear means a remote socket */
#define SKX_ILV_REMOTE(tgt)	(((tgt) & 8) == 0)
#define SKX_ILV_TARGET(tgt)	((tgt) & 7)
214
/*
 * First stage of address translation: use the SAD registers to map a
 * system address to a socket, IMC and channel. On success fills in
 * res->dev/socket/imc/channel; returns false if the address is out of
 * range or no SAD entry covers it.
 */
static bool skx_sad_decode(struct decoded_addr *res)
{
	struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list);
	u64 addr = res->addr;
	int i, idx, tgt, lchan, shift;
	u32 sad, ilv;
	u64 limit, prev_limit;
	int remote = 0;

	/* Simple sanity check for I/O space or out of range */
	if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
		edac_dbg(0, "Address 0x%llx out of range\n", addr);
		return false;
	}

restart:
	prev_limit = 0;
	for (i = 0; i < SKX_MAX_SAD; i++) {
		SKX_GET_SAD(d, i, sad);
		limit = SKX_SAD_LIMIT(sad);
		if (SKX_SAD_ENABLE(sad)) {
			if (addr >= prev_limit && addr <= limit)
				goto sad_found;
		}
		prev_limit = limit + 1;
	}
	edac_dbg(0, "No SAD entry for 0x%llx\n", addr);
	return false;

sad_found:
	SKX_GET_ILV(d, i, ilv);

	/* Which address bits pick the interleave-list slot */
	switch (SKX_SAD_INTERLEAVE(sad)) {
	case 0:
		idx = GET_BITFIELD(addr, 6, 8);
		break;
	case 1:
		idx = GET_BITFIELD(addr, 8, 10);
		break;
	case 2:
		idx = GET_BITFIELD(addr, 12, 14);
		break;
	case 3:
		idx = GET_BITFIELD(addr, 30, 32);
		break;
	}

	tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);

	/* Target points at another socket: switch to it and restart.
	 * A second remote hop would indicate corrupt tables.
	 */
	if (SKX_ILV_REMOTE(tgt)) {
		if (remote) {
			edac_dbg(0, "Double remote!\n");
			return false;
		}
		remote = 1;
		list_for_each_entry(d, skx_edac_list, list) {
			if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
				goto restart;
		}
		edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
		return false;
	}

	if (SKX_SAD_MOD3(sad) == 0) {
		lchan = SKX_ILV_TARGET(tgt);
	} else {
		/* mod3 interleave: pick which address bits feed the modulo */
		switch (SKX_SAD_MOD3MODE(sad)) {
		case 0:
			shift = 6;
			break;
		case 1:
			shift = 8;
			break;
		case 2:
			shift = 12;
			break;
		default:
			edac_dbg(0, "illegal mod3mode\n");
			return false;
		}
		switch (SKX_SAD_MOD3ASMOD2(sad)) {
		case 0:
			lchan = (addr >> shift) % 3;
			break;
		case 1:
			lchan = (addr >> shift) % 2;
			break;
		case 2:
			lchan = (addr >> shift) % 2;
			lchan = (lchan << 1) | !lchan;
			break;
		case 3:
			lchan = ((addr >> shift) % 2) << 1;
			break;
		}
		/* Low bit of the target joins the computed channel */
		lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
	}

	res->dev = d;
	res->socket = d->imc[0].src_id;
	/* mcroute maps the logical channel to an IMC (3 bits each,
	 * from bit 0) and physical channel (2 bits each, from bit 18)
	 */
	res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
	res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);

	edac_dbg(2, "0x%llx: socket=%d imc=%d channel=%d\n",
		 res->addr, res->socket, res->imc, res->channel);
	return true;
}
323
#define SKX_MAX_TAD 8

/* TAD (Target Address Decoder) registers. Base and wayness are read
 * via channel 0 of the IMC; the per-channel interleave offset via the
 * channel's own device.
 */
#define SKX_GET_TADBASE(d, mc, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &(reg))
#define SKX_GET_TADWAYNESS(d, mc, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &(reg))
#define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &(reg))

/* Field extractors for TAD base/wayness/offset registers */
#define SKX_TAD_BASE(b)		((u64)GET_BITFIELD((b), 12, 31) << 26)
#define SKX_TAD_SKT_GRAN(b)	GET_BITFIELD((b), 4, 5)
#define SKX_TAD_CHN_GRAN(b)	GET_BITFIELD((b), 6, 7)
#define SKX_TAD_LIMIT(b)	(((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
#define SKX_TAD_OFFSET(b)	((u64)GET_BITFIELD((b), 4, 23) << 26)
#define SKX_TAD_SKTWAYS(b)	(1 << GET_BITFIELD((b), 10, 11))
#define SKX_TAD_CHNWAYS(b)	(GET_BITFIELD((b), 8, 9) + 1)

/* Interleave granularity as a bit shift: 64B, 256B, 4KB, 1GB */
static int skx_granularity[] = { 6, 8, 12, 30 };
343
344 static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
345 {
346 addr >>= shift;
347 addr /= ways;
348 addr <<= shift;
349
350 return addr | (lowbits & ((1ull << shift) - 1));
351 }
352
/*
 * Second stage: use the TAD registers to convert the system address
 * (with socket/imc/channel already known) into a channel address,
 * removing socket and channel interleaving. Fills in res->chan_addr,
 * res->sktways and res->chanways.
 */
static bool skx_tad_decode(struct decoded_addr *res)
{
	int i;
	u32 base, wayness, chnilvoffset;
	int skt_interleave_bit, chn_interleave_bit;
	u64 channel_addr;

	/* Find the TAD entry whose [base, limit] covers this address */
	for (i = 0; i < SKX_MAX_TAD; i++) {
		SKX_GET_TADBASE(res->dev, res->imc, i, base);
		SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
		if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
			goto tad_found;
	}
	edac_dbg(0, "No TAD entry for 0x%llx\n", res->addr);
	return false;

tad_found:
	res->sktways = SKX_TAD_SKTWAYS(wayness);
	res->chanways = SKX_TAD_CHNWAYS(wayness);
	skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
	chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];

	/* Subtract the per-channel interleave offset first */
	SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
	channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);

	if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
		/* 3-way channel interleave: undo channel first, then
		 * socket, keeping low bits from the intermediate value.
		 */
		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
						 res->chanways, channel_addr);
		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
						 res->sktways, channel_addr);
	} else {
		/* Undo socket then channel interleave, preserving the
		 * low bits of the original system address.
		 */
		channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
						 res->sktways, res->addr);
		channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
						 res->chanways, res->addr);
	}

	res->chan_addr = channel_addr;

	edac_dbg(2, "0x%llx: chan_addr=0x%llx sktways=%d chanways=%d\n",
		 res->addr, res->chan_addr, res->sktways, res->chanways);
	return true;
}
398
#define SKX_MAX_RIR 4

/* RIR (Rank Interleave Register) accessors, per IMC channel.
 * Wayness/limit entry i at 0x108+4*i; interleave target (idx, i)
 * at 0x120+16*idx+4*i.
 */
#define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev,	\
			      0x108 + 4 * (i), &(reg))
#define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
	pci_read_config_dword((d)->imc[mc].chan[ch].cdev,	\
			      0x120 + 16 * (idx) + 4 * (i), &(reg))

/* Field extractors for RIR wayness and interleave registers */
#define SKX_RIR_VALID(b)	GET_BITFIELD((b), 31, 31)
#define SKX_RIR_LIMIT(b)	(((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
#define SKX_RIR_WAYS(b)		(1 << GET_BITFIELD((b), 28, 29))
#define SKX_RIR_CHAN_RANK(b)	GET_BITFIELD((b), 16, 19)
#define SKX_RIR_OFFSET(b)	((u64)(GET_BITFIELD((b), 2, 15) << 26))
413
/*
 * Third stage: use the RIR registers to turn the channel address into
 * a rank address plus DIMM/rank numbers. Fills in res->rank_address,
 * res->channel_rank, res->dimm and res->rank.
 */
static bool skx_rir_decode(struct decoded_addr *res)
{
	int i, idx, chan_rank;
	int shift;
	u32 rirway, rirlv;
	u64 rank_addr, prev_limit = 0, limit;

	/* Rank interleave bit position depends on the page policy of
	 * DIMM 0 on this channel: bit 6 for close page, 13 for open.
	 */
	if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
		shift = 6;
	else
		shift = 13;

	/* Find the valid RIR entry covering this channel address */
	for (i = 0; i < SKX_MAX_RIR; i++) {
		SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
		limit = SKX_RIR_LIMIT(rirway);
		if (SKX_RIR_VALID(rirway)) {
			if (prev_limit <= res->chan_addr &&
			    res->chan_addr <= limit)
				goto rir_found;
		}
		prev_limit = limit;
	}
	edac_dbg(0, "No RIR entry for 0x%llx\n", res->addr);
	return false;

rir_found:
	/* Divide out the rank interleave above bit "shift", keeping
	 * the low bits of the channel address.
	 */
	rank_addr = res->chan_addr >> shift;
	rank_addr /= SKX_RIR_WAYS(rirway);
	rank_addr <<= shift;
	rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);

	res->rank_address = rank_addr;
	/* Which interleave slot this address fell into */
	idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);

	SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
	/* Apply the per-target rank offset */
	res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
	chan_rank = SKX_RIR_CHAN_RANK(rirlv);
	res->channel_rank = chan_rank;
	res->dimm = chan_rank / 4;	/* four ranks per DIMM slot */
	res->rank = chan_rank % 4;

	edac_dbg(2, "0x%llx: dimm=%d rank=%d chan_rank=%d rank_addr=0x%llx\n",
		 res->addr, res->dimm, res->rank,
		 res->channel_rank, res->rank_address);
	return true;
}
460
/*
 * Maps from rank-address bit positions to DRAM row/column bits, used
 * by skx_bits() in skx_mad_decode(). Entry N gives the rank-address
 * bit that supplies row/column bit N. Separate tables for close-page
 * vs open-page layouts, and a fine-grain-bank variant of the open
 * column map.
 */
static u8 skx_close_row[] = {
	15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
};

static u8 skx_close_column[] = {
	3, 4, 5, 14, 19, 23, 24, 25, 26, 27
};

static u8 skx_open_row[] = {
	14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
};

static u8 skx_open_column[] = {
	3, 4, 5, 6, 7, 8, 9, 10, 11, 12
};

static u8 skx_open_fine_column[] = {
	3, 4, 5, 7, 8, 9, 10, 11, 12, 13
};
480
481 static int skx_bits(u64 addr, int nbits, u8 *bits)
482 {
483 int i, res = 0;
484
485 for (i = 0; i < nbits; i++)
486 res |= ((addr >> bits[i]) & 1) << i;
487 return res;
488 }
489
490 static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
491 {
492 int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);
493
494 if (do_xor)
495 ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);
496
497 return ret;
498 }
499
/*
 * Final stage: map the rank address to DRAM row, column, bank address
 * and bank group, using the bit-map tables appropriate for the DIMM's
 * page policy and fine-grain-bank setting.
 */
static bool skx_mad_decode(struct decoded_addr *r)
{
	struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
	/* Bank-group bit 0 position differs in fine-grain-bank mode */
	int bg0 = dimm->fine_grain_bank ? 6 : 13;

	if (dimm->close_pg) {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
		r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
		r->column |= 0x400;	/* force column bit 10 set */
		r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
		r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
	} else {
		r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
		if (dimm->fine_grain_bank)
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
		else
			r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
		r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
		r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
	}
	/* Mask the row to the DIMM's actual number of row bits */
	r->row &= (1u << dimm->rowbits) - 1;

	edac_dbg(2, "0x%llx: row=0x%x col=0x%x bank_addr=%d bank_group=%d\n",
		 r->addr, r->row, r->column, r->bank_address,
		 r->bank_group);
	return true;
}
527
528 static bool skx_decode(struct decoded_addr *res)
529 {
530 return skx_sad_decode(res) && skx_tad_decode(res) &&
531 skx_rir_decode(res) && skx_mad_decode(res);
532 }
533
/* Machine-check notifier: decode memory errors at EDAC priority */
static struct notifier_block skx_mce_dec = {
	.notifier_call = skx_mce_check_error,
	.priority = MCE_PRIO_EDAC,
};
538
539 #ifdef CONFIG_EDAC_DEBUG
540
541
542
543
544
/* Debugfs directory for the fake-error injection file (see below) */
static struct dentry *skx_test;

/*
 * Debug hook: writing an address to the debugfs "addr" file builds a
 * synthetic corrected-error MCE at that address and feeds it through
 * the normal decode path via skx_mce_check_error().
 */
static int debugfs_u64_set(void *data, u64 val)
{
	struct mce m;

	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	memset(&m, 0, sizeof(m));
	/* ADDRV plus a model-specific error code of 0x90 —
	 * NOTE(review): presumably encodes a memory read error; confirm
	 * against the SDM MCA error-code tables.
	 */
	m.status = MCI_STATUS_ADDRV + 0x90;
	/* Corrected error count of 1 */
	m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
	m.addr = val;
	skx_mce_check_error(NULL, 0, &m);

	return 0;
}
/* Write-only attribute: no getter, setter is debugfs_u64_set() */
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
564
565 static void setup_skx_debug(void)
566 {
567 skx_test = edac_debugfs_create_dir("skx_test");
568 if (!skx_test)
569 return;
570
571 if (!edac_debugfs_create_file("addr", 0200, skx_test,
572 NULL, &fops_u64_wo)) {
573 debugfs_remove(skx_test);
574 skx_test = NULL;
575 }
576 }
577
/* Remove the debugfs test directory and its contents (safe if NULL) */
static void teardown_skx_debug(void)
{
	debugfs_remove_recursive(skx_test);
}
582 #else
583 static inline void setup_skx_debug(void) {}
584 static inline void teardown_skx_debug(void) {}
585 #endif
586
587
588
589
590
591
592
/*
 * skx_init:
 *	- check that no other EDAC MC driver owns the platform
 *	- make sure we are running on the correct CPU model
 *	- search for all the PCI devices we need
 *	- register one EDAC memory controller per IMC
 *	- hook up the decoder and MCE notifier
 *
 * On any failure after bus mapping, skx_remove() tears down whatever
 * was set up so far.
 */
static int __init skx_init(void)
{
	const struct x86_cpu_id *id;
	const struct munit *m;
	const char *owner;
	int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
	u8 mc = 0, src_id, node_id;
	struct skx_dev *d;

	edac_dbg(2, "\n");

	/* Only one memory-controller EDAC driver may be loaded */
	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	id = x86_match_cpu(skx_cpuids);
	if (!id)
		return -ENODEV;

	/* Read TOLM/TOHM (top of low/high memory) for range checks */
	rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(0x2016, 0xcc, SKX, &skx_edac_list);
	if (rc < 0)
		goto fail;
	if (rc == 0) {
		edac_dbg(2, "No memory controllers found\n");
		return -ENODEV;
	}
	skx_num_sockets = rc;

	/* Find every required device; count must match sockets found */
	for (m = skx_all_munits; m->did; m++) {
		rc = get_all_munits(m);
		if (rc < 0)
			goto fail;
		if (rc != m->per_socket * skx_num_sockets) {
			edac_dbg(2, "Expected %d, got %d of 0x%x\n",
				 m->per_socket * skx_num_sockets, rc, m->did);
			rc = -ENODEV;
			goto fail;
		}
	}

	/* Register an EDAC MC for each IMC on each socket */
	list_for_each_entry(d, skx_edac_list, list) {
		rc = skx_get_src_id(d, 0xf0, &src_id);
		if (rc < 0)
			goto fail;
		rc = skx_get_node_id(d, &node_id);
		if (rc < 0)
			goto fail;
		edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
		for (i = 0; i < SKX_NUM_IMC; i++) {
			d->imc[i].mc = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id = src_id;
			d->imc[i].node_id = node_id;
			rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
					      "Skylake Socket", EDAC_MOD_STR,
					      skx_get_dimm_config);
			if (rc < 0)
				goto fail;
		}
	}

	skx_set_decode(skx_decode);

	/* NVDIMM decode needs the ADXL translation library */
	if (nvdimm_count && skx_adxl_get() == -ENODEV)
		skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	setup_skx_debug();

	mce_register_decode_chain(&skx_mce_dec);

	return 0;
fail:
	skx_remove();
	return rc;
}
675
/*
 * Module teardown. Order matters: stop receiving MCE notifications
 * first, then remove debugfs, release the ADXL library if it was
 * taken, and finally free all device state via skx_remove().
 */
static void __exit skx_exit(void)
{
	edac_dbg(2, "\n");
	mce_unregister_decode_chain(&skx_mce_dec);
	teardown_skx_debug();
	if (nvdimm_count)
		skx_adxl_put();
	skx_remove();
}
685
686 module_init(skx_init);
687 module_exit(skx_exit);
688
689 module_param(edac_op_state, int, 0444);
690 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
691
692 MODULE_LICENSE("GPL v2");
693 MODULE_AUTHOR("Tony Luck");
694 MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");