This source file includes the following definitions.
- __amd64_read_pci_cfg_dword
- __amd64_write_pci_cfg_dword
- f15h_select_dct
- amd64_read_dct_pci_cfg
- __f17h_set_scrubval
- __set_scrub_rate
- set_scrub_rate
- get_scrub_rate
- base_limit_match
- find_mc_by_sys_addr
- get_cs_base_and_mask
- input_addr_to_csrow
- amd64_get_dram_hole_info
- sys_addr_to_dram_addr
- num_node_interleave_bits
- dram_addr_to_input_addr
- sys_addr_to_input_addr
- error_address_to_page_and_offset
- sys_addr_to_csrow
- determine_edac_cap
- debug_dump_dramcfg_low
- f17_get_cs_mode
- debug_display_dimm_sizes_df
- __dump_misc_regs_df
- __dump_misc_regs
- dump_misc_regs
- prep_chip_selects
- read_umc_base_mask
- read_dct_base_mask
- determine_memory_type
- k8_early_channel_count
- get_error_address
- pci_get_related_function
- read_dram_base_limit_regs
- k8_map_sysaddr_to_csrow
- ddr2_cs_size
- k8_dbam_to_chip_select
- f1x_early_channel_count
- f17_early_channel_count
- ddr3_cs_size
- ddr3_lrdimm_cs_size
- ddr4_cs_size
- f10_dbam_to_chip_select
- f15_dbam_to_chip_select
- f15_m60h_dbam_to_chip_select
- f16_dbam_to_chip_select
- f17_addr_mask_to_cs_size
- read_dram_ctl_register
- f15_m30h_determine_channel
- f1x_determine_channel
- f1x_get_norm_dct_addr
- f10_process_possible_spare
- f1x_lookup_addr_in_dct
- f1x_swap_interleaved_region
- f1x_match_to_this_node
- f15_m30h_match_to_this_node
- f1x_translate_sysaddr_to_cs
- f1x_map_sysaddr_to_csrow
- debug_display_dimm_sizes
- decode_syndrome
- map_err_sym_to_channel
- get_channel_from_ecc_syndrome
- __log_ecc_error
- decode_bus_error
- find_umc_channel
- decode_umc_error
- reserve_mc_sibling_devs
- free_mc_sibling_devs
- determine_ecc_sym_sz
- __read_mc_regs_df
- read_mc_regs
- get_csrow_nr_pages
- init_csrows_df
- init_csrows
- get_cpus_on_this_dct_cpumask
- nb_mce_bank_enabled_on_node
- toggle_ecc_err_reporting
- enable_ecc_error_reporting
- restore_ecc_error_reporting
- ecc_enabled
- f17h_determine_edac_ctl_cap
- setup_mci_misc_attrs
- per_family_init
- compute_num_umcs
- init_one_instance
- probe_one_instance
- remove_one_instance
- setup_pci_device
- amd64_edac_init
- amd64_edac_exit
1
2 #include "amd64_edac.h"
3 #include <asm/amd_nb.h>
4
5 static struct edac_pci_ctl_info *pci_ctl;
6
7 static int report_gart_errors;
8 module_param(report_gart_errors, int, 0644);
9
10 /*
11  * Set by command line parameter. If BIOS has enabled the ECC, this override
12  * is cleared to prevent re-enabling the hardware by this driver.
13  */
14 static int ecc_enable_override;
15 module_param(ecc_enable_override, int, 0644);
16
17 static struct msr __percpu *msrs;
18
19 /* Per-node stuff */
20 static struct ecc_settings **ecc_stngs;
21
22 /* Number of Unified Memory Controllers */
23 static u8 num_umcs;
24
25
26 /*
27  * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
28  * bandwidth to a valid bit pattern. The 'set' operation finds the
29  * matching-or-higher value.
30  * FIXME: Produce a better mapping/linearisation.
31  */
32 static const struct scrubrate {
33 u32 scrubval;
34 u32 bandwidth;
35 } scrubrates[] = {
36 { 0x01, 1600000000UL},
37 { 0x02, 800000000UL},
38 { 0x03, 400000000UL},
39 { 0x04, 200000000UL},
40 { 0x05, 100000000UL},
41 { 0x06, 50000000UL},
42 { 0x07, 25000000UL},
43 { 0x08, 12284069UL},
44 { 0x09, 6274509UL},
45 { 0x0A, 3121951UL},
46 { 0x0B, 1560975UL},
47 { 0x0C, 781440UL},
48 { 0x0D, 390720UL},
49 { 0x0E, 195300UL},
50 { 0x0F, 97650UL},
51 { 0x10, 48854UL},
52 { 0x11, 24427UL},
53 { 0x12, 12213UL},
54 { 0x13, 6101UL},
55 { 0x14, 3051UL},
56 { 0x15, 1523UL},
57 { 0x16, 761UL},
58 { 0x00, 0UL},
59 };
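/*
 * The table is sorted by decreasing bandwidth and terminated by the
 * { 0x00, 0 } "scrubbing off" sentinel, which __set_scrub_rate() below
 * falls back to when no entry is slow enough for the requested rate.
 */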
60
61 int __amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
62 u32 *val, const char *func)
63 {
64 int err = 0;
65
66 err = pci_read_config_dword(pdev, offset, val);
67 if (err)
68 amd64_warn("%s: error reading F%dx%03x.\n",
69 func, PCI_FUNC(pdev->devfn), offset);
70
71 return err;
72 }
73
74 int __amd64_write_pci_cfg_dword(struct pci_dev *pdev, int offset,
75 u32 val, const char *func)
76 {
77 int err = 0;
78
79 err = pci_write_config_dword(pdev, offset, val);
80 if (err)
81 amd64_warn("%s: error writing to F%dx%03x.\n",
82 func, PCI_FUNC(pdev->devfn), offset);
83
84 return err;
85 }
86
87 /*
88  * Select the DCT to which PCI cfg accesses are routed.
89  */
90 static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
91 {
92 u32 reg = 0;
93
94 amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
95 reg &= (pvt->model == 0x30) ? ~3 : ~1;
96 reg |= dct;
97 amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
98 }
99
100
101 /*
102  * Depending on the family, F2 DCT reads need special handling:
103  *
104  * K8: has a single DCT only and no address offsets >= 0x100
105  *
106  * F10h: each DCT has its own set of regs
107  *	DCT0 -> F2x040..
108  *	DCT1 -> F2x140..
109  *
110  * F16h: has only 1 DCT
111  *
112  * F15h: we select which DCT we access using F1x10C[DctCfgSel]
113  */
114 static inline int amd64_read_dct_pci_cfg(struct amd64_pvt *pvt, u8 dct,
115 int offset, u32 *val)
116 {
117 switch (pvt->fam) {
118 case 0xf:
119 if (dct || offset >= 0x100)
120 return -EINVAL;
121 break;
122
123 case 0x10:
124 if (dct) {
125 /*
126  * Note: If ganging is enabled, barring the regs
127  * F2x[1,0]98 and F2x[1,0]9C, reads to F2x1xx
128  * return 0 (cf. Section 2.8.1 of the F10h BKDG).
129  */
130 if (dct_ganging_enabled(pvt))
131 return 0;
132
133 offset += 0x100;
134 }
135 break;
136
137 case 0x15:
138 /*
139  * F15h: F2x1xx addresses do not map explicitly to DCT1.
140  * We should select which DCT we access using F1x10C[DctCfgSel].
141  */
142 dct = (dct && pvt->model == 0x30) ? 3 : dct;
143 f15h_select_dct(pvt, dct);
144 break;
145
146 case 0x16:
147 if (dct)
148 return -EINVAL;
149 break;
150
151 default:
152 break;
153 }
154 return amd64_read_pci_cfg(pvt->F2, offset, val);
155 }
156
157
158 /*
159  * Memory scrubber control interface. For K8, memory scrubbing is handled by
160  * hardware and can involve L2 cache, dcache as well as the main memory. With
161  * F10, this is extended to L3 cache scrubbing on CPU models sporting that
162  * functionality.
163  *
164  * This causes the "units" for the scrubbing speed to vary from 64 byte blocks
165  * to L2 quadword pairs and up to L3 cache lines, which leads to the
166  * bytes/sec figures used for the setting.
167  *
168  * Currently, we only do DRAM scrubbing. If the scrubbing is done in software
169  * on other archs, we might not have access to the caches directly.
170  */
171 static inline void __f17h_set_scrubval(struct amd64_pvt *pvt, u32 scrubval)
172 {
173 /*
174  * F17h supports scrub values between 0x5 and 0x14. Also, the values
175  * are shifted down by 0x5, so scrubval 0x5 is written to the register
176  * as 0x0, scrubval 0x6 as 0x1, etc.
177  */
178 if (scrubval >= 0x5 && scrubval <= 0x14) {
179 scrubval -= 0x5;
180 pci_write_bits32(pvt->F6, F17H_SCR_LIMIT_ADDR, scrubval, 0xF);
181 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 1, 0x1);
182 } else {
183 pci_write_bits32(pvt->F6, F17H_SCR_BASE_ADDR, 0, 0x1);
184 }
185 }
186
187 /*
188  * Scan the scrub rate mapping table for a close or matching bandwidth value.
189  */
190 static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
191 {
192 u32 scrubval;
193 int i;
194
195 /*
196  * Map the configured rate (new_bw) to a value specific to the AMD64
197  * memory controller and apply to the register. Search for the first
198  * table entry whose bandwidth does not exceed the requested value and
199  * program its scrubval.
200  *
201  * If no suitable bandwidth is found, fall back to the terminating
202  * { 0x00, 0 } entry, which turns DRAM scrubbing off entirely.
203  */
204 for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) {
205 /*
206  * Skip scrub rates which aren't recommended
207  * (see the F10h BKDG, F3x58).
208  */
209 if (scrubrates[i].scrubval < min_rate)
210 continue;
211
212 if (scrubrates[i].bandwidth <= new_bw)
213 break;
214 }
215
216 scrubval = scrubrates[i].scrubval;
217
218 if (pvt->fam == 0x17 || pvt->fam == 0x18) {
219 __f17h_set_scrubval(pvt, scrubval);
220 } else if (pvt->fam == 0x15 && pvt->model == 0x60) {
221 f15h_select_dct(pvt, 0);
222 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
223 f15h_select_dct(pvt, 1);
224 pci_write_bits32(pvt->F2, F15H_M60H_SCRCTRL, scrubval, 0x001F);
225 } else {
226 pci_write_bits32(pvt->F3, SCRCTRL, scrubval, 0x001F);
227 }
228
229 if (scrubval)
230 return scrubrates[i].bandwidth;
231
232 return 0;
233 }
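/*
 * Worked example for the selection loop above: a request of
 * new_bw = 30000000 walks past every faster entry until
 * 25000000 <= 30000000 matches, so scrubval 0x07 is programmed and
 * 25000000 is returned as the effective rate.
 */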
234
235 static int set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
236 {
237 struct amd64_pvt *pvt = mci->pvt_info;
238 u32 min_scrubrate = 0x5;
239
240 if (pvt->fam == 0xf)
241 min_scrubrate = 0x0;
242
243 if (pvt->fam == 0x15) {
244 /* Erratum #505 */
245 if (pvt->model < 0x10)
246 f15h_select_dct(pvt, 0);
247
248 if (pvt->model == 0x60)
249 min_scrubrate = 0x6;
250 }
251 return __set_scrub_rate(pvt, bw, min_scrubrate);
252 }
253
254 static int get_scrub_rate(struct mem_ctl_info *mci)
255 {
256 struct amd64_pvt *pvt = mci->pvt_info;
257 int i, retval = -EINVAL;
258 u32 scrubval = 0;
259
260 switch (pvt->fam) {
261 case 0x15:
262 /* Erratum #505 */
263 if (pvt->model < 0x10)
264 f15h_select_dct(pvt, 0);
265
266 if (pvt->model == 0x60)
267 amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
268 break;
269
270 case 0x17:
271 case 0x18:
272 amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
273 if (scrubval & BIT(0)) {
274 amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
275 scrubval &= 0xF;
276 scrubval += 0x5;
277 } else {
278 scrubval = 0;
279 }
280 break;
281
282 default:
283 amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
284 break;
285 }
286
287 scrubval = scrubval & 0x001F;
288
289 for (i = 0; i < ARRAY_SIZE(scrubrates); i++) {
290 if (scrubrates[i].scrubval == scrubval) {
291 retval = scrubrates[i].bandwidth;
292 break;
293 }
294 }
295 return retval;
296 }
297
298 /*
299  * Returns true if the SysAddr given by sys_addr matches the
300  * DRAM base/limit associated with node_id.
301  */
302 static bool base_limit_match(struct amd64_pvt *pvt, u64 sys_addr, u8 nid)
303 {
304 u64 addr;
305
306 /*
307  * The K8 treats this as a 40-bit value. However, bits 63-40 will be
308  * all ones if the most significant implemented address bit is 1.
309  * Here we discard bits 63-40. See section 3.4.2 of AMD publication
310  * 24592: AMD x86-64 Architecture Programmer's Manual Volume 1.
311  */
312 addr = sys_addr & 0x000000ffffffffffull;
313
314 return ((addr >= get_dram_base(pvt, nid)) &&
315 (addr <= get_dram_limit(pvt, nid)));
316 }
317
318 /*
319  * Attempt to map a SysAddr to a node. On success, return a pointer to the
320  * mem_ctl_info structure for the node that the SysAddr maps to.
321  *
322  * On failure, return NULL.
323  */
324 static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci,
325 u64 sys_addr)
326 {
327 struct amd64_pvt *pvt;
328 u8 node_id;
329 u32 intlv_en, bits;
330
331 /*
332  * Here we use the DRAM Base (section 3.4.4.1) and DRAM Limit (section
333  * 3.4.4.2) registers to map the SysAddr to a node ID.
334  */
335 pvt = mci->pvt_info;
336
337 /*
338  * The value of this field should be the same for all DRAM Base
339  * registers. Therefore we arbitrarily choose to read it from the
340  * register for node 0.
341  */
342 intlv_en = dram_intlv_en(pvt, 0);
343
344 if (intlv_en == 0) {
345 for (node_id = 0; node_id < DRAM_RANGES; node_id++) {
346 if (base_limit_match(pvt, sys_addr, node_id))
347 goto found;
348 }
349 goto err_no_match;
350 }
351
352 if (unlikely((intlv_en != 0x01) &&
353 (intlv_en != 0x03) &&
354 (intlv_en != 0x07))) {
355 amd64_warn("DRAM Base[IntlvEn] junk value: 0x%x, BIOS bug?\n", intlv_en);
356 return NULL;
357 }
358
359 bits = (((u32) sys_addr) >> 12) & intlv_en;
360
361 for (node_id = 0; ; ) {
362 if ((dram_intlv_sel(pvt, node_id) & intlv_en) == bits)
363 break;
364
365 if (++node_id >= DRAM_RANGES)
366 goto err_no_match;
367 }
368
369 /* sanity test for sys_addr */
370 if (unlikely(!base_limit_match(pvt, sys_addr, node_id))) {
371 amd64_warn("%s: sys_addr 0x%llx falls outside base/limit address "
372 "range for node %d with node interleaving enabled.\n",
373 __func__, sys_addr, node_id);
374 return NULL;
375 }
376
377 found:
378 return edac_mc_find((int)node_id);
379
380 err_no_match:
381 edac_dbg(2, "sys_addr 0x%lx doesn't match any node\n",
382 (unsigned long)sys_addr);
383
384 return NULL;
385 }
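/*
 * Node interleaving consumes SysAddr bits starting at bit 12: with
 * intlv_en = 0x3, for example, bits [13:12] select one of four nodes,
 * and the node whose DRAM Base[IntlvSel] matches those bits claims the
 * address.
 */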
386
387
388 /*
389  * Compute the CS base address and mask of @csrow on DCT @dct. For details see F2x[5C:40] in the BKDG.
390  */
391 static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
392 u64 *base, u64 *mask)
393 {
394 u64 csbase, csmask, base_bits, mask_bits;
395 u8 addr_shift;
396
397 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
398 csbase = pvt->csels[dct].csbases[csrow];
399 csmask = pvt->csels[dct].csmasks[csrow];
400 base_bits = GENMASK_ULL(31, 21) | GENMASK_ULL(15, 9);
401 mask_bits = GENMASK_ULL(29, 21) | GENMASK_ULL(15, 9);
402 addr_shift = 4;
403
404 /*
405  * F16h and F15h, models 30h and later, need two addr_shift values:
406  * 8 for high and 6 for low (cf. F16h BKDG).
407  */
408 } else if (pvt->fam == 0x16 ||
409 (pvt->fam == 0x15 && pvt->model >= 0x30)) {
410 csbase = pvt->csels[dct].csbases[csrow];
411 csmask = pvt->csels[dct].csmasks[csrow >> 1];
412
413 *base = (csbase & GENMASK_ULL(15, 5)) << 6;
414 *base |= (csbase & GENMASK_ULL(30, 19)) << 8;
415
416 *mask = ~0ULL;
417 /* poke holes for the csmask */
418 *mask &= ~((GENMASK_ULL(15, 5) << 6) |
419 (GENMASK_ULL(30, 19) << 8));
420
421 *mask |= (csmask & GENMASK_ULL(15, 5)) << 6;
422 *mask |= (csmask & GENMASK_ULL(30, 19)) << 8;
423
424 return;
425 } else {
426 csbase = pvt->csels[dct].csbases[csrow];
427 csmask = pvt->csels[dct].csmasks[csrow >> 1];
428 addr_shift = 8;
429
430 if (pvt->fam == 0x15)
431 base_bits = mask_bits =
432 GENMASK_ULL(30,19) | GENMASK_ULL(13,5);
433 else
434 base_bits = mask_bits =
435 GENMASK_ULL(28,19) | GENMASK_ULL(13,5);
436 }
437
438 *base = (csbase & base_bits) << addr_shift;
439
440 *mask = ~0ULL;
441 /* poke holes for the csmask */
442 *mask &= ~(mask_bits << addr_shift);
443 /* OR them in */
444 *mask |= (csmask & mask_bits) << addr_shift;
445 }
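/*
 * A chip select claims an address when (addr & ~mask) == (base & ~mask);
 * input_addr_to_csrow() and f1x_lookup_addr_in_dct() below both match
 * addresses against the base/mask pair produced here.
 */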
446
447 #define for_each_chip_select(i, dct, pvt) \
448 for (i = 0; i < pvt->csels[dct].b_cnt; i++)
449
450 #define chip_select_base(i, dct, pvt) \
451 pvt->csels[dct].csbases[i]
452
453 #define for_each_chip_select_mask(i, dct, pvt) \
454 for (i = 0; i < pvt->csels[dct].m_cnt; i++)
455
456 #define for_each_umc(i) \
457 for (i = 0; i < num_umcs; i++)
458
459 /*
460  * @input_addr is an InputAddr associated with the node given by mci.
461  * Return the csrow that input_addr maps to, or -1 on failure.
462  */
463 static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr)
464 {
465 struct amd64_pvt *pvt;
466 int csrow;
467 u64 base, mask;
468
469 pvt = mci->pvt_info;
470
471 for_each_chip_select(csrow, 0, pvt) {
472 if (!csrow_enabled(csrow, 0, pvt))
473 continue;
474
475 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
476
477 mask = ~mask;
478
479 if ((input_addr & mask) == (base & mask)) {
480 edac_dbg(2, "InputAddr 0x%lx matches csrow %d (node %d)\n",
481 (unsigned long)input_addr, csrow,
482 pvt->mc_node_id);
483
484 return csrow;
485 }
486 }
487 edac_dbg(2, "no matching csrow for InputAddr 0x%lx (MC node %d)\n",
488 (unsigned long)input_addr, pvt->mc_node_id);
489
490 return -1;
491 }
492
493
494 /*
495  * Obtain info from the DRAM Hole Address Register (section 3.4.8, pub #26094)
496  * for the node represented by mci. Info is passed back in *hole_base,
497  * *hole_offset, and *hole_size. The function returns 0 if info is valid or
498  * 1 if info is invalid. Info may be invalid for either of these reasons:
499  *
500  * - The revision of the node is not E or greater. In this case, the DRAM
501  *   Hole Address Register does not exist.
502  *
503  * - The DramHoleValid bit is cleared in the DRAM Hole Address Register,
504  *   indicating that its contents are not valid.
505  *
506  * The values passed back in *hole_base, *hole_offset, and *hole_size are
507  * driver specific. They are only valid if the function returns 0.
508  */
509 int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
510 u64 *hole_offset, u64 *hole_size)
511 {
512 struct amd64_pvt *pvt = mci->pvt_info;
513
514 /* Only rev E and later have the DRAM Hole Address Register. */
515 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
516 edac_dbg(1, " revision %d for node %d does not support DHAR\n",
517 pvt->ext_model, pvt->mc_node_id);
518 return 1;
519 }
520
521 /* Valid for Fam10h and above. */
522 if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
523 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
524 return 1;
525 }
526
527 if (!dhar_valid(pvt)) {
528 edac_dbg(1, " Dram Memory Hoisting is DISABLED on this node %d\n",
529 pvt->mc_node_id);
530 return 1;
531 }
532
533 /*
534  * +------------------+--------------------+--------------------+---
535  * | memory           | DRAM hole          | relocated          |
536  * | [0, (x - 1)]     | [x, 0xffffffff]    | addresses from     |
537  * |                  |                    | DRAM hole          |
538  * |                  |                    | [0x100000000,      |
539  * |                  |                    |  (0x100000000+     |
540  * |                  |                    |   (0xffffffff-x))] |
541  * +------------------+--------------------+--------------------+---
542  *
543  * Above is a diagram of physical memory showing the DRAM hole and the
544  * relocated addresses from the DRAM hole. As shown, the DRAM hole
545  * starts at address x (the base address) and extends through address
546  * 0xffffffff. The DRAM Hole Address Register (DHAR) relocates the
547  * range of addresses that used to be occupied by the DRAM hole to the
548  * range just above 4 GB: hole_offset is the amount to subtract from a
549  * relocated SysAddr to recover the original DramAddr.
550  */
551 *hole_base = dhar_base(pvt);
552 *hole_size = (1ULL << 32) - *hole_base;
553
554 *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
555 : k8_dhar_offset(pvt);
556
557 edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
558 pvt->mc_node_id, (unsigned long)*hole_base,
559 (unsigned long)*hole_offset, (unsigned long)*hole_size);
560
561 return 0;
562 }
563 EXPORT_SYMBOL_GPL(amd64_get_dram_hole_info);
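/*
 * Callers use the returned triple as follows: a SysAddr in
 * [4GB, 4GB + hole_size) was hoisted out of [hole_base, 4GB), and
 * subtracting hole_offset undoes the relocation (see
 * sys_addr_to_dram_addr() below).
 */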
564
565 /*
566  * Return the DramAddr that the SysAddr given by @sys_addr maps to. It is
567  * assumed that @sys_addr maps to the node given by mci.
568  *
569  * The first part of section 3.4.4 (p. 70) shows how the DRAM Base (section
570  * 3.4.4.1) and DRAM Limit (section 3.4.4.2) registers are used to translate
571  * a SysAddr to a DramAddr. If the DRAM Hole Address Register (DHAR) is
572  * enabled, then it is also involved in translating a SysAddr to a DramAddr.
573  * Sections 3.4.8 and 3.5.8.2 describe the DHAR and how it is used for
574  * memory hoisting. These parts of the documentation are unclear. I
575  * interpret them as follows:
576  *
577  * When node n receives a SysAddr, it processes the SysAddr as follows:
578  *
579  * 1. It extracts the DRAMBase and DRAMLimit values from the DRAM Base and
580  *    DRAM Limit registers for node n. If the SysAddr is not within the
581  *    range specified by the base and limit values, then node n ignores the
582  *    SysAddr (the SysAddr is for a different node). Otherwise it continues
583  *    to step 2 below.
584  *
585  * 2. If the DramHoleValid bit of the DHAR for node n is clear, the DHAR is
586  *    disabled, so skip to step 3 below. Otherwise, if the SysAddr is within
587  *    the range of relocated addresses (starting at 0x100000000), then the
588  *    hardware subtracts hole_offset from the SysAddr to produce the
589  *    DramAddr.
590  *
591  * 3. Otherwise the DramAddr is produced by subtracting the node's DRAM
592  *    Base from the SysAddr, as done in the code below.
593  */
594 static u64 sys_addr_to_dram_addr(struct mem_ctl_info *mci, u64 sys_addr)
595 {
596 struct amd64_pvt *pvt = mci->pvt_info;
597 u64 dram_base, hole_base, hole_offset, hole_size, dram_addr;
598 int ret;
599
600 dram_base = get_dram_base(pvt, pvt->mc_node_id);
601
602 ret = amd64_get_dram_hole_info(mci, &hole_base, &hole_offset,
603 &hole_size);
604 if (!ret) {
605 if ((sys_addr >= (1ULL << 32)) &&
606 (sys_addr < ((1ULL << 32) + hole_size))) {
607
608 dram_addr = sys_addr - hole_offset;
609
610 edac_dbg(2, "using DHAR to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
611 (unsigned long)sys_addr,
612 (unsigned long)dram_addr);
613
614 return dram_addr;
615 }
616 }
617
618 /*
619  * Translate the SysAddr to a DramAddr as shown near the start of
620  * section 3.4.4 (p. 70). Although sys_addr is a 64-bit value, the K8
621  * only deals with 40-bit values. Therefore we discard bits 63-40 of
622  * sys_addr below. If bit 39 of sys_addr is 1, then the bits we
623  * discard are all 1s. Otherwise the bits we discard are all 0s. See
624  * section 3.4.2 of AMD publication 24592: AMD x86-64 Architecture
625  * Programmer's Manual Volume 1, Application Programming.
626  */
627 dram_addr = (sys_addr & GENMASK_ULL(39, 0)) - dram_base;
628
629 edac_dbg(2, "using DRAM Base register to translate SysAddr 0x%lx to DramAddr 0x%lx\n",
630 (unsigned long)sys_addr, (unsigned long)dram_addr);
631 return dram_addr;
632 }
633
634
635 /*
636  * @intlv_en is the value of the IntlvEn field from a DRAM Base register
637  * (section 3.4.4.1). Return the number of SysAddr node-interleave bits.
638  */
639 static int num_node_interleave_bits(unsigned intlv_en)
640 {
641 static const int intlv_shift_table[] = { 0, 1, 0, 2, 0, 0, 0, 3 };
642 int n;
643
644 BUG_ON(intlv_en > 7);
645 n = intlv_shift_table[intlv_en];
646 return n;
647 }
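/*
 * Only 0x0, 0x1, 0x3 and 0x7 are valid IntlvEn encodings (1, 2, 4 or 8
 * nodes); the table maps them to 0, 1, 2 and 3 interleave bits
 * respectively, and the reserved encodings to 0.
 */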
648
649 /* Translate the DramAddr given by @dram_addr to an InputAddr. */
650 static u64 dram_addr_to_input_addr(struct mem_ctl_info *mci, u64 dram_addr)
651 {
652 struct amd64_pvt *pvt;
653 int intlv_shift;
654 u64 input_addr;
655
656 pvt = mci->pvt_info;
657
658 /*
659  * See the start of section 3.4.4 (p. 70, BKDG #26094, K8, revA-E)
660  * concerning translating a DramAddr to an InputAddr.
661  */
662 intlv_shift = num_node_interleave_bits(dram_intlv_en(pvt, 0));
663 input_addr = ((dram_addr >> intlv_shift) & GENMASK_ULL(35, 12)) +
664 (dram_addr & 0xfff);
665
666 edac_dbg(2, " Intlv Shift=%d DramAddr=0x%lx maps to InputAddr=0x%lx\n",
667 intlv_shift, (unsigned long)dram_addr,
668 (unsigned long)input_addr);
669
670 return input_addr;
671 }
672
673 /*
674  * Translate the SysAddr represented by @sys_addr to an InputAddr. It is
675  * assumed that @sys_addr maps to the node given by mci.
676  */
677 static u64 sys_addr_to_input_addr(struct mem_ctl_info *mci, u64 sys_addr)
678 {
679 u64 input_addr;
680
681 input_addr =
682 dram_addr_to_input_addr(mci, sys_addr_to_dram_addr(mci, sys_addr));
683
684 edac_dbg(2, "SysAddr 0x%lx translates to InputAddr 0x%lx\n",
685 (unsigned long)sys_addr, (unsigned long)input_addr);
686
687 return input_addr;
688 }
689
690 /* Map the Error address to a PAGE and PAGE OFFSET. */
691 static inline void error_address_to_page_and_offset(u64 error_address,
692 struct err_info *err)
693 {
694 err->page = (u32) (error_address >> PAGE_SHIFT);
695 err->offset = ((u32) error_address) & ~PAGE_MASK;
696 }
697
698 /*
699  * @sys_addr is an error address (a SysAddr) extracted from the MCA NB
700  * Address Low (section 3.6.4.5) and MCA NB Address High (section 3.6.4.6)
701  * registers of a node that detected an ECC memory error. We translate
702  * @sys_addr to a CSROW that it maps to.
703  *
704  * On failure, return -1.
705  */
706 static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
707 {
708 int csrow;
709
710 csrow = input_addr_to_csrow(mci, sys_addr_to_input_addr(mci, sys_addr));
711
712 if (csrow == -1)
713 amd64_mc_err(mci, "Failed to translate InputAddr to csrow for "
714 "address 0x%lx\n", (unsigned long)sys_addr);
715 return csrow;
716 }
717
718 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
719
720 /*
721  * Determine if the DIMMs have ECC enabled. ECC is enabled ONLY if all
722  * the DIMMs are ECC capable.
723  */
724 static unsigned long determine_edac_cap(struct amd64_pvt *pvt)
725 {
726 unsigned long edac_cap = EDAC_FLAG_NONE;
727 u8 bit;
728
729 if (pvt->umc) {
730 u8 i, umc_en_mask = 0, dimm_ecc_en_mask = 0;
731
732 for_each_umc(i) {
733 if (!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT))
734 continue;
735
736 umc_en_mask |= BIT(i);
737
738 /* UMC Configuration bit 12 (DimmEccEn) */
739 if (pvt->umc[i].umc_cfg & BIT(12))
740 dimm_ecc_en_mask |= BIT(i);
741 }
742
743 if (umc_en_mask == dimm_ecc_en_mask)
744 edac_cap = EDAC_FLAG_SECDED;
745 } else {
746 bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
747 ? 19
748 : 17;
749
750 if (pvt->dclr0 & BIT(bit))
751 edac_cap = EDAC_FLAG_SECDED;
752 }
753
754 return edac_cap;
755 }
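/*
 * On UMC-based systems the SECDED capability is reported only if every
 * enabled UMC also has DIMM ECC enabled, i.e. the two masks built above
 * are identical.
 */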
756
757 static void debug_display_dimm_sizes(struct amd64_pvt *, u8);
758
759 static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
760 {
761 edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
762
763 if (pvt->dram_type == MEM_LRDDR3) {
764 u32 dcsm = pvt->csels[chan].csmasks[0];
765
766 /*
767  * It's assumed all LRDIMMs in a DCT are equally configured.
768  * Hence, grab only one to obtain its 'rank multiply' value.
769  */
770 edac_dbg(1, " LRDIMM %dx rank multiply\n", (dcsm & 0x3));
771 }
772
773 edac_dbg(1, "All DIMMs support ECC:%s\n",
774 (dclr & BIT(19)) ? "yes" : "no");
775
776
777 edac_dbg(1, " PAR/ERR parity: %s\n",
778 (dclr & BIT(8)) ? "enabled" : "disabled");
779
780 if (pvt->fam == 0x10)
781 edac_dbg(1, " DCT 128bit mode width: %s\n",
782 (dclr & BIT(11)) ? "128b" : "64b");
783
784 edac_dbg(1, " x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
785 (dclr & BIT(12)) ? "yes" : "no",
786 (dclr & BIT(13)) ? "yes" : "no",
787 (dclr & BIT(14)) ? "yes" : "no",
788 (dclr & BIT(15)) ? "yes" : "no");
789 }
790
791 #define CS_EVEN_PRIMARY BIT(0)
792 #define CS_ODD_PRIMARY BIT(1)
793 #define CS_EVEN_SECONDARY BIT(2)
794 #define CS_ODD_SECONDARY BIT(3)
795
796 #define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
797 #define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
798
799 static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
800 {
801 int cs_mode = 0;
802
803 if (csrow_enabled(2 * dimm, ctrl, pvt))
804 cs_mode |= CS_EVEN_PRIMARY;
805
806 if (csrow_enabled(2 * dimm + 1, ctrl, pvt))
807 cs_mode |= CS_ODD_PRIMARY;
808
809 /* Asymmetric dual-rank DIMM support. */
810 if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
811 cs_mode |= CS_ODD_SECONDARY;
812
813 return cs_mode;
814 }
815
816 static void debug_display_dimm_sizes_df(struct amd64_pvt *pvt, u8 ctrl)
817 {
818 int dimm, size0, size1, cs0, cs1, cs_mode;
819
820 edac_printk(KERN_DEBUG, EDAC_MC, "UMC%d chip selects:\n", ctrl);
821
822 for (dimm = 0; dimm < 2; dimm++) {
823 cs0 = dimm * 2;
824 cs1 = dimm * 2 + 1;
825
826 cs_mode = f17_get_cs_mode(dimm, ctrl, pvt);
827
828 size0 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs0);
829 size1 = pvt->ops->dbam_to_cs(pvt, ctrl, cs_mode, cs1);
830
831 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
832 cs0, size0,
833 cs1, size1);
834 }
835 }
836
837 static void __dump_misc_regs_df(struct amd64_pvt *pvt)
838 {
839 struct amd64_umc *umc;
840 u32 i, tmp, umc_base;
841
842 for_each_umc(i) {
843 umc_base = get_umc_base(i);
844 umc = &pvt->umc[i];
845
846 edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg);
847 edac_dbg(1, "UMC%d UMC cfg: 0x%x\n", i, umc->umc_cfg);
848 edac_dbg(1, "UMC%d SDP ctrl: 0x%x\n", i, umc->sdp_ctrl);
849 edac_dbg(1, "UMC%d ECC ctrl: 0x%x\n", i, umc->ecc_ctrl);
850
851 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ECC_BAD_SYMBOL, &tmp);
852 edac_dbg(1, "UMC%d ECC bad symbol: 0x%x\n", i, tmp);
853
854 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_UMC_CAP, &tmp);
855 edac_dbg(1, "UMC%d UMC cap: 0x%x\n", i, tmp);
856 edac_dbg(1, "UMC%d UMC cap high: 0x%x\n", i, umc->umc_cap_hi);
857
858 edac_dbg(1, "UMC%d ECC capable: %s, ChipKill ECC capable: %s\n",
859 i, (umc->umc_cap_hi & BIT(30)) ? "yes" : "no",
860 (umc->umc_cap_hi & BIT(31)) ? "yes" : "no");
861 edac_dbg(1, "UMC%d All DIMMs support ECC: %s\n",
862 i, (umc->umc_cfg & BIT(12)) ? "yes" : "no");
863 edac_dbg(1, "UMC%d x4 DIMMs present: %s\n",
864 i, (umc->dimm_cfg & BIT(6)) ? "yes" : "no");
865 edac_dbg(1, "UMC%d x16 DIMMs present: %s\n",
866 i, (umc->dimm_cfg & BIT(7)) ? "yes" : "no");
867
868 if (pvt->dram_type == MEM_LRDDR4) {
869 amd_smn_read(pvt->mc_node_id, umc_base + UMCCH_ADDR_CFG, &tmp);
870 edac_dbg(1, "UMC%d LRDIMM %dx rank multiply\n",
871 i, 1 << ((tmp >> 4) & 0x3));
872 }
873
874 debug_display_dimm_sizes_df(pvt, i);
875 }
876
877 edac_dbg(1, "F0x104 (DRAM Hole Address): 0x%08x, base: 0x%08x\n",
878 pvt->dhar, dhar_base(pvt));
879 }
880
881 /* Display and decode various NB registers for debug purposes. */
882 static void __dump_misc_regs(struct amd64_pvt *pvt)
883 {
884 edac_dbg(1, "F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
885
886 edac_dbg(1, " NB two channel DRAM capable: %s\n",
887 (pvt->nbcap & NBCAP_DCT_DUAL) ? "yes" : "no");
888
889 edac_dbg(1, " ECC capable: %s, ChipKill ECC capable: %s\n",
890 (pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
891 (pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
892
893 debug_dump_dramcfg_low(pvt, pvt->dclr0, 0);
894
895 edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
896
897 edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
898 pvt->dhar, dhar_base(pvt),
899 (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
900 : f10_dhar_offset(pvt));
901
902 debug_display_dimm_sizes(pvt, 0);
903
904 /* Everything below this point is Fam10h and above. */
905 if (pvt->fam == 0xf)
906 return;
907
908 debug_display_dimm_sizes(pvt, 1);
909
910 /* Only if NOT ganged does dclr1 have valid info. */
911 if (!dct_ganging_enabled(pvt))
912 debug_dump_dramcfg_low(pvt, pvt->dclr1, 1);
913 }
914
915
916 static void dump_misc_regs(struct amd64_pvt *pvt)
917 {
918 if (pvt->umc)
919 __dump_misc_regs_df(pvt);
920 else
921 __dump_misc_regs(pvt);
922
923 edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
924
925 amd64_info("using x%u syndromes.\n", pvt->ecc_sym_sz);
926 }
927
928 /*
929  * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60].
930  */
931 static void prep_chip_selects(struct amd64_pvt *pvt)
932 {
933 if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
934 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
935 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
936 } else if (pvt->fam == 0x15 && pvt->model == 0x30) {
937 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
938 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
939 } else if (pvt->fam >= 0x17) {
940 int umc;
941
942 for_each_umc(umc) {
943 pvt->csels[umc].b_cnt = 4;
944 pvt->csels[umc].m_cnt = 2;
945 }
946
947 } else {
948 pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
949 pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
950 }
951 }
952
953 static void read_umc_base_mask(struct amd64_pvt *pvt)
954 {
955 u32 umc_base_reg, umc_base_reg_sec;
956 u32 umc_mask_reg, umc_mask_reg_sec;
957 u32 base_reg, base_reg_sec;
958 u32 mask_reg, mask_reg_sec;
959 u32 *base, *base_sec;
960 u32 *mask, *mask_sec;
961 int cs, umc;
962
963 for_each_umc(umc) {
964 umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR;
965 umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC;
966
967 for_each_chip_select(cs, umc, pvt) {
968 base = &pvt->csels[umc].csbases[cs];
969 base_sec = &pvt->csels[umc].csbases_sec[cs];
970
971 base_reg = umc_base_reg + (cs * 4);
972 base_reg_sec = umc_base_reg_sec + (cs * 4);
973
974 if (!amd_smn_read(pvt->mc_node_id, base_reg, base))
975 edac_dbg(0, " DCSB%d[%d]=0x%08x reg: 0x%x\n",
976 umc, cs, *base, base_reg);
977
978 if (!amd_smn_read(pvt->mc_node_id, base_reg_sec, base_sec))
979 edac_dbg(0, " DCSB_SEC%d[%d]=0x%08x reg: 0x%x\n",
980 umc, cs, *base_sec, base_reg_sec);
981 }
982
983 umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK;
984 umc_mask_reg_sec = get_umc_base(umc) + UMCCH_ADDR_MASK_SEC;
985
986 for_each_chip_select_mask(cs, umc, pvt) {
987 mask = &pvt->csels[umc].csmasks[cs];
988 mask_sec = &pvt->csels[umc].csmasks_sec[cs];
989
990 mask_reg = umc_mask_reg + (cs * 4);
991 mask_reg_sec = umc_mask_reg_sec + (cs * 4);
992
993 if (!amd_smn_read(pvt->mc_node_id, mask_reg, mask))
994 edac_dbg(0, " DCSM%d[%d]=0x%08x reg: 0x%x\n",
995 umc, cs, *mask, mask_reg);
996
997 if (!amd_smn_read(pvt->mc_node_id, mask_reg_sec, mask_sec))
998 edac_dbg(0, " DCSM_SEC%d[%d]=0x%08x reg: 0x%x\n",
999 umc, cs, *mask_sec, mask_reg_sec);
1000 }
1001 }
1002 }
1003
1004 /*
1005  * Read in the DCS Base and DCS Mask registers.
1006  */
1007 static void read_dct_base_mask(struct amd64_pvt *pvt)
1008 {
1009 int cs;
1010
1011 prep_chip_selects(pvt);
1012
1013 if (pvt->umc)
1014 return read_umc_base_mask(pvt);
1015
1016 for_each_chip_select(cs, 0, pvt) {
1017 int reg0 = DCSB0 + (cs * 4);
1018 int reg1 = DCSB1 + (cs * 4);
1019 u32 *base0 = &pvt->csels[0].csbases[cs];
1020 u32 *base1 = &pvt->csels[1].csbases[cs];
1021
1022 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, base0))
1023 edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
1024 cs, *base0, reg0);
1025
1026 if (pvt->fam == 0xf)
1027 continue;
1028
1029 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, base1))
1030 edac_dbg(0, " DCSB1[%d]=0x%08x reg: F2x%x\n",
1031 cs, *base1, (pvt->fam == 0x10) ? reg1
1032 : reg0);
1033 }
1034
1035 for_each_chip_select_mask(cs, 0, pvt) {
1036 int reg0 = DCSM0 + (cs * 4);
1037 int reg1 = DCSM1 + (cs * 4);
1038 u32 *mask0 = &pvt->csels[0].csmasks[cs];
1039 u32 *mask1 = &pvt->csels[1].csmasks[cs];
1040
1041 if (!amd64_read_dct_pci_cfg(pvt, 0, reg0, mask0))
1042 edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
1043 cs, *mask0, reg0);
1044
1045 if (pvt->fam == 0xf)
1046 continue;
1047
1048 if (!amd64_read_dct_pci_cfg(pvt, 1, reg0, mask1))
1049 edac_dbg(0, " DCSM1[%d]=0x%08x reg: F2x%x\n",
1050 cs, *mask1, (pvt->fam == 0x10) ? reg1
1051 : reg0);
1052 }
1053 }
1054
1055 static void determine_memory_type(struct amd64_pvt *pvt)
1056 {
1057 u32 dram_ctrl, dcsm;
1058
1059 switch (pvt->fam) {
1060 case 0xf:
1061 if (pvt->ext_model >= K8_REV_F)
1062 goto ddr3;
1063
1064 pvt->dram_type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
1065 return;
1066
1067 case 0x10:
1068 if (pvt->dchr0 & DDR3_MODE)
1069 goto ddr3;
1070
1071 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
1072 return;
1073
1074 case 0x15:
1075 if (pvt->model < 0x60)
1076 goto ddr3;
1077
1078 /*
1079  * Model 0x60h and later can drive DDR4 as well as DDR3. F2x78
1080  * [DRAM Control], bits [10:8], encode the memory technology: a
1081  * value of 0x2 means DDR4.
1082  *
1083  * For DDR3, DCLR0 bit 16 indicates unbuffered DIMMs; otherwise
1084  * the DCSM rank-multiply bits distinguish load-reduced (LRDDR3)
1085  * from registered (RDDR3) DIMMs.
1086  */
1087 amd64_read_dct_pci_cfg(pvt, 0, DRAM_CONTROL, &dram_ctrl);
1088 dcsm = pvt->csels[0].csmasks[0];
1089
1090 if (((dram_ctrl >> 8) & 0x7) == 0x2)
1091 pvt->dram_type = MEM_DDR4;
1092 else if (pvt->dclr0 & BIT(16))
1093 pvt->dram_type = MEM_DDR3;
1094 else if (dcsm & 0x3)
1095 pvt->dram_type = MEM_LRDDR3;
1096 else
1097 pvt->dram_type = MEM_RDDR3;
1098
1099 return;
1100
1101 case 0x16:
1102 goto ddr3;
1103
1104 case 0x17:
1105 case 0x18:
1106 if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
1107 pvt->dram_type = MEM_LRDDR4;
1108 else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
1109 pvt->dram_type = MEM_RDDR4;
1110 else
1111 pvt->dram_type = MEM_DDR4;
1112 return;
1113
1114 default:
1115 WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam);
1116 pvt->dram_type = MEM_EMPTY;
1117 }
1118 return;
1119
1120 ddr3:
1121 pvt->dram_type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
1122 }
1123
1124 /* Get the number of DCT channels the memory controller is using. */
1125 static int k8_early_channel_count(struct amd64_pvt *pvt)
1126 {
1127 int flag;
1128
1129 if (pvt->ext_model >= K8_REV_F)
1130 /* RevF (NPT) and later. */
1131 flag = pvt->dclr0 & WIDTH_128;
1132 else
1133 /* RevE and earlier. */
1134 flag = pvt->dclr0 & REVE_WIDTH_128;
1135
1136 /* Not used. */
1137 pvt->dclr1 = 0;
1138
1139 return (flag) ? 2 : 1;
1140 }
1141
1142 /* On F10h and later ErrAddr is MC4_ADDR[47:1]. */
1143 static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
1144 {
1145 u16 mce_nid = amd_get_nb_id(m->extcpu);
1146 struct mem_ctl_info *mci;
1147 u8 start_bit = 1;
1148 u8 end_bit = 47;
1149 u64 addr;
1150
1151 mci = edac_mc_find(mce_nid);
1152 if (!mci)
1153 return 0;
1154
1155 pvt = mci->pvt_info;
1156
1157 if (pvt->fam == 0xf) {
1158 start_bit = 3;
1159 end_bit = 39;
1160 }
1161
1162 addr = m->addr & GENMASK_ULL(end_bit, start_bit);
1163
1164 /*
1165  * Erratum 637 workaround
1166  */
1167 if (pvt->fam == 0x15) {
1168 u64 cc6_base, tmp_addr;
1169 u32 tmp;
1170 u8 intlv_en;
1171
1172 if ((addr & GENMASK_ULL(47, 24)) >> 24 != 0x00fdf7)
1173 return addr;
1174
1175
1176 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_LIM, &tmp);
1177 intlv_en = tmp >> 21 & 0x7;
1178
1179 /* Add [47:27] + 3 trailing bits. */
1180 cc6_base = (tmp & GENMASK_ULL(20, 0)) << 3;
1181
1182 /* Reverse and add DramIntlvEn. */
1183 cc6_base |= intlv_en ^ 0x7;
1184
1185 /* Pin at [47:24]. */
1186 cc6_base <<= 24;
1187
1188 if (!intlv_en)
1189 return cc6_base | (addr & GENMASK_ULL(23, 0));
1190
1191 amd64_read_pci_cfg(pvt->F1, DRAM_LOCAL_NODE_BASE, &tmp);
1192
1193 /* Faster log2: shift by the number of interleave bits. */
1194 tmp_addr = (addr & GENMASK_ULL(23, 12)) << __fls(intlv_en + 1);
1195
1196 /* OR DramIntlvSel into bits [14:12]. */
1197 tmp_addr |= (tmp & GENMASK_ULL(23, 21)) >> 9;
1198
1199 /* Add the remaining [11:0] bits from the original MC4_ADDR. */
1200 tmp_addr |= addr & GENMASK_ULL(11, 0);
1201
1202 return cc6_base | tmp_addr;
1203 }
1204
1205 return addr;
1206 }
1207
1208 static struct pci_dev *pci_get_related_function(unsigned int vendor,
1209 unsigned int device,
1210 struct pci_dev *related)
1211 {
1212 struct pci_dev *dev = NULL;
1213
1214 while ((dev = pci_get_device(vendor, device, dev))) {
1215 if (pci_domain_nr(dev->bus) == pci_domain_nr(related->bus) &&
1216 (dev->bus->number == related->bus->number) &&
1217 (PCI_SLOT(dev->devfn) == PCI_SLOT(related->devfn)))
1218 break;
1219 }
1220
1221 return dev;
1222 }
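/*
 * Returns the sibling PCI function (same domain, bus and slot) with the
 * given vendor/device ID, e.g. to reach F1 of a northbridge given its
 * misc (F3) device.
 */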
1223
1224 static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
1225 {
1226 struct amd_northbridge *nb;
1227 struct pci_dev *f1 = NULL;
1228 unsigned int pci_func;
1229 int off = range << 3;
1230 u32 llim;
1231
1232 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
1233 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
1234
1235 if (pvt->fam == 0xf)
1236 return;
1237
1238 if (!dram_rw(pvt, range))
1239 return;
1240
1241 amd64_read_pci_cfg(pvt->F1, DRAM_BASE_HI + off, &pvt->ranges[range].base.hi);
1242 amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
1243
1244 /* F15h: factor in the CC6 save area by reading the dst node's limit reg. */
1245 if (pvt->fam != 0x15)
1246 return;
1247
1248 nb = node_to_amd_nb(dram_dst_node(pvt, range));
1249 if (WARN_ON(!nb))
1250 return;
1251
1252 if (pvt->model == 0x60)
1253 pci_func = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1;
1254 else if (pvt->model == 0x30)
1255 pci_func = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1;
1256 else
1257 pci_func = PCI_DEVICE_ID_AMD_15H_NB_F1;
1258
1259 f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
1260 if (WARN_ON(!f1))
1261 return;
1262
1263 amd64_read_pci_cfg(f1, DRAM_LOCAL_NODE_LIM, &llim);
1264
1265 pvt->ranges[range].lim.lo &= GENMASK_ULL(15, 0);
1266
1267
1268 /* {[39:27],111b} */
1269
1270 pvt->ranges[range].lim.hi &= GENMASK_ULL(7, 0);
1271
1272 /* [47:40] */
1273 pvt->ranges[range].lim.hi |= llim >> 13;
1274
1275 pci_dev_put(f1);
1276 }
1277
1278 static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1279 struct err_info *err)
1280 {
1281 struct amd64_pvt *pvt = mci->pvt_info;
1282
1283 error_address_to_page_and_offset(sys_addr, err);
1284
1285 /*
1286  * Find out which node the error address belongs to. This may be
1287  * different from the node that detected the error.
1288  */
1289 err->src_mci = find_mc_by_sys_addr(mci, sys_addr);
1290 if (!err->src_mci) {
1291 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1292 (unsigned long)sys_addr);
1293 err->err_code = ERR_NODE;
1294 return;
1295 }
1296
1297 /* Now map the sys_addr to a CSROW. */
1298 err->csrow = sys_addr_to_csrow(err->src_mci, sys_addr);
1299 if (err->csrow < 0) {
1300 err->err_code = ERR_CSROW;
1301 return;
1302 }
1303
1304 /* CHIPKILL enabled */
1305 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1306 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
1307 if (err->channel < 0) {
1308 /*
1309  * The syndrome didn't map, so we don't know which of the
1310  * 2 DIMMs is in error. So we need to ID 'both' of them
1311  * as suspect.
1312  */
1313 amd64_mc_warn(err->src_mci, "unknown syndrome 0x%04x - "
1314 "possible error reporting race\n",
1315 err->syndrome);
1316 err->err_code = ERR_CHANNEL;
1317 return;
1318 }
1319 } else {
1320 /*
1321  * non-chipkill ECC mode
1322  *
1323  * The K8 documentation is unclear about how to determine the
1324  * channel number when using non-chipkill memory. This method
1325  * was chosen because it matches what the BIOS-supplied DMI
1326  * table does.
1327  */
1328 err->channel = ((sys_addr & BIT(3)) != 0);
1329 }
1330 }
1331
1332 static int ddr2_cs_size(unsigned i, bool dct_width)
1333 {
1334 unsigned shift = 0;
1335
1336 if (i <= 2)
1337 shift = i;
1338 else if (!(i & 0x1))
1339 shift = i >> 1;
1340 else
1341 shift = (i + 1) >> 1;
1342
1343 return 128 << (shift + !!dct_width);
1344 }
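/* Example: i = 5 gives shift = 3, so a 64-bit DCT reports 128 << 3 = 1024MB. */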
1345
1346 static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1347 unsigned cs_mode, int cs_mask_nr)
1348 {
1349 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1350
1351 if (pvt->ext_model >= K8_REV_F) {
1352 WARN_ON(cs_mode > 11);
1353 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1354 }
1355 else if (pvt->ext_model >= K8_REV_D) {
1356 unsigned diff;
1357 WARN_ON(cs_mode > 10);
1358 /*
1359  * The below calculation, besides trying to win an obfuscated C
1360  * contest, maps cs_mode values to DIMM chip select sizes. The
1361  * mappings are:
1362  *
1363  * cs_mode	CS size (MB)
1364  * =======	============
1365  * 0		32
1366  * 1		64
1367  * 2		128
1368  * 3		128
1369  * 4		256
1370  * 5		512
1371  * 6		256
1372  * 7		512
1373  * 8		1024
1374  * 9		1024
1375  * 10		2048
1376  *
1377  * Basically, it calculates a value with which to shift the
1378  * smallest CS size of 32MB.
1379  *
1380  * ddr[23]_cs_size have a similar purpose.
1381  */
1382
1383 diff = cs_mode/3 + (unsigned)(cs_mode > 5);
1384
1385 return 32 << (cs_mode - diff);
1386 }
1387 else {
1388 WARN_ON(cs_mode > 6);
1389 return 32 << cs_mode;
1390 }
1391 }
1392
1393 /*
1394  * Get the number of DCT channels in use.
1395  *
1396  * Return:
1397  *	number of Memory Channels in operation
1398  * Pass back:
1399  *	contents of the DCL0_LOW register
1400  */
1401 static int f1x_early_channel_count(struct amd64_pvt *pvt)
1402 {
1403 int i, j, channels = 0;
1404
1405 /* If we are in 128-bit mode, then we are using 2 channels. */
1406 if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
1407 return 2;
1408
1409 /*
1410  * Need to check if in unganged mode: In such, there are 2 channels,
1411  * but they are not in 128-bit mode and thus the above 'dclr0' status
1412  * bit will be OFF.
1413  *
1414  * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1415  * their CSEnable bit on. If so, then SELECT that channel.
1416  */
1417 edac_dbg(0, "Data width is not 128 bits - need more decoding\n");
1418
1419 /*
1420  * Check DRAM Bank Address Mapping values for each DIMM to see if there
1421  * is more than just one DIMM present in unganged mode. Need to check
1422  * both controllers since DIMMs can be placed in either one.
1423  */
1424 for (i = 0; i < 2; i++) {
1425 u32 dbam = (i ? pvt->dbam1 : pvt->dbam0);
1426
1427 for (j = 0; j < 4; j++) {
1428 if (DBAM_DIMM(j, dbam) > 0) {
1429 channels++;
1430 break;
1431 }
1432 }
1433 }
1434
1435 if (channels > 2)
1436 channels = 2;
1437
1438 amd64_info("MCT channel count: %d\n", channels);
1439
1440 return channels;
1441 }
1442
1443 static int f17_early_channel_count(struct amd64_pvt *pvt)
1444 {
1445 int i, channels = 0;
1446
1447 /* SDP Control bit 31 (SdpInit) is clear for unused UMC channels. */
1448 for_each_umc(i)
1449 channels += !!(pvt->umc[i].sdp_ctrl & UMC_SDP_INIT);
1450
1451 amd64_info("MCT channel count: %d\n", channels);
1452
1453 return channels;
1454 }
1455
1456 static int ddr3_cs_size(unsigned i, bool dct_width)
1457 {
1458 unsigned shift = 0;
1459 int cs_size = 0;
1460
1461 if (i == 0 || i == 3 || i == 4)
1462 cs_size = -1;
1463 else if (i <= 2)
1464 shift = i;
1465 else if (i == 12)
1466 shift = 7;
1467 else if (!(i & 0x1))
1468 shift = i >> 1;
1469 else
1470 shift = (i + 1) >> 1;
1471
1472 if (cs_size != -1)
1473 cs_size = (128 * (1 << !!dct_width)) << shift;
1474
1475 return cs_size;
1476 }
1477
1478 static int ddr3_lrdimm_cs_size(unsigned i, unsigned rank_multiply)
1479 {
1480 unsigned shift = 0;
1481 int cs_size = 0;
1482
1483 if (i < 4 || i == 6)
1484 cs_size = -1;
1485 else if (i == 12)
1486 shift = 7;
1487 else if (!(i & 0x1))
1488 shift = i >> 1;
1489 else
1490 shift = (i + 1) >> 1;
1491
1492 if (cs_size != -1)
1493 cs_size = rank_multiply * (128 << shift);
1494
1495 return cs_size;
1496 }
1497
1498 static int ddr4_cs_size(unsigned i)
1499 {
1500 int cs_size = 0;
1501
1502 if (i == 0)
1503 cs_size = -1;
1504 else if (i == 1)
1505 cs_size = 1024;
1506 else
1507 /* Min cs_size = 1G */
1508 cs_size = 1024 * (1 << (i >> 1));
1509
1510 return cs_size;
1511 }
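/* Example: i = 3 gives 1024 * (1 << 1) = 2048MB. */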
1512
1513 static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1514 unsigned cs_mode, int cs_mask_nr)
1515 {
1516 u32 dclr = dct ? pvt->dclr1 : pvt->dclr0;
1517
1518 WARN_ON(cs_mode > 11);
1519
1520 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1521 return ddr3_cs_size(cs_mode, dclr & WIDTH_128);
1522 else
1523 return ddr2_cs_size(cs_mode, dclr & WIDTH_128);
1524 }
1525
1526 /*
1527  * F15h supports only 64-bit DCT interfaces.
1528  */
1529 static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1530 unsigned cs_mode, int cs_mask_nr)
1531 {
1532 WARN_ON(cs_mode > 12);
1533
1534 return ddr3_cs_size(cs_mode, false);
1535 }
1536
1537 /* F15h M60h supports DDR4 mapping as well (modes 0x10-0x13). */
1538 static int f15_m60h_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1539 unsigned cs_mode, int cs_mask_nr)
1540 {
1541 int cs_size;
1542 u32 dcsm = pvt->csels[dct].csmasks[cs_mask_nr];
1543
1544 WARN_ON(cs_mode > 12);
1545
1546 if (pvt->dram_type == MEM_DDR4) {
1547 if (cs_mode > 9)
1548 return -1;
1549
1550 cs_size = ddr4_cs_size(cs_mode);
1551 } else if (pvt->dram_type == MEM_LRDDR3) {
1552 unsigned rank_multiply = dcsm & 0xf;
1553
1554 if (rank_multiply == 3)
1555 rank_multiply = 4;
1556 cs_size = ddr3_lrdimm_cs_size(cs_mode, rank_multiply);
1557 } else {
1558
1559 /* Minimum cs size is 512MB for F15h M60h. */
1560 return -1;
1561
1562 cs_size = ddr3_cs_size(cs_mode, false);
1563 }
1564
1565 return cs_size;
1566 }
1567
1568 /*
1569  * F16h and F15h model 30h have only limited cs_modes.
1570  */
1571 static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
1572 unsigned cs_mode, int cs_mask_nr)
1573 {
1574 WARN_ON(cs_mode > 12);
1575
1576 if (cs_mode == 6 || cs_mode == 8 ||
1577 cs_mode == 9 || cs_mode == 12)
1578 return -1;
1579 else
1580 return ddr3_cs_size(cs_mode, false);
1581 }
1582
1583 static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
1584 unsigned int cs_mode, int csrow_nr)
1585 {
1586 u32 addr_mask_orig, addr_mask_deinterleaved;
1587 u32 msb, weight, num_zero_bits;
1588 int dimm, size = 0;
1589
1590 /* No Chip Selects are enabled. */
1591 if (!cs_mode)
1592 return size;
1593
1594 /* Requested size of an even CS but none are enabled. */
1595 if (!(cs_mode & CS_EVEN) && !(csrow_nr & 1))
1596 return size;
1597
1598 /* Requested size of an odd CS but none are enabled. */
1599 if (!(cs_mode & CS_ODD) && (csrow_nr & 1))
1600 return size;
1601
1602 /*
1603  * There is one mask per DIMM, and two Chip Selects per DIMM.
1604  *	CS0 and CS1 -> DIMM0
1605  *	CS2 and CS3 -> DIMM1
1606  */
1607 dimm = csrow_nr >> 1;
1608
1609 /* Asymmetric dual-rank DIMM support. */
1610 if ((csrow_nr & 1) && (cs_mode & CS_ODD_SECONDARY))
1611 addr_mask_orig = pvt->csels[umc].csmasks_sec[dimm];
1612 else
1613 addr_mask_orig = pvt->csels[umc].csmasks[dimm];
1614
1615 /*
1616  * The number of zero bits in the mask is equal to the number of bits
1617  * in a power of 2 value minus 1. The MSB is found first so that the
1618  * interleaving zero bits "poked" into the mask for channel
1619  * interleaving can be counted (MSB position minus the population
1620  * count) and then removed to recover the plain chip select mask.
1621  */
1622 msb = fls(addr_mask_orig) - 1;
1623 weight = hweight_long(addr_mask_orig);
1624 num_zero_bits = msb - weight;
1625
1626 /* Take the number of zero bits off from the top of the mask. */
1627 addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
1628
1629 edac_dbg(1, "CS%d DIMM%d AddrMasks:\n", csrow_nr, dimm);
1630 edac_dbg(1, " Original AddrMask: 0x%x\n", addr_mask_orig);
1631 edac_dbg(1, " Deinterleaved AddrMask: 0x%x\n", addr_mask_deinterleaved);
1632
1633 /* Register [31:1] = Address [39:9]. Size is in KBs here. */
1634 size = (addr_mask_deinterleaved >> 2) + 1;
1635
1636 /* Return size in MBs. */
1637 return size >> 10;
1638 }
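/*
 * Example: a mask covering bits [27:1] with one interleave hole at bit 8
 * (0x0ffffefe) has msb = 27 and weight = 26, so one zero bit is removed:
 * the deinterleaved mask becomes GENMASK(26, 1) and the size follows
 * from (mask >> 2) + 1 KB.
 */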
1639
1640 static void read_dram_ctl_register(struct amd64_pvt *pvt)
1641 {
1642
1643 if (pvt->fam == 0xf)
1644 return;
1645
1646 if (!amd64_read_pci_cfg(pvt->F2, DCT_SEL_LO, &pvt->dct_sel_lo)) {
1647 edac_dbg(0, "F2x110 (DCTSelLow): 0x%08x, High range addrs at: 0x%x\n",
1648 pvt->dct_sel_lo, dct_sel_baseaddr(pvt));
1649
1650 edac_dbg(0, " DCTs operate in %s mode\n",
1651 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"));
1652
1653 if (!dct_ganging_enabled(pvt))
1654 edac_dbg(0, " Address range split per DCT: %s\n",
1655 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1656
1657 edac_dbg(0, " data interleave for ECC: %s, DRAM cleared since last warm reset: %s\n",
1658 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1659 (dct_memory_cleared(pvt) ? "yes" : "no"));
1660
1661 edac_dbg(0, " channel interleave: %s, "
1662 "interleave bits selector: 0x%x\n",
1663 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1664 dct_sel_interleave_addr(pvt));
1665 }
1666
1667 amd64_read_pci_cfg(pvt->F2, DCT_SEL_HI, &pvt->dct_sel_hi);
1668 }
1669
1670 /*
1671  * Determine the channel (DCT) based on the interleaving mode (see the
1672  * F15h M30h BKDG, 2.10.12 Memory Interleaving Modes).
1673  */
1674 static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1675 u8 intlv_en, int num_dcts_intlv,
1676 u32 dct_sel)
1677 {
1678 u8 channel = 0;
1679 u8 select;
1680
1681 if (!(intlv_en))
1682 return (u8)(dct_sel);
1683
1684 if (num_dcts_intlv == 2) {
1685 select = (sys_addr >> 8) & 0x3;
1686 channel = select ? 0x3 : 0;
1687 } else if (num_dcts_intlv == 4) {
1688 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1689 switch (intlv_addr) {
1690 case 0x4:
1691 channel = (sys_addr >> 8) & 0x3;
1692 break;
1693 case 0x5:
1694 channel = (sys_addr >> 9) & 0x3;
1695 break;
1696 }
1697 }
1698 return channel;
1699 }
1700
1701 /*
1702  * Determine the channel (DCT) based on the interleaving mode (see the
1703  * F10h BKDG, 2.8.9 Memory Interleaving Modes).
1704  */
1705 static u8 f1x_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
1706 bool hi_range_sel, u8 intlv_en)
1707 {
1708 u8 dct_sel_high = (pvt->dct_sel_lo >> 1) & 1;
1709
1710 if (dct_ganging_enabled(pvt))
1711 return 0;
1712
1713 if (hi_range_sel)
1714 return dct_sel_high;
1715
1716 /*
1717  * See F2x110[DctSelIntLvAddr] - channel interleave mode.
1718  */
1719 if (dct_interleave_enabled(pvt)) {
1720 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1721
1722 /* Return DCT select function: 0=DCT0, 1=DCT1. */
1723 if (!intlv_addr)
1724 return sys_addr >> 6 & 1;
1725
1726 if (intlv_addr & 0x2) {
1727 u8 shift = intlv_addr & 0x1 ? 9 : 6;
1728 u32 temp = hweight_long((u32) ((sys_addr >> 16) & 0x1F)) & 1;
1729
1730 return ((sys_addr >> shift) & 1) ^ temp;
1731 }
1732
1733 if (intlv_addr & 0x4) {
1734 u8 shift = intlv_addr & 0x1 ? 9 : 8;
1735
1736 return (sys_addr >> shift) & 1;
1737 }
1738
1739 return (sys_addr >> (12 + hweight8(intlv_en))) & 1;
1740 }
1741
1742 if (dct_high_range_enabled(pvt))
1743 return ~dct_sel_high & 1;
1744
1745 return 0;
1746 }
1747
1748 /* Convert the sys_addr to the normalized DCT address. */
1749 static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
1750 u64 sys_addr, bool hi_rng,
1751 u32 dct_sel_base_addr)
1752 {
1753 u64 chan_off;
1754 u64 dram_base = get_dram_base(pvt, range);
1755 u64 hole_off = f10_dhar_offset(pvt);
1756 u64 dct_sel_base_off = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
1757
1758 if (hi_rng) {
1759 /*
1760  * if
1761  * base address of high range is below 4Gb
1762  * (bits [47:27] at [31:11])
1763  * DRAM address space on this DCT is hoisted above 4Gb	&&
1764  * sys_addr > 4Gb
1765  *
1766  *	remove hole offset from sys_addr
1767  * else
1768  *	remove high range offset from sys_addr
1769  */
1770 if ((!(dct_sel_base_addr >> 16) ||
1771 dct_sel_base_addr < dhar_base(pvt)) &&
1772 dhar_valid(pvt) &&
1773 (sys_addr >= BIT_64(32)))
1774 chan_off = hole_off;
1775 else
1776 chan_off = dct_sel_base_off;
1777 } else {
1778 /*
1779  * if
1780  * we have a valid hole		&&
1781  * sys_addr > 4Gb
1782  *
1783  *	remove hole
1784  * else
1785  *	remove dram base to normalize to DCT address
1786  */
1787 if (dhar_valid(pvt) && (sys_addr >= BIT_64(32)))
1788 chan_off = hole_off;
1789 else
1790 chan_off = dram_base;
1791 }
1792
1793 return (sys_addr & GENMASK_ULL(47,6)) - (chan_off & GENMASK_ULL(47,23));
1794 }
1795
1796 /*
1797  * Checks if the csrow passed in is marked as SPARED; if so, returns
1798  * the new spare row.
1799  */
1800 static int f10_process_possible_spare(struct amd64_pvt *pvt, u8 dct, int csrow)
1801 {
1802 int tmp_cs;
1803
1804 if (online_spare_swap_done(pvt, dct) &&
1805 csrow == online_spare_bad_dramcs(pvt, dct)) {
1806
1807 for_each_chip_select(tmp_cs, dct, pvt) {
1808 if (chip_select_base(tmp_cs, dct, pvt) & 0x2) {
1809 csrow = tmp_cs;
1810 break;
1811 }
1812 }
1813 }
1814 return csrow;
1815 }
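/*
 * The swap target above is found by scanning for the chip select whose
 * base register has bit 1 (the Spare flag) set.
 */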
1816
1817 /*
1818  * Iterate over the DRAM DCT "base" and "mask" registers looking for a
1819  * SystemAddr match on the specified 'ChannelSelect' and 'NodeID'.
1820  *
1821  * Return:
1822  *	-EINVAL: NOT FOUND
1823  *	0..csrow = Chip-Select Row
1824  */
1825 static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
1826 {
1827 struct mem_ctl_info *mci;
1828 struct amd64_pvt *pvt;
1829 u64 cs_base, cs_mask;
1830 int cs_found = -EINVAL;
1831 int csrow;
1832
1833 mci = edac_mc_find(nid);
1834 if (!mci)
1835 return cs_found;
1836
1837 pvt = mci->pvt_info;
1838
1839 edac_dbg(1, "input addr: 0x%llx, DCT: %d\n", in_addr, dct);
1840
1841 for_each_chip_select(csrow, dct, pvt) {
1842 if (!csrow_enabled(csrow, dct, pvt))
1843 continue;
1844
1845 get_cs_base_and_mask(pvt, csrow, dct, &cs_base, &cs_mask);
1846
1847 edac_dbg(1, " CSROW=%d CSBase=0x%llx CSMask=0x%llx\n",
1848 csrow, cs_base, cs_mask);
1849
1850 cs_mask = ~cs_mask;
1851
1852 edac_dbg(1, " (InputAddr & ~CSMask)=0x%llx (CSBase & ~CSMask)=0x%llx\n",
1853 (in_addr & cs_mask), (cs_base & cs_mask));
1854
1855 if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
1856 if (pvt->fam == 0x15 && pvt->model >= 0x30) {
1857 cs_found = csrow;
1858 break;
1859 }
1860 cs_found = f10_process_possible_spare(pvt, dct, csrow);
1861
1862 edac_dbg(1, " MATCH csrow=%d\n", cs_found);
1863 break;
1864 }
1865 }
1866 return cs_found;
1867 }
1868
1869 /*
1870  * See F2x10C. Non-interleaved graphics framebuffer memory under the 16G is
1871  * swapped with a region located at the bottom of memory so that the GPU can
1872  * use the interleaved scratchpad memory.
1873  */
1874 static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
1875 {
1876 u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
1877
1878 if (pvt->fam == 0x10) {
1879 /* Only revC3 and revE have that feature. */
1880 if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
1881 return sys_addr;
1882 }
1883
1884 amd64_read_pci_cfg(pvt->F2, SWAP_INTLV_REG, &swap_reg);
1885
1886 if (!(swap_reg & 0x1))
1887 return sys_addr;
1888
1889 swap_base = (swap_reg >> 3) & 0x7f;
1890 swap_limit = (swap_reg >> 11) & 0x7f;
1891 rgn_size = (swap_reg >> 20) & 0x7f;
1892 tmp_addr = sys_addr >> 27;
1893
1894 if (!(sys_addr >> 34) &&
1895 (((tmp_addr >= swap_base) &&
1896 (tmp_addr <= swap_limit)) ||
1897 (tmp_addr < rgn_size)))
1898 return sys_addr ^ (u64)swap_base << 27;
1899
1900 return sys_addr;
1901 }
1902
1903 /* For a given @dram_range, check if @sys_addr falls within it. */
1904 static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1905 u64 sys_addr, int *chan_sel)
1906 {
1907 int cs_found = -EINVAL;
1908 u64 chan_addr;
1909 u32 dct_sel_base;
1910 u8 channel;
1911 bool high_range = false;
1912
1913 u8 node_id = dram_dst_node(pvt, range);
1914 u8 intlv_en = dram_intlv_en(pvt, range);
1915 u32 intlv_sel = dram_intlv_sel(pvt, range);
1916
1917 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
1918 range, sys_addr, get_dram_limit(pvt, range));
1919
1920 if (dhar_valid(pvt) &&
1921 dhar_base(pvt) <= sys_addr &&
1922 sys_addr < BIT_64(32)) {
1923 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
1924 sys_addr);
1925 return -EINVAL;
1926 }
1927
1928 if (intlv_en && (intlv_sel != ((sys_addr >> 12) & intlv_en)))
1929 return -EINVAL;
1930
1931 sys_addr = f1x_swap_interleaved_region(pvt, sys_addr);
1932
1933 dct_sel_base = dct_sel_baseaddr(pvt);
1934
1935 /*
1936  * Check whether addresses >= DctSelBaseAddr[47:27] are to be used to
1937  * select between DCT0 and DCT1.
1938  */
1939 if (dct_high_range_enabled(pvt) &&
1940 !dct_ganging_enabled(pvt) &&
1941 ((sys_addr >> 27) >= (dct_sel_base >> 11)))
1942 high_range = true;
1943
1944 channel = f1x_determine_channel(pvt, sys_addr, high_range, intlv_en);
1945
1946 chan_addr = f1x_get_norm_dct_addr(pvt, range, sys_addr,
1947 high_range, dct_sel_base);
1948
1949 /* Remove node interleaving, see F1x120. */
1950 if (intlv_en)
1951 chan_addr = ((chan_addr >> (12 + hweight8(intlv_en))) << 12) |
1952 (chan_addr & 0xfff);
1953
1954 /* Remove channel interleave. */
1955 if (dct_interleave_enabled(pvt) &&
1956 !dct_high_range_enabled(pvt) &&
1957 !dct_ganging_enabled(pvt)) {
1958
1959 if (dct_sel_interleave_addr(pvt) != 1) {
1960 if (dct_sel_interleave_addr(pvt) == 0x3)
1961 /* hash 9 */
1962 chan_addr = ((chan_addr >> 10) << 9) |
1963 (chan_addr & 0x1ff);
1964 else
1965 /* A[6] or hash 6 */
1966 chan_addr = ((chan_addr >> 7) << 6) |
1967 (chan_addr & 0x3f);
1968 } else
1969 /* A[12] */
1970 chan_addr = ((chan_addr >> 13) << 12) |
1971 (chan_addr & 0xfff);
1972 }
1973
1974 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
1975
1976 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, channel);
1977
1978 if (cs_found >= 0)
1979 *chan_sel = channel;
1980
1981 return cs_found;
1982 }
1983
1984 static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
1985 u64 sys_addr, int *chan_sel)
1986 {
1987 int cs_found = -EINVAL;
1988 int num_dcts_intlv = 0;
1989 u64 chan_addr, chan_offset;
1990 u64 dct_base, dct_limit;
1991 u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
1992 u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
1993
1994 u64 dhar_offset = f10_dhar_offset(pvt);
1995 u8 intlv_addr = dct_sel_interleave_addr(pvt);
1996 u8 node_id = dram_dst_node(pvt, range);
1997 u8 intlv_en = dram_intlv_en(pvt, range);
1998
1999 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
2000 amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
2001
2002 dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
2003 dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
2004
2005 edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
2006 range, sys_addr, get_dram_limit(pvt, range));
2007
2008 if (!(get_dram_base(pvt, range) <= sys_addr) ||
2009 !(get_dram_limit(pvt, range) >= sys_addr))
2010 return -EINVAL;
2011
2012 if (dhar_valid(pvt) &&
2013 dhar_base(pvt) <= sys_addr &&
2014 sys_addr < BIT_64(32)) {
2015 amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
2016 sys_addr);
2017 return -EINVAL;
2018 }
2019
2020 /* Verify sys_addr is within DCT Range. */
2021 dct_base = (u64) dct_sel_baseaddr(pvt);
2022 dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
2023
2024 if (!(dct_cont_base_reg & BIT(0)) &&
2025 !(dct_base <= (sys_addr >> 27) &&
2026 dct_limit >= (sys_addr >> 27)))
2027 return -EINVAL;
2028
2029 /* Verify the number of DCTs that participate in channel interleaving. */
2030 num_dcts_intlv = (int) hweight8(intlv_en);
2031
2032 if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
2033 return -EINVAL;
2034
2035 if (pvt->model >= 0x60)
2036 channel = f1x_determine_channel(pvt, sys_addr, false, intlv_en);
2037 else
2038 channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
2039 num_dcts_intlv, dct_sel);
2040
2041 /* Verify we stay within the MAX number of channels allowed. */
2042 if (channel > 3)
2043 return -EINVAL;
2044
2045 leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
2046
2047 /* Get normalized DCT addr */
2048 if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
2049 chan_offset = dhar_offset;
2050 else
2051 chan_offset = dct_base << 27;
2052
2053 chan_addr = sys_addr - chan_offset;
2054
2055 /* Remove channel interleave. */
2056 if (num_dcts_intlv == 2) {
2057 if (intlv_addr == 0x4)
2058 chan_addr = ((chan_addr >> 9) << 8) |
2059 (chan_addr & 0xff);
2060 else if (intlv_addr == 0x5)
2061 chan_addr = ((chan_addr >> 10) << 9) |
2062 (chan_addr & 0x1ff);
2063 else
2064 return -EINVAL;
2065
2066 } else if (num_dcts_intlv == 4) {
2067 if (intlv_addr == 0x4)
2068 chan_addr = ((chan_addr >> 10) << 8) |
2069 (chan_addr & 0xff);
2070 else if (intlv_addr == 0x5)
2071 chan_addr = ((chan_addr >> 11) << 9) |
2072 (chan_addr & 0x1ff);
2073 else
2074 return -EINVAL;
2075 }
2076
2077 if (dct_offset_en) {
2078 amd64_read_pci_cfg(pvt->F1,
2079 DRAM_CONT_HIGH_OFF + (int) channel * 4,
2080 &tmp);
2081 chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
2082 }
2083
2084 f15h_select_dct(pvt, channel);
2085
2086 edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
2087
2088 /*
2089  * Find Chip select:
2090  * if channel = 3, then alias it to 1. This is because, in F15 M30h,
2091  * there is support for 4 DCTs, but only 2 are currently functional.
2092  * They are DCT0 and DCT3. But we have read all registers of DCT3 into
2093  * pvt->csels[1]. So we need to use '1' here to get correct info.
2094  * Refer to F15 M30h BKDG Section 2.10 and 2.10.3 for clarifications.
2095  */
2096 alias_channel = (channel == 3) ? 1 : channel;
2097
2098 cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
2099
2100 if (cs_found >= 0)
2101 *chan_sel = alias_channel;
2102
2103 return cs_found;
2104 }
2105
2106 static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
2107 u64 sys_addr,
2108 int *chan_sel)
2109 {
2110 int cs_found = -EINVAL;
2111 unsigned range;
2112
2113 for (range = 0; range < DRAM_RANGES; range++) {
2114 if (!dram_rw(pvt, range))
2115 continue;
2116
2117 if (pvt->fam == 0x15 && pvt->model >= 0x30)
2118 cs_found = f15_m30h_match_to_this_node(pvt, range,
2119 sys_addr,
2120 chan_sel);
2121
2122 else if ((get_dram_base(pvt, range) <= sys_addr) &&
2123 (get_dram_limit(pvt, range) >= sys_addr)) {
2124 cs_found = f1x_match_to_this_node(pvt, range,
2125 sys_addr, chan_sel);
2126 if (cs_found >= 0)
2127 break;
2128 }
2129 }
2130 return cs_found;
2131 }
2132
2133 /*
2134  * For reference see "2.8.5 Routing DRAM Requests" in the F10h BKDG.
2135  * This code maps a @sys_addr to NodeID, DCT (channel) and chip select
2136  * (CSROW).
2137  *
2138  * The @sys_addr is usually an error address received from the hardware.
2139  */
2140 static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
2141 struct err_info *err)
2142 {
2143 struct amd64_pvt *pvt = mci->pvt_info;
2144
2145 error_address_to_page_and_offset(sys_addr, err);
2146
2147 err->csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &err->channel);
2148 if (err->csrow < 0) {
2149 err->err_code = ERR_CSROW;
2150 return;
2151 }
2152
2153 /*
2154  * We need the syndromes for channel detection only when we're
2155  * ganged. Otherwise @chan should already contain the channel at
2156  * this point.
2157  */
2158 if (dct_ganging_enabled(pvt))
2159 err->channel = get_channel_from_ecc_syndrome(mci, err->syndrome);
2160 }
2161
2162 /*
2163  * Debug routine to display the memory sizes of all logical DIMMs and
2164  * their CSROWs.
2165  */
2166 static void debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
2167 {
2168 int dimm, size0, size1;
2169 u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
2170 u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
2171
2172 if (pvt->fam == 0xf) {
2173 /* K8 families < revF not supported yet. */
2174 if (pvt->ext_model < K8_REV_F)
2175 return;
2176 else
2177 WARN_ON(ctrl != 0);
2178 }
2179
2180 if (pvt->fam == 0x10) {
2181 dbam = (ctrl && !dct_ganging_enabled(pvt)) ? pvt->dbam1
2182 : pvt->dbam0;
2183 dcsb = (ctrl && !dct_ganging_enabled(pvt)) ?
2184 pvt->csels[1].csbases :
2185 pvt->csels[0].csbases;
2186 } else if (ctrl) {
2187 dbam = pvt->dbam0;
2188 dcsb = pvt->csels[1].csbases;
2189 }
2190 edac_dbg(1, "F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
2191 ctrl, dbam);
2192
2193 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
2194
2195 /* Dump memory sizes for DIMM and its CSROWs. */
2196 for (dimm = 0; dimm < 4; dimm++) {
2197
2198 size0 = 0;
2199 if (dcsb[dimm*2] & DCSB_CS_ENABLE)
2200 /*
2201  * For F15h M60h we need the multiplier for the LRDIMM cs_size
2202  * calculation. We pass the dimm value to the dbam_to_cs mapper so
2203  * it can find the multiplier from the corresponding DCSM.
2204  */
2205
2206 size0 = pvt->ops->dbam_to_cs(pvt, ctrl,
2207 DBAM_DIMM(dimm, dbam),
2208 dimm);
2209
2210 size1 = 0;
2211 if (dcsb[dimm*2 + 1] & DCSB_CS_ENABLE)
2212 size1 = pvt->ops->dbam_to_cs(pvt, ctrl,
2213 DBAM_DIMM(dimm, dbam),
2214 dimm);
2215
2216 amd64_info(EDAC_MC ": %d: %5dMB %d: %5dMB\n",
2217 dimm * 2, size0,
2218 dimm * 2 + 1, size1);
2219 }
2220 }
2221
2222 static struct amd64_family_type family_types[] = {
2223 [K8_CPUS] = {
2224 .ctl_name = "K8",
2225 .f1_id = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
2226 .f2_id = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
2227 .ops = {
2228 .early_channel_count = k8_early_channel_count,
2229 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
2230 .dbam_to_cs = k8_dbam_to_chip_select,
2231 }
2232 },
2233 [F10_CPUS] = {
2234 .ctl_name = "F10h",
2235 .f1_id = PCI_DEVICE_ID_AMD_10H_NB_MAP,
2236 .f2_id = PCI_DEVICE_ID_AMD_10H_NB_DRAM,
2237 .ops = {
2238 .early_channel_count = f1x_early_channel_count,
2239 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2240 .dbam_to_cs = f10_dbam_to_chip_select,
2241 }
2242 },
2243 [F15_CPUS] = {
2244 .ctl_name = "F15h",
2245 .f1_id = PCI_DEVICE_ID_AMD_15H_NB_F1,
2246 .f2_id = PCI_DEVICE_ID_AMD_15H_NB_F2,
2247 .ops = {
2248 .early_channel_count = f1x_early_channel_count,
2249 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2250 .dbam_to_cs = f15_dbam_to_chip_select,
2251 }
2252 },
2253 [F15_M30H_CPUS] = {
2254 .ctl_name = "F15h_M30h",
2255 .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
2256 .f2_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
2257 .ops = {
2258 .early_channel_count = f1x_early_channel_count,
2259 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2260 .dbam_to_cs = f16_dbam_to_chip_select,
2261 }
2262 },
2263 [F15_M60H_CPUS] = {
2264 .ctl_name = "F15h_M60h",
2265 .f1_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F1,
2266 .f2_id = PCI_DEVICE_ID_AMD_15H_M60H_NB_F2,
2267 .ops = {
2268 .early_channel_count = f1x_early_channel_count,
2269 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2270 .dbam_to_cs = f15_m60h_dbam_to_chip_select,
2271 }
2272 },
2273 [F16_CPUS] = {
2274 .ctl_name = "F16h",
2275 .f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
2276 .f2_id = PCI_DEVICE_ID_AMD_16H_NB_F2,
2277 .ops = {
2278 .early_channel_count = f1x_early_channel_count,
2279 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2280 .dbam_to_cs = f16_dbam_to_chip_select,
2281 }
2282 },
2283 [F16_M30H_CPUS] = {
2284 .ctl_name = "F16h_M30h",
2285 .f1_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F1,
2286 .f2_id = PCI_DEVICE_ID_AMD_16H_M30H_NB_F2,
2287 .ops = {
2288 .early_channel_count = f1x_early_channel_count,
2289 .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
2290 .dbam_to_cs = f16_dbam_to_chip_select,
2291 }
2292 },
2293 [F17_CPUS] = {
2294 .ctl_name = "F17h",
2295 .f0_id = PCI_DEVICE_ID_AMD_17H_DF_F0,
2296 .f6_id = PCI_DEVICE_ID_AMD_17H_DF_F6,
2297 .ops = {
2298 .early_channel_count = f17_early_channel_count,
2299 .dbam_to_cs = f17_addr_mask_to_cs_size,
2300 }
2301 },
2302 [F17_M10H_CPUS] = {
2303 .ctl_name = "F17h_M10h",
2304 .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
2305 .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
2306 .ops = {
2307 .early_channel_count = f17_early_channel_count,
2308 .dbam_to_cs = f17_addr_mask_to_cs_size,
2309 }
2310 },
2311 [F17_M30H_CPUS] = {
2312 .ctl_name = "F17h_M30h",
2313 .f0_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F0,
2314 .f6_id = PCI_DEVICE_ID_AMD_17H_M30H_DF_F6,
2315 .ops = {
2316 .early_channel_count = f17_early_channel_count,
2317 .dbam_to_cs = f17_addr_mask_to_cs_size,
2318 }
2319 },
2320 [F17_M70H_CPUS] = {
2321 .ctl_name = "F17h_M70h",
2322 .f0_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F0,
2323 .f6_id = PCI_DEVICE_ID_AMD_17H_M70H_DF_F6,
2324 .ops = {
2325 .early_channel_count = f17_early_channel_count,
2326 .dbam_to_cs = f17_addr_mask_to_cs_size,
2327 }
2328 },
2329 };
2330
2331 /*
2332  * These are tables of eigenvectors (one per line) which can be used for
2333  * the construction of the syndrome tables. The modified syndrome search
2334  * algorithm uses those to find the symbol in error and thus the DIMM.
2335  *
2336  * Algorithm courtesy of Ross LaFetra from AMD.
2337  */
2338 static const u16 x4_vectors[] = {
2339 0x2f57, 0x1afe, 0x66cc, 0xdd88,
2340 0x11eb, 0x3396, 0x7f4c, 0xeac8,
2341 0x0001, 0x0002, 0x0004, 0x0008,
2342 0x1013, 0x3032, 0x4044, 0x8088,
2343 0x106b, 0x30d6, 0x70fc, 0xe0a8,
2344 0x4857, 0xc4fe, 0x13cc, 0x3288,
2345 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
2346 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
2347 0x15c1, 0x2a42, 0x89ac, 0x4758,
2348 0x2b03, 0x1602, 0x4f0c, 0xca08,
2349 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
2350 0x8ba7, 0x465e, 0x244c, 0x1cc8,
2351 0x2b87, 0x164e, 0x642c, 0xdc18,
2352 0x40b9, 0x80de, 0x1094, 0x20e8,
2353 0x27db, 0x1eb6, 0x9dac, 0x7b58,
2354 0x11c1, 0x2242, 0x84ac, 0x4c58,
2355 0x1be5, 0x2d7a, 0x5e34, 0xa718,
2356 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
2357 0x4c97, 0xc87e, 0x11fc, 0x33a8,
2358 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
2359 0x16b3, 0x3d62, 0x4f34, 0x8518,
2360 0x1e2f, 0x391a, 0x5cac, 0xf858,
2361 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
2362 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
2363 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
2364 0x4397, 0xc27e, 0x17fc, 0x3ea8,
2365 0x1617, 0x3d3e, 0x6464, 0xb8b8,
2366 0x23ff, 0x12aa, 0xab6c, 0x56d8,
2367 0x2dfb, 0x1ba6, 0x913c, 0x7328,
2368 0x185d, 0x2ca6, 0x7914, 0x9e28,
2369 0x171b, 0x3e36, 0x7d7c, 0xebe8,
2370 0x4199, 0x82ee, 0x19f4, 0x2e58,
2371 0x4807, 0xc40e, 0x130c, 0x3208,
2372 0x1905, 0x2e0a, 0x5804, 0xac08,
2373 0x213f, 0x132a, 0xadfc, 0x5ba8,
2374 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
2375 };
2376
2377 static const u16 x8_vectors[] = {
2378 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2379 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2380 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2381 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
2382 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
2383 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
2384 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
2385 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
2386 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
2387 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
2388 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
2389 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
2390 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
2391 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
2392 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
2393 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
2394 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
2395 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
2396 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
2397 };
2398
2399 static int decode_syndrome(u16 syndrome, const u16 *vectors, unsigned num_vecs,
2400 unsigned v_dim)
2401 {
2402 unsigned int i, err_sym;
2403
2404 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
2405 u16 s = syndrome;
2406 unsigned v_idx = err_sym * v_dim;
2407 unsigned v_end = (err_sym + 1) * v_dim;
2408
2409 /* walk over all 16 bits of the syndrome */
2410 for (i = 1; i < (1U << 16); i <<= 1) {
2411
2412 /* if bit is set in the current eigenvector... */
2413 if (v_idx < v_end && vectors[v_idx] & i) {
2414 u16 ev_comp = vectors[v_idx++];
2415
2416 /* ...and bit set in the modified syndrome, */
2417 if (s & i) {
2418 /* remove it. */
2419 s ^= ev_comp;
2420
2421 if (!s)
2422 return err_sym;
2423 }
2424
2425 } else if (s & i)
2426 /* can't get to zero, move on to the next symbol */
2427 break;
2428 }
2429 }
2430
2431 edac_dbg(0, "syndrome(%x) not found\n", syndrome);
2432 return -1;
2433 }
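/*
 * Worked example (illustrative): a raw syndrome of 0x2f57 equals
 * x4_vectors[0], so with v_dim == 4 it cancels to zero within the first
 * group and decode_syndrome() returns err_sym 0.
 */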
2434
2435 static int map_err_sym_to_channel(int err_sym, int sym_size)
2436 {
2437 if (sym_size == 4)
2438 switch (err_sym) {
2439 case 0x20:
2440 case 0x21:
2441 return 0;
2443 case 0x22:
2444 case 0x23:
2445 return 1;
2447 default:
2448 return err_sym >> 4;
2450 }
2451 /* x8 symbols */
2452 else
2453 switch (err_sym) {
2454 /* imaginary bits not in a DIMM */
2455 case 0x10:
2456 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
2457 err_sym);
2458 return -1;
2460
2461 case 0x11:
2462 return 0;
2464 case 0x12:
2465 return 1;
2467 default:
2468 return err_sym >> 3;
2470 }
2471 return -1;
2472 }
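/*
 * Worked examples (illustrative) of the mapping above:
 *   map_err_sym_to_channel(0x21, 4) == 0   (symbols 0x20/0x21 -> channel 0)
 *   map_err_sym_to_channel(0x23, 4) == 1
 *   map_err_sym_to_channel(0x35, 4) == 3   (default case: err_sym >> 4)
 *   map_err_sym_to_channel(0x1a, 8) == 3   (default case: err_sym >> 3)
 */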
2473
2474 static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
2475 {
2476 struct amd64_pvt *pvt = mci->pvt_info;
2477 int err_sym = -1;
2478
2479 if (pvt->ecc_sym_sz == 8)
2480 err_sym = decode_syndrome(syndrome, x8_vectors,
2481 ARRAY_SIZE(x8_vectors),
2482 pvt->ecc_sym_sz);
2483 else if (pvt->ecc_sym_sz == 4)
2484 err_sym = decode_syndrome(syndrome, x4_vectors,
2485 ARRAY_SIZE(x4_vectors),
2486 pvt->ecc_sym_sz);
2487 else {
2488 amd64_warn("Illegal syndrome type: %u\n", pvt->ecc_sym_sz);
2489 return err_sym;
2490 }
2491
2492 return map_err_sym_to_channel(err_sym, pvt->ecc_sym_sz);
2493 }
2494
2495 static void __log_ecc_error(struct mem_ctl_info *mci, struct err_info *err,
2496 u8 ecc_type)
2497 {
2498 enum hw_event_mc_err_type err_type;
2499 const char *string;
2500
2501 if (ecc_type == 2)
2502 err_type = HW_EVENT_ERR_CORRECTED;
2503 else if (ecc_type == 1)
2504 err_type = HW_EVENT_ERR_UNCORRECTED;
2505 else if (ecc_type == 3)
2506 err_type = HW_EVENT_ERR_DEFERRED;
2507 else {
2508 WARN(1, "Something is rotten in the state of Denmark.\n");
2509 return;
2510 }
2511
2512 switch (err->err_code) {
2513 case DECODE_OK:
2514 string = "";
2515 break;
2516 case ERR_NODE:
2517 string = "Failed to map error addr to a node";
2518 break;
2519 case ERR_CSROW:
2520 string = "Failed to map error addr to a csrow";
2521 break;
2522 case ERR_CHANNEL:
2523 string = "Unknown syndrome - possible error reporting race";
2524 break;
2525 case ERR_SYND:
2526 string = "MCA_SYND not valid - unknown syndrome and csrow";
2527 break;
2528 case ERR_NORM_ADDR:
2529 string = "Cannot decode normalized address";
2530 break;
2531 default:
2532 string = "WTF error";
2533 break;
2534 }
2535
2536 edac_mc_handle_error(err_type, mci, 1,
2537 err->page, err->offset, err->syndrome,
2538 err->csrow, err->channel, -1,
2539 string, "");
2540 }
2541
2542 static inline void decode_bus_error(int node_id, struct mce *m)
2543 {
2544 struct mem_ctl_info *mci;
2545 struct amd64_pvt *pvt;
2546 u8 ecc_type = (m->status >> 45) & 0x3;
2547 u8 xec = XEC(m->status, 0x1f);
2548 u16 ec = EC(m->status);
2549 u64 sys_addr;
2550 struct err_info err;
2551
2552 mci = edac_mc_find(node_id);
2553 if (!mci)
2554 return;
2555
2556 pvt = mci->pvt_info;
2557
2558 /* Bail out early if this was an 'observed' error */
2559 if (PP(ec) == NBSL_PP_OBS)
2560 return;
2561
2562 /* Do only ECC errors */
2563 if (xec && xec != F10_NBSL_EXT_ERR_ECC)
2564 return;
2565
2566 memset(&err, 0, sizeof(err));
2567
2568 sys_addr = get_error_address(pvt, m);
2569
2570 if (ecc_type == 2)
2571 err.syndrome = extract_syndrome(m->status);
2572
2573 pvt->ops->map_sysaddr_to_csrow(mci, sys_addr, &err);
2574
2575 __log_ecc_error(mci, &err, ecc_type);
2576 }
2577
2578
2579
2580 /*
2581 * To find the UMC channel represented by this bank we need to match on
2582 * its instance_id, held in the lower 32 bits of the IPID. The channel
2583 * number is the instance_id's 6th nibble: for instance_id 0xYXXXXX,
2584 * Y is the channel number.
2585 */
2586
2587 static int find_umc_channel(struct mce *m)
2588 {
2589 return (m->ipid & GENMASK(31, 0)) >> 20;
2590 }
2591
2592 static void decode_umc_error(int node_id, struct mce *m)
2593 {
2594 u8 ecc_type = (m->status >> 45) & 0x3;
2595 struct mem_ctl_info *mci;
2596 struct amd64_pvt *pvt;
2597 struct err_info err;
2598 u64 sys_addr;
2599
2600 mci = edac_mc_find(node_id);
2601 if (!mci)
2602 return;
2603
2604 pvt = mci->pvt_info;
2605
2606 memset(&err, 0, sizeof(err));
2607
2608 if (m->status & MCI_STATUS_DEFERRED)
2609 ecc_type = 3;
2610
2611 err.channel = find_umc_channel(m);
2612
2613 if (!(m->status & MCI_STATUS_SYNDV)) {
2614 err.err_code = ERR_SYND;
2615 goto log_error;
2616 }
2617
2618 if (ecc_type == 2) {
2619 u8 length = (m->synd >> 18) & 0x3f;
2620
2621 if (length)
2622 err.syndrome = (m->synd >> 32) & GENMASK(length - 1, 0);
2623 else
2624 err.err_code = ERR_CHANNEL;
2625 }
2626
2627 err.csrow = m->synd & 0x7;
2628
2629 if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
2630 err.err_code = ERR_NORM_ADDR;
2631 goto log_error;
2632 }
2633
2634 error_address_to_page_and_offset(sys_addr, &err);
2635
2636 log_error:
2637 __log_ecc_error(mci, &err, ecc_type);
2638 }
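/*
 * MCA_SYND layout as used above (a reading of this code, not a spec
 * quote): bits [63:32] carry the raw syndrome, bits [23:18] its valid
 * length in bits, and the low bits the chip select. See the family 17h
 * PPR for the authoritative register description.
 */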
2639
2640 /*
2641 * Use pvt->F3 to look up the remaining sibling devices of the memory
2642 * controller: F0/F6 on UMC-based (family 17h+) systems, F1 (AddrMap)
2643 * and F2 (Dct) otherwise. Returns a negative value on error.
2644 */
2645 static int
2646 reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2647 {
2648 if (pvt->umc) {
2649 pvt->F0 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2650 if (!pvt->F0) {
2651 amd64_err("F0 not found, device 0x%x (broken BIOS?)\n", pci_id1);
2652 return -ENODEV;
2653 }
2654
2655 pvt->F6 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2656 if (!pvt->F6) {
2657 pci_dev_put(pvt->F0);
2658 pvt->F0 = NULL;
2659
2660 amd64_err("F6 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2661 return -ENODEV;
2662 }
2663
2664 edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2665 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2666 edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2667
2668 return 0;
2669 }
2670
2671
2672 pvt->F1 = pci_get_related_function(pvt->F3->vendor, pci_id1, pvt->F3);
2673 if (!pvt->F1) {
2674 amd64_err("F1 not found: device 0x%x (broken BIOS?)\n", pci_id1);
2675 return -ENODEV;
2676 }
2677
2678
2679 pvt->F2 = pci_get_related_function(pvt->F3->vendor, pci_id2, pvt->F3);
2680 if (!pvt->F2) {
2681 pci_dev_put(pvt->F1);
2682 pvt->F1 = NULL;
2683
2684 amd64_err("F2 not found: device 0x%x (broken BIOS?)\n", pci_id2);
2685 return -ENODEV;
2686 }
2687
2688 edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2689 edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2690 edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2691
2692 return 0;
2693 }
2694
2695 static void free_mc_sibling_devs(struct amd64_pvt *pvt)
2696 {
2697 if (pvt->umc) {
2698 pci_dev_put(pvt->F0);
2699 pci_dev_put(pvt->F6);
2700 } else {
2701 pci_dev_put(pvt->F1);
2702 pci_dev_put(pvt->F2);
2703 }
2704 }
2705
2706 static void determine_ecc_sym_sz(struct amd64_pvt *pvt)
2707 {
2708 pvt->ecc_sym_sz = 4;
2709
2710 if (pvt->umc) {
2711 u8 i;
2712
2713 for_each_umc(i) {
2714 /* Check enabled channels only: */
2715 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
2716 if (pvt->umc[i].ecc_ctrl & BIT(9)) {
2717 pvt->ecc_sym_sz = 16;
2718 return;
2719 } else if (pvt->umc[i].ecc_ctrl & BIT(7)) {
2720 pvt->ecc_sym_sz = 8;
2721 return;
2722 }
2723 }
2724 }
2725 } else if (pvt->fam >= 0x10) {
2726 u32 tmp;
2727
2728 amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
2729 /* F16h has only DCT0, so there is no dbam1 to read */
2730 if (pvt->fam != 0x16)
2731 amd64_read_dct_pci_cfg(pvt, 1, DBAM0, &pvt->dbam1);
2732
2733 /* F10h, revD and later can do x8 ECC too */
2734 if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
2735 pvt->ecc_sym_sz = 8;
2736 }
2737 }
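/*
 * Summary of the checks above: on UMC systems, ECC_CTRL BIT(9) selects
 * x16 symbols and BIT(7) x8, with x4 as the default; on families 10h-16h,
 * EXT_NB_MCA_CFG BIT(25) selects x8 on the parts that support it.
 */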
2738
2739 /*
2740 * Retrieve the hardware registers of the memory controller.
2741 */
2742 static void __read_mc_regs_df(struct amd64_pvt *pvt)
2743 {
2744 u8 nid = pvt->mc_node_id;
2745 struct amd64_umc *umc;
2746 u32 i, umc_base;
2747
2748 /* Read registers from each UMC */
2749 for_each_umc(i) {
2750
2751 umc_base = get_umc_base(i);
2752 umc = &pvt->umc[i];
2753
2754 amd_smn_read(nid, umc_base + UMCCH_DIMM_CFG, &umc->dimm_cfg);
2755 amd_smn_read(nid, umc_base + UMCCH_UMC_CFG, &umc->umc_cfg);
2756 amd_smn_read(nid, umc_base + UMCCH_SDP_CTRL, &umc->sdp_ctrl);
2757 amd_smn_read(nid, umc_base + UMCCH_ECC_CTRL, &umc->ecc_ctrl);
2758 amd_smn_read(nid, umc_base + UMCCH_UMC_CAP_HI, &umc->umc_cap_hi);
2759 }
2760 }
2761
2762 /*
2763 * Retrieve the hardware registers of the memory controller (they're 64 bits
2764 * all over the place).
2765 */
2766 static void read_mc_regs(struct amd64_pvt *pvt)
2767 {
2768 unsigned int range;
2769 u64 msr_val;
2770
2771 /*
2772 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2773 * those are Read-As-Zero.
2774 */
2775 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2776 edac_dbg(0, " TOP_MEM: 0x%016llx\n", pvt->top_mem);
2777
2778 /* Check first whether TOP_MEM2 is enabled: */
2779 rdmsrl(MSR_K8_SYSCFG, msr_val);
2780 if (msr_val & BIT(21)) {
2781 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2782 edac_dbg(0, " TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2783 } else {
2784 edac_dbg(0, " TOP_MEM2 disabled\n");
2785 }
2786
2787 if (pvt->umc) {
2788 __read_mc_regs_df(pvt);
2789 amd64_read_pci_cfg(pvt->F0, DF_DHAR, &pvt->dhar);
2790
2791 goto skip;
2792 }
2793
2794 amd64_read_pci_cfg(pvt->F3, NBCAP, &pvt->nbcap);
2795
2796 read_dram_ctl_register(pvt);
2797
2798 for (range = 0; range < DRAM_RANGES; range++) {
2799 u8 rw;
2800
2801 /* read settings for this DRAM range */
2802 read_dram_base_limit_regs(pvt, range);
2803
2804 rw = dram_rw(pvt, range);
2805 if (!rw)
2806 continue;
2807
2808 edac_dbg(1, " DRAM range[%d], base: 0x%016llx; limit: 0x%016llx\n",
2809 range,
2810 get_dram_base(pvt, range),
2811 get_dram_limit(pvt, range));
2812
2813 edac_dbg(1, " IntlvEn=%s; Range access: %s%s IntlvSel=%d DstNode=%d\n",
2814 dram_intlv_en(pvt, range) ? "Enabled" : "Disabled",
2815 (rw & 0x1) ? "R" : "-",
2816 (rw & 0x2) ? "W" : "-",
2817 dram_intlv_sel(pvt, range),
2818 dram_dst_node(pvt, range));
2819 }
2820
2821 amd64_read_pci_cfg(pvt->F1, DHAR, &pvt->dhar);
2822 amd64_read_dct_pci_cfg(pvt, 0, DBAM0, &pvt->dbam0);
2823
2824 amd64_read_pci_cfg(pvt->F3, F10_ONLINE_SPARE, &pvt->online_spare);
2825
2826 amd64_read_dct_pci_cfg(pvt, 0, DCLR0, &pvt->dclr0);
2827 amd64_read_dct_pci_cfg(pvt, 0, DCHR0, &pvt->dchr0);
2828
2829 if (!dct_ganging_enabled(pvt)) {
2830 amd64_read_dct_pci_cfg(pvt, 1, DCLR0, &pvt->dclr1);
2831 amd64_read_dct_pci_cfg(pvt, 1, DCHR0, &pvt->dchr1);
2832 }
2833
2834 skip:
2835 read_dct_base_mask(pvt);
2836
2837 determine_memory_type(pvt);
2838 edac_dbg(1, " DIMM type: %s\n", edac_mem_types[pvt->dram_type]);
2839
2840 determine_ecc_sym_sz(pvt);
2841
2842 dump_misc_regs(pvt);
2843 }
2844
2845
2846 /*
2847 * NOTE: CPU Revision Dependent code
2848 *
2849 * Input:
2850 * @csrow_nr ChipSelect Row Number (0..NUM_CHIPSELECTS-1)
2851 * k8 private pointer to -->
2852 * DRAM Bank Address mapping register
2853 * node_id
2854 * (DCL register where dual_channel_active is)
2855 *
2856 * The DBAM register consists of 4 sets of 4 bits each definitions:
2857 *
2858 * Bits: CSROWs
2859 * 0-3 CSROWs 0 and 1
2860 * 4-7 CSROWs 2 and 3
2861 * 8-11 CSROWs 4 and 5
2862 * 12-15 CSROWs 6 and 7
2863 *
2864 * Values range from: 0 to 15
2865 * The meaning of the values depends on CPU revision and dual-channel state,
2866 * see the relevant BKDG for more info.
2867 *
2868 * The memory controller provides for a total of only 8 CSROWs in its
2869 * current architecture. Each "pair" of CSROWs normally represents just
2870 * one DIMM in practice, which is why several tables map the same csrow.
2871 */
2872
2879 static u32 get_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr_orig)
2880 {
2881 u32 dbam = dct ? pvt->dbam1 : pvt->dbam0;
2882 int csrow_nr = csrow_nr_orig;
2883 u32 cs_mode, nr_pages;
2884
2885 if (!pvt->umc) {
2886 csrow_nr >>= 1;
2887 cs_mode = DBAM_DIMM(csrow_nr, dbam);
2888 } else {
2889 cs_mode = f17_get_cs_mode(csrow_nr >> 1, dct, pvt);
2890 }
2891
2892 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode, csrow_nr);
2893 nr_pages <<= 20 - PAGE_SHIFT;
2894
2895 edac_dbg(0, "csrow: %d, channel: %d, DBAM idx: %d\n",
2896 csrow_nr_orig, dct, cs_mode);
2897 edac_dbg(0, "nr_pages/channel: %u\n", nr_pages);
2898
2899 return nr_pages;
2900 }
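/*
 * Worked example (illustrative): dbam_to_cs() returns the chip-select
 * size in MiB and "nr_pages <<= 20 - PAGE_SHIFT" converts MiB to pages.
 * With 4 KiB pages (PAGE_SHIFT == 12) that is a shift by 8, so a 2048 MiB
 * rank yields 2048 << 8 == 524288 pages per channel.
 */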
2901
2902 static int init_csrows_df(struct mem_ctl_info *mci)
2903 {
2904 struct amd64_pvt *pvt = mci->pvt_info;
2905 enum edac_type edac_mode = EDAC_NONE;
2906 enum dev_type dev_type = DEV_UNKNOWN;
2907 struct dimm_info *dimm;
2908 int empty = 1;
2909 u8 umc, cs;
2910
2911 if (mci->edac_ctl_cap & EDAC_FLAG_S16ECD16ED) {
2912 edac_mode = EDAC_S16ECD16ED;
2913 dev_type = DEV_X16;
2914 } else if (mci->edac_ctl_cap & EDAC_FLAG_S8ECD8ED) {
2915 edac_mode = EDAC_S8ECD8ED;
2916 dev_type = DEV_X8;
2917 } else if (mci->edac_ctl_cap & EDAC_FLAG_S4ECD4ED) {
2918 edac_mode = EDAC_S4ECD4ED;
2919 dev_type = DEV_X4;
2920 } else if (mci->edac_ctl_cap & EDAC_FLAG_SECDED) {
2921 edac_mode = EDAC_SECDED;
2922 }
2923
2924 for_each_umc(umc) {
2925 for_each_chip_select(cs, umc, pvt) {
2926 if (!csrow_enabled(cs, umc, pvt))
2927 continue;
2928
2929 empty = 0;
2930 dimm = mci->csrows[cs]->channels[umc]->dimm;
2931
2932 edac_dbg(1, "MC node: %d, csrow: %d\n",
2933 pvt->mc_node_id, cs);
2934
2935 dimm->nr_pages = get_csrow_nr_pages(pvt, umc, cs);
2936 dimm->mtype = pvt->dram_type;
2937 dimm->edac_mode = edac_mode;
2938 dimm->dtype = dev_type;
2939 dimm->grain = 64;
2940 }
2941 }
2942
2943 return empty;
2944 }
2945
2946 /*
2947 * Initialize the array of csrow attribute instances, based on the values
2948 * from pci config hardware registers.
2949 */
2950 static int init_csrows(struct mem_ctl_info *mci)
2951 {
2952 struct amd64_pvt *pvt = mci->pvt_info;
2953 enum edac_type edac_mode = EDAC_NONE;
2954 struct csrow_info *csrow;
2955 struct dimm_info *dimm;
2956 int i, j, empty = 1;
2957 int nr_pages = 0;
2958 u32 val;
2959
2960 if (pvt->umc)
2961 return init_csrows_df(mci);
2962
2963 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2964
2965 pvt->nbcfg = val;
2966
2967 edac_dbg(0, "node %d, NBCFG=0x%08x[ChipKillEccCap: %d|DramEccEn: %d]\n",
2968 pvt->mc_node_id, val,
2969 !!(val & NBCFG_CHIPKILL), !!(val & NBCFG_ECC_ENABLE));
2970
2971 /*
2972 * We iterate over DCT0 here but we look at DCT1 in parallel, if needed.
2973 */
2974 for_each_chip_select(i, 0, pvt) {
2975 bool row_dct0 = !!csrow_enabled(i, 0, pvt);
2976 bool row_dct1 = false;
2977
2978 if (pvt->fam != 0xf)
2979 row_dct1 = !!csrow_enabled(i, 1, pvt);
2980
2981 if (!row_dct0 && !row_dct1)
2982 continue;
2983
2984 csrow = mci->csrows[i];
2985 empty = 0;
2986
2987 edac_dbg(1, "MC node: %d, csrow: %d\n",
2988 pvt->mc_node_id, i);
2989
2990 if (row_dct0) {
2991 nr_pages = get_csrow_nr_pages(pvt, 0, i);
2992 csrow->channels[0]->dimm->nr_pages = nr_pages;
2993 }
2994
2995 /* K8 has only one DCT */
2996 if (pvt->fam != 0xf && row_dct1) {
2997 int row_dct1_pages = get_csrow_nr_pages(pvt, 1, i);
2998
2999 csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
3000 nr_pages += row_dct1_pages;
3001 }
3002
3003 edac_dbg(1, "Total csrow%d pages: %u\n", i, nr_pages);
3004
3005 /* Determine whether CHIPKILL, plain ECC, or nothing is operating on this node */
3006 if (pvt->nbcfg & NBCFG_ECC_ENABLE) {
3007 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL)
3008 ? EDAC_S4ECD4ED
3009 : EDAC_SECDED;
3010 }
3011
3012 for (j = 0; j < pvt->channel_count; j++) {
3013 dimm = csrow->channels[j]->dimm;
3014 dimm->mtype = pvt->dram_type;
3015 dimm->edac_mode = edac_mode;
3016 dimm->grain = 64;
3017 }
3018 }
3019
3020 return empty;
3021 }
3022
3023
3024 static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, u16 nid)
3025 {
3026 int cpu;
3027
3028 for_each_online_cpu(cpu)
3029 if (amd_get_nb_id(cpu) == nid)
3030 cpumask_set_cpu(cpu, mask);
3031 }
3032
3033 /* check MCG_CTL on all the cpus on this node */
3034 static bool nb_mce_bank_enabled_on_node(u16 nid)
3035 {
3036 cpumask_var_t mask;
3037 int cpu, nbe;
3038 bool ret = false;
3039
3040 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
3041 amd64_warn("%s: Error allocating mask\n", __func__);
3042 return false;
3043 }
3044
3045 get_cpus_on_this_dct_cpumask(mask, nid);
3046
3047 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
3048
3049 for_each_cpu(cpu, mask) {
3050 struct msr *reg = per_cpu_ptr(msrs, cpu);
3051 nbe = reg->l & MSR_MCGCTL_NBE;
3052
3053 edac_dbg(0, "core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
3054 cpu, reg->q,
3055 (nbe ? "enabled" : "disabled"));
3056
3057 if (!nbe)
3058 goto out;
3059 }
3060 ret = true;
3061
3062 out:
3063 free_cpumask_var(mask);
3064 return ret;
3065 }
3066
3067 static int toggle_ecc_err_reporting(struct ecc_settings *s, u16 nid, bool on)
3068 {
3069 cpumask_var_t cmask;
3070 int cpu;
3071
3072 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
3073 amd64_warn("%s: error allocating mask\n", __func__);
3074 return -ENOMEM;
3075 }
3076
3077 get_cpus_on_this_dct_cpumask(cmask, nid);
3078
3079 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3080
3081 for_each_cpu(cpu, cmask) {
3082
3083 struct msr *reg = per_cpu_ptr(msrs, cpu);
3084
3085 if (on) {
3086 if (reg->l & MSR_MCGCTL_NBE)
3087 s->flags.nb_mce_enable = 1;
3088
3089 reg->l |= MSR_MCGCTL_NBE;
3090 } else {
3091 /*
3092 * Turn off NB MCE reporting only when it was off before
3093 */
3094 if (!s->flags.nb_mce_enable)
3095 reg->l &= ~MSR_MCGCTL_NBE;
3096 }
3097 }
3098 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
3099
3100 free_cpumask_var(cmask);
3101
3102 return 0;
3103 }
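/*
 * Note on the restore semantics above: s->flags.nb_mce_enable records
 * whether a core on this node already had MCGCTL[NBE] set, so switching
 * reporting back off only clears the bit where this driver enabled it.
 */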
3104
3105 static bool enable_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3106 struct pci_dev *F3)
3107 {
3108 bool ret = true;
3109 u32 value, mask = 0x3;
3110
3111 if (toggle_ecc_err_reporting(s, nid, ON)) {
3112 amd64_warn("Error enabling ECC reporting over MCGCTL!\n");
3113 return false;
3114 }
3115
3116 amd64_read_pci_cfg(F3, NBCTL, &value);
3117
3118 s->old_nbctl = value & mask;
3119 s->nbctl_valid = true;
3120
3121 value |= mask;
3122 amd64_write_pci_cfg(F3, NBCTL, value);
3123
3124 amd64_read_pci_cfg(F3, NBCFG, &value);
3125
3126 edac_dbg(0, "1: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3127 nid, value, !!(value & NBCFG_ECC_ENABLE));
3128
3129 if (!(value & NBCFG_ECC_ENABLE)) {
3130 amd64_warn("DRAM ECC disabled on this node, enabling...\n");
3131
3132 s->flags.nb_ecc_prev = 0;
3133
3134 /* Attempt to turn on DRAM ECC Enable */
3135 value |= NBCFG_ECC_ENABLE;
3136 amd64_write_pci_cfg(F3, NBCFG, value);
3137
3138 amd64_read_pci_cfg(F3, NBCFG, &value);
3139
3140 if (!(value & NBCFG_ECC_ENABLE)) {
3141 amd64_warn("Hardware rejected DRAM ECC enable,"
3142 "check memory DIMM configuration.\n");
3143 ret = false;
3144 } else {
3145 amd64_info("Hardware accepted DRAM ECC Enable\n");
3146 }
3147 } else {
3148 s->flags.nb_ecc_prev = 1;
3149 }
3150
3151 edac_dbg(0, "2: node %d, NBCFG=0x%08x[DramEccEn: %d]\n",
3152 nid, value, !!(value & NBCFG_ECC_ENABLE));
3153
3154 return ret;
3155 }
3156
3157 static void restore_ecc_error_reporting(struct ecc_settings *s, u16 nid,
3158 struct pci_dev *F3)
3159 {
3160 u32 value, mask = 0x3;
3161
3162 if (!s->nbctl_valid)
3163 return;
3164
3165 amd64_read_pci_cfg(F3, NBCTL, &value);
3166 value &= ~mask;
3167 value |= s->old_nbctl;
3168
3169 amd64_write_pci_cfg(F3, NBCTL, value);
3170
3171 /* restore the previous BIOS DRAM ECC "off" setting we force-enabled */
3172 if (!s->flags.nb_ecc_prev) {
3173 amd64_read_pci_cfg(F3, NBCFG, &value);
3174 value &= ~NBCFG_ECC_ENABLE;
3175 amd64_write_pci_cfg(F3, NBCFG, value);
3176 }
3177
3178 /* restore the NB Enable MCGCTL bit */
3179 if (toggle_ecc_err_reporting(s, nid, OFF))
3180 amd64_warn("Error restoring NB MCGCTL settings!\n");
3181 }
3182
3183
3184
3185
3186
3187
3188
3189 static const char *ecc_msg =
3190 "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
3191 " Either enable ECC checking or force module loading by setting "
3192 "'ecc_enable_override'.\n"
3193 " (Note that use of the override may cause unknown side effects.)\n";
3194
3195 static bool ecc_enabled(struct pci_dev *F3, u16 nid)
3196 {
3197 bool nb_mce_en = false;
3198 u8 ecc_en = 0, i;
3199 u32 value;
3200
3201 if (boot_cpu_data.x86 >= 0x17) {
3202 u8 umc_en_mask = 0, ecc_en_mask = 0;
3203
3204 for_each_umc(i) {
3205 u32 base = get_umc_base(i);
3206
3207 /* Only check enabled UMCs */
3208 if (amd_smn_read(nid, base + UMCCH_SDP_CTRL, &value))
3209 continue;
3210
3211 if (!(value & UMC_SDP_INIT))
3212 continue;
3213
3214 umc_en_mask |= BIT(i);
3215
3216 if (amd_smn_read(nid, base + UMCCH_UMC_CAP_HI, &value))
3217 continue;
3218
3219 if (value & UMC_ECC_ENABLED)
3220 ecc_en_mask |= BIT(i);
3221 }
3222
3223 /* Check whether at least one UMC is enabled: */
3224 if (umc_en_mask)
3225 ecc_en = umc_en_mask == ecc_en_mask;
3226 else
3227 edac_dbg(0, "Node %d: No enabled UMCs.\n", nid);
3228
3229 /* Assume UMC MCA banks are enabled */
3230 nb_mce_en = true;
3231 } else {
3232 amd64_read_pci_cfg(F3, NBCFG, &value);
3233
3234 ecc_en = !!(value & NBCFG_ECC_ENABLE);
3235
3236 nb_mce_en = nb_mce_bank_enabled_on_node(nid);
3237 if (!nb_mce_en)
3238 edac_dbg(0, "NB MCE bank disabled, set MSR 0x%08x[4] on node %d to enable.\n",
3239 MSR_IA32_MCG_CTL, nid);
3240 }
3241
3242 amd64_info("Node %d: DRAM ECC %s.\n",
3243 nid, (ecc_en ? "enabled" : "disabled"));
3244
3245 if (!ecc_en || !nb_mce_en) {
3246 amd64_info("%s", ecc_msg);
3247 return false;
3248 }
3249 return true;
3250 }
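/*
 * Worked example (illustrative): with two initialized UMCs,
 * umc_en_mask == 0x3. If only UMC0 reports UMC_ECC_ENABLED then
 * ecc_en_mask == 0x1 != umc_en_mask, and DRAM ECC counts as disabled.
 */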
3251
3252 static inline void
3253 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
3254 {
3255 u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
3256
3257 for_each_umc(i) {
3258 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
3259 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
3260 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
3261
3262 dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
3263 dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
3264 }
3265 }
3266
3267 /* Set chipkill only if ECC is enabled: */
3268 if (ecc_en) {
3269 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3270
3271 if (!cpk_en)
3272 return;
3273
3274 if (dev_x4)
3275 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3276 else if (dev_x16)
3277 mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
3278 else
3279 mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
3280 }
3281 }
3282
3283 static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
3284 struct amd64_family_type *fam)
3285 {
3286 struct amd64_pvt *pvt = mci->pvt_info;
3287
3288 mci->mtype_cap = MEM_FLAG_DDR2 | MEM_FLAG_RDDR2;
3289 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3290
3291 if (pvt->umc) {
3292 f17h_determine_edac_ctl_cap(mci, pvt);
3293 } else {
3294 if (pvt->nbcap & NBCAP_SECDED)
3295 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
3296
3297 if (pvt->nbcap & NBCAP_CHIPKILL)
3298 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
3299 }
3300
3301 mci->edac_cap = determine_edac_cap(pvt);
3302 mci->mod_name = EDAC_MOD_STR;
3303 mci->ctl_name = fam->ctl_name;
3304 mci->dev_name = pci_name(pvt->F3);
3305 mci->ctl_page_to_phys = NULL;
3306
3307 /* memory scrubber interface */
3308 mci->set_sdram_scrub_rate = set_scrub_rate;
3309 mci->get_sdram_scrub_rate = get_scrub_rate;
3310 }
3311
3312 /*
3313 * Returns a pointer to the family descriptor on success, NULL otherwise.
3314 */
3315 static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
3316 {
3317 struct amd64_family_type *fam_type = NULL;
3318
3319 pvt->ext_model = boot_cpu_data.x86_model >> 4;
3320 pvt->stepping = boot_cpu_data.x86_stepping;
3321 pvt->model = boot_cpu_data.x86_model;
3322 pvt->fam = boot_cpu_data.x86;
3323
3324 switch (pvt->fam) {
3325 case 0xf:
3326 fam_type = &family_types[K8_CPUS];
3327 pvt->ops = &family_types[K8_CPUS].ops;
3328 break;
3329
3330 case 0x10:
3331 fam_type = &family_types[F10_CPUS];
3332 pvt->ops = &family_types[F10_CPUS].ops;
3333 break;
3334
3335 case 0x15:
3336 if (pvt->model == 0x30) {
3337 fam_type = &family_types[F15_M30H_CPUS];
3338 pvt->ops = &family_types[F15_M30H_CPUS].ops;
3339 break;
3340 } else if (pvt->model == 0x60) {
3341 fam_type = &family_types[F15_M60H_CPUS];
3342 pvt->ops = &family_types[F15_M60H_CPUS].ops;
3343 break;
3344 }
3345
3346 fam_type = &family_types[F15_CPUS];
3347 pvt->ops = &family_types[F15_CPUS].ops;
3348 break;
3349
3350 case 0x16:
3351 if (pvt->model == 0x30) {
3352 fam_type = &family_types[F16_M30H_CPUS];
3353 pvt->ops = &family_types[F16_M30H_CPUS].ops;
3354 break;
3355 }
3356 fam_type = &family_types[F16_CPUS];
3357 pvt->ops = &family_types[F16_CPUS].ops;
3358 break;
3359
3360 case 0x17:
3361 if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
3362 fam_type = &family_types[F17_M10H_CPUS];
3363 pvt->ops = &family_types[F17_M10H_CPUS].ops;
3364 break;
3365 } else if (pvt->model >= 0x30 && pvt->model <= 0x3f) {
3366 fam_type = &family_types[F17_M30H_CPUS];
3367 pvt->ops = &family_types[F17_M30H_CPUS].ops;
3368 break;
3369 } else if (pvt->model >= 0x70 && pvt->model <= 0x7f) {
3370 fam_type = &family_types[F17_M70H_CPUS];
3371 pvt->ops = &family_types[F17_M70H_CPUS].ops;
3372 break;
3373 }
3374
3375 case 0x18:
3376 fam_type = &family_types[F17_CPUS];
3377 pvt->ops = &family_types[F17_CPUS].ops;
3378
3379 if (pvt->fam == 0x18)
3380 family_types[F17_CPUS].ctl_name = "F18h";
3381 break;
3382
3383 default:
3384 amd64_err("Unsupported family!\n");
3385 return NULL;
3386 }
3387
3388 amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
3389 (pvt->fam == 0xf ?
3390 (pvt->ext_model >= K8_REV_F ? "revF or later "
3391 : "revE or earlier ")
3392 : ""), pvt->mc_node_id);
3393 return fam_type;
3394 }
3395
3396 static const struct attribute_group *amd64_edac_attr_groups[] = {
3397 #ifdef CONFIG_EDAC_DEBUG
3398 &amd64_edac_dbg_group,
3399 #endif
3400 #ifdef CONFIG_EDAC_AMD64_ERROR_INJECTION
3401 &amd64_edac_inj_group,
3402 #endif
3403 NULL
3404 };
3405
3406 /* Set the number of Unified Memory Controllers in the system */
3407 static void compute_num_umcs(void)
3408 {
3409 u8 model = boot_cpu_data.x86_model;
3410
3411 if (boot_cpu_data.x86 < 0x17)
3412 return;
3413
3414 if (model >= 0x30 && model <= 0x3f)
3415 num_umcs = 8;
3416 else
3417 num_umcs = 2;
3418
3419 edac_dbg(1, "Number of UMCs: %x\n", num_umcs);
3420 }
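/*
 * Family 17h, models 0x30-0x3f expose eight UMCs per node; every other
 * model handled here has two. num_umcs sizes both pvt->umc and the
 * channel layer allocated in init_one_instance().
 */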
3421
3422 static int init_one_instance(unsigned int nid)
3423 {
3424 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3425 struct amd64_family_type *fam_type = NULL;
3426 struct mem_ctl_info *mci = NULL;
3427 struct edac_mc_layer layers[2];
3428 struct amd64_pvt *pvt = NULL;
3429 u16 pci_id1, pci_id2;
3430 int err = 0, ret;
3431
3432 ret = -ENOMEM;
3433 pvt = kzalloc(sizeof(struct amd64_pvt), GFP_KERNEL);
3434 if (!pvt)
3435 goto err_ret;
3436
3437 pvt->mc_node_id = nid;
3438 pvt->F3 = F3;
3439
3440 ret = -EINVAL;
3441 fam_type = per_family_init(pvt);
3442 if (!fam_type)
3443 goto err_free;
3444
3445 if (pvt->fam >= 0x17) {
3446 pvt->umc = kcalloc(num_umcs, sizeof(struct amd64_umc), GFP_KERNEL);
3447 if (!pvt->umc) {
3448 ret = -ENOMEM;
3449 goto err_free;
3450 }
3451
3452 pci_id1 = fam_type->f0_id;
3453 pci_id2 = fam_type->f6_id;
3454 } else {
3455 pci_id1 = fam_type->f1_id;
3456 pci_id2 = fam_type->f2_id;
3457 }
3458
3459 err = reserve_mc_sibling_devs(pvt, pci_id1, pci_id2);
3460 if (err)
3461 goto err_post_init;
3462
3463 read_mc_regs(pvt);
3464
3465
3466 /*
3467 * Determine how many memory channels there are; that count sizes the
3468 * dynamic instance tables in the 'mci' structure below.
3469 */
3470 ret = -EINVAL;
3471 pvt->channel_count = pvt->ops->early_channel_count(pvt);
3472 if (pvt->channel_count < 0)
3473 goto err_siblings;
3474
3475 ret = -ENOMEM;
3476 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
3477 layers[0].size = pvt->csels[0].b_cnt;
3478 layers[0].is_virt_csrow = true;
3479 layers[1].type = EDAC_MC_LAYER_CHANNEL;
3480
3481
3482 /*
3483 * Family 17h and later has one channel per UMC; older families always
3484 * get two channels, since a setup may have DIMMs on only one of them.
3485 */
3489 if (pvt->fam >= 0x17)
3490 layers[1].size = num_umcs;
3491 else
3492 layers[1].size = 2;
3493 layers[1].is_virt_csrow = false;
3494
3495 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
3496 if (!mci)
3497 goto err_siblings;
3498
3499 mci->pvt_info = pvt;
3500 mci->pdev = &pvt->F3->dev;
3501
3502 setup_mci_misc_attrs(mci, fam_type);
3503
3504 if (init_csrows(mci))
3505 mci->edac_cap = EDAC_FLAG_NONE;
3506
3507 ret = -ENODEV;
3508 if (edac_mc_add_mc_with_groups(mci, amd64_edac_attr_groups)) {
3509 edac_dbg(1, "failed edac_mc_add_mc()\n");
3510 goto err_add_mc;
3511 }
3512
3513 return 0;
3514
3515 err_add_mc:
3516 edac_mc_free(mci);
3517
3518 err_siblings:
3519 free_mc_sibling_devs(pvt);
3520
3521 err_post_init:
3522 if (pvt->fam >= 0x17)
3523 kfree(pvt->umc);
3524
3525 err_free:
3526 kfree(pvt);
3527
3528 err_ret:
3529 return ret;
3530 }
3531
3532 static int probe_one_instance(unsigned int nid)
3533 {
3534 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3535 struct ecc_settings *s;
3536 int ret;
3537
3538 ret = -ENOMEM;
3539 s = kzalloc(sizeof(struct ecc_settings), GFP_KERNEL);
3540 if (!s)
3541 goto err_out;
3542
3543 ecc_stngs[nid] = s;
3544
3545 if (!ecc_enabled(F3, nid)) {
3546 ret = 0;
3547
3548 if (!ecc_enable_override)
3549 goto err_enable;
3550
3551 if (boot_cpu_data.x86 >= 0x17) {
3552 amd64_warn("Forcing ECC on is not recommended on newer systems. Please enable ECC in BIOS.");
3553 goto err_enable;
3554 } else
3555 amd64_warn("Forcing ECC on!\n");
3556
3557 if (!enable_ecc_error_reporting(s, nid, F3))
3558 goto err_enable;
3559 }
3560
3561 ret = init_one_instance(nid);
3562 if (ret < 0) {
3563 amd64_err("Error probing instance: %d\n", nid);
3564
3565 if (boot_cpu_data.x86 < 0x17)
3566 restore_ecc_error_reporting(s, nid, F3);
3567
3568 goto err_enable;
3569 }
3570
3571 return ret;
3572
3573 err_enable:
3574 kfree(s);
3575 ecc_stngs[nid] = NULL;
3576
3577 err_out:
3578 return ret;
3579 }
3580
3581 static void remove_one_instance(unsigned int nid)
3582 {
3583 struct pci_dev *F3 = node_to_amd_nb(nid)->misc;
3584 struct ecc_settings *s = ecc_stngs[nid];
3585 struct mem_ctl_info *mci;
3586 struct amd64_pvt *pvt;
3587
3588 mci = find_mci_by_dev(&F3->dev);
3589 WARN_ON(!mci);
3590
3591 /* Remove from EDAC CORE tracking list */
3592 mci = edac_mc_del_mc(&F3->dev);
3593 if (!mci)
3594 return;
3595
3596 pvt = mci->pvt_info;
3597
3598 restore_ecc_error_reporting(s, nid, F3);
3599
3600 free_mc_sibling_devs(pvt);
3601
3602 kfree(ecc_stngs[nid]);
3603 ecc_stngs[nid] = NULL;
3604
3605 /* Free the EDAC CORE resources */
3606 mci->pvt_info = NULL;
3607
3608 kfree(pvt);
3609 edac_mc_free(mci);
3610 }
3611
3612 static void setup_pci_device(void)
3613 {
3614 struct mem_ctl_info *mci;
3615 struct amd64_pvt *pvt;
3616
3617 if (pci_ctl)
3618 return;
3619
3620 mci = edac_mc_find(0);
3621 if (!mci)
3622 return;
3623
3624 pvt = mci->pvt_info;
3625 if (pvt->umc)
3626 pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3627 else
3628 pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3629 if (!pci_ctl) {
3630 pr_warn("%s(): Unable to create PCI control\n", __func__);
3631 pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3632 }
3633 }
3634
3635 static const struct x86_cpu_id amd64_cpuids[] = {
3636 { X86_VENDOR_AMD, 0xF, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3637 { X86_VENDOR_AMD, 0x10, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3638 { X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3639 { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3640 { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3641 { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
3642 { }
3643 };
3644 MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
3645
3646 static int __init amd64_edac_init(void)
3647 {
3648 const char *owner;
3649 int err = -ENODEV;
3650 int i;
3651
3652 owner = edac_get_owner();
3653 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3654 return -EBUSY;
3655
3656 if (!x86_match_cpu(amd64_cpuids))
3657 return -ENODEV;
3658
3659 if (amd_cache_northbridges() < 0)
3660 return -ENODEV;
3661
3662 opstate_init();
3663
3664 err = -ENOMEM;
3665 ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL);
3666 if (!ecc_stngs)
3667 goto err_free;
3668
3669 msrs = msrs_alloc();
3670 if (!msrs)
3671 goto err_free;
3672
3673 compute_num_umcs();
3674
3675 for (i = 0; i < amd_nb_num(); i++) {
3676 err = probe_one_instance(i);
3677 if (err) {
3678 /* unwind properly */
3679 while (--i >= 0)
3680 remove_one_instance(i);
3681
3682 goto err_pci;
3683 }
3684 }
3685
3686 if (!edac_has_mcs()) {
3687 err = -ENODEV;
3688 goto err_pci;
3689 }
3690
3691 /* register with the MCE decoding chain */
3692 if (report_gart_errors)
3693 amd_report_gart_errors(true);
3694
3695 if (boot_cpu_data.x86 >= 0x17)
3696 amd_register_ecc_decoder(decode_umc_error);
3697 else
3698 amd_register_ecc_decoder(decode_bus_error);
3699
3700 setup_pci_device();
3701
3702 #ifdef CONFIG_X86_32
3703 amd64_err("%s on 32-bit is unsupported. USE AT YOUR OWN RISK!\n", EDAC_MOD_STR);
3704 #endif
3705
3706 printk(KERN_INFO "AMD64 EDAC driver v%s\n", EDAC_AMD64_VERSION);
3707
3708 return 0;
3709
3710 err_pci:
3711 msrs_free(msrs);
3712 msrs = NULL;
3713
3714 err_free:
3715 kfree(ecc_stngs);
3716 ecc_stngs = NULL;
3717
3718 return err;
3719 }
3720
3721 static void __exit amd64_edac_exit(void)
3722 {
3723 int i;
3724
3725 if (pci_ctl)
3726 edac_pci_release_generic_ctl(pci_ctl);
3727
3728 /* unregister from the MCE decoding chain */
3729 amd_report_gart_errors(false);
3730
3731 if (boot_cpu_data.x86 >= 0x17)
3732 amd_unregister_ecc_decoder(decode_umc_error);
3733 else
3734 amd_unregister_ecc_decoder(decode_bus_error);
3735
3736 for (i = 0; i < amd_nb_num(); i++)
3737 remove_one_instance(i);
3738
3739 kfree(ecc_stngs);
3740 ecc_stngs = NULL;
3741
3742 msrs_free(msrs);
3743 msrs = NULL;
3744 }
3745
3746 module_init(amd64_edac_init);
3747 module_exit(amd64_edac_exit);
3748
3749 MODULE_LICENSE("GPL");
3750 MODULE_AUTHOR("SoftwareBitMaker: Doug Thompson, "
3751 "Dave Peterson, Thayne Harbaugh");
3752 MODULE_DESCRIPTION("MC support for AMD64 memory controllers - "
3753 EDAC_AMD64_VERSION);
3754
3755 module_param(edac_op_state, int, 0444);
3756 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
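
/*
 * Example usage (illustrative; module and sysfs names assumed from the
 * usual EDAC build rather than taken from this file):
 *
 *   modprobe amd64_edac_mod ecc_enable_override=1 report_gart_errors=1
 *   cat /sys/devices/system/edac/mc/mc0/ce_count
 */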