This source file includes the following definitions:
- show_dram_attr
- sad_pkg
- numrank
- numrow
- numcol
- get_sbridge_dev
- alloc_sbridge_dev
- free_sbridge_dev
- sbridge_get_tolm
- sbridge_get_tohm
- ibridge_get_tolm
- ibridge_get_tohm
- rir_limit
- sad_limit
- interleave_mode
- dram_attr
- knl_sad_limit
- knl_interleave_mode
- get_intlv_mode_str
- dram_attr_knl
- get_memory_type
- haswell_get_memory_type
- knl_get_width
- sbridge_get_width
- __ibridge_get_width
- ibridge_get_width
- broadwell_get_width
- knl_get_memory_type
- get_node_id
- haswell_get_node_id
- knl_get_node_id
- sbridge_get_ha
- ibridge_get_ha
- knl_get_ha
- haswell_get_tolm
- haswell_get_tohm
- knl_get_tolm
- knl_get_tohm
- haswell_rir_limit
- sad_pkg_socket
- sad_pkg_ha
- haswell_chan_hash
- knl_get_tad
- knl_channel_mc
- knl_get_edc_route
- knl_get_mc_route
- knl_show_edc_route
- knl_show_mc_route
- knl_get_dimm_capacity
- get_source_id
- __populate_dimms
- get_dimm_config
- get_memory_layout
- get_mci_for_node_id
- get_memory_error_data
- get_memory_error_data_from_mce
- sbridge_put_devices
- sbridge_put_all_devices
- sbridge_get_onedevice
- sbridge_get_all_devices
- sbridge_mci_bind_devs
- ibridge_mci_bind_devs
- haswell_mci_bind_devs
- broadwell_mci_bind_devs
- knl_mci_bind_devs
- sbridge_mce_output_error
- sbridge_mce_check_error
- sbridge_unregister_mci
- sbridge_register_mci
- sbridge_probe
- sbridge_remove
- sbridge_init
- sbridge_exit
1 // SPDX-License-Identifier: GPL-2.0
2 /* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
3  *
4  * This driver supports the memory controllers found on the Intel
5  * processor family Sandy Bridge.
6  *
7  * Copyright (c) 2011 by:
8  *	 Mauro Carvalho Chehab
9  */
10
11 #include <linux/module.h>
12 #include <linux/init.h>
13 #include <linux/pci.h>
14 #include <linux/pci_ids.h>
15 #include <linux/slab.h>
16 #include <linux/delay.h>
17 #include <linux/edac.h>
18 #include <linux/mmzone.h>
19 #include <linux/smp.h>
20 #include <linux/bitmap.h>
21 #include <linux/math64.h>
22 #include <linux/mod_devicetable.h>
23 #include <asm/cpu_device_id.h>
24 #include <asm/intel-family.h>
25 #include <asm/processor.h>
26 #include <asm/mce.h>
27
28 #include "edac_module.h"
29
30 /* Static vars */
31 static LIST_HEAD(sbridge_edac_list);
32
33 /*
34  * Alter this version for the module when modifications are made
35  */
36 #define SBRIDGE_REVISION " Ver: 1.1.2 "
37 #define EDAC_MOD_STR "sb_edac"
38
39 /*
40  * Debug macros
41  */
42 #define sbridge_printk(level, fmt, arg...) \
43 edac_printk(level, "sbridge", fmt, ##arg)
44
45 #define sbridge_mc_printk(mci, level, fmt, arg...) \
46 edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)
47
48 /*
49  * Get a bit field at register value <v>, from bit <lo> to bit <hi>
50  */
51 #define GET_BITFIELD(v, lo, hi) \
52 (((v) & GENMASK_ULL(hi, lo)) >> (lo))
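/*
 * Worked example (illustrative value, not from real hardware):
 * GET_BITFIELD() masks bits lo..hi of v and shifts them down to bit 0,
 * e.g. GET_BITFIELD(0x12345678, 8, 15) == 0x56, because
 * GENMASK_ULL(15, 8) == 0xff00, 0x12345678 & 0xff00 == 0x5600, and
 * 0x5600 >> 8 == 0x56. Every register decode below builds on this.
 */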
53
54
55 static const u32 sbridge_dram_rule[] = {
56 0x80, 0x88, 0x90, 0x98, 0xa0,
57 0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
58 };
59
60 static const u32 ibridge_dram_rule[] = {
61 0x60, 0x68, 0x70, 0x78, 0x80,
62 0x88, 0x90, 0x98, 0xa0, 0xa8,
63 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
64 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
65 };
66
67 static const u32 knl_dram_rule[] = {
68 0x60, 0x68, 0x70, 0x78, 0x80,
69 0x88, 0x90, 0x98, 0xa0, 0xa8,
70 0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
71 0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
72 0x100, 0x108, 0x110, 0x118,
73 };
74
75 #define DRAM_RULE_ENABLE(reg) GET_BITFIELD(reg, 0, 0)
76 #define A7MODE(reg) GET_BITFIELD(reg, 26, 26)
77
78 static char *show_dram_attr(u32 attr)
79 {
80 switch (attr) {
81 case 0:
82 return "DRAM";
83 case 1:
84 return "MMCFG";
85 case 2:
86 return "NXM";
87 default:
88 return "unknown";
89 }
90 }
91
92 static const u32 sbridge_interleave_list[] = {
93 0x84, 0x8c, 0x94, 0x9c, 0xa4,
94 0xac, 0xb4, 0xbc, 0xc4, 0xcc,
95 };
96
97 static const u32 ibridge_interleave_list[] = {
98 0x64, 0x6c, 0x74, 0x7c, 0x84,
99 0x8c, 0x94, 0x9c, 0xa4, 0xac,
100 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
101 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
102 };
103
104 static const u32 knl_interleave_list[] = {
105 0x64, 0x6c, 0x74, 0x7c, 0x84,
106 0x8c, 0x94, 0x9c, 0xa4, 0xac,
107 0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
108 0xdc, 0xe4, 0xec, 0xf4, 0xfc,
109 0x104, 0x10c, 0x114, 0x11c,
110 };
111 #define MAX_INTERLEAVE \
112 (max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list), \
113 max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list), \
114 ARRAY_SIZE(knl_interleave_list))))
115
116 struct interleave_pkg {
117 unsigned char start;
118 unsigned char end;
119 };
120
121 static const struct interleave_pkg sbridge_interleave_pkg[] = {
122 { 0, 2 },
123 { 3, 5 },
124 { 8, 10 },
125 { 11, 13 },
126 { 16, 18 },
127 { 19, 21 },
128 { 24, 26 },
129 { 27, 29 },
130 };
131
132 static const struct interleave_pkg ibridge_interleave_pkg[] = {
133 { 0, 3 },
134 { 4, 7 },
135 { 8, 11 },
136 { 12, 15 },
137 { 16, 19 },
138 { 20, 23 },
139 { 24, 27 },
140 { 28, 31 },
141 };
142
143 static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
144 int interleave)
145 {
146 return GET_BITFIELD(reg, table[interleave].start,
147 table[interleave].end);
148 }
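/*
 * Usage sketch (hypothetical register value): with the Sandy Bridge
 * table above, interleave slot 1 occupies bits 3..5 of the interleave
 * list register, so for reg == 0x28 (0b101000):
 *
 *	sad_pkg(sbridge_interleave_pkg, 0x28, 1) == 5
 *
 * i.e. the second interleave target of that SAD rule is package 5.
 */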
149
150
151
152 #define TOLM 0x80
153 #define TOHM 0x84
154 #define HASWELL_TOLM 0xd0
155 #define HASWELL_TOHM_0 0xd4
156 #define HASWELL_TOHM_1 0xd8
157 #define KNL_TOLM 0xd0
158 #define KNL_TOHM_0 0xd4
159 #define KNL_TOHM_1 0xd8
160
161 #define GET_TOLM(reg) ((GET_BITFIELD(reg, 0, 3) << 28) | 0x3ffffff)
162 #define GET_TOHM(reg) ((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
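/*
 * Worked example (hypothetical register value): TOLM keeps address
 * bits 31:28 in its low nibble, and the decoded value fills the low 26
 * bits with ones, making it an inclusive limit with 64 MiB granularity:
 *
 *	GET_TOLM(0x4) == (0x4 << 28) | 0x3ffffff == 0x43ffffff
 *
 * i.e. low memory ends just below the 1 GiB + 64 MiB boundary.
 */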
163
164
165
166 #define SAD_TARGET 0xf0
167
168 #define SOURCE_ID(reg) GET_BITFIELD(reg, 9, 11)
169
170 #define SOURCE_ID_KNL(reg) GET_BITFIELD(reg, 12, 14)
171
172 #define SAD_CONTROL 0xf4
173
174
175
176 static const u32 tad_dram_rule[] = {
177 0x40, 0x44, 0x48, 0x4c,
178 0x50, 0x54, 0x58, 0x5c,
179 0x60, 0x64, 0x68, 0x6c,
180 };
181 #define MAX_TAD ARRAY_SIZE(tad_dram_rule)
182
183 #define TAD_LIMIT(reg) ((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
184 #define TAD_SOCK(reg) GET_BITFIELD(reg, 10, 11)
185 #define TAD_CH(reg) GET_BITFIELD(reg, 8, 9)
186 #define TAD_TGT3(reg) GET_BITFIELD(reg, 6, 7)
187 #define TAD_TGT2(reg) GET_BITFIELD(reg, 4, 5)
188 #define TAD_TGT1(reg) GET_BITFIELD(reg, 2, 3)
189 #define TAD_TGT0(reg) GET_BITFIELD(reg, 0, 1)
190
191
192
193 #define MCMTR 0x7c
194 #define KNL_MCMTR 0x624
195
196 #define IS_ECC_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 2, 2)
197 #define IS_LOCKSTEP_ENABLED(mcmtr) GET_BITFIELD(mcmtr, 1, 1)
198 #define IS_CLOSE_PG(mcmtr) GET_BITFIELD(mcmtr, 0, 0)
199
200
201
202 #define RASENABLES 0xac
203 #define IS_MIRROR_ENABLED(reg) GET_BITFIELD(reg, 0, 0)
204
205
206
207 static const int mtr_regs[] = {
208 0x80, 0x84, 0x88,
209 };
210
211 static const int knl_mtr_reg = 0xb60;
212
213 #define RANK_DISABLE(mtr) GET_BITFIELD(mtr, 16, 19)
214 #define IS_DIMM_PRESENT(mtr) GET_BITFIELD(mtr, 14, 14)
215 #define RANK_CNT_BITS(mtr) GET_BITFIELD(mtr, 12, 13)
216 #define RANK_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 2, 4)
217 #define COL_WIDTH_BITS(mtr) GET_BITFIELD(mtr, 0, 1)
218
219 static const u32 tad_ch_nilv_offset[] = {
220 0x90, 0x94, 0x98, 0x9c,
221 0xa0, 0xa4, 0xa8, 0xac,
222 0xb0, 0xb4, 0xb8, 0xbc,
223 };
224 #define CHN_IDX_OFFSET(reg) GET_BITFIELD(reg, 28, 29)
225 #define TAD_OFFSET(reg) (GET_BITFIELD(reg, 6, 25) << 26)
226
227 static const u32 rir_way_limit[] = {
228 0x108, 0x10c, 0x110, 0x114, 0x118,
229 };
230 #define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)
231
232 #define IS_RIR_VALID(reg) GET_BITFIELD(reg, 31, 31)
233 #define RIR_WAY(reg) GET_BITFIELD(reg, 28, 29)
234
235 #define MAX_RIR_WAY 8
236
237 static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
238 { 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
239 { 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
240 { 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
241 { 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
242 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
243 };
244
245 #define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
246 GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
247
248 #define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
249 GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
250
251
252
253
254
255
256
257 static const u32 correrrcnt[] = {
258 0x104, 0x108, 0x10c, 0x110,
259 };
260
261 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
262 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
263 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
264 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
265
266 static const u32 correrrthrsld[] = {
267 0x11c, 0x120, 0x124, 0x128,
268 };
269
270 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
271 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
272
273
274
275
276 #define SB_RANK_CFG_A 0x0328
277
278 #define IB_RANK_CFG_A 0x0320
279
280
281
282
283
284 #define NUM_CHANNELS 6
285 #define MAX_DIMMS 3
286 #define KNL_MAX_CHAS 38
287 #define KNL_MAX_CHANNELS 6
288 #define KNL_MAX_EDCS 8
289 #define CHANNEL_UNSPECIFIED 0xf
290
291 enum type {
292 SANDY_BRIDGE,
293 IVY_BRIDGE,
294 HASWELL,
295 BROADWELL,
296 KNIGHTS_LANDING,
297 };
298
299 enum domain {
300 IMC0 = 0,
301 IMC1,
302 SOCK,
303 };
304
305 enum mirroring_mode {
306 NON_MIRRORING,
307 ADDR_RANGE_MIRRORING,
308 FULL_MIRRORING,
309 };
310
311 struct sbridge_pvt;
312 struct sbridge_info {
313 enum type type;
314 u32 mcmtr;
315 u32 rankcfgr;
316 u64 (*get_tolm)(struct sbridge_pvt *pvt);
317 u64 (*get_tohm)(struct sbridge_pvt *pvt);
318 u64 (*rir_limit)(u32 reg);
319 u64 (*sad_limit)(u32 reg);
320 u32 (*interleave_mode)(u32 reg);
321 u32 (*dram_attr)(u32 reg);
322 const u32 *dram_rule;
323 const u32 *interleave_list;
324 const struct interleave_pkg *interleave_pkg;
325 u8 max_sad;
326 u8 (*get_node_id)(struct sbridge_pvt *pvt);
327 u8 (*get_ha)(u8 bank);
328 enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
329 enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
330 struct pci_dev *pci_vtd;
331 };
332
333 struct sbridge_channel {
334 u32 ranks;
335 u32 dimms;
336 };
337
338 struct pci_id_descr {
339 int dev_id;
340 int optional;
341 enum domain dom;
342 };
343
344 struct pci_id_table {
345 const struct pci_id_descr *descr;
346 int n_devs_per_imc;
347 int n_devs_per_sock;
348 int n_imcs_per_sock;
349 enum type type;
350 };
351
352 struct sbridge_dev {
353 struct list_head list;
354 int seg;
355 u8 bus, mc;
356 u8 node_id, source_id;
357 struct pci_dev **pdev;
358 enum domain dom;
359 int n_devs;
360 int i_devs;
361 struct mem_ctl_info *mci;
362 };
363
364 struct knl_pvt {
365 struct pci_dev *pci_cha[KNL_MAX_CHAS];
366 struct pci_dev *pci_channel[KNL_MAX_CHANNELS];
367 struct pci_dev *pci_mc0;
368 struct pci_dev *pci_mc1;
369 struct pci_dev *pci_mc0_misc;
370 struct pci_dev *pci_mc1_misc;
371 struct pci_dev *pci_mc_info;
372 };
373
374 struct sbridge_pvt {
375
376 struct pci_dev *pci_ddrio;
377 struct pci_dev *pci_sad0, *pci_sad1;
378 struct pci_dev *pci_br0, *pci_br1;
379
380 struct pci_dev *pci_ha, *pci_ta, *pci_ras;
381 struct pci_dev *pci_tad[NUM_CHANNELS];
382
383 struct sbridge_dev *sbridge_dev;
384
385 struct sbridge_info info;
386 struct sbridge_channel channel[NUM_CHANNELS];
387
388
389 bool is_cur_addr_mirrored, is_lockstep, is_close_pg;
390 bool is_chan_hash;
391 enum mirroring_mode mirror_mode;
392
393
394 u64 tolm, tohm;
395 struct knl_pvt knl;
396 };
397
398 #define PCI_DESCR(device_id, opt, domain) \
399 .dev_id = (device_id), \
400 .optional = opt, \
401 .dom = domain
402
403 static const struct pci_id_descr pci_dev_descr_sbridge[] = {
404
405 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0, 0, IMC0) },
406
407
408 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA, 0, IMC0) },
409 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS, 0, IMC0) },
410 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0, 0, IMC0) },
411 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1, 0, IMC0) },
412 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2, 0, IMC0) },
413 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3, 0, IMC0) },
414 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },
415
416
417 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0, 0, SOCK) },
418 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1, 0, SOCK) },
419
420
421 { PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR, 0, SOCK) },
422 };
423
424 #define PCI_ID_TABLE_ENTRY(A, N, M, T) { \
425 .descr = A, \
426 .n_devs_per_imc = N, \
427 .n_devs_per_sock = ARRAY_SIZE(A), \
428 .n_imcs_per_sock = M, \
429 .type = T \
430 }
431
432 static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
433 PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
434 {0,}
435 };
436
437
438
439
440
441
442
443 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0 0x0eb8
444 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0 0x0ebc
445
446
447 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0 0x0ea0
448 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA 0x0ea8
449 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS 0x0e71
450 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0 0x0eaa
451 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1 0x0eab
452 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2 0x0eac
453 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3 0x0ead
454 #define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD 0x0ec8
455 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0 0x0ec9
456 #define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1 0x0eca
457 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1 0x0e60
458 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA 0x0e68
459 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS 0x0e79
460 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0 0x0e6a
461 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1 0x0e6b
462 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2 0x0e6c
463 #define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3 0x0e6d
464
465 static const struct pci_id_descr pci_dev_descr_ibridge[] = {
466
467 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) },
468 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) },
469
470
471 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) },
472 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS, 0, IMC0) },
473 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0, 0, IMC0) },
474 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1, 0, IMC0) },
475 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2, 0, IMC0) },
476 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) },
477
478
479 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) },
480 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) },
481 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) },
482 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1, 1, IMC1) },
483 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2, 1, IMC1) },
484 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3, 1, IMC1) },
485
486 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
487 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },
488
489
490 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD, 0, SOCK) },
491
492
493 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0, 1, SOCK) },
494 { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1, 0, SOCK) },
495
496 };
497
498 static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
499 PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
500 {0,}
501 };
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519 #define HASWELL_DDRCRCLKCONTROLS 0xa10
520 #define HASWELL_HASYSDEFEATURE2 0x84
521 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
522 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0 0x2fa0
523 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1 0x2f60
524 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA 0x2fa8
525 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM 0x2f71
526 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA 0x2f68
527 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM 0x2f79
528 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
529 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
530 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
531 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
532 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
533 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
534 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
535 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
536 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
537 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
538 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
539 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
540 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
541 #define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
542 static const struct pci_id_descr pci_dev_descr_haswell[] = {
543
544 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0, 0, IMC0) },
545 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1, 1, IMC1) },
546
547 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA, 0, IMC0) },
548 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM, 0, IMC0) },
549 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
550 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
551 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
552 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },
553
554 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA, 1, IMC1) },
555 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM, 1, IMC1) },
556 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
557 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
558 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
559 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },
560
561 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
562 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
563 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0, 1, SOCK) },
564 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1, 1, SOCK) },
565 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2, 1, SOCK) },
566 { PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3, 1, SOCK) },
567 };
568
569 static const struct pci_id_table pci_dev_descr_haswell_table[] = {
570 PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
571 {0,}
572 };
573
574
575
576
577
578
579 #define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
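/*
 * Example (values for illustration only): KNL has two MCs with three
 * channels each; mc == 1 keeps its channel number, while mc == 0 has
 * its channels remapped to the 3..5 range of the flat 0..5 space:
 *
 *	knl_channel_remap(1, 2) == 2
 *	knl_channel_remap(0, 2) == 5
 */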
580
581
582 #define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
583
584 #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN 0x7843
585
586 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TA 0x7844
587
588 #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0 0x782a
589
590 #define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1 0x782b
591
592 #define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA 0x782c
593
594 #define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM 0x7810
595
596
597
598
599
600
601
602
603 static const struct pci_id_descr pci_dev_descr_knl[] = {
604 [0 ... 1] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC, 0, IMC0)},
605 [2 ... 7] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN, 0, IMC0) },
606 [8] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA, 0, IMC0) },
607 [9] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
608 [10] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0, 0, SOCK) },
609 [11] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1, 0, SOCK) },
610 [12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA, 0, SOCK) },
611 };
612
613 static const struct pci_id_table pci_dev_descr_knl_table[] = {
614 PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
615 {0,}
616 };
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
637 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0 0x6fa0
638 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1 0x6f60
639 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA 0x6fa8
640 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM 0x6f71
641 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA 0x6f68
642 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM 0x6f79
643 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
644 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
645 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
646 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
647 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
648 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
649 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
650 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
651 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
652 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
653 #define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf
654
655 static const struct pci_id_descr pci_dev_descr_broadwell[] = {
656
657 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) },
658 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) },
659
660 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA, 0, IMC0) },
661 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM, 0, IMC0) },
662 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
663 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
664 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
665 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },
666
667 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA, 1, IMC1) },
668 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM, 1, IMC1) },
669 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
670 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
671 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
672 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },
673
674 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
675 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
676 { PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0, 1, SOCK) },
677 };
678
679 static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
680 PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
681 {0,}
682 };
683
684
685
686
687
688
689 static inline int numrank(enum type type, u32 mtr)
690 {
691 int ranks = (1 << RANK_CNT_BITS(mtr));
692 int max = 4;
693
694 if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
695 max = 8;
696
697 if (ranks > max) {
698 edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
699 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
700 return -EINVAL;
701 }
702
703 return ranks;
704 }
705
706 static inline int numrow(u32 mtr)
707 {
708 int rows = (RANK_WIDTH_BITS(mtr) + 12);
709
710 if (rows < 13 || rows > 18) {
711 edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
712 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
713 return -EINVAL;
714 }
715
716 return 1 << rows;
717 }
718
719 static inline int numcol(u32 mtr)
720 {
721 int cols = (COL_WIDTH_BITS(mtr) + 10);
722
723 if (cols > 12) {
724 edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
725 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
726 return -EINVAL;
727 }
728
729 return 1 << cols;
730 }
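/*
 * Worked decode of a hypothetical MTR value, tying the three helpers
 * above together. For mtr == 0x5024:
 *
 *	IS_DIMM_PRESENT(0x5024)        -> bit 14 set, DIMM present
 *	numrank(SANDY_BRIDGE, 0x5024)  -> RANK_CNT_BITS == 1, 2 ranks
 *	numrow(0x5024)                 -> 1 << (1 + 12) == 8192 rows
 *	numcol(0x5024)                 -> 1 << (0 + 10) == 1024 cols
 *
 * With 8 banks, the size formula used later in __populate_dimms(),
 * ((u64)rows * cols * banks * ranks) >> (20 - 3), yields 1024 MiB.
 */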
731
732 static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
733 int multi_bus,
734 struct sbridge_dev *prev)
735 {
736 struct sbridge_dev *sbridge_dev;
737
738
739
740
741
742 if (multi_bus) {
743 return list_first_entry_or_null(&sbridge_edac_list,
744 struct sbridge_dev, list);
745 }
746
747 sbridge_dev = list_entry(prev ? prev->list.next
748 : sbridge_edac_list.next, struct sbridge_dev, list);
749
750 list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
751 if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
752 (dom == SOCK || dom == sbridge_dev->dom))
753 return sbridge_dev;
754 }
755
756 return NULL;
757 }
758
759 static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
760 const struct pci_id_table *table)
761 {
762 struct sbridge_dev *sbridge_dev;
763
764 sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
765 if (!sbridge_dev)
766 return NULL;
767
768 sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
769 sizeof(*sbridge_dev->pdev),
770 GFP_KERNEL);
771 if (!sbridge_dev->pdev) {
772 kfree(sbridge_dev);
773 return NULL;
774 }
775
776 sbridge_dev->seg = seg;
777 sbridge_dev->bus = bus;
778 sbridge_dev->dom = dom;
779 sbridge_dev->n_devs = table->n_devs_per_imc;
780 list_add_tail(&sbridge_dev->list, &sbridge_edac_list);
781
782 return sbridge_dev;
783 }
784
785 static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
786 {
787 list_del(&sbridge_dev->list);
788 kfree(sbridge_dev->pdev);
789 kfree(sbridge_dev);
790 }
791
792 static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
793 {
794 u32 reg;
795
796 /* Address range is 32:28 */
797 pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
798 return GET_TOLM(reg);
799 }
800
801 static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
802 {
803 u32 reg;
804
805 pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
806 return GET_TOHM(reg);
807 }
808
809 static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
810 {
811 u32 reg;
812
813 pci_read_config_dword(pvt->pci_br1, TOLM, &reg);
814
815 return GET_TOLM(reg);
816 }
817
818 static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
819 {
820 u32 reg;
821
822 pci_read_config_dword(pvt->pci_br1, TOHM, &reg);
823
824 return GET_TOHM(reg);
825 }
826
827 static u64 rir_limit(u32 reg)
828 {
829 return ((u64)GET_BITFIELD(reg, 1, 10) << 29) | 0x1fffffff;
830 }
831
832 static u64 sad_limit(u32 reg)
833 {
834 return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
835 }
836
837 static u32 interleave_mode(u32 reg)
838 {
839 return GET_BITFIELD(reg, 1, 1);
840 }
841
842 static u32 dram_attr(u32 reg)
843 {
844 return GET_BITFIELD(reg, 2, 3);
845 }
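/*
 * Note on the limit helpers (hypothetical register value): sad_limit()
 * keeps address bits 45:26 and fills bits 25:0 with ones, so SAD rules
 * have 64 MiB granularity and inclusive limits. A rule whose bit field
 * 25:6 holds 0x10 decodes as:
 *
 *	sad_limit(0x10 << 6) == (0x10 << 26) | 0x3ffffff == 0x43ffffff
 *
 * rir_limit() follows the same pattern at 512 MiB granularity.
 */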
846
847 static u64 knl_sad_limit(u32 reg)
848 {
849 return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
850 }
851
852 static u32 knl_interleave_mode(u32 reg)
853 {
854 return GET_BITFIELD(reg, 1, 2);
855 }
856
857 static const char * const knl_intlv_mode[] = {
858 "[8:6]", "[10:8]", "[14:12]", "[32:30]"
859 };
860
861 static const char *get_intlv_mode_str(u32 reg, enum type t)
862 {
863 if (t == KNIGHTS_LANDING)
864 return knl_intlv_mode[knl_interleave_mode(reg)];
865 else
866 return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
867 }
868
869 static u32 dram_attr_knl(u32 reg)
870 {
871 return GET_BITFIELD(reg, 3, 4);
872 }
873
874
875 static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
876 {
877 u32 reg;
878 enum mem_type mtype;
879
880 if (pvt->pci_ddrio) {
881 pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
882 &reg);
883 if (GET_BITFIELD(reg, 11, 11))
884
885 mtype = MEM_RDDR3;
886 else
887 mtype = MEM_DDR3;
888 } else
889 mtype = MEM_UNKNOWN;
890
891 return mtype;
892 }
893
894 static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
895 {
896 u32 reg;
897 bool registered = false;
898 enum mem_type mtype = MEM_UNKNOWN;
899
900 if (!pvt->pci_ddrio)
901 goto out;
902
903 pci_read_config_dword(pvt->pci_ddrio,
904 HASWELL_DDRCRCLKCONTROLS, &reg);
905
906 if (GET_BITFIELD(reg, 16, 16))
907 registered = true;
908
909 pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
910 if (GET_BITFIELD(reg, 14, 14)) {
911 if (registered)
912 mtype = MEM_RDDR4;
913 else
914 mtype = MEM_DDR4;
915 } else {
916 if (registered)
917 mtype = MEM_RDDR3;
918 else
919 mtype = MEM_DDR3;
920 }
921
922 out:
923 return mtype;
924 }
925
926 static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
927 {
928
929 return DEV_X16;
930 }
931
932 static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
933 {
934
935 return DEV_UNKNOWN;
936 }
937
938 static enum dev_type __ibridge_get_width(u32 mtr)
939 {
940 enum dev_type type;
941
942 switch (mtr) {
943 case 3:
944 type = DEV_UNKNOWN;
945 break;
946 case 2:
947 type = DEV_X16;
948 break;
949 case 1:
950 type = DEV_X8;
951 break;
952 case 0:
953 type = DEV_X4;
954 break;
955 }
956
957 return type;
958 }
959
960 static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
961 {
962
963
964
965
966 return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
967 }
968
969 static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
970 {
971
972 return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
973 }
974
975 static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
976 {
977
978 return MEM_RDDR4;
979 }
980
981 static u8 get_node_id(struct sbridge_pvt *pvt)
982 {
983 u32 reg;
984 pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
985 return GET_BITFIELD(reg, 0, 2);
986 }
987
988 static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
989 {
990 u32 reg;
991
992 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
993 return GET_BITFIELD(reg, 0, 3);
994 }
995
996 static u8 knl_get_node_id(struct sbridge_pvt *pvt)
997 {
998 u32 reg;
999
1000 pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
1001 return GET_BITFIELD(reg, 0, 2);
1002 }
1003
1004 /*
1005  * Use the reporting bank number to determine which memory
1006  * controller (also known as "ha" for "home agent"). Sandy
1007  * Bridge only has one memory controller per socket, so the
1008  * answer is always zero.
1009  */
1010 static u8 sbridge_get_ha(u8 bank)
1011 {
1012 return 0;
1013 }
1014
1015 /*
1016  * On Ivy Bridge, Haswell and Broadwell, the error may be in a
1017  * home agent bank (7, 8), or one of the per-channel memory
1018  * controller banks (9 - 16).
1019  */
1020 static u8 ibridge_get_ha(u8 bank)
1021 {
1022 switch (bank) {
1023 case 7 ... 8:
1024 return bank - 7;
1025 case 9 ... 16:
1026 return (bank - 9) / 4;
1027 default:
1028 return 0xff;
1029 }
1030 }
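/*
 * Examples: bank 7 -> HA 0 and bank 8 -> HA 1 (home agent banks);
 * banks 9-12 -> HA 0 and banks 13-16 -> HA 1 (per-channel banks),
 * e.g. ibridge_get_ha(13) == (13 - 9) / 4 == 1.
 */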
1031
1032 /* Not used, but included for safety/symmetry */
1033 static u8 knl_get_ha(u8 bank)
1034 {
1035 return 0xff;
1036 }
1037
1038 static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
1039 {
1040 u32 reg;
1041
1042 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
1043 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1044 }
1045
1046 static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
1047 {
1048 u64 rc;
1049 u32 reg;
1050
1051 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
1052 rc = GET_BITFIELD(reg, 26, 31);
1053 pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
1054 rc = ((reg << 6) | rc) << 26;
1055
1056 return rc | 0x3ffffff;
1057 }
1058
1059 static u64 knl_get_tolm(struct sbridge_pvt *pvt)
1060 {
1061 u32 reg;
1062
1063 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
1064 return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
1065 }
1066
1067 static u64 knl_get_tohm(struct sbridge_pvt *pvt)
1068 {
1069 u64 rc;
1070 u32 reg_lo, reg_hi;
1071
1072 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
1073 pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
1074 rc = ((u64)reg_hi << 32) | reg_lo;
1075 return rc | 0x3ffffff;
1076 }
1077
1078
1079 static u64 haswell_rir_limit(u32 reg)
1080 {
1081 return (((u64)GET_BITFIELD(reg, 1, 11) + 1) << 29) - 1;
1082 }
1083
1084 static inline u8 sad_pkg_socket(u8 pkg)
1085 {
1086 /* Socket id is kept in bits {3,1,0} of pkg; bit 2 selects the home agent */
1087 return ((pkg >> 3) << 2) | (pkg & 0x3);
1088 }
1089
1090 static inline u8 sad_pkg_ha(u8 pkg)
1091 {
1092 return (pkg >> 2) & 0x1;
1093 }
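/*
 * Example: the 3-bit package field packs the socket id into bits
 * {3,1,0} and the home agent into bit 2, so for pkg == 5 (0b101):
 *
 *	sad_pkg_socket(5) == ((5 >> 3) << 2) | (5 & 3) == 1
 *	sad_pkg_ha(5)     == (5 >> 2) & 1 == 1
 *
 * i.e. socket 1, home agent 1.
 */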
1094
1095 static int haswell_chan_hash(int idx, u64 addr)
1096 {
1097 int i;
1098
1099 /*
1100  * XOR even bits from 12:26 to bit0 of idx,
1101  * odd bits from 13:27 to bit1
1102  */
1103 for (i = 12; i < 28; i += 2)
1104 idx ^= (addr >> i) & 3;
1105
1106 return idx;
1107 }
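/*
 * Worked example (hypothetical address): the loop folds the address
 * bit pairs {13:12}, {15:14}, ... {27:26} into the 2-bit channel index.
 * For idx == 0 and addr == 0x3000 only bits 12 and 13 are set, so
 * haswell_chan_hash(0, 0x3000) == 0 ^ 3 == 3.
 */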
1108
1109
1110 static const u32 knl_tad_dram_limit_lo[] = {
1111 0x400, 0x500, 0x600, 0x700,
1112 0x800, 0x900, 0xa00, 0xb00,
1113 };
1114
1115
1116 static const u32 knl_tad_dram_offset_lo[] = {
1117 0x404, 0x504, 0x604, 0x704,
1118 0x804, 0x904, 0xa04, 0xb04,
1119 };
1120
1121
1122 static const u32 knl_tad_dram_hi[] = {
1123 0x408, 0x508, 0x608, 0x708,
1124 0x808, 0x908, 0xa08, 0xb08,
1125 };
1126
1127
1128 static const u32 knl_tad_ways[] = {
1129 8, 6, 4, 3, 2, 1,
1130 };
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147 static int knl_get_tad(const struct sbridge_pvt *pvt,
1148 const int entry,
1149 const int mc,
1150 u64 *offset,
1151 u64 *limit,
1152 int *ways)
1153 {
1154 u32 reg_limit_lo, reg_offset_lo, reg_hi;
1155 struct pci_dev *pci_mc;
1156 int way_id;
1157
1158 switch (mc) {
1159 case 0:
1160 pci_mc = pvt->knl.pci_mc0;
1161 break;
1162 case 1:
1163 pci_mc = pvt->knl.pci_mc1;
1164 break;
1165 default:
1166 WARN_ON(1);
1167 return -EINVAL;
1168 }
1169
1170 pci_read_config_dword(pci_mc,
1171 knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1172 pci_read_config_dword(pci_mc,
1173 knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1174 pci_read_config_dword(pci_mc,
1175 knl_tad_dram_hi[entry], &reg_hi);
1176
1177
1178 if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1179 return -ENODEV;
1180
1181 way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1182
1183 if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1184 *ways = knl_tad_ways[way_id];
1185 } else {
1186 *ways = 0;
1187 sbridge_printk(KERN_ERR,
1188 "Unexpected value %d in mc_tad_limit_lo wayness field\n",
1189 way_id);
1190 return -ENODEV;
1191 }
1192
1193
1194
1195
1196
1197 *offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1198 ((u64) GET_BITFIELD(reg_hi, 0, 15) << 32);
1199 *limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 |
1200 ((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1201
1202 return 0;
1203 }
1204
1205
1206 static int knl_channel_mc(int channel)
1207 {
1208 WARN_ON(channel < 0 || channel >= 6);
1209
1210 return channel < 3 ? 1 : 0;
1211 }
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228 static u32 knl_get_edc_route(int entry, u32 reg)
1229 {
1230 WARN_ON(entry >= KNL_MAX_EDCS);
1231 return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1232 }
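/*
 * Example (hypothetical route register): each EDC route entry is a
 * 3-bit field, so entry 2 lives in bits 8:6 and, with reg == 0x1c0,
 * knl_get_edc_route(2, 0x1c0) == 7.
 */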
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251 static u32 knl_get_mc_route(int entry, u32 reg)
1252 {
1253 int mc, chan;
1254
1255 WARN_ON(entry >= KNL_MAX_CHANNELS);
1256
1257 mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1258 chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1259
1260 return knl_channel_remap(mc, chan);
1261 }
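/*
 * Example (hypothetical route register): for entry 0 the MC is in bits
 * 2:0 and the channel in bits 19:18. With reg == (2 << 18) | 1, mc == 1
 * and chan == 2, so knl_get_mc_route(0, reg) returns
 * knl_channel_remap(1, 2) == 2.
 */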
1262
1263
1264
1265
1266
1267 static void knl_show_edc_route(u32 reg, char *s)
1268 {
1269 int i;
1270
1271 for (i = 0; i < KNL_MAX_EDCS; i++) {
1272 s[i*2] = knl_get_edc_route(i, reg) + '0';
1273 s[i*2+1] = '-';
1274 }
1275
1276 s[KNL_MAX_EDCS*2 - 1] = '\0';
1277 }
1278
1279
1280
1281
1282
1283 static void knl_show_mc_route(u32 reg, char *s)
1284 {
1285 int i;
1286
1287 for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1288 s[i*2] = knl_get_mc_route(i, reg) + '0';
1289 s[i*2+1] = '-';
1290 }
1291
1292 s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1293 }
1294
1295 #define KNL_EDC_ROUTE 0xb8
1296 #define KNL_MC_ROUTE 0xb4
1297
1298 /* Is this dram rule backed by regular DRAM in flat mode? */
1299 #define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
1300
1301 /* Is this dram rule cached? */
1302 #define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1303
1304 /* Is this rule backed by edc ? */
1305 #define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1306
1307 /* Is this rule backed by DRAM, cacheable in EDRAM? */
1308 #define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1309
1310 /* Is this rule mod3? */
1311 #define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341 static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1342 {
1343 u64 sad_base, sad_size, sad_limit = 0;
1344 u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1345 int sad_rule = 0;
1346 int tad_rule = 0;
1347 int intrlv_ways, tad_ways;
1348 u32 first_pkg, pkg;
1349 int i;
1350 u64 sad_actual_size[2];
1351 u32 dram_rule, interleave_reg;
1352 u32 mc_route_reg[KNL_MAX_CHAS];
1353 u32 edc_route_reg[KNL_MAX_CHAS];
1354 int edram_only;
1355 char edc_route_string[KNL_MAX_EDCS*2];
1356 char mc_route_string[KNL_MAX_CHANNELS*2];
1357 int cur_reg_start;
1358 int mc;
1359 int channel;
1360 int participants[KNL_MAX_CHANNELS];
1361
1362 for (i = 0; i < KNL_MAX_CHANNELS; i++)
1363 mc_sizes[i] = 0;
1364
1365
1366 cur_reg_start = 0;
1367 for (i = 0; i < KNL_MAX_CHAS; i++) {
1368 pci_read_config_dword(pvt->knl.pci_cha[i],
1369 KNL_EDC_ROUTE, &edc_route_reg[i]);
1370
1371 if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1372 knl_show_edc_route(edc_route_reg[i-1],
1373 edc_route_string);
1374 if (cur_reg_start == i-1)
1375 edac_dbg(0, "edc route table for CHA %d: %s\n",
1376 cur_reg_start, edc_route_string);
1377 else
1378 edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1379 cur_reg_start, i-1, edc_route_string);
1380 cur_reg_start = i;
1381 }
1382 }
1383 knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1384 if (cur_reg_start == i-1)
1385 edac_dbg(0, "edc route table for CHA %d: %s\n",
1386 cur_reg_start, edc_route_string);
1387 else
1388 edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1389 cur_reg_start, i-1, edc_route_string);
1390
1391
1392 cur_reg_start = 0;
1393 for (i = 0; i < KNL_MAX_CHAS; i++) {
1394 pci_read_config_dword(pvt->knl.pci_cha[i],
1395 KNL_MC_ROUTE, &mc_route_reg[i]);
1396
1397 if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1398 knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1399 if (cur_reg_start == i-1)
1400 edac_dbg(0, "mc route table for CHA %d: %s\n",
1401 cur_reg_start, mc_route_string);
1402 else
1403 edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1404 cur_reg_start, i-1, mc_route_string);
1405 cur_reg_start = i;
1406 }
1407 }
1408 knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1409 if (cur_reg_start == i-1)
1410 edac_dbg(0, "mc route table for CHA %d: %s\n",
1411 cur_reg_start, mc_route_string);
1412 else
1413 edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1414 cur_reg_start, i-1, mc_route_string);
1415
1416
1417 for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1418
1419 sad_base = sad_limit;
1420
1421 pci_read_config_dword(pvt->pci_sad0,
1422 pvt->info.dram_rule[sad_rule], &dram_rule);
1423
1424 if (!DRAM_RULE_ENABLE(dram_rule))
1425 break;
1426
1427 edram_only = KNL_EDRAM_ONLY(dram_rule);
1428
1429 sad_limit = pvt->info.sad_limit(dram_rule)+1;
1430 sad_size = sad_limit - sad_base;
1431
1432 pci_read_config_dword(pvt->pci_sad0,
1433 pvt->info.interleave_list[sad_rule], &interleave_reg);
1434
1435
1436
1437
1438
1439 first_pkg = sad_pkg(pvt->info.interleave_pkg,
1440 interleave_reg, 0);
1441 for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1442 pkg = sad_pkg(pvt->info.interleave_pkg,
1443 interleave_reg, intrlv_ways);
1444
1445 if ((pkg & 0x8) == 0) {
1446
1447
1448
1449
1450 edac_dbg(0, "Unexpected interleave target %d\n",
1451 pkg);
1452 return -1;
1453 }
1454
1455 if (pkg == first_pkg)
1456 break;
1457 }
1458 if (KNL_MOD3(dram_rule))
1459 intrlv_ways *= 3;
1460
1461 edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1462 sad_rule,
1463 sad_base,
1464 sad_limit,
1465 intrlv_ways,
1466 edram_only ? ", EDRAM" : "");
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479 for (mc = 0; mc < 2; mc++) {
1480 sad_actual_size[mc] = 0;
1481 tad_livespace = 0;
1482 for (tad_rule = 0;
1483 tad_rule < ARRAY_SIZE(
1484 knl_tad_dram_limit_lo);
1485 tad_rule++) {
1486 if (knl_get_tad(pvt,
1487 tad_rule,
1488 mc,
1489 &tad_deadspace,
1490 &tad_limit,
1491 &tad_ways))
1492 break;
1493
1494 tad_size = (tad_limit+1) -
1495 (tad_livespace + tad_deadspace);
1496 tad_livespace += tad_size;
1497 tad_base = (tad_limit+1) - tad_size;
1498
1499 if (tad_base < sad_base) {
1500 if (tad_limit > sad_base)
1501 edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1502 } else if (tad_base < sad_limit) {
1503 if (tad_limit+1 > sad_limit) {
1504 edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1505 } else {
1506
1507 edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1508 tad_rule, tad_base,
1509 tad_limit, tad_size,
1510 mc);
1511 sad_actual_size[mc] += tad_size;
1512 }
1513 }
1514 }
1515 }
1516
1517 for (mc = 0; mc < 2; mc++) {
1518 edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1519 mc, sad_actual_size[mc], sad_actual_size[mc]);
1520 }
1521
1522
1523 if (edram_only)
1524 continue;
1525
1526
1527 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1528 participants[channel] = 0;
1529
1530
1531
1532
1533 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1534 int target;
1535 int cha;
1536
1537 for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1538 for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1539 if (knl_get_mc_route(target,
1540 mc_route_reg[cha]) == channel
1541 && !participants[channel]) {
1542 participants[channel] = 1;
1543 break;
1544 }
1545 }
1546 }
1547 }
1548
1549 for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1550 mc = knl_channel_mc(channel);
1551 if (participants[channel]) {
1552 edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1553 channel,
1554 sad_actual_size[mc]/intrlv_ways,
1555 sad_rule);
1556 mc_sizes[channel] +=
1557 sad_actual_size[mc]/intrlv_ways;
1558 }
1559 }
1560 }
1561
1562 return 0;
1563 }
1564
1565 static void get_source_id(struct mem_ctl_info *mci)
1566 {
1567 struct sbridge_pvt *pvt = mci->pvt_info;
1568 u32 reg;
1569
1570 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1571 pvt->info.type == KNIGHTS_LANDING)
1572 pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1573 else
1574 pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1575
1576 if (pvt->info.type == KNIGHTS_LANDING)
1577 pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1578 else
1579 pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1580 }
1581
1582 static int __populate_dimms(struct mem_ctl_info *mci,
1583 u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1584 enum edac_type mode)
1585 {
1586 struct sbridge_pvt *pvt = mci->pvt_info;
1587 int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1588 : NUM_CHANNELS;
1589 unsigned int i, j, banks, ranks, rows, cols, npages;
1590 struct dimm_info *dimm;
1591 enum mem_type mtype;
1592 u64 size;
1593
1594 mtype = pvt->info.get_memory_type(pvt);
1595 if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1596 edac_dbg(0, "Memory is registered\n");
1597 else if (mtype == MEM_UNKNOWN)
1598 edac_dbg(0, "Cannot determine memory type\n");
1599 else
1600 edac_dbg(0, "Memory is unregistered\n");
1601
1602 if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1603 banks = 16;
1604 else
1605 banks = 8;
1606
1607 for (i = 0; i < channels; i++) {
1608 u32 mtr;
1609
1610 int max_dimms_per_channel;
1611
1612 if (pvt->info.type == KNIGHTS_LANDING) {
1613 max_dimms_per_channel = 1;
1614 if (!pvt->knl.pci_channel[i])
1615 continue;
1616 } else {
1617 max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1618 if (!pvt->pci_tad[i])
1619 continue;
1620 }
1621
1622 for (j = 0; j < max_dimms_per_channel; j++) {
1623 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0);
1624 if (pvt->info.type == KNIGHTS_LANDING) {
1625 pci_read_config_dword(pvt->knl.pci_channel[i],
1626 knl_mtr_reg, &mtr);
1627 } else {
1628 pci_read_config_dword(pvt->pci_tad[i],
1629 mtr_regs[j], &mtr);
1630 }
1631 edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
1632 if (IS_DIMM_PRESENT(mtr)) {
1633 if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1634 sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1635 pvt->sbridge_dev->source_id,
1636 pvt->sbridge_dev->dom, i);
1637 return -ENODEV;
1638 }
1639 pvt->channel[i].dimms++;
1640
1641 ranks = numrank(pvt->info.type, mtr);
1642
1643 if (pvt->info.type == KNIGHTS_LANDING) {
1644
1645 cols = 1 << 10;
1646 rows = knl_mc_sizes[i] /
1647 ((u64) cols * ranks * banks * 8);
1648 } else {
1649 rows = numrow(mtr);
1650 cols = numcol(mtr);
1651 }
1652
1653 size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1654 npages = MiB_TO_PAGES(size);
1655
1656 edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1657 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1658 size, npages,
1659 banks, ranks, rows, cols);
1660
1661 dimm->nr_pages = npages;
1662 dimm->grain = 32;
1663 dimm->dtype = pvt->info.get_width(pvt, mtr);
1664 dimm->mtype = mtype;
1665 dimm->edac_mode = mode;
1666 snprintf(dimm->label, sizeof(dimm->label),
1667 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1668 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1669 }
1670 }
1671 }
1672
1673 return 0;
1674 }
1675
1676 static int get_dimm_config(struct mem_ctl_info *mci)
1677 {
1678 struct sbridge_pvt *pvt = mci->pvt_info;
1679 u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1680 enum edac_type mode;
1681 u32 reg;
1682
1683 pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1684 edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1685 pvt->sbridge_dev->mc,
1686 pvt->sbridge_dev->node_id,
1687 pvt->sbridge_dev->source_id);
1688
1689
1690
1691
1692 if (pvt->info.type == KNIGHTS_LANDING) {
1693 mode = EDAC_S4ECD4ED;
1694 pvt->mirror_mode = NON_MIRRORING;
1695 pvt->is_cur_addr_mirrored = false;
1696
1697 if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1698 return -1;
1699 if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1700 edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1701 return -ENODEV;
1702 }
1703 } else {
1704 if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1705 if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1706 edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1707 return -ENODEV;
1708 }
1709 pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1710 if (GET_BITFIELD(reg, 28, 28)) {
1711 pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1712 edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1713 goto next;
1714 }
1715 }
1716 if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1717 edac_dbg(0, "Failed to read RASENABLES register\n");
1718 return -ENODEV;
1719 }
1720 if (IS_MIRROR_ENABLED(reg)) {
1721 pvt->mirror_mode = FULL_MIRRORING;
1722 edac_dbg(0, "Full memory mirroring is enabled\n");
1723 } else {
1724 pvt->mirror_mode = NON_MIRRORING;
1725 edac_dbg(0, "Memory mirroring is disabled\n");
1726 }
1727
1728 next:
1729 if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1730 edac_dbg(0, "Failed to read MCMTR register\n");
1731 return -ENODEV;
1732 }
1733 if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1734 edac_dbg(0, "Lockstep is enabled\n");
1735 mode = EDAC_S8ECD8ED;
1736 pvt->is_lockstep = true;
1737 } else {
1738 edac_dbg(0, "Lockstep is disabled\n");
1739 mode = EDAC_S4ECD4ED;
1740 pvt->is_lockstep = false;
1741 }
1742 if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1743 edac_dbg(0, "address map is on closed page mode\n");
1744 pvt->is_close_pg = true;
1745 } else {
1746 edac_dbg(0, "address map is on open page mode\n");
1747 pvt->is_close_pg = false;
1748 }
1749 }
1750
1751 return __populate_dimms(mci, knl_mc_sizes, mode);
1752 }
1753
1754 static void get_memory_layout(const struct mem_ctl_info *mci)
1755 {
1756 struct sbridge_pvt *pvt = mci->pvt_info;
1757 int i, j, k, n_sads, n_tads, sad_interl;
1758 u32 reg;
1759 u64 limit, prv = 0;
1760 u64 tmp_mb;
1761 u32 gb, mb;
1762 u32 rir_way;
1763
1764
1765
1766
1767
1768 pvt->tolm = pvt->info.get_tolm(pvt);
1769 tmp_mb = (1 + pvt->tolm) >> 20;
1770
1771 gb = div_u64_rem(tmp_mb, 1024, &mb);
1772 edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1773 gb, (mb*1000)/1024, (u64)pvt->tolm);
1774
1775
1776 pvt->tohm = pvt->info.get_tohm(pvt);
1777 tmp_mb = (1 + pvt->tohm) >> 20;
1778
1779 gb = div_u64_rem(tmp_mb, 1024, &mb);
1780 edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1781 gb, (mb*1000)/1024, (u64)pvt->tohm);
1782
1783
1784
1785
1786
1787
1788
1789 prv = 0;
1790 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1791
1792 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1793 &reg);
1794 limit = pvt->info.sad_limit(reg);
1795
1796 if (!DRAM_RULE_ENABLE(reg))
1797 continue;
1798
1799 if (limit <= prv)
1800 break;
1801
1802 tmp_mb = (limit + 1) >> 20;
1803 gb = div_u64_rem(tmp_mb, 1024, &mb);
1804 edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1805 n_sads,
1806 show_dram_attr(pvt->info.dram_attr(reg)),
1807 gb, (mb*1000)/1024,
1808 ((u64)tmp_mb) << 20L,
1809 get_intlv_mode_str(reg, pvt->info.type),
1810 reg);
1811 prv = limit;
1812
1813 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1814 &reg);
1815 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1816 for (j = 0; j < 8; j++) {
1817 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1818 if (j > 0 && sad_interl == pkg)
1819 break;
1820
1821 edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1822 n_sads, j, pkg);
1823 }
1824 }
1825
1826 if (pvt->info.type == KNIGHTS_LANDING)
1827 return;
1828
1829
1830
1831
1832 prv = 0;
1833 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1834 pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1835 limit = TAD_LIMIT(reg);
1836 if (limit <= prv)
1837 break;
1838 tmp_mb = (limit + 1) >> 20;
1839
1840 gb = div_u64_rem(tmp_mb, 1024, &mb);
1841 edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1842 n_tads, gb, (mb*1000)/1024,
1843 ((u64)tmp_mb) << 20L,
1844 (u32)(1 << TAD_SOCK(reg)),
1845 (u32)TAD_CH(reg) + 1,
1846 (u32)TAD_TGT0(reg),
1847 (u32)TAD_TGT1(reg),
1848 (u32)TAD_TGT2(reg),
1849 (u32)TAD_TGT3(reg),
1850 reg);
1851 prv = limit;
1852 }
1853
1854
1855
1856
1857 for (i = 0; i < NUM_CHANNELS; i++) {
1858 if (!pvt->channel[i].dimms)
1859 continue;
1860 for (j = 0; j < n_tads; j++) {
1861 pci_read_config_dword(pvt->pci_tad[i],
1862 tad_ch_nilv_offset[j],
1863 &reg);
1864 tmp_mb = TAD_OFFSET(reg) >> 20;
1865 gb = div_u64_rem(tmp_mb, 1024, &mb);
1866 edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1867 i, j,
1868 gb, (mb*1000)/1024,
1869 ((u64)tmp_mb) << 20L,
1870 reg);
1871 }
1872 }
1873
1874
1875
1876
1877 for (i = 0; i < NUM_CHANNELS; i++) {
1878 if (!pvt->channel[i].dimms)
1879 continue;
1880 for (j = 0; j < MAX_RIR_RANGES; j++) {
1881 pci_read_config_dword(pvt->pci_tad[i],
1882 rir_way_limit[j],
1883 &reg);
1884
1885 if (!IS_RIR_VALID(reg))
1886 continue;
1887
1888 tmp_mb = pvt->info.rir_limit(reg) >> 20;
1889 rir_way = 1 << RIR_WAY(reg);
1890 gb = div_u64_rem(tmp_mb, 1024, &mb);
1891 edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1892 i, j,
1893 gb, (mb*1000)/1024,
1894 ((u64)tmp_mb) << 20L,
1895 rir_way,
1896 reg);
1897
1898 for (k = 0; k < rir_way; k++) {
1899 pci_read_config_dword(pvt->pci_tad[i],
1900 rir_offset[j][k],
1901 &reg);
1902 tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1903
1904 gb = div_u64_rem(tmp_mb, 1024, &mb);
1905 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1906 i, j, k,
1907 gb, (mb*1000)/1024,
1908 ((u64)tmp_mb) << 20L,
1909 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1910 reg);
1911 }
1912 }
1913 }
1914 }
1915
1916 static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1917 {
1918 struct sbridge_dev *sbridge_dev;
1919
1920 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1921 if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1922 return sbridge_dev->mci;
1923 }
1924 return NULL;
1925 }
1926
1927 static int get_memory_error_data(struct mem_ctl_info *mci,
1928 u64 addr,
1929 u8 *socket, u8 *ha,
1930 long *channel_mask,
1931 u8 *rank,
1932 char **area_type, char *msg)
1933 {
1934 struct mem_ctl_info *new_mci;
1935 struct sbridge_pvt *pvt = mci->pvt_info;
1936 struct pci_dev *pci_ha;
1937 int n_rir, n_sads, n_tads, sad_way, sck_xch;
1938 int sad_interl, idx, base_ch;
1939 int interleave_mode, shiftup = 0;
1940 unsigned int sad_interleave[MAX_INTERLEAVE];
1941 u32 reg, dram_rule;
1942 u8 ch_way, sck_way, pkg, sad_ha = 0;
1943 u32 tad_offset;
1944 u32 rir_way;
1945 u32 mb, gb;
1946 u64 ch_addr, offset, limit = 0, prv = 0;
1947
1948 /*
1949  * Step 0) Check if the address is at special memory ranges
1950  * The check below is probably enough to fill all cases where
1951  * the error is not inside a memory, except for the legacy
1952  * range (e.g. VGA addresses). It is unlikely, however, that the
1953  * memory controller would generate an error on that range.
1954  */
1955
1956 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1957 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1958 return -EINVAL;
1959 }
1960 if (addr >= (u64)pvt->tohm) {
1961 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1962 return -EINVAL;
1963 }
1964
1965 /*
1966  * Step 1) Get socket
1967  */
1968 for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1969 pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1970 &reg);
1971
1972 if (!DRAM_RULE_ENABLE(reg))
1973 continue;
1974
1975 limit = pvt->info.sad_limit(reg);
1976 if (limit <= prv) {
1977 sprintf(msg, "Can't discover the memory socket");
1978 return -EINVAL;
1979 }
1980 if (addr <= limit)
1981 break;
1982 prv = limit;
1983 }
1984 if (n_sads == pvt->info.max_sad) {
1985 sprintf(msg, "Can't discover the memory socket");
1986 return -EINVAL;
1987 }
1988 dram_rule = reg;
1989 *area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
1990 interleave_mode = pvt->info.interleave_mode(dram_rule);
1991
1992 pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1993 &reg);
1994
1995 if (pvt->info.type == SANDY_BRIDGE) {
1996 sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1997 for (sad_way = 0; sad_way < 8; sad_way++) {
1998 u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
1999 if (sad_way > 0 && sad_interl == pkg)
2000 break;
2001 sad_interleave[sad_way] = pkg;
2002 edac_dbg(0, "SAD interleave #%d: %d\n",
2003 sad_way, sad_interleave[sad_way]);
2004 }
2005 edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
2006 pvt->sbridge_dev->mc,
2007 n_sads,
2008 addr,
2009 limit,
2010 sad_way + 7,
2011 !interleave_mode ? "" : "XOR[18:16]");
2012 if (interleave_mode)
2013 idx = ((addr >> 6) ^ (addr >> 16)) & 7;
2014 else
2015 idx = (addr >> 6) & 7;
2016 switch (sad_way) {
2017 case 1:
2018 idx = 0;
2019 break;
2020 case 2:
2021 idx = idx & 1;
2022 break;
2023 case 4:
2024 idx = idx & 3;
2025 break;
2026 case 8:
2027 break;
2028 default:
2029 sprintf(msg, "Can't discover socket interleave");
2030 return -EINVAL;
2031 }
2032 *socket = sad_interleave[idx];
2033 edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2034 idx, sad_way, *socket);
2035 } else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2036 int bits, a7mode = A7MODE(dram_rule);
2037
2038 if (a7mode) {
2039
2040 bits = GET_BITFIELD(addr, 7, 8) << 1;
2041 bits |= GET_BITFIELD(addr, 9, 9);
2042 } else
2043 bits = GET_BITFIELD(addr, 6, 8);
2044
2045 if (interleave_mode == 0) {
2046
2047 idx = GET_BITFIELD(addr, 16, 18);
2048 idx ^= bits;
2049 } else
2050 idx = bits;
2051
2052 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2053 *socket = sad_pkg_socket(pkg);
2054 sad_ha = sad_pkg_ha(pkg);
2055
2056 if (a7mode) {
2057
2058 pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2059 shiftup = GET_BITFIELD(reg, 22, 22);
2060 }
2061
2062 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2063 idx, *socket, sad_ha, shiftup);
2064 } else {
2065
2066 idx = (addr >> 6) & 7;
2067 pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2068 *socket = sad_pkg_socket(pkg);
2069 sad_ha = sad_pkg_ha(pkg);
2070 edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2071 idx, *socket, sad_ha);
2072 }
2073
2074 *ha = sad_ha;
2075
2076
2077
2078
2079
2080 new_mci = get_mci_for_node_id(*socket, sad_ha);
2081 if (!new_mci) {
2082 sprintf(msg, "Struct for socket #%u wasn't initialized",
2083 *socket);
2084 return -EINVAL;
2085 }
2086 mci = new_mci;
2087 pvt = mci->pvt_info;
2088
2089 /*
2090  * Step 2) Get memory channel
2091  */
2092 prv = 0;
2093 pci_ha = pvt->pci_ha;
2094 for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2095 pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2096 limit = TAD_LIMIT(reg);
2097 if (limit <= prv) {
2098 sprintf(msg, "Can't discover the memory channel");
2099 return -EINVAL;
2100 }
2101 if (addr <= limit)
2102 break;
2103 prv = limit;
2104 }
2105 if (n_tads == MAX_TAD) {
2106 sprintf(msg, "Can't discover the memory channel");
2107 return -EINVAL;
2108 }
2109
2110 ch_way = TAD_CH(reg) + 1;
2111 sck_way = TAD_SOCK(reg);
2112
2113 if (ch_way == 3)
2114 idx = addr >> 6;
2115 else {
2116 idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2117 if (pvt->is_chan_hash)
2118 idx = haswell_chan_hash(idx, addr);
2119 }
2120 idx = idx % ch_way;
2121
2122
2123
2124
2125 switch (idx) {
2126 case 0:
2127 base_ch = TAD_TGT0(reg);
2128 break;
2129 case 1:
2130 base_ch = TAD_TGT1(reg);
2131 break;
2132 case 2:
2133 base_ch = TAD_TGT2(reg);
2134 break;
2135 case 3:
2136 base_ch = TAD_TGT3(reg);
2137 break;
2138 default:
2139 sprintf(msg, "Can't discover the TAD target");
2140 return -EINVAL;
2141 }
2142 *channel_mask = 1 << base_ch;
2143
2144 pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2145
2146 if (pvt->mirror_mode == FULL_MIRRORING ||
2147 (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
2148 *channel_mask |= 1 << ((base_ch + 2) % 4);
2149 switch(ch_way) {
2150 case 2:
2151 case 4:
2152 sck_xch = (1 << sck_way) * (ch_way >> 1);
2153 break;
2154 default:
2155 sprintf(msg, "Invalid mirror set. Can't decode addr");
2156 return -EINVAL;
2157 }
2158
2159 pvt->is_cur_addr_mirrored = true;
2160 } else {
2161 sck_xch = (1 << sck_way) * ch_way;
2162 pvt->is_cur_addr_mirrored = false;
2163 }
2164
2165 if (pvt->is_lockstep)
2166 *channel_mask |= 1 << ((base_ch + 1) % 4);
2167
2168 offset = TAD_OFFSET(tad_offset);
2169
2170 edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2171 n_tads,
2172 addr,
2173 limit,
2174 sck_way,
2175 ch_way,
2176 offset,
2177 idx,
2178 base_ch,
2179 *channel_mask);
2180
2181 /*
2182  * Calculate channel addr: start by removing the TAD offset
2183  */
2184 if (offset > addr) {
2185 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2186 offset, addr);
2187 return -EINVAL;
2188 }
2189
2190 ch_addr = addr - offset;
2191 ch_addr >>= (6 + shiftup);
2192 ch_addr /= sck_xch;
2193 ch_addr <<= (6 + shiftup);
2194 ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
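/*
 * The divide strips the socket*channel interleave factor out of the
 * cache-line-granular address; the final OR restores the offset inside
 * the line. For example, with shiftup == 0 and sck_xch == 4, system
 * address 0x1000 (offset 0) becomes channel address 0x400.
 */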
2195
2196 /*
2197  * Step 3) Decode rank
2198  */
2199 for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2200 pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2201
2202 if (!IS_RIR_VALID(reg))
2203 continue;
2204
2205 limit = pvt->info.rir_limit(reg);
2206 gb = div_u64_rem(limit >> 20, 1024, &mb);
2207 edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2208 n_rir,
2209 gb, (mb*1000)/1024,
2210 limit,
2211 1 << RIR_WAY(reg));
2212 if (ch_addr <= limit)
2213 break;
2214 }
2215 if (n_rir == MAX_RIR_RANGES) {
2216 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2217 ch_addr);
2218 return -EINVAL;
2219 }
2220 rir_way = RIR_WAY(reg);
2221
2222 if (pvt->is_close_pg)
2223 idx = (ch_addr >> 6);
2224 else
2225 idx = (ch_addr >> 13);
2226 idx %= 1 << rir_way;
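/*
 * Closed-page configs interleave ranks per cache line (ch_addr bit 6
 * up), open-page configs per 8 KiB chunk (bit 13 up); the low rir_way
 * bits of that quotient select the RIR target register below.
 */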
2227
2228 pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2229 *rank = RIR_RNK_TGT(pvt->info.type, reg);
2230
2231 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2232 n_rir,
2233 ch_addr,
2234 limit,
2235 rir_way,
2236 idx);
2237
2238 return 0;
2239 }
2240
2241 static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
2242 const struct mce *m, u8 *socket,
2243 u8 *ha, long *channel_mask,
2244 char *msg)
2245 {
2246 u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
2247 struct mem_ctl_info *new_mci;
2248 struct sbridge_pvt *pvt;
2249 struct pci_dev *pci_ha;
2250 bool tad0;
2251
2252 if (channel >= NUM_CHANNELS) {
2253 sprintf(msg, "Invalid channel 0x%x", channel);
2254 return -EINVAL;
2255 }
2256
2257 pvt = mci->pvt_info;
2258 if (!pvt->info.get_ha) {
2259 sprintf(msg, "No get_ha()");
2260 return -EINVAL;
2261 }
2262 *ha = pvt->info.get_ha(m->bank);
2263 if (*ha != 0 && *ha != 1) {
2264 sprintf(msg, "Impossible bank %d", m->bank);
2265 return -EINVAL;
2266 }
2267
2268 *socket = m->socketid;
2269 new_mci = get_mci_for_node_id(*socket, *ha);
2270 if (!new_mci) {
2271 strcpy(msg, "mci socket got corrupted!");
2272 return -EINVAL;
2273 }
2274
2275 pvt = new_mci->pvt_info;
2276 pci_ha = pvt->pci_ha;
2277 pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
2278 tad0 = m->addr <= TAD_LIMIT(reg);
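/* Only addresses inside TAD0 are mirrored under address-range mirroring. */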
2279
2280 *channel_mask = 1 << channel;
2281 if (pvt->mirror_mode == FULL_MIRRORING ||
2282 (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
2283 *channel_mask |= 1 << ((channel + 2) % 4);
2284 pvt->is_cur_addr_mirrored = true;
2285 } else {
2286 pvt->is_cur_addr_mirrored = false;
2287 }
2288
2289 if (pvt->is_lockstep)
2290 *channel_mask |= 1 << ((channel + 1) % 4);
2291
2292 return 0;
2293 }
2294
2295 /****************************************************************************
2296 			Device initialization routines: put/get, init/exit
2297  ****************************************************************************/
2298 
2299 /*
2300  * sbridge_put_devices	'put' all the PCI devices that were
2301  *			reserved via 'get'
2302  */
2303 static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2304 {
2305 int i;
2306
2307 edac_dbg(0, "\n");
2308 for (i = 0; i < sbridge_dev->n_devs; i++) {
2309 struct pci_dev *pdev = sbridge_dev->pdev[i];
2310 if (!pdev)
2311 continue;
2312 edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2313 pdev->bus->number,
2314 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2315 pci_dev_put(pdev);
2316 }
2317 }
2318
2319 static void sbridge_put_all_devices(void)
2320 {
2321 struct sbridge_dev *sbridge_dev, *tmp;
2322
2323 list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2324 sbridge_put_devices(sbridge_dev);
2325 free_sbridge_dev(sbridge_dev);
2326 }
2327 }
2328
2329 static int sbridge_get_onedevice(struct pci_dev **prev,
2330 u8 *num_mc,
2331 const struct pci_id_table *table,
2332 const unsigned devno,
2333 const int multi_bus)
2334 {
2335 struct sbridge_dev *sbridge_dev = NULL;
2336 const struct pci_id_descr *dev_descr = &table->descr[devno];
2337 struct pci_dev *pdev = NULL;
2338 int seg = 0;
2339 u8 bus = 0;
2340 int i = 0;
2341
2342 sbridge_printk(KERN_DEBUG,
2343 "Seeking for: PCI ID %04x:%04x\n",
2344 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2345
2346 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2347 dev_descr->dev_id, *prev);
2348
2349 if (!pdev) {
2350 if (*prev) {
2351 *prev = pdev;
2352 return 0;
2353 }
2354
2355 if (dev_descr->optional)
2356 return 0;
2357
2358 /* Device 0 (the HA) is mandatory; give up if it is missing */
2359 if (devno == 0)
2360 return -ENODEV;
2361
2362 sbridge_printk(KERN_INFO,
2363 "Device not found: %04x:%04x\n",
2364 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2365
2366 /* End of list, leave */
2367 return -ENODEV;
2368 }
2369 seg = pci_domain_nr(pdev->bus);
2370 bus = pdev->bus->number;
2371
2372 next_imc:
2373 sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
2374 multi_bus, sbridge_dev);
2375 if (!sbridge_dev) {
2376 /* If HA1 was absent, don't create an EDAC instance for IMC1 */
2377 if (dev_descr->dom == IMC1 && devno != 1) {
2378 edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
2379 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2380 pci_dev_put(pdev);
2381 return 0;
2382 }
2383
2384 if (dev_descr->dom == SOCK)
2385 goto out_imc;
2386
2387 sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
2388 if (!sbridge_dev) {
2389 pci_dev_put(pdev);
2390 return -ENOMEM;
2391 }
2392 (*num_mc)++;
2393 }
2394
2395 if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2396 sbridge_printk(KERN_ERR,
2397 "Duplicated device for %04x:%04x\n",
2398 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2399 pci_dev_put(pdev);
2400 return -ENODEV;
2401 }
2402
2403 sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2404
2405 /* pdev belongs to more than one IMC, so do an extra get */
2406 if (++i > 1)
2407 pci_dev_get(pdev);
2408
2409 if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2410 goto next_imc;
2411
2412 out_imc:
2413 /* Be sure that the device is enabled */
2414 if (unlikely(pci_enable_device(pdev) < 0)) {
2415 sbridge_printk(KERN_ERR,
2416 "Couldn't enable %04x:%04x\n",
2417 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2418 return -ENODEV;
2419 }
2420
2421 edac_dbg(0, "Detected %04x:%04x\n",
2422 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2423
2424 /*
2425  * pci_get_device() drops the reference on the device passed in as
2426  * @from, so take an extra reference here to keep the device
2427  * pinned while it sits in sbridge_dev->pdev[].
2428  */
2429 pci_dev_get(pdev);
2430
2431 *prev = pdev;
2432
2433 return 0;
2434 }
2435
2436 /*
2437  * sbridge_get_all_devices - Find and perform 'get' on all the devices
2438  *			     this driver references for one model.
2439  * @num_mc: pointer to the memory controller count, incremented on
2440  *	    success.
2441  * @table: model-specific PCI ID table.
2442  *
2443  * Returns 0 in case of success or an error code.
2444  */
2445 static int sbridge_get_all_devices(u8 *num_mc,
2446 const struct pci_id_table *table)
2447 {
2448 int i, rc;
2449 struct pci_dev *pdev = NULL;
2450 int allow_dups = 0;
2451 int multi_bus = 0;
2452
2453 if (table->type == KNIGHTS_LANDING)
2454 allow_dups = multi_bus = 1;
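/*
 * Knights Landing exposes many instances of the same PCI ID spread
 * across several buses, so duplicated IDs are legitimate there and the
 * device search must not be pinned to a single bus.
 */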
2455 while (table && table->descr) {
2456 for (i = 0; i < table->n_devs_per_sock; i++) {
2457 if (!allow_dups || i == 0 ||
2458 table->descr[i].dev_id !=
2459 table->descr[i-1].dev_id) {
2460 pdev = NULL;
2461 }
2462 do {
2463 rc = sbridge_get_onedevice(&pdev, num_mc,
2464 table, i, multi_bus);
2465 if (rc < 0) {
2466 if (i == 0) {
2467 i = table->n_devs_per_sock;
2468 break;
2469 }
2470 sbridge_put_all_devices();
2471 return -ENODEV;
2472 }
2473 } while (pdev && !allow_dups);
2474 }
2475 table++;
2476 }
2477
2478 return 0;
2479 }
2480
2481 
2482 /*
2483  * TAD registers for channels 0..3 sit on PCI device IDs whose low
2484  * nibble runs from 0xa to 0xd; map a device ID to its channel.
2485  */
2486 #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
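/* For example, a TAD device ID ending in nibble 0xa maps to channel 0. */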
2487
2488 static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2489 struct sbridge_dev *sbridge_dev)
2490 {
2491 struct sbridge_pvt *pvt = mci->pvt_info;
2492 struct pci_dev *pdev;
2493 u8 saw_chan_mask = 0;
2494 int i;
2495
2496 for (i = 0; i < sbridge_dev->n_devs; i++) {
2497 pdev = sbridge_dev->pdev[i];
2498 if (!pdev)
2499 continue;
2500
2501 switch (pdev->device) {
2502 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2503 pvt->pci_sad0 = pdev;
2504 break;
2505 case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2506 pvt->pci_sad1 = pdev;
2507 break;
2508 case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2509 pvt->pci_br0 = pdev;
2510 break;
2511 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2512 pvt->pci_ha = pdev;
2513 break;
2514 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2515 pvt->pci_ta = pdev;
2516 break;
2517 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2518 pvt->pci_ras = pdev;
2519 break;
2520 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2521 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2522 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2523 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2524 {
2525 int id = TAD_DEV_TO_CHAN(pdev->device);
2526 pvt->pci_tad[id] = pdev;
2527 saw_chan_mask |= 1 << id;
2528 }
2529 break;
2530 case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2531 pvt->pci_ddrio = pdev;
2532 break;
2533 default:
2534 goto error;
2535 }
2536
2537 edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2538 pdev->vendor, pdev->device,
2539 sbridge_dev->bus,
2540 pdev);
2541 }
2542
2543 /* Check if everything was registered */
2544 if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2545 !pvt->pci_ras || !pvt->pci_ta)
2546 goto enodev;
2547
2548 if (saw_chan_mask != 0x0f)
2549 goto enodev;
2550 return 0;
2551
2552 enodev:
2553 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2554 return -ENODEV;
2555
2556 error:
2557 sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2558 PCI_VENDOR_ID_INTEL, pdev->device);
2559 return -EINVAL;
2560 }
2561
2562 static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2563 struct sbridge_dev *sbridge_dev)
2564 {
2565 struct sbridge_pvt *pvt = mci->pvt_info;
2566 struct pci_dev *pdev;
2567 u8 saw_chan_mask = 0;
2568 int i;
2569
2570 for (i = 0; i < sbridge_dev->n_devs; i++) {
2571 pdev = sbridge_dev->pdev[i];
2572 if (!pdev)
2573 continue;
2574
2575 switch (pdev->device) {
2576 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2577 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2578 pvt->pci_ha = pdev;
2579 break;
2580 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2581 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2582 pvt->pci_ta = pdev;
2583 break;
2584 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2585 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2586 pvt->pci_ras = pdev;
2587 break;
2588 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2589 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2590 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2591 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2592 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2593 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2594 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2595 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2596 {
2597 int id = TAD_DEV_TO_CHAN(pdev->device);
2598 pvt->pci_tad[id] = pdev;
2599 saw_chan_mask |= 1 << id;
2600 }
2601 break;
2602 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2603 pvt->pci_ddrio = pdev;
2604 break;
2605 case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2606 pvt->pci_ddrio = pdev;
2607 break;
2608 case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2609 pvt->pci_sad0 = pdev;
2610 break;
2611 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2612 pvt->pci_br0 = pdev;
2613 break;
2614 case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2615 pvt->pci_br1 = pdev;
2616 break;
2617 default:
2618 goto error;
2619 }
2620
2621 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2622 sbridge_dev->bus,
2623 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2624 pdev);
2625 }
2626
2627 /* Check if everything was registered */
2628 if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2629 !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2630 goto enodev;
2631
2632 if (saw_chan_mask != 0x0f &&
2633 saw_chan_mask != 0x03)
2634 goto enodev;
2635 return 0;
2636
2637 enodev:
2638 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2639 return -ENODEV;
2640
2641 error:
2642 sbridge_printk(KERN_ERR,
2643 "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2644 pdev->device);
2645 return -EINVAL;
2646 }
2647
2648 static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2649 struct sbridge_dev *sbridge_dev)
2650 {
2651 struct sbridge_pvt *pvt = mci->pvt_info;
2652 struct pci_dev *pdev;
2653 u8 saw_chan_mask = 0;
2654 int i;
2655
2656 /* there's only one VT-d misc device per system; it isn't tied to any bus */
2657 if (pvt->info.pci_vtd == NULL)
2658 /* result will be checked later */
2659 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2660 PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2661 NULL);
2662
2663 for (i = 0; i < sbridge_dev->n_devs; i++) {
2664 pdev = sbridge_dev->pdev[i];
2665 if (!pdev)
2666 continue;
2667
2668 switch (pdev->device) {
2669 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2670 pvt->pci_sad0 = pdev;
2671 break;
2672 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2673 pvt->pci_sad1 = pdev;
2674 break;
2675 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2676 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2677 pvt->pci_ha = pdev;
2678 break;
2679 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2680 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2681 pvt->pci_ta = pdev;
2682 break;
2683 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2684 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2685 pvt->pci_ras = pdev;
2686 break;
2687 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2688 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2689 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2690 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2691 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2692 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2693 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2694 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2695 {
2696 int id = TAD_DEV_TO_CHAN(pdev->device);
2697 pvt->pci_tad[id] = pdev;
2698 saw_chan_mask |= 1 << id;
2699 }
2700 break;
2701 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2702 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2703 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2704 case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2705 if (!pvt->pci_ddrio)
2706 pvt->pci_ddrio = pdev;
2707 break;
2708 default:
2709 break;
2710 }
2711
2712 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2713 sbridge_dev->bus,
2714 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2715 pdev);
2716 }
2717
2718 /* Check if everything was registered */
2719 if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2720 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
2721 goto enodev;
2722
2723 if (saw_chan_mask != 0x0f &&
2724 saw_chan_mask != 0x03)
2725 goto enodev;
2726 return 0;
2727
2728 enodev:
2729 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2730 return -ENODEV;
2731 }
2732
2733 static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2734 struct sbridge_dev *sbridge_dev)
2735 {
2736 struct sbridge_pvt *pvt = mci->pvt_info;
2737 struct pci_dev *pdev;
2738 u8 saw_chan_mask = 0;
2739 int i;
2740
2741 /* there's only one VT-d misc device per system; it isn't tied to any bus */
2742 if (pvt->info.pci_vtd == NULL)
2743 /* result will be checked later */
2744 pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2745 PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2746 NULL);
2747
2748 for (i = 0; i < sbridge_dev->n_devs; i++) {
2749 pdev = sbridge_dev->pdev[i];
2750 if (!pdev)
2751 continue;
2752
2753 switch (pdev->device) {
2754 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2755 pvt->pci_sad0 = pdev;
2756 break;
2757 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2758 pvt->pci_sad1 = pdev;
2759 break;
2760 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2761 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2762 pvt->pci_ha = pdev;
2763 break;
2764 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2765 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2766 pvt->pci_ta = pdev;
2767 break;
2768 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2769 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2770 pvt->pci_ras = pdev;
2771 break;
2772 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2773 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2774 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2775 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2776 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2777 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2778 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2779 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2780 {
2781 int id = TAD_DEV_TO_CHAN(pdev->device);
2782 pvt->pci_tad[id] = pdev;
2783 saw_chan_mask |= 1 << id;
2784 }
2785 break;
2786 case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2787 pvt->pci_ddrio = pdev;
2788 break;
2789 default:
2790 break;
2791 }
2792
2793 edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2794 sbridge_dev->bus,
2795 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2796 pdev);
2797 }
2798
2799 /* Check if everything was registered */
2800 if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2801 !pvt->pci_ras || !pvt->pci_ta || !pvt->info.pci_vtd)
2802 goto enodev;
2803
2804 if (saw_chan_mask != 0x0f &&
2805 saw_chan_mask != 0x03)
2806 goto enodev;
2807 return 0;
2808
2809 enodev:
2810 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2811 return -ENODEV;
2812 }
2813
2814 static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2815 struct sbridge_dev *sbridge_dev)
2816 {
2817 struct sbridge_pvt *pvt = mci->pvt_info;
2818 struct pci_dev *pdev;
2819 int dev, func;
2820
2821 int i;
2822 int devidx;
2823
2824 for (i = 0; i < sbridge_dev->n_devs; i++) {
2825 pdev = sbridge_dev->pdev[i];
2826 if (!pdev)
2827 continue;
2828
2829 /* Extract the PCI device number and function */
2830 dev = (pdev->devfn >> 3) & 0x1f;
2831 func = pdev->devfn & 0x7;
2832
2833 switch (pdev->device) {
2834 case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2835 if (dev == 8)
2836 pvt->knl.pci_mc0 = pdev;
2837 else if (dev == 9)
2838 pvt->knl.pci_mc1 = pdev;
2839 else {
2840 sbridge_printk(KERN_ERR,
2841 "Memory controller in unexpected place! (dev %d, fn %d)\n",
2842 dev, func);
2843 continue;
2844 }
2845 break;
2846
2847 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2848 pvt->pci_sad0 = pdev;
2849 break;
2850
2851 case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2852 pvt->pci_sad1 = pdev;
2853 break;
2854
2855 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2856 /*
2857  * There is one CHA per tile; they range from 1.14.0 to 1.18.5.
2858  */
2859 devidx = ((dev-14)*8)+func;
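/* For example, 1.14.0 yields devidx 0 and 1.18.5 yields devidx 37. */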
2860
2861 if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2862 sbridge_printk(KERN_ERR,
2863 "Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2864 dev, func);
2865 continue;
2866 }
2867
2868 WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2869
2870 pvt->knl.pci_cha[devidx] = pdev;
2871 break;
2872
2873 case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
2874 devidx = -1;
2875
2876 /*
2877  * MC0 channels 0-2 are device 9 functions 2-4,
2878  * MC1 channels 3-5 are device 8 functions 2-4.
2879  */
2880 
2881 if (dev == 9)
2882 devidx = func-2;
2883 else if (dev == 8)
2884 devidx = 3 + (func-2);
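/* For example, dev 9 func 2 is channel 0; dev 8 func 4 is channel 5. */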
2885
2886 if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
2887 sbridge_printk(KERN_ERR,
2888 "DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
2889 dev, func);
2890 continue;
2891 }
2892
2893 WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
2894 pvt->knl.pci_channel[devidx] = pdev;
2895 break;
2896
2897 case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
2898 pvt->knl.pci_mc_info = pdev;
2899 break;
2900
2901 case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
2902 pvt->pci_ta = pdev;
2903 break;
2904
2905 default:
2906 sbridge_printk(KERN_ERR, "Unexpected device %d\n",
2907 pdev->device);
2908 break;
2909 }
2910 }
2911
2912 if (!pvt->knl.pci_mc0 || !pvt->knl.pci_mc1 ||
2913 !pvt->pci_sad0 || !pvt->pci_sad1 ||
2914 !pvt->pci_ta) {
2915 goto enodev;
2916 }
2917
2918 for (i = 0; i < KNL_MAX_CHANNELS; i++) {
2919 if (!pvt->knl.pci_channel[i]) {
2920 sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
2921 goto enodev;
2922 }
2923 }
2924
2925 for (i = 0; i < KNL_MAX_CHAS; i++) {
2926 if (!pvt->knl.pci_cha[i]) {
2927 sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
2928 goto enodev;
2929 }
2930 }
2931
2932 return 0;
2933
2934 enodev:
2935 sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2936 return -ENODEV;
2937 }
2938
2939 /****************************************************************************
2940 			Error check routines
2941  ****************************************************************************/
2942 
2943 /*
2944  * While Sandy Bridge has error count registers, the SMI BIOS reads and
2945  * resets them, so they are not reliable for the OS. We have no option
2946  * but to trust whatever the MCE is telling us about the errors.
2947  */
2948
2949 static void sbridge_mce_output_error(struct mem_ctl_info *mci,
2950 const struct mce *m)
2951 {
2952 struct mem_ctl_info *new_mci;
2953 struct sbridge_pvt *pvt = mci->pvt_info;
2954 enum hw_event_mc_err_type tp_event;
2955 char *type, *optype, msg[256];
2956 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
2957 bool overflow = GET_BITFIELD(m->status, 62, 62);
2958 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
2959 bool recoverable;
2960 u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
2961 u32 mscod = GET_BITFIELD(m->status, 16, 31);
2962 u32 errcode = GET_BITFIELD(m->status, 0, 15);
2963 u32 channel = GET_BITFIELD(m->status, 0, 3);
2964 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
2965
2966 /*
2967  * Bits 5-0 of MCi_MISC give the least significant valid address bit:
2968  * 6 for a cache-line-aligned address, 12 for a page-aligned address
2969  * reported by the patrol scrubber.
2970  */
2970 u32 lsb = GET_BITFIELD(m->misc, 0, 5);
2971 long channel_mask, first_channel;
2972 u8 rank = 0xff, socket, ha;
2973 int rc, dimm;
2974 char *area_type = "DRAM";
2975
2976 if (pvt->info.type != SANDY_BRIDGE)
2977 recoverable = true;
2978 else
2979 recoverable = GET_BITFIELD(m->status, 56, 56);
2980
2981 if (uncorrected_error) {
2982 core_err_cnt = 1;
2983 if (ripv) {
2984 type = "FATAL";
2985 tp_event = HW_EVENT_ERR_FATAL;
2986 } else {
2987 type = "NON_FATAL";
2988 tp_event = HW_EVENT_ERR_UNCORRECTED;
2989 }
2990 } else {
2991 type = "CORRECTED";
2992 tp_event = HW_EVENT_ERR_CORRECTED;
2993 }
2994
2995 /*
2996  * According to Table 15-9 of the Intel Architecture spec vol 3A,
2997  * memory errors should fit in this mask:
2998  *	000f 0000 1mmm cccc (binary)
2999  * where:
3000  *	f = Correction Report Filtering Bit. If 1, subsequent errors
3001  *	    won't be shown
3002  *	mmm = error type
3003  *	cccc = channel
3004  * If the mask doesn't match, report an error to the parsing logic
3005  */
3006 switch (optypenum) {
3007 case 0:
3008 optype = "generic undef request error";
3009 break;
3010 case 1:
3011 optype = "memory read error";
3012 break;
3013 case 2:
3014 optype = "memory write error";
3015 break;
3016 case 3:
3017 optype = "addr/cmd error";
3018 break;
3019 case 4:
3020 optype = "memory scrubbing error";
3021 break;
3022 default:
3023 optype = "reserved";
3024 break;
3025 }
3026
3027 if (pvt->info.type == KNIGHTS_LANDING) {
3028 if (channel == 14) {
3029 edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
3030 overflow ? " OVERFLOW" : "",
3031 (uncorrected_error && recoverable)
3032 ? " recoverable" : "",
3033 mscod, errcode,
3034 m->bank);
3035 } else {
3036 char A = 'A';
3037
3038 /*
3039  * The reported channel is in the range 0-2, so it can't be mapped
3040  * back to the memory controller directly. The machine check bank
3041  * that reported the error tells us which MC it was:
3042  * bank 15 means mc0 and bank 16 means mc1.
3043  */
3044 channel = knl_channel_remap(m->bank == 16, channel);
3045 channel_mask = 1 << channel;
3046
3047 snprintf(msg, sizeof(msg),
3048 "%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
3049 overflow ? " OVERFLOW" : "",
3050 (uncorrected_error && recoverable)
3051 ? " recoverable" : " ",
3052 mscod, errcode, channel, A + channel);
3053 edac_mc_handle_error(tp_event, mci, core_err_cnt,
3054 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3055 channel, 0, -1,
3056 optype, msg);
3057 }
3058 return;
3059 } else if (lsb < 12) {
3060 rc = get_memory_error_data(mci, m->addr, &socket, &ha,
3061 &channel_mask, &rank,
3062 &area_type, msg);
3063 } else {
3064 rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
3065 &channel_mask, msg);
3066 }
3067
3068 if (rc < 0)
3069 goto err_parsing;
3070 new_mci = get_mci_for_node_id(socket, ha);
3071 if (!new_mci) {
3072 strcpy(msg, "Error: socket got corrupted!");
3073 goto err_parsing;
3074 }
3075 mci = new_mci;
3076 pvt = mci->pvt_info;
3077
3078 first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
3079
3080 if (rank == 0xff)
3081 dimm = -1;
3082 else if (rank < 4)
3083 dimm = 0;
3084 else if (rank < 8)
3085 dimm = 1;
3086 else
3087 dimm = 2;
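/*
 * Ranks are grouped four per DIMM slot: ranks 0-3 map to slot 0,
 * 4-7 to slot 1 and 8 up to slot 2; 0xff means the rank is unknown.
 */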
3088
3089 /*
3090  * FIXME: On some memory configurations (mirror, lockstep), the
3091  * Memory Controller can't point the error to a single DIMM. The
3092  * EDAC core should be handling the channel mask, in order to point
3093  * to the group of DIMMs where the error may be happening.
3094  */
3095 if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3096 channel = first_channel;
3097
3098 snprintf(msg, sizeof(msg),
3099 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
3100 overflow ? " OVERFLOW" : "",
3101 (uncorrected_error && recoverable) ? " recoverable" : "",
3102 area_type,
3103 mscod, errcode,
3104 socket, ha,
3105 channel_mask,
3106 rank);
3107
3108 edac_dbg(0, "%s\n", msg);
3109
3110
3111 /* FIXME: need support for channel mask */
3112 if (channel == CHANNEL_UNSPECIFIED)
3113 channel = -1;
3114
3115 /* Call the helper to output message, and register the error */
3116 edac_mc_handle_error(tp_event, mci, core_err_cnt,
3117 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3118 channel, dimm, -1,
3119 optype, msg);
3120 return;
3121 err_parsing:
3122 edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3123 -1, -1, -1,
3124 msg, "");
3125
3126 }
3127
3128 /*
3129  * sbridge_mce_check_error	MCE decoder-chain callback: filter for
3130  *				memory errors and hand them to the parser.
3131  */
3132 static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3133 void *data)
3134 {
3135 struct mce *mce = (struct mce *)data;
3136 struct mem_ctl_info *mci;
3137 char *type;
3138
3139 if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3140 return NOTIFY_DONE;
3141
3142 /*
3143  * Just let mcelog handle it if the error is
3144  * outside the memory controller. A memory error
3145  * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3146  * Bit 12 has a special meaning.
3147  */
3148 if ((mce->status & 0xefff) >> 7 != 1)
3149 return NOTIFY_DONE;
3150
3151 /* Check ADDRV bit in STATUS */
3152 if (!GET_BITFIELD(mce->status, 58, 58))
3153 return NOTIFY_DONE;
3154
3155 /* Check MISCV bit in STATUS */
3156 if (!GET_BITFIELD(mce->status, 59, 59))
3157 return NOTIFY_DONE;
3158
3159 /* Check address type in MISC (physical address only) */
3160 if (GET_BITFIELD(mce->misc, 6, 8) != 2)
3161 return NOTIFY_DONE;
3162
3163 mci = get_mci_for_node_id(mce->socketid, IMC0);
3164 if (!mci)
3165 return NOTIFY_DONE;
3166
3167 if (mce->mcgstatus & MCG_STATUS_MCIP)
3168 type = "Exception";
3169 else
3170 type = "Event";
3171
3172 sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3173
3174 sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3175 "Bank %d: %016Lx\n", mce->extcpu, type,
3176 mce->mcgstatus, mce->bank, mce->status);
3177 sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3178 sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3179 sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3180
3181 sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3182 "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3183 mce->time, mce->socketid, mce->apicid);
3184
3185 sbridge_mce_output_error(mci, mce);
3186
3187 /* Advise mcelog that the error was handled */
3188 return NOTIFY_STOP;
3189 }
3190
3191 static struct notifier_block sbridge_mce_dec = {
3192 .notifier_call = sbridge_mce_check_error,
3193 .priority = MCE_PRIO_EDAC,
3194 };
3195
3196 /****************************************************************************
3197 			EDAC register/unregister logic
3198  ****************************************************************************/
3199 
3200 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3201 {
3202 struct mem_ctl_info *mci = sbridge_dev->mci;
3203 struct sbridge_pvt *pvt;
3204
3205 if (unlikely(!mci || !mci->pvt_info)) {
3206 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3207
3208 sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3209 return;
3210 }
3211
3212 pvt = mci->pvt_info;
3213
3214 edac_dbg(0, "MC: mci = %p, dev = %p\n",
3215 mci, &sbridge_dev->pdev[0]->dev);
3216
3217 /* Remove MC sysfs nodes */
3218 edac_mc_del_mc(mci->pdev);
3219
3220 edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3221 kfree(mci->ctl_name);
3222 edac_mc_free(mci);
3223 sbridge_dev->mci = NULL;
3224 }
3225
3226 static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3227 {
3228 struct mem_ctl_info *mci;
3229 struct edac_mc_layer layers[2];
3230 struct sbridge_pvt *pvt;
3231 struct pci_dev *pdev = sbridge_dev->pdev[0];
3232 int rc;
3233
3234 /* allocate a new MC control structure */
3235 layers[0].type = EDAC_MC_LAYER_CHANNEL;
3236 layers[0].size = type == KNIGHTS_LANDING ?
3237 KNL_MAX_CHANNELS : NUM_CHANNELS;
3238 layers[0].is_virt_csrow = false;
3239 layers[1].type = EDAC_MC_LAYER_SLOT;
3240 layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3241 layers[1].is_virt_csrow = true;
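/*
 * Two-level geometry: channels on top, DIMM slots below; the slot
 * layer also serves as the virtual csrow exposed through sysfs.
 */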
3242 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3243 sizeof(*pvt));
3244
3245 if (unlikely(!mci))
3246 return -ENOMEM;
3247
3248 edac_dbg(0, "MC: mci = %p, dev = %p\n",
3249 mci, &pdev->dev);
3250
3251 pvt = mci->pvt_info;
3252 memset(pvt, 0, sizeof(*pvt));
3253
3254 /* Associate sbridge_dev and mci for future usage */
3255 pvt->sbridge_dev = sbridge_dev;
3256 sbridge_dev->mci = mci;
3257
3258 mci->mtype_cap = type == KNIGHTS_LANDING ?
3259 MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3260 mci->edac_ctl_cap = EDAC_FLAG_NONE;
3261 mci->edac_cap = EDAC_FLAG_NONE;
3262 mci->mod_name = EDAC_MOD_STR;
3263 mci->dev_name = pci_name(pdev);
3264 mci->ctl_page_to_phys = NULL;
3265
3266 pvt->info.type = type;
3267 switch (type) {
3268 case IVY_BRIDGE:
3269 pvt->info.rankcfgr = IB_RANK_CFG_A;
3270 pvt->info.get_tolm = ibridge_get_tolm;
3271 pvt->info.get_tohm = ibridge_get_tohm;
3272 pvt->info.dram_rule = ibridge_dram_rule;
3273 pvt->info.get_memory_type = get_memory_type;
3274 pvt->info.get_node_id = get_node_id;
3275 pvt->info.get_ha = ibridge_get_ha;
3276 pvt->info.rir_limit = rir_limit;
3277 pvt->info.sad_limit = sad_limit;
3278 pvt->info.interleave_mode = interleave_mode;
3279 pvt->info.dram_attr = dram_attr;
3280 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3281 pvt->info.interleave_list = ibridge_interleave_list;
3282 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3283 pvt->info.get_width = ibridge_get_width;
3284
3285 /* Store pci devices at mci for faster access */
3286 rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3287 if (unlikely(rc < 0))
3288 goto fail0;
3289 get_source_id(mci);
3290 mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3291 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3292 break;
3293 case SANDY_BRIDGE:
3294 pvt->info.rankcfgr = SB_RANK_CFG_A;
3295 pvt->info.get_tolm = sbridge_get_tolm;
3296 pvt->info.get_tohm = sbridge_get_tohm;
3297 pvt->info.dram_rule = sbridge_dram_rule;
3298 pvt->info.get_memory_type = get_memory_type;
3299 pvt->info.get_node_id = get_node_id;
3300 pvt->info.get_ha = sbridge_get_ha;
3301 pvt->info.rir_limit = rir_limit;
3302 pvt->info.sad_limit = sad_limit;
3303 pvt->info.interleave_mode = interleave_mode;
3304 pvt->info.dram_attr = dram_attr;
3305 pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3306 pvt->info.interleave_list = sbridge_interleave_list;
3307 pvt->info.interleave_pkg = sbridge_interleave_pkg;
3308 pvt->info.get_width = sbridge_get_width;
3309
3310 /* Store pci devices at mci for faster access */
3311 rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3312 if (unlikely(rc < 0))
3313 goto fail0;
3314 get_source_id(mci);
3315 mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3316 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3317 break;
3318 case HASWELL:
3319 /* rankcfgr isn't used */
3320 pvt->info.get_tolm = haswell_get_tolm;
3321 pvt->info.get_tohm = haswell_get_tohm;
3322 pvt->info.dram_rule = ibridge_dram_rule;
3323 pvt->info.get_memory_type = haswell_get_memory_type;
3324 pvt->info.get_node_id = haswell_get_node_id;
3325 pvt->info.get_ha = ibridge_get_ha;
3326 pvt->info.rir_limit = haswell_rir_limit;
3327 pvt->info.sad_limit = sad_limit;
3328 pvt->info.interleave_mode = interleave_mode;
3329 pvt->info.dram_attr = dram_attr;
3330 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3331 pvt->info.interleave_list = ibridge_interleave_list;
3332 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3333 pvt->info.get_width = ibridge_get_width;
3334
3335 /* Store pci devices at mci for faster access */
3336 rc = haswell_mci_bind_devs(mci, sbridge_dev);
3337 if (unlikely(rc < 0))
3338 goto fail0;
3339 get_source_id(mci);
3340 mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3341 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3342 break;
3343 case BROADWELL:
3344 /* rankcfgr isn't used */
3345 pvt->info.get_tolm = haswell_get_tolm;
3346 pvt->info.get_tohm = haswell_get_tohm;
3347 pvt->info.dram_rule = ibridge_dram_rule;
3348 pvt->info.get_memory_type = haswell_get_memory_type;
3349 pvt->info.get_node_id = haswell_get_node_id;
3350 pvt->info.get_ha = ibridge_get_ha;
3351 pvt->info.rir_limit = haswell_rir_limit;
3352 pvt->info.sad_limit = sad_limit;
3353 pvt->info.interleave_mode = interleave_mode;
3354 pvt->info.dram_attr = dram_attr;
3355 pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3356 pvt->info.interleave_list = ibridge_interleave_list;
3357 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3358 pvt->info.get_width = broadwell_get_width;
3359
3360 /* Store pci devices at mci for faster access */
3361 rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3362 if (unlikely(rc < 0))
3363 goto fail0;
3364 get_source_id(mci);
3365 mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3366 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3367 break;
3368 case KNIGHTS_LANDING:
3369 /* rankcfgr isn't used */
3370 pvt->info.get_tolm = knl_get_tolm;
3371 pvt->info.get_tohm = knl_get_tohm;
3372 pvt->info.dram_rule = knl_dram_rule;
3373 pvt->info.get_memory_type = knl_get_memory_type;
3374 pvt->info.get_node_id = knl_get_node_id;
3375 pvt->info.get_ha = knl_get_ha;
3376 pvt->info.rir_limit = NULL;
3377 pvt->info.sad_limit = knl_sad_limit;
3378 pvt->info.interleave_mode = knl_interleave_mode;
3379 pvt->info.dram_attr = dram_attr_knl;
3380 pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3381 pvt->info.interleave_list = knl_interleave_list;
3382 pvt->info.interleave_pkg = ibridge_interleave_pkg;
3383 pvt->info.get_width = knl_get_width;
3384
3385 rc = knl_mci_bind_devs(mci, sbridge_dev);
3386 if (unlikely(rc < 0))
3387 goto fail0;
3388 get_source_id(mci);
3389 mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3390 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3391 break;
3392 }
3393
3394 if (!mci->ctl_name) {
3395 rc = -ENOMEM;
3396 goto fail0;
3397 }
3398
3399 /* Get dimm basic config and the memory layout */
3400 rc = get_dimm_config(mci);
3401 if (rc < 0) {
3402 edac_dbg(0, "MC: failed to get_dimm_config()\n");
3403 goto fail;
3404 }
3405 get_memory_layout(mci);
3406
3407 /* record ptr to the generic device */
3408 mci->pdev = &pdev->dev;
3409
3410 /* add this new MC control structure to EDAC's list of MCs */
3411 if (unlikely(edac_mc_add_mc(mci))) {
3412 edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3413 rc = -EINVAL;
3414 goto fail;
3415 }
3416
3417 return 0;
3418
3419 fail:
3420 kfree(mci->ctl_name);
3421 fail0:
3422 edac_mc_free(mci);
3423 sbridge_dev->mci = NULL;
3424 return rc;
3425 }
3426
3427 static const struct x86_cpu_id sbridge_cpuids[] = {
3428 INTEL_CPU_FAM6(SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
3429 INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table),
3430 INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table),
3431 INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table),
3432 INTEL_CPU_FAM6(BROADWELL_D, pci_dev_descr_broadwell_table),
3433 INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table),
3434 INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table),
3435 { }
3436 };
3437 MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
3438
3439 /*
3440  * sbridge_probe	Get all devices and register the memory controllers
3441  *			present.
3442  * return:
3443  *	0 when at least one device was found and registered
3444  *	< 0 for error code
3445  */
3446 
3447 static int sbridge_probe(const struct x86_cpu_id *id)
3448 {
3449 int rc = -ENODEV;
3450 u8 mc, num_mc = 0;
3451 struct sbridge_dev *sbridge_dev;
3452 struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3453
3454 /* get the pci devices we want to reserve for our use */
3455 rc = sbridge_get_all_devices(&num_mc, ptable);
3456
3457 if (unlikely(rc < 0)) {
3458 edac_dbg(0, "couldn't get all devices\n");
3459 goto fail0;
3460 }
3461
3462 mc = 0;
3463
3464 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3465 edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3466 mc, mc + 1, num_mc);
3467
3468 sbridge_dev->mc = mc++;
3469 rc = sbridge_register_mci(sbridge_dev, ptable->type);
3470 if (unlikely(rc < 0))
3471 goto fail1;
3472 }
3473
3474 sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3475
3476 return 0;
3477
3478 fail1:
3479 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3480 sbridge_unregister_mci(sbridge_dev);
3481
3482 sbridge_put_all_devices();
3483 fail0:
3484 return rc;
3485 }
3486
3487 /*
3488  * sbridge_remove	Unregister all memory controllers and release
3489  *			the reserved PCI devices.
3490  */
3491 static void sbridge_remove(void)
3492 {
3493 struct sbridge_dev *sbridge_dev;
3494
3495 edac_dbg(0, "\n");
3496
3497 list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3498 sbridge_unregister_mci(sbridge_dev);
3499
3500 /* Release PCI resources */
3501 sbridge_put_all_devices();
3502 }
3503
3504 /*
3505  * sbridge_init	Module entry function; probes the CPU and
3506  *			registers the driver for its devices.
3507  */
3508 static int __init sbridge_init(void)
3509 {
3510 const struct x86_cpu_id *id;
3511 const char *owner;
3512 int rc;
3513
3514 edac_dbg(2, "\n");
3515
3516 owner = edac_get_owner();
3517 if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3518 return -EBUSY;
3519
3520 id = x86_match_cpu(sbridge_cpuids);
3521 if (!id)
3522 return -ENODEV;
3523
3524 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
3525 opstate_init();
3526
3527 rc = sbridge_probe(id);
3528
3529 if (rc >= 0) {
3530 mce_register_decode_chain(&sbridge_mce_dec);
3531 if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
3532 sbridge_printk(KERN_WARNING, "Loading driver, error reporting disabled.\n");
3533 return 0;
3534 }
3535
3536 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3537 rc);
3538
3539 return rc;
3540 }
3541
3542 /*
3543  * sbridge_exit	Module exit function; unregisters the driver.
3544  */
3545 
3546 static void __exit sbridge_exit(void)
3547 {
3548 edac_dbg(2, "\n");
3549 sbridge_remove();
3550 mce_unregister_decode_chain(&sbridge_mce_dec);
3551 }
3552
3553 module_init(sbridge_init);
3554 module_exit(sbridge_exit);
3555
3556 module_param(edac_op_state, int, 0444);
3557 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
3558
3559 MODULE_LICENSE("GPL");
3560 MODULE_AUTHOR("Mauro Carvalho Chehab");
3561 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
3562 MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3563 SBRIDGE_REVISION);