root/drivers/edac/i5100_edac.c


DEFINITIONS

This source file includes the following definitions.
  1. i5100_mc_scrben
  2. i5100_mc_errdeten
  3. i5100_mc_scrbdone
  4. i5100_spddata_rdo
  5. i5100_spddata_sbe
  6. i5100_spddata_busy
  7. i5100_spddata_data
  8. i5100_spdcmd_create
  9. i5100_tolm_tolm
  10. i5100_mir_limit
  11. i5100_mir_way1
  12. i5100_mir_way0
  13. i5100_ferr_nf_mem_chan_indx
  14. i5100_ferr_nf_mem_any
  15. i5100_nerr_nf_mem_any
  16. i5100_dmir_limit
  17. i5100_dmir_rank
  18. i5100_mtr_present
  19. i5100_mtr_ethrottle
  20. i5100_mtr_width
  21. i5100_mtr_numbank
  22. i5100_mtr_numrow
  23. i5100_mtr_numcol
  24. i5100_validlog_redmemvalid
  25. i5100_validlog_recmemvalid
  26. i5100_validlog_nrecmemvalid
  27. i5100_nrecmema_merr
  28. i5100_nrecmema_bank
  29. i5100_nrecmema_rank
  30. i5100_nrecmema_dm_buf_id
  31. i5100_nrecmemb_cas
  32. i5100_nrecmemb_ras
  33. i5100_redmemb_ecc_locator
  34. i5100_recmema_merr
  35. i5100_recmema_bank
  36. i5100_recmema_rank
  37. i5100_recmemb_cas
  38. i5100_recmemb_ras
  39. i5100_rank_to_slot
  40. i5100_err_msg
  41. i5100_csrow_to_rank
  42. i5100_csrow_to_chan
  43. i5100_handle_ce
  44. i5100_handle_ue
  45. i5100_read_log
  46. i5100_check_error
  47. i5100_refresh_scrubbing
  48. i5100_set_scrub_rate
  49. i5100_get_scrub_rate
  50. pci_get_device_func
  51. i5100_npages
  52. i5100_init_mtr
  53. i5100_read_spd_byte
  54. i5100_init_dimm_csmap
  55. i5100_init_dimm_layout
  56. i5100_init_interleaving
  57. i5100_init_csrows
  58. i5100_do_inject
  59. inject_enable_write
  60. i5100_setup_debugfs
  61. i5100_init_one
  62. i5100_remove_one
  63. i5100_init
  64. i5100_exit

   1 /*
   2  * Intel 5100 Memory Controllers kernel module
   3  *
   4  * This file may be distributed under the terms of the
   5  * GNU General Public License.
   6  *
   7  * This module is based on the following document:
   8  *
   9  * Intel 5100X Chipset Memory Controller Hub (MCH) - Datasheet
  10  *      http://download.intel.com/design/chipsets/datashts/318378.pdf
  11  *
   12  * The Intel 5100 has two independent channels. The EDAC core currently
   13  * cannot reflect this configuration, so instead the chip-select
   14  * rows for each respective channel are laid out one after another,
   15  * the first half belonging to channel 0, the second half belonging
   16  * to channel 1.
   17  *
   18  * This driver is for DDR2 DIMMs, and it uses chip select to select among the
   19  * several ranks. However, instead of showing memories as ranks, it outputs
   20  * them as DIMMs. An internal table creates the association between ranks
   21  * and DIMMs.
   22  */
  23 #include <linux/module.h>
  24 #include <linux/init.h>
  25 #include <linux/pci.h>
  26 #include <linux/pci_ids.h>
  27 #include <linux/edac.h>
  28 #include <linux/delay.h>
  29 #include <linux/mmzone.h>
  30 #include <linux/debugfs.h>
  31 
  32 #include "edac_module.h"
  33 
  34 /* register addresses */
  35 
  36 /* device 16, func 1 */
  37 #define I5100_MC                0x40    /* Memory Control Register */
  38 #define         I5100_MC_SCRBEN_MASK    (1 << 7)
  39 #define         I5100_MC_SCRBDONE_MASK  (1 << 4)
  40 #define I5100_MS                0x44    /* Memory Status Register */
  41 #define I5100_SPDDATA           0x48    /* Serial Presence Detect Status Reg */
  42 #define I5100_SPDCMD            0x4c    /* Serial Presence Detect Command Reg */
  43 #define I5100_TOLM              0x6c    /* Top of Low Memory */
  44 #define I5100_MIR0              0x80    /* Memory Interleave Range 0 */
  45 #define I5100_MIR1              0x84    /* Memory Interleave Range 1 */
  46 #define I5100_AMIR_0            0x8c    /* Adjusted Memory Interleave Range 0 */
  47 #define I5100_AMIR_1            0x90    /* Adjusted Memory Interleave Range 1 */
  48 #define I5100_FERR_NF_MEM       0xa0    /* MC First Non Fatal Errors */
  49 #define         I5100_FERR_NF_MEM_M16ERR_MASK   (1 << 16)
  50 #define         I5100_FERR_NF_MEM_M15ERR_MASK   (1 << 15)
  51 #define         I5100_FERR_NF_MEM_M14ERR_MASK   (1 << 14)
  52 #define         I5100_FERR_NF_MEM_M12ERR_MASK   (1 << 12)
  53 #define         I5100_FERR_NF_MEM_M11ERR_MASK   (1 << 11)
  54 #define         I5100_FERR_NF_MEM_M10ERR_MASK   (1 << 10)
  55 #define         I5100_FERR_NF_MEM_M6ERR_MASK    (1 << 6)
  56 #define         I5100_FERR_NF_MEM_M5ERR_MASK    (1 << 5)
  57 #define         I5100_FERR_NF_MEM_M4ERR_MASK    (1 << 4)
  58 #define         I5100_FERR_NF_MEM_M1ERR_MASK    (1 << 1)
  59 #define         I5100_FERR_NF_MEM_ANY_MASK      \
  60                         (I5100_FERR_NF_MEM_M16ERR_MASK | \
  61                         I5100_FERR_NF_MEM_M15ERR_MASK | \
  62                         I5100_FERR_NF_MEM_M14ERR_MASK | \
  63                         I5100_FERR_NF_MEM_M12ERR_MASK | \
  64                         I5100_FERR_NF_MEM_M11ERR_MASK | \
  65                         I5100_FERR_NF_MEM_M10ERR_MASK | \
  66                         I5100_FERR_NF_MEM_M6ERR_MASK | \
  67                         I5100_FERR_NF_MEM_M5ERR_MASK | \
  68                         I5100_FERR_NF_MEM_M4ERR_MASK | \
  69                         I5100_FERR_NF_MEM_M1ERR_MASK)
  70 #define I5100_NERR_NF_MEM       0xa4    /* MC Next Non-Fatal Errors */
  71 #define I5100_EMASK_MEM         0xa8    /* MC Error Mask Register */
  72 #define I5100_MEM0EINJMSK0      0x200   /* Injection Mask0 Register Channel 0 */
  73 #define I5100_MEM1EINJMSK0      0x208   /* Injection Mask0 Register Channel 1 */
  74 #define         I5100_MEMXEINJMSK0_EINJEN       (1 << 27)
  75 #define I5100_MEM0EINJMSK1      0x204   /* Injection Mask1 Register Channel 0 */
   76 #define I5100_MEM1EINJMSK1      0x20c   /* Injection Mask1 Register Channel 1 */
  77 
  78 /* Device 19, Function 0 */
  79 #define I5100_DINJ0 0x9a
  80 
  81 /* device 21 and 22, func 0 */
  82 #define I5100_MTR_0     0x154   /* Memory Technology Registers 0-3 */
  83 #define I5100_DMIR      0x15c   /* DIMM Interleave Range */
  84 #define I5100_VALIDLOG  0x18c   /* Valid Log Markers */
  85 #define I5100_NRECMEMA  0x190   /* Non-Recoverable Memory Error Log Reg A */
  86 #define I5100_NRECMEMB  0x194   /* Non-Recoverable Memory Error Log Reg B */
  87 #define I5100_REDMEMA   0x198   /* Recoverable Memory Data Error Log Reg A */
  88 #define I5100_REDMEMB   0x19c   /* Recoverable Memory Data Error Log Reg B */
  89 #define I5100_RECMEMA   0x1a0   /* Recoverable Memory Error Log Reg A */
  90 #define I5100_RECMEMB   0x1a4   /* Recoverable Memory Error Log Reg B */
  91 #define I5100_MTR_4     0x1b0   /* Memory Technology Registers 4,5 */
  92 
  93 /* bit field accessors */
  94 
  95 static inline u32 i5100_mc_scrben(u32 mc)
  96 {
  97         return mc >> 7 & 1;
  98 }
  99 
 100 static inline u32 i5100_mc_errdeten(u32 mc)
 101 {
 102         return mc >> 5 & 1;
 103 }
 104 
 105 static inline u32 i5100_mc_scrbdone(u32 mc)
 106 {
 107         return mc >> 4 & 1;
 108 }
 109 
 110 static inline u16 i5100_spddata_rdo(u16 a)
 111 {
 112         return a >> 15 & 1;
 113 }
 114 
 115 static inline u16 i5100_spddata_sbe(u16 a)
 116 {
 117         return a >> 13 & 1;
 118 }
 119 
 120 static inline u16 i5100_spddata_busy(u16 a)
 121 {
 122         return a >> 12 & 1;
 123 }
 124 
 125 static inline u16 i5100_spddata_data(u16 a)
 126 {
 127         return a & ((1 << 8) - 1);
 128 }
 129 
 130 static inline u32 i5100_spdcmd_create(u32 dti, u32 ckovrd, u32 sa, u32 ba,
 131                                       u32 data, u32 cmd)
 132 {
 133         return  ((dti & ((1 << 4) - 1))  << 28) |
 134                 ((ckovrd & 1)            << 27) |
 135                 ((sa & ((1 << 3) - 1))   << 24) |
 136                 ((ba & ((1 << 8) - 1))   << 16) |
 137                 ((data & ((1 << 8) - 1)) <<  8) |
 138                 (cmd & 1);
 139 }
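
/*
 * Illustrative example only (not used by the driver): reading SPD byte 3
 * from the DIMM in channel 1, slot 1 uses sa = 1 * 4 + 1 = 5 and ba = 3, so
 *
 *     i5100_spdcmd_create(0xa, 1, 5, 3, 0, 0)
 *         == (0xa << 28) | (1 << 27) | (5 << 24) | (3 << 16)
 *         == 0xad030000
 *
 * i.e. device type 0xa (the standard SPD EEPROM select code), clock
 * override asserted, SPD bus address 5, byte address 3, no write data,
 * read command.
 */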
 140 
 141 static inline u16 i5100_tolm_tolm(u16 a)
 142 {
 143         return a >> 12 & ((1 << 4) - 1);
 144 }
 145 
 146 static inline u16 i5100_mir_limit(u16 a)
 147 {
 148         return a >> 4 & ((1 << 12) - 1);
 149 }
 150 
 151 static inline u16 i5100_mir_way1(u16 a)
 152 {
 153         return a >> 1 & 1;
 154 }
 155 
 156 static inline u16 i5100_mir_way0(u16 a)
 157 {
 158         return a & 1;
 159 }
 160 
 161 static inline u32 i5100_ferr_nf_mem_chan_indx(u32 a)
 162 {
 163         return a >> 28 & 1;
 164 }
 165 
 166 static inline u32 i5100_ferr_nf_mem_any(u32 a)
 167 {
 168         return a & I5100_FERR_NF_MEM_ANY_MASK;
 169 }
 170 
 171 static inline u32 i5100_nerr_nf_mem_any(u32 a)
 172 {
 173         return i5100_ferr_nf_mem_any(a);
 174 }
 175 
 176 static inline u32 i5100_dmir_limit(u32 a)
 177 {
 178         return a >> 16 & ((1 << 11) - 1);
 179 }
 180 
 181 static inline u32 i5100_dmir_rank(u32 a, u32 i)
 182 {
 183         return a >> (4 * i) & ((1 << 2) - 1);
 184 }
 185 
 186 static inline u16 i5100_mtr_present(u16 a)
 187 {
 188         return a >> 10 & 1;
 189 }
 190 
 191 static inline u16 i5100_mtr_ethrottle(u16 a)
 192 {
 193         return a >> 9 & 1;
 194 }
 195 
 196 static inline u16 i5100_mtr_width(u16 a)
 197 {
 198         return a >> 8 & 1;
 199 }
 200 
 201 static inline u16 i5100_mtr_numbank(u16 a)
 202 {
 203         return a >> 6 & 1;
 204 }
 205 
 206 static inline u16 i5100_mtr_numrow(u16 a)
 207 {
 208         return a >> 2 & ((1 << 2) - 1);
 209 }
 210 
 211 static inline u16 i5100_mtr_numcol(u16 a)
 212 {
 213         return a & ((1 << 2) - 1);
 214 }
 215 
 216 
 217 static inline u32 i5100_validlog_redmemvalid(u32 a)
 218 {
 219         return a >> 2 & 1;
 220 }
 221 
 222 static inline u32 i5100_validlog_recmemvalid(u32 a)
 223 {
 224         return a >> 1 & 1;
 225 }
 226 
 227 static inline u32 i5100_validlog_nrecmemvalid(u32 a)
 228 {
 229         return a & 1;
 230 }
 231 
 232 static inline u32 i5100_nrecmema_merr(u32 a)
 233 {
 234         return a >> 15 & ((1 << 5) - 1);
 235 }
 236 
 237 static inline u32 i5100_nrecmema_bank(u32 a)
 238 {
 239         return a >> 12 & ((1 << 3) - 1);
 240 }
 241 
 242 static inline u32 i5100_nrecmema_rank(u32 a)
 243 {
 244         return a >>  8 & ((1 << 3) - 1);
 245 }
 246 
 247 static inline u32 i5100_nrecmema_dm_buf_id(u32 a)
 248 {
 249         return a & ((1 << 8) - 1);
 250 }
 251 
 252 static inline u32 i5100_nrecmemb_cas(u32 a)
 253 {
 254         return a >> 16 & ((1 << 13) - 1);
 255 }
 256 
 257 static inline u32 i5100_nrecmemb_ras(u32 a)
 258 {
 259         return a & ((1 << 16) - 1);
 260 }
 261 
 262 static inline u32 i5100_redmemb_ecc_locator(u32 a)
 263 {
 264         return a & ((1 << 18) - 1);
 265 }
 266 
 267 static inline u32 i5100_recmema_merr(u32 a)
 268 {
 269         return i5100_nrecmema_merr(a);
 270 }
 271 
 272 static inline u32 i5100_recmema_bank(u32 a)
 273 {
 274         return i5100_nrecmema_bank(a);
 275 }
 276 
 277 static inline u32 i5100_recmema_rank(u32 a)
 278 {
 279         return i5100_nrecmema_rank(a);
 280 }
 281 
 282 static inline u32 i5100_recmemb_cas(u32 a)
 283 {
 284         return i5100_nrecmemb_cas(a);
 285 }
 286 
 287 static inline u32 i5100_recmemb_ras(u32 a)
 288 {
 289         return i5100_nrecmemb_ras(a);
 290 }
 291 
 292 /* some generic limits */
 293 #define I5100_MAX_RANKS_PER_CHAN        6
  294 #define I5100_CHANNELS                  2
 295 #define I5100_MAX_RANKS_PER_DIMM        4
 296 #define I5100_DIMM_ADDR_LINES           (6 - 3) /* 64 bits / 8 bits per byte */
 297 #define I5100_MAX_DIMM_SLOTS_PER_CHAN   4
 298 #define I5100_MAX_RANK_INTERLEAVE       4
 299 #define I5100_MAX_DMIRS                 5
 300 #define I5100_SCRUB_REFRESH_RATE        (5 * 60 * HZ)
 301 
 302 struct i5100_priv {
 303         /* ranks on each dimm -- 0 maps to not present -- obtained via SPD */
 304         int dimm_numrank[I5100_CHANNELS][I5100_MAX_DIMM_SLOTS_PER_CHAN];
 305 
 306         /*
 307          * mainboard chip select map -- maps i5100 chip selects to
 308          * DIMM slot chip selects.  In the case of only 4 ranks per
 309          * channel, the mapping is fairly obvious but not unique.
  310          * We map -1 -> NC and assume both channels use the same
 311          * map...
 312          *
 313          */
 314         int dimm_csmap[I5100_MAX_DIMM_SLOTS_PER_CHAN][I5100_MAX_RANKS_PER_DIMM];
 315 
 316         /* memory interleave range */
 317         struct {
 318                 u64      limit;
 319                 unsigned way[2];
 320         } mir[I5100_CHANNELS];
 321 
 322         /* adjusted memory interleave range register */
 323         unsigned amir[I5100_CHANNELS];
 324 
 325         /* dimm interleave range */
 326         struct {
 327                 unsigned rank[I5100_MAX_RANK_INTERLEAVE];
 328                 u64      limit;
 329         } dmir[I5100_CHANNELS][I5100_MAX_DMIRS];
 330 
 331         /* memory technology registers... */
 332         struct {
 333                 unsigned present;       /* 0 or 1 */
 334                 unsigned ethrottle;     /* 0 or 1 */
 335                 unsigned width;         /* 4 or 8 bits  */
 336                 unsigned numbank;       /* 2 or 3 lines */
 337                 unsigned numrow;        /* 13 .. 16 lines */
 338                 unsigned numcol;        /* 11 .. 12 lines */
 339         } mtr[I5100_CHANNELS][I5100_MAX_RANKS_PER_CHAN];
 340 
 341         u64 tolm;               /* top of low memory in bytes */
 342         unsigned ranksperchan;  /* number of ranks per channel */
 343 
 344         struct pci_dev *mc;     /* device 16 func 1 */
 345         struct pci_dev *einj;   /* device 19 func 0 */
 346         struct pci_dev *ch0mm;  /* device 21 func 0 */
 347         struct pci_dev *ch1mm;  /* device 22 func 0 */
 348 
 349         struct delayed_work i5100_scrubbing;
 350         int scrub_enable;
 351 
 352         /* Error injection */
 353         u8 inject_channel;
 354         u8 inject_hlinesel;
 355         u8 inject_deviceptr1;
 356         u8 inject_deviceptr2;
 357         u16 inject_eccmask1;
 358         u16 inject_eccmask2;
 359 
 360         struct dentry *debugfs;
 361 };
 362 
 363 static struct dentry *i5100_debugfs;
 364 
 365 /* map a rank/chan to a slot number on the mainboard */
 366 static int i5100_rank_to_slot(const struct mem_ctl_info *mci,
 367                               int chan, int rank)
 368 {
 369         const struct i5100_priv *priv = mci->pvt_info;
 370         int i;
 371 
 372         for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
 373                 int j;
 374                 const int numrank = priv->dimm_numrank[chan][i];
 375 
 376                 for (j = 0; j < numrank; j++)
 377                         if (priv->dimm_csmap[i][j] == rank)
 378                                 return i * 2 + chan;
 379         }
 380 
 381         return -1;
 382 }
 383 
 384 static const char *i5100_err_msg(unsigned err)
 385 {
 386         static const char *merrs[] = {
 387                 "unknown", /* 0 */
 388                 "uncorrectable data ECC on replay", /* 1 */
 389                 "unknown", /* 2 */
 390                 "unknown", /* 3 */
 391                 "aliased uncorrectable demand data ECC", /* 4 */
 392                 "aliased uncorrectable spare-copy data ECC", /* 5 */
 393                 "aliased uncorrectable patrol data ECC", /* 6 */
 394                 "unknown", /* 7 */
 395                 "unknown", /* 8 */
 396                 "unknown", /* 9 */
 397                 "non-aliased uncorrectable demand data ECC", /* 10 */
 398                 "non-aliased uncorrectable spare-copy data ECC", /* 11 */
 399                 "non-aliased uncorrectable patrol data ECC", /* 12 */
 400                 "unknown", /* 13 */
 401                 "correctable demand data ECC", /* 14 */
 402                 "correctable spare-copy data ECC", /* 15 */
 403                 "correctable patrol data ECC", /* 16 */
 404                 "unknown", /* 17 */
 405                 "SPD protocol error", /* 18 */
 406                 "unknown", /* 19 */
 407                 "spare copy initiated", /* 20 */
 408                 "spare copy completed", /* 21 */
 409         };
 410         unsigned i;
 411 
 412         for (i = 0; i < ARRAY_SIZE(merrs); i++)
 413                 if (1 << i & err)
 414                         return merrs[i];
 415 
 416         return "none";
 417 }
 418 
 419 /* convert csrow index into a rank (per channel -- 0..5) */
 420 static unsigned int i5100_csrow_to_rank(const struct mem_ctl_info *mci,
 421                                         unsigned int csrow)
 422 {
 423         const struct i5100_priv *priv = mci->pvt_info;
 424 
 425         return csrow % priv->ranksperchan;
 426 }
 427 
 428 /* convert csrow index into a channel (0..1) */
 429 static unsigned int i5100_csrow_to_chan(const struct mem_ctl_info *mci,
 430                                         unsigned int csrow)
 431 {
 432         const struct i5100_priv *priv = mci->pvt_info;
 433 
 434         return csrow / priv->ranksperchan;
 435 }
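
/*
 * Example of the layout described at the top of this file: with
 * priv->ranksperchan == 6, csrow 8 decodes to channel 8 / 6 = 1 and
 * rank 8 % 6 = 2, i.e. the first six csrows describe channel 0 and the
 * next six describe channel 1.
 */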
 436 
 437 static void i5100_handle_ce(struct mem_ctl_info *mci,
 438                             int chan,
 439                             unsigned bank,
 440                             unsigned rank,
 441                             unsigned long syndrome,
 442                             unsigned cas,
 443                             unsigned ras,
 444                             const char *msg)
 445 {
 446         char detail[80];
 447 
  448         /* Form the error message */
 449         snprintf(detail, sizeof(detail),
 450                  "bank %u, cas %u, ras %u\n",
 451                  bank, cas, ras);
 452 
 453         edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
 454                              0, 0, syndrome,
 455                              chan, rank, -1,
 456                              msg, detail);
 457 }
 458 
 459 static void i5100_handle_ue(struct mem_ctl_info *mci,
 460                             int chan,
 461                             unsigned bank,
 462                             unsigned rank,
 463                             unsigned long syndrome,
 464                             unsigned cas,
 465                             unsigned ras,
 466                             const char *msg)
 467 {
 468         char detail[80];
 469 
  470         /* Form the error message */
 471         snprintf(detail, sizeof(detail),
 472                  "bank %u, cas %u, ras %u\n",
 473                  bank, cas, ras);
 474 
 475         edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
 476                              0, 0, syndrome,
 477                              chan, rank, -1,
 478                              msg, detail);
 479 }
 480 
 481 static void i5100_read_log(struct mem_ctl_info *mci, int chan,
 482                            u32 ferr, u32 nerr)
 483 {
 484         struct i5100_priv *priv = mci->pvt_info;
 485         struct pci_dev *pdev = (chan) ? priv->ch1mm : priv->ch0mm;
 486         u32 dw;
 487         u32 dw2;
 488         unsigned syndrome = 0;
 489         unsigned ecc_loc = 0;
 490         unsigned merr;
 491         unsigned bank;
 492         unsigned rank;
 493         unsigned cas;
 494         unsigned ras;
 495 
 496         pci_read_config_dword(pdev, I5100_VALIDLOG, &dw);
 497 
 498         if (i5100_validlog_redmemvalid(dw)) {
 499                 pci_read_config_dword(pdev, I5100_REDMEMA, &dw2);
 500                 syndrome = dw2;
 501                 pci_read_config_dword(pdev, I5100_REDMEMB, &dw2);
 502                 ecc_loc = i5100_redmemb_ecc_locator(dw2);
 503         }
 504 
 505         if (i5100_validlog_recmemvalid(dw)) {
 506                 const char *msg;
 507 
 508                 pci_read_config_dword(pdev, I5100_RECMEMA, &dw2);
 509                 merr = i5100_recmema_merr(dw2);
 510                 bank = i5100_recmema_bank(dw2);
 511                 rank = i5100_recmema_rank(dw2);
 512 
 513                 pci_read_config_dword(pdev, I5100_RECMEMB, &dw2);
 514                 cas = i5100_recmemb_cas(dw2);
 515                 ras = i5100_recmemb_ras(dw2);
 516 
 517                 /* FIXME:  not really sure if this is what merr is...
 518                  */
 519                 if (!merr)
 520                         msg = i5100_err_msg(ferr);
 521                 else
 522                         msg = i5100_err_msg(nerr);
 523 
 524                 i5100_handle_ce(mci, chan, bank, rank, syndrome, cas, ras, msg);
 525         }
 526 
 527         if (i5100_validlog_nrecmemvalid(dw)) {
 528                 const char *msg;
 529 
 530                 pci_read_config_dword(pdev, I5100_NRECMEMA, &dw2);
 531                 merr = i5100_nrecmema_merr(dw2);
 532                 bank = i5100_nrecmema_bank(dw2);
 533                 rank = i5100_nrecmema_rank(dw2);
 534 
 535                 pci_read_config_dword(pdev, I5100_NRECMEMB, &dw2);
 536                 cas = i5100_nrecmemb_cas(dw2);
 537                 ras = i5100_nrecmemb_ras(dw2);
 538 
 539                 /* FIXME:  not really sure if this is what merr is...
 540                  */
 541                 if (!merr)
 542                         msg = i5100_err_msg(ferr);
 543                 else
 544                         msg = i5100_err_msg(nerr);
 545 
 546                 i5100_handle_ue(mci, chan, bank, rank, syndrome, cas, ras, msg);
 547         }
 548 
 549         pci_write_config_dword(pdev, I5100_VALIDLOG, dw);
 550 }
 551 
 552 static void i5100_check_error(struct mem_ctl_info *mci)
 553 {
 554         struct i5100_priv *priv = mci->pvt_info;
 555         u32 dw, dw2;
 556 
 557         pci_read_config_dword(priv->mc, I5100_FERR_NF_MEM, &dw);
 558         if (i5100_ferr_nf_mem_any(dw)) {
 559 
 560                 pci_read_config_dword(priv->mc, I5100_NERR_NF_MEM, &dw2);
 561 
 562                 i5100_read_log(mci, i5100_ferr_nf_mem_chan_indx(dw),
 563                                i5100_ferr_nf_mem_any(dw),
 564                                i5100_nerr_nf_mem_any(dw2));
 565 
 566                 pci_write_config_dword(priv->mc, I5100_NERR_NF_MEM, dw2);
 567         }
 568         pci_write_config_dword(priv->mc, I5100_FERR_NF_MEM, dw);
 569 }
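
/*
 * Note on the write-backs above: FERR_NF_MEM and NERR_NF_MEM appear to be
 * "write 1 to clear" status registers, as is common on Intel memory
 * controller hubs, so writing back the freshly read values acknowledges
 * the logged errors and re-arms the registers for the next event.
 */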
 570 
  571 /* The i5100 chipset will scrub the entire memory once, then
  572  * set a done bit. Continuous scrubbing is achieved by enqueuing
  573  * delayed work to a workqueue, checking every few minutes whether
  574  * the scrubbing has completed and, if so, reinitiating it.
  575  */
 576 
 577 static void i5100_refresh_scrubbing(struct work_struct *work)
 578 {
 579         struct delayed_work *i5100_scrubbing = to_delayed_work(work);
 580         struct i5100_priv *priv = container_of(i5100_scrubbing,
 581                                                struct i5100_priv,
 582                                                i5100_scrubbing);
 583         u32 dw;
 584 
 585         pci_read_config_dword(priv->mc, I5100_MC, &dw);
 586 
 587         if (priv->scrub_enable) {
 588 
 589                 pci_read_config_dword(priv->mc, I5100_MC, &dw);
 590 
 591                 if (i5100_mc_scrbdone(dw)) {
 592                         dw |= I5100_MC_SCRBEN_MASK;
 593                         pci_write_config_dword(priv->mc, I5100_MC, dw);
 594                         pci_read_config_dword(priv->mc, I5100_MC, &dw);
 595                 }
 596 
 597                 schedule_delayed_work(&(priv->i5100_scrubbing),
 598                                       I5100_SCRUB_REFRESH_RATE);
 599         }
 600 }
 601 /*
  602  * The bandwidth value is based on experimentation; feel free to refine it.
 603  */
 604 static int i5100_set_scrub_rate(struct mem_ctl_info *mci, u32 bandwidth)
 605 {
 606         struct i5100_priv *priv = mci->pvt_info;
 607         u32 dw;
 608 
 609         pci_read_config_dword(priv->mc, I5100_MC, &dw);
 610         if (bandwidth) {
 611                 priv->scrub_enable = 1;
 612                 dw |= I5100_MC_SCRBEN_MASK;
 613                 schedule_delayed_work(&(priv->i5100_scrubbing),
 614                                       I5100_SCRUB_REFRESH_RATE);
 615         } else {
 616                 priv->scrub_enable = 0;
 617                 dw &= ~I5100_MC_SCRBEN_MASK;
 618                 cancel_delayed_work(&(priv->i5100_scrubbing));
 619         }
 620         pci_write_config_dword(priv->mc, I5100_MC, dw);
 621 
 622         pci_read_config_dword(priv->mc, I5100_MC, &dw);
 623 
 624         bandwidth = 5900000 * i5100_mc_scrben(dw);
 625 
 626         return bandwidth;
 627 }
 628 
 629 static int i5100_get_scrub_rate(struct mem_ctl_info *mci)
 630 {
 631         struct i5100_priv *priv = mci->pvt_info;
 632         u32 dw;
 633 
 634         pci_read_config_dword(priv->mc, I5100_MC, &dw);
 635 
 636         return 5900000 * i5100_mc_scrben(dw);
 637 }
 638 
 639 static struct pci_dev *pci_get_device_func(unsigned vendor,
 640                                            unsigned device,
 641                                            unsigned func)
 642 {
 643         struct pci_dev *ret = NULL;
 644 
 645         while (1) {
 646                 ret = pci_get_device(vendor, device, ret);
 647 
 648                 if (!ret)
 649                         break;
 650 
 651                 if (PCI_FUNC(ret->devfn) == func)
 652                         break;
 653         }
 654 
 655         return ret;
 656 }
 657 
 658 static unsigned long i5100_npages(struct mem_ctl_info *mci, unsigned int csrow)
 659 {
 660         struct i5100_priv *priv = mci->pvt_info;
 661         const unsigned int chan_rank = i5100_csrow_to_rank(mci, csrow);
 662         const unsigned int chan = i5100_csrow_to_chan(mci, csrow);
 663         unsigned addr_lines;
 664 
 665         /* dimm present? */
 666         if (!priv->mtr[chan][chan_rank].present)
 667                 return 0ULL;
 668 
 669         addr_lines =
 670                 I5100_DIMM_ADDR_LINES +
 671                 priv->mtr[chan][chan_rank].numcol +
 672                 priv->mtr[chan][chan_rank].numrow +
 673                 priv->mtr[chan][chan_rank].numbank;
 674 
 675         return (unsigned long)
 676                 ((unsigned long long) (1ULL << addr_lines) / PAGE_SIZE);
 677 }
 678 
 679 static void i5100_init_mtr(struct mem_ctl_info *mci)
 680 {
 681         struct i5100_priv *priv = mci->pvt_info;
 682         struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
 683         int i;
 684 
 685         for (i = 0; i < I5100_CHANNELS; i++) {
 686                 int j;
 687                 struct pci_dev *pdev = mms[i];
 688 
 689                 for (j = 0; j < I5100_MAX_RANKS_PER_CHAN; j++) {
 690                         const unsigned addr =
 691                                 (j < 4) ? I5100_MTR_0 + j * 2 :
 692                                           I5100_MTR_4 + (j - 4) * 2;
 693                         u16 w;
 694 
 695                         pci_read_config_word(pdev, addr, &w);
 696 
 697                         priv->mtr[i][j].present = i5100_mtr_present(w);
 698                         priv->mtr[i][j].ethrottle = i5100_mtr_ethrottle(w);
 699                         priv->mtr[i][j].width = 4 + 4 * i5100_mtr_width(w);
 700                         priv->mtr[i][j].numbank = 2 + i5100_mtr_numbank(w);
 701                         priv->mtr[i][j].numrow = 13 + i5100_mtr_numrow(w);
 702                         priv->mtr[i][j].numcol = 10 + i5100_mtr_numcol(w);
 703                 }
 704         }
 705 }
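
/*
 * Worked example with an illustrative raw value and 4 KiB pages assumed:
 * an MTR word of 0x0404 has bit 10 set (rank present) and a numrow field
 * of 1, so the decode above yields width = 4, numbank = 2, numrow = 14
 * and numcol = 10.  i5100_npages() above then computes
 *
 *     addr_lines = 3 + 10 + 14 + 2 = 29
 *
 * (the constant 3 accounting for the 8-byte data bus), i.e. a 512 MiB
 * rank, or 2^29 / 4096 = 131072 pages.
 */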
 706 
 707 /*
 708  * FIXME: make this into a real i2c adapter (so that dimm-decode
 709  * will work)?
 710  */
 711 static int i5100_read_spd_byte(const struct mem_ctl_info *mci,
 712                                u8 ch, u8 slot, u8 addr, u8 *byte)
 713 {
 714         struct i5100_priv *priv = mci->pvt_info;
 715         u16 w;
 716         unsigned long et;
 717 
 718         pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
 719         if (i5100_spddata_busy(w))
 720                 return -1;
 721 
 722         pci_write_config_dword(priv->mc, I5100_SPDCMD,
 723                                i5100_spdcmd_create(0xa, 1, ch * 4 + slot, addr,
 724                                                    0, 0));
 725 
 726         /* wait up to 100ms */
 727         et = jiffies + HZ / 10;
  728         do {
  729                 udelay(100);
  730                 pci_read_config_word(priv->mc, I5100_SPDDATA, &w);
  731                 if (!i5100_spddata_busy(w))
  732                         break;
  733         } while (!time_after(jiffies, et));
  734 
  735         /* fail on a timeout, an unfinished transaction, or an SPD bus error */
  736         if (i5100_spddata_busy(w) || !i5100_spddata_rdo(w) || i5100_spddata_sbe(w))
  737                 return -1;
 738 
 739         *byte = i5100_spddata_data(w);
 740 
 741         return 0;
 742 }
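
/*
 * The only caller, i5100_init_dimm_layout() below, uses this helper to
 * read SPD byte 5, which in the JEDEC DDR2 SPD layout encodes the number
 * of ranks minus one in its low bits; hence the "(rank & 3) + 1" there,
 * with 0 (read failure) treated as "no DIMM present".
 */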
 743 
 744 /*
 745  * fill dimm chip select map
 746  *
 747  * FIXME:
  748  *   o not the only way to map chip selects to dimm slots
  749  *   o investigate if there is some way to obtain this map from the BIOS
 750  */
 751 static void i5100_init_dimm_csmap(struct mem_ctl_info *mci)
 752 {
 753         struct i5100_priv *priv = mci->pvt_info;
 754         int i;
 755 
 756         for (i = 0; i < I5100_MAX_DIMM_SLOTS_PER_CHAN; i++) {
 757                 int j;
 758 
 759                 for (j = 0; j < I5100_MAX_RANKS_PER_DIMM; j++)
 760                         priv->dimm_csmap[i][j] = -1; /* default NC */
 761         }
 762 
 763         /* only 2 chip selects per slot... */
 764         if (priv->ranksperchan == 4) {
 765                 priv->dimm_csmap[0][0] = 0;
 766                 priv->dimm_csmap[0][1] = 3;
 767                 priv->dimm_csmap[1][0] = 1;
 768                 priv->dimm_csmap[1][1] = 2;
 769                 priv->dimm_csmap[2][0] = 2;
 770                 priv->dimm_csmap[3][0] = 3;
 771         } else {
 772                 priv->dimm_csmap[0][0] = 0;
 773                 priv->dimm_csmap[0][1] = 1;
 774                 priv->dimm_csmap[1][0] = 2;
 775                 priv->dimm_csmap[1][1] = 3;
 776                 priv->dimm_csmap[2][0] = 4;
 777                 priv->dimm_csmap[2][1] = 5;
 778         }
 779 }
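
/*
 * Example with the default (6 ranks per channel) map above: a dual-rank
 * DIMM in slot 1 drives chip selects 2 and 3, so an error reported for
 * channel 0, rank 3 is attributed by i5100_rank_to_slot() to slot
 * 1 * 2 + chan 0 = 2 and therefore shows up under the label "DIMM2" set
 * up in i5100_init_csrows() below.
 */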
 780 
 781 static void i5100_init_dimm_layout(struct pci_dev *pdev,
 782                                    struct mem_ctl_info *mci)
 783 {
 784         struct i5100_priv *priv = mci->pvt_info;
 785         int i;
 786 
 787         for (i = 0; i < I5100_CHANNELS; i++) {
 788                 int j;
 789 
 790                 for (j = 0; j < I5100_MAX_DIMM_SLOTS_PER_CHAN; j++) {
 791                         u8 rank;
 792 
 793                         if (i5100_read_spd_byte(mci, i, j, 5, &rank) < 0)
 794                                 priv->dimm_numrank[i][j] = 0;
 795                         else
 796                                 priv->dimm_numrank[i][j] = (rank & 3) + 1;
 797                 }
 798         }
 799 
 800         i5100_init_dimm_csmap(mci);
 801 }
 802 
 803 static void i5100_init_interleaving(struct pci_dev *pdev,
 804                                     struct mem_ctl_info *mci)
 805 {
 806         u16 w;
 807         u32 dw;
 808         struct i5100_priv *priv = mci->pvt_info;
 809         struct pci_dev *mms[2] = { priv->ch0mm, priv->ch1mm };
 810         int i;
 811 
 812         pci_read_config_word(pdev, I5100_TOLM, &w);
 813         priv->tolm = (u64) i5100_tolm_tolm(w) * 256 * 1024 * 1024;
 814 
 815         pci_read_config_word(pdev, I5100_MIR0, &w);
 816         priv->mir[0].limit = (u64) i5100_mir_limit(w) << 28;
 817         priv->mir[0].way[1] = i5100_mir_way1(w);
 818         priv->mir[0].way[0] = i5100_mir_way0(w);
 819 
 820         pci_read_config_word(pdev, I5100_MIR1, &w);
 821         priv->mir[1].limit = (u64) i5100_mir_limit(w) << 28;
 822         priv->mir[1].way[1] = i5100_mir_way1(w);
 823         priv->mir[1].way[0] = i5100_mir_way0(w);
 824 
 825         pci_read_config_word(pdev, I5100_AMIR_0, &w);
 826         priv->amir[0] = w;
 827         pci_read_config_word(pdev, I5100_AMIR_1, &w);
 828         priv->amir[1] = w;
 829 
 830         for (i = 0; i < I5100_CHANNELS; i++) {
 831                 int j;
 832 
 833                 for (j = 0; j < 5; j++) {
 834                         int k;
 835 
 836                         pci_read_config_dword(mms[i], I5100_DMIR + j * 4, &dw);
 837 
 838                         priv->dmir[i][j].limit =
 839                                 (u64) i5100_dmir_limit(dw) << 28;
 840                         for (k = 0; k < I5100_MAX_RANKS_PER_DIMM; k++)
 841                                 priv->dmir[i][j].rank[k] =
 842                                         i5100_dmir_rank(dw, k);
 843                 }
 844         }
 845 
 846         i5100_init_mtr(mci);
 847 }
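
/*
 * Illustrative scaling example: the TOLM field counts 256 MiB units, so a
 * raw field value of 0xc corresponds to a top of low memory of 3 GiB, and
 * the MIR/DMIR limit fields are shifted left by 28 bits, i.e. they are in
 * the same 256 MiB granularity (a MIR limit field of 0x10 marks a 4 GiB
 * boundary).
 */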
 848 
 849 static void i5100_init_csrows(struct mem_ctl_info *mci)
 850 {
 851         int i;
 852         struct i5100_priv *priv = mci->pvt_info;
 853 
 854         for (i = 0; i < mci->tot_dimms; i++) {
 855                 struct dimm_info *dimm;
 856                 const unsigned long npages = i5100_npages(mci, i);
 857                 const unsigned int chan = i5100_csrow_to_chan(mci, i);
 858                 const unsigned int rank = i5100_csrow_to_rank(mci, i);
 859 
 860                 if (!npages)
 861                         continue;
 862 
 863                 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
 864                                chan, rank, 0);
 865 
 866                 dimm->nr_pages = npages;
 867                 dimm->grain = 32;
 868                 dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
 869                                 DEV_X4 : DEV_X8;
 870                 dimm->mtype = MEM_RDDR2;
 871                 dimm->edac_mode = EDAC_SECDED;
 872                 snprintf(dimm->label, sizeof(dimm->label), "DIMM%u",
 873                          i5100_rank_to_slot(mci, chan, rank));
 874 
 875                 edac_dbg(2, "dimm channel %d, rank %d, size %ld\n",
 876                          chan, rank, (long)PAGES_TO_MiB(npages));
 877         }
 878 }
 879 
 880 /****************************************************************************
 881  *                       Error injection routines
 882  ****************************************************************************/
 883 
 884 static void i5100_do_inject(struct mem_ctl_info *mci)
 885 {
 886         struct i5100_priv *priv = mci->pvt_info;
 887         u32 mask0;
 888         u16 mask1;
 889 
 890         /* MEM[1:0]EINJMSK0
 891          * 31    - ADDRMATCHEN
 892          * 29:28 - HLINESEL
 893          *         00 Reserved
 894          *         01 Lower half of cache line
 895          *         10 Upper half of cache line
 896          *         11 Both upper and lower parts of cache line
 897          * 27    - EINJEN
 898          * 25:19 - XORMASK1 for deviceptr1
 899          * 9:5   - SEC2RAM for deviceptr2
 900          * 4:0   - FIR2RAM for deviceptr1
 901          */
 902         mask0 = ((priv->inject_hlinesel & 0x3) << 28) |
 903                 I5100_MEMXEINJMSK0_EINJEN |
 904                 ((priv->inject_eccmask1 & 0xffff) << 10) |
 905                 ((priv->inject_deviceptr2 & 0x1f) << 5) |
 906                 (priv->inject_deviceptr1 & 0x1f);
 907 
 908         /* MEM[1:0]EINJMSK1
 909          * 15:0  - XORMASK2 for deviceptr2
 910          */
 911         mask1 = priv->inject_eccmask2;
 912 
 913         if (priv->inject_channel == 0) {
 914                 pci_write_config_dword(priv->mc, I5100_MEM0EINJMSK0, mask0);
 915                 pci_write_config_word(priv->mc, I5100_MEM0EINJMSK1, mask1);
 916         } else {
 917                 pci_write_config_dword(priv->mc, I5100_MEM1EINJMSK0, mask0);
 918                 pci_write_config_word(priv->mc, I5100_MEM1EINJMSK1, mask1);
 919         }
 920 
  921         /* Error Injection Response Function
  922          * The Intel 5100 Memory Controller Hub Chipset (318378) datasheet
  923          * hints at this register but carries no data about it.  All
  924          * data regarding device 19 is based on experimentation and on the
  925          * Intel 7300 Chipset Memory Controller Hub (318082) datasheet,
  926          * which appears to be accurate for the i5100 in this area.
  927          *
  928          * The injection code doesn't work without setting this register.
  929          * The register needs to be flipped off and then on, or else the
  930          * hardware will only perform the first injection.
 931          *
 932          * Stop condition bits 7:4
 933          * 1010 - Stop after one injection
 934          * 1011 - Never stop injecting faults
 935          *
 936          * Start condition bits 3:0
 937          * 1010 - Never start
 938          * 1011 - Start immediately
 939          */
 940         pci_write_config_byte(priv->einj, I5100_DINJ0, 0xaa);
 941         pci_write_config_byte(priv->einj, I5100_DINJ0, 0xab);
 942 }
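
/*
 * Decoding the two writes above with the bit definitions from the comment:
 * 0xaa is stop condition 1010 ("stop after one injection") plus start
 * condition 1010 ("never start"), which disarms the response function,
 * and 0xab is 1010 plus 1011 ("start immediately"), which re-arms it;
 * hence the off-then-on sequence needed for repeated injections.
 */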
 943 
 944 #define to_mci(k) container_of(k, struct mem_ctl_info, dev)
 945 static ssize_t inject_enable_write(struct file *file, const char __user *data,
 946                 size_t count, loff_t *ppos)
 947 {
 948         struct device *dev = file->private_data;
 949         struct mem_ctl_info *mci = to_mci(dev);
 950 
 951         i5100_do_inject(mci);
 952 
 953         return count;
 954 }
 955 
 956 static const struct file_operations i5100_inject_enable_fops = {
 957         .open = simple_open,
 958         .write = inject_enable_write,
 959         .llseek = generic_file_llseek,
 960 };
 961 
 962 static int i5100_setup_debugfs(struct mem_ctl_info *mci)
 963 {
 964         struct i5100_priv *priv = mci->pvt_info;
 965 
 966         if (!i5100_debugfs)
 967                 return -ENODEV;
 968 
 969         priv->debugfs = edac_debugfs_create_dir_at(mci->bus->name, i5100_debugfs);
 970 
 971         if (!priv->debugfs)
 972                 return -ENOMEM;
 973 
 974         edac_debugfs_create_x8("inject_channel", S_IRUGO | S_IWUSR, priv->debugfs,
 975                                 &priv->inject_channel);
 976         edac_debugfs_create_x8("inject_hlinesel", S_IRUGO | S_IWUSR, priv->debugfs,
 977                                 &priv->inject_hlinesel);
 978         edac_debugfs_create_x8("inject_deviceptr1", S_IRUGO | S_IWUSR, priv->debugfs,
 979                                 &priv->inject_deviceptr1);
 980         edac_debugfs_create_x8("inject_deviceptr2", S_IRUGO | S_IWUSR, priv->debugfs,
 981                                 &priv->inject_deviceptr2);
 982         edac_debugfs_create_x16("inject_eccmask1", S_IRUGO | S_IWUSR, priv->debugfs,
 983                                 &priv->inject_eccmask1);
 984         edac_debugfs_create_x16("inject_eccmask2", S_IRUGO | S_IWUSR, priv->debugfs,
 985                                 &priv->inject_eccmask2);
 986         edac_debugfs_create_file("inject_enable", S_IWUSR, priv->debugfs,
 987                                 &mci->dev, &i5100_inject_enable_fops);
 988 
 989         return 0;
 990 
 991 }
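
/*
 * Example of driving the injection interface from user space.  The paths
 * assume debugfs is mounted at /sys/kernel/debug and that mci->bus->name
 * is "mc0" for the first controller:
 *
 *     cd /sys/kernel/debug/i5100_edac/mc0
 *     echo 0 > inject_channel       # channel 0
 *     echo 1 > inject_hlinesel      # lower half of the cache line
 *     echo 1 > inject_deviceptr1
 *     echo 0x0001 > inject_eccmask1
 *     echo 1 > inject_enable        # any write triggers i5100_do_inject()
 */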
 992 
 993 static int i5100_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 994 {
 995         int rc;
 996         struct mem_ctl_info *mci;
 997         struct edac_mc_layer layers[2];
 998         struct i5100_priv *priv;
 999         struct pci_dev *ch0mm, *ch1mm, *einj;
1000         int ret = 0;
1001         u32 dw;
1002         int ranksperch;
1003 
1004         if (PCI_FUNC(pdev->devfn) != 1)
1005                 return -ENODEV;
1006 
1007         rc = pci_enable_device(pdev);
1008         if (rc < 0) {
1009                 ret = rc;
1010                 goto bail;
1011         }
1012 
1013         /* ECC enabled? */
1014         pci_read_config_dword(pdev, I5100_MC, &dw);
1015         if (!i5100_mc_errdeten(dw)) {
1016                 printk(KERN_INFO "i5100_edac: ECC not enabled.\n");
1017                 ret = -ENODEV;
1018                 goto bail_pdev;
1019         }
1020 
1021         /* figure out how many ranks, from strapped state of 48GB_Mode input */
1022         pci_read_config_dword(pdev, I5100_MS, &dw);
1023         ranksperch = !!(dw & (1 << 8)) * 2 + 4;
1024 
1025         /* enable error reporting... */
1026         pci_read_config_dword(pdev, I5100_EMASK_MEM, &dw);
1027         dw &= ~I5100_FERR_NF_MEM_ANY_MASK;
1028         pci_write_config_dword(pdev, I5100_EMASK_MEM, dw);
1029 
1030         /* device 21, func 0, Channel 0 Memory Map, Error Flag/Mask, etc... */
1031         ch0mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
1032                                     PCI_DEVICE_ID_INTEL_5100_21, 0);
1033         if (!ch0mm) {
1034                 ret = -ENODEV;
1035                 goto bail_pdev;
1036         }
1037 
1038         rc = pci_enable_device(ch0mm);
1039         if (rc < 0) {
1040                 ret = rc;
1041                 goto bail_ch0;
1042         }
1043 
1044         /* device 22, func 0, Channel 1 Memory Map, Error Flag/Mask, etc... */
1045         ch1mm = pci_get_device_func(PCI_VENDOR_ID_INTEL,
1046                                     PCI_DEVICE_ID_INTEL_5100_22, 0);
1047         if (!ch1mm) {
1048                 ret = -ENODEV;
1049                 goto bail_disable_ch0;
1050         }
1051 
1052         rc = pci_enable_device(ch1mm);
1053         if (rc < 0) {
1054                 ret = rc;
1055                 goto bail_ch1;
1056         }
1057 
1058         layers[0].type = EDAC_MC_LAYER_CHANNEL;
1059         layers[0].size = 2;
1060         layers[0].is_virt_csrow = false;
1061         layers[1].type = EDAC_MC_LAYER_SLOT;
1062         layers[1].size = ranksperch;
1063         layers[1].is_virt_csrow = true;
1064         mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
1065                             sizeof(*priv));
1066         if (!mci) {
1067                 ret = -ENOMEM;
1068                 goto bail_disable_ch1;
1069         }
1070 
1071 
1072         /* device 19, func 0, Error injection */
1073         einj = pci_get_device_func(PCI_VENDOR_ID_INTEL,
1074                                     PCI_DEVICE_ID_INTEL_5100_19, 0);
1075         if (!einj) {
1076                 ret = -ENODEV;
1077                 goto bail_einj;
1078         }
1079 
1080         rc = pci_enable_device(einj);
1081         if (rc < 0) {
1082                 ret = rc;
1083                 goto bail_disable_einj;
1084         }
1085 
1086 
1087         mci->pdev = &pdev->dev;
1088 
1089         priv = mci->pvt_info;
1090         priv->ranksperchan = ranksperch;
1091         priv->mc = pdev;
1092         priv->ch0mm = ch0mm;
1093         priv->ch1mm = ch1mm;
1094         priv->einj = einj;
1095 
1096         INIT_DELAYED_WORK(&(priv->i5100_scrubbing), i5100_refresh_scrubbing);
1097 
1098         /* If scrubbing was already enabled by the bios, start maintaining it */
1099         pci_read_config_dword(pdev, I5100_MC, &dw);
1100         if (i5100_mc_scrben(dw)) {
1101                 priv->scrub_enable = 1;
1102                 schedule_delayed_work(&(priv->i5100_scrubbing),
1103                                       I5100_SCRUB_REFRESH_RATE);
1104         }
1105 
1106         i5100_init_dimm_layout(pdev, mci);
1107         i5100_init_interleaving(pdev, mci);
1108 
1109         mci->mtype_cap = MEM_FLAG_FB_DDR2;
1110         mci->edac_ctl_cap = EDAC_FLAG_SECDED;
1111         mci->edac_cap = EDAC_FLAG_SECDED;
1112         mci->mod_name = "i5100_edac.c";
1113         mci->ctl_name = "i5100";
1114         mci->dev_name = pci_name(pdev);
1115         mci->ctl_page_to_phys = NULL;
1116 
1117         mci->edac_check = i5100_check_error;
1118         mci->set_sdram_scrub_rate = i5100_set_scrub_rate;
1119         mci->get_sdram_scrub_rate = i5100_get_scrub_rate;
1120 
1121         priv->inject_channel = 0;
1122         priv->inject_hlinesel = 0;
1123         priv->inject_deviceptr1 = 0;
1124         priv->inject_deviceptr2 = 0;
1125         priv->inject_eccmask1 = 0;
1126         priv->inject_eccmask2 = 0;
1127 
1128         i5100_init_csrows(mci);
1129 
 1130         /* default to polling unless the operation state is POLL or NMI */
1131         switch (edac_op_state) {
1132         case EDAC_OPSTATE_POLL:
1133         case EDAC_OPSTATE_NMI:
1134                 break;
1135         default:
1136                 edac_op_state = EDAC_OPSTATE_POLL;
1137                 break;
1138         }
1139 
1140         if (edac_mc_add_mc(mci)) {
1141                 ret = -ENODEV;
1142                 goto bail_scrub;
1143         }
1144 
1145         i5100_setup_debugfs(mci);
1146 
1147         return ret;
1148 
1149 bail_scrub:
1150         priv->scrub_enable = 0;
1151         cancel_delayed_work_sync(&(priv->i5100_scrubbing));
1152         edac_mc_free(mci);
1153 
1154 bail_disable_einj:
1155         pci_disable_device(einj);
1156 
1157 bail_einj:
1158         pci_dev_put(einj);
1159 
1160 bail_disable_ch1:
1161         pci_disable_device(ch1mm);
1162 
1163 bail_ch1:
1164         pci_dev_put(ch1mm);
1165 
1166 bail_disable_ch0:
1167         pci_disable_device(ch0mm);
1168 
1169 bail_ch0:
1170         pci_dev_put(ch0mm);
1171 
1172 bail_pdev:
1173         pci_disable_device(pdev);
1174 
1175 bail:
1176         return ret;
1177 }
1178 
1179 static void i5100_remove_one(struct pci_dev *pdev)
1180 {
1181         struct mem_ctl_info *mci;
1182         struct i5100_priv *priv;
1183 
1184         mci = edac_mc_del_mc(&pdev->dev);
1185 
1186         if (!mci)
1187                 return;
1188 
1189         priv = mci->pvt_info;
1190 
1191         edac_debugfs_remove_recursive(priv->debugfs);
1192 
1193         priv->scrub_enable = 0;
1194         cancel_delayed_work_sync(&(priv->i5100_scrubbing));
1195 
1196         pci_disable_device(pdev);
1197         pci_disable_device(priv->ch0mm);
1198         pci_disable_device(priv->ch1mm);
1199         pci_disable_device(priv->einj);
1200         pci_dev_put(priv->ch0mm);
1201         pci_dev_put(priv->ch1mm);
1202         pci_dev_put(priv->einj);
1203 
1204         edac_mc_free(mci);
1205 }
1206 
1207 static const struct pci_device_id i5100_pci_tbl[] = {
1208         /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
1209         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
1210         { 0, }
1211 };
1212 MODULE_DEVICE_TABLE(pci, i5100_pci_tbl);
1213 
1214 static struct pci_driver i5100_driver = {
1215         .name = KBUILD_BASENAME,
1216         .probe = i5100_init_one,
1217         .remove = i5100_remove_one,
1218         .id_table = i5100_pci_tbl,
1219 };
1220 
1221 static int __init i5100_init(void)
1222 {
1223         int pci_rc;
1224 
1225         i5100_debugfs = edac_debugfs_create_dir_at("i5100_edac", NULL);
1226 
1227         pci_rc = pci_register_driver(&i5100_driver);
1228         return (pci_rc < 0) ? pci_rc : 0;
1229 }
1230 
1231 static void __exit i5100_exit(void)
1232 {
1233         edac_debugfs_remove(i5100_debugfs);
1234 
1235         pci_unregister_driver(&i5100_driver);
1236 }
1237 
1238 module_init(i5100_init);
1239 module_exit(i5100_exit);
1240 
1241 MODULE_LICENSE("GPL");
 1242 MODULE_AUTHOR("Arthur Jones <ajones@riverbed.com>");
 1243 MODULE_DESCRIPTION("MC Driver for Intel i5100 memory controllers");
