i3000_edac.c
/*
 * Intel 3000/3010 Memory Controller kernel module
 * Copyright (C) 2007 Akamai Technologies, Inc.
 * Shamelessly copied from:
 *	Intel D82875P Memory Controller kernel module
 *	(C) 2003 Linux Networx (http://lnxi.com)
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/edac.h>
#include "edac_core.h"

#define I3000_REVISION		"1.1"

#define EDAC_MOD_STR		"i3000_edac"

#define I3000_RANKS		8
#define I3000_RANKS_PER_CHANNEL	4
#define I3000_CHANNELS		2

/* Intel 3000 register addresses - device 0 function 0 - DRAM Controller */

#define I3000_MCHBAR		0x44	/* MCH Memory Mapped Register BAR */
#define I3000_MCHBAR_MASK	0xffffc000
#define I3000_MMR_WINDOW_SIZE	16384

#define I3000_EDEAP	0x70	/* Extended DRAM Error Address Pointer (8b)
				 *
				 * 7:1   reserved
				 * 0     bit 32 of address
				 */
#define I3000_DEAP	0x58	/* DRAM Error Address Pointer (32b)
				 *
				 * 31:7  address
				 * 6:1   reserved
				 * 0     Error channel 0/1
				 */
#define I3000_DEAP_GRAIN	(1 << 7)
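/*
 * Note: the DEAP only records bits 31:7 of the failing address (see the
 * register layout above), so error addresses are reported at a 128-byte
 * (1 << 7) granularity.
 */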
/*
 * Helper functions to decode the DEAP/EDEAP hardware registers.
 *
 * The type promotion here is deliberate; we're deriving an
 * unsigned long pfn and offset from hardware regs which are u8/u32.
 */
static inline unsigned long deap_pfn(u8 edeap, u32 deap)
{
	deap >>= PAGE_SHIFT;
	deap |= (edeap & 1) << (32 - PAGE_SHIFT);
	return deap;
}

static inline unsigned long deap_offset(u32 deap)
{
	return deap & ~(I3000_DEAP_GRAIN - 1) & ~PAGE_MASK;
}

static inline int deap_channel(u32 deap)
{
	return deap & 1;
}
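/*
 * Worked example (hypothetical register values, assuming PAGE_SHIFT == 12):
 * with edeap = 0x01 and deap = 0x00345681, deap_channel() returns 1 (bit 0),
 * deap_pfn() yields (0x00345681 >> 12) | (1 << 20) = 0x100345, and
 * deap_offset() keeps bits 11:7 of the address, i.e. 0x680.
 */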
#define I3000_DERRSYN	0x5c	/* DRAM Error Syndrome (8b)
				 *
				 *  7:0  DRAM ECC Syndrome
				 */

#define I3000_ERRSTS	0xc8	/* Error Status Register (16b)
				 *
				 * 15:12 reserved
				 * 11    MCH Thermal Sensor Event
				 *         for SMI/SCI/SERR
				 * 10    reserved
				 *  9    LOCK to non-DRAM Memory Flag (LCKF)
				 *  8    Received Refresh Timeout Flag (RRTOF)
				 *  7:2  reserved
				 *  1    Multi-bit DRAM ECC Error Flag (DMERR)
				 *  0    Single-bit DRAM ECC Error Flag (DSERR)
				 */
#define I3000_ERRSTS_BITS	0x0b03	/* bits which indicate errors */
#define I3000_ERRSTS_UE		0x0002
#define I3000_ERRSTS_CE		0x0001
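/*
 * For reference, 0x0b03 sets bits 0, 1, 8, 9 and 11, i.e. exactly the DSERR,
 * DMERR, RRTOF, LCKF and thermal-event flags described in the ERRSTS layout
 * above.
 */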
#define I3000_ERRCMD	0xca	/* Error Command (16b)
				 *
				 * 15:12 reserved
				 * 11    SERR on MCH Thermal Sensor Event
				 *         (TSESERR)
				 * 10    reserved
				 *  9    SERR on LOCK to non-DRAM Memory
				 *         (LCKERR)
				 *  8    SERR on DRAM Refresh Timeout
				 *         (DRTOERR)
				 *  7:2  reserved
				 *  1    SERR Multi-Bit DRAM ECC Error
				 *         (DMERR)
				 *  0    SERR on Single-Bit ECC Error
				 *         (DSERR)
				 */

/* Intel MMIO register space - device 0 function 0 - MMR space */

#define I3000_DRB_SHIFT 25	/* 32MiB grain */

#define I3000_C0DRB	0x100	/* Channel 0 DRAM Rank Boundary (8b x 4)
				 *
				 * 7:0   Channel 0 DRAM Rank Boundary Address
				 */
#define I3000_C1DRB	0x180	/* Channel 1 DRAM Rank Boundary (8b x 4)
				 *
				 * 7:0   Channel 1 DRAM Rank Boundary Address
				 */

#define I3000_C0DRA	0x108	/* Channel 0 DRAM Rank Attribute (8b x 2)
				 *
				 * 7     reserved
				 * 6:4   DRAM odd Rank Attribute
				 * 3     reserved
				 * 2:0   DRAM even Rank Attribute
				 *
				 * Each attribute defines the page
				 * size of the corresponding rank:
				 * 000: unpopulated
				 * 001: reserved
				 * 010: 4 KB
				 * 011: 8 KB
				 * 100: 16 KB
				 * Others: reserved
				 */
#define I3000_C1DRA	0x188	/* Channel 1 DRAM Rank Attribute (8b x 2) */

static inline unsigned char odd_rank_attrib(unsigned char dra)
{
	return (dra & 0x70) >> 4;
}

static inline unsigned char even_rank_attrib(unsigned char dra)
{
	return dra & 0x07;
}
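/*
 * Worked example (hypothetical DRA byte): dra = 0x42 decodes to
 * odd_rank_attrib() == 0x4 (16 KB pages) and even_rank_attrib() == 0x2
 * (4 KB pages) for the pair of ranks that byte describes.
 */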
#define I3000_C0DRC0	0x120	/* DRAM Controller Mode 0 (32b)
				 *
				 * 31:30 reserved
				 * 29    Initialization Complete (IC)
				 * 28:11 reserved
				 * 10:8  Refresh Mode Select (RMS)
				 * 7     reserved
				 * 6:4   Mode Select (SMS)
				 * 3:2   reserved
				 * 1:0   DRAM Type (DT)
				 */

#define I3000_C0DRC1	0x124	/* DRAM Controller Mode 1 (32b)
				 *
				 * 31    Enhanced Addressing Enable (ENHADE)
				 * 30:0  reserved
				 */

enum i3000p_chips {
	I3000 = 0,
};

struct i3000_dev_info {
	const char *ctl_name;
};

struct i3000_error_info {
	u16 errsts;
	u8 derrsyn;
	u8 edeap;
	u32 deap;
	u16 errsts2;
};

static const struct i3000_dev_info i3000_devs[] = {
	[I3000] = {
		.ctl_name = "i3000"},
};

static struct pci_dev *mci_pdev;
static int i3000_registered = 1;
static struct edac_pci_ctl_info *i3000_pci;

static void i3000_get_error_info(struct mem_ctl_info *mci,
				 struct i3000_error_info *info)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(mci->pdev);

	/*
	 * This is a mess because there is no atomic way to read all the
	 * registers at once and the registers can transition from CE being
	 * overwritten by UE.
	 */
	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts);
	if (!(info->errsts & I3000_ERRSTS_BITS))
		return;
	pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
	pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
	pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
	pci_read_config_word(pdev, I3000_ERRSTS, &info->errsts2);

	/*
	 * If the error is the same for both reads then the first set
	 * of reads is valid.  If there is a change then there is a CE
	 * with no info and the second set of reads is valid and
	 * should be UE info.
	 */
	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
		pci_read_config_byte(pdev, I3000_EDEAP, &info->edeap);
		pci_read_config_dword(pdev, I3000_DEAP, &info->deap);
		pci_read_config_byte(pdev, I3000_DERRSYN, &info->derrsyn);
	}

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
			 I3000_ERRSTS_BITS);
}
static int i3000_process_error_info(struct mem_ctl_info *mci,
				    struct i3000_error_info *info,
				    int handle_errors)
{
	int row, multi_chan, channel;
	unsigned long pfn, offset;

	multi_chan = mci->csrows[0]->nr_channels - 1;

	if (!(info->errsts & I3000_ERRSTS_BITS))
		return 0;

	if (!handle_errors)
		return 1;

	if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
				     -1, -1, -1,
				     "UE overwrote CE", "");
		info->errsts = info->errsts2;
	}

	pfn = deap_pfn(info->edeap, info->deap);
	offset = deap_offset(info->deap);
	channel = deap_channel(info->deap);

	row = edac_mc_find_csrow_by_page(mci, pfn);

	if (info->errsts & I3000_ERRSTS_UE)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     pfn, offset, 0,
				     row, -1, -1,
				     "i3000 UE", "");
	else
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     pfn, offset, info->derrsyn,
				     row, multi_chan ? channel : 0, -1,
				     "i3000 CE", "");

	return 1;
}

static void i3000_check(struct mem_ctl_info *mci)
{
	struct i3000_error_info info;

	edac_dbg(1, "MC%d\n", mci->mc_idx);
	i3000_get_error_info(mci, &info);
	i3000_process_error_info(mci, &info, 1);
}
static int i3000_is_interleaved(const unsigned char *c0dra,
				const unsigned char *c1dra,
				const unsigned char *c0drb,
				const unsigned char *c1drb)
{
	int i;

	/*
	 * If the channels aren't populated identically then
	 * we're not interleaved.
	 */
	for (i = 0; i < I3000_RANKS_PER_CHANNEL / 2; i++)
		if (odd_rank_attrib(c0dra[i]) != odd_rank_attrib(c1dra[i]) ||
		    even_rank_attrib(c0dra[i]) !=
		    even_rank_attrib(c1dra[i]))
			return 0;

	/*
	 * If the rank boundaries for the two channels are different
	 * then we're not interleaved.
	 */
	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++)
		if (c0drb[i] != c1drb[i])
			return 0;

	return 1;
}
static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
{
	int rc;
	int i, j;
	struct mem_ctl_info *mci = NULL;
	struct edac_mc_layer layers[2];
	unsigned long last_cumul_size, nr_pages;
	int interleaved, nr_channels;
	unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
	unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
	unsigned char *c0drb = drb, *c1drb = &drb[I3000_RANKS_PER_CHANNEL];
	unsigned long mchbar;
	void __iomem *window;

	edac_dbg(0, "MC:\n");

	pci_read_config_dword(pdev, I3000_MCHBAR, (u32 *) &mchbar);
	mchbar &= I3000_MCHBAR_MASK;
	window = ioremap_nocache(mchbar, I3000_MMR_WINDOW_SIZE);
	if (!window) {
		printk(KERN_ERR "i3000: cannot map mmio space at 0x%lx\n",
		       mchbar);
		return -ENODEV;
	}

	c0dra[0] = readb(window + I3000_C0DRA + 0);	/* ranks 0,1 */
	c0dra[1] = readb(window + I3000_C0DRA + 1);	/* ranks 2,3 */
	c1dra[0] = readb(window + I3000_C1DRA + 0);	/* ranks 0,1 */
	c1dra[1] = readb(window + I3000_C1DRA + 1);	/* ranks 2,3 */

	for (i = 0; i < I3000_RANKS_PER_CHANNEL; i++) {
		c0drb[i] = readb(window + I3000_C0DRB + i);
		c1drb[i] = readb(window + I3000_C1DRB + i);
	}

	iounmap(window);

	/*
	 * Figure out how many channels we have.
	 *
	 * If we have what the datasheet calls "asymmetric channels"
	 * (essentially the same as what was called "virtual single
	 * channel mode" in the i82875) then it's a single channel as
	 * far as EDAC is concerned.
	 */
	interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
	nr_channels = interleaved ? 2 : 1;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = I3000_RANKS / nr_channels;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = nr_channels;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	edac_dbg(3, "MC: init mci\n");

	mci->pdev = &pdev->dev;
	mci->mtype_cap = MEM_FLAG_DDR2;

	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;

	mci->mod_name = EDAC_MOD_STR;
	mci->mod_ver = I3000_REVISION;
	mci->ctl_name = i3000_devs[dev_idx].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = i3000_check;
	mci->ctl_page_to_phys = NULL;

	/*
	 * The dram rank boundary (DRB) reg values are boundary addresses
	 * for each DRAM rank with a granularity of 32MB.  DRB regs are
	 * cumulative; the last one will contain the total memory
	 * contained in all ranks.
	 *
	 * If we're in interleaved mode then we're only walking through
	 * the ranks of controller 0, so we double all the values we see.
	 */
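	/*
	 * Example of the arithmetic below (hypothetical DRB value, assuming
	 * PAGE_SHIFT == 12): drb[i] == 0x08 means this rank's cumulative
	 * boundary is 8 * 32 MiB = 256 MiB, i.e. 0x08 << (25 - 12) = 0x10000
	 * pages; subtracting the previous boundary gives this rank's pages.
	 */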
	for (last_cumul_size = i = 0; i < mci->nr_csrows; i++) {
		u8 value;
		u32 cumul_size;
		struct csrow_info *csrow = mci->csrows[i];

		value = drb[i];
		cumul_size = value << (I3000_DRB_SHIFT - PAGE_SHIFT);
		if (interleaved)
			cumul_size <<= 1;
		edac_dbg(3, "MC: (%d) cumul_size 0x%x\n", i, cumul_size);
		if (cumul_size == last_cumul_size)
			continue;

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;

		for (j = 0; j < nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;

			dimm->nr_pages = nr_pages / nr_channels;
			dimm->grain = I3000_DEAP_GRAIN;
			dimm->mtype = MEM_DDR2;
			dimm->dtype = DEV_UNKNOWN;
			dimm->edac_mode = EDAC_UNKNOWN;
		}
	}

	/*
	 * Clear any error bits.
	 * (Yes, we really clear bits by writing 1 to them.)
	 */
	pci_write_bits16(pdev, I3000_ERRSTS, I3000_ERRSTS_BITS,
			 I3000_ERRSTS_BITS);

	rc = -ENODEV;
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
		goto fail;
	}

	/* allocating generic PCI control info */
	i3000_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i3000_pci) {
		printk(KERN_WARNING
		       "%s(): Unable to create PCI control\n",
		       __func__);
		printk(KERN_WARNING
		       "%s(): PCI error report via EDAC not setup\n",
		       __func__);
	}

	/* get this far and it's successful */
	edac_dbg(3, "MC: success\n");
	return 0;

fail:
	if (mci)
		edac_mc_free(mci);

	return rc;
}
/* returns count (>= 0), or negative on error */
static int i3000_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;

	edac_dbg(0, "MC:\n");

	if (pci_enable_device(pdev) < 0)
		return -EIO;

	rc = i3000_probe1(pdev, ent->driver_data);
	if (!mci_pdev)
		mci_pdev = pci_dev_get(pdev);

	return rc;
}

static void i3000_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	if (i3000_pci)
		edac_pci_release_generic_ctl(i3000_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	edac_mc_free(mci);
}

static const struct pci_device_id i3000_pci_tbl[] = {
	{
	 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	 I3000},
	{
	 0,
	 }			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i3000_pci_tbl);

static struct pci_driver i3000_driver = {
	.name = EDAC_MOD_STR,
	.probe = i3000_init_one,
	.remove = i3000_remove_one,
	.id_table = i3000_pci_tbl,
};

static int __init i3000_init(void)
{
	int pci_rc;

	edac_dbg(3, "MC:\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i3000_driver);
	if (pci_rc < 0)
		goto fail0;

	if (!mci_pdev) {
		i3000_registered = 0;
		mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
					  PCI_DEVICE_ID_INTEL_3000_HB, NULL);
		if (!mci_pdev) {
			edac_dbg(0, "i3000 pci_get_device fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}

		pci_rc = i3000_init_one(mci_pdev, i3000_pci_tbl);
		if (pci_rc < 0) {
			edac_dbg(0, "i3000 init fail\n");
			pci_rc = -ENODEV;
			goto fail1;
		}
	}

	return 0;

fail1:
	pci_unregister_driver(&i3000_driver);

fail0:
	pci_dev_put(mci_pdev);

	return pci_rc;
}

static void __exit i3000_exit(void)
{
	edac_dbg(3, "MC:\n");

	pci_unregister_driver(&i3000_driver);
	if (!i3000_registered) {
		i3000_remove_one(mci_pdev);
		pci_dev_put(mci_pdev);
	}
}

module_init(i3000_init);
module_exit(i3000_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Akamai Technologies Arthur Ulfeldt/Jason Uhlenkott");
MODULE_DESCRIPTION("MC support for Intel 3000 memory hub controllers");

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");