highbank_mc_edac.c

/*
 * Copyright 2011-2012 Calxeda, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>

#include "edac_core.h"
#include "edac_module.h"

/* DDR Ctrlr Error Registers */
#define HB_DDR_ECC_ERR_BASE		0x128
#define MW_DDR_ECC_ERR_BASE		0x1b4

#define HB_DDR_ECC_OPT			0x00
#define HB_DDR_ECC_U_ERR_ADDR		0x08
#define HB_DDR_ECC_U_ERR_STAT		0x0c
#define HB_DDR_ECC_U_ERR_DATAL		0x10
#define HB_DDR_ECC_U_ERR_DATAH		0x14
#define HB_DDR_ECC_C_ERR_ADDR		0x18
#define HB_DDR_ECC_C_ERR_STAT		0x1c
#define HB_DDR_ECC_C_ERR_DATAL		0x20
#define HB_DDR_ECC_C_ERR_DATAH		0x24

#define HB_DDR_ECC_OPT_MODE_MASK	0x3
#define HB_DDR_ECC_OPT_FWC		0x100
#define HB_DDR_ECC_OPT_XOR_SHIFT	16

/* DDR Ctrlr Interrupt Registers */
#define HB_DDR_ECC_INT_BASE		0x180
#define MW_DDR_ECC_INT_BASE		0x218

#define HB_DDR_ECC_INT_STATUS		0x00
#define HB_DDR_ECC_INT_ACK		0x04
#define HB_DDR_ECC_INT_STAT_CE		0x8
#define HB_DDR_ECC_INT_STAT_DOUBLE_CE	0x10
#define HB_DDR_ECC_INT_STAT_UE		0x20
#define HB_DDR_ECC_INT_STAT_DOUBLE_UE	0x40
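
/*
 * Per-controller private data: base addresses of the ECC error and
 * interrupt register blocks within the mapped controller window.
 */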
struct hb_mc_drvdata {
	void __iomem *mc_err_base;
	void __iomem *mc_int_base;
};
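
/*
 * Error interrupt handler: report uncorrectable and correctable ECC
 * events to the EDAC core (page and offset are derived from the captured
 * error address), then acknowledge the interrupt to clear the error.
 */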
static irqreturn_t highbank_mc_err_handler(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct hb_mc_drvdata *drvdata = mci->pvt_info;
	u32 status, err_addr;

	/* Read the interrupt status register */
	status = readl(drvdata->mc_int_base + HB_DDR_ECC_INT_STATUS);

	if (status & HB_DDR_ECC_INT_STAT_UE) {
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_U_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");
	}
	if (status & HB_DDR_ECC_INT_STAT_CE) {
		u32 syndrome = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_STAT);

		syndrome = (syndrome >> 8) & 0xff;
		err_addr = readl(drvdata->mc_err_base + HB_DDR_ECC_C_ERR_ADDR);
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & ~PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	}

	/* clear the error, clears the interrupt */
	writel(status, drvdata->mc_int_base + HB_DDR_ECC_INT_ACK);
	return IRQ_HANDLED;
}
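
/*
 * Error injection helper: keep only the ECC mode bits of the option
 * register, then OR in the caller's syndrome XOR value and the FWC flag
 * and write the result back. Used by the inject_ctrl sysfs attribute
 * below.
 */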
static void highbank_mc_err_inject(struct mem_ctl_info *mci, u8 synd)
{
	struct hb_mc_drvdata *pdata = mci->pvt_info;
	u32 reg;

	reg = readl(pdata->mc_err_base + HB_DDR_ECC_OPT);
	reg &= HB_DDR_ECC_OPT_MODE_MASK;
	reg |= (synd << HB_DDR_ECC_OPT_XOR_SHIFT) | HB_DDR_ECC_OPT_FWC;
	writel(reg, pdata->mc_err_base + HB_DDR_ECC_OPT);
}
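
/*
 * sysfs store hook for the write-only 'inject_ctrl' attribute: parse a
 * hex syndrome value from userspace and pass it to the injection helper.
 */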
#define to_mci(k) container_of(k, struct mem_ctl_info, dev)

static ssize_t highbank_mc_inject_ctrl(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct mem_ctl_info *mci = to_mci(dev);
	u8 synd;

	if (kstrtou8(buf, 16, &synd))
		return -EINVAL;

	highbank_mc_err_inject(mci, synd);

	return count;
}

static DEVICE_ATTR(inject_ctrl, S_IWUSR, NULL, highbank_mc_inject_ctrl);

static struct attribute *highbank_dev_attrs[] = {
	&dev_attr_inject_ctrl.attr,
	NULL
};

ATTRIBUTE_GROUPS(highbank_dev);
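
/*
 * Highbank and Midway (ECX-2000) controllers place the ECC error and
 * interrupt register blocks at different offsets; the matched
 * of_device_id entry selects the right pair via its .data pointer.
 */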
struct hb_mc_settings {
	int	err_offset;
	int	int_offset;
};

static struct hb_mc_settings hb_settings = {
	.err_offset = HB_DDR_ECC_ERR_BASE,
	.int_offset = HB_DDR_ECC_INT_BASE,
};

static struct hb_mc_settings mw_settings = {
	.err_offset = MW_DDR_ECC_ERR_BASE,
	.int_offset = MW_DDR_ECC_INT_BASE,
};

static const struct of_device_id hb_ddr_ctrl_of_match[] = {
	{ .compatible = "calxeda,hb-ddr-ctrl",		.data = &hb_settings },
	{ .compatible = "calxeda,ecx-2000-ddr-ctrl",	.data = &mw_settings },
	{},
};
MODULE_DEVICE_TABLE(of, hb_ddr_ctrl_of_match);
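
/*
 * Probe: map the controller registers, check that ECC is present and
 * enabled, register a single-csrow/single-channel memory controller with
 * the EDAC core, and wire up the error interrupt.
 */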
static int highbank_mc_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	const struct hb_mc_settings *settings;
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;
	struct hb_mc_drvdata *drvdata;
	struct dimm_info *dimm;
	struct resource *r;
	void __iomem *base;
	u32 control;
	int irq;
	int res = 0;

	id = of_match_device(hb_ddr_ctrl_of_match, &pdev->dev);
	if (!id)
		return -ENODEV;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
			    sizeof(struct hb_mc_drvdata));
	if (!mci)
		return -ENOMEM;

	mci->pdev = &pdev->dev;
	drvdata = mci->pvt_info;
	platform_set_drvdata(pdev, mci);

	if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "Unable to get mem resource\n");
		res = -ENODEV;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, r->start,
				     resource_size(r), dev_name(&pdev->dev))) {
		dev_err(&pdev->dev, "Error while requesting mem region\n");
		res = -EBUSY;
		goto err;
	}

	base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!base) {
		dev_err(&pdev->dev, "Unable to map regs\n");
		res = -ENOMEM;
		goto err;
	}

	settings = id->data;
	drvdata->mc_err_base = base + settings->err_offset;
	drvdata->mc_int_base = base + settings->int_offset;

	control = readl(drvdata->mc_err_base + HB_DDR_ECC_OPT) & 0x3;
	if (!control || (control == 0x2)) {
		dev_err(&pdev->dev, "No ECC present, or ECC disabled\n");
		res = -ENODEV;
		goto err;
	}

	mci->mtype_cap = MEM_FLAG_DDR3;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = pdev->dev.driver->name;
	mci->mod_ver = "1";
	mci->ctl_name = id->compatible;
	mci->dev_name = dev_name(&pdev->dev);
	mci->scrub_mode = SCRUB_SW_SRC;

	/* Only a single 4GB DIMM is supported */
	dimm = *mci->dimms;
	dimm->nr_pages = (~0UL >> PAGE_SHIFT) + 1;
	dimm->grain = 8;
	dimm->dtype = DEV_X8;
	dimm->mtype = MEM_DDR3;
	dimm->edac_mode = EDAC_SECDED;

	res = edac_mc_add_mc_with_groups(mci, highbank_dev_groups);
	if (res < 0)
		goto err;

	irq = platform_get_irq(pdev, 0);
	res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
			       0, dev_name(&pdev->dev), mci);
	if (res < 0) {
		dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
		goto err2;
	}

	devres_close_group(&pdev->dev, NULL);
	return 0;

err2:
	edac_mc_del_mc(&pdev->dev);
err:
	devres_release_group(&pdev->dev, NULL);
	edac_mc_free(mci);
	return res;
}
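
/* Remove: unregister from the EDAC core and free the mem_ctl_info. */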
static int highbank_mc_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}

static struct platform_driver highbank_mc_edac_driver = {
	.probe = highbank_mc_probe,
	.remove = highbank_mc_remove,
	.driver = {
		.name = "hb_mc_edac",
		.of_match_table = hb_ddr_ctrl_of_match,
	},
};

module_platform_driver(highbank_mc_edac_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Calxeda, Inc.");
MODULE_DESCRIPTION("EDAC Driver for Calxeda Highbank");