omap_l3_noc.c

/*
 * OMAP L3 Interconnect error handling driver
 *
 * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *	Sricharan <r.sricharan@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "omap_l3_noc.h"
/**
 * l3_handle_target() - Handle Target specific parse and reporting
 * @l3:		pointer to l3 struct
 * @base:	base address of clkdm
 * @flag_mux:	flagmux corresponding to the event
 * @err_src:	error source index of the slave (target)
 *
 * This does the second part of the error interrupt handling:
 *	3) Parse in the slave information
 *	4) Print the logged information.
 *	5) Add dump stack to provide kernel trace.
 *	6) Clear the source if known.
 *
 * This handles two types of errors:
 *	1) Custom errors in L3:
 *		Target like DMM/FW/EMIF generates SRESP=ERR error
 *	2) Standard L3 error:
 *		- Unsupported CMD.
 *			L3 tries to access target while it is idle
 *		- OCP disconnect.
 *		- Address hole error:
 *			If DSS/ISS/FDIF/USBHOSTFS access a target where they
 *			do not have connectivity, the error is logged in
 *			their default target which is DMM2.
 *
 *	On High Secure devices, firewall errors are possible and those
 *	can be trapped as well. But the trapping is implemented as part
 *	of secure software and hence need not be implemented here.
 */
static int l3_handle_target(struct omap_l3 *l3, void __iomem *base,
                            struct l3_flagmux_data *flag_mux, int err_src)
{
        int k;
        u32 std_err_main, clear, masterid;
        u8 op_code, m_req_info;
        void __iomem *l3_targ_base;
        void __iomem *l3_targ_stderr, *l3_targ_slvofslsb, *l3_targ_mstaddr;
        void __iomem *l3_targ_hdr, *l3_targ_info;
        struct l3_target_data *l3_targ_inst;
        struct l3_masters_data *master;
        char *target_name, *master_name = "UN IDENTIFIED";
        char *err_description;
        char err_string[30] = { 0 };
        char info_string[60] = { 0 };

        /* We do not expect err_src to go out of bounds */
        BUG_ON(err_src > MAX_CLKDM_TARGETS);

        if (err_src < flag_mux->num_targ_data) {
                l3_targ_inst = &flag_mux->l3_targ[err_src];
                target_name = l3_targ_inst->name;
                l3_targ_base = base + l3_targ_inst->offset;
        } else {
                target_name = L3_TARGET_NOT_SUPPORTED;
        }

        if (target_name == L3_TARGET_NOT_SUPPORTED)
                return -ENODEV;

        /* Read the stderrlog_main_source from clk domain */
        l3_targ_stderr = l3_targ_base + L3_TARG_STDERRLOG_MAIN;
        l3_targ_slvofslsb = l3_targ_base + L3_TARG_STDERRLOG_SLVOFSLSB;

        std_err_main = readl_relaxed(l3_targ_stderr);
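
        /*
         * The CUSTOM_ERROR bit in STDERRLOG_MAIN tells us whether a standard
         * or a custom error was logged, which determines which set of log
         * registers is decoded below.
         */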
        switch (std_err_main & CUSTOM_ERROR) {
        case STANDARD_ERROR:
                err_description = "Standard";
                snprintf(err_string, sizeof(err_string),
                         ": At Address: 0x%08X ",
                         readl_relaxed(l3_targ_slvofslsb));

                l3_targ_mstaddr = l3_targ_base + L3_TARG_STDERRLOG_MSTADDR;
                l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_HDR;
                l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_INFO;
                break;

        case CUSTOM_ERROR:
                err_description = "Custom";

                l3_targ_mstaddr = l3_targ_base +
                                  L3_TARG_STDERRLOG_CINFO_MSTADDR;
                l3_targ_hdr = l3_targ_base + L3_TARG_STDERRLOG_CINFO_OPCODE;
                l3_targ_info = l3_targ_base + L3_TARG_STDERRLOG_CINFO_INFO;
                break;

        default:
                /* Nothing to be handled here as of now */
                return 0;
        }

        /* STDERRLOG_MSTADDR stores the NTTP master address. */
        masterid = (readl_relaxed(l3_targ_mstaddr) &
                    l3->mst_addr_mask) >> __ffs(l3->mst_addr_mask);
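
        /* Map the extracted master ID to a name from the per-SoC table. */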
        for (k = 0, master = l3->l3_masters; k < l3->num_masters;
             k++, master++) {
                if (masterid == master->id) {
                        master_name = master->name;
                        break;
                }
        }

        op_code = readl_relaxed(l3_targ_hdr) & 0x7;

        m_req_info = readl_relaxed(l3_targ_info) & 0xF;
        snprintf(info_string, sizeof(info_string),
                 ": %s in %s mode during %s access",
                 (m_req_info & BIT(0)) ? "Opcode Fetch" : "Data Access",
                 (m_req_info & BIT(1)) ? "Supervisor" : "User",
                 (m_req_info & BIT(3)) ? "Debug" : "Functional");

        WARN(true,
             "%s:L3 %s Error: MASTER %s TARGET %s (%s)%s%s\n",
             dev_name(l3->dev),
             err_description,
             master_name, target_name,
             l3_transaction_type[op_code],
             err_string, info_string);

        /* clear the std error log */
        clear = std_err_main | CLEAR_STDERR_LOG;
        writel_relaxed(clear, l3_targ_stderr);

        return 0;
}

/**
 * l3_interrupt_handler() - interrupt handler for l3 events
 * @irq:	irq number
 * @_l3:	pointer to l3 structure
 *
 * Interrupt Handler for L3 error detection.
 *	1) Identify the L3 clockdomain partition to which the error belongs.
 *	2) Identify the slave where the error information is logged
 *	   ... handle the slave event ...
 *	7) if the slave is unknown, mask out the slave.
 */
static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
{
        struct omap_l3 *l3 = _l3;
        int inttype, i, ret;
        int err_src = 0;
        u32 err_reg, mask_val;
        void __iomem *base, *mask_reg;
        struct l3_flagmux_data *flag_mux;

        /* Get the Type of interrupt */
        inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR;

        for (i = 0; i < l3->num_modules; i++) {
                /*
                 * Read the regerr register of the clock domain
                 * to determine the source
                 */
                base = l3->l3_base[i];
                flag_mux = l3->l3_flagmux[i];
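                /*
                 * The application-error and debug-error flag registers of a
                 * flag mux sit 8 bytes apart, so (inttype << 3) selects the
                 * register bank matching the interrupt that fired.
                 */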
                err_reg = readl_relaxed(base + flag_mux->offset +
                                        L3_FLAGMUX_REGERR0 + (inttype << 3));

                err_reg &= ~(inttype ? flag_mux->mask_app_bits :
                                flag_mux->mask_dbg_bits);

                /* Get the corresponding error and analyse */
                if (err_reg) {
                        /* Identify the source from control status register */
                        err_src = __ffs(err_reg);

                        ret = l3_handle_target(l3, base, flag_mux, err_src);

                        /*
                         * Certain platforms may have "undocumented" status
                         * pending on boot. So don't generate a severe warning
                         * here. Just mask it off to prevent the error from
                         * recurring and locking up the system.
                         */
                        if (ret) {
                                dev_err(l3->dev,
                                        "L3 %s error: target %d mod:%d %s\n",
                                        inttype ? "debug" : "application",
                                        err_src, i, "(unclearable)");

                                mask_reg = base + flag_mux->offset +
                                           L3_FLAGMUX_MASK0 + (inttype << 3);
                                mask_val = readl_relaxed(mask_reg);
                                mask_val &= ~(1 << err_src);
                                writel_relaxed(mask_val, mask_reg);

                                /* Mark these bits as to be ignored */
                                if (inttype)
                                        flag_mux->mask_app_bits |= 1 << err_src;
                                else
                                        flag_mux->mask_dbg_bits |= 1 << err_src;
                        }

                        /* Error found so break the for loop */
                        return IRQ_HANDLED;
                }
        }

        dev_err(l3->dev, "L3 %s IRQ not handled!!\n",
                inttype ? "debug" : "application");

        return IRQ_NONE;
}

static const struct of_device_id l3_noc_match[] = {
        {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
        {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
        {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
        {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
        {},
};
MODULE_DEVICE_TABLE(of, l3_noc_match);

static int omap_l3_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;
        static struct omap_l3 *l3;
        int ret, i, res_idx;

        of_id = of_match_device(l3_noc_match, &pdev->dev);
        if (!of_id) {
                dev_err(&pdev->dev, "OF data missing\n");
                return -EINVAL;
        }

        l3 = devm_kzalloc(&pdev->dev, sizeof(*l3), GFP_KERNEL);
        if (!l3)
                return -ENOMEM;
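
        /* Start from the matched per-SoC description (flag muxes, masters). */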
        memcpy(l3, of_id->data, sizeof(*l3));
        l3->dev = &pdev->dev;
        platform_set_drvdata(pdev, l3);

        /* Get mem resources */
        for (i = 0, res_idx = 0; i < l3->num_modules; i++) {
                struct resource *res;
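
                /*
                 * A module marked L3_BASE_IS_SUBMODULE lives inside the
                 * previous module's register space, so it reuses that mapping
                 * and does not consume a memory resource of its own.
                 */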
                if (l3->l3_base[i] == L3_BASE_IS_SUBMODULE) {
                        /* First entry cannot be submodule */
                        BUG_ON(i == 0);

                        l3->l3_base[i] = l3->l3_base[i - 1];
                        continue;
                }
                res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx);
                l3->l3_base[i] = devm_ioremap_resource(&pdev->dev, res);
                if (IS_ERR(l3->l3_base[i])) {
                        dev_err(l3->dev, "ioremap %d failed\n", i);
                        return PTR_ERR(l3->l3_base[i]);
                }
                res_idx++;
        }

        /*
         * Setup interrupt Handlers
         */
        l3->debug_irq = platform_get_irq(pdev, 0);
        ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
                               0x0, "l3-dbg-irq", l3);
        if (ret) {
                dev_err(l3->dev, "request_irq failed for %d\n",
                        l3->debug_irq);
                return ret;
        }

        l3->app_irq = platform_get_irq(pdev, 1);
        ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
                               0x0, "l3-app-irq", l3);
        if (ret)
                dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);

        return ret;
}

#ifdef CONFIG_PM_SLEEP

/**
 * l3_resume_noirq() - resume function for l3_noc
 * @dev:	pointer to l3_noc device structure
 *
 * We only need the resume handler, since the delta register
 * configuration has already been maintained as part of
 * configuring the system.
 */
static int l3_resume_noirq(struct device *dev)
{
        struct omap_l3 *l3 = dev_get_drvdata(dev);
        int i;
        struct l3_flagmux_data *flag_mux;
        void __iomem *base, *mask_regx = NULL;
        u32 mask_val;
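
        /*
         * Re-mask the error sources that were masked off at runtime
         * (recorded in mask_app_bits/mask_dbg_bits by the interrupt
         * handler), since the flag-mux registers may have lost context
         * across suspend.
         */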
        for (i = 0; i < l3->num_modules; i++) {
                base = l3->l3_base[i];
                flag_mux = l3->l3_flagmux[i];
                if (!flag_mux->mask_app_bits && !flag_mux->mask_dbg_bits)
                        continue;

                mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
                            (L3_APPLICATION_ERROR << 3);
                mask_val = readl_relaxed(mask_regx);
                mask_val &= ~(flag_mux->mask_app_bits);

                writel_relaxed(mask_val, mask_regx);

                mask_regx = base + flag_mux->offset + L3_FLAGMUX_MASK0 +
                            (L3_DEBUG_ERROR << 3);
                mask_val = readl_relaxed(mask_regx);
                mask_val &= ~(flag_mux->mask_dbg_bits);

                writel_relaxed(mask_val, mask_regx);
        }

        /* Dummy read to force OCP barrier */
        if (mask_regx)
                (void)readl(mask_regx);

        return 0;
}

static const struct dev_pm_ops l3_dev_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(NULL, l3_resume_noirq)
};

#define L3_DEV_PM_OPS (&l3_dev_pm_ops)
#else
#define L3_DEV_PM_OPS NULL
#endif

static struct platform_driver omap_l3_driver = {
        .probe          = omap_l3_probe,
        .driver         = {
                .name           = "omap_l3_noc",
                .pm             = L3_DEV_PM_OPS,
                .of_match_table = of_match_ptr(l3_noc_match),
        },
};

static int __init omap_l3_init(void)
{
        return platform_driver_register(&omap_l3_driver);
}
postcore_initcall_sync(omap_l3_init);

static void __exit omap_l3_exit(void)
{
        platform_driver_unregister(&omap_l3_driver);
}
module_exit(omap_l3_exit);