einj.c

/*
 * APEI Error INJection support
 *
 * EINJ provides a hardware error injection mechanism; it is useful
 * for debugging and testing of other APEI and RAS features.
 *
 * For more information about EINJ, please refer to ACPI Specification
 * version 4.0, section 17.5.
 *
 * Copyright 2009-2010 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/unaligned.h>

#include "apei-internal.h"

#define EINJ_PFX "EINJ: "

#define SPIN_UNIT		100		/* 100ns */
/* Firmware should respond within 1 millisecond */
#define FIRMWARE_TIMEOUT	(1 * NSEC_PER_MSEC)
#define ACPI5_VENDOR_BIT	BIT(31)
#define MEM_ERROR_MASK		(ACPI_EINJ_MEMORY_CORRECTABLE | \
				ACPI_EINJ_MEMORY_UNCORRECTABLE | \
				ACPI_EINJ_MEMORY_FATAL)

/*
 * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
 */
static int acpi5;

struct set_error_type_with_address {
	u32 type;
	u32 vendor_extension;
	u32 flags;
	u32 apicid;
	u64 memory_address;
	u64 memory_address_range;
	u32 pcie_sbdf;
};
enum {
	SETWA_FLAGS_APICID = 1,
	SETWA_FLAGS_MEM = 2,
	SETWA_FLAGS_PCIE_SBDF = 4,
};

/*
 * Vendor extensions for platform specific operations
 */
struct vendor_error_type_extension {
	u32 length;
	u32 pcie_sbdf;
	u16 vendor_id;
	u16 device_id;
	u8 rev_id;
	u8 reserved[3];
};

static u32 notrigger;

static u32 vendor_flags;
static struct debugfs_blob_wrapper vendor_blob;
static char vendor_dev[64];

/*
 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
 * EINJ table through an unpublished extension. Use with caution as
 * most will ignore the parameter and make their own choice of address
 * for error injection.  This extension is used only if the
 * param_extension module parameter is specified.
 */
struct einj_parameter {
	u64 type;
	u64 reserved1;
	u64 reserved2;
	u64 param1;
	u64 param2;
};

#define EINJ_OP_BUSY			0x1
#define EINJ_STATUS_SUCCESS		0x0
#define EINJ_STATUS_FAIL		0x1
#define EINJ_STATUS_INVAL		0x2

#define EINJ_TAB_ENTRY(tab)						\
	((struct acpi_whea_header *)((char *)(tab) +			\
				     sizeof(struct acpi_table_einj)))

static bool param_extension;
module_param(param_extension, bool, 0);

static struct acpi_table_einj *einj_tab;

static struct apei_resources einj_resources;

static struct apei_exec_ins_type einj_ins_type[] = {
	[ACPI_EINJ_READ_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run   = apei_exec_read_register,
	},
	[ACPI_EINJ_READ_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run   = apei_exec_read_register_value,
	},
	[ACPI_EINJ_WRITE_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run   = apei_exec_write_register,
	},
	[ACPI_EINJ_WRITE_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run   = apei_exec_write_register_value,
	},
	[ACPI_EINJ_NOOP] = {
		.flags = 0,
		.run   = apei_exec_noop,
	},
};

/*
 * Prevent the EINJ interpreter from running simultaneously with itself,
 * because the corresponding firmware implementation may not work
 * properly when invoked concurrently.
 */
static DEFINE_MUTEX(einj_mutex);

static void *einj_param;

static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
	apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
			   EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
}
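
/*
 * Run the GET_ERROR_TYPE action and return the bit mask of error types
 * the platform can inject.  The caller must hold einj_mutex; use the
 * einj_get_available_error_type() wrapper below for the locked version.
 */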
static int __einj_get_available_error_type(u32 *type)
{
	struct apei_exec_context ctx;
	int rc;

	einj_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
	if (rc)
		return rc;
	*type = apei_exec_ctx_get_output(&ctx);

	return 0;
}

/* Get error injection capabilities of the platform */
static int einj_get_available_error_type(u32 *type)
{
	int rc;

	mutex_lock(&einj_mutex);
	rc = __einj_get_available_error_type(type);
	mutex_unlock(&einj_mutex);

	return rc;
}
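
/*
 * Busy-wait helper used while polling CHECK_BUSY_STATUS: burn SPIN_UNIT
 * nanoseconds of the remaining budget *t per call, and report a timeout
 * (with a firmware warning) once the budget is exhausted.
 */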
static int einj_timedout(u64 *t)
{
	if ((s64)*t < SPIN_UNIT) {
		pr_warning(FW_WARN EINJ_PFX
			   "Firmware does not respond in time\n");
		return 1;
	}
	*t -= SPIN_UNIT;
	ndelay(SPIN_UNIT);
	touch_nmi_watchdog();
	return 0;
}
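
/*
 * If the SET_ERROR_TYPE_WITH_ADDRESS parameter block advertises a vendor
 * extension, map it and cache a human readable "seg:bus:dev.fn
 * vendor_id=... device_id=... rev_id=..." string in vendor_dev, so it
 * can later be exported through the debugfs "vendor" blob.
 */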
static void check_vendor_extension(u64 paddr,
				   struct set_error_type_with_address *v5param)
{
	int offset = v5param->vendor_extension;
	struct vendor_error_type_extension *v;
	u32 sbdf;

	if (!offset)
		return;
	v = acpi_os_map_iomem(paddr + offset, sizeof(*v));
	if (!v)
		return;
	sbdf = v->pcie_sbdf;
	sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
		sbdf >> 24, (sbdf >> 16) & 0xff,
		(sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
		v->vendor_id, v->device_id, v->rev_id);
	acpi_os_unmap_iomem(v, sizeof(*v));
}
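
/*
 * Scan the EINJ action table for the physical address targeted by the
 * SET_ERROR_TYPE (v4 "param_extension") and SET_ERROR_TYPE_WITH_ADDRESS
 * (ACPI 5.0) actions, map whichever parameter block is available and
 * return it.  A successful v5 mapping also sets acpi5.
 */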
static void *einj_get_parameter_address(void)
{
	int i;
	u64 pa_v4 = 0, pa_v5 = 0;
	struct acpi_whea_header *entry;

	entry = EINJ_TAB_ENTRY(einj_tab);
	for (i = 0; i < einj_tab->entries; i++) {
		if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
		    entry->register_region.space_id ==
		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
			pa_v4 = get_unaligned(&entry->register_region.address);
		if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
		    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
		    entry->register_region.space_id ==
		    ACPI_ADR_SPACE_SYSTEM_MEMORY)
			pa_v5 = get_unaligned(&entry->register_region.address);
		entry++;
	}
	if (pa_v5) {
		struct set_error_type_with_address *v5param;

		v5param = acpi_os_map_iomem(pa_v5, sizeof(*v5param));
		if (v5param) {
			acpi5 = 1;
			check_vendor_extension(pa_v5, v5param);
			return v5param;
		}
	}
	if (param_extension && pa_v4) {
		struct einj_parameter *v4param;

		v4param = acpi_os_map_iomem(pa_v4, sizeof(*v4param));
		if (!v4param)
			return NULL;
		if (v4param->reserved1 || v4param->reserved2) {
			acpi_os_unmap_iomem(v4param, sizeof(*v4param));
			return NULL;
		}
		return v4param;
	}

	return NULL;
}

/* Sanity-check the trigger error action table header */
static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
{
	if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
		return -EINVAL;
	if (trigger_tab->table_size > PAGE_SIZE ||
	    trigger_tab->table_size < trigger_tab->header_size)
		return -EINVAL;
	if (trigger_tab->entry_count !=
	    (trigger_tab->table_size - trigger_tab->header_size) /
	    sizeof(struct acpi_einj_entry))
		return -EINVAL;

	return 0;
}
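
/*
 * Find the WRITE_REGISTER_VALUE entry in the trigger table whose target
 * system-memory address matches the injection address (param1 under the
 * mask param2), so that region can be excluded from the trigger table's
 * resource request.
 */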
static struct acpi_generic_address *einj_get_trigger_parameter_region(
	struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
{
	int i;
	struct acpi_whea_header *entry;

	entry = (struct acpi_whea_header *)
		((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
	for (i = 0; i < trigger_tab->entry_count; i++) {
		if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
		    entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
		    entry->register_region.space_id ==
			    ACPI_ADR_SPACE_SYSTEM_MEMORY &&
		    (entry->register_region.address & param2) == (param1 & param2))
			return &entry->register_region;
		entry++;
	}

	return NULL;
}

/* Execute instructions in trigger error action table */
static int __einj_error_trigger(u64 trigger_paddr, u32 type,
				u64 param1, u64 param2)
{
	struct acpi_einj_trigger *trigger_tab = NULL;
	struct apei_exec_context trigger_ctx;
	struct apei_resources trigger_resources;
	struct acpi_whea_header *trigger_entry;
	struct resource *r;
	u32 table_size;
	int rc = -EIO;
	struct acpi_generic_address *trigger_param_region = NULL;

	r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
			       "APEI EINJ Trigger Table");
	if (!r) {
		pr_err(EINJ_PFX
		       "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
		       (unsigned long long)trigger_paddr,
		       (unsigned long long)trigger_paddr +
			       sizeof(*trigger_tab) - 1);
		goto out;
	}
	trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
	if (!trigger_tab) {
		pr_err(EINJ_PFX "Failed to map trigger table!\n");
		goto out_rel_header;
	}
	rc = einj_check_trigger_header(trigger_tab);
	if (rc) {
		pr_warning(FW_BUG EINJ_PFX
			   "The trigger error action table is invalid\n");
		goto out_rel_header;
	}

	/* No action structures in the TRIGGER_ERROR table, nothing to do */
	if (!trigger_tab->entry_count)
		goto out_rel_header;

	rc = -EIO;
	table_size = trigger_tab->table_size;
	r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
			       table_size - sizeof(*trigger_tab),
			       "APEI EINJ Trigger Table");
	if (!r) {
		pr_err(EINJ_PFX
		       "Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
		       (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
		       (unsigned long long)trigger_paddr + table_size - 1);
		goto out_rel_header;
	}
	iounmap(trigger_tab);
	trigger_tab = ioremap_cache(trigger_paddr, table_size);
	if (!trigger_tab) {
		pr_err(EINJ_PFX "Failed to map trigger table!\n");
		goto out_rel_entry;
	}
	trigger_entry = (struct acpi_whea_header *)
		((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
	apei_resources_init(&trigger_resources);
	apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
			   ARRAY_SIZE(einj_ins_type),
			   trigger_entry, trigger_tab->entry_count);
	rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
	if (rc)
		goto out_fini;
	rc = apei_resources_sub(&trigger_resources, &einj_resources);
	if (rc)
		goto out_fini;
	/*
	 * Some firmware will access the target address specified in
	 * param1 to trigger the error when injecting a memory error.
	 * This would conflict with regular memory resources, so remove
	 * it from the trigger table resources.
	 */
	if ((param_extension || acpi5) && (type & MEM_ERROR_MASK) && param2) {
		struct apei_resources addr_resources;

		apei_resources_init(&addr_resources);
		trigger_param_region = einj_get_trigger_parameter_region(
			trigger_tab, param1, param2);
		if (trigger_param_region) {
			rc = apei_resources_add(&addr_resources,
				trigger_param_region->address,
				trigger_param_region->bit_width/8, true);
			if (rc)
				goto out_fini;
			rc = apei_resources_sub(&trigger_resources,
					&addr_resources);
		}
		apei_resources_fini(&addr_resources);
		if (rc)
			goto out_fini;
	}
	rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
	if (rc)
		goto out_fini;
	rc = apei_exec_pre_map_gars(&trigger_ctx);
	if (rc)
		goto out_release;

	rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);

	apei_exec_post_unmap_gars(&trigger_ctx);
out_release:
	apei_resources_release(&trigger_resources);
out_fini:
	apei_resources_fini(&trigger_resources);
out_rel_entry:
	release_mem_region(trigger_paddr + sizeof(*trigger_tab),
			   table_size - sizeof(*trigger_tab));
out_rel_header:
	release_mem_region(trigger_paddr, sizeof(*trigger_tab));
out:
	if (trigger_tab)
		iounmap(trigger_tab);

	return rc;
}
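
/*
 * Core injection sequence, called with einj_mutex held: run
 * ACPI_EINJ_BEGIN_OPERATION, fill in the error type (via the v5
 * SET_ERROR_TYPE_WITH_ADDRESS parameter block, or SET_ERROR_TYPE plus the
 * optional v4 parameter block), then EXECUTE_OPERATION, poll
 * CHECK_BUSY_STATUS until firmware is done, check GET_COMMAND_STATUS,
 * optionally run the trigger table and finish with END_OPERATION.
 */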
static int __einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
			       u64 param3, u64 param4)
{
	struct apei_exec_context ctx;
	u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
	int rc;

	einj_exec_ctx_init(&ctx);

	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_BEGIN_OPERATION);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, type);
	if (acpi5) {
		struct set_error_type_with_address *v5param = einj_param;

		v5param->type = type;
		if (type & ACPI5_VENDOR_BIT) {
			switch (vendor_flags) {
			case SETWA_FLAGS_APICID:
				v5param->apicid = param1;
				break;
			case SETWA_FLAGS_MEM:
				v5param->memory_address = param1;
				v5param->memory_address_range = param2;
				break;
			case SETWA_FLAGS_PCIE_SBDF:
				v5param->pcie_sbdf = param1;
				break;
			}
			v5param->flags = vendor_flags;
		} else if (flags) {
			v5param->flags = flags;
			v5param->memory_address = param1;
			v5param->memory_address_range = param2;
			v5param->apicid = param3;
			v5param->pcie_sbdf = param4;
		} else {
			switch (type) {
			case ACPI_EINJ_PROCESSOR_CORRECTABLE:
			case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
			case ACPI_EINJ_PROCESSOR_FATAL:
				v5param->apicid = param1;
				v5param->flags = SETWA_FLAGS_APICID;
				break;
			case ACPI_EINJ_MEMORY_CORRECTABLE:
			case ACPI_EINJ_MEMORY_UNCORRECTABLE:
			case ACPI_EINJ_MEMORY_FATAL:
				v5param->memory_address = param1;
				v5param->memory_address_range = param2;
				v5param->flags = SETWA_FLAGS_MEM;
				break;
			case ACPI_EINJ_PCIX_CORRECTABLE:
			case ACPI_EINJ_PCIX_UNCORRECTABLE:
			case ACPI_EINJ_PCIX_FATAL:
				v5param->pcie_sbdf = param1;
				v5param->flags = SETWA_FLAGS_PCIE_SBDF;
				break;
			}
		}
	} else {
		rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
		if (rc)
			return rc;
		if (einj_param) {
			struct einj_parameter *v4param = einj_param;

			v4param->param1 = param1;
			v4param->param2 = param2;
		}
	}
	rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
	if (rc)
		return rc;
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!(val & EINJ_OP_BUSY))
			break;
		if (einj_timedout(&timeout))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	if (val != EINJ_STATUS_SUCCESS)
		return -EBUSY;

	rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
	if (rc)
		return rc;
	trigger_paddr = apei_exec_ctx_get_output(&ctx);
	if (notrigger == 0) {
		rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
		if (rc)
			return rc;
	}
	rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);

	return rc;
}

/* Inject the specified hardware error */
static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
			     u64 param3, u64 param4)
{
	int rc;
	unsigned long pfn;

	/* If the user manually set "flags", make sure it is legal */
	if (flags && (flags &
		~(SETWA_FLAGS_APICID|SETWA_FLAGS_MEM|SETWA_FLAGS_PCIE_SBDF)))
		return -EINVAL;
	/*
	 * We need extra sanity checks for memory errors.
	 * Other types go directly to injection.
	 */

	/* ensure param1/param2 exist */
	if (!(param_extension || acpi5))
		goto inject;
	/* ensure injection is memory related */
	if (type & ACPI5_VENDOR_BIT) {
		if (vendor_flags != SETWA_FLAGS_MEM)
			goto inject;
	} else if (!(type & MEM_ERROR_MASK) && !(flags & SETWA_FLAGS_MEM))
		goto inject;

	/*
	 * Disallow crazy address masks that give BIOS leeway to pick
	 * injection address almost anywhere. Insist on page or
	 * better granularity and that target address is normal RAM.
	 */
	pfn = PFN_DOWN(param1 & param2);
	if (!page_is_ram(pfn) || ((param2 & PAGE_MASK) != PAGE_MASK))
		return -EINVAL;

inject:
	mutex_lock(&einj_mutex);
	rc = __einj_error_inject(type, flags, param1, param2, param3, param4);
	mutex_unlock(&einj_mutex);

	return rc;
}

static u32 error_type;
static u32 error_flags;
static u64 error_param1;
static u64 error_param2;
static u64 error_param3;
static u64 error_param4;
static struct dentry *einj_debug_dir;
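
/*
 * Debugfs interface (created below in an "einj" directory under the APEI
 * debugfs directory).  A typical memory-error injection, assuming debugfs
 * is mounted at /sys/kernel/debug and the platform reports type 0x8 as
 * available, looks roughly like:
 *
 *	# cd /sys/kernel/debug/apei/einj
 *	# cat available_error_type		# list injectable types
 *	# echo 0x8 > error_type			# Memory Correctable
 *	# echo 0x<physical address> > param1	# target address
 *	# echo 0xfffffffffffff000 > param2	# page-granularity mask
 *	# echo 1 > error_inject			# fire the injection
 *
 * The parameter files (flags, param1..param4, notrigger) only appear when
 * a parameter interface is available (ACPI 5 or param_extension).
 */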
static int available_error_type_show(struct seq_file *m, void *v)
{
	int rc;
	u32 available_error_type = 0;

	rc = einj_get_available_error_type(&available_error_type);
	if (rc)
		return rc;
	if (available_error_type & 0x0001)
		seq_printf(m, "0x00000001\tProcessor Correctable\n");
	if (available_error_type & 0x0002)
		seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
	if (available_error_type & 0x0004)
		seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
	if (available_error_type & 0x0008)
		seq_printf(m, "0x00000008\tMemory Correctable\n");
	if (available_error_type & 0x0010)
		seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
	if (available_error_type & 0x0020)
		seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
	if (available_error_type & 0x0040)
		seq_printf(m, "0x00000040\tPCI Express Correctable\n");
	if (available_error_type & 0x0080)
		seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
	if (available_error_type & 0x0100)
		seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
	if (available_error_type & 0x0200)
		seq_printf(m, "0x00000200\tPlatform Correctable\n");
	if (available_error_type & 0x0400)
		seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
	if (available_error_type & 0x0800)
		seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");

	return 0;
}

static int available_error_type_open(struct inode *inode, struct file *file)
{
	return single_open(file, available_error_type_show, NULL);
}

static const struct file_operations available_error_type_fops = {
	.open		= available_error_type_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int error_type_get(void *data, u64 *val)
{
	*val = error_type;

	return 0;
}

static int error_type_set(void *data, u64 val)
{
	int rc;
	u32 available_error_type = 0;
	u32 tval, vendor;

	/*
	 * Vendor defined types have 0x80000000 bit set, and
	 * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE
	 */
	vendor = val & ACPI5_VENDOR_BIT;
	tval = val & 0x7fffffff;

	/* Only one error type can be specified */
	if (tval & (tval - 1))
		return -EINVAL;
	if (!vendor) {
		rc = einj_get_available_error_type(&available_error_type);
		if (rc)
			return rc;
		if (!(val & available_error_type))
			return -EINVAL;
	}
	error_type = val;

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
			error_type_set, "0x%llx\n");

static int error_inject_set(void *data, u64 val)
{
	if (!error_type)
		return -EINVAL;

	return einj_error_inject(error_type, error_flags, error_param1, error_param2,
		error_param3, error_param4);
}

DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
			error_inject_set, "%llu\n");
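
/*
 * Sanity-check the EINJ table itself: accept both common interpretations
 * of header_length (with or without the ACPI table header included), and
 * make sure the entry count is consistent with the overall table length.
 */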
static int einj_check_table(struct acpi_table_einj *einj_tab)
{
	if ((einj_tab->header_length !=
	     (sizeof(struct acpi_table_einj) - sizeof(einj_tab->header)))
	    && (einj_tab->header_length != sizeof(struct acpi_table_einj)))
		return -EINVAL;
	if (einj_tab->header.length < sizeof(struct acpi_table_einj))
		return -EINVAL;
	if (einj_tab->entries !=
	    (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
	    sizeof(struct acpi_einj_entry))
		return -EINVAL;

	return 0;
}
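
/*
 * Module init: locate and validate the ACPI EINJ table, create the debugfs
 * files, claim the resources referenced by the EINJ action entries, pre-map
 * their registers and, if present, map the SET_ERROR_TYPE parameter block.
 */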
static int __init einj_init(void)
{
	int rc;
	acpi_status status;
	struct dentry *fentry;
	struct apei_exec_context ctx;

	if (acpi_disabled)
		return -ENODEV;

	status = acpi_get_table(ACPI_SIG_EINJ, 0,
				(struct acpi_table_header **)&einj_tab);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);
		pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
		return -EINVAL;
	}

	rc = einj_check_table(einj_tab);
	if (rc) {
		pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
		return -EINVAL;
	}

	rc = -ENOMEM;
	einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
	if (!einj_debug_dir)
		goto err_cleanup;
	fentry = debugfs_create_file("available_error_type", S_IRUSR,
				     einj_debug_dir, NULL,
				     &available_error_type_fops);
	if (!fentry)
		goto err_cleanup;
	fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
				     einj_debug_dir, NULL, &error_type_fops);
	if (!fentry)
		goto err_cleanup;
	fentry = debugfs_create_file("error_inject", S_IWUSR,
				     einj_debug_dir, NULL, &error_inject_fops);
	if (!fentry)
		goto err_cleanup;

	apei_resources_init(&einj_resources);
	einj_exec_ctx_init(&ctx);
	rc = apei_exec_collect_resources(&ctx, &einj_resources);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&einj_resources, "APEI EINJ");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(&ctx);
	if (rc)
		goto err_release;

	rc = -ENOMEM;
	einj_param = einj_get_parameter_address();
	if ((param_extension || acpi5) && einj_param) {
		fentry = debugfs_create_x32("flags", S_IRUSR | S_IWUSR,
					    einj_debug_dir, &error_flags);
		if (!fentry)
			goto err_unmap;
		fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
					    einj_debug_dir, &error_param1);
		if (!fentry)
			goto err_unmap;
		fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
					    einj_debug_dir, &error_param2);
		if (!fentry)
			goto err_unmap;
		fentry = debugfs_create_x64("param3", S_IRUSR | S_IWUSR,
					    einj_debug_dir, &error_param3);
		if (!fentry)
			goto err_unmap;
		fentry = debugfs_create_x64("param4", S_IRUSR | S_IWUSR,
					    einj_debug_dir, &error_param4);
		if (!fentry)
			goto err_unmap;
		fentry = debugfs_create_x32("notrigger", S_IRUSR | S_IWUSR,
					    einj_debug_dir, &notrigger);
		if (!fentry)
			goto err_unmap;
	}

	if (vendor_dev[0]) {
		vendor_blob.data = vendor_dev;
		vendor_blob.size = strlen(vendor_dev);
		fentry = debugfs_create_blob("vendor", S_IRUSR,
					     einj_debug_dir, &vendor_blob);
		if (!fentry)
			goto err_unmap;
		fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
					    einj_debug_dir, &vendor_flags);
		if (!fentry)
			goto err_unmap;
	}

	pr_info(EINJ_PFX "Error INJection is initialized.\n");

	return 0;

err_unmap:
	if (einj_param) {
		acpi_size size = (acpi5) ?
			sizeof(struct set_error_type_with_address) :
			sizeof(struct einj_parameter);

		acpi_os_unmap_iomem(einj_param, size);
	}
	apei_exec_post_unmap_gars(&ctx);
err_release:
	apei_resources_release(&einj_resources);
err_fini:
	apei_resources_fini(&einj_resources);
err_cleanup:
	debugfs_remove_recursive(einj_debug_dir);

	return rc;
}
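
/* Module exit: undo einj_init() in reverse order. */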
static void __exit einj_exit(void)
{
	struct apei_exec_context ctx;

	if (einj_param) {
		acpi_size size = (acpi5) ?
			sizeof(struct set_error_type_with_address) :
			sizeof(struct einj_parameter);

		acpi_os_unmap_iomem(einj_param, size);
	}
	einj_exec_ctx_init(&ctx);
	apei_exec_post_unmap_gars(&ctx);
	apei_resources_release(&einj_resources);
	apei_resources_fini(&einj_resources);
	debugfs_remove_recursive(einj_debug_dir);
}

module_init(einj_init);
module_exit(einj_exit);

MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Error INJection support");
MODULE_LICENSE("GPL");