/*
 * drivers/pci/pcie/aer/aerdrv_core.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file implements the core part of PCI Express AER.  When a PCI Express
 * error is delivered, an error message is collected and printed to the
 * console, then an error recovery procedure is executed following the PCI
 * error recovery rules.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include "aerdrv.h"

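/*
 * Module parameters (behavior as implemented in this file):
 *
 *   forceload  - request native AER handling even when the platform has
 *                claimed firmware-first control; see aer_init(), which calls
 *                pcie_aer_force_firmware_first() when this is set.
 *   nosourceid - ignore the Requester ID reported by the Root Port and
 *                instead probe the AER status registers of devices below it;
 *                see is_error_source().
 */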
static bool forceload;
static bool nosourceid;
module_param(forceload, bool, 0);
module_param(nosourceid, bool, 0);

#define PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)

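/**
 * pci_enable_pcie_error_reporting - enable reporting of AER events
 * @dev: pointer to the pci_dev whose Device Control register is updated
 *
 * Sets the Correctable, Non-Fatal, Fatal and Unsupported Request reporting
 * enable bits.  Returns -EIO if the platform handles AER firmware-first for
 * this device or if the device has no AER capability.  Drivers typically
 * call this during probe.
 */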
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
		return -EIO;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);

int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);

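/**
 * pci_cleanup_aer_uncorrect_error_status - clear uncorrectable error status
 * @dev: pointer to the pci_dev whose AER status is cleared
 *
 * The Uncorrectable Error Status register is write-1-to-clear, so the value
 * just read is written back to acknowledge whatever is currently logged.
 */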
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos;
	u32 status;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);

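/**
 * pci_cleanup_aer_error_status_regs - clear all AER status registers
 * @dev: pointer to the pci_dev whose AER status is cleared
 *
 * Clears the Correctable and Uncorrectable Error Status registers, plus the
 * Root Error Status register when @dev is a Root Port.
 */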
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
	int pos;
	u32 status;
	int port_type;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	port_type = pci_pcie_type(dev);
	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
	}

	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}

/**
 * add_error_device - list device to be handled
 * @e_info: pointer to error info
 * @dev: pointer to pci_dev to be added
 */
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
		e_info->dev[e_info->error_dev_num] = dev;
		e_info->error_dev_num++;
		return 0;
	}
	return -ENOSPC;
}

/**
 * is_error_source - check whether the device is source of reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/*
	 * When the bus ID is equal to 0, it might be a bad ID
	 * reported by the Root Port.
	 */
	if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* Continue ID comparing if there is no multiple error */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 * 1) nosourceid == y;
	 * 2) the bus ID is equal to 0 (some ports might lose the bus ID
	 *    of the error source);
	 * 3) there are multiple errors and the prior ID comparison fails;
	 * we check the AER status registers to find a possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return false;

	/* Check if error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	if (status & ~mask)
		return true;

	return false;
}

static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... Stop iteration */
			/* TODO: Should print error message here? */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: detailed error information, such as the ID of the error source
 *
 * Return true if found.
 *
 * Invoked by the DPC handler when an error is detected at the Root Port.
 * The caller must set id, severity, and multi_error_valid of the
 * struct aer_err_info pointed to by @e_info properly.  This function fills
 * in e_info->error_dev_num and e_info->dev[] based on the given information.
 */
static bool find_source_device(struct pci_dev *parent,
			       struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is Root Port an agent that sends error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		dev_printk(KERN_DEBUG, &parent->dev,
			   "can't find device of ID%04x\n", e_info->id);
		return false;
	}
	return true;
}

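/*
 * pci_walk_bus() callbacks used by broadcast_error_message().  Each one
 * invokes the corresponding pci_error_handlers method of the device's
 * driver, if present, and merges the returned vote into the shared
 * struct aer_broadcast_data passed in via @data.
 */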
static int report_error_detected(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;

	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	dev->error_state = result_data->state;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->error_detected) {
		if (result_data->state == pci_channel_io_frozen &&
			!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
			/*
			 * In case of fatal recovery, if one of the
			 * downstream devices has no driver, we might be
			 * unable to recover because a later insmod of a
			 * driver for this device would be unaware of
			 * its hw state.
			 */
			dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
				   dev->driver ?
				   "no AER-aware driver" : "no driver");
		}

		/*
		 * If there's any device in the subtree that does not
		 * have an error_detected callback, returning
		 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
		 * the subsequent mmio_enabled/slot_reset/resume
		 * callbacks of "any" device in the subtree.  All the
		 * devices in the subtree are left in the error state
		 * without recovery.
		 */
		if (!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
		else
			vote = PCI_ERS_RESULT_NONE;
	} else {
		err_handler = dev->driver->err_handler;
		vote = err_handler->error_detected(dev, result_data->state);
	}

	result_data->result = merge_result(result_data->result, vote);
	device_unlock(&dev->dev);
	return 0;
}

static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;

	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->mmio_enabled)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->mmio_enabled(dev);
	result_data->result = merge_result(result_data->result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

static int report_slot_reset(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;

	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->slot_reset)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->slot_reset(dev);
	result_data->result = merge_result(result_data->result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

static int report_resume(struct pci_dev *dev, void *data)
{
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->resume)
		goto out;

	err_handler = dev->driver->err_handler;
	err_handler->resume(dev);
out:
	device_unlock(&dev->dev);
	return 0;
}

/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to the device in the hierarchy from which the message is
 *	broadcast downwards
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback to be broadcast
 *
 * Invoked during the error recovery process.  The error severity is
 * broadcast to all downstream drivers in the hierarchy in question.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
	enum pci_channel_state state,
	char *error_mesg,
	int (*cb)(struct pci_dev *, void *))
{
	struct aer_broadcast_data result_data;

	dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
	result_data.state = state;
	if (cb == report_error_detected)
		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
	else
		result_data.result = PCI_ERS_RESULT_RECOVERED;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/*
		 * If the error is reported by a bridge, we think this error
		 * is related to the downstream link of the bridge, so we
		 * do error recovery on all subordinates of the bridge instead
		 * of the bridge and clear the error status of the bridge.
		 */
		if (cb == report_error_detected)
			dev->error_state = state;
		pci_walk_bus(dev->subordinate, cb, &result_data);
		if (cb == report_resume) {
			pci_cleanup_aer_uncorrect_error_status(dev);
			dev->error_state = pci_channel_io_normal;
		}
	} else {
		/*
		 * If the error is reported by an end point, we think this
		 * error is related to the upstream link of the end point.
		 */
		if (state == pci_channel_io_normal)
			/*
			 * The error is non-fatal so the bus is OK; just invoke
			 * the callback for the function that logged the error.
			 */
			cb(dev, &result_data);
		else
			pci_walk_bus(dev->bus, cb, &result_data);
	}

	return result_data.result;
}

/**
 * default_reset_link - default reset function
 * @dev: pointer to pci_dev data structure
 *
 * Invoked when performing link reset on a Downstream Port or a
 * Root Port with no aer driver.
 */
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
	pci_reset_bridge_secondary_bus(dev);
	dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
	return PCI_ERS_RESULT_RECOVERED;
}

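/*
 * Look for the AER port-service driver bound to a port, so that its
 * reset_link() method, if it provides one, can be preferred over
 * default_reset_link() in reset_link() below.
 */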
static int find_aer_service_iter(struct device *device, void *data)
{
	struct pcie_port_service_driver *service_driver, **drv;

	drv = (struct pcie_port_service_driver **) data;

	if (device->bus == &pcie_port_bus_type && device->driver) {
		service_driver = to_service_driver(device->driver);
		if (service_driver->service == PCIE_PORT_SERVICE_AER) {
			*drv = service_driver;
			return 1;
		}
	}

	return 0;
}

static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
	struct pcie_port_service_driver *drv = NULL;

	device_for_each_child(&dev->dev, &drv, find_aer_service_iter);

	return drv;
}

static pci_ers_result_t reset_link(struct pci_dev *dev)
{
	struct pci_dev *udev;
	pci_ers_result_t status;
	struct pcie_port_service_driver *driver;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/* Reset this port for all subordinates */
		udev = dev;
	} else {
		/* Reset the upstream component (likely downstream port) */
		udev = dev->bus->self;
	}

	/* Prefer the AER driver of the component, if there is one */
	driver = find_aer_service(udev);
	if (driver && driver->reset_link) {
		status = driver->reset_link(udev);
	} else if (udev->has_secondary_link) {
		status = default_reset_link(udev);
	} else {
		dev_printk(KERN_DEBUG, &dev->dev,
			   "no link-reset support at upstream device %s\n",
			   pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (status != PCI_ERS_RESULT_RECOVERED) {
		dev_printk(KERN_DEBUG, &dev->dev,
			   "link reset at upstream device %s failed\n",
			   pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}

/**
 * do_recovery - handle nonfatal/fatal error recovery process
 * @dev: pointer to a pci_dev data structure of agent detecting an error
 * @severity: error severity type
 *
 * Invoked when an error is nonfatal/fatal.  Broadcasts an error_detected
 * message to all downstream drivers within the hierarchy in question and,
 * depending on the votes collected, walks them through the mmio_enabled,
 * slot_reset and resume steps of the PCI error recovery procedure.
 */
static void do_recovery(struct pci_dev *dev, int severity)
{
	pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
	enum pci_channel_state state;

	if (severity == AER_FATAL)
		state = pci_channel_io_frozen;
	else
		state = pci_channel_io_normal;

	status = broadcast_error_message(dev,
			state,
			"error_detected",
			report_error_detected);

	if (severity == AER_FATAL) {
		result = reset_link(dev);
		if (result != PCI_ERS_RESULT_RECOVERED)
			goto failed;
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER)
		status = broadcast_error_message(dev,
				state,
				"mmio_enabled",
				report_mmio_enabled);

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should call platform-specific
		 * functions to reset slot before calling
		 * drivers' slot_reset callbacks?
		 */
		status = broadcast_error_message(dev,
				state,
				"slot_reset",
				report_slot_reset);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	broadcast_error_message(dev,
			state,
			"resume",
			report_resume);

	dev_info(&dev->dev, "AER: Device recovery successful\n");
	return;

failed:
	/* TODO: Should kernel panic here? */
	dev_info(&dev->dev, "AER: Device recovery failed\n");
}

/**
 * handle_error_source - handle logging error into an event log
 * @aerdev: pointer to pcie_device data structure of the root port
 * @dev: pointer to pci_dev data structure of error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pcie_device *aerdev,
	struct pci_dev *dev,
	struct aer_err_info *info)
{
	int pos;

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * Correctable error does not need software intervention.
		 * No need to go through error recovery process.
		 */
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
		if (pos)
			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
					info->status);
	} else
		do_recovery(dev, info->severity);
}

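/*
 * Firmware-first path: when AER events are reported through ACPI APEI
 * (e.g. GHES) rather than the native Root Port interrupt, they are queued
 * into a small kfifo from the reporting context and recovered later from
 * process context by aer_recover_work_func().
 */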
#ifdef CONFIG_ACPI_APEI_PCIEAER
static void aer_recover_work_func(struct work_struct *work);

#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

struct aer_recover_entry {
	u8	bus;
	u8	devfn;
	u16	domain;
	int	severity;
	struct aer_capability_regs *regs;
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);

/*
 * Mutual exclusion for writers of aer_recover_ring.  The reader side needs
 * no lock, because there is only one reader and no lock is required between
 * the reader and the writers.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);

void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	unsigned long flags;
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	spin_lock_irqsave(&aer_recover_ring_lock, flags);
	if (kfifo_put(&aer_recover_ring, entry))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);

static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		do_recovery(pdev, entry.severity);
		pci_dev_put(pdev);
	}
}
#endif

/**
 * get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to structure to store the error record
 *
 * Return 1 on success, 0 on error.
 *
 * Note that @info is reused among all error devices.  Clear fields properly.
 */
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos, temp;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	/* The device might not support AER */
	if (!pos)
		return 1;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;
	} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
		info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
		info->first_error = PCI_ERR_CAP_FEP(temp);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}

static inline void aer_process_err_devices(struct pcie_device *p_device,
					   struct aer_err_info *e_info)
{
	int i;

	/*
	 * Report all errors before handling them, so that records are not
	 * lost due to a reset, etc.
	 */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(p_device, e_info->dev[i], e_info);
	}
}

/**
 * aer_isr_one_error - consume an error detected by root port
 * @p_device: pointer to error root port service device
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct pcie_device *p_device,
			      struct aer_err_source *e_src)
{
	struct aer_err_info *e_info;

	/* struct aer_err_info might be big, so we allocate it with slab */
	e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
	if (!e_info) {
		dev_printk(KERN_DEBUG, &p_device->port->dev,
			   "Can't allocate mem when processing AER errors\n");
		return;
	}

	/*
	 * It is possible that both a correctable error and an
	 * uncorrectable error are logged.  Report the correctable
	 * error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info->id = ERR_COR_ID(e_src->id);
		e_info->severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info->id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info->severity = AER_FATAL;
		else
			e_info->severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	kfree(e_info);
}

/**
 * get_e_source - retrieve an error source
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to store retrieved error source
 *
 * Return 1 if an error source is retrieved, otherwise 0.
 *
 * Invoked by DPC handler to consume an error.
 */
static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
{
	unsigned long flags;

	/* Lock access to Root error producer/consumer index */
	spin_lock_irqsave(&rpc->e_lock, flags);
	if (rpc->prod_idx == rpc->cons_idx) {
		spin_unlock_irqrestore(&rpc->e_lock, flags);
		return 0;
	}

	*e_src = rpc->e_sources[rpc->cons_idx];
	rpc->cons_idx++;
	if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
		rpc->cons_idx = 0;
	spin_unlock_irqrestore(&rpc->e_lock, flags);

	return 1;
}

/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as deferred work (DPC), when the root port records a newly
 * detected error.
 */
void aer_isr(struct work_struct *work)
{
	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
	struct pcie_device *p_device = rpc->rpd;
	struct aer_err_source uninitialized_var(e_src);

	mutex_lock(&rpc->rpc_mutex);
	while (get_e_source(rpc, &e_src))
		aer_isr_one_error(p_device, &e_src);
	mutex_unlock(&rpc->rpc_mutex);
}

/**
 * aer_init - provide AER initialization
 * @dev: pointer to AER pcie device
 *
 * Invoked when AER service driver is loaded.
 */
int aer_init(struct pcie_device *dev)
{
	if (forceload) {
		dev_printk(KERN_DEBUG, &dev->device,
			   "aerdrv forceload requested.\n");
		pcie_aer_force_firmware_first(dev->port, 0);
	}
	return 0;
}