/*
 * acpi_ipmi.c - ACPI IPMI opregion
 *
 * Copyright (C) 2010, 2013 Intel Corporation
 *   Author: Zhao Yakui <yakui.zhao@intel.com>
 *           Lv Zheng <lv.zheng@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/ipmi.h>
#include <linux/spinlock.h>

MODULE_AUTHOR("Zhao Yakui");
MODULE_DESCRIPTION("ACPI IPMI Opregion driver");
MODULE_LICENSE("GPL");

#define ACPI_IPMI_OK			0
#define ACPI_IPMI_TIMEOUT		0x10
#define ACPI_IPMI_UNKNOWN		0x07
/* the IPMI timeout is 5s */
#define IPMI_TIMEOUT			(5000)
#define ACPI_IPMI_MAX_MSG_LENGTH	64

struct acpi_ipmi_device {
        /* the device list attached to driver_data.ipmi_devices */
        struct list_head head;

        /* the IPMI request message list */
        struct list_head tx_msg_list;

        spinlock_t tx_msg_lock;
        acpi_handle handle;
        struct device *dev;
        ipmi_user_t user_interface;
        int ipmi_ifnum; /* IPMI interface number */
        long curr_msgid;
        bool dead;
        struct kref kref;
};

struct ipmi_driver_data {
        struct list_head ipmi_devices;
        struct ipmi_smi_watcher bmc_events;
        struct ipmi_user_hndl ipmi_hndlrs;
        struct mutex ipmi_lock;

        /*
         * NOTE: IPMI System Interface Selection
         * There is no system interface specified by the IPMI operation
         * region access. We try to select one system interface with an
         * ACPI handle set. IPMI messages passed from the ACPI code are
         * sent to this selected global IPMI system interface.
         */
        struct acpi_ipmi_device *selected_smi;
};

struct acpi_ipmi_msg {
        struct list_head head;

        /*
         * Generally speaking, the addr type should be SI_ADDR_TYPE and
         * the addr channel should be BMC. In fact the addr can also be
         * of IPMB type, but then it would have to be parsed out of the
         * NetFn/command buffer, which is complex enough that it is
         * skipped here.
         */
        struct ipmi_addr addr;
        long tx_msgid;

        /* used to track whether the IPMI message is finished */
        struct completion tx_complete;

        struct kernel_ipmi_msg tx_message;
        int msg_done;

        /* tx/rx data, copied from/to the ACPI object buffer */
        u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
        u8 rx_len;

        struct acpi_ipmi_device *device;
        struct kref kref;
};

/* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */
struct acpi_ipmi_buffer {
        u8 status;
        u8 length;
        u8 data[ACPI_IPMI_MAX_MSG_LENGTH];
};
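
/*
 * Hypothetical ASL usage (illustrative only, not part of this file):
 * firmware declares an IPMI operation region whose base address encodes
 * the network function and whose field offsets encode the command, then
 * exchanges the acpi_ipmi_buffer layout above through BufferAcc field
 * accesses:
 *
 *     OperationRegion (SYSI, IPMI, 0x3000, 0x100)   // NetFn 0x30
 *     Field (SYSI, BufferAcc, Lock, Preserve)
 *     {
 *         Offset (0x66),
 *         CMD0, 8                                   // command 0x66
 *     }
 */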

static void ipmi_register_bmc(int iface, struct device *dev);
static void ipmi_bmc_gone(int iface);
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data);

static struct ipmi_driver_data driver_data = {
        .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices),
        .bmc_events = {
                .owner = THIS_MODULE,
                .new_smi = ipmi_register_bmc,
                .smi_gone = ipmi_bmc_gone,
        },
        .ipmi_hndlrs = {
                .ipmi_recv_hndl = ipmi_msg_handler,
        },
        .ipmi_lock = __MUTEX_INITIALIZER(driver_data.ipmi_lock)
};

static struct acpi_ipmi_device *
ipmi_dev_alloc(int iface, struct device *dev, acpi_handle handle)
{
        struct acpi_ipmi_device *ipmi_device;
        int err;
        ipmi_user_t user;

        ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL);
        if (!ipmi_device)
                return NULL;

        kref_init(&ipmi_device->kref);
        INIT_LIST_HEAD(&ipmi_device->head);
        INIT_LIST_HEAD(&ipmi_device->tx_msg_list);
        spin_lock_init(&ipmi_device->tx_msg_lock);
        ipmi_device->handle = handle;
        ipmi_device->dev = get_device(dev);
        ipmi_device->ipmi_ifnum = iface;

        err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs,
                               ipmi_device, &user);
        if (err) {
                put_device(dev);
                kfree(ipmi_device);
                return NULL;
        }
        ipmi_device->user_interface = user;

        return ipmi_device;
}

static void ipmi_dev_release(struct acpi_ipmi_device *ipmi_device)
{
        ipmi_destroy_user(ipmi_device->user_interface);
        put_device(ipmi_device->dev);
        kfree(ipmi_device);
}

static void ipmi_dev_release_kref(struct kref *kref)
{
        struct acpi_ipmi_device *ipmi =
                container_of(kref, struct acpi_ipmi_device, kref);

        ipmi_dev_release(ipmi);
}

static void __ipmi_dev_kill(struct acpi_ipmi_device *ipmi_device)
{
        list_del(&ipmi_device->head);
        if (driver_data.selected_smi == ipmi_device)
                driver_data.selected_smi = NULL;

        /*
         * The dead flag must always be set after deleting the device
         * from the list; otherwise the list_for_each_entry() users
         * would have to be changed as well.
         */
        ipmi_device->dead = true;
}
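
/*
 * Device reference counting as implemented above: ipmi_dev_alloc()
 * initializes the kref that is effectively held by the
 * driver_data.ipmi_devices list; acpi_ipmi_dev_get() takes an extra
 * reference on the currently selected interface, and acpi_ipmi_dev_put()
 * drops one reference, releasing the device via ipmi_dev_release() on
 * the last put.
 */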

static struct acpi_ipmi_device *acpi_ipmi_dev_get(void)
{
        struct acpi_ipmi_device *ipmi_device = NULL;

        mutex_lock(&driver_data.ipmi_lock);
        if (driver_data.selected_smi) {
                ipmi_device = driver_data.selected_smi;
                kref_get(&ipmi_device->kref);
        }
        mutex_unlock(&driver_data.ipmi_lock);

        return ipmi_device;
}

static void acpi_ipmi_dev_put(struct acpi_ipmi_device *ipmi_device)
{
        kref_put(&ipmi_device->kref, ipmi_dev_release_kref);
}

static struct acpi_ipmi_msg *ipmi_msg_alloc(void)
{
        struct acpi_ipmi_device *ipmi;
        struct acpi_ipmi_msg *ipmi_msg;

        ipmi = acpi_ipmi_dev_get();
        if (!ipmi)
                return NULL;

        ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL);
        if (!ipmi_msg) {
                acpi_ipmi_dev_put(ipmi);
                return NULL;
        }

        kref_init(&ipmi_msg->kref);
        init_completion(&ipmi_msg->tx_complete);
        INIT_LIST_HEAD(&ipmi_msg->head);
        ipmi_msg->device = ipmi;
        ipmi_msg->msg_done = ACPI_IPMI_UNKNOWN;

        return ipmi_msg;
}

static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg)
{
        acpi_ipmi_dev_put(tx_msg->device);
        kfree(tx_msg);
}

static void ipmi_msg_release_kref(struct kref *kref)
{
        struct acpi_ipmi_msg *tx_msg =
                container_of(kref, struct acpi_ipmi_msg, kref);

        ipmi_msg_release(tx_msg);
}

static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg)
{
        kref_get(&tx_msg->kref);
        return tx_msg;
}

static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg)
{
        kref_put(&tx_msg->kref, ipmi_msg_release_kref);
}

#define IPMI_OP_RGN_NETFN(offset)	(((offset) >> 8) & 0xff)
#define IPMI_OP_RGN_CMD(offset)		((offset) & 0xff)
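
/*
 * Example: a virtual address of 0x3066 inside an IPMI operation region
 * decodes to NetFn 0x30 and command 0x66.
 */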

static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg,
                                    acpi_physical_address address,
                                    acpi_integer *value)
{
        struct kernel_ipmi_msg *msg;
        struct acpi_ipmi_buffer *buffer;
        struct acpi_ipmi_device *device;
        unsigned long flags;

        msg = &tx_msg->tx_message;

        /*
         * IPMI network function and command are encoded in the address
         * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3.
         */
        msg->netfn = IPMI_OP_RGN_NETFN(address);
        msg->cmd = IPMI_OP_RGN_CMD(address);
        msg->data = tx_msg->data;

        /*
         * value is the parameter passed by the IPMI opregion space handler.
         * It points to the IPMI request message buffer.
         */
        buffer = (struct acpi_ipmi_buffer *)value;

        /* copy the tx message data */
        if (buffer->length > ACPI_IPMI_MAX_MSG_LENGTH) {
                dev_WARN_ONCE(tx_msg->device->dev, true,
                              "Unexpected request (msg len %d).\n",
                              buffer->length);
                return -EINVAL;
        }

        msg->data_len = buffer->length;
        memcpy(tx_msg->data, buffer->data, msg->data_len);

        /*
         * now the default type is SYSTEM_INTERFACE and channel type is BMC.
         * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE,
         * the addr type should be changed to IPMB. Then we will have to
         * parse the IPMI request message buffer to get the IPMB address.
         * If so, please fix me.
         */
        tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
        tx_msg->addr.channel = IPMI_BMC_CHANNEL;
        tx_msg->addr.data[0] = 0;

        /* Get the msgid */
        device = tx_msg->device;

        spin_lock_irqsave(&device->tx_msg_lock, flags);
        device->curr_msgid++;
        tx_msg->tx_msgid = device->curr_msgid;
        spin_unlock_irqrestore(&device->tx_msg_lock, flags);

        return 0;
}

static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg,
                                      acpi_integer *value)
{
        struct acpi_ipmi_buffer *buffer;

        /*
         * value is also used as an output parameter. It represents the
         * response IPMI message returned by the IPMI command.
         */
        buffer = (struct acpi_ipmi_buffer *)value;

        /*
         * If the msg_done flag is not set, the IPMI command was not
         * executed correctly.
         */
        buffer->status = msg->msg_done;
        if (msg->msg_done != ACPI_IPMI_OK)
                return;

        /*
         * If the IPMI response message was obtained correctly, the
         * status code is ACPI_IPMI_OK and the data is copied out.
         */
        buffer->length = msg->rx_len;
        memcpy(buffer->data, msg->data, msg->rx_len);
}

static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi)
{
        struct acpi_ipmi_msg *tx_msg;
        unsigned long flags;

        /*
         * NOTE: On-going ipmi_recv_msg
         * ipmi_msg_handler() may still be invoked by ipmi_si after
         * flushing. But it is safe to do a fast flushing on module_exit()
         * without waiting for all ipmi_recv_msg(s) to complete from
         * ipmi_msg_handler() as it is ensured by ipmi_si that all
         * ipmi_recv_msg(s) are freed after invoking ipmi_destroy_user().
         */
        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        while (!list_empty(&ipmi->tx_msg_list)) {
                tx_msg = list_first_entry(&ipmi->tx_msg_list,
                                          struct acpi_ipmi_msg,
                                          head);
                list_del(&tx_msg->head);
                spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

                /* wake up the sleeping thread waiting on this Tx msg */
                complete(&tx_msg->tx_complete);
                acpi_ipmi_msg_put(tx_msg);
                spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);
}

static void ipmi_cancel_tx_msg(struct acpi_ipmi_device *ipmi,
                               struct acpi_ipmi_msg *msg)
{
        struct acpi_ipmi_msg *tx_msg, *temp;
        bool msg_found = false;
        unsigned long flags;

        spin_lock_irqsave(&ipmi->tx_msg_lock, flags);
        list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) {
                if (msg == tx_msg) {
                        msg_found = true;
                        list_del(&tx_msg->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi->tx_msg_lock, flags);

        if (msg_found)
                acpi_ipmi_msg_put(tx_msg);
}
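
/*
 * Receive callback registered through driver_data.ipmi_hndlrs: it matches
 * an incoming response against the queued tx_msgs by msgid, copies the
 * payload and completes the waiter in acpi_ipmi_space_handler().
 */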
static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data)
{
        struct acpi_ipmi_device *ipmi_device = user_msg_data;
        bool msg_found = false;
        struct acpi_ipmi_msg *tx_msg, *temp;
        struct device *dev = ipmi_device->dev;
        unsigned long flags;

        if (msg->user != ipmi_device->user_interface) {
                dev_warn(dev,
                         "Unexpected response: returned user %p, expected user %p\n",
                         msg->user, ipmi_device->user_interface);
                goto out_msg;
        }

        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) {
                if (msg->msgid == tx_msg->tx_msgid) {
                        msg_found = true;
                        list_del(&tx_msg->head);
                        break;
                }
        }
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);

        if (!msg_found) {
                dev_warn(dev,
                         "Unexpected response (msg id %ld) is returned.\n",
                         msg->msgid);
                goto out_msg;
        }

        /* copy the response data to the Rx_data buffer */
        if (msg->msg.data_len > ACPI_IPMI_MAX_MSG_LENGTH) {
                dev_WARN_ONCE(dev, true,
                              "Unexpected response (msg len %d).\n",
                              msg->msg.data_len);
                goto out_comp;
        }

        /*
         * A one-byte response carries only a completion code, i.e. it
         * is an error reply.
         */
        if (msg->msg.data_len == 1) {
                if (msg->msg.data[0] == IPMI_TIMEOUT_COMPLETION_CODE) {
                        dev_WARN_ONCE(dev, true,
                                      "Unexpected response (timeout).\n");
                        tx_msg->msg_done = ACPI_IPMI_TIMEOUT;
                }
                goto out_comp;
        }

        tx_msg->rx_len = msg->msg.data_len;
        memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len);
        tx_msg->msg_done = ACPI_IPMI_OK;

out_comp:
        complete(&tx_msg->tx_complete);
        acpi_ipmi_msg_put(tx_msg);
out_msg:
        ipmi_free_recv_msg(msg);
}
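
/*
 * smi_watcher new_smi callback: only system interfaces discovered via
 * ACPI (SI_ACPI with a valid ACPI handle) are attached; the first such
 * interface becomes driver_data.selected_smi.
 */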
static void ipmi_register_bmc(int iface, struct device *dev)
{
        struct acpi_ipmi_device *ipmi_device, *temp;
        int err;
        struct ipmi_smi_info smi_data;
        acpi_handle handle;

        err = ipmi_get_smi_info(iface, &smi_data);
        if (err)
                return;

        if (smi_data.addr_src != SI_ACPI)
                goto err_ref;
        handle = smi_data.addr_info.acpi_info.acpi_handle;
        if (!handle)
                goto err_ref;

        ipmi_device = ipmi_dev_alloc(iface, smi_data.dev, handle);
        if (!ipmi_device) {
                dev_warn(smi_data.dev, "Can't create IPMI user interface\n");
                goto err_ref;
        }

        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry(temp, &driver_data.ipmi_devices, head) {
                /*
                 * if the corresponding ACPI handle is already added
                 * to the device list, don't add it again.
                 */
                if (temp->handle == handle)
                        goto err_lock;
        }
        if (!driver_data.selected_smi)
                driver_data.selected_smi = ipmi_device;
        list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices);
        mutex_unlock(&driver_data.ipmi_lock);

        put_device(smi_data.dev);
        return;

err_lock:
        mutex_unlock(&driver_data.ipmi_lock);
        ipmi_dev_release(ipmi_device);
err_ref:
        put_device(smi_data.dev);
        return;
}
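
/*
 * smi_watcher smi_gone callback: kills the departing interface, fails
 * selected_smi over to the next registered device (if any) and flushes
 * the pending transfers so that their waiters can return.
 */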
static void ipmi_bmc_gone(int iface)
{
        struct acpi_ipmi_device *ipmi_device, *temp;
        bool dev_found = false;

        mutex_lock(&driver_data.ipmi_lock);
        list_for_each_entry_safe(ipmi_device, temp,
                                 &driver_data.ipmi_devices, head) {
                if (ipmi_device->ipmi_ifnum == iface) {
                        dev_found = true;
                        __ipmi_dev_kill(ipmi_device);
                        break;
                }
        }
        if (!driver_data.selected_smi)
                driver_data.selected_smi =
                        list_first_entry_or_null(&driver_data.ipmi_devices,
                                                 struct acpi_ipmi_device,
                                                 head);
        mutex_unlock(&driver_data.ipmi_lock);

        if (dev_found) {
                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);
        }
}

/*
 * This is the IPMI opregion space handler.
 * @function: indicates read/write access. Since IPMI messages are
 *            command driven, only write access is meaningful.
 * @address: contains the netfn/command of the IPMI request message.
 * @bits: not used.
 * @value: an in/out parameter. It points to the IPMI message buffer.
 *         Before the IPMI message is sent, it holds the actual request
 *         IPMI message. After the IPMI transfer has finished, it holds
 *         the response IPMI message returned by the IPMI command.
 * @handler_context: IPMI device context.
 */
static acpi_status
acpi_ipmi_space_handler(u32 function, acpi_physical_address address,
                        u32 bits, acpi_integer *value,
                        void *handler_context, void *region_context)
{
        struct acpi_ipmi_msg *tx_msg;
        struct acpi_ipmi_device *ipmi_device;
        int err;
        acpi_status status;
        unsigned long flags;

        /*
         * IPMI opregion message:
         * the IPMI message is first written to the BMC and system
         * software then fetches the response, so a read access of the
         * IPMI opregion is meaningless.
         */
        if ((function & ACPI_IO_MASK) == ACPI_READ)
                return AE_TYPE;

        tx_msg = ipmi_msg_alloc();
        if (!tx_msg)
                return AE_NOT_EXIST;
        ipmi_device = tx_msg->device;

        if (acpi_format_ipmi_request(tx_msg, address, value) != 0) {
                ipmi_msg_release(tx_msg);
                return AE_TYPE;
        }

        acpi_ipmi_msg_get(tx_msg);
        mutex_lock(&driver_data.ipmi_lock);
        /* Do not add a tx_msg that cannot be flushed. */
        if (ipmi_device->dead) {
                mutex_unlock(&driver_data.ipmi_lock);
                ipmi_msg_release(tx_msg);
                return AE_NOT_EXIST;
        }
        spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags);
        list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list);
        spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags);
        mutex_unlock(&driver_data.ipmi_lock);
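
        /*
         * Trailing ipmi_request_settime() arguments: no user_msg_data,
         * default priority, no retries and a 5 second timeout
         * (IPMI_TIMEOUT).
         */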
        err = ipmi_request_settime(ipmi_device->user_interface,
                                   &tx_msg->addr,
                                   tx_msg->tx_msgid,
                                   &tx_msg->tx_message,
                                   NULL, 0, 0, IPMI_TIMEOUT);
        if (err) {
                status = AE_ERROR;
                goto out_msg;
        }
        wait_for_completion(&tx_msg->tx_complete);

        acpi_format_ipmi_response(tx_msg, value);
        status = AE_OK;

out_msg:
        ipmi_cancel_tx_msg(ipmi_device, tx_msg);
        acpi_ipmi_msg_put(tx_msg);

        return status;
}

static int __init acpi_ipmi_init(void)
{
        int result;
        acpi_status status;

        if (acpi_disabled)
                return 0;

        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_IPMI,
                                                    &acpi_ipmi_space_handler,
                                                    NULL, NULL);
        if (ACPI_FAILURE(status)) {
                pr_warn("Can't register IPMI opregion space handler\n");
                return -EINVAL;
        }

        result = ipmi_smi_watcher_register(&driver_data.bmc_events);
        if (result)
                pr_err("Can't register IPMI system interface watcher\n");

        return result;
}

static void __exit acpi_ipmi_exit(void)
{
        struct acpi_ipmi_device *ipmi_device;

        if (acpi_disabled)
                return;

        ipmi_smi_watcher_unregister(&driver_data.bmc_events);

        /*
         * When an smi_watcher is unregistered, it is only deleted from
         * the smi_watcher list and its smi_gone callback is not called.
         * So explicitly uninstall the ACPI IPMI opregion handler and
         * free the devices here.
         */
        mutex_lock(&driver_data.ipmi_lock);
        while (!list_empty(&driver_data.ipmi_devices)) {
                ipmi_device = list_first_entry(&driver_data.ipmi_devices,
                                               struct acpi_ipmi_device,
                                               head);
                __ipmi_dev_kill(ipmi_device);
                mutex_unlock(&driver_data.ipmi_lock);

                ipmi_flush_tx_msg(ipmi_device);
                acpi_ipmi_dev_put(ipmi_device);

                mutex_lock(&driver_data.ipmi_lock);
        }
        mutex_unlock(&driver_data.ipmi_lock);

        acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
                                          ACPI_ADR_SPACE_IPMI,
                                          &acpi_ipmi_space_handler);
}

module_init(acpi_ipmi_init);
module_exit(acpi_ipmi_exit);