ec.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712
  1. /*
  2. * ec.c - ACPI Embedded Controller Driver (v3)
  3. *
  4. * Copyright (C) 2001-2015 Intel Corporation
  5. * Author: 2014, 2015 Lv Zheng <lv.zheng@intel.com>
  6. * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
  7. * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
  8. * 2004 Luming Yu <luming.yu@intel.com>
  9. * 2001, 2002 Andy Grover <andrew.grover@intel.com>
  10. * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  11. * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
  12. *
  13. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  14. *
  15. * This program is free software; you can redistribute it and/or modify
  16. * it under the terms of the GNU General Public License as published by
  17. * the Free Software Foundation; either version 2 of the License, or (at
  18. * your option) any later version.
  19. *
  20. * This program is distributed in the hope that it will be useful, but
  21. * WITHOUT ANY WARRANTY; without even the implied warranty of
  22. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  23. * General Public License for more details.
  24. *
  25. * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  26. */
  27. /* Uncomment next line to get verbose printout */
  28. /* #define DEBUG */
  29. #define pr_fmt(fmt) "ACPI : EC: " fmt
  30. #include <linux/kernel.h>
  31. #include <linux/module.h>
  32. #include <linux/init.h>
  33. #include <linux/types.h>
  34. #include <linux/delay.h>
  35. #include <linux/interrupt.h>
  36. #include <linux/list.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/slab.h>
  39. #include <linux/acpi.h>
  40. #include <linux/dmi.h>
  41. #include <asm/io.h>
  42. #include "internal.h"
  43. #define ACPI_EC_CLASS "embedded_controller"
  44. #define ACPI_EC_DEVICE_NAME "Embedded Controller"
  45. #define ACPI_EC_FILE_INFO "info"
  46. /* EC status register */
  47. #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
  48. #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
  49. #define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
  50. #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
  51. #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
  52. /*
  53. * The SCI_EVT clearing timing is not defined by the ACPI specification.
  54. * This leads to lots of practical timing issues for the host EC driver.
  55. * The following variations are defined (from the target EC firmware's
  56. * perspective):
  57. * STATUS: After indicating SCI_EVT edge triggered IRQ to the host, the
  58. * target can clear SCI_EVT at any time so long as the host can see
  59. * the indication by reading the status register (EC_SC). So the
  60. * host should re-check SCI_EVT after the first time the SCI_EVT
  61. * indication is seen, which is the same time the query request
  62. * (QR_EC) is written to the command register (EC_CMD). SCI_EVT set
  63. * at any later time could indicate another event. Normally such
  64. * kind of EC firmware has implemented an event queue and will
  65. * return 0x00 to indicate "no outstanding event".
  66. * QUERY: After seeing the query request (QR_EC) written to the command
  67. * register (EC_CMD) by the host and having prepared the responding
  68. * event value in the data register (EC_DATA), the target can safely
  69. * clear SCI_EVT because the target can confirm that the current
  70. * event is being handled by the host. The host then should check
  71. * SCI_EVT right after reading the event response from the data
  72. * register (EC_DATA).
  73. * EVENT: After seeing the event response read from the data register
  74. * (EC_DATA) by the host, the target can clear SCI_EVT. As the
  75. * target requires time to notice the change in the data register
  76. * (EC_DATA), the host may be required to wait additional guarding
  77. * time before checking the SCI_EVT again. Such guarding may not be
  78. * necessary if the host is notified via another IRQ.
  79. */
  80. #define ACPI_EC_EVT_TIMING_STATUS 0x00
  81. #define ACPI_EC_EVT_TIMING_QUERY 0x01
  82. #define ACPI_EC_EVT_TIMING_EVENT 0x02
/* EC commands (ACPI Embedded Controller Interface command set) */
enum ec_command {
	ACPI_EC_COMMAND_READ = 0x80,	/* RD_EC: read one byte of EC space */
	ACPI_EC_COMMAND_WRITE = 0x81,	/* WR_EC: write one byte of EC space */
	ACPI_EC_BURST_ENABLE = 0x82,	/* BE_EC: enter burst mode */
	ACPI_EC_BURST_DISABLE = 0x83,	/* BD_EC: leave burst mode */
	ACPI_EC_COMMAND_QUERY = 0x84,	/* QR_EC: query a pending event (_Qxx) */
};
  91. #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
  92. #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY_POLL 550 /* Wait 550us between EC transaction polls */
  94. #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
  95. * when trying to clear the EC */
  96. #define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */
/* Driver state bits kept in ec->flags */
enum {
	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
	EC_FLAGS_QUERY_GUARDING,	/* Guard for SCI_EVT check */
	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
					 * OpReg are installed */
	EC_FLAGS_STARTED,		/* Driver is started */
	EC_FLAGS_STOPPED,		/* Driver is stopped */
	EC_FLAGS_COMMAND_STORM,		/* GPE storms occurred to the
					 * current command processing */
};
  107. #define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
  108. #define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
  109. /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
  110. static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
  111. module_param(ec_delay, uint, 0644);
  112. MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes");
  113. static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES;
  114. module_param(ec_max_queries, uint, 0644);
  115. MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations");
  116. static bool ec_busy_polling __read_mostly;
  117. module_param(ec_busy_polling, bool, 0644);
  118. MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction");
  119. static unsigned int ec_polling_guard __read_mostly = ACPI_EC_UDELAY_POLL;
  120. module_param(ec_polling_guard, uint, 0644);
  121. MODULE_PARM_DESC(ec_polling_guard, "Guard time(us) between EC accesses in polling modes");
  122. static unsigned int ec_event_clearing __read_mostly = ACPI_EC_EVT_TIMING_QUERY;
/*
 * If the number of false interrupts in a single transaction exceeds
 * this threshold, a GPE storm is assumed to have occurred and the GPE
 * is disabled (polling is used instead) for the current transaction.
 */
  128. static unsigned int ec_storm_threshold __read_mostly = 8;
  129. module_param(ec_storm_threshold, uint, 0644);
  130. MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm");
/* A registered handler for an EC query (_Qxx) event. */
struct acpi_ec_query_handler {
	struct list_head node;		/* list linkage (presumably on the EC's handler list — confirm in registration code) */
	acpi_ec_query_func func;	/* handler callback */
	acpi_handle handle;		/* ACPI handle (likely the _Qxx method) — confirm in acpi_ec_query() */
	void *data;			/* opaque argument passed to func */
	u8 query_bit;			/* query event number served by this handler */
	struct kref kref;		/* reference count for safe concurrent removal */
};
/*
 * State of one in-flight EC command transaction, stepped forward by
 * advance_transaction().
 */
struct transaction {
	const u8 *wdata;		/* bytes to write after the command byte */
	u8 *rdata;			/* buffer receiving bytes read back */
	unsigned short irq_count;	/* false-IRQ counter for storm detection */
	u8 command;			/* EC command byte (enum ec_command) */
	u8 wi;				/* current write index into wdata */
	u8 ri;				/* current read index into rdata */
	u8 wlen;			/* total number of bytes to write */
	u8 rlen;			/* total number of bytes to read */
	u8 flags;			/* ACPI_EC_COMMAND_POLL/COMPLETE state bits */
};
/* One scheduled query: its QR_EC transaction plus deferred handler call. */
struct acpi_ec_query {
	struct transaction transaction;	/* the QR_EC transaction for this event */
	struct work_struct work;	/* work item (presumably runs acpi_ec_event_processor() — confirm) */
	struct acpi_ec_query_handler *handler;	/* handler matched to the returned event byte */
};
/* Forward declarations for the event handling flow below. */
static int acpi_ec_query(struct acpi_ec *ec, u8 *data);
static void advance_transaction(struct acpi_ec *ec);
static void acpi_ec_event_handler(struct work_struct *work);
static void acpi_ec_event_processor(struct work_struct *work);

/*
 * boot_ec: EC discovered early at boot (presumably from the ECDT or an
 * early namespace scan — not visible in this chunk).
 * first_ec: first usable EC; exported for other drivers (ec_read/ec_write).
 */
struct acpi_ec *boot_ec, *first_ec;
EXPORT_SYMBOL(first_ec);

/* Workqueue used to run the per-event query work items. */
static struct workqueue_struct *ec_query_wq;

/* Boot-time quirk switches (set from DMI/ECDT checks elsewhere). */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
  166. /* --------------------------------------------------------------------------
  167. * Logging/Debugging
  168. * -------------------------------------------------------------------------- */
  169. /*
  170. * Splitters used by the developers to track the boundary of the EC
  171. * handling processes.
  172. */
  173. #ifdef DEBUG
  174. #define EC_DBG_SEP " "
  175. #define EC_DBG_DRV "+++++"
  176. #define EC_DBG_STM "====="
  177. #define EC_DBG_REQ "*****"
  178. #define EC_DBG_EVT "#####"
  179. #else
  180. #define EC_DBG_SEP ""
  181. #define EC_DBG_DRV
  182. #define EC_DBG_STM
  183. #define EC_DBG_REQ
  184. #define EC_DBG_EVT
  185. #endif
  186. #define ec_log_raw(fmt, ...) \
  187. pr_info(fmt "\n", ##__VA_ARGS__)
  188. #define ec_dbg_raw(fmt, ...) \
  189. pr_debug(fmt "\n", ##__VA_ARGS__)
  190. #define ec_log(filter, fmt, ...) \
  191. ec_log_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
  192. #define ec_dbg(filter, fmt, ...) \
  193. ec_dbg_raw(filter EC_DBG_SEP fmt EC_DBG_SEP filter, ##__VA_ARGS__)
  194. #define ec_log_drv(fmt, ...) \
  195. ec_log(EC_DBG_DRV, fmt, ##__VA_ARGS__)
  196. #define ec_dbg_drv(fmt, ...) \
  197. ec_dbg(EC_DBG_DRV, fmt, ##__VA_ARGS__)
  198. #define ec_dbg_stm(fmt, ...) \
  199. ec_dbg(EC_DBG_STM, fmt, ##__VA_ARGS__)
  200. #define ec_dbg_req(fmt, ...) \
  201. ec_dbg(EC_DBG_REQ, fmt, ##__VA_ARGS__)
  202. #define ec_dbg_evt(fmt, ...) \
  203. ec_dbg(EC_DBG_EVT, fmt, ##__VA_ARGS__)
  204. #define ec_dbg_ref(ec, fmt, ...) \
  205. ec_dbg_raw("%lu: " fmt, ec->reference_count, ## __VA_ARGS__)
  206. /* --------------------------------------------------------------------------
  207. * Device Flags
  208. * -------------------------------------------------------------------------- */
  209. static bool acpi_ec_started(struct acpi_ec *ec)
  210. {
  211. return test_bit(EC_FLAGS_STARTED, &ec->flags) &&
  212. !test_bit(EC_FLAGS_STOPPED, &ec->flags);
  213. }
/*
 * All outstanding users are flushed when only a single reference
 * remains (the base reference — presumably taken when the driver is
 * started; the taking site is not visible in this chunk, confirm).
 */
static bool acpi_ec_flushed(struct acpi_ec *ec)
{
	return ec->reference_count == 1;
}
  218. /* --------------------------------------------------------------------------
  219. * EC Registers
  220. * -------------------------------------------------------------------------- */
  221. static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
  222. {
  223. u8 x = inb(ec->command_addr);
  224. ec_dbg_raw("EC_SC(R) = 0x%2.2x "
  225. "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d",
  226. x,
  227. !!(x & ACPI_EC_FLAG_SCI),
  228. !!(x & ACPI_EC_FLAG_BURST),
  229. !!(x & ACPI_EC_FLAG_CMD),
  230. !!(x & ACPI_EC_FLAG_IBF),
  231. !!(x & ACPI_EC_FLAG_OBF));
  232. return x;
  233. }
  234. static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
  235. {
  236. u8 x = inb(ec->data_addr);
  237. ec->timestamp = jiffies;
  238. ec_dbg_raw("EC_DATA(R) = 0x%2.2x", x);
  239. return x;
  240. }
  241. static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
  242. {
  243. ec_dbg_raw("EC_SC(W) = 0x%2.2x", command);
  244. outb(command, ec->command_addr);
  245. ec->timestamp = jiffies;
  246. }
  247. static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
  248. {
  249. ec_dbg_raw("EC_DATA(W) = 0x%2.2x", data);
  250. outb(data, ec->data_addr);
  251. ec->timestamp = jiffies;
  252. }
  253. #ifdef DEBUG
  254. static const char *acpi_ec_cmd_string(u8 cmd)
  255. {
  256. switch (cmd) {
  257. case 0x80:
  258. return "RD_EC";
  259. case 0x81:
  260. return "WR_EC";
  261. case 0x82:
  262. return "BE_EC";
  263. case 0x83:
  264. return "BD_EC";
  265. case 0x84:
  266. return "QR_EC";
  267. }
  268. return "UNKNOWN";
  269. }
  270. #else
  271. #define acpi_ec_cmd_string(cmd) "UNDEF"
  272. #endif
  273. /* --------------------------------------------------------------------------
  274. * GPE Registers
  275. * -------------------------------------------------------------------------- */
  276. static inline bool acpi_ec_is_gpe_raised(struct acpi_ec *ec)
  277. {
  278. acpi_event_status gpe_status = 0;
  279. (void)acpi_get_gpe_status(NULL, ec->gpe, &gpe_status);
  280. return (gpe_status & ACPI_EVENT_FLAG_STATUS_SET) ? true : false;
  281. }
/*
 * Enable the EC GPE.  @open selects between taking a new ACPICA runtime
 * reference (open=true) and merely re-arming the EN bit of a GPE that
 * is already referenced (open=false, used when leaving storm polling).
 */
static inline void acpi_ec_enable_gpe(struct acpi_ec *ec, bool open)
{
	if (open)
		acpi_enable_gpe(NULL, ec->gpe);
	else {
		/* Re-arm only: the reference must already be held. */
		BUG_ON(ec->reference_count < 1);
		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
	}
	if (acpi_ec_is_gpe_raised(ec)) {
		/*
		 * On some platforms, EN=1 writes cannot trigger GPE. So
		 * software need to manually trigger a pseudo GPE event on
		 * EN=1 writes.
		 */
		ec_dbg_raw("Polling quirk");
		advance_transaction(ec);
	}
}
  300. static inline void acpi_ec_disable_gpe(struct acpi_ec *ec, bool close)
  301. {
  302. if (close)
  303. acpi_disable_gpe(NULL, ec->gpe);
  304. else {
  305. BUG_ON(ec->reference_count < 1);
  306. acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
  307. }
  308. }
/* Clear this EC's GPE status (STS) bit, but only when it is set. */
static inline void acpi_ec_clear_gpe(struct acpi_ec *ec)
{
	/*
	 * GPE STS is a W1C register, which means:
	 * 1. Software can clear it without worrying about clearing other
	 *    GPEs' STS bits when the hardware sets them in parallel.
	 * 2. As long as software can ensure only clearing it when it is
	 *    set, hardware won't set it in parallel.
	 * So software can clear GPE in any contexts.
	 * Warning: do not move the check into advance_transaction() as the
	 * EC commands will be sent without GPE raised.
	 */
	if (!acpi_ec_is_gpe_raised(ec))
		return;
	acpi_clear_gpe(NULL, ec->gpe);
}
  325. /* --------------------------------------------------------------------------
  326. * Transaction Management
  327. * -------------------------------------------------------------------------- */
  328. static void acpi_ec_submit_request(struct acpi_ec *ec)
  329. {
  330. ec->reference_count++;
  331. if (ec->reference_count == 1)
  332. acpi_ec_enable_gpe(ec, true);
  333. }
  334. static void acpi_ec_complete_request(struct acpi_ec *ec)
  335. {
  336. bool flushed = false;
  337. ec->reference_count--;
  338. if (ec->reference_count == 0)
  339. acpi_ec_disable_gpe(ec, true);
  340. flushed = acpi_ec_flushed(ec);
  341. if (flushed)
  342. wake_up(&ec->wait);
  343. }
  344. static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
  345. {
  346. if (!test_bit(flag, &ec->flags)) {
  347. acpi_ec_disable_gpe(ec, false);
  348. ec_dbg_drv("Polling enabled");
  349. set_bit(flag, &ec->flags);
  350. }
  351. }
  352. static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
  353. {
  354. if (test_bit(flag, &ec->flags)) {
  355. clear_bit(flag, &ec->flags);
  356. acpi_ec_enable_gpe(ec, false);
  357. ec_dbg_drv("Polling disabled");
  358. }
  359. }
/*
 * acpi_ec_submit_flushable_request() - Take a reference unless the
 * driver is stopped or being flushed.
 * @ec: the EC device
 *
 * Must be used before starting any new action that should hold a
 * reference.  Returns false when the driver is not accepting work; in
 * that case the caller must abandon the action, otherwise the pending
 * flush could never complete.
 */
static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
{
	if (!acpi_ec_started(ec))
		return false;
	acpi_ec_submit_request(ec);
	return true;
}
  378. static void acpi_ec_submit_query(struct acpi_ec *ec)
  379. {
  380. if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
  381. ec_dbg_evt("Command(%s) submitted/blocked",
  382. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  383. ec->nr_pending_queries++;
  384. schedule_work(&ec->work);
  385. }
  386. }
  387. static void acpi_ec_complete_query(struct acpi_ec *ec)
  388. {
  389. if (test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
  390. clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
  391. ec_dbg_evt("Command(%s) unblocked",
  392. acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
  393. }
  394. }
/*
 * Decide whether the caller must keep guarding before re-checking
 * SCI_EVT (only meaningful for the "event" clearing model; see the
 * comment above ACPI_EC_EVT_TIMING_STATUS).  Returns true to keep
 * guarding, false when the wait can be skipped.
 */
static bool acpi_ec_guard_event(struct acpi_ec *ec)
{
	bool guarded = true;
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	/*
	 * If firmware SCI_EVT clearing timing is "event", we actually
	 * don't know when the SCI_EVT will be cleared by firmware after
	 * evaluating _Qxx, so we need to re-check SCI_EVT after waiting an
	 * acceptable period.
	 *
	 * The guarding period begins when EC_FLAGS_QUERY_PENDING is
	 * flagged, which means SCI_EVT check has just been performed.
	 * But if the current transaction is ACPI_EC_COMMAND_QUERY, the
	 * guarding should have already been performed (via
	 * EC_FLAGS_QUERY_GUARDING) and should not be applied so that the
	 * ACPI_EC_COMMAND_QUERY transaction can be transitioned into
	 * ACPI_EC_COMMAND_POLL state immediately.
	 */
	if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
	    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY ||
	    !test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags) ||
	    (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY))
		guarded = false;
	spin_unlock_irqrestore(&ec->lock, flags);
	return guarded;
}
  422. static int ec_transaction_polled(struct acpi_ec *ec)
  423. {
  424. unsigned long flags;
  425. int ret = 0;
  426. spin_lock_irqsave(&ec->lock, flags);
  427. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL))
  428. ret = 1;
  429. spin_unlock_irqrestore(&ec->lock, flags);
  430. return ret;
  431. }
  432. static int ec_transaction_completed(struct acpi_ec *ec)
  433. {
  434. unsigned long flags;
  435. int ret = 0;
  436. spin_lock_irqsave(&ec->lock, flags);
  437. if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
  438. ret = 1;
  439. spin_unlock_irqrestore(&ec->lock, flags);
  440. return ret;
  441. }
/*
 * Advance the current transaction's state flags and, for a QR_EC
 * transaction, mark the pending query handled at the point that
 * matches the configured SCI_EVT clearing model:
 * - "status": unblock when QR_EC enters POLL (command byte written);
 * - "query":  unblock when QR_EC COMPLETEs (event byte read back);
 * - "event":  firmware clears SCI_EVT even later, so only arm the
 *   guarding flag here; advance_transaction() unblocks afterwards.
 */
static inline void ec_transaction_transition(struct acpi_ec *ec, unsigned long flag)
{
	ec->curr->flags |= flag;
	if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS &&
		    flag == ACPI_EC_COMMAND_POLL)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			acpi_ec_complete_query(ec);
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    flag == ACPI_EC_COMMAND_COMPLETE)
			set_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
	}
}
/*
 * advance_transaction() - One step of the EC transaction state machine.
 *
 * Called from both interrupt (GPE) context and the polling loop
 * (ec_poll()); callers hold ec->lock.  Clears the GPE STS bit first,
 * reads the status register once, and then either writes the next
 * command/data byte, reads the next response byte, flags completion,
 * or accounts a false IRQ toward storm detection.  Whenever SCI_EVT is
 * seen set, the query work is submitted.
 */
static void advance_transaction(struct acpi_ec *ec)
{
	struct transaction *t;
	u8 status;
	bool wakeup = false;

	ec_dbg_stm("%s (%d)", in_interrupt() ? "IRQ" : "TASK",
		   smp_processor_id());
	/*
	 * By always clearing STS before handling all indications, we can
	 * ensure a hardware STS 0->1 change after this clearing can always
	 * trigger a GPE interrupt.
	 */
	acpi_ec_clear_gpe(ec);
	status = acpi_ec_read_status(ec);
	t = ec->curr;
	/*
	 * Another IRQ or a guarded polling mode advancement is detected,
	 * the next QR_EC submission is then allowed.
	 */
	if (!t || !(t->flags & ACPI_EC_COMMAND_POLL)) {
		if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT &&
		    (!ec->nr_pending_queries ||
		     test_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags))) {
			clear_bit(EC_FLAGS_QUERY_GUARDING, &ec->flags);
			acpi_ec_complete_query(ec);
		}
	}
	if (!t)
		goto err;
	if (t->flags & ACPI_EC_COMMAND_POLL) {
		/* Command byte already written: move data bytes. */
		if (t->wlen > t->wi) {
			if ((status & ACPI_EC_FLAG_IBF) == 0)
				acpi_ec_write_data(ec, t->wdata[t->wi++]);
			else
				goto err;
		} else if (t->rlen > t->ri) {
			if ((status & ACPI_EC_FLAG_OBF) == 1) {
				t->rdata[t->ri++] = acpi_ec_read_data(ec);
				if (t->rlen == t->ri) {
					ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
					if (t->command == ACPI_EC_COMMAND_QUERY)
						ec_dbg_evt("Command(%s) completed by hardware",
							   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
					wakeup = true;
				}
			} else
				goto err;
		} else if (t->wlen == t->wi &&
			   (status & ACPI_EC_FLAG_IBF) == 0) {
			/* Write-only command is done once the EC consumed it. */
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			wakeup = true;
		}
		goto out;
	} else {
		/*
		 * Command byte not yet written.  On handshake-quirk
		 * firmware, a QR_EC issued while SCI_EVT is already clear
		 * is answered in software with the "no event" byte 0x00.
		 */
		if (EC_FLAGS_QUERY_HANDSHAKE &&
		    !(status & ACPI_EC_FLAG_SCI) &&
		    (t->command == ACPI_EC_COMMAND_QUERY)) {
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
			t->rdata[t->ri++] = 0x00;
			ec_transaction_transition(ec, ACPI_EC_COMMAND_COMPLETE);
			ec_dbg_evt("Command(%s) completed by software",
				   acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
			wakeup = true;
		} else if ((status & ACPI_EC_FLAG_IBF) == 0) {
			acpi_ec_write_cmd(ec, t->command);
			ec_transaction_transition(ec, ACPI_EC_COMMAND_POLL);
		} else
			goto err;
		goto out;
	}
err:
	/*
	 * If SCI bit is set, then don't think it's a false IRQ
	 * otherwise will take a not handled IRQ as a false one.
	 */
	if (!(status & ACPI_EC_FLAG_SCI)) {
		if (in_interrupt() && t) {
			if (t->irq_count < ec_storm_threshold)
				++t->irq_count;
			/* Allow triggering on 0 threshold */
			if (t->irq_count == ec_storm_threshold)
				acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
		}
	}
out:
	if (status & ACPI_EC_FLAG_SCI)
		acpi_ec_submit_query(ec);
	if (wakeup && in_interrupt())
		wake_up(&ec->wait);
}
  547. static void start_transaction(struct acpi_ec *ec)
  548. {
  549. ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
  550. ec->curr->flags = 0;
  551. }
/*
 * ec_guard() - Wait for the current transaction to complete while
 * honouring the guard interval since the last EC register access
 * (tracked in ec->timestamp).  Returns 0 once the transaction is
 * complete, -ETIME when the guard window expires first.
 */
static int ec_guard(struct acpi_ec *ec)
{
	unsigned long guard = usecs_to_jiffies(ec_polling_guard);
	unsigned long timeout = ec->timestamp + guard;

	/* Ensure guarding period before polling EC status */
	do {
		if (ec_busy_polling) {
			/* Perform busy polling */
			if (ec_transaction_completed(ec))
				return 0;
			udelay(jiffies_to_usecs(guard));
		} else {
			/*
			 * Perform wait polling
			 * 1. Wait the transaction to be completed by the
			 *    GPE handler after the transaction enters
			 *    ACPI_EC_COMMAND_POLL state.
			 * 2. A special guarding logic is also required
			 *    for event clearing mode "event" before the
			 *    transaction enters ACPI_EC_COMMAND_POLL
			 *    state.
			 */
			if (!ec_transaction_polled(ec) &&
			    !acpi_ec_guard_event(ec))
				break;
			if (wait_event_timeout(ec->wait,
					       ec_transaction_completed(ec),
					       guard))
				return 0;
		}
	} while (time_before(jiffies, timeout));
	return -ETIME;
}
/*
 * ec_poll() - Drive the current transaction to completion from task
 * context.  Alternates guarded waiting with manual state-machine
 * advancement for up to ec_delay ms; on timeout the transaction is
 * restarted, up to 5 attempts (the controller may have been reset).
 * Returns 0 on completion, -ETIME after all restarts time out.
 */
static int ec_poll(struct acpi_ec *ec)
{
	unsigned long flags;
	int repeat = 5; /* number of command restarts */

	while (repeat--) {
		unsigned long delay = jiffies +
			msecs_to_jiffies(ec_delay);
		do {
			if (!ec_guard(ec))
				return 0;
			spin_lock_irqsave(&ec->lock, flags);
			advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		} while (time_before(jiffies, delay));
		pr_debug("controller reset, restart transaction\n");
		spin_lock_irqsave(&ec->lock, flags);
		start_transaction(ec);
		spin_unlock_irqrestore(&ec->lock, flags);
	}
	return -ETIME;
}
/*
 * Execute one transaction with ec->mutex (and, when required, the ACPI
 * global lock) already held by the caller.  Holds a flushable reference
 * for the duration of the command so a concurrent stop/flush waits for
 * it.  Returns 0 on success, -EINVAL when the driver is not accepting
 * work, or the ec_poll() error code.
 */
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
					struct transaction *t)
{
	unsigned long tmp;
	int ret = 0;

	/* start transaction */
	spin_lock_irqsave(&ec->lock, tmp);
	/* Enable GPE for command processing (IBF=0/OBF=1) */
	if (!acpi_ec_submit_flushable_request(ec)) {
		ret = -EINVAL;
		goto unlock;
	}
	ec_dbg_ref(ec, "Increase command");
	/* following two actions should be kept atomic */
	ec->curr = t;
	ec_dbg_req("Command(%s) started", acpi_ec_cmd_string(t->command));
	start_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, tmp);

	ret = ec_poll(ec);

	spin_lock_irqsave(&ec->lock, tmp);
	/* Leave storm polling if this command triggered it. */
	if (t->irq_count == ec_storm_threshold)
		acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
	ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
	ec->curr = NULL;
	/* Disable GPE for command processing (IBF=0/OBF=1) */
	acpi_ec_complete_request(ec);
	ec_dbg_ref(ec, "Decrease command");
unlock:
	spin_unlock_irqrestore(&ec->lock, tmp);
	return ret;
}
/*
 * acpi_ec_transaction() - Validated, serialized entry point for EC
 * commands.  Zeroes the read buffer, serializes on ec->mutex, and when
 * ec->global_lock is set (firmware demands the ACPI global lock —
 * presumably via _GLK; set outside this chunk) acquires it around the
 * transaction.  Returns 0 on success or a negative errno.
 */
static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
{
	int status;
	u32 glk;

	if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
		return -EINVAL;
	if (t->rdata)
		memset(t->rdata, 0, t->rlen);

	mutex_lock(&ec->mutex);
	if (ec->global_lock) {
		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
		if (ACPI_FAILURE(status)) {
			status = -ENODEV;
			goto unlock;
		}
	}
	status = acpi_ec_transaction_unlocked(ec, t);
	if (ec->global_lock)
		acpi_release_global_lock(glk);
unlock:
	mutex_unlock(&ec->mutex);
	return status;
}
  660. static int acpi_ec_burst_enable(struct acpi_ec *ec)
  661. {
  662. u8 d;
  663. struct transaction t = {.command = ACPI_EC_BURST_ENABLE,
  664. .wdata = NULL, .rdata = &d,
  665. .wlen = 0, .rlen = 1};
  666. return acpi_ec_transaction(ec, &t);
  667. }
  668. static int acpi_ec_burst_disable(struct acpi_ec *ec)
  669. {
  670. struct transaction t = {.command = ACPI_EC_BURST_DISABLE,
  671. .wdata = NULL, .rdata = NULL,
  672. .wlen = 0, .rlen = 0};
  673. return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
  674. acpi_ec_transaction(ec, &t) : 0;
  675. }
  676. static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
  677. {
  678. int result;
  679. u8 d;
  680. struct transaction t = {.command = ACPI_EC_COMMAND_READ,
  681. .wdata = &address, .rdata = &d,
  682. .wlen = 1, .rlen = 1};
  683. result = acpi_ec_transaction(ec, &t);
  684. *data = d;
  685. return result;
  686. }
  687. static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
  688. {
  689. u8 wdata[2] = { address, data };
  690. struct transaction t = {.command = ACPI_EC_COMMAND_WRITE,
  691. .wdata = wdata, .rdata = NULL,
  692. .wlen = 2, .rlen = 0};
  693. return acpi_ec_transaction(ec, &t);
  694. }
  695. int ec_read(u8 addr, u8 *val)
  696. {
  697. int err;
  698. u8 temp_data;
  699. if (!first_ec)
  700. return -ENODEV;
  701. err = acpi_ec_read(first_ec, addr, &temp_data);
  702. if (!err) {
  703. *val = temp_data;
  704. return 0;
  705. }
  706. return err;
  707. }
  708. EXPORT_SYMBOL(ec_read);
  709. int ec_write(u8 addr, u8 val)
  710. {
  711. int err;
  712. if (!first_ec)
  713. return -ENODEV;
  714. err = acpi_ec_write(first_ec, addr, val);
  715. return err;
  716. }
  717. EXPORT_SYMBOL(ec_write);
  718. int ec_transaction(u8 command,
  719. const u8 *wdata, unsigned wdata_len,
  720. u8 *rdata, unsigned rdata_len)
  721. {
  722. struct transaction t = {.command = command,
  723. .wdata = wdata, .rdata = rdata,
  724. .wlen = wdata_len, .rlen = rdata_len};
  725. if (!first_ec)
  726. return -ENODEV;
  727. return acpi_ec_transaction(first_ec, &t);
  728. }
  729. EXPORT_SYMBOL(ec_transaction);
  730. /* Get the handle to the EC device */
  731. acpi_handle ec_get_handle(void)
  732. {
  733. if (!first_ec)
  734. return NULL;
  735. return first_ec->handle;
  736. }
  737. EXPORT_SYMBOL(ec_get_handle);
  738. /*
  739. * Process _Q events that might have accumulated in the EC.
  740. * Run with locked ec mutex.
  741. */
  742. static void acpi_ec_clear(struct acpi_ec *ec)
  743. {
  744. int i, status;
  745. u8 value = 0;
  746. for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
  747. status = acpi_ec_query(ec, &value);
  748. if (status || !value)
  749. break;
  750. }
  751. if (unlikely(i == ACPI_EC_CLEAR_MAX))
  752. pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
  753. else
  754. pr_info("%d stale EC events cleared\n", i);
  755. }
/*
 * acpi_ec_start - Mark the EC as started and enable event processing.
 * @ec: Target EC.
 * @resuming: True on the resume path; then no extra driver reference is
 *            submitted (the one taken at initial start is still held --
 *            see the symmetric !suspending handling in acpi_ec_stop()).
 *
 * Only the 0 -> 1 transition of EC_FLAGS_STARTED does any work, so the
 * call is idempotent.  Runs entirely under ec->lock.
 */
static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (!test_and_set_bit(EC_FLAGS_STARTED, &ec->flags)) {
		ec_dbg_drv("Starting EC");
		/* Enable GPE for event processing (SCI_EVT=1) */
		if (!resuming) {
			acpi_ec_submit_request(ec);
			ec_dbg_ref(ec, "Increase driver");
		}
		ec_log_drv("EC started");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
  771. static bool acpi_ec_stopped(struct acpi_ec *ec)
  772. {
  773. unsigned long flags;
  774. bool flushed;
  775. spin_lock_irqsave(&ec->lock, flags);
  776. flushed = acpi_ec_flushed(ec);
  777. spin_unlock_irqrestore(&ec->lock, flags);
  778. return flushed;
  779. }
/*
 * acpi_ec_stop - Stop the EC after flushing in-flight work.
 * @ec: Target EC.
 * @suspending: True on the suspend path; then the driver reference is
 *              kept so it survives across suspend (acpi_ec_start() with
 *              resuming=true will not re-submit it).
 *
 * Sets EC_FLAGS_STOPPED, releases the spinlock to sleep until all
 * references have been flushed, then reacquires it to finish tear-down.
 */
static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
{
	unsigned long flags;

	spin_lock_irqsave(&ec->lock, flags);
	if (acpi_ec_started(ec)) {
		ec_dbg_drv("Stopping EC");
		set_bit(EC_FLAGS_STOPPED, &ec->flags);
		/* Must drop the spinlock: wait_event() may sleep. */
		spin_unlock_irqrestore(&ec->lock, flags);
		wait_event(ec->wait, acpi_ec_stopped(ec));
		spin_lock_irqsave(&ec->lock, flags);
		/* Disable GPE for event processing (SCI_EVT=1) */
		if (!suspending) {
			acpi_ec_complete_request(ec);
			ec_dbg_ref(ec, "Decrease driver");
		}
		clear_bit(EC_FLAGS_STARTED, &ec->flags);
		clear_bit(EC_FLAGS_STOPPED, &ec->flags);
		ec_log_drv("EC stopped");
	}
	spin_unlock_irqrestore(&ec->lock, flags);
}
  801. void acpi_ec_block_transactions(void)
  802. {
  803. struct acpi_ec *ec = first_ec;
  804. if (!ec)
  805. return;
  806. mutex_lock(&ec->mutex);
  807. /* Prevent transactions from being carried out */
  808. acpi_ec_stop(ec, true);
  809. mutex_unlock(&ec->mutex);
  810. }
  811. void acpi_ec_unblock_transactions(void)
  812. {
  813. struct acpi_ec *ec = first_ec;
  814. if (!ec)
  815. return;
  816. /* Allow transactions to be carried out again */
  817. acpi_ec_start(ec, true);
  818. if (EC_FLAGS_CLEAR_ON_RESUME)
  819. acpi_ec_clear(ec);
  820. }
/* Early-resume variant of acpi_ec_unblock_transactions(). */
void acpi_ec_unblock_transactions_early(void)
{
	/*
	 * Allow transactions to happen again (this function is called from
	 * atomic context during wakeup, so we don't need to acquire the mutex).
	 */
	if (first_ec)
		acpi_ec_start(first_ec, true);
}
  830. /* --------------------------------------------------------------------------
  831. Event Management
  832. -------------------------------------------------------------------------- */
  833. static struct acpi_ec_query_handler *
  834. acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
  835. {
  836. if (handler)
  837. kref_get(&handler->kref);
  838. return handler;
  839. }
  840. static struct acpi_ec_query_handler *
  841. acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
  842. {
  843. struct acpi_ec_query_handler *handler;
  844. bool found = false;
  845. mutex_lock(&ec->mutex);
  846. list_for_each_entry(handler, &ec->list, node) {
  847. if (value == handler->query_bit) {
  848. found = true;
  849. break;
  850. }
  851. }
  852. mutex_unlock(&ec->mutex);
  853. return found ? acpi_ec_get_query_handler(handler) : NULL;
  854. }
  855. static void acpi_ec_query_handler_release(struct kref *kref)
  856. {
  857. struct acpi_ec_query_handler *handler =
  858. container_of(kref, struct acpi_ec_query_handler, kref);
  859. kfree(handler);
  860. }
  861. static void acpi_ec_put_query_handler(struct acpi_ec_query_handler *handler)
  862. {
  863. kref_put(&handler->kref, acpi_ec_query_handler_release);
  864. }
  865. int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
  866. acpi_handle handle, acpi_ec_query_func func,
  867. void *data)
  868. {
  869. struct acpi_ec_query_handler *handler =
  870. kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
  871. if (!handler)
  872. return -ENOMEM;
  873. handler->query_bit = query_bit;
  874. handler->handle = handle;
  875. handler->func = func;
  876. handler->data = data;
  877. mutex_lock(&ec->mutex);
  878. kref_init(&handler->kref);
  879. list_add(&handler->node, &ec->list);
  880. mutex_unlock(&ec->mutex);
  881. return 0;
  882. }
  883. EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
  884. static void acpi_ec_remove_query_handlers(struct acpi_ec *ec,
  885. bool remove_all, u8 query_bit)
  886. {
  887. struct acpi_ec_query_handler *handler, *tmp;
  888. LIST_HEAD(free_list);
  889. mutex_lock(&ec->mutex);
  890. list_for_each_entry_safe(handler, tmp, &ec->list, node) {
  891. if (remove_all || query_bit == handler->query_bit) {
  892. list_del_init(&handler->node);
  893. list_add(&handler->node, &free_list);
  894. }
  895. }
  896. mutex_unlock(&ec->mutex);
  897. list_for_each_entry_safe(handler, tmp, &free_list, node)
  898. acpi_ec_put_query_handler(handler);
  899. }
/* Remove the handler(s) registered for a single _Qxx query bit. */
void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
{
	acpi_ec_remove_query_handlers(ec, false, query_bit);
}
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
  905. static struct acpi_ec_query *acpi_ec_create_query(u8 *pval)
  906. {
  907. struct acpi_ec_query *q;
  908. struct transaction *t;
  909. q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL);
  910. if (!q)
  911. return NULL;
  912. INIT_WORK(&q->work, acpi_ec_event_processor);
  913. t = &q->transaction;
  914. t->command = ACPI_EC_COMMAND_QUERY;
  915. t->rdata = pval;
  916. t->rlen = 1;
  917. return q;
  918. }
  919. static void acpi_ec_delete_query(struct acpi_ec_query *q)
  920. {
  921. if (q) {
  922. if (q->handler)
  923. acpi_ec_put_query_handler(q->handler);
  924. kfree(q);
  925. }
  926. }
  927. static void acpi_ec_event_processor(struct work_struct *work)
  928. {
  929. struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work);
  930. struct acpi_ec_query_handler *handler = q->handler;
  931. ec_dbg_evt("Query(0x%02x) started", handler->query_bit);
  932. if (handler->func)
  933. handler->func(handler->data);
  934. else if (handler->handle)
  935. acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
  936. ec_dbg_evt("Query(0x%02x) stopped", handler->query_bit);
  937. acpi_ec_delete_query(q);
  938. }
/*
 * acpi_ec_query - Issue a QR_EC command and schedule the matching _Qxx
 * handler on the query workqueue.
 * @ec: Target EC.
 * @data: Optional out-parameter; receives the raw query value even on
 *        failure.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODATA when no
 * event was pending or no handler is registered, -EBUSY when the work
 * item is already queued, or a transaction error.
 */
static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
{
	u8 value = 0;
	int result;
	struct acpi_ec_query *q;

	q = acpi_ec_create_query(&value);
	if (!q)
		return -ENOMEM;

	/*
	 * Query the EC to find out which _Qxx method we need to evaluate.
	 * Note that successful completion of the query causes the ACPI_EC_SCI
	 * bit to be cleared (and thus clearing the interrupt source).
	 */
	result = acpi_ec_transaction(ec, &q->transaction);
	/*
	 * NOTE(review): when the returned value is 0 any transaction error
	 * is replaced by -ENODATA here — presumably intentional, since a
	 * zero value means "no event pending" either way; confirm before
	 * relying on the specific error code.
	 */
	if (!value)
		result = -ENODATA;
	if (result)
		goto err_exit;

	q->handler = acpi_ec_get_query_handler_by_value(ec, value);
	if (!q->handler) {
		result = -ENODATA;
		goto err_exit;
	}

	/*
	 * It is reported that _Qxx are evaluated in a parallel way on
	 * Windows:
	 * https://bugzilla.kernel.org/show_bug.cgi?id=94411
	 *
	 * Put this log entry before schedule_work() in order to make
	 * it appearing before any other log entries occurred during the
	 * work queue execution.
	 */
	ec_dbg_evt("Query(0x%02x) scheduled", value);
	if (!queue_work(ec_query_wq, &q->work)) {
		ec_dbg_evt("Query(0x%02x) overlapped", value);
		result = -EBUSY;
	}

err_exit:
	/* On any failure the query (and its handler ref) is torn down here. */
	if (result)
		acpi_ec_delete_query(q);
	if (data)
		*data = value;
	return result;
}
/*
 * Re-check for a pending SCI_EVT after event handling completes.  Only
 * relevant in the ACPI_EC_EVT_TIMING_EVENT mode, where SCI_EVT is
 * cleared by the event read itself and a fresh event may already be
 * latched.
 */
static void acpi_ec_check_event(struct acpi_ec *ec)
{
	unsigned long flags;

	if (ec_event_clearing == ACPI_EC_EVT_TIMING_EVENT) {
		if (ec_guard(ec)) {
			spin_lock_irqsave(&ec->lock, flags);
			/*
			 * Take care of the SCI_EVT unless no one else is
			 * taking care of it.
			 */
			if (!ec->curr)
				advance_transaction(ec);
			spin_unlock_irqrestore(&ec->lock, flags);
		}
	}
}
/*
 * Workqueue callback draining all pending SCI_EVT queries for an EC.
 * The spinlock is dropped around each acpi_ec_query() call (it sleeps)
 * and reacquired to manipulate nr_pending_queries.
 */
static void acpi_ec_event_handler(struct work_struct *work)
{
	unsigned long flags;
	struct acpi_ec *ec = container_of(work, struct acpi_ec, work);

	ec_dbg_evt("Event started");

	spin_lock_irqsave(&ec->lock, flags);
	while (ec->nr_pending_queries) {
		spin_unlock_irqrestore(&ec->lock, flags);
		(void)acpi_ec_query(ec, NULL);
		spin_lock_irqsave(&ec->lock, flags);
		ec->nr_pending_queries--;
		/*
		 * Before exit, make sure that this work item can be
		 * scheduled again. There might be QR_EC failures, leaving
		 * EC_FLAGS_QUERY_PENDING uncleared and preventing this work
		 * item from being scheduled again.
		 */
		if (!ec->nr_pending_queries) {
			if (ec_event_clearing == ACPI_EC_EVT_TIMING_STATUS ||
			    ec_event_clearing == ACPI_EC_EVT_TIMING_QUERY)
				acpi_ec_complete_query(ec);
		}
	}
	spin_unlock_irqrestore(&ec->lock, flags);

	ec_dbg_evt("Event stopped");

	/* In EVENT timing mode an event may already be pending again. */
	acpi_ec_check_event(ec);
}
/*
 * Raw GPE handler: advance the EC state machine on every EC interrupt.
 * @data is the struct acpi_ec registered at install time.  Always
 * reports the interrupt as handled.
 */
static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
			       u32 gpe_number, void *data)
{
	unsigned long flags;
	struct acpi_ec *ec = data;

	spin_lock_irqsave(&ec->lock, flags);
	advance_transaction(ec);
	spin_unlock_irqrestore(&ec->lock, flags);

	return ACPI_INTERRUPT_HANDLED;
}
  1036. /* --------------------------------------------------------------------------
  1037. * Address Space Management
  1038. * -------------------------------------------------------------------------- */
  1039. static acpi_status
  1040. acpi_ec_space_handler(u32 function, acpi_physical_address address,
  1041. u32 bits, u64 *value64,
  1042. void *handler_context, void *region_context)
  1043. {
  1044. struct acpi_ec *ec = handler_context;
  1045. int result = 0, i, bytes = bits / 8;
  1046. u8 *value = (u8 *)value64;
  1047. if ((address > 0xFF) || !value || !handler_context)
  1048. return AE_BAD_PARAMETER;
  1049. if (function != ACPI_READ && function != ACPI_WRITE)
  1050. return AE_BAD_PARAMETER;
  1051. if (ec_busy_polling || bits > 8)
  1052. acpi_ec_burst_enable(ec);
  1053. for (i = 0; i < bytes; ++i, ++address, ++value)
  1054. result = (function == ACPI_READ) ?
  1055. acpi_ec_read(ec, address, value) :
  1056. acpi_ec_write(ec, address, *value);
  1057. if (ec_busy_polling || bits > 8)
  1058. acpi_ec_burst_disable(ec);
  1059. switch (result) {
  1060. case -EINVAL:
  1061. return AE_BAD_PARAMETER;
  1062. case -ENODEV:
  1063. return AE_NOT_FOUND;
  1064. case -ETIME:
  1065. return AE_TIME;
  1066. default:
  1067. return AE_OK;
  1068. }
  1069. }
  1070. /* --------------------------------------------------------------------------
  1071. * Driver Interface
  1072. * -------------------------------------------------------------------------- */
  1073. static acpi_status
  1074. ec_parse_io_ports(struct acpi_resource *resource, void *context);
  1075. static struct acpi_ec *make_acpi_ec(void)
  1076. {
  1077. struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
  1078. if (!ec)
  1079. return NULL;
  1080. ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
  1081. mutex_init(&ec->mutex);
  1082. init_waitqueue_head(&ec->wait);
  1083. INIT_LIST_HEAD(&ec->list);
  1084. spin_lock_init(&ec->lock);
  1085. INIT_WORK(&ec->work, acpi_ec_event_handler);
  1086. ec->timestamp = jiffies;
  1087. return ec;
  1088. }
  1089. static acpi_status
  1090. acpi_ec_register_query_methods(acpi_handle handle, u32 level,
  1091. void *context, void **return_value)
  1092. {
  1093. char node_name[5];
  1094. struct acpi_buffer buffer = { sizeof(node_name), node_name };
  1095. struct acpi_ec *ec = context;
  1096. int value = 0;
  1097. acpi_status status;
  1098. status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
  1099. if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
  1100. acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
  1101. return AE_OK;
  1102. }
  1103. static acpi_status
  1104. ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
  1105. {
  1106. acpi_status status;
  1107. unsigned long long tmp = 0;
  1108. struct acpi_ec *ec = context;
  1109. /* clear addr values, ec_parse_io_ports depend on it */
  1110. ec->command_addr = ec->data_addr = 0;
  1111. status = acpi_walk_resources(handle, METHOD_NAME__CRS,
  1112. ec_parse_io_ports, ec);
  1113. if (ACPI_FAILURE(status))
  1114. return status;
  1115. /* Get GPE bit assignment (EC events). */
  1116. /* TODO: Add support for _GPE returning a package */
  1117. status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
  1118. if (ACPI_FAILURE(status))
  1119. return status;
  1120. ec->gpe = tmp;
  1121. /* Use the global lock for all EC transactions? */
  1122. tmp = 0;
  1123. acpi_evaluate_integer(handle, "_GLK", NULL, &tmp);
  1124. ec->global_lock = tmp;
  1125. ec->handle = handle;
  1126. return AE_CTRL_TERMINATE;
  1127. }
/*
 * ec_install_handlers - Install the GPE and EC address-space handlers.
 * @ec: Target EC.
 *
 * Idempotent via EC_FLAGS_HANDLERS_INSTALLED.  On address-space handler
 * failure other than AE_NOT_FOUND, the GPE handler installation and the
 * acpi_ec_start() are rolled back.  Returns 0 on success or -ENODEV.
 */
static int ec_install_handlers(struct acpi_ec *ec)
{
	acpi_status status;

	if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
		return 0;

	status = acpi_install_gpe_raw_handler(NULL, ec->gpe,
					      ACPI_GPE_EDGE_TRIGGERED,
					      &acpi_ec_gpe_handler, ec);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	acpi_ec_start(ec, false);
	status = acpi_install_address_space_handler(ec->handle,
						    ACPI_ADR_SPACE_EC,
						    &acpi_ec_space_handler,
						    NULL, ec);
	if (ACPI_FAILURE(status)) {
		if (status == AE_NOT_FOUND) {
			/*
			 * Maybe OS fails in evaluating the _REG object.
			 * The AE_NOT_FOUND error will be ignored and OS
			 * continue to initialize EC.
			 */
			pr_err("Fail in evaluating the _REG object"
			       " of EC device. Broken bios is suspected.\n");
		} else {
			/* Roll back the start and the GPE handler. */
			acpi_ec_stop(ec, false);
			acpi_remove_gpe_handler(NULL, ec->gpe,
						&acpi_ec_gpe_handler);
			return -ENODEV;
		}
	}

	set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
	return 0;
}
  1162. static void ec_remove_handlers(struct acpi_ec *ec)
  1163. {
  1164. if (!test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
  1165. return;
  1166. acpi_ec_stop(ec, false);
  1167. if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
  1168. ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
  1169. pr_err("failed to remove space handler\n");
  1170. if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
  1171. &acpi_ec_gpe_handler)))
  1172. pr_err("failed to remove gpe handler\n");
  1173. clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
  1174. }
/*
 * acpi_ec_add - .add callback of the EC ACPI driver.
 * @device: The enumerated EC device.
 *
 * Reuses the boot-time EC object when this device is the one the boot
 * EC was created for; otherwise allocates a fresh struct acpi_ec.
 * Parses the device, registers its _Qxx methods, claims its I/O ports,
 * installs handlers and finally enables query processing.
 */
static int acpi_ec_add(struct acpi_device *device)
{
	struct acpi_ec *ec = NULL;
	int ret;

	strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_EC_CLASS);

	/* Check for boot EC */
	if (boot_ec &&
	    (boot_ec->handle == device->handle ||
	     boot_ec->handle == ACPI_ROOT_OBJECT)) {
		/* Take ownership of the boot EC; boot_ec must not be freed twice. */
		ec = boot_ec;
		boot_ec = NULL;
	} else {
		ec = make_acpi_ec();
		if (!ec)
			return -ENOMEM;
	}

	if (ec_parse_device(device->handle, 0, ec, NULL) !=
	    AE_CTRL_TERMINATE) {
		kfree(ec);
		return -EINVAL;
	}

	/* Find and register all query methods */
	acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
			    acpi_ec_register_query_methods, NULL, ec, NULL);

	if (!first_ec)
		first_ec = ec;
	device->driver_data = ec;

	/* Port reservation failures are warned about but not fatal. */
	ret = !!request_region(ec->data_addr, 1, "EC data");
	WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr);
	ret = !!request_region(ec->command_addr, 1, "EC cmd");
	WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);

	pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
		ec->gpe, ec->command_addr, ec->data_addr);

	ret = ec_install_handlers(ec);

	/* Reprobe devices depending on the EC */
	acpi_walk_dep_device_list(ec->handle);

	/* EC is fully operational, allow queries */
	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);

	/* Clear stale _Q events if hardware might require that */
	if (EC_FLAGS_CLEAR_ON_RESUME)
		acpi_ec_clear(ec);
	return ret;
}
  1219. static int acpi_ec_remove(struct acpi_device *device)
  1220. {
  1221. struct acpi_ec *ec;
  1222. if (!device)
  1223. return -EINVAL;
  1224. ec = acpi_driver_data(device);
  1225. ec_remove_handlers(ec);
  1226. acpi_ec_remove_query_handlers(ec, true, 0);
  1227. release_region(ec->data_addr, 1);
  1228. release_region(ec->command_addr, 1);
  1229. device->driver_data = NULL;
  1230. if (ec == first_ec)
  1231. first_ec = NULL;
  1232. kfree(ec);
  1233. return 0;
  1234. }
  1235. static acpi_status
  1236. ec_parse_io_ports(struct acpi_resource *resource, void *context)
  1237. {
  1238. struct acpi_ec *ec = context;
  1239. if (resource->type != ACPI_RESOURCE_TYPE_IO)
  1240. return AE_OK;
  1241. /*
  1242. * The first address region returned is the data port, and
  1243. * the second address region returned is the status/command
  1244. * port.
  1245. */
  1246. if (ec->data_addr == 0)
  1247. ec->data_addr = resource->data.io.minimum;
  1248. else if (ec->command_addr == 0)
  1249. ec->command_addr = resource->data.io.minimum;
  1250. else
  1251. return AE_CTRL_TERMINATE;
  1252. return AE_OK;
  1253. }
  1254. int __init acpi_boot_ec_enable(void)
  1255. {
  1256. if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
  1257. return 0;
  1258. if (!ec_install_handlers(boot_ec)) {
  1259. first_ec = boot_ec;
  1260. return 0;
  1261. }
  1262. return -EFAULT;
  1263. }
/* ACPI IDs handled by this driver; PNP0C09 is the standard EC device ID. */
static const struct acpi_device_id ec_device_ids[] = {
	{"PNP0C09", 0},
	{"", 0},
};
/* Some BIOS do not survive early DSDT scan, skip it */
/* DMI callback: flag that the early DSDT scan for the EC must be skipped. */
static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
{
	EC_FLAGS_SKIP_DSDT_SCAN = 1;
	return 0;
}
/* ASUStek often supplies us with broken ECDT, validate it */
/* DMI callback: flag that the ECDT must be cross-checked against the DSDT. */
static int ec_validate_ecdt(const struct dmi_system_id *id)
{
	EC_FLAGS_VALIDATE_ECDT = 1;
	return 0;
}
#if 0
/*
 * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not
 * set, for which case, we complete the QR_EC without issuing it to the
 * firmware.
 * https://bugzilla.kernel.org/show_bug.cgi?id=82611
 * https://bugzilla.kernel.org/show_bug.cgi?id=97381
 */
/* Currently compiled out: no DMI table entry references this callback. */
static int ec_flag_query_handshake(const struct dmi_system_id *id)
{
	pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
	EC_FLAGS_QUERY_HANDSHAKE = 1;
	return 0;
}
#endif
  1295. /*
  1296. * On some hardware it is necessary to clear events accumulated by the EC during
  1297. * sleep. These ECs stop reporting GPEs until they are manually polled, if too
  1298. * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
  1299. *
  1300. * https://bugzilla.kernel.org/show_bug.cgi?id=44161
  1301. *
  1302. * Ideally, the EC should also be instructed NOT to accumulate events during
  1303. * sleep (which Windows seems to do somehow), but the interface to control this
  1304. * behaviour is not known at this time.
  1305. *
  1306. * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
  1307. * however it is very likely that other Samsung models are affected.
  1308. *
  1309. * On systems which don't accumulate _Q events during sleep, this extra check
  1310. * should be harmless.
  1311. */
  1312. static int ec_clear_on_resume(const struct dmi_system_id *id)
  1313. {
  1314. pr_debug("Detected system needing EC poll on resume.\n");
  1315. EC_FLAGS_CLEAR_ON_RESUME = 1;
  1316. ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
  1317. return 0;
  1318. }
/* DMI quirk table; matching entries run the callback in the first field. */
static struct dmi_system_id ec_dmi_table[] __initdata = {
	{
	ec_skip_dsdt_scan, "Compal JFL92", {
	DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
	DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
	{
	ec_validate_ecdt, "MSI MS-171F", {
	DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
	DMI_MATCH(DMI_PRODUCT_NAME, "MS-171F"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL},
	{
	ec_skip_dsdt_scan, "HP Folio 13", {
	DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
	{
	ec_validate_ecdt, "ASUS hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
	{
	ec_clear_on_resume, "Samsung hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
	{},
};
/*
 * acpi_ec_ecdt_probe - Early boot-time EC probing.
 *
 * Builds the boot EC either from the ECDT table or, on machines without
 * (or with an untrusted) ECDT, from the DSDT's PNP0C09 device.  When
 * DMI says the ECDT must be validated, the ECDT-derived configuration
 * is saved and compared against what the DSDT reports.  On success the
 * handlers are installed and first_ec is set; on failure everything is
 * freed and boot_ec is reset to NULL.
 */
int __init acpi_ec_ecdt_probe(void)
{
	acpi_status status;
	struct acpi_ec *saved_ec = NULL;
	struct acpi_table_ecdt *ecdt_ptr;

	boot_ec = make_acpi_ec();
	if (!boot_ec)
		return -ENOMEM;
	/*
	 * Generate a boot ec context
	 */
	dmi_check_system(ec_dmi_table);
	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_SUCCESS(status)) {
		pr_info("EC description table is found, configuring boot EC\n");
		boot_ec->command_addr = ecdt_ptr->control.address;
		boot_ec->data_addr = ecdt_ptr->data.address;
		boot_ec->gpe = ecdt_ptr->gpe;
		boot_ec->handle = ACPI_ROOT_OBJECT;
		/* Resolve the real handle; ACPI_ROOT_OBJECT remains on failure. */
		acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
				&boot_ec->handle);
		/* Don't trust ECDT, which comes from ASUSTek */
		if (!EC_FLAGS_VALIDATE_ECDT)
			goto install;
		saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
		if (!saved_ec)
			return -ENOMEM;
		/* fall through */
	}
	if (EC_FLAGS_SKIP_DSDT_SCAN) {
		kfree(saved_ec);
		return -ENODEV;
	}
	/* This workaround is needed only on some broken machines,
	 * which require early EC, but fail to provide ECDT */
	pr_debug("Look up EC in DSDT\n");
	status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
				  boot_ec, NULL);
	/* Check that acpi_get_devices actually find something */
	if (ACPI_FAILURE(status) || !boot_ec->handle)
		goto error;
	if (saved_ec) {
		/* try to find good ECDT from ASUSTek */
		if (saved_ec->command_addr != boot_ec->command_addr ||
		    saved_ec->data_addr != boot_ec->data_addr ||
		    saved_ec->gpe != boot_ec->gpe ||
		    saved_ec->handle != boot_ec->handle)
			pr_info("ASUSTek keeps feeding us with broken "
				"ECDT tables, which are very hard to workaround. "
				"Trying to use DSDT EC info instead. Please send "
				"output of acpidump to linux-acpi@vger.kernel.org\n");
		kfree(saved_ec);
		saved_ec = NULL;
	} else {
		/* We really need to limit this workaround, the only ASUS,
		 * which needs it, has fake EC._INI method, so use it as flag.
		 * Keep boot_ec struct as it will be needed soon.
		 */
		if (!dmi_name_in_vendors("ASUS") ||
		    !acpi_has_method(boot_ec->handle, "_INI"))
			return -ENODEV;
	}
install:
	if (!ec_install_handlers(boot_ec)) {
		first_ec = boot_ec;
		return 0;
	}
error:
	kfree(boot_ec);
	kfree(saved_ec);
	boot_ec = NULL;
	return -ENODEV;
}
  1421. static int param_set_event_clearing(const char *val, struct kernel_param *kp)
  1422. {
  1423. int result = 0;
  1424. if (!strncmp(val, "status", sizeof("status") - 1)) {
  1425. ec_event_clearing = ACPI_EC_EVT_TIMING_STATUS;
  1426. pr_info("Assuming SCI_EVT clearing on EC_SC accesses\n");
  1427. } else if (!strncmp(val, "query", sizeof("query") - 1)) {
  1428. ec_event_clearing = ACPI_EC_EVT_TIMING_QUERY;
  1429. pr_info("Assuming SCI_EVT clearing on QR_EC writes\n");
  1430. } else if (!strncmp(val, "event", sizeof("event") - 1)) {
  1431. ec_event_clearing = ACPI_EC_EVT_TIMING_EVENT;
  1432. pr_info("Assuming SCI_EVT clearing on event reads\n");
  1433. } else
  1434. result = -EINVAL;
  1435. return result;
  1436. }
  1437. static int param_get_event_clearing(char *buffer, struct kernel_param *kp)
  1438. {
  1439. switch (ec_event_clearing) {
  1440. case ACPI_EC_EVT_TIMING_STATUS:
  1441. return sprintf(buffer, "status");
  1442. case ACPI_EC_EVT_TIMING_QUERY:
  1443. return sprintf(buffer, "query");
  1444. case ACPI_EC_EVT_TIMING_EVENT:
  1445. return sprintf(buffer, "event");
  1446. default:
  1447. return sprintf(buffer, "invalid");
  1448. }
  1449. return 0;
  1450. }
  1451. module_param_call(ec_event_clearing, param_set_event_clearing, param_get_event_clearing,
  1452. NULL, 0644);
  1453. MODULE_PARM_DESC(ec_event_clearing, "Assumed SCI_EVT clearing timing");
/* ACPI driver registration for the EC device (PNP0C09). */
static struct acpi_driver acpi_ec_driver = {
	.name = "ec",
	.class = ACPI_EC_CLASS,
	.ids = ec_device_ids,
	.ops = {
		.add = acpi_ec_add,
		.remove = acpi_ec_remove,
		},
};
  1463. static inline int acpi_ec_query_init(void)
  1464. {
  1465. if (!ec_query_wq) {
  1466. ec_query_wq = alloc_workqueue("kec_query", 0,
  1467. ec_max_queries);
  1468. if (!ec_query_wq)
  1469. return -ENODEV;
  1470. }
  1471. return 0;
  1472. }
  1473. static inline void acpi_ec_query_exit(void)
  1474. {
  1475. if (ec_query_wq) {
  1476. destroy_workqueue(ec_query_wq);
  1477. ec_query_wq = NULL;
  1478. }
  1479. }
  1480. int __init acpi_ec_init(void)
  1481. {
  1482. int result;
  1483. /* register workqueue for _Qxx evaluations */
  1484. result = acpi_ec_query_init();
  1485. if (result)
  1486. goto err_exit;
  1487. /* Now register the driver for the EC */
  1488. result = acpi_bus_register_driver(&acpi_ec_driver);
  1489. if (result)
  1490. goto err_exit;
  1491. err_exit:
  1492. if (result)
  1493. acpi_ec_query_exit();
  1494. return result;
  1495. }
/* EC driver currently not unloadable */
#if 0
/* Would unwind acpi_ec_init(); kept for reference should unload be added. */
static void __exit acpi_ec_exit(void)
{
	acpi_bus_unregister_driver(&acpi_ec_driver);
	acpi_ec_query_exit();
}
#endif	/* 0 */