/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/syscore_ops.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
								acpi_cstate);
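
/*
 * The "idle=poll" and "idle=halt" kernel parameters install their own idle
 * handlers and bypass this driver entirely, so treat either override as
 * "ACPI C-state support disabled".
 */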
static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		/*
		 * safe_halt() returns with interrupts enabled; restore
		 * the disabled state the caller expects.
		 */
		local_irq_disable();
	}
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (amd_e400_c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		if (broadcast)
			tick_broadcast_enter();
		else
			tick_broadcast_exit();
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif
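
/*
 * BM_RLD controls whether Bus Master requests can take the system out of
 * C3.  Firmware may clear it across a suspend/resume cycle, so save it on
 * suspend and rewrite it on resume only when it actually changed.
 */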
#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

static int acpi_processor_suspend(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_processor_resume(void)
{
	u32 resumed_bm_rld = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static struct syscore_ops acpi_processor_syscore_ops = {
	.suspend = acpi_processor_suspend,
	.resume = acpi_processor_resume,
};

void acpi_processor_syscore_init(void)
{
	register_syscore_ops(&acpi_processor_syscore_ops);
}

void acpi_processor_syscore_exit(void)
{
	unregister_syscore_ops(&acpi_processor_syscore_ops);
}
#endif /* CONFIG_PM_SLEEP */
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/*
	 * determine C2 and C3 address from pblk
	 * (P_LVL2 lives at P_BLK + 4, P_LVL3 at P_BLK + 5)
	 */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
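
/*
 * _CST returns a package of the form { count, CST1, CST2, ... }, where each
 * CSTn is itself a four-element package: { register, type, latency, power }.
 * The parser below walks that layout, skipping malformed entries.
 */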
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status;
	u64 count;
	int current_count;
	int i, ret = 0;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		ret = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		ret = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);

		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;
		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (boot_option_idle_override == IDLE_NOMWAIT)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support up to ACPI_PROCESSOR_MAX_POWER - 1 states
		 * (indices 1 through ACPI_PROCESSOR_MAX_POWER - 1).
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		ret = -EFAULT;

end:
	kfree(buffer.pointer);

	return ret;
}
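
/*
 * C3 is only safe when cache coherency can be maintained while the CPU
 * stops snooping: either via bus master status checking (bm_check, ideally
 * with ARB_DIS through bm_control) or via a FADT-advertised working WBINVD.
 */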
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);

		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}
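
/*
 * Populate pr->power.states: prefer _CST, fall back to the FADT P_BLK
 * values when _CST is absent, and always ensure a usable C1 entry exists.
 */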
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			/* See comment in acpi_idle_do_entry() */
			inl(acpi_gbl_FADT.xpm_timer_block.address);
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}
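
/*
 * Without _CST, the FADT-supplied C2/C3 states are only known to be MP-safe
 * when the P_LVL2_UP flag is set.  With CPU hotplug enabled, another CPU
 * may come online at any time, so the idle entry paths demote to C1 in
 * that configuration.
 */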
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 * @timer_bc: Whether or not to change timer mode to broadcast
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx, bool timer_bc)
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control determines whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not
	 * set. In that case we cannot do much, we enter C3 without doing
	 * anything.
	 */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 0);
}
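
/*
 * acpi_idle_enter - the cpuidle ->enter callback.  Enters the C-state the
 * governor chose, demoting to a safer state when missing MP support or
 * observed bus master activity makes the requested one inadvisable.
 */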
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = CPUIDLE_DRIVER_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx, true);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
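
/*
 * acpi_idle_enter_freeze - the ->enter_freeze callback used for
 * suspend-to-idle.  Timekeeping is already suspended on this path, so the
 * lapic timer broadcast machinery is deliberately skipped (timer_bc ==
 * false).
 */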
static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return;

		if (pr->flags.bm_check) {
			acpi_idle_enter_bm(pr, cx, false);
			return;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);
}
struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
/**
 * acpi_processor_setup_cpuidle_cx - prepares and configures the per-CPU
 * cpuidle device, i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	if (!dev)
		return -EINVAL;

	dev->cpu = pr->id;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}
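
/*
 * Note the split: acpi_processor_setup_cpuidle_cx() above fills the per-CPU
 * acpi_cstate[] table, while acpi_processor_setup_cpuidle_states() below
 * fills the single driver-wide cpuidle_state table that all CPUs share.
 */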
/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data, i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_freeze, because it
		 * re-enables interrupts on exit.  Moreover, C1 is generally
		 * not particularly interesting from the suspend-to-idle
		 * angle, so avoid C1 and the situations in which we may need
		 * to fall back to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_freeze = acpi_idle_enter_freeze;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
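
/*
 * acpi_processor_hotplug() re-evaluates one CPU's C-states after a hotplug
 * event: its cpuidle device is paused, the power info is re-read from the
 * firmware, and the device is re-enabled with the fresh per-CPU state table.
 */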
int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle_cx(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */
	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_cx(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}

		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}
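
/*
 * The cpuidle driver itself is registered once, by whichever CPU gets here
 * first; each CPU then registers its own cpuidle device.
 * acpi_processor_registered counts the registered devices so the driver
 * can be torn down along with the last of them.
 */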
static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	acpi_status status;
	int retval;
	struct cpuidle_device *dev;
	static int first_run;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is
	 * supported.  Note that the previously set idle handler will be
	 * used on platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
			       acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_cx(pr, dev);

		/* Register per-cpu cpuidle_device.  Cpuidle driver
		 * must already be registered before registering device
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}