idle.c

/*
 * PowerNV cpuidle code
 *
 * Copyright 2015 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/cpu.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/opal.h>
#include <asm/cputhreads.h>
#include <asm/cpuidle.h>
#include <asm/code-patching.h>
#include <asm/smp.h>

#include "powernv.h"
#include "subcore.h"

static u32 supported_cpuidle_states;

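/*
 * Program the SLW engine via OPAL with the SPR values it must restore
 * on wakeup from winkle, since winkle loses full core state. HSPRG0 is
 * saved with its low bit set so the wakeup path can tell winkle apart
 * from fastsleep.
 */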
int pnv_save_sprs_for_winkle(void)
{
	int cpu;
	int rc;

	/*
	 * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
	 * all cpus at boot. Get these reg values of the current cpu and use
	 * the same across all cpus.
	 */
	uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
	uint64_t hid0_val = mfspr(SPRN_HID0);
	uint64_t hid1_val = mfspr(SPRN_HID1);
	uint64_t hid4_val = mfspr(SPRN_HID4);
	uint64_t hid5_val = mfspr(SPRN_HID5);
	uint64_t hmeer_val = mfspr(SPRN_HMEER);

	for_each_possible_cpu(cpu) {
		uint64_t pir = get_hard_smp_processor_id(cpu);
		uint64_t hsprg0_val = (uint64_t)&paca[cpu];

		/*
		 * HSPRG0 is used to store the cpu's pointer to paca. Hence
		 * the last 3 bits are guaranteed to be 0. Program slw to
		 * restore HSPRG0 with the 63rd bit set, so that when a
		 * thread wakes up at 0x100 we can use this bit to
		 * distinguish between fastsleep and deep winkle.
		 */
		hsprg0_val |= 1;

		rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
		if (rc != 0)
			return rc;

		rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
		if (rc != 0)
			return rc;

		/* HIDs are per core registers */
		if (cpu_thread_in_core(cpu) == 0) {
			rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
			if (rc != 0)
				return rc;

			rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
			if (rc != 0)
				return rc;
		}
	}

	return 0;
}

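/*
 * Allocate one shared core_idle_state word per core and point every
 * thread's paca at it, so the idle entry/exit paths can coordinate the
 * threads of a core while saving and restoring shared core state.
 */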
static void pnv_alloc_idle_core_states(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	u32 *core_idle_state;

	/*
	 * core_idle_state - First 8 bits track the idle state of each thread
	 * of the core. The 8th bit is the lock bit. Initially all thread bits
	 * are set. They are cleared when the thread enters deep idle state
	 * like sleep and winkle. Initially the lock bit is cleared.
	 * The lock bit has 2 purposes:
	 * a. While the first thread is restoring core state, it prevents
	 *    other threads in the core from switching to process context.
	 * b. While the last thread in the core is saving the core state, it
	 *    prevents a different thread from waking up.
	 */
	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);

		core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
		*core_idle_state = PNV_CORE_IDLE_THREAD_BITS;

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca[cpu].core_idle_state_ptr = core_idle_state;
			paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
			paca[cpu].thread_mask = 1 << j;
		}
	}

	update_subcore_sibling_mask();

	if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
		pnv_save_sprs_for_winkle();
}

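/* Report the union of the idle-state flags found in the device tree. */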
u32 pnv_get_supported_cpuidle_states(void)
{
	return supported_cpuidle_states;
}
EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);

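/*
 * Run via on_each_cpu_mask() on one thread of each online core. Asks
 * OPAL to apply the fastsleep workaround and records any failure in
 * the int pointed to by info.
 */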
static void pnv_fastsleep_workaround_apply(void *info)
{
	int rc;
	int *err = info;

	rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
					OPAL_CONFIG_IDLE_APPLY);
	if (rc)
		*err = 1;
}

/*
 * Used to store fastsleep workaround state
 * 0 - Workaround applied/undone at fastsleep entry/exit path (Default)
 * 1 - Workaround applied once, never undone.
 */
static u8 fastsleep_workaround_applyonce;

static ssize_t show_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", fastsleep_workaround_applyonce);
}

static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
		struct device_attribute *attr, const char *buf,
		size_t count)
{
	cpumask_t primary_thread_mask;
	int err;
	u8 val;

	if (kstrtou8(buf, 0, &val) || val != 1)
		return -EINVAL;

	if (fastsleep_workaround_applyonce == 1)
		return count;

	/*
	 * fastsleep_workaround_applyonce = 1 implies that the fastsleep
	 * workaround needs to be left in 'applied' state on all the cores.
	 * Do this by:
	 * 1. Patching out the call to 'undo' workaround in fastsleep exit
	 *    path
	 * 2. Sending an IPI to all the cores which have at least one online
	 *    thread
	 * 3. Patching out the call to 'apply' workaround in fastsleep entry
	 *    path
	 * There is no need to send an IPI to cores which have all threads
	 * offline, as the last thread of the core entering fastsleep or a
	 * deeper state would have applied the workaround.
	 */
	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_exit,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_exit\n");
		goto fail;
	}

	get_online_cpus();
	primary_thread_mask = cpu_online_cores_map();
	on_each_cpu_mask(&primary_thread_mask,
			 pnv_fastsleep_workaround_apply,
			 &err, 1);
	put_online_cpus();
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply\n");
		goto fail;
	}

	err = patch_instruction(
		(unsigned int *)pnv_fastsleep_workaround_at_entry,
		PPC_INST_NOP);
	if (err) {
		pr_err("fastsleep_workaround_applyonce change failed while patching pnv_fastsleep_workaround_at_entry\n");
		goto fail;
	}

	fastsleep_workaround_applyonce = 1;

	return count;
fail:
	return -EIO;
}

static DEVICE_ATTR(fastsleep_workaround_applyonce, 0600,
			show_fastsleep_workaround_applyonce,
			store_fastsleep_workaround_applyonce);

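/*
 * Boot-time setup: read the idle-state flags OPAL advertises in the
 * device tree, patch out the fastsleep workaround branches when the
 * workaround is not needed, and set up per-core idle state tracking.
 */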
static int __init pnv_init_idle_states(void)
{
	struct device_node *power_mgt;
	int dt_idle_states;
	u32 *flags;
	int i;

	supported_cpuidle_states = 0;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		goto out;

	if (!firmware_has_feature(FW_FEATURE_OPAL))
		goto out;

	power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
	if (!power_mgt) {
		pr_warn("opal: PowerMgmt Node not found\n");
		goto out;
	}
	dt_idle_states = of_property_count_u32_elems(power_mgt,
			"ibm,cpu-idle-state-flags");
	if (dt_idle_states < 0) {
		pr_warn("cpuidle-powernv: no idle states found in the DT\n");
		goto out;
	}

	flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
	if (!flags)
		goto out;
	if (of_property_read_u32_array(power_mgt,
			"ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
		pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
		goto out_free;
	}

	for (i = 0; i < dt_idle_states; i++)
		supported_cpuidle_states |= flags[i];

	if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_entry,
			PPC_INST_NOP);
		patch_instruction(
			(unsigned int *)pnv_fastsleep_workaround_at_exit,
			PPC_INST_NOP);
	} else {
		/*
		 * OPAL_PM_SLEEP_ENABLED_ER1 is set. It indicates that the
		 * workaround is needed to use fastsleep. Provide sysfs
		 * control to choose how this workaround has to be applied.
		 */
		device_create_file(cpu_subsys.dev_root,
				&dev_attr_fastsleep_workaround_applyonce);
	}

	pnv_alloc_idle_core_states();

out_free:
	kfree(flags);
out:
	return 0;
}
machine_subsys_initcall(powernv, pnv_init_idle_states);