/* powernv-cpufreq.c — scraped listing header; per-line number residue removed. */
  1. /*
  2. * POWERNV cpufreq driver for the IBM POWER processors
  3. *
  4. * (C) Copyright IBM 2014
  5. *
  6. * Author: Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2, or (at your option)
  11. * any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. */
  19. #define pr_fmt(fmt) "powernv-cpufreq: " fmt
  20. #include <linux/kernel.h>
  21. #include <linux/sysfs.h>
  22. #include <linux/cpumask.h>
  23. #include <linux/module.h>
  24. #include <linux/cpufreq.h>
  25. #include <linux/smp.h>
  26. #include <linux/of.h>
  27. #include <linux/reboot.h>
  28. #include <linux/slab.h>
  29. #include <asm/cputhreads.h>
  30. #include <asm/firmware.h>
  31. #include <asm/reg.h>
  32. #include <asm/smp.h> /* Required for cpu_sibling_mask() in UP configs */
  33. #include <asm/opal.h>
#define POWERNV_MAX_PSTATES	256
#define PMSR_PSAFE_ENABLE	(1UL << 30)	/* Psafe_mode_active bit in PMSR */
#define PMSR_SPR_EM_DISABLE	(1UL << 31)	/* OS frequency control disabled */
#define PMSR_MAX(x)		((x >> 32) & 0xFF)	/* Pmax field of PMSR */

/* Frequency table built from the device tree; +1 for the end-of-table marker. */
static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
static bool rebooting, throttled, occ_reset;

/* Per-chip throttle bookkeeping, filled in by init_chip_info(). */
static struct chip {
	unsigned int id;		/* hardware chip id (cpu_to_chip_id()) */
	bool throttled;			/* true while Pmax is capped below max */
	cpumask_t mask;			/* CPUs belonging to this chip */
	struct work_struct throttle;	/* deferred throttle check / restore */
	bool restore;			/* re-apply policy frequency after OCC reset */
} *chips;

static int nr_chips;
/*
 * Note: The set of pstates consists of contiguous integers, the
 * smallest of which is indicated by powernv_pstate_info.min, the
 * largest of which is indicated by powernv_pstate_info.max.
 *
 * The nominal pstate is the highest non-turbo pstate in this
 * platform. This is indicated by powernv_pstate_info.nominal.
 */
static struct powernv_pstate_info {
	int min;	/* lowest pstate id (ibm,pstate-min) */
	int max;	/* highest pstate id (ibm,pstate-max) */
	int nominal;	/* highest non-turbo pstate id (ibm,pstate-nominal) */
	int nr_pstates;	/* number of valid entries in powernv_freqs[] */
} powernv_pstate_info;
  62. /*
  63. * Initialize the freq table based on data obtained
  64. * from the firmware passed via device-tree
  65. */
  66. static int init_powernv_pstates(void)
  67. {
  68. struct device_node *power_mgt;
  69. int i, pstate_min, pstate_max, pstate_nominal, nr_pstates = 0;
  70. const __be32 *pstate_ids, *pstate_freqs;
  71. u32 len_ids, len_freqs;
  72. power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
  73. if (!power_mgt) {
  74. pr_warn("power-mgt node not found\n");
  75. return -ENODEV;
  76. }
  77. if (of_property_read_u32(power_mgt, "ibm,pstate-min", &pstate_min)) {
  78. pr_warn("ibm,pstate-min node not found\n");
  79. return -ENODEV;
  80. }
  81. if (of_property_read_u32(power_mgt, "ibm,pstate-max", &pstate_max)) {
  82. pr_warn("ibm,pstate-max node not found\n");
  83. return -ENODEV;
  84. }
  85. if (of_property_read_u32(power_mgt, "ibm,pstate-nominal",
  86. &pstate_nominal)) {
  87. pr_warn("ibm,pstate-nominal not found\n");
  88. return -ENODEV;
  89. }
  90. pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
  91. pstate_nominal, pstate_max);
  92. pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
  93. if (!pstate_ids) {
  94. pr_warn("ibm,pstate-ids not found\n");
  95. return -ENODEV;
  96. }
  97. pstate_freqs = of_get_property(power_mgt, "ibm,pstate-frequencies-mhz",
  98. &len_freqs);
  99. if (!pstate_freqs) {
  100. pr_warn("ibm,pstate-frequencies-mhz not found\n");
  101. return -ENODEV;
  102. }
  103. if (len_ids != len_freqs) {
  104. pr_warn("Entries in ibm,pstate-ids and "
  105. "ibm,pstate-frequencies-mhz does not match\n");
  106. }
  107. nr_pstates = min(len_ids, len_freqs) / sizeof(u32);
  108. if (!nr_pstates) {
  109. pr_warn("No PStates found\n");
  110. return -ENODEV;
  111. }
  112. pr_debug("NR PStates %d\n", nr_pstates);
  113. for (i = 0; i < nr_pstates; i++) {
  114. u32 id = be32_to_cpu(pstate_ids[i]);
  115. u32 freq = be32_to_cpu(pstate_freqs[i]);
  116. pr_debug("PState id %d freq %d MHz\n", id, freq);
  117. powernv_freqs[i].frequency = freq * 1000; /* kHz */
  118. powernv_freqs[i].driver_data = id;
  119. }
  120. /* End of list marker entry */
  121. powernv_freqs[i].frequency = CPUFREQ_TABLE_END;
  122. powernv_pstate_info.min = pstate_min;
  123. powernv_pstate_info.max = pstate_max;
  124. powernv_pstate_info.nominal = pstate_nominal;
  125. powernv_pstate_info.nr_pstates = nr_pstates;
  126. return 0;
  127. }
  128. /* Returns the CPU frequency corresponding to the pstate_id. */
  129. static unsigned int pstate_id_to_freq(int pstate_id)
  130. {
  131. int i;
  132. i = powernv_pstate_info.max - pstate_id;
  133. if (i >= powernv_pstate_info.nr_pstates || i < 0) {
  134. pr_warn("PState id %d outside of PState table, "
  135. "reporting nominal id %d instead\n",
  136. pstate_id, powernv_pstate_info.nominal);
  137. i = powernv_pstate_info.max - powernv_pstate_info.nominal;
  138. }
  139. return powernv_freqs[i].frequency;
  140. }
  141. /*
  142. * cpuinfo_nominal_freq_show - Show the nominal CPU frequency as indicated by
  143. * the firmware
  144. */
  145. static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
  146. char *buf)
  147. {
  148. return sprintf(buf, "%u\n",
  149. pstate_id_to_freq(powernv_pstate_info.nominal));
  150. }
/* Extra sysfs attribute exposing the firmware's nominal frequency. */
struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
	__ATTR_RO(cpuinfo_nominal_freq);

/* NULL-terminated attribute list handed to the cpufreq core. */
static struct freq_attr *powernv_cpu_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_cpuinfo_nominal_freq,
	NULL,
};
  158. /* Helper routines */
  159. /* Access helpers to power mgt SPR */
  160. static inline unsigned long get_pmspr(unsigned long sprn)
  161. {
  162. switch (sprn) {
  163. case SPRN_PMCR:
  164. return mfspr(SPRN_PMCR);
  165. case SPRN_PMICR:
  166. return mfspr(SPRN_PMICR);
  167. case SPRN_PMSR:
  168. return mfspr(SPRN_PMSR);
  169. }
  170. BUG();
  171. }
  172. static inline void set_pmspr(unsigned long sprn, unsigned long val)
  173. {
  174. switch (sprn) {
  175. case SPRN_PMCR:
  176. mtspr(SPRN_PMCR, val);
  177. return;
  178. case SPRN_PMICR:
  179. mtspr(SPRN_PMICR, val);
  180. return;
  181. }
  182. BUG();
  183. }
/*
 * Use objects of this type to query/update
 * pstates on a remote CPU via smp_call_function.
 */
struct powernv_smp_call_data {
	unsigned int freq;	/* frequency in kHz (output of reads) */
	int pstate_id;		/* pstate id read back or to be programmed */
};
  192. /*
  193. * powernv_read_cpu_freq: Reads the current frequency on this CPU.
  194. *
  195. * Called via smp_call_function.
  196. *
  197. * Note: The caller of the smp_call_function should pass an argument of
  198. * the type 'struct powernv_smp_call_data *' along with this function.
  199. *
  200. * The current frequency on this CPU will be returned via
  201. * ((struct powernv_smp_call_data *)arg)->freq;
  202. */
  203. static void powernv_read_cpu_freq(void *arg)
  204. {
  205. unsigned long pmspr_val;
  206. s8 local_pstate_id;
  207. struct powernv_smp_call_data *freq_data = arg;
  208. pmspr_val = get_pmspr(SPRN_PMSR);
  209. /*
  210. * The local pstate id corresponds bits 48..55 in the PMSR.
  211. * Note: Watch out for the sign!
  212. */
  213. local_pstate_id = (pmspr_val >> 48) & 0xFF;
  214. freq_data->pstate_id = local_pstate_id;
  215. freq_data->freq = pstate_id_to_freq(freq_data->pstate_id);
  216. pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n",
  217. raw_smp_processor_id(), pmspr_val, freq_data->pstate_id,
  218. freq_data->freq);
  219. }
/*
 * powernv_cpufreq_get: Returns the CPU frequency as reported by the
 * firmware for CPU 'cpu'. This value is reported through the sysfs
 * file cpuinfo_cur_freq.
 */
static unsigned int powernv_cpufreq_get(unsigned int cpu)
{
	struct powernv_smp_call_data freq_data;

	/*
	 * Run the read on any sibling thread of 'cpu'; wait=1 so
	 * freq_data is filled in before we return.
	 * NOTE(review): freq_data.freq stays uninitialized if the IPI
	 * can't be delivered — confirm smp_call_function_any() cannot
	 * fail for an online CPU's sibling mask.
	 */
	smp_call_function_any(cpu_sibling_mask(cpu), powernv_read_cpu_freq,
			&freq_data, 1);

	return freq_data.freq;
}
  232. /*
  233. * set_pstate: Sets the pstate on this CPU.
  234. *
  235. * This is called via an smp_call_function.
  236. *
  237. * The caller must ensure that freq_data is of the type
  238. * (struct powernv_smp_call_data *) and the pstate_id which needs to be set
  239. * on this CPU should be present in freq_data->pstate_id.
  240. */
  241. static void set_pstate(void *freq_data)
  242. {
  243. unsigned long val;
  244. unsigned long pstate_ul =
  245. ((struct powernv_smp_call_data *) freq_data)->pstate_id;
  246. val = get_pmspr(SPRN_PMCR);
  247. val = val & 0x0000FFFFFFFFFFFFULL;
  248. pstate_ul = pstate_ul & 0xFF;
  249. /* Set both global(bits 56..63) and local(bits 48..55) PStates */
  250. val = val | (pstate_ul << 56) | (pstate_ul << 48);
  251. pr_debug("Setting cpu %d pmcr to %016lX\n",
  252. raw_smp_processor_id(), val);
  253. set_pmspr(SPRN_PMCR, val);
  254. }
  255. /*
  256. * get_nominal_index: Returns the index corresponding to the nominal
  257. * pstate in the cpufreq table
  258. */
  259. static inline unsigned int get_nominal_index(void)
  260. {
  261. return powernv_pstate_info.max - powernv_pstate_info.nominal;
  262. }
  263. static void powernv_cpufreq_throttle_check(void *data)
  264. {
  265. unsigned int cpu = smp_processor_id();
  266. unsigned long pmsr;
  267. int pmsr_pmax, i;
  268. pmsr = get_pmspr(SPRN_PMSR);
  269. for (i = 0; i < nr_chips; i++)
  270. if (chips[i].id == cpu_to_chip_id(cpu))
  271. break;
  272. /* Check for Pmax Capping */
  273. pmsr_pmax = (s8)PMSR_MAX(pmsr);
  274. if (pmsr_pmax != powernv_pstate_info.max) {
  275. if (chips[i].throttled)
  276. goto next;
  277. chips[i].throttled = true;
  278. if (pmsr_pmax < powernv_pstate_info.nominal)
  279. pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
  280. cpu, chips[i].id, pmsr_pmax,
  281. powernv_pstate_info.nominal);
  282. else
  283. pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
  284. cpu, chips[i].id, pmsr_pmax,
  285. powernv_pstate_info.max);
  286. } else if (chips[i].throttled) {
  287. chips[i].throttled = false;
  288. pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
  289. chips[i].id, pmsr_pmax);
  290. }
  291. /* Check if Psafe_mode_active is set in PMSR. */
  292. next:
  293. if (pmsr & PMSR_PSAFE_ENABLE) {
  294. throttled = true;
  295. pr_info("Pstate set to safe frequency\n");
  296. }
  297. /* Check if SPR_EM_DISABLE is set in PMSR */
  298. if (pmsr & PMSR_SPR_EM_DISABLE) {
  299. throttled = true;
  300. pr_info("Frequency Control disabled from OS\n");
  301. }
  302. if (throttled) {
  303. pr_info("PMSR = %16lx\n", pmsr);
  304. pr_crit("CPU Frequency could be throttled\n");
  305. }
  306. }
/*
 * powernv_cpufreq_target_index: Sets the frequency corresponding to
 * the cpufreq table entry indexed by new_index on the cpus in the
 * mask policy->cpus
 */
static int powernv_cpufreq_target_index(struct cpufreq_policy *policy,
					unsigned int new_index)
{
	struct powernv_smp_call_data freq_data;

	/*
	 * Once a reboot is in progress only the nominal frequency may
	 * be programmed (the reboot notifier pins CPUs there); any
	 * other request is silently ignored.
	 */
	if (unlikely(rebooting) && new_index != get_nominal_index())
		return 0;

	if (!throttled) {
		/* we don't want to be preempted while
		 * checking if the CPU frequency has been throttled
		 */
		preempt_disable();
		powernv_cpufreq_throttle_check(NULL);
		preempt_enable();
	}

	freq_data.pstate_id = powernv_freqs[new_index].driver_data;

	/*
	 * Use smp_call_function to send IPI and execute the
	 * mtspr on target CPU. We could do that without IPI
	 * if current CPU is within policy->cpus (core)
	 */
	smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);

	return 0;
}
  335. static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy)
  336. {
  337. int base, i;
  338. base = cpu_first_thread_sibling(policy->cpu);
  339. for (i = 0; i < threads_per_core; i++)
  340. cpumask_set_cpu(base + i, policy->cpus);
  341. return cpufreq_table_validate_and_show(policy, powernv_freqs);
  342. }
/*
 * Reboot notifier: pin every online CPU to the nominal pstate before
 * reboot.  Setting 'rebooting' first makes target_index() refuse any
 * other frequency from here on.
 */
static int powernv_cpufreq_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	int cpu;
	struct cpufreq_policy cpu_policy;

	rebooting = true;
	for_each_online_cpu(cpu) {
		cpufreq_get_policy(&cpu_policy, cpu);
		powernv_cpufreq_target_index(&cpu_policy, get_nominal_index());
	}

	return NOTIFY_DONE;
}

static struct notifier_block powernv_cpufreq_reboot_nb = {
	.notifier_call = powernv_cpufreq_reboot_notifier,
};
  358. void powernv_cpufreq_work_fn(struct work_struct *work)
  359. {
  360. struct chip *chip = container_of(work, struct chip, throttle);
  361. unsigned int cpu;
  362. cpumask_var_t mask;
  363. smp_call_function_any(&chip->mask,
  364. powernv_cpufreq_throttle_check, NULL, 0);
  365. if (!chip->restore)
  366. return;
  367. chip->restore = false;
  368. cpumask_copy(mask, &chip->mask);
  369. for_each_cpu_and(cpu, mask, cpu_online_mask) {
  370. int index, tcpu;
  371. struct cpufreq_policy policy;
  372. cpufreq_get_policy(&policy, cpu);
  373. cpufreq_frequency_table_target(&policy, policy.freq_table,
  374. policy.cur,
  375. CPUFREQ_RELATION_C, &index);
  376. powernv_cpufreq_target_index(&policy, index);
  377. for_each_cpu(tcpu, policy.cpus)
  378. cpumask_clear_cpu(tcpu, mask);
  379. }
  380. }
/*
 * Human-readable OCC throttle reasons, indexed by the throttle_status
 * value carried in the OPAL OCC_THROTTLE message (0 == not throttled).
 */
static char throttle_reason[][30] = {
	"No throttling",
	"Power Cap",
	"Processor Over Temperature",
	"Power Supply Failure",
	"Over Current",
	"OCC Reset"
};
/*
 * powernv_cpufreq_occ_msg - OPAL notifier callback for OCC messages.
 *
 * Tracks OCC (On Chip Controller) state transitions: RESET marks the
 * system throttled, LOAD is informational, THROTTLE either clears the
 * reset state (scheduling a frequency restore on every chip) or
 * reports the chip's new throttle status and schedules the per-chip
 * work item.  Always returns 0 (notifier convention).
 */
static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
				unsigned long msg_type, void *_msg)
{
	struct opal_msg *msg = _msg;
	struct opal_occ_msg omsg;
	int i;

	if (msg_type != OPAL_MSG_OCC)
		return 0;

	/* params[0] carries the OCC message sub-type. */
	omsg.type = be64_to_cpu(msg->params[0]);

	switch (omsg.type) {
	case OCC_RESET:
		occ_reset = true;
		pr_info("OCC (On Chip Controller - enforces hard thermal/power limits) Resetting\n");
		/*
		 * powernv_cpufreq_throttle_check() is called in
		 * target() callback which can detect the throttle state
		 * for governors like ondemand.
		 * But static governors will not call target() often thus
		 * report throttling here.
		 */
		if (!throttled) {
			throttled = true;
			pr_crit("CPU frequency is throttled for duration\n");
		}
		break;
	case OCC_LOAD:
		pr_info("OCC Loading, CPU frequency is throttled until OCC is started\n");
		break;
	case OCC_THROTTLE:
		/* params[1] = chip id, params[2] = throttle reason code. */
		omsg.chip = be64_to_cpu(msg->params[1]);
		omsg.throttle_status = be64_to_cpu(msg->params[2]);

		/*
		 * First THROTTLE after a RESET means the OCC is active
		 * again: clear the flags and restore every chip.
		 */
		if (occ_reset) {
			occ_reset = false;
			throttled = false;
			pr_info("OCC Active, CPU frequency is no longer throttled\n");

			for (i = 0; i < nr_chips; i++) {
				chips[i].restore = true;
				schedule_work(&chips[i].throttle);
			}

			return 0;
		}

		if (omsg.throttle_status &&
		    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
			pr_info("OCC: Chip %u Pmax reduced due to %s\n",
				(unsigned int)omsg.chip,
				throttle_reason[omsg.throttle_status]);
		else if (!omsg.throttle_status)
			pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
				throttle_reason[omsg.throttle_status]);
		else
			/* Unknown status code: ignore the message. */
			return 0;

		/* Kick the matching chip's work item to re-check/restore. */
		for (i = 0; i < nr_chips; i++)
			if (chips[i].id == omsg.chip) {
				if (!omsg.throttle_status)
					chips[i].restore = true;
				schedule_work(&chips[i].throttle);
			}
	}
	return 0;
}

static struct notifier_block powernv_cpufreq_opal_nb = {
	.notifier_call	= powernv_cpufreq_occ_msg,
	.next		= NULL,
	.priority	= 0,
};
  454. static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy)
  455. {
  456. struct powernv_smp_call_data freq_data;
  457. freq_data.pstate_id = powernv_pstate_info.min;
  458. smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1);
  459. }
/* cpufreq driver operations registered with the cpufreq core. */
static struct cpufreq_driver powernv_cpufreq_driver = {
	.name		= "powernv-cpufreq",
	.flags		= CPUFREQ_CONST_LOOPS,
	.init		= powernv_cpufreq_cpu_init,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= powernv_cpufreq_target_index,
	.get		= powernv_cpufreq_get,
	.stop_cpu	= powernv_cpufreq_stop_cpu,
	.attr		= powernv_cpu_freq_attr,
};
  470. static int init_chip_info(void)
  471. {
  472. unsigned int chip[256];
  473. unsigned int cpu, i;
  474. unsigned int prev_chip_id = UINT_MAX;
  475. for_each_possible_cpu(cpu) {
  476. unsigned int id = cpu_to_chip_id(cpu);
  477. if (prev_chip_id != id) {
  478. prev_chip_id = id;
  479. chip[nr_chips++] = id;
  480. }
  481. }
  482. chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
  483. if (!chips)
  484. return -ENOMEM;
  485. for (i = 0; i < nr_chips; i++) {
  486. chips[i].id = chip[i];
  487. chips[i].throttled = false;
  488. cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
  489. INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
  490. chips[i].restore = false;
  491. }
  492. return 0;
  493. }
  494. static int __init powernv_cpufreq_init(void)
  495. {
  496. int rc = 0;
  497. /* Don't probe on pseries (guest) platforms */
  498. if (!firmware_has_feature(FW_FEATURE_OPAL))
  499. return -ENODEV;
  500. /* Discover pstates from device tree and init */
  501. rc = init_powernv_pstates();
  502. if (rc) {
  503. pr_info("powernv-cpufreq disabled. System does not support PState control\n");
  504. return rc;
  505. }
  506. /* Populate chip info */
  507. rc = init_chip_info();
  508. if (rc)
  509. return rc;
  510. register_reboot_notifier(&powernv_cpufreq_reboot_nb);
  511. opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);
  512. return cpufreq_register_driver(&powernv_cpufreq_driver);
  513. }
  514. module_init(powernv_cpufreq_init);
  515. static void __exit powernv_cpufreq_exit(void)
  516. {
  517. unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
  518. opal_message_notifier_unregister(OPAL_MSG_OCC,
  519. &powernv_cpufreq_opal_nb);
  520. cpufreq_unregister_driver(&powernv_cpufreq_driver);
  521. }
  522. module_exit(powernv_cpufreq_exit);
  523. MODULE_LICENSE("GPL");
  524. MODULE_AUTHOR("Vaidyanathan Srinivasan <svaidy at linux.vnet.ibm.com>");