cpu.c

/*
 * Generic OPP helper interface for CPU device
 *
 * Copyright (C) 2009-2014 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "opp.h"

#ifdef CONFIG_CPU_FREQ
/**
 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * OPP list is already initialized and ready for usage.
 *
 * This function allocates the memory required for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENODATA if the device has no OPPs, -ENOMEM if no memory is available for
 * the operation (table is not populated), and 0 on success (table is
 * populated).
 *
 * WARNING: It is important for the callers to refresh their copy of the table
 * if any of the OPP-modifying functions (such as dev_pm_opp_add(),
 * dev_pm_opp_enable() or dev_pm_opp_disable()) have been invoked in the
 * interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Since we just use the regular accessor functions to access the internal data
 * structures, we use RCU read lock inside this function. As a result, users of
 * this function DO NOT need to use explicit locks for invoking.
 */
int dev_pm_opp_init_cpufreq_table(struct device *dev,
                                  struct cpufreq_frequency_table **table)
{
        struct dev_pm_opp *opp;
        struct cpufreq_frequency_table *freq_table = NULL;
        int i, max_opps, ret = 0;
        unsigned long rate;

        rcu_read_lock();

        max_opps = dev_pm_opp_get_opp_count(dev);
        if (max_opps <= 0) {
                ret = max_opps ? max_opps : -ENODATA;
                goto out;
        }

        freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
        if (!freq_table) {
                ret = -ENOMEM;
                goto out;
        }

        for (i = 0, rate = 0; i < max_opps; i++, rate++) {
                /* find next rate */
                opp = dev_pm_opp_find_freq_ceil(dev, &rate);
                if (IS_ERR(opp)) {
                        ret = PTR_ERR(opp);
                        goto out;
                }
                freq_table[i].driver_data = i;
                freq_table[i].frequency = rate / 1000;

                /* Is Boost/turbo opp ? */
                if (dev_pm_opp_is_turbo(opp))
                        freq_table[i].flags = CPUFREQ_BOOST_FREQ;
        }

        freq_table[i].driver_data = i;
        freq_table[i].frequency = CPUFREQ_TABLE_END;

        *table = &freq_table[0];

out:
        rcu_read_unlock();
        if (ret)
                kfree(freq_table);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
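
/*
 * Illustrative sketch only (not part of this file): a cpufreq driver's
 * ->init() callback would typically build its frequency table from the
 * already-registered OPPs roughly as below. All "example_*" names are
 * hypothetical, and the cpufreq core helper cpufreq_table_validate_and_show()
 * is assumed to be available to the caller.
 */
struct example_cpu_priv {
        struct device *cpu_dev;
        struct cpufreq_frequency_table *freq_table;
};

static int __maybe_unused example_cpufreq_init(struct cpufreq_policy *policy)
{
        struct example_cpu_priv *priv;
        struct device *cpu_dev = get_cpu_device(policy->cpu);
        int ret;

        if (!cpu_dev)
                return -ENODEV;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        priv->cpu_dev = cpu_dev;

        /* Build the table from the (already populated) OPP list */
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &priv->freq_table);
        if (ret)
                goto free_priv;

        /* Hand the table over to the cpufreq core */
        ret = cpufreq_table_validate_and_show(policy, priv->freq_table);
        if (ret)
                goto free_table;

        policy->driver_data = priv;
        return 0;

free_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &priv->freq_table);
free_priv:
        kfree(priv);
        return ret;
}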

/**
 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by dev_pm_opp_init_cpufreq_table().
 */
void dev_pm_opp_free_cpufreq_table(struct device *dev,
                                   struct cpufreq_frequency_table **table)
{
        if (!table)
                return;

        kfree(*table);
        *table = NULL;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
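
/*
 * Illustrative counterpart of the ->init() sketch above (hypothetical names):
 * the ->exit() callback releases the table built from the OPPs;
 * dev_pm_opp_free_cpufreq_table() also clears the stale table pointer.
 */
static int __maybe_unused example_cpufreq_exit(struct cpufreq_policy *policy)
{
        struct example_cpu_priv *priv = policy->driver_data;

        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &priv->freq_table);
        kfree(priv);
        policy->driver_data = NULL;

        return 0;
}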

#endif	/* CONFIG_CPU_FREQ */

/* Required only for V1 bindings, as v2 can manage it from DT itself */
int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
        struct device_list_opp *list_dev;
        struct device_opp *dev_opp;
        struct device *dev;
        int cpu, ret = 0;

        mutex_lock(&dev_opp_list_lock);

        dev_opp = _find_device_opp(cpu_dev);
        if (IS_ERR(dev_opp)) {
                ret = -EINVAL;
                goto unlock;
        }

        for_each_cpu(cpu, cpumask) {
                if (cpu == cpu_dev->id)
                        continue;

                dev = get_cpu_device(cpu);
                if (!dev) {
                        dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
                                __func__, cpu);
                        continue;
                }

                list_dev = _add_list_dev(dev, dev_opp);
                if (!list_dev) {
                        dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
                                __func__, cpu);
                        continue;
                }
        }
unlock:
        mutex_unlock(&dev_opp_list_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
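
/*
 * Illustrative sketch only: with the old V1 "operating-points" binding the
 * driver itself knows which CPUs share a clock/voltage rail, so it registers
 * the OPP table once and then marks the other CPUs as sharing it. The
 * function name and the caller-provided mask are hypothetical.
 */
static int __maybe_unused example_setup_cluster_v1(struct device *cpu_dev,
                                                   cpumask_var_t shared_cpus)
{
        int ret;

        /* Register the OPPs of the "master" CPU from its DT table */
        ret = dev_pm_opp_of_add_table(cpu_dev);
        if (ret)
                return ret;

        /* Let the remaining CPUs in shared_cpus reuse that same table */
        ret = dev_pm_opp_set_sharing_cpus(cpu_dev, shared_cpus);
        if (ret)
                dev_pm_opp_of_remove_table(cpu_dev);

        return ret;
}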

#ifdef CONFIG_OF
void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
{
        struct device *cpu_dev;
        int cpu;

        WARN_ON(cpumask_empty(cpumask));

        for_each_cpu(cpu, cpumask) {
                cpu_dev = get_cpu_device(cpu);
                if (!cpu_dev) {
                        pr_err("%s: failed to get cpu%d device\n", __func__,
                               cpu);
                        continue;
                }

                dev_pm_opp_of_remove_table(cpu_dev);
        }
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);

int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
{
        struct device *cpu_dev;
        int cpu, ret = 0;

        WARN_ON(cpumask_empty(cpumask));

        for_each_cpu(cpu, cpumask) {
                cpu_dev = get_cpu_device(cpu);
                if (!cpu_dev) {
                        pr_err("%s: failed to get cpu%d device\n", __func__,
                               cpu);
                        continue;
                }

                ret = dev_pm_opp_of_add_table(cpu_dev);
                if (ret) {
                        pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
                               __func__, cpu, ret);

                        /* Free all other OPPs */
                        dev_pm_opp_of_cpumask_remove_table(cpumask);
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
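
/*
 * Illustrative sketch only (hypothetical names): a driver bringing up a
 * cluster whose CPUs are described by operating-points-v2 tables can pair
 * these two helpers in its probe/remove paths. On failure the add helper has
 * already removed whatever it managed to add, so no extra unwinding is
 * needed here.
 */
static int __maybe_unused example_probe_cluster_opps(cpumask_var_t cpus)
{
        int ret;

        ret = dev_pm_opp_of_cpumask_add_table(cpus);
        if (ret)
                pr_err("failed to add OPP tables for cluster: %d\n", ret);

        return ret;
}

static void __maybe_unused example_remove_cluster_opps(cpumask_var_t cpus)
{
        dev_pm_opp_of_cpumask_remove_table(cpus);
}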

/*
 * Works only for OPP v2 bindings.
 *
 * The cpumask should already be set to the mask of cpu_dev->id.
 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
 */
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
{
        struct device_node *np, *tmp_np;
        struct device *tcpu_dev;
        int cpu, ret = 0;

        /* Get OPP descriptor node */
        np = _of_get_opp_desc_node(cpu_dev);
        if (!np) {
                dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
                return -ENOENT;
        }

        /* OPPs are shared ? */
        if (!of_property_read_bool(np, "opp-shared"))
                goto put_cpu_node;

        for_each_possible_cpu(cpu) {
                if (cpu == cpu_dev->id)
                        continue;

                tcpu_dev = get_cpu_device(cpu);
                if (!tcpu_dev) {
                        dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
                                __func__, cpu);
                        ret = -ENODEV;
                        goto put_cpu_node;
                }

                /* Get OPP descriptor node */
                tmp_np = _of_get_opp_desc_node(tcpu_dev);
                if (!tmp_np) {
                        dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
                                __func__);
                        ret = -ENOENT;
                        goto put_cpu_node;
                }

                /* CPUs are sharing opp node */
                if (np == tmp_np)
                        cpumask_set_cpu(cpu, cpumask);

                of_node_put(tmp_np);
        }

put_cpu_node:
        of_node_put(np);
        return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
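
/*
 * Illustrative sketch only (hypothetical name and caller-provided mask): with
 * operating-points-v2 bindings a driver can discover which CPUs share the OPP
 * table of @cpu_dev and then register the tables for all of them in one go.
 */
static int __maybe_unused example_init_shared_opps(struct device *cpu_dev,
                                                   cpumask_var_t shared_cpus)
{
        int ret;

        /* Start from the CPU we were handed, as required by the helper... */
        cpumask_clear(shared_cpus);
        cpumask_set_cpu(cpu_dev->id, shared_cpus);

        /* ...and extend the mask with every CPU marked "opp-shared" */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared_cpus);
        if (ret)
                return ret;     /* -ENOENT: no v2 table, fall back to V1 */

        /* Register one OPP table per CPU in the mask */
        return dev_pm_opp_of_cpumask_add_table(shared_cpus);
}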

#endif	/* CONFIG_OF */