core.c
  1. /*
  2. * Generic OPP Interface
  3. *
  4. * Copyright (C) 2009-2010 Texas Instruments Incorporated.
  5. * Nishanth Menon
  6. * Romit Dasgupta
  7. * Kevin Hilman
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. */
  13. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  14. #include <linux/errno.h>
  15. #include <linux/err.h>
  16. #include <linux/slab.h>
  17. #include <linux/device.h>
  18. #include <linux/of.h>
  19. #include <linux/export.h>
  20. #include "opp.h"
  21. /*
  22. * The root of the list of all devices. All device_opp structures branch off
  23. * from here, with each device_opp containing the list of opp it supports in
  24. * various states of availability.
  25. */
  26. static LIST_HEAD(dev_opp_list);
  27. /* Lock to allow exclusive modification to the device and opp lists */
  28. DEFINE_MUTEX(dev_opp_list_lock);
  29. #define opp_rcu_lockdep_assert() \
  30. do { \
  31. RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
  32. !lockdep_is_held(&dev_opp_list_lock), \
  33. "Missing rcu_read_lock() or " \
  34. "dev_opp_list_lock protection"); \
  35. } while (0)
  36. static struct device_list_opp *_find_list_dev(const struct device *dev,
  37. struct device_opp *dev_opp)
  38. {
  39. struct device_list_opp *list_dev;
  40. list_for_each_entry(list_dev, &dev_opp->dev_list, node)
  41. if (list_dev->dev == dev)
  42. return list_dev;
  43. return NULL;
  44. }
  45. static struct device_opp *_managed_opp(const struct device_node *np)
  46. {
  47. struct device_opp *dev_opp;
  48. list_for_each_entry_rcu(dev_opp, &dev_opp_list, node) {
  49. if (dev_opp->np == np) {
  50. /*
  51. * Multiple devices can point to the same OPP table and
  52. * so will have same node-pointer, np.
  53. *
  54. * But the OPPs will be considered as shared only if the
  55. * OPP table contains a "opp-shared" property.
  56. */
  57. return dev_opp->shared_opp ? dev_opp : NULL;
  58. }
  59. }
  60. return NULL;
  61. }
  62. /**
  63. * _find_device_opp() - find device_opp struct using device pointer
  64. * @dev: device pointer used to lookup device OPPs
  65. *
  66. * Search list of device OPPs for one containing matching device. Does a RCU
  67. * reader operation to grab the pointer needed.
  68. *
  69. * Return: pointer to 'struct device_opp' if found, otherwise -ENODEV or
  70. * -EINVAL based on type of error.
  71. *
  72. * Locking: For readers, this function must be called under rcu_read_lock().
  73. * device_opp is a RCU protected pointer, which means that device_opp is valid
  74. * as long as we are under RCU lock.
  75. *
  76. * For Writers, this function must be called with dev_opp_list_lock held.
  77. */
  78. struct device_opp *_find_device_opp(struct device *dev)
  79. {
  80. struct device_opp *dev_opp;
  81. opp_rcu_lockdep_assert();
  82. if (IS_ERR_OR_NULL(dev)) {
  83. pr_err("%s: Invalid parameters\n", __func__);
  84. return ERR_PTR(-EINVAL);
  85. }
  86. list_for_each_entry_rcu(dev_opp, &dev_opp_list, node)
  87. if (_find_list_dev(dev, dev_opp))
  88. return dev_opp;
  89. return ERR_PTR(-ENODEV);
  90. }
  91. /**
  92. * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  93. * @opp: opp for which voltage has to be returned for
  94. *
  95. * Return: voltage in micro volt corresponding to the opp, else
  96. * return 0
  97. *
  98. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  99. * protected pointer. This means that opp which could have been fetched by
  100. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  101. * under RCU lock. The pointer returned by the opp_find_freq family must be
  102. * used in the same section as the usage of this function with the pointer
  103. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  104. * pointer.
  105. */
  106. unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
  107. {
  108. struct dev_pm_opp *tmp_opp;
  109. unsigned long v = 0;
  110. opp_rcu_lockdep_assert();
  111. tmp_opp = rcu_dereference(opp);
  112. if (IS_ERR_OR_NULL(tmp_opp))
  113. pr_err("%s: Invalid parameters\n", __func__);
  114. else
  115. v = tmp_opp->u_volt;
  116. return v;
  117. }
  118. EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
  119. /**
  120. * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
  121. * @opp: opp for which frequency has to be returned for
  122. *
  123. * Return: frequency in hertz corresponding to the opp, else
  124. * return 0
  125. *
  126. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  127. * protected pointer. This means that opp which could have been fetched by
  128. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  129. * under RCU lock. The pointer returned by the opp_find_freq family must be
  130. * used in the same section as the usage of this function with the pointer
  131. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  132. * pointer.
  133. */
  134. unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
  135. {
  136. struct dev_pm_opp *tmp_opp;
  137. unsigned long f = 0;
  138. opp_rcu_lockdep_assert();
  139. tmp_opp = rcu_dereference(opp);
  140. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
  141. pr_err("%s: Invalid parameters\n", __func__);
  142. else
  143. f = tmp_opp->rate;
  144. return f;
  145. }
  146. EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  147. /**
  148. * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
  149. * @opp: opp for which turbo mode is being verified
  150. *
  151. * Turbo OPPs are not for normal use, and can be enabled (under certain
  152. * conditions) for short duration of times to finish high throughput work
  153. * quickly. Running on them for longer times may overheat the chip.
  154. *
  155. * Return: true if opp is turbo opp, else false.
  156. *
  157. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  158. * protected pointer. This means that opp which could have been fetched by
  159. * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  160. * under RCU lock. The pointer returned by the opp_find_freq family must be
  161. * used in the same section as the usage of this function with the pointer
  162. * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
  163. * pointer.
  164. */
  165. bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
  166. {
  167. struct dev_pm_opp *tmp_opp;
  168. opp_rcu_lockdep_assert();
  169. tmp_opp = rcu_dereference(opp);
  170. if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
  171. pr_err("%s: Invalid parameters\n", __func__);
  172. return false;
  173. }
  174. return tmp_opp->turbo;
  175. }
  176. EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
  177. /**
  178. * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
  179. * @dev: device for which we do this operation
  180. *
  181. * Return: This function returns the max clock latency in nanoseconds.
  182. *
  183. * Locking: This function takes rcu_read_lock().
  184. */
  185. unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
  186. {
  187. struct device_opp *dev_opp;
  188. unsigned long clock_latency_ns;
  189. rcu_read_lock();
  190. dev_opp = _find_device_opp(dev);
  191. if (IS_ERR(dev_opp))
  192. clock_latency_ns = 0;
  193. else
  194. clock_latency_ns = dev_opp->clock_latency_ns_max;
  195. rcu_read_unlock();
  196. return clock_latency_ns;
  197. }
  198. EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
  199. /**
  200. * dev_pm_opp_get_suspend_opp() - Get suspend opp
  201. * @dev: device for which we do this operation
  202. *
  203. * Return: This function returns pointer to the suspend opp if it is
  204. * defined and available, otherwise it returns NULL.
  205. *
  206. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  207. * protected pointer. The reason for the same is that the opp pointer which is
  208. * returned will remain valid for use with opp_get_{voltage, freq} only while
  209. * under the locked area. The pointer returned must be used prior to unlocking
  210. * with rcu_read_unlock() to maintain the integrity of the pointer.
  211. */
  212. struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
  213. {
  214. struct device_opp *dev_opp;
  215. opp_rcu_lockdep_assert();
  216. dev_opp = _find_device_opp(dev);
  217. if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
  218. !dev_opp->suspend_opp->available)
  219. return NULL;
  220. return dev_opp->suspend_opp;
  221. }
  222. EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
  223. /**
  224. * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
  225. * @dev: device for which we do this operation
  226. *
  227. * Return: This function returns the number of available opps if there are any,
  228. * else returns 0 if none or the corresponding error value.
  229. *
  230. * Locking: This function takes rcu_read_lock().
  231. */
  232. int dev_pm_opp_get_opp_count(struct device *dev)
  233. {
  234. struct device_opp *dev_opp;
  235. struct dev_pm_opp *temp_opp;
  236. int count = 0;
  237. rcu_read_lock();
  238. dev_opp = _find_device_opp(dev);
  239. if (IS_ERR(dev_opp)) {
  240. count = PTR_ERR(dev_opp);
  241. dev_err(dev, "%s: device OPP not found (%d)\n",
  242. __func__, count);
  243. goto out_unlock;
  244. }
  245. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  246. if (temp_opp->available)
  247. count++;
  248. }
  249. out_unlock:
  250. rcu_read_unlock();
  251. return count;
  252. }
  253. EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
  254. /**
  255. * dev_pm_opp_find_freq_exact() - search for an exact frequency
  256. * @dev: device for which we do this operation
  257. * @freq: frequency to search for
  258. * @available: true/false - match for available opp
  259. *
  260. * Return: Searches for exact match in the opp list and returns pointer to the
  261. * matching opp if found, else returns ERR_PTR in case of error and should
  262. * be handled using IS_ERR. Error return values can be:
  263. * EINVAL: for bad pointer
  264. * ERANGE: no match found for search
  265. * ENODEV: if device not found in list of registered devices
  266. *
  267. * Note: available is a modifier for the search. if available=true, then the
  268. * match is for exact matching frequency and is available in the stored OPP
  269. * table. if false, the match is for exact frequency which is not available.
  270. *
  271. * This provides a mechanism to enable an opp which is not available currently
  272. * or the opposite as well.
  273. *
  274. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  275. * protected pointer. The reason for the same is that the opp pointer which is
  276. * returned will remain valid for use with opp_get_{voltage, freq} only while
  277. * under the locked area. The pointer returned must be used prior to unlocking
  278. * with rcu_read_unlock() to maintain the integrity of the pointer.
  279. */
  280. struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
  281. unsigned long freq,
  282. bool available)
  283. {
  284. struct device_opp *dev_opp;
  285. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  286. opp_rcu_lockdep_assert();
  287. dev_opp = _find_device_opp(dev);
  288. if (IS_ERR(dev_opp)) {
  289. int r = PTR_ERR(dev_opp);
  290. dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
  291. return ERR_PTR(r);
  292. }
  293. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  294. if (temp_opp->available == available &&
  295. temp_opp->rate == freq) {
  296. opp = temp_opp;
  297. break;
  298. }
  299. }
  300. return opp;
  301. }
  302. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
  303. /**
  304. * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
  305. * @dev: device for which we do this operation
  306. * @freq: Start frequency
  307. *
  308. * Search for the matching ceil *available* OPP from a starting freq
  309. * for a device.
  310. *
  311. * Return: matching *opp and refreshes *freq accordingly, else returns
  312. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  313. * values can be:
  314. * EINVAL: for bad pointer
  315. * ERANGE: no match found for search
  316. * ENODEV: if device not found in list of registered devices
  317. *
  318. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  319. * protected pointer. The reason for the same is that the opp pointer which is
  320. * returned will remain valid for use with opp_get_{voltage, freq} only while
  321. * under the locked area. The pointer returned must be used prior to unlocking
  322. * with rcu_read_unlock() to maintain the integrity of the pointer.
  323. */
  324. struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
  325. unsigned long *freq)
  326. {
  327. struct device_opp *dev_opp;
  328. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  329. opp_rcu_lockdep_assert();
  330. if (!dev || !freq) {
  331. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  332. return ERR_PTR(-EINVAL);
  333. }
  334. dev_opp = _find_device_opp(dev);
  335. if (IS_ERR(dev_opp))
  336. return ERR_CAST(dev_opp);
  337. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  338. if (temp_opp->available && temp_opp->rate >= *freq) {
  339. opp = temp_opp;
  340. *freq = opp->rate;
  341. break;
  342. }
  343. }
  344. return opp;
  345. }
  346. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
  347. /**
  348. * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
  349. * @dev: device for which we do this operation
  350. * @freq: Start frequency
  351. *
  352. * Search for the matching floor *available* OPP from a starting freq
  353. * for a device.
  354. *
  355. * Return: matching *opp and refreshes *freq accordingly, else returns
  356. * ERR_PTR in case of error and should be handled using IS_ERR. Error return
  357. * values can be:
  358. * EINVAL: for bad pointer
  359. * ERANGE: no match found for search
  360. * ENODEV: if device not found in list of registered devices
  361. *
  362. * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  363. * protected pointer. The reason for the same is that the opp pointer which is
  364. * returned will remain valid for use with opp_get_{voltage, freq} only while
  365. * under the locked area. The pointer returned must be used prior to unlocking
  366. * with rcu_read_unlock() to maintain the integrity of the pointer.
  367. */
  368. struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
  369. unsigned long *freq)
  370. {
  371. struct device_opp *dev_opp;
  372. struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
  373. opp_rcu_lockdep_assert();
  374. if (!dev || !freq) {
  375. dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
  376. return ERR_PTR(-EINVAL);
  377. }
  378. dev_opp = _find_device_opp(dev);
  379. if (IS_ERR(dev_opp))
  380. return ERR_CAST(dev_opp);
  381. list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
  382. if (temp_opp->available) {
  383. /* go to the next node, before choosing prev */
  384. if (temp_opp->rate > *freq)
  385. break;
  386. else
  387. opp = temp_opp;
  388. }
  389. }
  390. if (!IS_ERR(opp))
  391. *freq = opp->rate;
  392. return opp;
  393. }
  394. EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
  395. /* List-dev Helpers */
  396. static void _kfree_list_dev_rcu(struct rcu_head *head)
  397. {
  398. struct device_list_opp *list_dev;
  399. list_dev = container_of(head, struct device_list_opp, rcu_head);
  400. kfree_rcu(list_dev, rcu_head);
  401. }
  402. static void _remove_list_dev(struct device_list_opp *list_dev,
  403. struct device_opp *dev_opp)
  404. {
  405. list_del(&list_dev->node);
  406. call_srcu(&dev_opp->srcu_head.srcu, &list_dev->rcu_head,
  407. _kfree_list_dev_rcu);
  408. }
  409. struct device_list_opp *_add_list_dev(const struct device *dev,
  410. struct device_opp *dev_opp)
  411. {
  412. struct device_list_opp *list_dev;
  413. list_dev = kzalloc(sizeof(*list_dev), GFP_KERNEL);
  414. if (!list_dev)
  415. return NULL;
  416. /* Initialize list-dev */
  417. list_dev->dev = dev;
  418. list_add_rcu(&list_dev->node, &dev_opp->dev_list);
  419. return list_dev;
  420. }
  421. /**
  422. * _add_device_opp() - Find device OPP table or allocate a new one
  423. * @dev: device for which we do this operation
  424. *
  425. * It tries to find an existing table first, if it couldn't find one, it
  426. * allocates a new OPP table and returns that.
  427. *
  428. * Return: valid device_opp pointer if success, else NULL.
  429. */
  430. static struct device_opp *_add_device_opp(struct device *dev)
  431. {
  432. struct device_opp *dev_opp;
  433. struct device_list_opp *list_dev;
  434. /* Check for existing list for 'dev' first */
  435. dev_opp = _find_device_opp(dev);
  436. if (!IS_ERR(dev_opp))
  437. return dev_opp;
  438. /*
  439. * Allocate a new device OPP table. In the infrequent case where a new
  440. * device is needed to be added, we pay this penalty.
  441. */
  442. dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
  443. if (!dev_opp)
  444. return NULL;
  445. INIT_LIST_HEAD(&dev_opp->dev_list);
  446. list_dev = _add_list_dev(dev, dev_opp);
  447. if (!list_dev) {
  448. kfree(dev_opp);
  449. return NULL;
  450. }
  451. srcu_init_notifier_head(&dev_opp->srcu_head);
  452. INIT_LIST_HEAD(&dev_opp->opp_list);
  453. /* Secure the device list modification */
  454. list_add_rcu(&dev_opp->node, &dev_opp_list);
  455. return dev_opp;
  456. }
  457. /**
  458. * _kfree_device_rcu() - Free device_opp RCU handler
  459. * @head: RCU head
  460. */
  461. static void _kfree_device_rcu(struct rcu_head *head)
  462. {
  463. struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
  464. kfree_rcu(device_opp, rcu_head);
  465. }
  466. /**
  467. * _remove_device_opp() - Removes a device OPP table
  468. * @dev_opp: device OPP table to be removed.
  469. *
  470. * Removes/frees device OPP table it it doesn't contain any OPPs.
  471. */
  472. static void _remove_device_opp(struct device_opp *dev_opp)
  473. {
  474. struct device_list_opp *list_dev;
  475. if (!list_empty(&dev_opp->opp_list))
  476. return;
  477. list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
  478. node);
  479. _remove_list_dev(list_dev, dev_opp);
  480. /* dev_list must be empty now */
  481. WARN_ON(!list_empty(&dev_opp->dev_list));
  482. list_del_rcu(&dev_opp->node);
  483. call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head,
  484. _kfree_device_rcu);
  485. }
  486. /**
  487. * _kfree_opp_rcu() - Free OPP RCU handler
  488. * @head: RCU head
  489. */
  490. static void _kfree_opp_rcu(struct rcu_head *head)
  491. {
  492. struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
  493. kfree_rcu(opp, rcu_head);
  494. }
  495. /**
  496. * _opp_remove() - Remove an OPP from a table definition
  497. * @dev_opp: points back to the device_opp struct this opp belongs to
  498. * @opp: pointer to the OPP to remove
  499. * @notify: OPP_EVENT_REMOVE notification should be sent or not
  500. *
  501. * This function removes an opp definition from the opp list.
  502. *
  503. * Locking: The internal device_opp and opp structures are RCU protected.
  504. * It is assumed that the caller holds required mutex for an RCU updater
  505. * strategy.
  506. */
  507. static void _opp_remove(struct device_opp *dev_opp,
  508. struct dev_pm_opp *opp, bool notify)
  509. {
  510. /*
  511. * Notify the changes in the availability of the operable
  512. * frequency/voltage list.
  513. */
  514. if (notify)
  515. srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_REMOVE, opp);
  516. list_del_rcu(&opp->node);
  517. call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
  518. _remove_device_opp(dev_opp);
  519. }
  520. /**
  521. * dev_pm_opp_remove() - Remove an OPP from OPP list
  522. * @dev: device for which we do this operation
  523. * @freq: OPP to remove with matching 'freq'
  524. *
  525. * This function removes an opp from the opp list.
  526. *
  527. * Locking: The internal device_opp and opp structures are RCU protected.
  528. * Hence this function internally uses RCU updater strategy with mutex locks
  529. * to keep the integrity of the internal data structures. Callers should ensure
  530. * that this function is *NOT* called under RCU protection or in contexts where
  531. * mutex cannot be locked.
  532. */
  533. void dev_pm_opp_remove(struct device *dev, unsigned long freq)
  534. {
  535. struct dev_pm_opp *opp;
  536. struct device_opp *dev_opp;
  537. bool found = false;
  538. /* Hold our list modification lock here */
  539. mutex_lock(&dev_opp_list_lock);
  540. dev_opp = _find_device_opp(dev);
  541. if (IS_ERR(dev_opp))
  542. goto unlock;
  543. list_for_each_entry(opp, &dev_opp->opp_list, node) {
  544. if (opp->rate == freq) {
  545. found = true;
  546. break;
  547. }
  548. }
  549. if (!found) {
  550. dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
  551. __func__, freq);
  552. goto unlock;
  553. }
  554. _opp_remove(dev_opp, opp, true);
  555. unlock:
  556. mutex_unlock(&dev_opp_list_lock);
  557. }
  558. EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
  559. static struct dev_pm_opp *_allocate_opp(struct device *dev,
  560. struct device_opp **dev_opp)
  561. {
  562. struct dev_pm_opp *opp;
  563. /* allocate new OPP node */
  564. opp = kzalloc(sizeof(*opp), GFP_KERNEL);
  565. if (!opp)
  566. return NULL;
  567. INIT_LIST_HEAD(&opp->node);
  568. *dev_opp = _add_device_opp(dev);
  569. if (!*dev_opp) {
  570. kfree(opp);
  571. return NULL;
  572. }
  573. return opp;
  574. }
  575. static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
  576. struct device_opp *dev_opp)
  577. {
  578. struct dev_pm_opp *opp;
  579. struct list_head *head = &dev_opp->opp_list;
  580. /*
  581. * Insert new OPP in order of increasing frequency and discard if
  582. * already present.
  583. *
  584. * Need to use &dev_opp->opp_list in the condition part of the 'for'
  585. * loop, don't replace it with head otherwise it will become an infinite
  586. * loop.
  587. */
  588. list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
  589. if (new_opp->rate > opp->rate) {
  590. head = &opp->node;
  591. continue;
  592. }
  593. if (new_opp->rate < opp->rate)
  594. break;
  595. /* Duplicate OPPs */
  596. dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
  597. __func__, opp->rate, opp->u_volt, opp->available,
  598. new_opp->rate, new_opp->u_volt, new_opp->available);
  599. return opp->available && new_opp->u_volt == opp->u_volt ?
  600. 0 : -EEXIST;
  601. }
  602. new_opp->dev_opp = dev_opp;
  603. list_add_rcu(&new_opp->node, head);
  604. return 0;
  605. }
  606. /**
  607. * _opp_add_v1() - Allocate a OPP based on v1 bindings.
  608. * @dev: device for which we do this operation
  609. * @freq: Frequency in Hz for this OPP
  610. * @u_volt: Voltage in uVolts for this OPP
  611. * @dynamic: Dynamically added OPPs.
  612. *
  613. * This function adds an opp definition to the opp list and returns status.
  614. * The opp is made available by default and it can be controlled using
  615. * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
  616. *
  617. * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
  618. * and freed by dev_pm_opp_of_remove_table.
  619. *
  620. * Locking: The internal device_opp and opp structures are RCU protected.
  621. * Hence this function internally uses RCU updater strategy with mutex locks
  622. * to keep the integrity of the internal data structures. Callers should ensure
  623. * that this function is *NOT* called under RCU protection or in contexts where
  624. * mutex cannot be locked.
  625. *
  626. * Return:
  627. * 0 On success OR
  628. * Duplicate OPPs (both freq and volt are same) and opp->available
  629. * -EEXIST Freq are same and volt are different OR
  630. * Duplicate OPPs (both freq and volt are same) and !opp->available
  631. * -ENOMEM Memory allocation failure
  632. */
  633. static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
  634. bool dynamic)
  635. {
  636. struct device_opp *dev_opp;
  637. struct dev_pm_opp *new_opp;
  638. int ret;
  639. /* Hold our list modification lock here */
  640. mutex_lock(&dev_opp_list_lock);
  641. new_opp = _allocate_opp(dev, &dev_opp);
  642. if (!new_opp) {
  643. ret = -ENOMEM;
  644. goto unlock;
  645. }
  646. /* populate the opp table */
  647. new_opp->rate = freq;
  648. new_opp->u_volt = u_volt;
  649. new_opp->available = true;
  650. new_opp->dynamic = dynamic;
  651. ret = _opp_add(dev, new_opp, dev_opp);
  652. if (ret)
  653. goto free_opp;
  654. mutex_unlock(&dev_opp_list_lock);
  655. /*
  656. * Notify the changes in the availability of the operable
  657. * frequency/voltage list.
  658. */
  659. srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
  660. return 0;
  661. free_opp:
  662. _opp_remove(dev_opp, new_opp, false);
  663. unlock:
  664. mutex_unlock(&dev_opp_list_lock);
  665. return ret;
  666. }
  667. /* TODO: Support multiple regulators */
  668. static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
  669. {
  670. u32 microvolt[3] = {0};
  671. u32 val;
  672. int count, ret;
  673. /* Missing property isn't a problem, but an invalid entry is */
  674. if (!of_find_property(opp->np, "opp-microvolt", NULL))
  675. return 0;
  676. count = of_property_count_u32_elems(opp->np, "opp-microvolt");
  677. if (count < 0) {
  678. dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
  679. __func__, count);
  680. return count;
  681. }
  682. /* There can be one or three elements here */
  683. if (count != 1 && count != 3) {
  684. dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
  685. __func__, count);
  686. return -EINVAL;
  687. }
  688. ret = of_property_read_u32_array(opp->np, "opp-microvolt", microvolt,
  689. count);
  690. if (ret) {
  691. dev_err(dev, "%s: error parsing opp-microvolt: %d\n", __func__,
  692. ret);
  693. return -EINVAL;
  694. }
  695. opp->u_volt = microvolt[0];
  696. if (count == 1) {
  697. opp->u_volt_min = opp->u_volt;
  698. opp->u_volt_max = opp->u_volt;
  699. } else {
  700. opp->u_volt_min = microvolt[1];
  701. opp->u_volt_max = microvolt[2];
  702. }
  703. if (!of_property_read_u32(opp->np, "opp-microamp", &val))
  704. opp->u_amp = val;
  705. return 0;
  706. }
/**
 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
 * @dev: device for which we do this operation
 * @np: device node describing one OPP (one "opp-hz" entry and friends)
 *
 * This function adds an opp definition to the opp list and returns status. The
 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
 * removed by dev_pm_opp_remove.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 * -EINVAL	Failed parsing the OPP node
 */
static int _opp_add_static_v2(struct device *dev, struct device_node *np)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp;
	u64 rate;
	u32 val;
	int ret;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	new_opp = _allocate_opp(dev, &dev_opp);
	if (!new_opp) {
		ret = -ENOMEM;
		goto unlock;
	}

	/* "opp-hz" is the one mandatory per-OPP property */
	ret = of_property_read_u64(np, "opp-hz", &rate);
	if (ret < 0) {
		dev_err(dev, "%s: opp-hz not found\n", __func__);
		goto free_opp;
	}

	/*
	 * Rate is defined as an unsigned long in clk API, and so casting
	 * explicitly to its type. Must be fixed once rate is 64 bit
	 * guaranteed in clk API.
	 */
	new_opp->rate = (unsigned long)rate;
	new_opp->turbo = of_property_read_bool(np, "turbo-mode");
	new_opp->np = np;
	new_opp->dynamic = false;
	new_opp->available = true;

	/* "clock-latency-ns" is optional; leave the default when absent */
	if (!of_property_read_u32(np, "clock-latency-ns", &val))
		new_opp->clock_latency_ns = val;

	/* Optional voltage/current properties (opp-microvolt/opp-microamp) */
	ret = opp_parse_supplies(new_opp, dev);
	if (ret)
		goto free_opp;

	ret = _opp_add(dev, new_opp, dev_opp);
	if (ret)
		goto free_opp;

	/* OPP to select on device suspend */
	if (of_property_read_bool(np, "opp-suspend")) {
		if (dev_opp->suspend_opp)
			/* Only the first "opp-suspend" entry wins */
			dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
				 __func__, dev_opp->suspend_opp->rate,
				 new_opp->rate);
		else
			dev_opp->suspend_opp = new_opp;
	}

	/* Track the worst-case transition latency across all OPPs */
	if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max)
		dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns;

	mutex_unlock(&dev_opp_list_lock);

	pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
		 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
		 new_opp->u_volt_min, new_opp->u_volt_max,
		 new_opp->clock_latency_ns);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list. Called after dropping the list lock.
	 */
	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
	return 0;

free_opp:
	_opp_remove(dev_opp, new_opp, false);
unlock:
	mutex_unlock(&dev_opp_list_lock);
	return ret;
}
/**
 * dev_pm_opp_add() - Add an OPP table from a table definitions
 * @dev: device for which we do this operation
 * @freq: Frequency in Hz for this OPP
 * @u_volt: Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * dev_pm_opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 *
 * Return:
 * 0		On success OR
 *		Duplicate OPPs (both freq and volt are same) and opp->available
 * -EEXIST	Freq are same and volt are different OR
 *		Duplicate OPPs (both freq and volt are same) and !opp->available
 * -ENOMEM	Memory allocation failure
 */
int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	/* 'true' presumably flags this OPP as dynamically added (vs. static DT) — see _opp_add_v1 */
	return _opp_add_v1(dev, freq, u_volt, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_add);
/**
 * _opp_set_availability() - helper to set the availability of an opp
 * @dev: device for which we do this operation
 * @freq: OPP frequency to modify availability
 * @availability_req: availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation, opp_{enable,disable}
 * share a common logic which is isolated here: copy the entry, flip the
 * flag on the copy, publish it into the list, and defer freeing the old
 * entry until readers are done.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int _opp_set_availability(struct device *dev, unsigned long freq,
				 bool availability_req)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/*
	 * keep the node allocated: the replacement copy is allocated up
	 * front, outside the lock, so the locked section cannot fail on OOM.
	 */
	new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
	if (!new_opp)
		return -ENOMEM;

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		/* still ERR_PTR(-ENODEV): no entry matched @freq */
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	/* Publish the copy; RCU readers now observe new_opp in the list */
	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	/* Defer freeing the replaced entry until SRCU readers are done */
	call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_DISABLE,
					 new_opp);

	return 0;

unlock:
	mutex_unlock(&dev_opp_list_lock);
	kfree(new_opp);
	return r;
}
/**
 * dev_pm_opp_enable() - Enable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used to make an OPP available
 * again after being temporarily made unavailable with dev_pm_opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, true);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
/**
 * dev_pm_opp_disable() - Disable a specific OPP
 * @dev: device for which we do this operation
 * @freq: OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to dev_pm_opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 *
 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
 * copy operation, returns 0 if no modification was done OR modification was
 * successful.
 */
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
{
	return _opp_set_availability(dev, freq, false);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  943. /**
  944. * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
  945. * @dev: device pointer used to lookup device OPPs.
  946. *
  947. * Return: pointer to notifier head if found, otherwise -ENODEV or
  948. * -EINVAL based on type of error casted as pointer. value must be checked
  949. * with IS_ERR to determine valid pointer or error result.
  950. *
  951. * Locking: This function must be called under rcu_read_lock(). dev_opp is a RCU
  952. * protected pointer. The reason for the same is that the opp pointer which is
  953. * returned will remain valid for use with opp_get_{voltage, freq} only while
  954. * under the locked area. The pointer returned must be used prior to unlocking
  955. * with rcu_read_unlock() to maintain the integrity of the pointer.
  956. */
  957. struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
  958. {
  959. struct device_opp *dev_opp = _find_device_opp(dev);
  960. if (IS_ERR(dev_opp))
  961. return ERR_CAST(dev_opp); /* matching type */
  962. return &dev_opp->srcu_head;
  963. }
  964. EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
  965. #ifdef CONFIG_OF
/**
 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
 * entries
 * @dev: device pointer used to lookup device OPPs.
 *
 * Free OPPs created using static entries present in DT. Dynamically added
 * OPPs (opp->dynamic) are left untouched.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
void dev_pm_opp_of_remove_table(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp, *tmp;

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = _find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int error = PTR_ERR(dev_opp);

		/* -ENODEV just means there was nothing to remove */
		if (error != -ENODEV)
			WARN(1, "%s: dev_opp: %d\n",
			     IS_ERR_OR_NULL(dev) ?
					"Invalid device" : dev_name(dev),
			     error);
		goto unlock;
	}

	/* Find if dev_opp manages a single device */
	if (list_is_singular(&dev_opp->dev_list)) {
		/* Free static OPPs */
		list_for_each_entry_safe(opp, tmp, &dev_opp->opp_list, node) {
			if (!opp->dynamic)
				_opp_remove(dev_opp, opp, true);
		}
	} else {
		/* Table is shared with other devices: only detach this one */
		_remove_list_dev(_find_list_dev(dev, dev_opp), dev_opp);
	}
unlock:
	mutex_unlock(&dev_opp_list_lock);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
/*
 * Returns opp descriptor node for a device (the node referenced by the
 * "operating-points-v2" phandle), caller must do of_node_put().
 */
struct device_node *_of_get_opp_desc_node(struct device *dev)
{
	/*
	 * TODO: Support for multiple OPP tables.
	 *
	 * There should be only ONE phandle present in "operating-points-v2"
	 * property.
	 */

	return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
/*
 * Initializes OPP tables based on new bindings ("operating-points-v2").
 * When @opp_np is already managed (shared table), this just registers @dev
 * against the existing table; otherwise it parses every available child
 * node of @opp_np into a fresh OPP list for @dev.
 */
static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
	struct device_node *np;
	struct device_opp *dev_opp;
	int ret = 0, count = 0;

	mutex_lock(&dev_opp_list_lock);

	dev_opp = _managed_opp(opp_np);
	if (dev_opp) {
		/* OPPs are already managed */
		if (!_add_list_dev(dev, dev_opp))
			ret = -ENOMEM;
		mutex_unlock(&dev_opp_list_lock);
		return ret;
	}
	/*
	 * Lock is dropped here; _opp_add_static_v2() takes it per-OPP
	 * below.
	 */
	mutex_unlock(&dev_opp_list_lock);

	/* We have opp-list node now, iterate over it and add OPPs */
	for_each_available_child_of_node(opp_np, np) {
		count++;

		ret = _opp_add_static_v2(dev, np);
		if (ret) {
			dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
				ret);
			/* Drop the reference held by the iterator on abort */
			of_node_put(np);
			goto free_table;
		}
	}

	/* There should be one or more OPPs defined */
	if (WARN_ON(!count))
		return -ENOENT;

	mutex_lock(&dev_opp_list_lock);

	dev_opp = _find_device_opp(dev);
	if (WARN_ON(IS_ERR(dev_opp))) {
		ret = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		goto free_table;
	}

	dev_opp->np = opp_np;
	dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");

	mutex_unlock(&dev_opp_list_lock);

	return 0;

free_table:
	dev_pm_opp_of_remove_table(dev);

	return ret;
}
  1066. /* Initializes OPP tables based on old-deprecated bindings */
  1067. static int _of_add_opp_table_v1(struct device *dev)
  1068. {
  1069. const struct property *prop;
  1070. const __be32 *val;
  1071. int nr;
  1072. prop = of_find_property(dev->of_node, "operating-points", NULL);
  1073. if (!prop)
  1074. return -ENODEV;
  1075. if (!prop->value)
  1076. return -ENODATA;
  1077. /*
  1078. * Each OPP is a set of tuples consisting of frequency and
  1079. * voltage like <freq-kHz vol-uV>.
  1080. */
  1081. nr = prop->length / sizeof(u32);
  1082. if (nr % 2) {
  1083. dev_err(dev, "%s: Invalid OPP list\n", __func__);
  1084. return -EINVAL;
  1085. }
  1086. val = prop->value;
  1087. while (nr) {
  1088. unsigned long freq = be32_to_cpup(val++) * 1000;
  1089. unsigned long volt = be32_to_cpup(val++);
  1090. if (_opp_add_v1(dev, freq, volt, false))
  1091. dev_warn(dev, "%s: Failed to add OPP %ld\n",
  1092. __func__, freq);
  1093. nr -= 2;
  1094. }
  1095. return 0;
  1096. }
  1097. /**
  1098. * dev_pm_opp_of_add_table() - Initialize opp table from device tree
  1099. * @dev: device pointer used to lookup device OPPs.
  1100. *
  1101. * Register the initial OPP table with the OPP library for given device.
  1102. *
  1103. * Locking: The internal device_opp and opp structures are RCU protected.
  1104. * Hence this function indirectly uses RCU updater strategy with mutex locks
  1105. * to keep the integrity of the internal data structures. Callers should ensure
  1106. * that this function is *NOT* called under RCU protection or in contexts where
  1107. * mutex cannot be locked.
  1108. *
  1109. * Return:
  1110. * 0 On success OR
  1111. * Duplicate OPPs (both freq and volt are same) and opp->available
  1112. * -EEXIST Freq are same and volt are different OR
  1113. * Duplicate OPPs (both freq and volt are same) and !opp->available
  1114. * -ENOMEM Memory allocation failure
  1115. * -ENODEV when 'operating-points' property is not found or is invalid data
  1116. * in device node.
  1117. * -ENODATA when empty 'operating-points' property is found
  1118. * -EINVAL when invalid entries are found in opp-v2 table
  1119. */
  1120. int dev_pm_opp_of_add_table(struct device *dev)
  1121. {
  1122. struct device_node *opp_np;
  1123. int ret;
  1124. /*
  1125. * OPPs have two version of bindings now. The older one is deprecated,
  1126. * try for the new binding first.
  1127. */
  1128. opp_np = _of_get_opp_desc_node(dev);
  1129. if (!opp_np) {
  1130. /*
  1131. * Try old-deprecated bindings for backward compatibility with
  1132. * older dtbs.
  1133. */
  1134. return _of_add_opp_table_v1(dev);
  1135. }
  1136. ret = _of_add_opp_table_v2(dev, opp_np);
  1137. of_node_put(opp_np);
  1138. return ret;
  1139. }
  1140. EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
  1141. #endif