cpg.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492
  1. /*
  2. * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
  3. *
  4. * Copyright (C) 2010 Magnus Damm
  5. * Copyright (C) 2010 - 2012 Paul Mundt
  6. *
  7. * This file is subject to the terms and conditions of the GNU General Public
  8. * License. See the file "COPYING" in the main directory of this archive
  9. * for more details.
  10. */
  11. #include <linux/clk.h>
  12. #include <linux/compiler.h>
  13. #include <linux/slab.h>
  14. #include <linux/io.h>
  15. #include <linux/sh_clk.h>
  16. #define CPG_CKSTP_BIT BIT(8)
  17. static unsigned int sh_clk_read(struct clk *clk)
  18. {
  19. if (clk->flags & CLK_ENABLE_REG_8BIT)
  20. return ioread8(clk->mapped_reg);
  21. else if (clk->flags & CLK_ENABLE_REG_16BIT)
  22. return ioread16(clk->mapped_reg);
  23. return ioread32(clk->mapped_reg);
  24. }
  25. static void sh_clk_write(int value, struct clk *clk)
  26. {
  27. if (clk->flags & CLK_ENABLE_REG_8BIT)
  28. iowrite8(value, clk->mapped_reg);
  29. else if (clk->flags & CLK_ENABLE_REG_16BIT)
  30. iowrite16(value, clk->mapped_reg);
  31. else
  32. iowrite32(value, clk->mapped_reg);
  33. }
/* 8-bit MMIO read helper, selected at runtime by sh_clk_mstp_enable(). */
static unsigned int r8(const void __iomem *addr)
{
	return ioread8(addr);
}
/* 16-bit MMIO read helper, selected at runtime by sh_clk_mstp_enable(). */
static unsigned int r16(const void __iomem *addr)
{
	return ioread16(addr);
}
/* 32-bit MMIO read helper, selected at runtime by sh_clk_mstp_enable(). */
static unsigned int r32(const void __iomem *addr)
{
	return ioread32(addr);
}
/*
 * Enable an MSTP (module stop) clock: clear its stop bit and, when the
 * block provides a status register, busy-wait until the hardware reports
 * the module as running.
 *
 * Returns 0 on success or -ETIMEDOUT if the status bit never clears.
 */
static int sh_clk_mstp_enable(struct clk *clk)
{
	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);

	if (clk->status_reg) {
		unsigned int (*read)(const void __iomem *addr);
		int i;
		/*
		 * Derive the ioremapped status address from the already
		 * mapped enable register by applying the physical offset
		 * between status_reg and enable_reg -- assumes both live
		 * in the same mapping.
		 */
		void __iomem *mapped_status = (phys_addr_t)clk->status_reg -
			(phys_addr_t)clk->enable_reg + clk->mapped_reg;

		/* Pick the read accessor matching the register width. */
		if (clk->flags & CLK_ENABLE_REG_8BIT)
			read = r8;
		else if (clk->flags & CLK_ENABLE_REG_16BIT)
			read = r16;
		else
			read = r32;

		/* Poll (bounded at 1000 iterations) for the bit to clear. */
		for (i = 1000;
		     (read(mapped_status) & (1 << clk->enable_bit)) && i;
		     i--)
			cpu_relax();

		if (!i) {
			pr_err("cpg: failed to enable %p[%d]\n",
			       clk->enable_reg, clk->enable_bit);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
  72. static void sh_clk_mstp_disable(struct clk *clk)
  73. {
  74. sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
  75. }
/* MSTP gate clocks: gate on/off only, rate always follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
  81. int __init sh_clk_mstp_register(struct clk *clks, int nr)
  82. {
  83. struct clk *clkp;
  84. int ret = 0;
  85. int k;
  86. for (k = 0; !ret && (k < nr); k++) {
  87. clkp = clks + k;
  88. clkp->ops = &sh_clk_mstp_clk_ops;
  89. ret |= clk_register(clkp);
  90. }
  91. return ret;
  92. }
  93. /*
  94. * Div/mult table lookup helpers
  95. */
/* Divider clocks stash their clk_div_table in the generic priv slot. */
static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
{
	return clk->priv;
}
/* Shorthand for the div/mult table hanging off the clock's div table. */
static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
{
	return clk_to_div_table(clk)->div_mult_table;
}
  104. /*
  105. * Common div ops
  106. */
/* Round @rate to the nearest entry in the clock's frequency table. */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
/*
 * Recalculate a divider clock's rate: rebuild the frequency table from
 * the div/mult table, then decode the currently programmed divisor index
 * from the hardware register.
 */
static unsigned long sh_clk_div_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	unsigned int idx;

	/* arch_flags (when set) carries per-SoC divisor restrictions */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, clk->arch_flags ? &clk->arch_flags : NULL);

	idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;

	return clk->freq_table[idx].frequency;
}
  120. static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
  121. {
  122. struct clk_div_table *dt = clk_to_div_table(clk);
  123. unsigned long value;
  124. int idx;
  125. idx = clk_rate_table_find(clk, clk->freq_table, rate);
  126. if (idx < 0)
  127. return idx;
  128. value = sh_clk_read(clk);
  129. value &= ~(clk->div_mask << clk->enable_bit);
  130. value |= (idx << clk->enable_bit);
  131. sh_clk_write(value, clk);
  132. /* XXX: Should use a post-change notifier */
  133. if (dt->kick)
  134. dt->kick(clk);
  135. return 0;
  136. }
  137. static int sh_clk_div_enable(struct clk *clk)
  138. {
  139. if (clk->div_mask == SH_CLK_DIV6_MSK) {
  140. int ret = sh_clk_div_set_rate(clk, clk->rate);
  141. if (ret < 0)
  142. return ret;
  143. }
  144. sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
  145. return 0;
  146. }
/* Disable a divider clock by setting the CKSTP (clock stop) bit. */
static void sh_clk_div_disable(struct clk *clk)
{
	unsigned int val;

	val = sh_clk_read(clk);
	val |= CPG_CKSTP_BIT;

	/*
	 * div6 clocks require the divisor field to be non-zero or the
	 * above CKSTP toggle silently fails. Ensure that the divisor
	 * array is reset to its initial state on disable.
	 */
	if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
		val |= clk->div_mask;

	sh_clk_write(val, clk);
}
/* Divider clocks without gating support (no enable/disable). */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
/* Divider clocks with CKSTP-based gating. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
  173. static int __init sh_clk_init_parent(struct clk *clk)
  174. {
  175. u32 val;
  176. if (clk->parent)
  177. return 0;
  178. if (!clk->parent_table || !clk->parent_num)
  179. return 0;
  180. if (!clk->src_width) {
  181. pr_err("sh_clk_init_parent: cannot select parent clock\n");
  182. return -EINVAL;
  183. }
  184. val = (sh_clk_read(clk) >> clk->src_shift);
  185. val &= (1 << clk->src_width) - 1;
  186. if (val >= clk->parent_num) {
  187. pr_err("sh_clk_init_parent: parent table size failed\n");
  188. return -EINVAL;
  189. }
  190. clk_reparent(clk, clk->parent_table[val]);
  191. if (!clk->parent) {
  192. pr_err("sh_clk_init_parent: unable to set parent");
  193. return -EINVAL;
  194. }
  195. return 0;
  196. }
/*
 * Common registration helper for div4/div6 clocks: allocates one shared
 * frequency-table slab covering all @nr clocks, points each clock at its
 * slice, attaches @ops and @table, then registers each clock and resolves
 * its initial parent.
 *
 * NOTE(review): freq_table is intentionally not freed when a later
 * clk_register()/sh_clk_init_parent() call fails -- clocks registered
 * earlier in the loop keep pointing into it.
 */
static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
		struct clk_div_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One extra entry per clock for the CPUFREQ_TABLE_END sentinel. */
	freq_table_size *= (nr_divs + 1);

	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("%s: unable to alloc memory\n", __func__);
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret == 0)
			ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
  224. /*
  225. * div6 support
  226. */
/* div6 divisor field values 0..63 map linearly to divisors 1..64. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
/* div6 blocks are pure dividers: no multipliers, only the table above. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
/* Shared div table for all div6 clocks (no kick callback needed). */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
  240. static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
  241. {
  242. struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
  243. u32 value;
  244. int ret, i;
  245. if (!clk->parent_table || !clk->parent_num)
  246. return -EINVAL;
  247. /* Search the parent */
  248. for (i = 0; i < clk->parent_num; i++)
  249. if (clk->parent_table[i] == parent)
  250. break;
  251. if (i == clk->parent_num)
  252. return -ENODEV;
  253. ret = clk_reparent(clk, parent);
  254. if (ret < 0)
  255. return ret;
  256. value = sh_clk_read(clk) &
  257. ~(((1 << clk->src_width) - 1) << clk->src_shift);
  258. sh_clk_write(value | (i << clk->src_shift), clk);
  259. /* Rebuild the frequency table */
  260. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  261. table, NULL);
  262. return 0;
  263. }
/* div6 clocks that additionally support runtime parent switching. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
/* Register @nr div6 clocks with gating but fixed parents. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div_enable_clk_ops);
}
/* Register @nr div6 clocks with gating and parent switching. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
				       &sh_clk_div6_reparent_clk_ops);
}
  282. /*
  283. * div4 support
  284. */
/*
 * Switch a div4 clock between its internal and external parent by
 * toggling bit 7 of the control register, then rebuild the frequency
 * table for the new parent rate.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */
	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
/* div4 clocks with gating and internal/external parent switching. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
/* Register @nr ungated div4 clocks using the caller-supplied @table. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
}
/* Register @nr gated div4 clocks using the caller-supplied @table. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div_enable_clk_ops);
}
/* Register @nr gated, reparentable div4 clocks with @table. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div_register_ops(clks, nr, table,
				       &sh_clk_div4_reparent_clk_ops);
}
  332. /* FSI-DIV */
  333. static unsigned long fsidiv_recalc(struct clk *clk)
  334. {
  335. u32 value;
  336. value = __raw_readl(clk->mapping->base);
  337. value >>= 16;
  338. if (value < 2)
  339. return clk->parent->rate;
  340. return clk->parent->rate / value;
  341. }
/* Round @rate to what an integer divisor in [1, 0xffff] can produce. */
static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_div_range_round(clk, 1, 0xffff, rate);
}
/* Disable the FSI divider by clearing the whole register. */
static void fsidiv_disable(struct clk *clk)
{
	__raw_writel(0, clk->mapping->base);
}
  350. static int fsidiv_enable(struct clk *clk)
  351. {
  352. u32 value;
  353. value = __raw_readl(clk->mapping->base) >> 16;
  354. if (value < 2)
  355. return 0;
  356. __raw_writel((value << 16) | 0x3, clk->mapping->base);
  357. return 0;
  358. }
  359. static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
  360. {
  361. int idx;
  362. idx = (clk->parent->rate / rate) & 0xffff;
  363. if (idx < 2)
  364. __raw_writel(0, clk->mapping->base);
  365. else
  366. __raw_writel(idx << 16, clk->mapping->base);
  367. return 0;
  368. }
/* FSI-DIV clocks: 16-bit divider with its own enable bits. */
static struct sh_clk_ops fsidiv_clk_ops = {
	.recalc		= fsidiv_recalc,
	.round_rate	= fsidiv_round_rate,
	.set_rate	= fsidiv_set_rate,
	.enable		= fsidiv_enable,
	.disable	= fsidiv_disable,
};
  376. int __init sh_clk_fsidiv_register(struct clk *clks, int nr)
  377. {
  378. struct clk_mapping *map;
  379. int i;
  380. for (i = 0; i < nr; i++) {
  381. map = kzalloc(sizeof(struct clk_mapping), GFP_KERNEL);
  382. if (!map) {
  383. pr_err("%s: unable to alloc memory\n", __func__);
  384. return -ENOMEM;
  385. }
  386. /* clks[i].enable_reg came from SH_CLK_FSIDIV() */
  387. map->phys = (phys_addr_t)clks[i].enable_reg;
  388. map->len = 8;
  389. clks[i].enable_reg = 0; /* remove .enable_reg */
  390. clks[i].ops = &fsidiv_clk_ops;
  391. clks[i].mapping = map;
  392. clk_register(&clks[i]);
  393. }
  394. return 0;
  395. }