clk-generated.c

/*
 * Copyright (C) 2015 Atmel Corporation,
 *               Nicolas Ferre <nicolas.ferre@atmel.com>
 *
 * Based on clk-programmable & clk-peripheral drivers by Boris BREZILLON.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>

#include "pmc.h"

#define PERIPHERAL_MAX          64
#define PERIPHERAL_ID_MIN       2

#define GENERATED_SOURCE_MAX    6
#define GENERATED_MAX_DIV       255

struct clk_generated {
        struct clk_hw hw;
        struct at91_pmc *pmc;
        struct clk_range range;
        u32 id;
        u32 gckdiv;
        u8 parent_id;
};

#define to_clk_generated(hw) \
        container_of(hw, struct clk_generated, hw)
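
/*
 * The PMC PCR is a single indexed register: writing the PID field selects
 * the peripheral channel, a plain read then returns that channel's
 * configuration, and a write with AT91_PMC_PCR_CMD set commits new
 * GCKCSS/GCKDIV/GCKEN settings. The helpers below always select the
 * channel first and keep the whole sequence under the PMC lock.
 */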
static int clk_generated_enable(struct clk_hw *hw)
{
        struct clk_generated *gck = to_clk_generated(hw);
        struct at91_pmc *pmc = gck->pmc;
        u32 tmp;

        pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
                 __func__, gck->gckdiv, gck->parent_id);

        pmc_lock(pmc);
        pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
        tmp = pmc_read(pmc, AT91_PMC_PCR) &
                        ~(AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK);
        pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_GCKCSS(gck->parent_id)
                                         | AT91_PMC_PCR_CMD
                                         | AT91_PMC_PCR_GCKDIV(gck->gckdiv)
                                         | AT91_PMC_PCR_GCKEN);
        pmc_unlock(pmc);
        return 0;
}

static void clk_generated_disable(struct clk_hw *hw)
{
        struct clk_generated *gck = to_clk_generated(hw);
        struct at91_pmc *pmc = gck->pmc;
        u32 tmp;

        pmc_lock(pmc);
        pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
        tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_GCKEN;
        pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD);
        pmc_unlock(pmc);
}

static int clk_generated_is_enabled(struct clk_hw *hw)
{
        struct clk_generated *gck = to_clk_generated(hw);
        struct at91_pmc *pmc = gck->pmc;
        int ret;

        pmc_lock(pmc);
        pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
        ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_GCKEN);
        pmc_unlock(pmc);

        return ret;
}

static unsigned long
clk_generated_recalc_rate(struct clk_hw *hw,
                          unsigned long parent_rate)
{
        struct clk_generated *gck = to_clk_generated(hw);

        return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}
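
/*
 * Try every parent and every divisor (the hardware divides by GCKDIV + 1,
 * i.e. by 1 to GENERATED_MAX_DIV + 1) and keep the combination whose output
 * is closest to the requested rate, skipping parents that cannot be divided
 * down into the clock's permitted range.
 */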
static int clk_generated_determine_rate(struct clk_hw *hw,
                                        struct clk_rate_request *req)
{
        struct clk_generated *gck = to_clk_generated(hw);
        struct clk_hw *parent = NULL;
        long best_rate = -EINVAL;
        unsigned long tmp_rate, min_rate;
        int best_diff = -1;
        int tmp_diff;
        int i;

        for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
                u32 div;
                unsigned long parent_rate;

                parent = clk_hw_get_parent_by_index(hw, i);
                if (!parent)
                        continue;

                parent_rate = clk_hw_get_rate(parent);
                min_rate = DIV_ROUND_CLOSEST(parent_rate, GENERATED_MAX_DIV + 1);
                if (!parent_rate ||
                    (gck->range.max && min_rate > gck->range.max))
                        continue;

                for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
                        tmp_rate = DIV_ROUND_CLOSEST(parent_rate, div);
                        tmp_diff = abs(req->rate - tmp_rate);

                        if (best_diff < 0 || best_diff > tmp_diff) {
                                best_rate = tmp_rate;
                                best_diff = tmp_diff;
                                req->best_parent_rate = parent_rate;
                                req->best_parent_hw = parent;
                        }

                        if (!best_diff || tmp_rate < req->rate)
                                break;
                }

                if (!best_diff)
                        break;
        }

        pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
                 __func__, best_rate,
                 __clk_get_name((req->best_parent_hw)->clk),
                 req->best_parent_rate);

        if (best_rate < 0)
                return best_rate;

        req->rate = best_rate;
        return 0;
}
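
/*
 * With CLK_SET_PARENT_GATE and CLK_SET_RATE_GATE set, parent and rate may
 * only change while the clock is gated, so the setters below merely cache
 * the new parent_id/gckdiv; clk_generated_enable() programs them into the
 * PCR the next time the clock is ungated.
 */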
/* No modification of hardware as we have the flag CLK_SET_PARENT_GATE set */
static int clk_generated_set_parent(struct clk_hw *hw, u8 index)
{
        struct clk_generated *gck = to_clk_generated(hw);

        if (index >= clk_hw_get_num_parents(hw))
                return -EINVAL;

        gck->parent_id = index;
        return 0;
}

static u8 clk_generated_get_parent(struct clk_hw *hw)
{
        struct clk_generated *gck = to_clk_generated(hw);

        return gck->parent_id;
}

/* No modification of hardware as we have the flag CLK_SET_RATE_GATE set */
static int clk_generated_set_rate(struct clk_hw *hw,
                                  unsigned long rate,
                                  unsigned long parent_rate)
{
        struct clk_generated *gck = to_clk_generated(hw);
        u32 div;

        if (!rate)
                return -EINVAL;

        if (gck->range.max && rate > gck->range.max)
                return -EINVAL;

        div = DIV_ROUND_CLOSEST(parent_rate, rate);
        if (div > GENERATED_MAX_DIV + 1 || !div)
                return -EINVAL;

        gck->gckdiv = div - 1;
        return 0;
}

static const struct clk_ops generated_ops = {
        .enable = clk_generated_enable,
        .disable = clk_generated_disable,
        .is_enabled = clk_generated_is_enabled,
        .recalc_rate = clk_generated_recalc_rate,
        .determine_rate = clk_generated_determine_rate,
        .get_parent = clk_generated_get_parent,
        .set_parent = clk_generated_set_parent,
        .set_rate = clk_generated_set_rate,
};

/**
 * clk_generated_startup - Initialize a given clock to its default parent and
 * divisor parameter.
 *
 * @gck:        Generated clock to set the startup parameters for.
 *
 * Take parameters from the hardware and update local clock configuration
 * accordingly.
 */
static void clk_generated_startup(struct clk_generated *gck)
{
        struct at91_pmc *pmc = gck->pmc;
        u32 tmp;

        pmc_lock(pmc);
        pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK));
        tmp = pmc_read(pmc, AT91_PMC_PCR);
        pmc_unlock(pmc);

        gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK)
                                        >> AT91_PMC_PCR_GCKCSS_OFFSET;
        gck->gckdiv = (tmp & AT91_PMC_PCR_GCKDIV_MASK)
                                        >> AT91_PMC_PCR_GCKDIV_OFFSET;
}
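
/*
 * Register one generated clock and seed its cached parent_id/gckdiv from
 * whatever configuration is currently programmed in the PCR (see
 * clk_generated_startup() above), so the framework's view matches the
 * hardware state at boot.
 */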
static struct clk * __init
at91_clk_register_generated(struct at91_pmc *pmc, const char *name,
                            const char **parent_names, u8 num_parents,
                            u8 id, const struct clk_range *range)
{
        struct clk_generated *gck;
        struct clk *clk = NULL;
        struct clk_init_data init;

        gck = kzalloc(sizeof(*gck), GFP_KERNEL);
        if (!gck)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &generated_ops;
        init.parent_names = parent_names;
        init.num_parents = num_parents;
        init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
        gck->id = id;
        gck->hw.init = &init;
        gck->pmc = pmc;
        gck->range = *range;

        clk = clk_register(NULL, &gck->hw);
        if (IS_ERR(clk))
                kfree(gck);
        else
                clk_generated_startup(gck);

        return clk;
}
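
/*
 * Parse the DT description of the generated clocks: the parent list comes
 * from the controller node's "clocks" property, and each child node
 * describes one clock, with "reg" holding the peripheral ID and the
 * optional "atmel,clk-output-range" bounding the output rate.
 */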
void __init of_sama5d2_clk_generated_setup(struct device_node *np,
                                           struct at91_pmc *pmc)
{
        int num;
        u32 id;
        const char *name;
        struct clk *clk;
        int num_parents;
        const char *parent_names[GENERATED_SOURCE_MAX];
        struct device_node *gcknp;
        struct clk_range range = CLK_RANGE(0, 0);

        num_parents = of_clk_get_parent_count(np);
        if (num_parents <= 0 || num_parents > GENERATED_SOURCE_MAX)
                return;

        of_clk_parent_fill(np, parent_names, num_parents);

        num = of_get_child_count(np);
        if (!num || num > PERIPHERAL_MAX)
                return;

        for_each_child_of_node(np, gcknp) {
                if (of_property_read_u32(gcknp, "reg", &id))
                        continue;

                if (id < PERIPHERAL_ID_MIN || id >= PERIPHERAL_MAX)
                        continue;

                if (of_property_read_string(np, "clock-output-names", &name))
                        name = gcknp->name;

                of_at91_get_clk_range(gcknp, "atmel,clk-output-range",
                                      &range);

                clk = at91_clk_register_generated(pmc, name, parent_names,
                                                  num_parents, id, &range);
                if (IS_ERR(clk))
                        continue;

                of_clk_add_provider(gcknp, of_clk_src_simple_get, clk);
        }
}