common.c
/*
 * Marvell EBU SoC common clock handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 * Andrew Lunn <andrew@lunn.ch>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>

#include "common.h"

/*
 * Core Clocks
 */

#define SSCG_CONF_MODE(reg)	(((reg) >> 16) & 0x3)
#define SSCG_SPREAD_DOWN	0x0
#define SSCG_SPREAD_UP		0x1
#define SSCG_SPREAD_CENTRAL	0x2
#define SSCG_CONF_LOW(reg)	(((reg) >> 8) & 0xFF)
#define SSCG_CONF_HIGH(reg)	((reg) & 0xFF)
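
/*
 * As decoded by the macros above, the SSCG configuration register
 * carries the spread mode in bits [17:16], the low boundary in bits
 * [15:8] and the high boundary in bits [7:0].
 */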

static struct clk_onecell_data clk_data;

/*
 * This function can be used by the Kirkwood, the Armada 370, the
 * Armada XP and the Armada 375 SoCs. The name of the function was
 * chosen following the DT convention: using the first known SoC
 * compatible with it.
 */
u32 kirkwood_fix_sscg_deviation(u32 system_clk)
{
	struct device_node *sscg_np = NULL;
	void __iomem *sscg_map;
	u32 sscg_reg;
	s32 low_bound, high_bound;
	u64 freq_swing_half;

	sscg_np = of_find_node_by_name(NULL, "sscg");
	if (sscg_np == NULL) {
		pr_err("cannot get SSCG register node\n");
		return system_clk;
	}

	sscg_map = of_iomap(sscg_np, 0);
	if (sscg_map == NULL) {
		pr_err("cannot map SSCG register\n");
		goto out;
	}

	sscg_reg = readl(sscg_map);
	high_bound = SSCG_CONF_HIGH(sscg_reg);
	low_bound = SSCG_CONF_LOW(sscg_reg);

	if ((high_bound - low_bound) <= 0) {
		/* Nothing to adjust; release the mapping before bailing out */
		iounmap(sscg_map);
		goto out;
	}

	/*
	 * From a Marvell engineer we got the following formula (when
	 * this code was written, the datasheet was erroneous):
	 * Spread percentage = 1/96 * (H - L) / H
	 * H = SSCG_High_Boundary
	 * L = SSCG_Low_Boundary
	 *
	 * As the deviation is half of the spread, this leads to the
	 * formula used in the code below.
	 *
	 * To avoid an overflow without losing any significant digits,
	 * the intermediate product is computed in a 64-bit integer.
	 */
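	/*
	 * Purely illustrative, hypothetical numbers: with H = 127,
	 * L = 122 and system_clk = 2000000000 Hz, the spread is
	 * 1/96 * (127 - 122) / 127 ~= 0.041%, so the deviation (half
	 * of the spread) works out to
	 * (127 - 122) * 2000000000 / (2 * 96 * 127) = 410104 Hz,
	 * i.e. roughly 410 kHz.
	 */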
	freq_swing_half = (((u64)high_bound - (u64)low_bound)
					* (u64)system_clk);
	do_div(freq_swing_half, (2 * 96 * high_bound));

	switch (SSCG_CONF_MODE(sscg_reg)) {
	case SSCG_SPREAD_DOWN:
		system_clk -= freq_swing_half;
		break;
	case SSCG_SPREAD_UP:
		system_clk += freq_swing_half;
		break;
	case SSCG_SPREAD_CENTRAL:
	default:
		break;
	}

	iounmap(sscg_map);

out:
	of_node_put(sscg_np);

	return system_clk;
}
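
/*
 * Core clock setup: the clk array handed to the DT provider below is
 * laid out as [0] TCLK, [1] CPU clock, [2..2+num_ratios-1] fixed-factor
 * clocks derived from the CPU clock and, when the SoC descriptor
 * provides one, a trailing reference clock. The default names "tclk",
 * "cpuclk" and "refclk" can be overridden through the DT property
 * "clock-output-names".
 *
 * Illustrative (hypothetical) core clock node, assuming an
 * Armada-370-style binding:
 *
 *	coreclk: mvebu-sar@18230 {
 *		compatible = "marvell,armada-370-core-clock";
 *		reg = <0x18230 0x08>;
 *		#clock-cells = <1>;
 *	};
 */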
void __init mvebu_coreclk_setup(struct device_node *np,
				const struct coreclk_soc_desc *desc)
{
	const char *tclk_name = "tclk";
	const char *cpuclk_name = "cpuclk";
	void __iomem *base;
	unsigned long rate;
	int n;

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	/* Allocate struct for TCLK, cpu clk, and core ratio clocks */
	clk_data.clk_num = 2 + desc->num_ratios;

	/* One more clock for the optional refclk */
	if (desc->get_refclk_freq)
		clk_data.clk_num += 1;

	clk_data.clks = kzalloc(clk_data.clk_num * sizeof(struct clk *),
				GFP_KERNEL);
	if (WARN_ON(!clk_data.clks)) {
		iounmap(base);
		return;
	}

	/* Register TCLK */
	of_property_read_string_index(np, "clock-output-names", 0,
				      &tclk_name);
	rate = desc->get_tclk_freq(base);
	clk_data.clks[0] = clk_register_fixed_rate(NULL, tclk_name, NULL,
						   CLK_IS_ROOT, rate);
	WARN_ON(IS_ERR(clk_data.clks[0]));

	/* Register CPU clock */
	of_property_read_string_index(np, "clock-output-names", 1,
				      &cpuclk_name);
	rate = desc->get_cpu_freq(base);

	if (desc->is_sscg_enabled && desc->fix_sscg_deviation
		&& desc->is_sscg_enabled(base))
		rate = desc->fix_sscg_deviation(rate);

	clk_data.clks[1] = clk_register_fixed_rate(NULL, cpuclk_name, NULL,
						   CLK_IS_ROOT, rate);
	WARN_ON(IS_ERR(clk_data.clks[1]));

	/* Register fixed-factor clocks derived from CPU clock */
	for (n = 0; n < desc->num_ratios; n++) {
		const char *rclk_name = desc->ratios[n].name;
		int mult, div;

		of_property_read_string_index(np, "clock-output-names",
					      2+n, &rclk_name);
		desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div);
		clk_data.clks[2+n] = clk_register_fixed_factor(NULL, rclk_name,
					cpuclk_name, 0, mult, div);
		WARN_ON(IS_ERR(clk_data.clks[2+n]));
	}

	/* Register optional refclk */
	if (desc->get_refclk_freq) {
		const char *name = "refclk";

		of_property_read_string_index(np, "clock-output-names",
					      2 + desc->num_ratios, &name);
		rate = desc->get_refclk_freq(base);
		clk_data.clks[2 + desc->num_ratios] =
			clk_register_fixed_rate(NULL, name, NULL,
						CLK_IS_ROOT, rate);
		WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios]));
	}

	/* SAR register isn't needed anymore */
	iounmap(base);

	of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

/*
 * Clock Gating Control
 */

DEFINE_SPINLOCK(ctrl_gating_lock);
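
/*
 * Book-keeping for a single gating-control register block: the shared
 * spinlock protecting register accesses, the registered gate clocks
 * and their count, the mapped register base, and the register value
 * saved across suspend/resume.
 */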
struct clk_gating_ctrl {
	spinlock_t *lock;
	struct clk **gates;
	int num_gates;
	void __iomem *base;
	u32 saved_reg;
};

#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)

static struct clk_gating_ctrl *ctrl;
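
/*
 * DT clock specifier translation: the first specifier cell is matched
 * against the bit index of each registered gate in the gating-control
 * register.
 */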
static struct clk *clk_gating_get_src(
	struct of_phandle_args *clkspec, void *data)
{
	int n;

	if (clkspec->args_count < 1)
		return ERR_PTR(-EINVAL);

	for (n = 0; n < ctrl->num_gates; n++) {
		struct clk_gate *gate =
			to_clk_gate(__clk_get_hw(ctrl->gates[n]));

		if (clkspec->args[0] == gate->bit_idx)
			return ctrl->gates[n];
	}
	return ERR_PTR(-ENODEV);
}
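
/*
 * The gating register may not retain its contents across a
 * suspend/resume cycle, so it is saved on suspend and written back
 * on resume.
 */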
static int mvebu_clk_gating_suspend(void)
{
	ctrl->saved_reg = readl(ctrl->base);
	return 0;
}

static void mvebu_clk_gating_resume(void)
{
	writel(ctrl->saved_reg, ctrl->base);
}

static struct syscore_ops clk_gate_syscore_ops = {
	.suspend = mvebu_clk_gating_suspend,
	.resume = mvebu_clk_gating_resume,
};
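
/*
 * Register one gate clock per entry of the SoC gate descriptor table
 * and expose them through a DT provider whose single specifier cell is
 * the gate's bit index in the gating-control register.
 *
 * Illustrative (hypothetical) consumer usage, assuming the gating node
 * is labelled "gateclk" and the peripheral clock sits behind bit 23:
 *
 *	clocks = <&gateclk 23>;
 */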
void __init mvebu_clk_gating_setup(struct device_node *np,
				   const struct clk_gating_soc_desc *desc)
{
	struct clk *clk;
	void __iomem *base;
	const char *default_parent = NULL;
	int n;

	if (ctrl) {
		pr_err("mvebu-clk-gating: cannot instantiate more than one gatable clock device\n");
		return;
	}

	base = of_iomap(np, 0);
	if (WARN_ON(!base))
		return;

	clk = of_clk_get(np, 0);
	if (!IS_ERR(clk)) {
		default_parent = __clk_get_name(clk);
		clk_put(clk);
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (WARN_ON(!ctrl))
		goto ctrl_out;

	/* lock must already be initialized */
	ctrl->lock = &ctrl_gating_lock;

	ctrl->base = base;

	/* Count, allocate, and register clock gates */
	for (n = 0; desc[n].name;)
		n++;

	ctrl->num_gates = n;
	ctrl->gates = kzalloc(ctrl->num_gates * sizeof(struct clk *),
			      GFP_KERNEL);
	if (WARN_ON(!ctrl->gates))
		goto gates_out;

	for (n = 0; n < ctrl->num_gates; n++) {
		const char *parent =
			(desc[n].parent) ? desc[n].parent : default_parent;
		ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent,
					desc[n].flags, base, desc[n].bit_idx,
					0, ctrl->lock);
		WARN_ON(IS_ERR(ctrl->gates[n]));
	}

	of_clk_add_provider(np, clk_gating_get_src, ctrl);

	register_syscore_ops(&clk_gate_syscore_ops);

	return;

gates_out:
	kfree(ctrl);
	/* Don't leave a dangling pointer behind on the error path */
	ctrl = NULL;
ctrl_out:
	iounmap(base);
}