/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET		0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL		0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT		8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET		0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT	16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET		0xC
#define SYS_CTRL_CLK_DIVIDER_MASK			0x3F

#define PMU_DFS_RATIO_SHIFT	16
#define PMU_DFS_RATIO_MASK	0x3F

#define MAX_CPU		4
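/*
 * struct cpu_clk - per-CPU clock bookkeeping
 * @hw:		common clock framework handle for this clock
 * @cpu:	index of the CPU this clock feeds
 * @clk_name:	generated clock name of the form "cpuN"
 * @parent_name: name of the parent (fabric) clock
 * @reg_base:	mapped clock-complex divider registers
 * @pmu_dfs:	mapped per-CPU PMU DFS register, or NULL when the
 *		Device Tree does not describe it
 */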
struct cpu_clk {
	struct clk_hw hw;
	int cpu;
	const char *clk_name;
	const char *parent_name;
	void __iomem *reg_base;
	void __iomem *pmu_dfs;
};

static struct clk **clks;

static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
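/*
 * Each CPU owns an 8-bit field in the divider value register; the low
 * six bits of that field hold the ratio between the parent clock and
 * this CPU's clock.
 */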
static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
					 unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
	return parent_rate / div;
}
static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long *parent_rate)
{
	/* Valid ratios are 1:1, 1:2 and 1:3 */
	u32 div;

	div = *parent_rate / rate;
	if (div == 0)
		div = 1;
	else if (div > 3)
		div = 3;

	return *parent_rate / div;
}
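/*
 * Program a new divider while the CPU clock is not running: write the
 * per-CPU divider field, arm the per-CPU "reload smooth" bit, pulse
 * the global reload trigger (bit 24), then give the clock time to
 * settle before clearing both bits again.
 */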
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);

	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
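/*
 * Change the frequency of a running CPU through the PMU dynamic
 * frequency scaling (DFS) unit: set the target ratio relative to the
 * fabric clock in the per-CPU DFS register, request a ratio reload,
 * and let the PMSU complete the transition.
 */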
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * PMU DFS registers are not mapped, the Device Tree does not
	 * describe them. We cannot change the frequency dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	if (target_div == 0)
		target_div = 1;

	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
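/*
 * Pick the proper programming sequence depending on whether the CPU
 * clock is currently running.
 */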
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	if (__clk_is_enabled(hwclk->clk))
		return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
	else
		return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
}

static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
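/*
 * Parse the clock node, map its registers and register one clock per
 * CPU node found in the Device Tree. An illustrative consumer node (a
 * sketch only; the register addresses here are made up and the
 * authoritative example lives in the binding document):
 *
 *	cpuclk: clock-complex@18700 {
 *		#clock-cells = <1>;
 *		compatible = "marvell,armada-xp-cpu-clock";
 *		reg = <0x18700 0xa0>, <0x1c054 0x10>;
 *		clocks = <&coreclk 1>;
 *	};
 */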
static void __init of_cpu_clk_setup(struct device_node *node)
{
	struct cpu_clk *cpuclk;
	void __iomem *clock_complex_base = of_iomap(node, 0);
	void __iomem *pmu_dfs_base = of_iomap(node, 1);
	int ncpus = 0;
	struct device_node *dn;

	if (clock_complex_base == NULL) {
		pr_err("%s: clock-complex base register not set\n",
			__func__);
		return;
	}

	if (pmu_dfs_base == NULL)
		pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
			__func__);

	for_each_node_by_type(dn, "cpu")
		ncpus++;

	cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
	if (WARN_ON(!cpuclk))
		goto cpuclk_out;

	clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
	if (WARN_ON(!clks))
		goto clks_out;

	for_each_node_by_type(dn, "cpu") {
		struct clk_init_data init;
		struct clk *clk;
		char *clk_name = kzalloc(5, GFP_KERNEL);
		int cpu, err;

		if (WARN_ON(!clk_name))
			goto bail_out;

		err = of_property_read_u32(dn, "reg", &cpu);
		if (WARN_ON(err)) {
			/* Not yet linked into cpuclk[], so free it here */
			kfree(clk_name);
			goto bail_out;
		}

		sprintf(clk_name, "cpu%d", cpu);

		cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
		cpuclk[cpu].clk_name = clk_name;
		cpuclk[cpu].cpu = cpu;
		cpuclk[cpu].reg_base = clock_complex_base;
		if (pmu_dfs_base)
			cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
		cpuclk[cpu].hw.init = &init;

		init.name = cpuclk[cpu].clk_name;
		init.ops = &cpu_ops;
		init.flags = 0;
		init.parent_names = &cpuclk[cpu].parent_name;
		init.num_parents = 1;

		clk = clk_register(NULL, &cpuclk[cpu].hw);
		if (WARN_ON(IS_ERR(clk)))
			goto bail_out;
		clks[cpu] = clk;
	}
	clk_data.clk_num = MAX_CPU;
	clk_data.clks = clks;
	of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);

	return;

bail_out:
	kfree(clks);
	while (ncpus--)
		kfree(cpuclk[ncpus].clk_name);
clks_out:
	kfree(cpuclk);
cpuclk_out:
	iounmap(clock_complex_base);
}
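/*
 * Run of_cpu_clk_setup() from of_clk_init() for every node that
 * matches the "marvell,armada-xp-cpu-clock" compatible string.
 */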
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);