/* clk-busy.c */
  1. /*
  2. * Copyright 2012 Freescale Semiconductor, Inc.
  3. * Copyright 2012 Linaro Ltd.
  4. *
  5. * The code contained herein is licensed under the GNU General Public
  6. * License. You may obtain a copy of the GNU General Public License
  7. * Version 2 or later at the following locations:
  8. *
  9. * http://www.opensource.org/licenses/gpl-license.html
  10. * http://www.gnu.org/copyleft/gpl.html
  11. */
  12. #include <linux/clk.h>
  13. #include <linux/clk-provider.h>
  14. #include <linux/io.h>
  15. #include <linux/slab.h>
  16. #include <linux/jiffies.h>
  17. #include <linux/err.h>
  18. #include "clk.h"
  19. static int clk_busy_wait(void __iomem *reg, u8 shift)
  20. {
  21. unsigned long timeout = jiffies + msecs_to_jiffies(10);
  22. while (readl_relaxed(reg) & (1 << shift))
  23. if (time_after(jiffies, timeout))
  24. return -ETIMEDOUT;
  25. return 0;
  26. }
/*
 * A clk_divider whose rate changes must be followed by polling a
 * hardware "busy" bit until it clears (see clk_busy_wait()).
 */
struct clk_busy_divider {
	struct clk_divider div;		/* wrapped standard divider */
	const struct clk_ops *div_ops;	/* ops delegated to (clk_divider_ops) */
	void __iomem *reg;		/* register containing the busy bit */
	u8 shift;			/* bit position of the busy flag in @reg */
};
  33. static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
  34. {
  35. struct clk_divider *div = container_of(hw, struct clk_divider, hw);
  36. return container_of(div, struct clk_busy_divider, div);
  37. }
  38. static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
  39. unsigned long parent_rate)
  40. {
  41. struct clk_busy_divider *busy = to_clk_busy_divider(hw);
  42. return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
  43. }
  44. static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
  45. unsigned long *prate)
  46. {
  47. struct clk_busy_divider *busy = to_clk_busy_divider(hw);
  48. return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
  49. }
  50. static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
  51. unsigned long parent_rate)
  52. {
  53. struct clk_busy_divider *busy = to_clk_busy_divider(hw);
  54. int ret;
  55. ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
  56. if (!ret)
  57. ret = clk_busy_wait(busy->reg, busy->shift);
  58. return ret;
  59. }
  60. static struct clk_ops clk_busy_divider_ops = {
  61. .recalc_rate = clk_busy_divider_recalc_rate,
  62. .round_rate = clk_busy_divider_round_rate,
  63. .set_rate = clk_busy_divider_set_rate,
  64. };
  65. struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
  66. void __iomem *reg, u8 shift, u8 width,
  67. void __iomem *busy_reg, u8 busy_shift)
  68. {
  69. struct clk_busy_divider *busy;
  70. struct clk *clk;
  71. struct clk_init_data init;
  72. busy = kzalloc(sizeof(*busy), GFP_KERNEL);
  73. if (!busy)
  74. return ERR_PTR(-ENOMEM);
  75. busy->reg = busy_reg;
  76. busy->shift = busy_shift;
  77. busy->div.reg = reg;
  78. busy->div.shift = shift;
  79. busy->div.width = width;
  80. busy->div.lock = &imx_ccm_lock;
  81. busy->div_ops = &clk_divider_ops;
  82. init.name = name;
  83. init.ops = &clk_busy_divider_ops;
  84. init.flags = CLK_SET_RATE_PARENT;
  85. init.parent_names = &parent_name;
  86. init.num_parents = 1;
  87. busy->div.hw.init = &init;
  88. clk = clk_register(NULL, &busy->div.hw);
  89. if (IS_ERR(clk))
  90. kfree(busy);
  91. return clk;
  92. }
/*
 * A clk_mux whose parent switches must be followed by polling a
 * hardware "busy" bit until it clears (see clk_busy_wait()).
 */
struct clk_busy_mux {
	struct clk_mux mux;		/* wrapped standard mux */
	const struct clk_ops *mux_ops;	/* ops delegated to (clk_mux_ops) */
	void __iomem *reg;		/* register containing the busy bit */
	u8 shift;			/* bit position of the busy flag in @reg */
};
  99. static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
  100. {
  101. struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
  102. return container_of(mux, struct clk_busy_mux, mux);
  103. }
  104. static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
  105. {
  106. struct clk_busy_mux *busy = to_clk_busy_mux(hw);
  107. return busy->mux_ops->get_parent(&busy->mux.hw);
  108. }
  109. static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
  110. {
  111. struct clk_busy_mux *busy = to_clk_busy_mux(hw);
  112. int ret;
  113. ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
  114. if (!ret)
  115. ret = clk_busy_wait(busy->reg, busy->shift);
  116. return ret;
  117. }
  118. static struct clk_ops clk_busy_mux_ops = {
  119. .get_parent = clk_busy_mux_get_parent,
  120. .set_parent = clk_busy_mux_set_parent,
  121. };
  122. struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
  123. u8 width, void __iomem *busy_reg, u8 busy_shift,
  124. const char **parent_names, int num_parents)
  125. {
  126. struct clk_busy_mux *busy;
  127. struct clk *clk;
  128. struct clk_init_data init;
  129. busy = kzalloc(sizeof(*busy), GFP_KERNEL);
  130. if (!busy)
  131. return ERR_PTR(-ENOMEM);
  132. busy->reg = busy_reg;
  133. busy->shift = busy_shift;
  134. busy->mux.reg = reg;
  135. busy->mux.shift = shift;
  136. busy->mux.mask = BIT(width) - 1;
  137. busy->mux.lock = &imx_ccm_lock;
  138. busy->mux_ops = &clk_mux_ops;
  139. init.name = name;
  140. init.ops = &clk_busy_mux_ops;
  141. init.flags = 0;
  142. init.parent_names = parent_names;
  143. init.num_parents = num_parents;
  144. busy->mux.hw.init = &init;
  145. clk = clk_register(NULL, &busy->mux.hw);
  146. if (IS_ERR(clk))
  147. kfree(busy);
  148. return clk;
  149. }