clkgen-mux.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840
  1. /*
  2. * clkgen-mux.c: ST GEN-MUX Clock driver
  3. *
  4. * Copyright (C) 2014 STMicroelectronics (R&D) Limited
  5. *
  6. * Authors: Stephen Gallimore <stephen.gallimore@st.com>
  7. * Pankaj Dev <pankaj.dev@st.com>
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; either version 2 of the License, or
  12. * (at your option) any later version.
  13. *
  14. */
  15. #include <linux/slab.h>
  16. #include <linux/of_address.h>
  17. #include <linux/clk.h>
  18. #include <linux/clk-provider.h>
  19. #include "clkgen.h"
/* Serialises accesses to the shared ClockgenA divmux mux registers. */
static DEFINE_SPINLOCK(clkgena_divmux_lock);
/* Serialises accesses to registers shared between ClockgenF mux channels. */
static DEFINE_SPINLOCK(clkgenf_lock);
  22. static const char ** __init clkgen_mux_get_parents(struct device_node *np,
  23. int *num_parents)
  24. {
  25. const char **parents;
  26. int nparents;
  27. nparents = of_clk_get_parent_count(np);
  28. if (WARN_ON(nparents <= 0))
  29. return ERR_PTR(-EINVAL);
  30. parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
  31. if (!parents)
  32. return ERR_PTR(-ENOMEM);
  33. *num_parents = of_clk_parent_fill(np, parents, nparents);
  34. return parents;
  35. }
  36. /**
  37. * DOC: Clock mux with a programmable divider on each of its three inputs.
  38. * The mux has an input setting which effectively gates its output.
  39. *
  40. * Traits of this clock:
  41. * prepare - clk_(un)prepare only ensures parent is (un)prepared
  42. * enable - clk_enable and clk_disable are functional & control gating
  43. * rate - set rate is supported
  44. * parent - set/get parent
  45. */
/* Number of selectable inputs on each divmux output channel. */
#define NUM_INPUTS 3

struct clkgena_divmux {
	struct clk_hw hw;
	/* Subclassed mux and divider structures */
	struct clk_mux mux;
	struct clk_divider div[NUM_INPUTS];
	/* Enable/running feedback register bits for each input */
	void __iomem *feedback_reg[NUM_INPUTS];
	int feedback_bit_idx;	/* bit tested in feedback_reg[muxsel] */
	u8 muxsel;		/* currently selected input (0..NUM_INPUTS-1) */
};

#define to_clkgena_divmux(_hw) container_of(_hw, struct clkgena_divmux, hw)

/* Per-SoC-variant register layout for one bank of divmux outputs. */
struct clkgena_divmux_data {
	int num_outputs;		/* number of outputs in this bank */
	int mux_offset;			/* first mux select register */
	int mux_offset2;		/* second register once shift > 31 */
	int mux_start_bit;		/* select field position of output 0 */
	int div_offsets[NUM_INPUTS];	/* divider register base per input */
	int fb_offsets[NUM_INPUTS];	/* feedback register per input */
	int fb_start_bit_idx;		/* feedback bit of output 0 */
};

/* Writing this mux select value gates the output (no input selected). */
#define CKGAX_CLKOPSRC_SWITCH_OFF 0x3
  68. static int clkgena_divmux_is_running(struct clkgena_divmux *mux)
  69. {
  70. u32 regval = readl(mux->feedback_reg[mux->muxsel]);
  71. u32 running = regval & BIT(mux->feedback_bit_idx);
  72. return !!running;
  73. }
  74. static int clkgena_divmux_enable(struct clk_hw *hw)
  75. {
  76. struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
  77. struct clk_hw *mux_hw = &genamux->mux.hw;
  78. unsigned long timeout;
  79. int ret = 0;
  80. __clk_hw_set_clk(mux_hw, hw);
  81. ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
  82. if (ret)
  83. return ret;
  84. timeout = jiffies + msecs_to_jiffies(10);
  85. while (!clkgena_divmux_is_running(genamux)) {
  86. if (time_after(jiffies, timeout))
  87. return -ETIMEDOUT;
  88. cpu_relax();
  89. }
  90. return 0;
  91. }
  92. static void clkgena_divmux_disable(struct clk_hw *hw)
  93. {
  94. struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
  95. struct clk_hw *mux_hw = &genamux->mux.hw;
  96. __clk_hw_set_clk(mux_hw, hw);
  97. clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
  98. }
/*
 * The output is considered enabled when the mux reports a valid parent:
 * the "off" selection (CKGAX_CLKOPSRC_SWITCH_OFF) makes get_parent()
 * return an error that goes negative once cast to s8.
 *
 * NOTE(review): the "> 0" comparison also reports parent index 0 as
 * disabled — confirm whether input 0 can ever be the active selection
 * here, otherwise this may have been intended as ">= 0".
 */
static int clkgena_divmux_is_enabled(struct clk_hw *hw)
{
	struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
	struct clk_hw *mux_hw = &genamux->mux.hw;

	__clk_hw_set_clk(mux_hw, hw);

	return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
}
  106. static u8 clkgena_divmux_get_parent(struct clk_hw *hw)
  107. {
  108. struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
  109. struct clk_hw *mux_hw = &genamux->mux.hw;
  110. __clk_hw_set_clk(mux_hw, hw);
  111. genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
  112. if ((s8)genamux->muxsel < 0) {
  113. pr_debug("%s: %s: Invalid parent, setting to default.\n",
  114. __func__, clk_hw_get_name(hw));
  115. genamux->muxsel = 0;
  116. }
  117. return genamux->muxsel;
  118. }
  119. static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index)
  120. {
  121. struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
  122. if (index >= CKGAX_CLKOPSRC_SWITCH_OFF)
  123. return -EINVAL;
  124. genamux->muxsel = index;
  125. /*
  126. * If the mux is already enabled, call enable directly to set the
  127. * new mux position and wait for it to start running again. Otherwise
  128. * do nothing.
  129. */
  130. if (clkgena_divmux_is_enabled(hw))
  131. clkgena_divmux_enable(hw);
  132. return 0;
  133. }
  134. static unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
  135. unsigned long parent_rate)
  136. {
  137. struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
  138. struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
  139. __clk_hw_set_clk(div_hw, hw);
  140. return clk_divider_ops.recalc_rate(div_hw, parent_rate);
  141. }
  142. static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
  143. unsigned long parent_rate)
  144. {
  145. struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
  146. struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
  147. __clk_hw_set_clk(div_hw, hw);
  148. return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
  149. }
  150. static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
  151. unsigned long *prate)
  152. {
  153. struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
  154. struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
  155. __clk_hw_set_clk(div_hw, hw);
  156. return clk_divider_ops.round_rate(div_hw, rate, prate);
  157. }
/*
 * Composite divmux clk_ops: the mux selection doubles as the gate
 * control (see DOC comment above), and rate operations are forwarded to
 * the per-input divider.
 */
static const struct clk_ops clkgena_divmux_ops = {
	.enable = clkgena_divmux_enable,
	.disable = clkgena_divmux_disable,
	.is_enabled = clkgena_divmux_is_enabled,
	.get_parent = clkgena_divmux_get_parent,
	.set_parent = clkgena_divmux_set_parent,
	.round_rate = clkgena_divmux_round_rate,
	.recalc_rate = clkgena_divmux_recalc_rate,
	.set_rate = clkgena_divmux_set_rate,
};
  168. /**
  169. * clk_register_genamux - register a genamux clock with the clock framework
  170. */
  171. static struct clk * __init clk_register_genamux(const char *name,
  172. const char **parent_names, u8 num_parents,
  173. void __iomem *reg,
  174. const struct clkgena_divmux_data *muxdata,
  175. u32 idx)
  176. {
  177. /*
  178. * Fixed constants across all ClockgenA variants
  179. */
  180. const int mux_width = 2;
  181. const int divider_width = 5;
  182. struct clkgena_divmux *genamux;
  183. struct clk *clk;
  184. struct clk_init_data init;
  185. int i;
  186. genamux = kzalloc(sizeof(*genamux), GFP_KERNEL);
  187. if (!genamux)
  188. return ERR_PTR(-ENOMEM);
  189. init.name = name;
  190. init.ops = &clkgena_divmux_ops;
  191. init.flags = CLK_IS_BASIC | CLK_GET_RATE_NOCACHE;
  192. init.parent_names = parent_names;
  193. init.num_parents = num_parents;
  194. genamux->mux.lock = &clkgena_divmux_lock;
  195. genamux->mux.mask = BIT(mux_width) - 1;
  196. genamux->mux.shift = muxdata->mux_start_bit + (idx * mux_width);
  197. if (genamux->mux.shift > 31) {
  198. /*
  199. * We have spilled into the second mux register so
  200. * adjust the register address and the bit shift accordingly
  201. */
  202. genamux->mux.reg = reg + muxdata->mux_offset2;
  203. genamux->mux.shift -= 32;
  204. } else {
  205. genamux->mux.reg = reg + muxdata->mux_offset;
  206. }
  207. for (i = 0; i < NUM_INPUTS; i++) {
  208. /*
  209. * Divider config for each input
  210. */
  211. void __iomem *divbase = reg + muxdata->div_offsets[i];
  212. genamux->div[i].width = divider_width;
  213. genamux->div[i].reg = divbase + (idx * sizeof(u32));
  214. /*
  215. * Mux enabled/running feedback register for each input.
  216. */
  217. genamux->feedback_reg[i] = reg + muxdata->fb_offsets[i];
  218. }
  219. genamux->feedback_bit_idx = muxdata->fb_start_bit_idx + idx;
  220. genamux->hw.init = &init;
  221. clk = clk_register(NULL, &genamux->hw);
  222. if (IS_ERR(clk)) {
  223. kfree(genamux);
  224. goto err;
  225. }
  226. pr_debug("%s: parent %s rate %lu\n",
  227. __clk_get_name(clk),
  228. __clk_get_name(clk_get_parent(clk)),
  229. clk_get_rate(clk));
  230. err:
  231. return clk;
  232. }
/*
 * ClockgenA register layouts, one per SoC/bank variant; matched to DT
 * compatibles in clkgena_divmux_of_match below.
 */
static struct clkgena_divmux_data st_divmux_c65hs = {
	.num_outputs = 4,
	.mux_offset = 0x14,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xb00 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c65ls = {
	.num_outputs = 14,
	.mux_offset = 0x14,
	.mux_offset2 = 0x24,	/* select fields spill past bit 31 */
	.mux_start_bit = 8,
	.div_offsets = { 0x810, 0xa10, 0xb10 },
	.fb_offsets = { 0x18, 0x1c, 0x20 },
	.fb_start_bit_idx = 4,
};

static struct clkgena_divmux_data st_divmux_c32odf0 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 0,
	.div_offsets = { 0x800, 0x900, 0xa60 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 0,
};

static struct clkgena_divmux_data st_divmux_c32odf1 = {
	.num_outputs = 8,
	.mux_offset = 0x1c,
	.mux_start_bit = 16,
	.div_offsets = { 0x820, 0x980, 0xa80 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 8,
};

static struct clkgena_divmux_data st_divmux_c32odf2 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 0,
	.div_offsets = { 0x840, 0xa20, 0xb10 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 16,
};

static struct clkgena_divmux_data st_divmux_c32odf3 = {
	.num_outputs = 8,
	.mux_offset = 0x20,
	.mux_start_bit = 16,
	.div_offsets = { 0x860, 0xa40, 0xb30 },
	.fb_offsets = { 0x2c, 0x24, 0x28 },
	.fb_start_bit_idx = 24,
};
/* DT compatible strings to per-variant divmux register layouts. */
static const struct of_device_id clkgena_divmux_of_match[] = {
	{
		.compatible = "st,clkgena-divmux-c65-hs",
		.data = &st_divmux_c65hs,
	},
	{
		.compatible = "st,clkgena-divmux-c65-ls",
		.data = &st_divmux_c65ls,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf0",
		.data = &st_divmux_c32odf0,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf1",
		.data = &st_divmux_c32odf1,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf2",
		.data = &st_divmux_c32odf2,
	},
	{
		.compatible = "st,clkgena-divmux-c32-odf3",
		.data = &st_divmux_c32odf3,
	},
	{}
};
  309. static void __iomem * __init clkgen_get_register_base(struct device_node *np)
  310. {
  311. struct device_node *pnode;
  312. void __iomem *reg;
  313. pnode = of_get_parent(np);
  314. if (!pnode)
  315. return NULL;
  316. reg = of_iomap(pnode, 0);
  317. of_node_put(pnode);
  318. return reg;
  319. }
/*
 * DT setup entry point for a ClockgenA divmux bank: register one genamux
 * clock per non-empty name in "clock-output-names" and expose them
 * through a onecell provider.
 *
 * NOTE(review): on a mid-loop registration failure, clocks registered in
 * earlier iterations are left registered while their lookup arrays are
 * freed — confirm this is acceptable for an init-time failure path.
 */
static void __init st_of_clkgena_divmux_setup(struct device_node *np)
{
	const struct of_device_id *match;
	const struct clkgena_divmux_data *data;
	struct clk_onecell_data *clk_data;
	void __iomem *reg;
	const char **parents;
	int num_parents = 0, i;

	match = of_match_node(clkgena_divmux_of_match, np);
	if (WARN_ON(!match))
		return;

	data = match->data;

	/* Registers live in the parent node's "reg" region. */
	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		goto err_parents;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_alloc;

	clk_data->clk_num = data->num_outputs;
	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
				 GFP_KERNEL);
	if (!clk_data->clks)
		goto err_alloc_clks;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		clk = clk_register_genamux(clk_name, parents, num_parents,
					   reg, data, i);
		if (IS_ERR(clk))
			goto err;

		clk_data->clks[i] = clk;
	}

	/* The framework copies parent names during registration. */
	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;
err:
	kfree(clk_data->clks);
err_alloc_clks:
	kfree(clk_data);
err_alloc:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgenadivmux, "st,clkgena-divmux", st_of_clkgena_divmux_setup);
/* ClockgenA pre-divider: a single control bit selecting the divide ratio. */
struct clkgena_prediv_data {
	u32 offset;			/* register offset from the bank base */
	u8 shift;			/* position of the control bit */
	struct clk_div_table *table;	/* register value -> divisor mapping */
};

/* Bit clear = divide by 1, bit set = divide by 16. */
static struct clk_div_table prediv_table16[] = {
	{ .val = 0, .div = 1 },
	{ .val = 1, .div = 16 },
	{ .div = 0 },	/* sentinel */
};

static struct clkgena_prediv_data prediv_c65_data = {
	.offset = 0x4c,
	.shift = 31,
	.table = prediv_table16,
};

static struct clkgena_prediv_data prediv_c32_data = {
	.offset = 0x50,
	.shift = 1,
	.table = prediv_table16,
};

static const struct of_device_id clkgena_prediv_of_match[] = {
	{ .compatible = "st,clkgena-prediv-c65", .data = &prediv_c65_data },
	{ .compatible = "st,clkgena-prediv-c32", .data = &prediv_c32_data },
	{}
};
/*
 * DT setup entry point for the ClockgenA pre-divider: register a single
 * table-based divider (/1 or /16) and expose it as a simple provider.
 */
static void __init st_of_clkgena_prediv_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char *parent_name, *clk_name;
	struct clk *clk;
	const struct clkgena_prediv_data *data;

	match = of_match_node(clkgena_prediv_of_match, np);
	if (!match) {
		pr_err("%s: No matching data\n", __func__);
		return;
	}

	data = match->data;

	/* Registers live in the parent node's "reg" region. */
	reg = clkgen_get_register_base(np);
	if (!reg)
		return;

	parent_name = of_clk_get_parent_name(np, 0);
	if (!parent_name)
		goto err;

	if (of_property_read_string_index(np, "clock-output-names",
					  0, &clk_name))
		goto err;

	/* Divider field is 1 bit wide; values mapped through data->table. */
	clk = clk_register_divider_table(NULL, clk_name, parent_name,
					 CLK_GET_RATE_NOCACHE,
					 reg + data->offset, data->shift, 1,
					 0, data->table, NULL);
	if (IS_ERR(clk))
		goto err;

	of_clk_add_provider(np, of_clk_src_simple_get, clk);
	pr_debug("%s: parent %s rate %u\n",
		 __clk_get_name(clk),
		 __clk_get_name(clk_get_parent(clk)),
		 (unsigned int)clk_get_rate(clk));

	return;
err:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgenaprediv, "st,clkgena-prediv", st_of_clkgena_prediv_setup);
/* Parameters for a simple register-field clock mux. */
struct clkgen_mux_data {
	u32 offset;		/* register offset from the node's base */
	u8 shift;		/* select field position */
	u8 width;		/* select field width in bits */
	spinlock_t *lock;	/* set when the register is shared */
	unsigned long clk_flags;
	u8 mux_flags;
};

static struct clkgen_mux_data clkgen_mux_c_vcc_hd_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_fvdp_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

static struct clkgen_mux_data clkgen_mux_f_vcc_hva_416 = {
	.offset = 0,
	.shift = 0,
	.width = 1,
};

/* These two share a register with the ClockgenF VCC block, hence the lock. */
static struct clkgen_mux_data clkgen_mux_f_vcc_hd_416 = {
	.offset = 0,
	.shift = 16,
	.width = 1,
	.lock = &clkgenf_lock,
};

static struct clkgen_mux_data clkgen_mux_c_vcc_sd_416 = {
	.offset = 0,
	.shift = 17,
	.width = 1,
	.lock = &clkgenf_lock,
};

/* A9 CPU clock muxes; clkgen_a9_lock is shared via clkgen.h. */
static struct clkgen_mux_data stih415_a9_mux_data = {
	.offset = 0,
	.shift = 1,
	.width = 2,
	.lock = &clkgen_a9_lock,
};
static struct clkgen_mux_data stih416_a9_mux_data = {
	.offset = 0,
	.shift = 0,
	.width = 2,
};
static struct clkgen_mux_data stih407_a9_mux_data = {
	.offset = 0x1a4,
	.shift = 0,
	.width = 2,
	.lock = &clkgen_a9_lock,
};

static const struct of_device_id mux_of_match[] = {
	{
		.compatible = "st,stih416-clkgenc-vcc-hd",
		.data = &clkgen_mux_c_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-fvdp",
		.data = &clkgen_mux_f_vcc_fvdp_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hva",
		.data = &clkgen_mux_f_vcc_hva_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-hd",
		.data = &clkgen_mux_f_vcc_hd_416,
	},
	{
		.compatible = "st,stih416-clkgenf-vcc-sd",
		.data = &clkgen_mux_c_vcc_sd_416,
	},
	{
		.compatible = "st,stih415-clkgen-a9-mux",
		.data = &stih415_a9_mux_data,
	},
	{
		.compatible = "st,stih416-clkgen-a9-mux",
		.data = &stih416_a9_mux_data,
	},
	{
		.compatible = "st,stih407-clkgen-a9-mux",
		.data = &stih407_a9_mux_data,
	},
	{}
};
  526. static void __init st_of_clkgen_mux_setup(struct device_node *np)
  527. {
  528. const struct of_device_id *match;
  529. struct clk *clk;
  530. void __iomem *reg;
  531. const char **parents;
  532. int num_parents;
  533. const struct clkgen_mux_data *data;
  534. match = of_match_node(mux_of_match, np);
  535. if (!match) {
  536. pr_err("%s: No matching data\n", __func__);
  537. return;
  538. }
  539. data = match->data;
  540. reg = of_iomap(np, 0);
  541. if (!reg) {
  542. pr_err("%s: Failed to get base address\n", __func__);
  543. return;
  544. }
  545. parents = clkgen_mux_get_parents(np, &num_parents);
  546. if (IS_ERR(parents)) {
  547. pr_err("%s: Failed to get parents (%ld)\n",
  548. __func__, PTR_ERR(parents));
  549. goto err_parents;
  550. }
  551. clk = clk_register_mux(NULL, np->name, parents, num_parents,
  552. data->clk_flags | CLK_SET_RATE_PARENT,
  553. reg + data->offset,
  554. data->shift, data->width, data->mux_flags,
  555. data->lock);
  556. if (IS_ERR(clk))
  557. goto err;
  558. pr_debug("%s: parent %s rate %u\n",
  559. __clk_get_name(clk),
  560. __clk_get_name(clk_get_parent(clk)),
  561. (unsigned int)clk_get_rate(clk));
  562. kfree(parents);
  563. of_clk_add_provider(np, of_clk_src_simple_get, clk);
  564. return;
  565. err:
  566. kfree(parents);
  567. err_parents:
  568. iounmap(reg);
  569. }
  570. CLK_OF_DECLARE(clkgen_mux, "st,clkgen-mux", st_of_clkgen_mux_setup);
/* Maximum gate/mux/divider channels in a VCC block. */
#define VCC_MAX_CHANNELS 16

/* Per-block channel register layout. */
#define VCC_GATE_OFFSET 0x0
#define VCC_MUX_OFFSET 0x4
#define VCC_DIV_OFFSET 0x8

struct clkgen_vcc_data {
	spinlock_t *lock;		/* gate register lock, when shared */
	unsigned long clk_flags;	/* extra flags for the composite clk */
};

static struct clkgen_vcc_data st_clkgenc_vcc_416 = {
	.clk_flags = CLK_SET_RATE_PARENT,
};

/* ClockgenF shares its gate register with the standalone muxes above. */
static struct clkgen_vcc_data st_clkgenf_vcc_416 = {
	.lock = &clkgenf_lock,
};

static const struct of_device_id vcc_of_match[] = {
	{ .compatible = "st,stih416-clkgenc", .data = &st_clkgenc_vcc_416 },
	{ .compatible = "st,stih416-clkgenf", .data = &st_clkgenf_vcc_416 },
	{}
};
/*
 * DT setup entry point for a VCC block: register up to VCC_MAX_CHANNELS
 * composite clocks, each built from a gate (bit i of the gate register,
 * set-to-disable), a power-of-two divider (2-bit field at bit 2*i) and a
 * 4-input mux (2-bit field at bit 2*i).
 */
static void __init st_of_clkgen_vcc_setup(struct device_node *np)
{
	const struct of_device_id *match;
	void __iomem *reg;
	const char **parents;
	int num_parents, i;
	struct clk_onecell_data *clk_data;
	const struct clkgen_vcc_data *data;

	match = of_match_node(vcc_of_match, np);
	if (WARN_ON(!match))
		return;
	data = match->data;

	reg = of_iomap(np, 0);
	if (!reg)
		return;

	parents = clkgen_mux_get_parents(np, &num_parents);
	if (IS_ERR(parents))
		goto err_parents;

	clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
	if (!clk_data)
		goto err_alloc;

	clk_data->clk_num = VCC_MAX_CHANNELS;
	clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
				 GFP_KERNEL);

	if (!clk_data->clks)
		goto err_alloc_clks;

	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk *clk;
		const char *clk_name;
		struct clk_gate *gate;
		struct clk_divider *div;
		struct clk_mux *mux;

		if (of_property_read_string_index(np, "clock-output-names",
						  i, &clk_name))
			break;

		/*
		 * If we read an empty clock name then the output is unused
		 */
		if (*clk_name == '\0')
			continue;

		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err;

		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			kfree(gate);
			goto err;
		}

		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux) {
			kfree(gate);
			kfree(div);
			goto err;
		}

		/* A set bit gates (disables) the channel. */
		gate->reg = reg + VCC_GATE_OFFSET;
		gate->bit_idx = i;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
		gate->lock = data->lock;

		div->reg = reg + VCC_DIV_OFFSET;
		div->shift = 2 * i;
		div->width = 2;
		div->flags = CLK_DIVIDER_POWER_OF_TWO |
			CLK_DIVIDER_ROUND_CLOSEST;

		mux->reg = reg + VCC_MUX_OFFSET;
		mux->shift = 2 * i;
		mux->mask = 0x3;

		clk = clk_register_composite(NULL, clk_name, parents,
					     num_parents,
					     &mux->hw, &clk_mux_ops,
					     &div->hw, &clk_divider_ops,
					     &gate->hw, &clk_gate_ops,
					     data->clk_flags |
					     CLK_GET_RATE_NOCACHE);
		if (IS_ERR(clk)) {
			kfree(gate);
			kfree(div);
			kfree(mux);
			goto err;
		}

		pr_debug("%s: parent %s rate %u\n",
			 __clk_get_name(clk),
			 __clk_get_name(clk_get_parent(clk)),
			 (unsigned int)clk_get_rate(clk));

		clk_data->clks[i] = clk;
	}

	/* Parent names were copied during registration. */
	kfree(parents);

	of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
	return;

err:
	/*
	 * Recover the gate/div/mux allocations buried inside each composite
	 * that was registered before the failure.
	 * NOTE(review): the composite clocks themselves remain registered —
	 * confirm that is acceptable for this init-time failure path.
	 */
	for (i = 0; i < clk_data->clk_num; i++) {
		struct clk_composite *composite;

		if (!clk_data->clks[i])
			continue;

		composite = container_of(__clk_get_hw(clk_data->clks[i]),
					 struct clk_composite, hw);
		kfree(container_of(composite->gate_hw, struct clk_gate, hw));
		kfree(container_of(composite->rate_hw, struct clk_divider, hw));
		kfree(container_of(composite->mux_hw, struct clk_mux, hw));
	}

	kfree(clk_data->clks);
err_alloc_clks:
	kfree(clk_data);
err_alloc:
	kfree(parents);
err_parents:
	iounmap(reg);
}
CLK_OF_DECLARE(clkgen_vcc, "st,clkgen-vcc", st_of_clkgen_vcc_setup);