dsi_pll_28nm.c

/*
 * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>

#include "dsi_pll.h"
#include "dsi.xml.h"

/*
 * DSI PLL 28nm - clock diagram (eg: DSI0):
 *
 *                            dsi0analog_postdiv_clk
 *                            |          dsi0indirect_path_div2_clk
 *                            |          |
 *                 +------+   |  +----+  |  |\   dsi0byte_mux
 * dsi0vco_clk --o--| DIV1 |--o--| /2 |--o--| \   |
 *              |  +------+     +----+     | m|  |  +----+
 *              |                          | u|--o--| /4 |-- dsi0pllbyte
 *              |                          | x|     +----+
 *              o--------------------------| /
 *              |                          |/
 *              |          +------+
 *              o----------| DIV3 |------------------------- dsi0pll
 *                         +------+
 */

#define POLL_MAX_READS          10
#define POLL_TIMEOUT_US         50

#define NUM_PROVIDED_CLKS       2

#define VCO_REF_CLK_RATE        19200000
#define VCO_MIN_RATE            350000000
#define VCO_MAX_RATE            750000000

#define DSI_BYTE_PLL_CLK        0
#define DSI_PIXEL_PLL_CLK       1

#define LPFR_LUT_SIZE           10

struct lpfr_cfg {
        unsigned long vco_rate;
        u32 resistance;
};

/* Loop filter resistance: */
static const struct lpfr_cfg lpfr_lut[LPFR_LUT_SIZE] = {
        { 479500000,  8 },
        { 480000000, 11 },
        { 575500000,  8 },
        { 576000000, 12 },
        { 610500000,  8 },
        { 659500000,  9 },
        { 671500000, 10 },
        { 672000000, 14 },
        { 708500000, 10 },
        { 750000000, 11 },
};
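
/*
 * dsi_pll_28nm_clk_set_rate() scans this table in order and programs the
 * resistance of the first entry whose vco_rate is >= the requested VCO
 * rate, so the entries must stay sorted by ascending vco_rate.
 */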

struct pll_28nm_cached_state {
        unsigned long vco_rate;
        u8 postdiv3;
        u8 postdiv1;
        u8 byte_mux;
};

struct dsi_pll_28nm {
        struct msm_dsi_pll base;
        int id;
        struct platform_device *pdev;
        void __iomem *mmio;

        int vco_delay;

        /* private clocks: */
        struct clk *clks[NUM_DSI_CLOCKS_MAX];
        u32 num_clks;

        /* clock-provider: */
        struct clk *provided_clks[NUM_PROVIDED_CLKS];
        struct clk_onecell_data clk_data;

        struct pll_28nm_cached_state cached_state;
};

#define to_pll_28nm(x)  container_of(x, struct dsi_pll_28nm, base)

static bool pll_28nm_poll_for_ready(struct dsi_pll_28nm *pll_28nm,
                                u32 nb_tries, u32 timeout_us)
{
        bool pll_locked = false;
        u32 val;

        while (nb_tries--) {
                val = pll_read(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_STATUS);
                pll_locked = !!(val & DSI_28nm_PHY_PLL_STATUS_PLL_RDY);

                if (pll_locked)
                        break;

                udelay(timeout_us);
        }
        DBG("DSI PLL is %slocked", pll_locked ? "" : "*not* ");

        return pll_locked;
}

static void pll_28nm_software_reset(struct dsi_pll_28nm *pll_28nm)
{
        void __iomem *base = pll_28nm->mmio;

        /*
         * Add HW recommended delays after toggling the software
         * reset bit off and back on.
         */
        pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG,
                        DSI_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET, 1);
        pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_TEST_CFG, 0x00, 1);
}

/*
 * Clock Callbacks
 */
static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate)
{
        struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
        struct device *dev = &pll_28nm->pdev->dev;
        void __iomem *base = pll_28nm->mmio;
        unsigned long div_fbx1000, gen_vco_clk;
        u32 refclk_cfg, frac_n_mode, frac_n_value;
        u32 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
        u32 cal_cfg10, cal_cfg11;
        u32 rem;
        int i;

        VERB("rate=%lu, parent's=%lu", rate, parent_rate);

        /* Force postdiv2 to be div-4 */
        pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV2_CFG, 3);

        /* Configure the Loop filter resistance */
        for (i = 0; i < LPFR_LUT_SIZE; i++)
                if (rate <= lpfr_lut[i].vco_rate)
                        break;
        if (i == LPFR_LUT_SIZE) {
                dev_err(dev, "unable to get loop filter resistance. vco=%lu\n",
                                rate);
                return -EINVAL;
        }
        pll_write(base + REG_DSI_28nm_PHY_PLL_LPFR_CFG, lpfr_lut[i].resistance);

        /* Loop filter capacitance values : c1 and c2 */
        pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC1_CFG, 0x70);
        pll_write(base + REG_DSI_28nm_PHY_PLL_LPFC2_CFG, 0x15);

        rem = rate % VCO_REF_CLK_RATE;
        if (rem) {
                refclk_cfg = DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
                frac_n_mode = 1;
                div_fbx1000 = rate / (VCO_REF_CLK_RATE / 500);
                gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 500);
        } else {
                refclk_cfg = 0x0;
                frac_n_mode = 0;
                div_fbx1000 = rate / (VCO_REF_CLK_RATE / 1000);
                gen_vco_clk = div_fbx1000 * (VCO_REF_CLK_RATE / 1000);
        }

        DBG("refclk_cfg = %d", refclk_cfg);

        rem = div_fbx1000 % 1000;
        frac_n_value = (rem << 16) / 1000;

        DBG("div_fb = %lu", div_fbx1000);
        DBG("frac_n_value = %d", frac_n_value);

        DBG("Generated VCO Clock: %lu", gen_vco_clk);
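
        /*
         * Worked example of the divider math above: for a requested VCO rate
         * of 750000000, which is not a multiple of the 19.2 MHz reference,
         * the doubler is enabled, so the effective reference is 38.4 MHz and
         * div_fbx1000 = 750000000 / 38400 = 19531, i.e. a feedback divider
         * of ~19.531.  The integer part minus one (18) becomes the SDM DC
         * offset below, and the fractional part is rescaled to the 16-bit
         * seed: frac_n_value = (531 << 16) / 1000 = 34799.  gen_vco_clk then
         * reads back as 19531 * 38400 = 749990400.
         */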

        rem = 0;
        sdm_cfg1 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1);
        sdm_cfg1 &= ~DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET__MASK;
        if (frac_n_mode) {
                sdm_cfg0 = 0x0;
                sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(0);
                sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(
                                (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
                sdm_cfg3 = frac_n_value >> 8;
                sdm_cfg2 = frac_n_value & 0xff;
        } else {
                sdm_cfg0 = DSI_28nm_PHY_PLL_SDM_CFG0_BYP;
                sdm_cfg0 |= DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV(
                                (u32)(((div_fbx1000 / 1000) & 0x3f) - 1));
                sdm_cfg1 |= DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET(0);
                sdm_cfg2 = 0;
                sdm_cfg3 = 0;
        }

        DBG("sdm_cfg0=%d", sdm_cfg0);
        DBG("sdm_cfg1=%d", sdm_cfg1);
        DBG("sdm_cfg2=%d", sdm_cfg2);
        DBG("sdm_cfg3=%d", sdm_cfg3);

        cal_cfg11 = (u32)(gen_vco_clk / (256 * 1000000));
        cal_cfg10 = (u32)((gen_vco_clk % (256 * 1000000)) / 1000000);
        DBG("cal_cfg10=%d, cal_cfg11=%d", cal_cfg10, cal_cfg11);

        pll_write(base + REG_DSI_28nm_PHY_PLL_CHGPUMP_CFG, 0x02);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG3, 0x2b);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG4, 0x06);
        pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

        pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1, sdm_cfg1);
        pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2,
                DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0(sdm_cfg2));
        pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3,
                DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8(sdm_cfg3));
        pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG4, 0x00);

        /* Add hardware recommended delay for correct PLL configuration */
        if (pll_28nm->vco_delay)
                udelay(pll_28nm->vco_delay);

        pll_write(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG, refclk_cfg);
        pll_write(base + REG_DSI_28nm_PHY_PLL_PWRGEN_CFG, 0x00);
        pll_write(base + REG_DSI_28nm_PHY_PLL_VCOLPF_CFG, 0x31);
        pll_write(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0, sdm_cfg0);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG0, 0x12);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG6, 0x30);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG7, 0x00);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG8, 0x60);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG9, 0x00);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG10, cal_cfg10 & 0xff);
        pll_write(base + REG_DSI_28nm_PHY_PLL_CAL_CFG11, cal_cfg11 & 0xff);
        pll_write(base + REG_DSI_28nm_PHY_PLL_EFUSE_CFG, 0x20);

        return 0;
}

static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
{
        struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

        return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
                                        POLL_TIMEOUT_US);
}

static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
{
        struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
        void __iomem *base = pll_28nm->mmio;
        u32 sdm0, doubler, sdm_byp_div;
        u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
        u32 ref_clk = VCO_REF_CLK_RATE;
        unsigned long vco_rate;

        VERB("parent_rate=%lu", parent_rate);

        /* Check to see if the ref clk doubler is enabled */
        doubler = pll_read(base + REG_DSI_28nm_PHY_PLL_REFCLK_CFG) &
                        DSI_28nm_PHY_PLL_REFCLK_CFG_DBLR;
        ref_clk += (doubler * VCO_REF_CLK_RATE);

        /* see if it is integer mode or sdm mode */
        sdm0 = pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0);
        if (sdm0 & DSI_28nm_PHY_PLL_SDM_CFG0_BYP) {
                /* integer mode */
                sdm_byp_div = FIELD(
                                pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG0),
                                DSI_28nm_PHY_PLL_SDM_CFG0_BYP_DIV) + 1;
                vco_rate = ref_clk * sdm_byp_div;
        } else {
                /* sdm mode */
                sdm_dc_off = FIELD(
                                pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG1),
                                DSI_28nm_PHY_PLL_SDM_CFG1_DC_OFFSET);
                DBG("sdm_dc_off = %d", sdm_dc_off);
                sdm2 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG2),
                                DSI_28nm_PHY_PLL_SDM_CFG2_FREQ_SEED_7_0);
                sdm3 = FIELD(pll_read(base + REG_DSI_28nm_PHY_PLL_SDM_CFG3),
                                DSI_28nm_PHY_PLL_SDM_CFG3_FREQ_SEED_15_8);
                sdm_freq_seed = (sdm3 << 8) | sdm2;
                DBG("sdm_freq_seed = %d", sdm_freq_seed);
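                /*
                 * Undo the programming done in set_rate():
                 * vco = ref * (DC_OFFSET + 1) + ref * freq_seed / 2^16,
                 * with ref being the (possibly doubled) reference clock.
                 */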
                vco_rate = (ref_clk * (sdm_dc_off + 1)) +
                        mult_frac(ref_clk, sdm_freq_seed, BIT(16));
                DBG("vco rate = %lu", vco_rate);
        }

        DBG("returning vco rate = %lu", vco_rate);

        return vco_rate;
}

static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
        .round_rate = msm_dsi_pll_helper_clk_round_rate,
        .set_rate = dsi_pll_28nm_clk_set_rate,
        .recalc_rate = dsi_pll_28nm_clk_recalc_rate,
        .prepare = msm_dsi_pll_helper_clk_prepare,
        .unprepare = msm_dsi_pll_helper_clk_unprepare,
        .is_enabled = dsi_pll_28nm_clk_is_enabled,
};

/*
 * PLL Callbacks
 */
static int dsi_pll_28nm_enable_seq_hpm(struct msm_dsi_pll *pll)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
        struct device *dev = &pll_28nm->pdev->dev;
        void __iomem *base = pll_28nm->mmio;
        u32 max_reads = 5, timeout_us = 100;
        bool locked;
        u32 val;
        int i;

        DBG("id=%d", pll_28nm->id);

        pll_28nm_software_reset(pll_28nm);

        /*
         * PLL power up sequence.
         * Add necessary delays recommended by hardware.
         */
        val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
        pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
        pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
        pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
        pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);

        for (i = 0; i < 2; i++) {
                /* DSI Uniphy lock detect setting */
                pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2,
                                0x0c, 100);
                pll_write(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x0d);

                /* poll for PLL ready status */
                locked = pll_28nm_poll_for_ready(pll_28nm,
                                                max_reads, timeout_us);
                if (locked)
                        break;

                pll_28nm_software_reset(pll_28nm);

                /*
                 * PLL power up sequence.
                 * Add necessary delays recommended by hardware.
                 */
                val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
                pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 1);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
                pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
                pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 250);

                val &= ~DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
                pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 200);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B;
                pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

                val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
                pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 600);
        }

        if (unlikely(!locked))
                dev_err(dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL Lock success");

        return locked ? 0 : -EINVAL;
}

static int dsi_pll_28nm_enable_seq_lp(struct msm_dsi_pll *pll)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
        struct device *dev = &pll_28nm->pdev->dev;
        void __iomem *base = pll_28nm->mmio;
        bool locked;
        u32 max_reads = 10, timeout_us = 50;
        u32 val;

        DBG("id=%d", pll_28nm->id);

        pll_28nm_software_reset(pll_28nm);

        /*
         * PLL power up sequence.
         * Add necessary delays recommended by hardware.
         */
        pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_CAL_CFG1, 0x34, 500);

        val = DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B;
        pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B;
        pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

        val |= DSI_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B |
                DSI_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE;
        pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_GLB_CFG, val, 500);

        /* DSI PLL toggle lock detect setting */
        pll_write_ndelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x04, 500);
        pll_write_udelay(base + REG_DSI_28nm_PHY_PLL_LKDET_CFG2, 0x05, 512);

        locked = pll_28nm_poll_for_ready(pll_28nm, max_reads, timeout_us);

        if (unlikely(!locked))
                dev_err(dev, "DSI PLL lock failed\n");
        else
                DBG("DSI PLL lock success");

        return locked ? 0 : -EINVAL;
}

static void dsi_pll_28nm_disable_seq(struct msm_dsi_pll *pll)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

        DBG("id=%d", pll_28nm->id);
        pll_write(pll_28nm->mmio + REG_DSI_28nm_PHY_PLL_GLB_CFG, 0x00);
}

static void dsi_pll_28nm_save_state(struct msm_dsi_pll *pll)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
        struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
        void __iomem *base = pll_28nm->mmio;

        cached_state->postdiv3 =
                        pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG);
        cached_state->postdiv1 =
                        pll_read(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG);
        cached_state->byte_mux = pll_read(base + REG_DSI_28nm_PHY_PLL_VREG_CFG);
        cached_state->vco_rate = clk_hw_get_rate(&pll->clk_hw);
}

static int dsi_pll_28nm_restore_state(struct msm_dsi_pll *pll)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
        struct pll_28nm_cached_state *cached_state = &pll_28nm->cached_state;
        void __iomem *base = pll_28nm->mmio;
        int ret;

        ret = dsi_pll_28nm_clk_set_rate(&pll->clk_hw,
                                        cached_state->vco_rate, 0);
        if (ret) {
                dev_err(&pll_28nm->pdev->dev,
                        "restore vco rate failed. ret=%d\n", ret);
                return ret;
        }

        pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
                        cached_state->postdiv3);
        pll_write(base + REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
                        cached_state->postdiv1);
        pll_write(base + REG_DSI_28nm_PHY_PLL_VREG_CFG,
                        cached_state->byte_mux);

        return 0;
}

static int dsi_pll_28nm_get_provider(struct msm_dsi_pll *pll,
                                struct clk **byte_clk_provider,
                                struct clk **pixel_clk_provider)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);

        if (byte_clk_provider)
                *byte_clk_provider = pll_28nm->provided_clks[DSI_BYTE_PLL_CLK];
        if (pixel_clk_provider)
                *pixel_clk_provider =
                                pll_28nm->provided_clks[DSI_PIXEL_PLL_CLK];

        return 0;
}

static void dsi_pll_28nm_destroy(struct msm_dsi_pll *pll)
{
        struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
        int i;

        msm_dsi_pll_helper_unregister_clks(pll_28nm->pdev,
                                        pll_28nm->clks, pll_28nm->num_clks);

        for (i = 0; i < NUM_PROVIDED_CLKS; i++)
                pll_28nm->provided_clks[i] = NULL;

        pll_28nm->num_clks = 0;
        pll_28nm->clk_data.clks = NULL;
        pll_28nm->clk_data.clk_num = 0;
}

static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
{
        char clk_name[32], parent1[32], parent2[32], vco_name[32];
        struct clk_init_data vco_init = {
                .parent_names = (const char *[]){ "xo" },
                .num_parents = 1,
                .name = vco_name,
                .ops = &clk_ops_dsi_pll_28nm_vco,
        };
        struct device *dev = &pll_28nm->pdev->dev;
        struct clk **clks = pll_28nm->clks;
        struct clk **provided_clks = pll_28nm->provided_clks;
        int num = 0;
        int ret;

        DBG("%d", pll_28nm->id);
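
        /*
         * Build the clock tree shown in the diagram at the top of this file:
         * the VCO feeds the DIV1 post-divider (dsi%danalog_postdiv_clk),
         * which feeds the fixed /2 (dsi%dindirect_path_div2_clk); the byte
         * mux selects between the VCO and the /2 output and drives the fixed
         * /4 that produces dsi%dpllbyte; DIV3 divides the VCO down to
         * dsi%dpll.  Only dsi%dpllbyte and dsi%dpll are handed out through
         * the OF clock provider below.
         */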

        snprintf(vco_name, 32, "dsi%dvco_clk", pll_28nm->id);
        pll_28nm->base.clk_hw.init = &vco_init;
        clks[num++] = clk_register(dev, &pll_28nm->base.clk_hw);

        snprintf(clk_name, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
        snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
        clks[num++] = clk_register_divider(dev, clk_name,
                        parent1, CLK_SET_RATE_PARENT,
                        pll_28nm->mmio +
                        REG_DSI_28nm_PHY_PLL_POSTDIV1_CFG,
                        0, 4, 0, NULL);

        snprintf(clk_name, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
        snprintf(parent1, 32, "dsi%danalog_postdiv_clk", pll_28nm->id);
        clks[num++] = clk_register_fixed_factor(dev, clk_name,
                        parent1, CLK_SET_RATE_PARENT,
                        1, 2);

        snprintf(clk_name, 32, "dsi%dpll", pll_28nm->id);
        snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
        clks[num++] = provided_clks[DSI_PIXEL_PLL_CLK] =
                        clk_register_divider(dev, clk_name,
                                parent1, 0, pll_28nm->mmio +
                                REG_DSI_28nm_PHY_PLL_POSTDIV3_CFG,
                                0, 8, 0, NULL);

        snprintf(clk_name, 32, "dsi%dbyte_mux", pll_28nm->id);
        snprintf(parent1, 32, "dsi%dvco_clk", pll_28nm->id);
        snprintf(parent2, 32, "dsi%dindirect_path_div2_clk", pll_28nm->id);
        clks[num++] = clk_register_mux(dev, clk_name,
                        (const char *[]){
                                parent1, parent2
                        }, 2, CLK_SET_RATE_PARENT, pll_28nm->mmio +
                        REG_DSI_28nm_PHY_PLL_VREG_CFG, 1, 1, 0, NULL);

        snprintf(clk_name, 32, "dsi%dpllbyte", pll_28nm->id);
        snprintf(parent1, 32, "dsi%dbyte_mux", pll_28nm->id);
        clks[num++] = provided_clks[DSI_BYTE_PLL_CLK] =
                        clk_register_fixed_factor(dev, clk_name,
                                parent1, CLK_SET_RATE_PARENT, 1, 4);

        pll_28nm->num_clks = num;

        pll_28nm->clk_data.clk_num = NUM_PROVIDED_CLKS;
        pll_28nm->clk_data.clks = provided_clks;

        ret = of_clk_add_provider(dev->of_node,
                        of_clk_src_onecell_get, &pll_28nm->clk_data);
        if (ret) {
                dev_err(dev, "failed to register clk provider: %d\n", ret);
                return ret;
        }

        return 0;
}

struct msm_dsi_pll *msm_dsi_pll_28nm_init(struct platform_device *pdev,
                                        enum msm_dsi_phy_type type, int id)
{
        struct dsi_pll_28nm *pll_28nm;
        struct msm_dsi_pll *pll;
        int ret;

        if (!pdev)
                return ERR_PTR(-ENODEV);

        pll_28nm = devm_kzalloc(&pdev->dev, sizeof(*pll_28nm), GFP_KERNEL);
        if (!pll_28nm)
                return ERR_PTR(-ENOMEM);

        pll_28nm->pdev = pdev;
        pll_28nm->id = id;

        pll_28nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
        if (IS_ERR_OR_NULL(pll_28nm->mmio)) {
                dev_err(&pdev->dev, "%s: failed to map pll base\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        pll = &pll_28nm->base;
        pll->min_rate = VCO_MIN_RATE;
        pll->max_rate = VCO_MAX_RATE;
        pll->get_provider = dsi_pll_28nm_get_provider;
        pll->destroy = dsi_pll_28nm_destroy;
        pll->disable_seq = dsi_pll_28nm_disable_seq;
        pll->save_state = dsi_pll_28nm_save_state;
        pll->restore_state = dsi_pll_28nm_restore_state;

        if (type == MSM_DSI_PHY_28NM_HPM) {
                pll_28nm->vco_delay = 1;

                pll->en_seq_cnt = 3;
                pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_hpm;
                pll->enable_seqs[1] = dsi_pll_28nm_enable_seq_hpm;
                pll->enable_seqs[2] = dsi_pll_28nm_enable_seq_hpm;
        } else if (type == MSM_DSI_PHY_28NM_LP) {
                pll_28nm->vco_delay = 1000;

                pll->en_seq_cnt = 1;
                pll->enable_seqs[0] = dsi_pll_28nm_enable_seq_lp;
        } else {
                dev_err(&pdev->dev, "phy type (%d) is not 28nm\n", type);
                return ERR_PTR(-EINVAL);
        }

        ret = pll_28nm_register(pll_28nm);
        if (ret) {
                dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
                return ERR_PTR(ret);
        }

        return pll;
}