/*
 * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"

#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)

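/*
 * Major select values for the debug test bus. Each entry selects one
 * internal sub-module whose signals are routed to the test bus; the
 * mapping to the TEST_BUS_CTRL register fields is done in
 * ufs_qcom_testbus_config() below.
 */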
enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};

static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);

static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
		char *prefix)
{
	print_hex_dump(KERN_ERR, prefix,
			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
			16, 4, (void __force *)hba->mmio_base + offset,
			len * 4, false);
}

static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}

static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}

static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}

static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}

static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
		host->rx_l1_sync_clk);
	if (err)
		goto disable_tx_l0;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
		host->tx_l1_sync_clk);
	if (err)
		goto disable_rx_l1;

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}

static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
		&host->rx_l1_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
		&host->tx_l1_sync_clk);
out:
	return err;
}

static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

out:
	return err;
}

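/**
 * ufs_qcom_check_hibern8 - verify that the M-PHY TX FSM reached HIBERN8
 * @hba: host controller instance
 *
 * Polls the TX_FSM_STATE attribute for up to HBRN8_POLL_TOUT_MS.
 * Returns 0 if the FSM settled in HIBERN8, a negative error code if the
 * DME read failed, or the unexpected FSM state value otherwise.
 */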
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have scheduled out for a long time during polling, so
	 * check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}

static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		   ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		   REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}

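/**
 * ufs_qcom_power_up_sequence - bring the UFS PHY out of reset
 * @hba: host controller instance
 *
 * Asserts the PHY reset, calibrates the PHY for the configured HS rate,
 * de-asserts the reset, starts the serdes and waits for the physical
 * coding sublayer (PCS) to become ready before selecting the UniPro mode.
 */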
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
	if (ret) {
		dev_err(hba->dev,
			"%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * After reset de-assertion, the PHY needs all its ref clocks,
	 * voltage and current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);
	ret = ufs_qcom_phy_start_serdes(phy);
	if (ret) {
		dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	ret = ufs_qcom_phy_is_pcs_ready(phy);
	if (ret)
		dev_err(hba->dev,
			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
			__func__, ret);

	ufs_qcom_select_unipro_mode(host);

out:
	return ret;
}

/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. The UTP controller CGCs are disabled by default,
 * and this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}

static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);
		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}

/**
 * ufs_qcom_cfg_timers - configure the UTP timer registers for the given mode
 * @hba: host controller instance
 * @gear: power mode gear (PWM or HS)
 * @hs: power mode (FAST/FASTAUTO/SLOW/SLOWAUTO)
 * @rate: HS rate (PA_HS_MODE_A or PA_HS_MODE_B)
 * @update_link_startup_timer: also program REG_UFS_PA_LINK_STARTUP_TIMER
 *
 * Returns zero for success and non-zero in case of a failure.
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};

	/*
	 * The Qunipro controller does not use the following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER.
	 * But the UTP controller uses the SYS1CLK_1US_REG register for
	 * Interrupt Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;
	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* this register's two fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}

static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);
		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}

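/**
 * ufs_qcom_suspend - variant suspend hook
 * @hba: host controller instance
 * @pm_op: power management operation in progress
 *
 * If the link is off, the lane clocks are disabled, the PHY is powered
 * down and its reset is asserted. If the link is merely inactive, only
 * the lane clocks and PHY power are turned off.
 */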
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);
	}

out:
	return ret;
}

static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	hba->is_sys_suspended = false;

out:
	return err;
}

struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};

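/**
 * ufs_qcom_get_pwr_dev_param - negotiate the power mode settings
 * @qcom_param: vendor-preferred limits (gears, lanes, power modes, rate)
 * @dev_max: maximum capabilities reported by the attached device
 * @agreed_pwr: resulting negotiated settings to be applied
 *
 * Picks the minimum of the device capabilities and the vendor preferences
 * for lanes and gear, and resolves HS vs. PWM mode. Returns -ENOTSUPP if
 * HS was requested but the device only supports PWM, zero otherwise.
 */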
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * The device doesn't support HS but qcom_param->desired_working_mode
	 * is HS, thus device and qcom_param don't agree.
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * Since the device supports HS, it supports FAST_MODE.
		 * Since qcom_param->desired_working_mode is also HS,
		 * the final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor.
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * Here qcom_param->desired_working_mode is PWM.
		 * It doesn't matter whether the device supports HS or PWM:
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode.
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * We would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences;
	 * the same decision will be made for rx.
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * If the device capabilities and the vendor pre-defined preferences
	 * are both HS or both PWM, then set the minimum gear to be the
	 * chosen working gear.
	 * If one is PWM and one is HS, then the one that is PWM gets to
	 * decide the gear, as it is the one that also decided previously
	 * what pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}

#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}

static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}

out:
	return err;
}

static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}

static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
				__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */

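/**
 * ufs_qcom_dev_ref_clk_ctrl - gate/ungate the device reference clock
 * @host: qcom host instance
 * @enable: true to enable the device ref_clk, false to disable it
 *
 * Toggles the ref_clk enable bit through dev_ref_clk_ctrl_mmio while
 * honouring the requirement that the clock stays stable for at least
 * 1us around hibern8 enter/exit.
 */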
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}

static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status,
				      struct ufs_pa_layer_attr *dev_max_params,
				      struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}

static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}

/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller may exhibit some non-standard behaviours
 * (quirks) beyond what the UFSHCI specification defines. Advertise all
 * such quirks to the standard UFS host controller driver so that it
 * takes them into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major >= 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}

static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}

/**
 * ufs_qcom_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on) {
		err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
		if (err)
			goto out;

		err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
		if (err) {
			dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
				__func__, err);
			ufs_qcom_phy_disable_iface_clk(host->generic_phy);
			goto out;
		}
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);
	} else {
		/* M-PHY RMMI interface clocks can be turned off */
		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
		if (!ufs_qcom_is_link_active(hba))
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);

		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

out:
	return err;
}

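/*
 * If an "androidboot.bootdevice=" argument is passed on the kernel
 * command line, ufs_qcom_init() bails out with -ENODEV on any controller
 * whose device name does not match it, e.g. (device name is illustrative):
 *
 *	androidboot.bootdevice=624000.ufshc
 */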
#define ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif

/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/*
	 * Voting/de-voting the device ref_clk source is time consuming,
	 * hence skip de-voting it during aggressive clock gating. This
	 * clock will still be gated off during runtime suspend.
	 */
	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_host_free;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * For newer controllers, the device reference clock control bit has
	 * moved inside the UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	phy_init(host->generic_phy);
	err = phy_power_on(host->generic_phy);
	if (err)
		goto out_unregister_bus;

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_disable_phy;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	ufs_qcom_setup_clocks(hba, true);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
				__func__, err);
		err = 0;
	}

	goto out;

out_disable_phy:
	phy_power_off(host->generic_phy);
out_unregister_bus:
	phy_exit(host->generic_phy);
out_host_free:
	devm_kfree(dev, host);
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}

static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
}

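/**
 * ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div - program the UniPro core
 * clock cycle count and clear the clock divider enable bit
 * @hba: host controller instance
 * @clk_cycles: number of core clock cycles in 1us
 *
 * Read-modify-writes the vendor-specific DME_VS_CORE_CLK_CTRL attribute.
 * Returns -EINVAL if @clk_cycles does not fit in the register field.
 */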
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    core_clk_ctrl_reg);
out:
	return err;
}

static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				    core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params)
			goto out;

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufs_qcom_update_bus_bw_vote(host);
	}

out:
	return err;
}

static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UAWM;
	host->testbus.select_minor = 1;
}

static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	/*
	 * Not performing a check for each individual select_major
	 * mapping of select_minor, since there is no harm in
	 * configuring a non-existent select_minor.
	 */
	if (host->testbus.select_minor > 0x1F) {
		dev_err(host->hba->dev,
			"%s: 0x%05X is not a legal testbus option\n",
			__func__, host->testbus.select_minor);
		return false;
	}

	return true;
}

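/**
 * ufs_qcom_testbus_config - route the selected test bus signals
 * @host: qcom host instance
 *
 * Validates the (select_major, select_minor) pair set in host->testbus
 * and programs the corresponding TEST_BUS_SEL and TEST_BUS_CTRL_n
 * register fields so that the selected signals can be observed via
 * UFS_TEST_BUS. Returns 0 on success, -EINVAL/-EPERM on bad input.
 */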
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 1;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;

	pm_runtime_get_sync(host->hba->dev);
	ufshcd_hold(host->hba, false);
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufshcd_release(host->hba);
	pm_runtime_put_sync(host->hba->dev);

	return 0;
}

static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}

static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
			"HCI Vendor Specific Registers ");
	ufs_qcom_testbus_read(hba);
}

/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
};

/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}

/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}

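/*
 * An illustrative (not exhaustive) device tree node matched by this
 * driver; the compatible string is the one in ufs_qcom_of_match below,
 * and the lane clock names correspond to the clocks requested in
 * ufs_qcom_init_lane_clks(). The unit address is hypothetical and other
 * required properties come from the generic UFSHCD platform binding:
 *
 *	ufshc@624000 {
 *		compatible = "qcom,ufshc";
 *		...
 *	};
 */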
static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);

static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume  = ufshcd_pltfrm_runtime_resume,
	.runtime_idle    = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
	},
};
module_platform_driver(ufs_qcom_pltform);

MODULE_LICENSE("GPL v2");