- /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
- #include <linux/time.h>
- #include <linux/of.h>
- #include <linux/platform_device.h>
- #include <linux/phy/phy.h>
- #include <linux/phy/phy-qcom-ufs.h>
- #include "ufshcd.h"
- #include "ufshcd-pltfrm.h"
- #include "unipro.h"
- #include "ufs-qcom.h"
- #include "ufshci.h"
- #define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
- (UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
- enum {
- TSTBUS_UAWM,
- TSTBUS_UARM,
- TSTBUS_TXUC,
- TSTBUS_RXUC,
- TSTBUS_DFC,
- TSTBUS_TRLUT,
- TSTBUS_TMRLUT,
- TSTBUS_OCSC,
- TSTBUS_UTP_HCI,
- TSTBUS_COMBINED,
- TSTBUS_WRAPPER,
- TSTBUS_UNIPRO,
- TSTBUS_MAX,
- };
- static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
- static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
- static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
- static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles);
- static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
- char *prefix)
- {
- print_hex_dump(KERN_ERR, prefix,
- len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
- 16, 4, (void __force *)hba->mmio_base + offset,
- len * 4, false);
- }
- static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
- {
- int err = 0;
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
- __func__, err);
- return err;
- }
- static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out)
- {
- struct clk *clk;
- int err = 0;
- clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- dev_err(dev, "%s: failed to get %s err %d",
- __func__, name, err);
- } else {
- *clk_out = clk;
- }
- return err;
- }
- static int ufs_qcom_host_clk_enable(struct device *dev,
- const char *name, struct clk *clk)
- {
- int err = 0;
- err = clk_prepare_enable(clk);
- if (err)
- dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
- return err;
- }
- static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
- {
- if (!host->is_lane_clks_enabled)
- return;
- clk_disable_unprepare(host->tx_l1_sync_clk);
- clk_disable_unprepare(host->tx_l0_sync_clk);
- clk_disable_unprepare(host->rx_l1_sync_clk);
- clk_disable_unprepare(host->rx_l0_sync_clk);
- host->is_lane_clks_enabled = false;
- }
- static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
- {
- int err = 0;
- struct device *dev = host->hba->dev;
- if (host->is_lane_clks_enabled)
- return 0;
- err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
- host->rx_l0_sync_clk);
- if (err)
- goto out;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
- host->tx_l0_sync_clk);
- if (err)
- goto disable_rx_l0;
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
- host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
- host->is_lane_clks_enabled = true;
- goto out;
- disable_rx_l1:
- clk_disable_unprepare(host->rx_l1_sync_clk);
- disable_tx_l0:
- clk_disable_unprepare(host->tx_l0_sync_clk);
- disable_rx_l0:
- clk_disable_unprepare(host->rx_l0_sync_clk);
- out:
- return err;
- }
- static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
- {
- int err = 0;
- struct device *dev = host->hba->dev;
- err = ufs_qcom_host_clk_get(dev,
- "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
- if (err)
- goto out;
- err = ufs_qcom_host_clk_get(dev,
- "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
- if (err)
- goto out;
- err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk);
- if (err)
- goto out;
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk);
- out:
- return err;
- }
- static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- u32 tx_lanes;
- int err = 0;
- err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
- if (err)
- goto out;
- err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
- __func__);
- out:
- return err;
- }
- static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
- {
- int err;
- u32 tx_fsm_val = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
- do {
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &tx_fsm_val);
- if (err || tx_fsm_val == TX_FSM_HIBERN8)
- break;
- /* sleep for max. 200us */
- usleep_range(100, 200);
- } while (time_before(jiffies, timeout));
- /*
- * We might have been scheduled out for a long time during
- * polling, so check the state again.
- */
- if (time_after(jiffies, timeout))
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
- UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
- &tx_fsm_val);
- if (err) {
- dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
- __func__, err);
- } else if (tx_fsm_val != TX_FSM_HIBERN8) {
- err = tx_fsm_val;
- dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
- __func__, err);
- }
- return err;
- }
- static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
- {
- ufshcd_rmwl(host->hba, QUNIPRO_SEL,
- ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
- REG_UFS_CFG1);
- /* make sure above configuration is applied before we return */
- mb();
- }
- static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int ret = 0;
- bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B);
- /* Assert PHY reset and apply PHY calibration values */
- ufs_qcom_assert_reset(hba);
- /* provide 1ms delay to let the reset pulse propagate */
- usleep_range(1000, 1100);
- ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
- if (ret) {
- dev_err(hba->dev,
- "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
- __func__, ret);
- goto out;
- }
- /* De-assert PHY reset and start serdes */
- ufs_qcom_deassert_reset(hba);
- /*
- * After reset de-assertion, the PHY needs its ref clocks, voltage
- * and current to settle down before the serdes is started.
- */
- usleep_range(1000, 1100);
- ret = ufs_qcom_phy_start_serdes(phy);
- if (ret) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_start_serdes() failed, ret = %d\n",
- __func__, ret);
- goto out;
- }
- ret = ufs_qcom_phy_is_pcs_ready(phy);
- if (ret)
- dev_err(hba->dev,
- "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
- __func__, ret);
- ufs_qcom_select_unipro_mode(host);
- out:
- return ret;
- }
- /*
- * The UTP controller has a number of internal clock gating cells (CGCs).
- * Internal hardware sub-modules within the UTP controller control the CGCs.
- * Hardware CGCs disable the clock to UTP sub-modules that are not
- * involved in a specific operation. The UTP controller CGCs are
- * disabled by default; this function enables them (after every UFS
- * link startup) to save some leakage power.
- */
- static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
- {
- ufshcd_writel(hba,
- ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
- REG_UFS_CFG2);
- /* Ensure that HW clock gating is enabled before next operations */
- mb();
- }
- static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err = 0;
- switch (status) {
- case PRE_CHANGE:
- ufs_qcom_power_up_sequence(hba);
- /*
- * The PHY PLL output is the source of tx/rx lane symbol
- * clocks, hence, enable the lane clocks only after PHY
- * is initialized.
- */
- err = ufs_qcom_enable_lane_clks(host);
- break;
- case POST_CHANGE:
- /* check if UFS PHY moved from DISABLED to HIBERN8 */
- err = ufs_qcom_check_hibern8(hba);
- ufs_qcom_enable_hw_clk_gating(hba);
- break;
- default:
- dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
- err = -EINVAL;
- break;
- }
- return err;
- }
- /* Returns zero for success and non-zero in case of a failure. */
- static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer)
- {
- int ret = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_clk_info *clki;
- u32 core_clk_period_in_ns;
- u32 tx_clk_cycles_per_us = 0;
- unsigned long core_clk_rate = 0;
- u32 core_clk_cycles_per_us = 0;
- static u32 pwm_fr_table[][2] = {
- {UFS_PWM_G1, 0x1},
- {UFS_PWM_G2, 0x1},
- {UFS_PWM_G3, 0x1},
- {UFS_PWM_G4, 0x1},
- };
- static u32 hs_fr_table_rA[][2] = {
- {UFS_HS_G1, 0x1F},
- {UFS_HS_G2, 0x3e},
- {UFS_HS_G3, 0x7D},
- };
- static u32 hs_fr_table_rB[][2] = {
- {UFS_HS_G1, 0x24},
- {UFS_HS_G2, 0x49},
- {UFS_HS_G3, 0x92},
- };
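- /*
- * Illustrative note: the second column of each table above is the value
- * looked up into tx_clk_cycles_per_us below and programmed into
- * REG_UFS_TX_SYMBOL_CLK_NS_US, i.e. (as the names suggest) the number of
- * TX symbol clock cycles per microsecond for that gear/rate. For example,
- * HS-G1 uses 0x1F (31) on rate A and 0x24 (36) on rate B.
- */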
- /*
- * The Qunipro controller does not use the following registers:
- * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
- * UFS_REG_PA_LINK_STARTUP_TIMER.
- * However, the UTP controller uses the SYS1CLK_1US_REG register for
- * its interrupt aggregation logic.
- */
- if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
- goto out;
- if (gear == 0) {
- dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
- goto out_error;
- }
- list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
- }
- /* If frequency is smaller than 1MHz, set to 1MHz */
- if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
- core_clk_rate = DEFAULT_CLK_RATE_HZ;
- core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
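- /*
- * Worked example (clock rate assumed for illustration): with core_clk
- * running at 200 MHz, core_clk_cycles_per_us = 200000000 / 1000000 = 200,
- * which is the value programmed into REG_UFS_SYS1CLK_1US below.
- */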
- if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
- ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
- if (ufs_qcom_cap_qunipro(host))
- goto out;
- core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
- core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
- core_clk_period_in_ns &= MASK_CLK_NS_REG;
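- /*
- * Continuing the worked example: a 200 MHz core clock gives a period of
- * 1000000000 / 200000000 = 5 ns, which is shifted and masked into the
- * CLK_NS field (per OFFSET_CLK_NS_REG/MASK_CLK_NS_REG) and later OR'd
- * with tx_clk_cycles_per_us when REG_UFS_TX_SYMBOL_CLK_NS_US is written.
- */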
- switch (hs) {
- case FASTAUTO_MODE:
- case FAST_MODE:
- if (rate == PA_HS_MODE_A) {
- if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rA));
- goto out_error;
- }
- tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
- } else if (rate == PA_HS_MODE_B) {
- if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(hs_fr_table_rB));
- goto out_error;
- }
- tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
- } else {
- dev_err(hba->dev, "%s: invalid rate = %d\n",
- __func__, rate);
- goto out_error;
- }
- break;
- case SLOWAUTO_MODE:
- case SLOW_MODE:
- if (gear > ARRAY_SIZE(pwm_fr_table)) {
- dev_err(hba->dev,
- "%s: index %d exceeds table size %zu\n",
- __func__, gear,
- ARRAY_SIZE(pwm_fr_table));
- goto out_error;
- }
- tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
- break;
- case UNCHANGED:
- default:
- dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
- goto out_error;
- }
- if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
- (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
- /* the two fields of this register must be written at once */
- ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
- REG_UFS_TX_SYMBOL_CLK_NS_US);
- /*
- * make sure above write gets applied before we return from
- * this function.
- */
- mb();
- }
- if (update_link_startup_timer) {
- ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
- REG_UFS_PA_LINK_STARTUP_TIMER);
- /*
- * make sure that this configuration is applied before
- * we return
- */
- mb();
- }
- goto out;
- out_error:
- ret = -EINVAL;
- out:
- return ret;
- }
- static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status)
- {
- int err = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- switch (status) {
- case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- err = -EINVAL;
- goto out;
- }
- if (ufs_qcom_cap_qunipro(host))
- /*
- * set unipro core clock cycles to 150 & clear clock
- * divider
- */
- err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
- 150);
- break;
- case POST_CHANGE:
- ufs_qcom_link_startup_post_change(hba);
- break;
- default:
- break;
- }
- out:
- return err;
- }
- static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int ret = 0;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- /* Assert PHY soft reset */
- ufs_qcom_assert_reset(hba);
- goto out;
- }
- /*
- * If UniPro link is not active, PHY ref_clk, main PHY analog power
- * rail and low noise analog power rail for PLL can be switched off.
- */
- if (!ufs_qcom_is_link_active(hba)) {
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- }
- out:
- return ret;
- }
- static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- int err;
- err = phy_power_on(phy);
- if (err) {
- dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
- __func__, err);
- goto out;
- }
- err = ufs_qcom_enable_lane_clks(host);
- if (err)
- goto out;
- hba->is_sys_suspended = false;
- out:
- return err;
- }
- struct ufs_qcom_dev_params {
- u32 pwm_rx_gear; /* pwm rx gear to work in */
- u32 pwm_tx_gear; /* pwm tx gear to work in */
- u32 hs_rx_gear; /* hs rx gear to work in */
- u32 hs_tx_gear; /* hs tx gear to work in */
- u32 rx_lanes; /* number of rx lanes */
- u32 tx_lanes; /* number of tx lanes */
- u32 rx_pwr_pwm; /* rx pwm working pwr */
- u32 tx_pwr_pwm; /* tx pwm working pwr */
- u32 rx_pwr_hs; /* rx hs working pwr */
- u32 tx_pwr_hs; /* tx hs working pwr */
- u32 hs_rate; /* rate A/B to work in HS */
- u32 desired_working_mode;
- };
- static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
- struct ufs_pa_layer_attr *dev_max,
- struct ufs_pa_layer_attr *agreed_pwr)
- {
- int min_qcom_gear;
- int min_dev_gear;
- bool is_dev_sup_hs = false;
- bool is_qcom_max_hs = false;
- if (dev_max->pwr_rx == FAST_MODE)
- is_dev_sup_hs = true;
- if (qcom_param->desired_working_mode == FAST) {
- is_qcom_max_hs = true;
- min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
- qcom_param->hs_tx_gear);
- } else {
- min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
- qcom_param->pwm_tx_gear);
- }
- /*
- * The device doesn't support HS but qcom_param->desired_working_mode
- * is HS, so the device and qcom_param cannot agree.
- */
- if (!is_dev_sup_hs && is_qcom_max_hs) {
- pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
- __func__);
- return -ENOTSUPP;
- } else if (is_dev_sup_hs && is_qcom_max_hs) {
- /*
- * Since the device supports HS, it supports FAST_MODE.
- * Since qcom_param->desired_working_mode is also HS, the
- * final decision (FAST/FASTAUTO) is made according to
- * qcom_param, as it is the restricting factor.
- */
- agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
- qcom_param->rx_pwr_hs;
- } else {
- /*
- * Here qcom_param->desired_working_mode is PWM.
- * It doesn't matter whether the device supports HS or PWM;
- * in both cases qcom_param->desired_working_mode will
- * determine the mode.
- */
- agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
- qcom_param->rx_pwr_pwm;
- }
- /*
- * We would like TX to work with the minimum number of lanes
- * between the device capability and the vendor preference.
- * The same decision is made for RX.
- */
- agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
- qcom_param->tx_lanes);
- agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
- qcom_param->rx_lanes);
- /* device maximum gear is the minimum between device rx and tx gears */
- min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
- /*
- * If the device capabilities and the vendor pre-defined preferences
- * are both HS or both PWM, set the chosen working gear to the minimum
- * of the two gears.
- * If one is PWM and the other is HS, the PWM side gets to decide the
- * gear, as it is also the side that decided above which power mode the
- * device will be configured to.
- */
- if ((is_dev_sup_hs && is_qcom_max_hs) ||
- (!is_dev_sup_hs && !is_qcom_max_hs))
- agreed_pwr->gear_rx = agreed_pwr->gear_tx =
- min_t(u32, min_dev_gear, min_qcom_gear);
- else if (!is_dev_sup_hs)
- agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
- else
- agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;
- agreed_pwr->hs_rate = qcom_param->hs_rate;
- return 0;
- }
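- /*
- * Worked example (values assumed for illustration): if the device reports
- * FAST_MODE with gear_rx = gear_tx = UFS_HS_G3 and 2 lanes, while the
- * qcom_param limits request HS (desired_working_mode == FAST) with
- * hs_rx_gear = hs_tx_gear = UFS_HS_G2 and 2 lanes, then both sides are HS:
- * agreed_pwr gets the qcom_param rx_pwr_hs/tx_pwr_hs power mode,
- * gear = min(3, 2) = 2, 2 lanes in each direction, and the qcom_param
- * hs_rate.
- */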
- #ifdef CONFIG_MSM_BUS_SCALING
- static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
- const char *speed_mode)
- {
- struct device *dev = host->hba->dev;
- struct device_node *np = dev->of_node;
- int err;
- const char *key = "qcom,bus-vector-names";
- if (!speed_mode) {
- err = -EINVAL;
- goto out;
- }
- if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
- err = of_property_match_string(np, key, "MAX");
- else
- err = of_property_match_string(np, key, speed_mode);
- out:
- if (err < 0)
- dev_err(dev, "%s: Invalid %s mode %d\n",
- __func__, speed_mode, err);
- return err;
- }
- static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
- {
- int gear = max_t(u32, p->gear_rx, p->gear_tx);
- int lanes = max_t(u32, p->lane_rx, p->lane_tx);
- int pwr;
- /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
- if (!gear)
- gear = 1;
- if (!lanes)
- lanes = 1;
- if (!p->pwr_rx && !p->pwr_tx) {
- pwr = SLOWAUTO_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
- } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
- p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
- pwr = FAST_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
- p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
- } else {
- pwr = SLOW_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
- "PWM", gear, lanes);
- }
- }
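- /*
- * Example of the generated vote name (illustrative): HS rate B, gear 3,
- * 2 lanes produces "HS_RB_G3_L2"; an uninitialized power mode produces
- * "MIN". Each name is expected to match an entry of the
- * "qcom,bus-vector-names" DT property looked up by ufs_qcom_get_bus_vote().
- */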
- static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
- {
- int err = 0;
- if (vote != host->bus_vote.curr_vote) {
- err = msm_bus_scale_client_update_request(
- host->bus_vote.client_handle, vote);
- if (err) {
- dev_err(host->hba->dev,
- "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
- __func__, host->bus_vote.client_handle,
- vote, err);
- goto out;
- }
- host->bus_vote.curr_vote = vote;
- }
- out:
- return err;
- }
- static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
- {
- int vote;
- int err = 0;
- char mode[BUS_VECTOR_NAME_LEN];
- ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
- vote = ufs_qcom_get_bus_vote(host, mode);
- if (vote >= 0)
- err = ufs_qcom_set_bus_vote(host, vote);
- else
- err = vote;
- if (err)
- dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
- else
- host->bus_vote.saved_vote = vote;
- return err;
- }
- static ssize_t
- show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- char *buf)
- {
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- return snprintf(buf, PAGE_SIZE, "%u\n",
- host->bus_vote.is_max_bw_needed);
- }
- static ssize_t
- store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- uint32_t value;
- if (!kstrtou32(buf, 0, &value)) {
- host->bus_vote.is_max_bw_needed = !!value;
- ufs_qcom_update_bus_bw_vote(host);
- }
- return count;
- }
- static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
- {
- int err;
- struct msm_bus_scale_pdata *bus_pdata;
- struct device *dev = host->hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct device_node *np = dev->of_node;
- bus_pdata = msm_bus_cl_get_pdata(pdev);
- if (!bus_pdata) {
- dev_err(dev, "%s: failed to get bus vectors\n", __func__);
- err = -ENODATA;
- goto out;
- }
- err = of_property_count_strings(np, "qcom,bus-vector-names");
- if (err < 0 || err != bus_pdata->num_usecases) {
- dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
- __func__, err);
- goto out;
- }
- host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
- if (!host->bus_vote.client_handle) {
- dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
- __func__);
- err = -EFAULT;
- goto out;
- }
- /* cache the vote index for minimum and maximum bandwidth */
- host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
- host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
- host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
- host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
- sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
- host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
- host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
- err = device_create_file(dev, &host->bus_vote.max_bus_bw);
- out:
- return err;
- }
- #else /* CONFIG_MSM_BUS_SCALING */
- static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
- {
- return 0;
- }
- static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
- {
- return 0;
- }
- static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
- {
- return 0;
- }
- #endif /* CONFIG_MSM_BUS_SCALING */
- static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
- {
- if (host->dev_ref_clk_ctrl_mmio &&
- (enable ^ host->is_dev_ref_clk_enabled)) {
- u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
- if (enable)
- temp |= host->dev_ref_clk_en_mask;
- else
- temp &= ~host->dev_ref_clk_en_mask;
- /*
- * If we are here to disable this clock, it might be immediately
- * after entering hibern8, in which case we need to make sure
- * that the device ref_clk stays active for at least 1us after
- * hibern8 entry.
- */
- if (!enable)
- udelay(1);
- writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
- /*
- * If we call hibern8 exit after this, we need to make sure that
- * device ref_clk is stable for at least 1us before the hibern8
- * exit command.
- */
- if (enable)
- udelay(1);
- host->is_dev_ref_clk_enabled = enable;
- }
- }
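- /*
- * Note: the (enable ^ host->is_dev_ref_clk_enabled) test above means the
- * register is only touched on an actual state change; repeated enable or
- * disable calls are no-ops. The udelay(1) before the write on the disable
- * path and after the write on the enable path implement the 1us hibern8
- * entry/exit requirements described in the comments above.
- */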
- static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
- {
- u32 val;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- struct ufs_qcom_dev_params ufs_qcom_cap;
- int ret = 0;
- int res = 0;
- if (!dev_req_params) {
- pr_err("%s: incoming dev_req_params is NULL\n", __func__);
- ret = -EINVAL;
- goto out;
- }
- switch (status) {
- case PRE_CHANGE:
- ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
- ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
- ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
- ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
- ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
- ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
- ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
- ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
- ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
- ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
- ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
- ufs_qcom_cap.desired_working_mode =
- UFS_QCOM_LIMIT_DESIRED_MODE;
- if (host->hw_ver.major == 0x1) {
- /*
- * HS-G3 operations may not reliably work on legacy QCOM
- * UFS host controller hardware, even though the capability
- * exchange during the link startup phase may end up
- * negotiating G3 as the maximum supported gear.
- * Hence downgrade the maximum supported gear to HS-G2.
- */
- if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
- if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
- ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
- }
- ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
- dev_max_params,
- dev_req_params);
- if (ret) {
- pr_err("%s: failed to determine capabilities\n",
- __func__);
- goto out;
- }
- break;
- case POST_CHANGE:
- if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate, false)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- /*
- * we return error code at the end of the routine,
- * but continue to configure UFS_PHY_TX_LANE_ENABLE
- * and bus voting as usual
- */
- ret = -EINVAL;
- }
- val = ~(MAX_U32 << dev_req_params->lane_tx);
- res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
- if (res) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
- __func__, res);
- ret = res;
- }
- /* cache the power mode parameters to use internally */
- memcpy(&host->dev_req_params,
- dev_req_params, sizeof(*dev_req_params));
- ufs_qcom_update_bus_bw_vote(host);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- out:
- return ret;
- }
- static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (host->hw_ver.major == 0x1)
- return UFSHCI_VERSION_11;
- else
- return UFSHCI_VERSION_20;
- }
- /**
- * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
- * @hba: host controller instance
- *
- * The QCOM UFS host controller may have non-standard behaviours (quirks)
- * that deviate from the UFSHCI specification. Advertise all such quirks
- * to the standard UFS host controller driver so that it takes them into
- * account.
- */
- static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (host->hw_ver.major == 0x01) {
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
- if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
- hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
- hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
- }
- if (host->hw_ver.major >= 0x2) {
- hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
- if (!ufs_qcom_cap_qunipro(host))
- /* Legacy UniPro mode still needs the following quirks */
- hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
- }
- }
- static void ufs_qcom_set_caps(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- hba->caps |= UFSHCD_CAP_CLK_SCALING;
- hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
- if (host->hw_ver.major >= 0x2) {
- host->caps = UFS_QCOM_CAP_QUNIPRO |
- UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
- }
- }
- /**
- * ufs_qcom_setup_clocks - enables/disables clocks
- * @hba: host controller instance
- * @on: If true, enable clocks else disable them.
- *
- * Returns 0 on success, non-zero on failure.
- */
- static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- int vote = 0;
- /*
- * In case ufs_qcom_init() has not completed yet, simply ignore.
- * ufs_qcom_setup_clocks() will be called again from
- * ufs_qcom_init() once init is done.
- */
- if (!host)
- return 0;
- if (on) {
- err = ufs_qcom_phy_enable_iface_clk(host->generic_phy);
- if (err)
- goto out;
- err = ufs_qcom_phy_enable_ref_clk(host->generic_phy);
- if (err) {
- dev_err(hba->dev, "%s enable phy ref clock failed, err=%d\n",
- __func__, err);
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- goto out;
- }
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
- } else {
- /* M-PHY RMMI interface clocks can be turned off */
- ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba))
- /* disable device ref_clk */
- ufs_qcom_dev_ref_clk_ctrl(host, false);
- vote = host->bus_vote.min_bw_vote;
- }
- err = ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
- out:
- return err;
- }
- #define ANDROID_BOOT_DEV_MAX 30
- static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
- #ifndef MODULE
- static int __init get_android_boot_dev(char *str)
- {
- strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
- return 1;
- }
- __setup("androidboot.bootdevice=", get_android_boot_dev);
- #endif
- /**
- * ufs_qcom_init - bind phy with controller
- * @hba: host controller instance
- *
- * Binds the PHY with the controller and powers up the PHY,
- * enabling its clocks and regulators.
- *
- * Returns -EPROBE_DEFER if binding fails, a negative error code
- * on PHY power up failure, and zero on success.
- */
- static int ufs_qcom_init(struct ufs_hba *hba)
- {
- int err;
- struct device *dev = hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct ufs_qcom_host *host;
- struct resource *res;
- if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
- return -ENODEV;
- host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
- if (!host) {
- err = -ENOMEM;
- dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
- goto out;
- }
- /* Make a two way bind between the qcom host and the hba */
- host->hba = hba;
- ufshcd_set_variant(hba, host);
- /*
- * Voting/un-voting the device ref_clk source is time consuming, hence
- * skip un-voting it during aggressive clock gating. This clock will
- * still be gated off during runtime suspend.
- */
- host->generic_phy = devm_phy_get(dev, "ufsphy");
- if (IS_ERR(host->generic_phy)) {
- err = PTR_ERR(host->generic_phy);
- dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
- goto out;
- }
- err = ufs_qcom_bus_register(host);
- if (err)
- goto out_host_free;
- ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
- &host->hw_ver.minor, &host->hw_ver.step);
- /*
- * For newer controllers, the device reference clock control bit has
- * moved into the UFS controller's own register address space.
- */
- if (host->hw_ver.major >= 0x02) {
- host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
- host->dev_ref_clk_en_mask = BIT(26);
- } else {
- /* "dev_ref_clk_ctrl_mem" is optional resource */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res) {
- host->dev_ref_clk_ctrl_mmio =
- devm_ioremap_resource(dev, res);
- if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
- dev_warn(dev,
- "%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
- __func__,
- PTR_ERR(host->dev_ref_clk_ctrl_mmio));
- host->dev_ref_clk_ctrl_mmio = NULL;
- }
- host->dev_ref_clk_en_mask = BIT(5);
- }
- }
- /* update phy revision information before calling phy_init() */
- ufs_qcom_phy_save_controller_version(host->generic_phy,
- host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
- phy_init(host->generic_phy);
- err = phy_power_on(host->generic_phy);
- if (err)
- goto out_unregister_bus;
- err = ufs_qcom_init_lane_clks(host);
- if (err)
- goto out_disable_phy;
- ufs_qcom_set_caps(hba);
- ufs_qcom_advertise_quirks(hba);
- ufs_qcom_setup_clocks(hba, true);
- if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
- ufs_qcom_hosts[hba->dev->id] = host;
- host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
- ufs_qcom_get_default_testbus_cfg(host);
- err = ufs_qcom_testbus_config(host);
- if (err) {
- dev_warn(dev, "%s: failed to configure the testbus %d\n",
- __func__, err);
- err = 0;
- }
- goto out;
- out_disable_phy:
- phy_power_off(host->generic_phy);
- out_unregister_bus:
- phy_exit(host->generic_phy);
- out_host_free:
- devm_kfree(dev, host);
- ufshcd_set_variant(hba, NULL);
- out:
- return err;
- }
- static void ufs_qcom_exit(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(host->generic_phy);
- }
- static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
- u32 clk_cycles)
- {
- int err;
- u32 core_clk_ctrl_reg;
- if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
- return -EINVAL;
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
- if (err)
- goto out;
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
- core_clk_ctrl_reg |= clk_cycles;
- /* Clear CORE_CLK_DIV_EN */
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
- out:
- return err;
- }
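- /*
- * Note (interpretation based on the attribute name): clk_cycles is written
- * into the MAX_CORE_CLK_1US_CYCLES field of DME_VS_CORE_CLK_CTRL, i.e. it
- * tells the UniPro core how many core clock cycles fit in 1us. Callers pass
- * 150 at link startup and when scaling the clocks up, and 75 when scaling
- * down, clearing the clock divider bit in both cases.
- */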
- static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
- {
- /* nothing to do as of now */
- return 0;
- }
- static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
- /* set unipro core clock cycles to 150 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
- }
- static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- u32 core_clk_ctrl_reg;
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
- /* make sure CORE_CLK_DIV_EN is cleared */
- if (!err &&
- (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
- }
- return err;
- }
- static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (!ufs_qcom_cap_qunipro(host))
- return 0;
- /* set unipro core clock cycles to 75 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
- }
- static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
- bool scale_up, enum ufs_notify_change_status status)
- {
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
- int err = 0;
- if (status == PRE_CHANGE) {
- if (scale_up)
- err = ufs_qcom_clk_scale_up_pre_change(hba);
- else
- err = ufs_qcom_clk_scale_down_pre_change(hba);
- } else {
- if (scale_up)
- err = ufs_qcom_clk_scale_up_post_change(hba);
- else
- err = ufs_qcom_clk_scale_down_post_change(hba);
- if (err || !dev_req_params)
- goto out;
- ufs_qcom_cfg_timers(hba,
- dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate,
- false);
- ufs_qcom_update_bus_bw_vote(host);
- }
- out:
- return err;
- }
- static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
- {
- /* provide a legal default configuration */
- host->testbus.select_major = TSTBUS_UAWM;
- host->testbus.select_minor = 1;
- }
- static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
- {
- if (host->testbus.select_major >= TSTBUS_MAX) {
- dev_err(host->hba->dev,
- "%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
- return false;
- }
- /*
- * Not performing a check of select_minor against each individual
- * select_major mapping, since there is no harm in configuring a
- * non-existent select_minor.
- */
- if (host->testbus.select_minor > 0x1F) {
- dev_err(host->hba->dev,
- "%s: 0x%05X is not a legal testbus option\n",
- __func__, host->testbus.select_minor);
- return false;
- }
- return true;
- }
- int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
- {
- int reg;
- int offset;
- u32 mask = TEST_BUS_SUB_SEL_MASK;
- if (!host)
- return -EINVAL;
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
- switch (host->testbus.select_major) {
- case TSTBUS_UAWM:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 24;
- break;
- case TSTBUS_UARM:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 16;
- break;
- case TSTBUS_TXUC:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 8;
- break;
- case TSTBUS_RXUC:
- reg = UFS_TEST_BUS_CTRL_0;
- offset = 0;
- break;
- case TSTBUS_DFC:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 24;
- break;
- case TSTBUS_TRLUT:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 16;
- break;
- case TSTBUS_TMRLUT:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 8;
- break;
- case TSTBUS_OCSC:
- reg = UFS_TEST_BUS_CTRL_1;
- offset = 0;
- break;
- case TSTBUS_WRAPPER:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 16;
- break;
- case TSTBUS_COMBINED:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 8;
- break;
- case TSTBUS_UTP_HCI:
- reg = UFS_TEST_BUS_CTRL_2;
- offset = 0;
- break;
- case TSTBUS_UNIPRO:
- reg = UFS_UNIPRO_CFG;
- offset = 1;
- break;
- /*
- * No need for a default case, since
- * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
- * is legal
- */
- }
- mask <<= offset;
- pm_runtime_get_sync(host->hba->dev);
- ufshcd_hold(host->hba, false);
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
- REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
- (u32)host->testbus.select_minor << offset,
- reg);
- ufshcd_release(host->hba);
- pm_runtime_put_sync(host->hba->dev);
- return 0;
- }
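- /*
- * Usage example (illustrative): with select_major = TSTBUS_DFC (4) and
- * select_minor = 1, the code above writes 4 into the TEST_BUS_SEL field of
- * REG_UFS_CFG1 (shifted left by 19) and 1 into the sub-select field of
- * UFS_TEST_BUS_CTRL_1 at bit offset 24; the selected signals can then be
- * sampled by dumping UFS_TEST_BUS via ufs_qcom_testbus_read().
- */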
- static void ufs_qcom_testbus_read(struct ufs_hba *hba)
- {
- ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
- }
- static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
- {
- ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
- "HCI Vendor Specific Registers ");
- ufs_qcom_testbus_read(hba);
- }
- /**
- * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
- *
- * The variant operations configure the necessary controller and PHY
- * handshake during initialization.
- */
- static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
- .name = "qcom",
- .init = ufs_qcom_init,
- .exit = ufs_qcom_exit,
- .get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
- .clk_scale_notify = ufs_qcom_clk_scale_notify,
- .setup_clocks = ufs_qcom_setup_clocks,
- .hce_enable_notify = ufs_qcom_hce_enable_notify,
- .link_startup_notify = ufs_qcom_link_startup_notify,
- .pwr_change_notify = ufs_qcom_pwr_change_notify,
- .suspend = ufs_qcom_suspend,
- .resume = ufs_qcom_resume,
- .dbg_register_dump = ufs_qcom_dump_dbg_regs,
- };
- /**
- * ufs_qcom_probe - probe routine of the driver
- * @pdev: pointer to Platform device handle
- *
- * Return zero for success and non-zero for failure
- */
- static int ufs_qcom_probe(struct platform_device *pdev)
- {
- int err;
- struct device *dev = &pdev->dev;
- /* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
- if (err)
- dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
- return err;
- }
- /**
- * ufs_qcom_remove - set driver_data of the device to NULL
- * @pdev: pointer to platform device handle
- *
- * Always return 0
- */
- static int ufs_qcom_remove(struct platform_device *pdev)
- {
- struct ufs_hba *hba = platform_get_drvdata(pdev);
- pm_runtime_get_sync(&(pdev)->dev);
- ufshcd_remove(hba);
- return 0;
- }
- static const struct of_device_id ufs_qcom_of_match[] = {
- { .compatible = "qcom,ufshc"},
- {},
- };
- MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
- static const struct dev_pm_ops ufs_qcom_pm_ops = {
- .suspend = ufshcd_pltfrm_suspend,
- .resume = ufshcd_pltfrm_resume,
- .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
- .runtime_resume = ufshcd_pltfrm_runtime_resume,
- .runtime_idle = ufshcd_pltfrm_runtime_idle,
- };
- static struct platform_driver ufs_qcom_pltform = {
- .probe = ufs_qcom_probe,
- .remove = ufs_qcom_remove,
- .shutdown = ufshcd_pltfrm_shutdown,
- .driver = {
- .name = "ufshcd-qcom",
- .pm = &ufs_qcom_pm_ops,
- .of_match_table = of_match_ptr(ufs_qcom_of_match),
- },
- };
- module_platform_driver(ufs_qcom_pltform);
- MODULE_LICENSE("GPL v2");