ufshcd.c

/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */
#include <linux/async.h>
#include <linux/devfreq.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES	10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT	30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES	10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT	30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES	3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES	5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC	0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
	({ \
		int _ret; \
		if (_on) \
			_ret = ufshcd_enable_vreg(_dev, _vreg); \
		else \
			_ret = ufshcd_disable_vreg(_dev, _vreg); \
		_ret; \
	})

static u32 ufs_query_desc_max_size[] = {
	QUERY_DESC_DEVICE_MAX_SIZE,
	QUERY_DESC_CONFIGURAION_MAX_SIZE,
	QUERY_DESC_UNIT_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_INTERCONNECT_MAX_SIZE,
	QUERY_DESC_STRING_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
	QUERY_DESC_GEOMETRY_MAZ_SIZE,
	QUERY_DESC_POWER_MAX_SIZE,
	QUERY_DESC_RFU_MAX_SIZE,
};
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)

static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *pwr_mode);
static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				  hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}
/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
		u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		/* wakeup within 50us of expiry */
		usleep_range(interval_us, interval_us + 50);

		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
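
/*
 * Illustrative usage sketch (not part of this file's original code): a caller
 * waiting for a doorbell bit to clear could poll it with the helper above.
 * The 'mask' here is a hypothetical single-tag bit; passing ~mask as the wait
 * condition means "wait until the masked bits read back as zero":
 *
 *	if (ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				     mask, ~mask, 1000, 1000))
 *		dev_err(hba->dev, "doorbell bit did not clear in time\n");
 */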
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? 1 : 0;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else return 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 *	Bit	Description
	 *	0	Device Present
	 *	1	UTRLRDY
	 *	2	UTMRLRDY
	 *	3	UCRDY
	 *	4	HEI
	 *	5	DEI
	 *	6-7	reserved
	 */
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}
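
/*
 * Worked example of the check above (illustrative register value, not read
 * from hardware): if HCS reads 0x0F, i.e. Device Present plus UTRLRDY,
 * UTMRLRDY and UCRDY all set, then ((0x0F & 0xFF) >> 1) = 0x07 and
 * 0x07 ^ 0x07 = 0, the "all lists ready" success case. Any ready bit that is
 * still zero leaves a non-zero result.
 */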
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
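
/*
 * Illustrative call (hypothetical values, not taken from this file): a caller
 * could program a threshold of 31 completed requests together with the
 * default timeout, e.g.
 *
 *	ufshcd_config_intr_aggr(hba, 31, INT_AGGR_DEF_TO);
 *
 * where INT_AGGR_DEF_TO (0x02) is expressed in 40us units, i.e. 80us.
 */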
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable the run-stop registers.
 * Setting the run-stop registers to 1 indicates to the host
 * controller that it can start processing requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);
	scsi_unblock_requests(hba->host);
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in the ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.is_suspended) {
		hba->clk_gating.state = CLKS_ON;
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work, the gating state
	 * would be marked as REQ_CLKS_ON. In that case keep the state
	 * as REQ_CLKS_ON, which would anyway imply that clocks are off
	 * and a request to turn them on is pending. This way we keep
	 * the state machine intact and ultimately prevent the cancel
	 * work from being run multiple times when new requests arrive
	 * before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF)
		hba->clk_gating.state = CLKS_OFF;

rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
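
/*
 * Illustrative pairing sketch (hypothetical caller, not code from this file):
 * any path that needs the controller clocks on should bracket its register
 * accesses with the hold/release pair above, so the delayed gate work cannot
 * switch the clocks off underneath it:
 *
 *	ufshcd_hold(hba, false);	// ungate clocks, exit hibern8 if needed
 *	... issue commands / touch controller registers ...
 *	ufshcd_release(hba);		// re-arm gating after delay_ms expires
 */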
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
}
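
/*
 * The attribute registered above appears as a writable sysfs file named
 * clkgate_delay_ms under the host's device directory. Assuming the usual
 * sysfs layout (path shown only as an example), the 150 ms default could be
 * tuned from user space with:
 *
 *	echo 200 > /sys/devices/.../clkgate_delay_ms
 */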
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);

		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_command: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that it does not take the
 * mutex itself. Must be called with the mutex held and host_lock locked.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}
/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Returns 0 only if success.
 */
static int
ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	ufshcd_hold(hba, false);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
					cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}
  934. /**
  935. * ufshcd_enable_intr - enable interrupts
  936. * @hba: per adapter instance
  937. * @intrs: interrupt bits
  938. */
  939. static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
  940. {
  941. u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
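/*
 * UFSHCI 1.0 controllers need special handling of the interrupt
 * enable register: keep only the bits covered by
 * INTERRUPT_MASK_RW_VER_10 from the current value and then add the
 * newly requested interrupt bits on top.
 */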
  942. if (hba->ufs_version == UFSHCI_VERSION_10) {
  943. u32 rw;
  944. rw = set & INTERRUPT_MASK_RW_VER_10;
  945. set = rw | ((set ^ intrs) & intrs);
  946. } else {
  947. set |= intrs;
  948. }
  949. ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  950. }
  951. /**
  952. * ufshcd_disable_intr - disable interrupts
  953. * @hba: per adapter instance
  954. * @intrs: interrupt bits
  955. */
  956. static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
  957. {
  958. u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
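/*
 * Mirror of ufshcd_enable_intr() for UFSHCI 1.0: the read-write
 * portion of the register (INTERRUPT_MASK_RW_VER_10) is handled
 * separately from the remaining bits when clearing the requested
 * interrupts.
 */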
  959. if (hba->ufs_version == UFSHCI_VERSION_10) {
  960. u32 rw;
  961. rw = (set & INTERRUPT_MASK_RW_VER_10) &
  962. ~(intrs & INTERRUPT_MASK_RW_VER_10);
  963. set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
  964. } else {
  965. set &= ~intrs;
  966. }
  967. ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  968. }
  969. /**
  970. * ufshcd_prepare_req_desc_hdr() - Fills the requests header
  971. * descriptor according to request
  972. * @lrbp: pointer to local reference block
  973. * @upiu_flags: flags required in the header
  974. * @cmd_dir: requests data direction
  975. */
  976. static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
  977. u32 *upiu_flags, enum dma_data_direction cmd_dir)
  978. {
  979. struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
  980. u32 data_direction;
  981. u32 dword_0;
  982. if (cmd_dir == DMA_FROM_DEVICE) {
  983. data_direction = UTP_DEVICE_TO_HOST;
  984. *upiu_flags = UPIU_CMD_FLAGS_READ;
  985. } else if (cmd_dir == DMA_TO_DEVICE) {
  986. data_direction = UTP_HOST_TO_DEVICE;
  987. *upiu_flags = UPIU_CMD_FLAGS_WRITE;
  988. } else {
  989. data_direction = UTP_NO_DATA_TRANSFER;
  990. *upiu_flags = UPIU_CMD_FLAGS_NONE;
  991. }
  992. dword_0 = data_direction | (lrbp->command_type
  993. << UPIU_COMMAND_TYPE_OFFSET);
  994. if (lrbp->intr_cmd)
  995. dword_0 |= UTP_REQ_DESC_INT_CMD;
  996. /* Transfer request descriptor header fields */
  997. req_desc->header.dword_0 = cpu_to_le32(dword_0);
  998. /*
999. * Assign an invalid value to the command status. The controller
1000. * overwrites OCS with the real command status on completion, so
1001. * this marks the slot as not yet completed.
  1002. */
  1003. req_desc->header.dword_2 =
  1004. cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
  1005. }
  1006. /**
  1007. * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
  1008. * for scsi commands
  1009. * @lrbp - local reference block pointer
  1010. * @upiu_flags - flags
  1011. */
  1012. static
  1013. void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
  1014. {
  1015. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1016. /* command descriptor fields */
  1017. ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
  1018. UPIU_TRANSACTION_COMMAND, upiu_flags,
  1019. lrbp->lun, lrbp->task_tag);
  1020. ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
  1021. UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
  1022. /* Total EHS length and Data segment length will be zero */
  1023. ucd_req_ptr->header.dword_2 = 0;
  1024. ucd_req_ptr->sc.exp_data_transfer_len =
  1025. cpu_to_be32(lrbp->cmd->sdb.length);
  1026. memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
  1027. (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
  1028. }
  1029. /**
  1030. * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
1031. * for query requests
  1032. * @hba: UFS hba
  1033. * @lrbp: local reference block pointer
  1034. * @upiu_flags: flags
  1035. */
  1036. static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
  1037. struct ufshcd_lrb *lrbp, u32 upiu_flags)
  1038. {
  1039. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1040. struct ufs_query *query = &hba->dev_cmd.query;
  1041. u16 len = be16_to_cpu(query->request.upiu_req.length);
  1042. u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
  1043. /* Query request header */
  1044. ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
  1045. UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
  1046. lrbp->lun, lrbp->task_tag);
  1047. ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
  1048. 0, query->request.query_func, 0, 0);
  1049. /* Data segment length */
  1050. ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
  1051. 0, 0, len >> 8, (u8)len);
  1052. /* Copy the Query Request buffer as is */
  1053. memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
  1054. QUERY_OSF_SIZE);
  1055. /* Copy the Descriptor */
  1056. if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
  1057. memcpy(descp, query->descriptor, len);
  1058. }
  1059. static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
  1060. {
  1061. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1062. memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
  1063. /* command descriptor fields */
  1064. ucd_req_ptr->header.dword_0 =
  1065. UPIU_HEADER_DWORD(
  1066. UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
  1067. }
  1068. /**
  1069. * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
  1070. * @hba - per adapter instance
1071. * @lrbp - pointer to local reference block
  1072. */
  1073. static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1074. {
  1075. u32 upiu_flags;
  1076. int ret = 0;
  1077. switch (lrbp->command_type) {
  1078. case UTP_CMD_TYPE_SCSI:
  1079. if (likely(lrbp->cmd)) {
  1080. ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
  1081. lrbp->cmd->sc_data_direction);
  1082. ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
  1083. } else {
  1084. ret = -EINVAL;
  1085. }
  1086. break;
  1087. case UTP_CMD_TYPE_DEV_MANAGE:
  1088. ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
  1089. if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
  1090. ufshcd_prepare_utp_query_req_upiu(
  1091. hba, lrbp, upiu_flags);
  1092. else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
  1093. ufshcd_prepare_utp_nop_upiu(lrbp);
  1094. else
  1095. ret = -EINVAL;
  1096. break;
  1097. case UTP_CMD_TYPE_UFS:
  1098. /* For UFS native command implementation */
  1099. ret = -ENOTSUPP;
  1100. dev_err(hba->dev, "%s: UFS native command are not supported\n",
  1101. __func__);
  1102. break;
  1103. default:
  1104. ret = -ENOTSUPP;
  1105. dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
  1106. __func__, lrbp->command_type);
  1107. break;
  1108. } /* end of switch */
  1109. return ret;
  1110. }
  1111. /*
  1112. * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
  1113. * @scsi_lun: scsi LUN id
  1114. *
  1115. * Returns UPIU LUN id
  1116. */
  1117. static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
  1118. {
  1119. if (scsi_is_wlun(scsi_lun))
  1120. return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
  1121. | UFS_UPIU_WLUN_ID;
  1122. else
  1123. return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
  1124. }
  1125. /**
  1126. * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1127. * @upiu_wlun_id: UPIU W-LUN id
  1128. *
  1129. * Returns SCSI W-LUN id
  1130. */
  1131. static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
  1132. {
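/*
 * Clear the UFS W-LUN flag and substitute the SCSI well known LUN
 * base, so UFS well known logical units show up in the standard SCSI
 * W-LUN address range.
 */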
  1133. return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
  1134. }
  1135. /**
  1136. * ufshcd_queuecommand - main entry point for SCSI requests
1137. * @host: SCSI host pointer
1138. * @cmd: command from SCSI Midlayer
  1139. *
  1140. * Returns 0 for success, non-zero in case of failure
  1141. */
  1142. static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
  1143. {
  1144. struct ufshcd_lrb *lrbp;
  1145. struct ufs_hba *hba;
  1146. unsigned long flags;
  1147. int tag;
  1148. int err = 0;
  1149. hba = shost_priv(host);
  1150. tag = cmd->request->tag;
  1151. spin_lock_irqsave(hba->host->host_lock, flags);
  1152. switch (hba->ufshcd_state) {
  1153. case UFSHCD_STATE_OPERATIONAL:
  1154. break;
  1155. case UFSHCD_STATE_RESET:
  1156. err = SCSI_MLQUEUE_HOST_BUSY;
  1157. goto out_unlock;
  1158. case UFSHCD_STATE_ERROR:
  1159. set_host_byte(cmd, DID_ERROR);
  1160. cmd->scsi_done(cmd);
  1161. goto out_unlock;
  1162. default:
  1163. dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
  1164. __func__, hba->ufshcd_state);
  1165. set_host_byte(cmd, DID_BAD_TARGET);
  1166. cmd->scsi_done(cmd);
  1167. goto out_unlock;
  1168. }
  1169. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1170. /* acquire the tag to make sure device cmds don't use it */
  1171. if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
  1172. /*
  1173. * Dev manage command in progress, requeue the command.
1174. * Requeuing helps here: the request *may* pick up a different free
1175. * tag instead of waiting for the device management command to
1176. * complete.
  1177. */
  1178. err = SCSI_MLQUEUE_HOST_BUSY;
  1179. goto out;
  1180. }
  1181. err = ufshcd_hold(hba, true);
  1182. if (err) {
  1183. err = SCSI_MLQUEUE_HOST_BUSY;
  1184. clear_bit_unlock(tag, &hba->lrb_in_use);
  1185. goto out;
  1186. }
  1187. WARN_ON(hba->clk_gating.state != CLKS_ON);
  1188. lrbp = &hba->lrb[tag];
  1189. WARN_ON(lrbp->cmd);
  1190. lrbp->cmd = cmd;
  1191. lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
  1192. lrbp->sense_buffer = cmd->sense_buffer;
  1193. lrbp->task_tag = tag;
  1194. lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
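/*
 * When interrupt aggregation is not in use, mark each request as an
 * "interrupt command" so its completion raises an interrupt on its
 * own.
 */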
1195. lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
  1196. lrbp->command_type = UTP_CMD_TYPE_SCSI;
  1197. /* form UPIU before issuing the command */
  1198. ufshcd_compose_upiu(hba, lrbp);
  1199. err = ufshcd_map_sg(lrbp);
  1200. if (err) {
  1201. lrbp->cmd = NULL;
  1202. clear_bit_unlock(tag, &hba->lrb_in_use);
  1203. goto out;
  1204. }
  1205. /* issue command to the controller */
  1206. spin_lock_irqsave(hba->host->host_lock, flags);
  1207. ufshcd_send_command(hba, tag);
  1208. out_unlock:
  1209. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1210. out:
  1211. return err;
  1212. }
  1213. static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
  1214. struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
  1215. {
  1216. lrbp->cmd = NULL;
  1217. lrbp->sense_bufflen = 0;
  1218. lrbp->sense_buffer = NULL;
  1219. lrbp->task_tag = tag;
  1220. lrbp->lun = 0; /* device management cmd is not specific to any LUN */
  1221. lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
  1222. lrbp->intr_cmd = true; /* No interrupt aggregation */
  1223. hba->dev_cmd.type = cmd_type;
  1224. return ufshcd_compose_upiu(hba, lrbp);
  1225. }
  1226. static int
  1227. ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
  1228. {
  1229. int err = 0;
  1230. unsigned long flags;
  1231. u32 mask = 1 << tag;
  1232. /* clear outstanding transaction before retry */
  1233. spin_lock_irqsave(hba->host->host_lock, flags);
  1234. ufshcd_utrl_clear(hba, tag);
  1235. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1236. /*
1237. * Wait for the h/w to clear the corresponding bit in the door-bell.
  1238. * max. wait is 1 sec.
  1239. */
  1240. err = ufshcd_wait_for_register(hba,
  1241. REG_UTP_TRANSFER_REQ_DOOR_BELL,
  1242. mask, ~mask, 1000, 1000);
  1243. return err;
  1244. }
  1245. static int
  1246. ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1247. {
  1248. struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
  1249. /* Get the UPIU response */
  1250. query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
  1251. UPIU_RSP_CODE_OFFSET;
  1252. return query_res->response;
  1253. }
  1254. /**
  1255. * ufshcd_dev_cmd_completion() - handles device management command responses
  1256. * @hba: per adapter instance
  1257. * @lrbp: pointer to local reference block
  1258. */
  1259. static int
  1260. ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1261. {
  1262. int resp;
  1263. int err = 0;
  1264. resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  1265. switch (resp) {
  1266. case UPIU_TRANSACTION_NOP_IN:
  1267. if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
  1268. err = -EINVAL;
  1269. dev_err(hba->dev, "%s: unexpected response %x\n",
  1270. __func__, resp);
  1271. }
  1272. break;
  1273. case UPIU_TRANSACTION_QUERY_RSP:
  1274. err = ufshcd_check_query_response(hba, lrbp);
  1275. if (!err)
  1276. err = ufshcd_copy_query_response(hba, lrbp);
  1277. break;
  1278. case UPIU_TRANSACTION_REJECT_UPIU:
  1279. /* TODO: handle Reject UPIU Response */
  1280. err = -EPERM;
  1281. dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
  1282. __func__);
  1283. break;
  1284. default:
  1285. err = -EINVAL;
  1286. dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
  1287. __func__, resp);
  1288. break;
  1289. }
  1290. return err;
  1291. }
  1292. static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
  1293. struct ufshcd_lrb *lrbp, int max_timeout)
  1294. {
  1295. int err = 0;
  1296. unsigned long time_left;
  1297. unsigned long flags;
  1298. time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
  1299. msecs_to_jiffies(max_timeout));
  1300. spin_lock_irqsave(hba->host->host_lock, flags);
  1301. hba->dev_cmd.complete = NULL;
  1302. if (likely(time_left)) {
  1303. err = ufshcd_get_tr_ocs(lrbp);
  1304. if (!err)
  1305. err = ufshcd_dev_cmd_completion(hba, lrbp);
  1306. }
  1307. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1308. if (!time_left) {
  1309. err = -ETIMEDOUT;
  1310. if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
1311. /* successfully cleared the command, retry if needed */
  1312. err = -EAGAIN;
  1313. }
  1314. return err;
  1315. }
  1316. /**
  1317. * ufshcd_get_dev_cmd_tag - Get device management command tag
  1318. * @hba: per-adapter instance
  1319. * @tag: pointer to variable with available slot value
  1320. *
  1321. * Get a free slot and lock it until device management command
  1322. * completes.
  1323. *
  1324. * Returns false if free slot is unavailable for locking, else
  1325. * return true with tag value in @tag.
  1326. */
  1327. static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
  1328. {
  1329. int tag;
  1330. bool ret = false;
  1331. unsigned long tmp;
  1332. if (!tag_out)
  1333. goto out;
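/*
 * Scan for a free slot starting from the highest tag and claim it
 * atomically; loop again if another context wins the race for the
 * same tag before test_and_set_bit_lock() succeeds.
 */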
  1334. do {
  1335. tmp = ~hba->lrb_in_use;
  1336. tag = find_last_bit(&tmp, hba->nutrs);
  1337. if (tag >= hba->nutrs)
  1338. goto out;
  1339. } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
  1340. *tag_out = tag;
  1341. ret = true;
  1342. out:
  1343. return ret;
  1344. }
  1345. static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
  1346. {
  1347. clear_bit_unlock(tag, &hba->lrb_in_use);
  1348. }
  1349. /**
  1350. * ufshcd_exec_dev_cmd - API for sending device management requests
  1351. * @hba - UFS hba
  1352. * @cmd_type - specifies the type (NOP, Query...)
1353. * @timeout - timeout in milliseconds
  1354. *
  1355. * NOTE: Since there is only one available tag for device management commands,
  1356. * it is expected you hold the hba->dev_cmd.lock mutex.
  1357. */
  1358. static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
  1359. enum dev_cmd_type cmd_type, int timeout)
  1360. {
  1361. struct ufshcd_lrb *lrbp;
  1362. int err;
  1363. int tag;
  1364. struct completion wait;
  1365. unsigned long flags;
  1366. /*
  1367. * Get free slot, sleep if slots are unavailable.
  1368. * Even though we use wait_event() which sleeps indefinitely,
  1369. * the maximum wait time is bounded by SCSI request timeout.
  1370. */
  1371. wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
  1372. init_completion(&wait);
  1373. lrbp = &hba->lrb[tag];
  1374. WARN_ON(lrbp->cmd);
  1375. err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
  1376. if (unlikely(err))
  1377. goto out_put_tag;
  1378. hba->dev_cmd.complete = &wait;
  1379. spin_lock_irqsave(hba->host->host_lock, flags);
  1380. ufshcd_send_command(hba, tag);
  1381. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1382. err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
  1383. out_put_tag:
  1384. ufshcd_put_dev_cmd_tag(hba, tag);
  1385. wake_up(&hba->dev_cmd.tag_wq);
  1386. return err;
  1387. }
  1388. /**
  1389. * ufshcd_init_query() - init the query response and request parameters
  1390. * @hba: per-adapter instance
  1391. * @request: address of the request pointer to be initialized
  1392. * @response: address of the response pointer to be initialized
  1393. * @opcode: operation to perform
  1394. * @idn: flag idn to access
  1395. * @index: LU number to access
  1396. * @selector: query/flag/descriptor further identification
  1397. */
  1398. static inline void ufshcd_init_query(struct ufs_hba *hba,
  1399. struct ufs_query_req **request, struct ufs_query_res **response,
  1400. enum query_opcode opcode, u8 idn, u8 index, u8 selector)
  1401. {
  1402. *request = &hba->dev_cmd.query.request;
  1403. *response = &hba->dev_cmd.query.response;
  1404. memset(*request, 0, sizeof(struct ufs_query_req));
  1405. memset(*response, 0, sizeof(struct ufs_query_res));
  1406. (*request)->upiu_req.opcode = opcode;
  1407. (*request)->upiu_req.idn = idn;
  1408. (*request)->upiu_req.index = index;
  1409. (*request)->upiu_req.selector = selector;
  1410. }
  1411. /**
  1412. * ufshcd_query_flag() - API function for sending flag query requests
1413. * @hba: per-adapter instance
1414. * @opcode: flag query to perform
1415. * @idn: flag idn to access
1416. * @flag_res: the flag value after the query request completes
  1417. *
  1418. * Returns 0 for success, non-zero in case of failure
  1419. */
  1420. static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
  1421. enum flag_idn idn, bool *flag_res)
  1422. {
  1423. struct ufs_query_req *request = NULL;
  1424. struct ufs_query_res *response = NULL;
  1425. int err, index = 0, selector = 0;
  1426. BUG_ON(!hba);
  1427. ufshcd_hold(hba, false);
  1428. mutex_lock(&hba->dev_cmd.lock);
  1429. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1430. selector);
  1431. switch (opcode) {
  1432. case UPIU_QUERY_OPCODE_SET_FLAG:
  1433. case UPIU_QUERY_OPCODE_CLEAR_FLAG:
  1434. case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
  1435. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1436. break;
  1437. case UPIU_QUERY_OPCODE_READ_FLAG:
  1438. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1439. if (!flag_res) {
  1440. /* No dummy reads */
  1441. dev_err(hba->dev, "%s: Invalid argument for read request\n",
  1442. __func__);
  1443. err = -EINVAL;
  1444. goto out_unlock;
  1445. }
  1446. break;
  1447. default:
  1448. dev_err(hba->dev,
  1449. "%s: Expected query flag opcode but got = %d\n",
  1450. __func__, opcode);
  1451. err = -EINVAL;
  1452. goto out_unlock;
  1453. }
  1454. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1455. if (err) {
  1456. dev_err(hba->dev,
  1457. "%s: Sending flag query for idn %d failed, err = %d\n",
  1458. __func__, idn, err);
  1459. goto out_unlock;
  1460. }
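/*
 * The flag value is carried in the value field of the response;
 * only its least significant bit is meaningful.
 */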
  1461. if (flag_res)
  1462. *flag_res = (be32_to_cpu(response->upiu_res.value) &
  1463. MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
  1464. out_unlock:
  1465. mutex_unlock(&hba->dev_cmd.lock);
  1466. ufshcd_release(hba);
  1467. return err;
  1468. }
  1469. /**
  1470. * ufshcd_query_attr - API function for sending attribute requests
1471. * @hba: per-adapter instance
1472. * @opcode: attribute opcode
1473. * @idn: attribute idn to access
1474. * @index: index field
1475. * @selector: selector field
1476. * @attr_val: the attribute value after the query request completes
  1477. *
  1478. * Returns 0 for success, non-zero in case of failure
  1479. */
  1480. static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
  1481. enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
  1482. {
  1483. struct ufs_query_req *request = NULL;
  1484. struct ufs_query_res *response = NULL;
  1485. int err;
  1486. BUG_ON(!hba);
  1487. ufshcd_hold(hba, false);
  1488. if (!attr_val) {
  1489. dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
  1490. __func__, opcode);
  1491. err = -EINVAL;
  1492. goto out;
  1493. }
  1494. mutex_lock(&hba->dev_cmd.lock);
  1495. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1496. selector);
  1497. switch (opcode) {
  1498. case UPIU_QUERY_OPCODE_WRITE_ATTR:
  1499. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1500. request->upiu_req.value = cpu_to_be32(*attr_val);
  1501. break;
  1502. case UPIU_QUERY_OPCODE_READ_ATTR:
  1503. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1504. break;
  1505. default:
  1506. dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
  1507. __func__, opcode);
  1508. err = -EINVAL;
  1509. goto out_unlock;
  1510. }
  1511. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1512. if (err) {
  1513. dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
  1514. __func__, opcode, idn, err);
  1515. goto out_unlock;
  1516. }
  1517. *attr_val = be32_to_cpu(response->upiu_res.value);
  1518. out_unlock:
  1519. mutex_unlock(&hba->dev_cmd.lock);
  1520. out:
  1521. ufshcd_release(hba);
  1522. return err;
  1523. }
  1524. /**
  1525. * ufshcd_query_descriptor - API function for sending descriptor requests
1526. * @hba: per-adapter instance
1527. * @opcode: descriptor query opcode to perform
1528. * @idn: descriptor idn to access
1529. * @index: index field
1530. * @selector: selector field
1531. * @desc_buf: the buffer that contains the descriptor
1532. * @buf_len: length parameter passed to the device
  1533. *
  1534. * Returns 0 for success, non-zero in case of failure.
  1535. * The buf_len parameter will contain, on return, the length parameter
  1536. * received on the response.
  1537. */
  1538. static int ufshcd_query_descriptor(struct ufs_hba *hba,
  1539. enum query_opcode opcode, enum desc_idn idn, u8 index,
  1540. u8 selector, u8 *desc_buf, int *buf_len)
  1541. {
  1542. struct ufs_query_req *request = NULL;
  1543. struct ufs_query_res *response = NULL;
  1544. int err;
  1545. BUG_ON(!hba);
  1546. ufshcd_hold(hba, false);
  1547. if (!desc_buf) {
  1548. dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
  1549. __func__, opcode);
  1550. err = -EINVAL;
  1551. goto out;
  1552. }
  1553. if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
  1554. dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
  1555. __func__, *buf_len);
  1556. err = -EINVAL;
  1557. goto out;
  1558. }
  1559. mutex_lock(&hba->dev_cmd.lock);
  1560. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1561. selector);
  1562. hba->dev_cmd.query.descriptor = desc_buf;
  1563. request->upiu_req.length = cpu_to_be16(*buf_len);
  1564. switch (opcode) {
  1565. case UPIU_QUERY_OPCODE_WRITE_DESC:
  1566. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1567. break;
  1568. case UPIU_QUERY_OPCODE_READ_DESC:
  1569. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1570. break;
  1571. default:
  1572. dev_err(hba->dev,
  1573. "%s: Expected query descriptor opcode but got = 0x%.2x\n",
  1574. __func__, opcode);
  1575. err = -EINVAL;
  1576. goto out_unlock;
  1577. }
  1578. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1579. if (err) {
  1580. dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
  1581. __func__, opcode, idn, err);
  1582. goto out_unlock;
  1583. }
  1584. hba->dev_cmd.query.descriptor = NULL;
  1585. *buf_len = be16_to_cpu(response->upiu_res.length);
  1586. out_unlock:
  1587. mutex_unlock(&hba->dev_cmd.lock);
  1588. out:
  1589. ufshcd_release(hba);
  1590. return err;
  1591. }
  1592. /**
  1593. * ufshcd_read_desc_param - read the specified descriptor parameter
  1594. * @hba: Pointer to adapter instance
  1595. * @desc_id: descriptor idn value
  1596. * @desc_index: descriptor index
  1597. * @param_offset: offset of the parameter to read
  1598. * @param_read_buf: pointer to buffer where parameter would be read
  1599. * @param_size: sizeof(param_read_buf)
  1600. *
  1601. * Return 0 in case of success, non-zero otherwise
  1602. */
  1603. static int ufshcd_read_desc_param(struct ufs_hba *hba,
  1604. enum desc_idn desc_id,
  1605. int desc_index,
  1606. u32 param_offset,
  1607. u8 *param_read_buf,
  1608. u32 param_size)
  1609. {
  1610. int ret;
  1611. u8 *desc_buf;
  1612. u32 buff_len;
  1613. bool is_kmalloc = true;
  1614. /* safety checks */
  1615. if (desc_id >= QUERY_DESC_IDN_MAX)
  1616. return -EINVAL;
  1617. buff_len = ufs_query_desc_max_size[desc_id];
  1618. if ((param_offset + param_size) > buff_len)
  1619. return -EINVAL;
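/*
 * If the caller's buffer covers the whole descriptor, read directly
 * into it; otherwise bounce through a temporary buffer and copy out
 * only the requested parameter bytes afterwards.
 */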
  1620. if (!param_offset && (param_size == buff_len)) {
  1621. /* memory space already available to hold full descriptor */
  1622. desc_buf = param_read_buf;
  1623. is_kmalloc = false;
  1624. } else {
  1625. /* allocate memory to hold full descriptor */
  1626. desc_buf = kmalloc(buff_len, GFP_KERNEL);
  1627. if (!desc_buf)
  1628. return -ENOMEM;
  1629. }
  1630. ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
  1631. desc_id, desc_index, 0, desc_buf,
  1632. &buff_len);
  1633. if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
  1634. (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
  1635. ufs_query_desc_max_size[desc_id])
  1636. || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
  1637. dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
  1638. __func__, desc_id, param_offset, buff_len, ret);
  1639. if (!ret)
  1640. ret = -EINVAL;
  1641. goto out;
  1642. }
  1643. if (is_kmalloc)
  1644. memcpy(param_read_buf, &desc_buf[param_offset], param_size);
  1645. out:
  1646. if (is_kmalloc)
  1647. kfree(desc_buf);
  1648. return ret;
  1649. }
  1650. static inline int ufshcd_read_desc(struct ufs_hba *hba,
  1651. enum desc_idn desc_id,
  1652. int desc_index,
  1653. u8 *buf,
  1654. u32 size)
  1655. {
  1656. return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
  1657. }
  1658. static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
  1659. u8 *buf,
  1660. u32 size)
  1661. {
  1662. return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
  1663. }
  1664. /**
  1665. * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  1666. * @hba: Pointer to adapter instance
  1667. * @lun: lun id
  1668. * @param_offset: offset of the parameter to read
  1669. * @param_read_buf: pointer to buffer where parameter would be read
  1670. * @param_size: sizeof(param_read_buf)
  1671. *
  1672. * Return 0 in case of success, non-zero otherwise
  1673. */
  1674. static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
  1675. int lun,
  1676. enum unit_desc_param param_offset,
  1677. u8 *param_read_buf,
  1678. u32 param_size)
  1679. {
  1680. /*
  1681. * Unit descriptors are only available for general purpose LUs (LUN id
  1682. * from 0 to 7) and RPMB Well known LU.
  1683. */
  1684. if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
  1685. return -EOPNOTSUPP;
  1686. return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
  1687. param_offset, param_read_buf, param_size);
  1688. }
  1689. /**
  1690. * ufshcd_memory_alloc - allocate memory for host memory space data structures
  1691. * @hba: per adapter instance
  1692. *
  1693. * 1. Allocate DMA memory for Command Descriptor array
  1694. * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
  1695. * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
  1696. * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
  1697. * (UTMRDL)
  1698. * 4. Allocate memory for local reference block(lrb).
  1699. *
  1700. * Returns 0 for success, non-zero in case of failure
  1701. */
  1702. static int ufshcd_memory_alloc(struct ufs_hba *hba)
  1703. {
  1704. size_t utmrdl_size, utrdl_size, ucdl_size;
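/*
 * All allocations below use managed (dmam_/devm_) interfaces, so they
 * are released automatically when the device is unbound and no
 * explicit free path is needed here.
 */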
  1705. /* Allocate memory for UTP command descriptors */
  1706. ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
  1707. hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
  1708. ucdl_size,
  1709. &hba->ucdl_dma_addr,
  1710. GFP_KERNEL);
  1711. /*
  1712. * UFSHCI requires UTP command descriptor to be 128 byte aligned.
  1713. * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
  1714. * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
  1715. * be aligned to 128 bytes as well
  1716. */
  1717. if (!hba->ucdl_base_addr ||
  1718. WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
  1719. dev_err(hba->dev,
  1720. "Command Descriptor Memory allocation failed\n");
  1721. goto out;
  1722. }
  1723. /*
  1724. * Allocate memory for UTP Transfer descriptors
  1725. * UFSHCI requires 1024 byte alignment of UTRD
  1726. */
  1727. utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
  1728. hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
  1729. utrdl_size,
  1730. &hba->utrdl_dma_addr,
  1731. GFP_KERNEL);
  1732. if (!hba->utrdl_base_addr ||
  1733. WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
  1734. dev_err(hba->dev,
  1735. "Transfer Descriptor Memory allocation failed\n");
  1736. goto out;
  1737. }
  1738. /*
  1739. * Allocate memory for UTP Task Management descriptors
  1740. * UFSHCI requires 1024 byte alignment of UTMRD
  1741. */
  1742. utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
  1743. hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
  1744. utmrdl_size,
  1745. &hba->utmrdl_dma_addr,
  1746. GFP_KERNEL);
  1747. if (!hba->utmrdl_base_addr ||
  1748. WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
  1749. dev_err(hba->dev,
  1750. "Task Management Descriptor Memory allocation failed\n");
  1751. goto out;
  1752. }
  1753. /* Allocate memory for local reference block */
  1754. hba->lrb = devm_kzalloc(hba->dev,
  1755. hba->nutrs * sizeof(struct ufshcd_lrb),
  1756. GFP_KERNEL);
  1757. if (!hba->lrb) {
  1758. dev_err(hba->dev, "LRB Memory allocation failed\n");
  1759. goto out;
  1760. }
  1761. return 0;
  1762. out:
  1763. return -ENOMEM;
  1764. }
  1765. /**
  1766. * ufshcd_host_memory_configure - configure local reference block with
  1767. * memory offsets
  1768. * @hba: per adapter instance
  1769. *
  1770. * Configure Host memory space
  1771. * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
  1772. * address.
  1773. * 2. Update each UTRD with Response UPIU offset, Response UPIU length
  1774. * and PRDT offset.
  1775. * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
  1776. * into local reference block.
  1777. */
  1778. static void ufshcd_host_memory_configure(struct ufs_hba *hba)
  1779. {
  1780. struct utp_transfer_cmd_desc *cmd_descp;
  1781. struct utp_transfer_req_desc *utrdlp;
  1782. dma_addr_t cmd_desc_dma_addr;
  1783. dma_addr_t cmd_desc_element_addr;
  1784. u16 response_offset;
  1785. u16 prdt_offset;
  1786. int cmd_desc_size;
  1787. int i;
  1788. utrdlp = hba->utrdl_base_addr;
  1789. cmd_descp = hba->ucdl_base_addr;
  1790. response_offset =
  1791. offsetof(struct utp_transfer_cmd_desc, response_upiu);
  1792. prdt_offset =
  1793. offsetof(struct utp_transfer_cmd_desc, prd_table);
  1794. cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
  1795. cmd_desc_dma_addr = hba->ucdl_dma_addr;
  1796. for (i = 0; i < hba->nutrs; i++) {
  1797. /* Configure UTRD with command descriptor base address */
  1798. cmd_desc_element_addr =
  1799. (cmd_desc_dma_addr + (cmd_desc_size * i));
  1800. utrdlp[i].command_desc_base_addr_lo =
  1801. cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
  1802. utrdlp[i].command_desc_base_addr_hi =
  1803. cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
  1804. /* Response upiu and prdt offset should be in double words */
  1805. utrdlp[i].response_upiu_offset =
  1806. cpu_to_le16((response_offset >> 2));
  1807. utrdlp[i].prd_table_offset =
  1808. cpu_to_le16((prdt_offset >> 2));
  1809. utrdlp[i].response_upiu_length =
  1810. cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
  1811. hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
  1812. hba->lrb[i].ucd_req_ptr =
  1813. (struct utp_upiu_req *)(cmd_descp + i);
  1814. hba->lrb[i].ucd_rsp_ptr =
  1815. (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
  1816. hba->lrb[i].ucd_prdt_ptr =
  1817. (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
  1818. }
  1819. }
  1820. /**
  1821. * ufshcd_dme_link_startup - Notify Unipro to perform link startup
  1822. * @hba: per adapter instance
  1823. *
  1824. * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
  1825. * in order to initialize the Unipro link startup procedure.
  1826. * Once the Unipro links are up, the device connected to the controller
  1827. * is detected.
  1828. *
  1829. * Returns 0 on success, non-zero value on failure
  1830. */
  1831. static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  1832. {
  1833. struct uic_command uic_cmd = {0};
  1834. int ret;
  1835. uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
  1836. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  1837. if (ret)
  1838. dev_err(hba->dev,
  1839. "dme-link-startup: error code %d\n", ret);
  1840. return ret;
  1841. }
  1842. static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
  1843. {
  1844. #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
  1845. unsigned long min_sleep_time_us;
  1846. if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
  1847. return;
  1848. /*
  1849. * last_dme_cmd_tstamp will be 0 only for 1st call to
  1850. * this function
  1851. */
  1852. if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
  1853. min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
  1854. } else {
  1855. unsigned long delta =
  1856. (unsigned long) ktime_to_us(
  1857. ktime_sub(ktime_get(),
  1858. hba->last_dme_cmd_tstamp));
  1859. if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
  1860. min_sleep_time_us =
  1861. MIN_DELAY_BEFORE_DME_CMDS_US - delta;
  1862. else
  1863. return; /* no more delay required */
  1864. }
  1865. /* allow sleep for extra 50us if needed */
  1866. usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
  1867. }
  1868. /**
  1869. * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
  1870. * @hba: per adapter instance
  1871. * @attr_sel: uic command argument1
  1872. * @attr_set: attribute set type as uic command argument2
  1873. * @mib_val: setting value as uic command argument3
  1874. * @peer: indicate whether peer or local
  1875. *
  1876. * Returns 0 on success, non-zero value on failure
  1877. */
  1878. int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
  1879. u8 attr_set, u32 mib_val, u8 peer)
  1880. {
  1881. struct uic_command uic_cmd = {0};
  1882. static const char *const action[] = {
  1883. "dme-set",
  1884. "dme-peer-set"
  1885. };
  1886. const char *set = action[!!peer];
  1887. int ret;
  1888. uic_cmd.command = peer ?
  1889. UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
  1890. uic_cmd.argument1 = attr_sel;
  1891. uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
  1892. uic_cmd.argument3 = mib_val;
  1893. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  1894. if (ret)
  1895. dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
  1896. set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
  1897. return ret;
  1898. }
  1899. EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
  1900. /**
  1901. * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
  1902. * @hba: per adapter instance
  1903. * @attr_sel: uic command argument1
  1904. * @mib_val: the value of the attribute as returned by the UIC command
  1905. * @peer: indicate whether peer or local
  1906. *
  1907. * Returns 0 on success, non-zero value on failure
  1908. */
  1909. int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
  1910. u32 *mib_val, u8 peer)
  1911. {
  1912. struct uic_command uic_cmd = {0};
  1913. static const char *const action[] = {
  1914. "dme-get",
  1915. "dme-peer-get"
  1916. };
  1917. const char *get = action[!!peer];
  1918. int ret;
  1919. struct ufs_pa_layer_attr orig_pwr_info;
  1920. struct ufs_pa_layer_attr temp_pwr_info;
  1921. bool pwr_mode_change = false;
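/*
 * Controllers with UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE can only
 * access peer attributes reliably in an AUTO power mode, so a
 * FAST/SLOW link is temporarily switched to FASTAUTO/SLOWAUTO around
 * the peer get and restored afterwards.
 */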
  1922. if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
  1923. orig_pwr_info = hba->pwr_info;
  1924. temp_pwr_info = orig_pwr_info;
  1925. if (orig_pwr_info.pwr_tx == FAST_MODE ||
  1926. orig_pwr_info.pwr_rx == FAST_MODE) {
  1927. temp_pwr_info.pwr_tx = FASTAUTO_MODE;
  1928. temp_pwr_info.pwr_rx = FASTAUTO_MODE;
  1929. pwr_mode_change = true;
  1930. } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
  1931. orig_pwr_info.pwr_rx == SLOW_MODE) {
  1932. temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
  1933. temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
  1934. pwr_mode_change = true;
  1935. }
  1936. if (pwr_mode_change) {
  1937. ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
  1938. if (ret)
  1939. goto out;
  1940. }
  1941. }
  1942. uic_cmd.command = peer ?
  1943. UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
  1944. uic_cmd.argument1 = attr_sel;
  1945. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  1946. if (ret) {
  1947. dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
  1948. get, UIC_GET_ATTR_ID(attr_sel), ret);
  1949. goto out;
  1950. }
  1951. if (mib_val)
  1952. *mib_val = uic_cmd.argument3;
  1953. if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
  1954. && pwr_mode_change)
  1955. ufshcd_change_power_mode(hba, &orig_pwr_info);
  1956. out:
  1957. return ret;
  1958. }
  1959. EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
  1960. /**
  1961. * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  1962. * state) and waits for it to take effect.
  1963. *
  1964. * @hba: per adapter instance
  1965. * @cmd: UIC command to execute
  1966. *
  1967. * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
1968. * DME_HIBERNATE_EXIT commands take some time to take effect on both the host
1969. * and device UniPro links, and hence their final completion is indicated by
  1970. * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
  1971. * addition to normal UIC command completion Status (UCCS). This function only
  1972. * returns after the relevant status bits indicate the completion.
  1973. *
  1974. * Returns 0 on success, non-zero value on failure
  1975. */
  1976. static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
  1977. {
  1978. struct completion uic_async_done;
  1979. unsigned long flags;
  1980. u8 status;
  1981. int ret;
  1982. mutex_lock(&hba->uic_cmd_mutex);
  1983. init_completion(&uic_async_done);
  1984. ufshcd_add_delay_before_dme_cmd(hba);
  1985. spin_lock_irqsave(hba->host->host_lock, flags);
  1986. hba->uic_async_done = &uic_async_done;
  1987. ret = __ufshcd_send_uic_cmd(hba, cmd);
  1988. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1989. if (ret) {
  1990. dev_err(hba->dev,
  1991. "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
  1992. cmd->command, cmd->argument3, ret);
  1993. goto out;
  1994. }
  1995. ret = ufshcd_wait_for_uic_cmd(hba, cmd);
  1996. if (ret) {
  1997. dev_err(hba->dev,
  1998. "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
  1999. cmd->command, cmd->argument3, ret);
  2000. goto out;
  2001. }
  2002. if (!wait_for_completion_timeout(hba->uic_async_done,
  2003. msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
  2004. dev_err(hba->dev,
  2005. "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
  2006. cmd->command, cmd->argument3);
  2007. ret = -ETIMEDOUT;
  2008. goto out;
  2009. }
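/*
 * UPMCRS reports the outcome of the power mode change request;
 * PWR_LOCAL is treated as the only successful status here, anything
 * else is propagated as an error.
 */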
  2010. status = ufshcd_get_upmcrs(hba);
  2011. if (status != PWR_LOCAL) {
  2012. dev_err(hba->dev,
  2013. "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
  2014. cmd->command, status);
  2015. ret = (status != PWR_OK) ? status : -1;
  2016. }
  2017. out:
  2018. spin_lock_irqsave(hba->host->host_lock, flags);
  2019. hba->uic_async_done = NULL;
  2020. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2021. mutex_unlock(&hba->uic_cmd_mutex);
  2022. return ret;
  2023. }
  2024. /**
2025. * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
2026. * using DME_SET primitives.
2027. * @hba: per adapter instance
2028. * @mode: power mode value
  2029. *
  2030. * Returns 0 on success, non-zero value on failure
  2031. */
  2032. static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
  2033. {
  2034. struct uic_command uic_cmd = {0};
  2035. int ret;
  2036. if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
  2037. ret = ufshcd_dme_set(hba,
  2038. UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
  2039. if (ret) {
  2040. dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
  2041. __func__, ret);
  2042. goto out;
  2043. }
  2044. }
  2045. uic_cmd.command = UIC_CMD_DME_SET;
  2046. uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
  2047. uic_cmd.argument3 = mode;
  2048. ufshcd_hold(hba, false);
  2049. ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  2050. ufshcd_release(hba);
  2051. out:
  2052. return ret;
  2053. }
  2054. static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
  2055. {
  2056. struct uic_command uic_cmd = {0};
  2057. uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
  2058. return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  2059. }
  2060. static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
  2061. {
  2062. struct uic_command uic_cmd = {0};
  2063. int ret;
  2064. uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
  2065. ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
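/*
 * If hibern8 exit fails the link state is unknown, so mark the link
 * off and attempt a full host reset and restore to recover.
 */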
  2066. if (ret) {
  2067. ufshcd_set_link_off(hba);
  2068. ret = ufshcd_host_reset_and_restore(hba);
  2069. }
  2070. return ret;
  2071. }
  2072. /**
  2073. * ufshcd_init_pwr_info - setting the POR (power on reset)
  2074. * values in hba power info
  2075. * @hba: per-adapter instance
  2076. */
  2077. static void ufshcd_init_pwr_info(struct ufs_hba *hba)
  2078. {
  2079. hba->pwr_info.gear_rx = UFS_PWM_G1;
  2080. hba->pwr_info.gear_tx = UFS_PWM_G1;
  2081. hba->pwr_info.lane_rx = 1;
  2082. hba->pwr_info.lane_tx = 1;
  2083. hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
  2084. hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
  2085. hba->pwr_info.hs_rate = 0;
  2086. }
  2087. /**
  2088. * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
  2089. * @hba: per-adapter instance
  2090. */
  2091. static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
  2092. {
  2093. struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
  2094. if (hba->max_pwr_info.is_valid)
  2095. return 0;
  2096. pwr_info->pwr_tx = FASTAUTO_MODE;
  2097. pwr_info->pwr_rx = FASTAUTO_MODE;
  2098. pwr_info->hs_rate = PA_HS_MODE_B;
  2099. /* Get the connected lane count */
  2100. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
  2101. &pwr_info->lane_rx);
  2102. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
  2103. &pwr_info->lane_tx);
  2104. if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
  2105. dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
  2106. __func__,
  2107. pwr_info->lane_rx,
  2108. pwr_info->lane_tx);
  2109. return -EINVAL;
  2110. }
  2111. /*
  2112. * First, get the maximum gears of HS speed.
  2113. * If a zero value, it means there is no HSGEAR capability.
  2114. * Then, get the maximum gears of PWM speed.
  2115. */
  2116. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
  2117. if (!pwr_info->gear_rx) {
  2118. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
  2119. &pwr_info->gear_rx);
  2120. if (!pwr_info->gear_rx) {
  2121. dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
  2122. __func__, pwr_info->gear_rx);
  2123. return -EINVAL;
  2124. }
  2125. pwr_info->pwr_rx = SLOWAUTO_MODE;
  2126. }
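/*
 * The host TX gear is limited by what the peer device can receive,
 * so read the peer's maximum RX gear for the TX direction.
 */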
  2127. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
  2128. &pwr_info->gear_tx);
  2129. if (!pwr_info->gear_tx) {
  2130. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
  2131. &pwr_info->gear_tx);
  2132. if (!pwr_info->gear_tx) {
  2133. dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
  2134. __func__, pwr_info->gear_tx);
  2135. return -EINVAL;
  2136. }
  2137. pwr_info->pwr_tx = SLOWAUTO_MODE;
  2138. }
  2139. hba->max_pwr_info.is_valid = true;
  2140. return 0;
  2141. }
  2142. static int ufshcd_change_power_mode(struct ufs_hba *hba,
  2143. struct ufs_pa_layer_attr *pwr_mode)
  2144. {
  2145. int ret;
  2146. /* if already configured to the requested pwr_mode */
  2147. if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
  2148. pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
  2149. pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
  2150. pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
  2151. pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
  2152. pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
  2153. pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
  2154. dev_dbg(hba->dev, "%s: power already configured\n", __func__);
  2155. return 0;
  2156. }
  2157. /*
  2158. * Configure attributes for power mode change with below.
  2159. * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
  2160. * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
  2161. * - PA_HSSERIES
  2162. */
  2163. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
  2164. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
  2165. pwr_mode->lane_rx);
  2166. if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
  2167. pwr_mode->pwr_rx == FAST_MODE)
  2168. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
  2169. else
  2170. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
  2171. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
  2172. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
  2173. pwr_mode->lane_tx);
  2174. if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
  2175. pwr_mode->pwr_tx == FAST_MODE)
  2176. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
  2177. else
  2178. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
  2179. if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
  2180. pwr_mode->pwr_tx == FASTAUTO_MODE ||
  2181. pwr_mode->pwr_rx == FAST_MODE ||
  2182. pwr_mode->pwr_tx == FAST_MODE)
  2183. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
  2184. pwr_mode->hs_rate);
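/*
 * Per the UniPro PA_PWRMODE layout, the RX power mode occupies the
 * upper nibble and the TX power mode the lower nibble, hence
 * (pwr_rx << 4) | pwr_tx below.
 */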
  2185. ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
  2186. | pwr_mode->pwr_tx);
  2187. if (ret) {
  2188. dev_err(hba->dev,
  2189. "%s: power mode change failed %d\n", __func__, ret);
  2190. } else {
  2191. ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
  2192. pwr_mode);
  2193. memcpy(&hba->pwr_info, pwr_mode,
  2194. sizeof(struct ufs_pa_layer_attr));
  2195. }
  2196. return ret;
  2197. }
  2198. /**
  2199. * ufshcd_config_pwr_mode - configure a new power mode
  2200. * @hba: per-adapter instance
  2201. * @desired_pwr_mode: desired power configuration
  2202. */
  2203. static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
  2204. struct ufs_pa_layer_attr *desired_pwr_mode)
  2205. {
  2206. struct ufs_pa_layer_attr final_params = { 0 };
  2207. int ret;
  2208. ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
  2209. desired_pwr_mode, &final_params);
  2210. if (ret)
  2211. memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
  2212. ret = ufshcd_change_power_mode(hba, &final_params);
  2213. return ret;
  2214. }
  2215. /**
  2216. * ufshcd_complete_dev_init() - checks device readiness
2217. * @hba: per-adapter instance
  2218. *
  2219. * Set fDeviceInit flag and poll until device toggles it.
  2220. */
  2221. static int ufshcd_complete_dev_init(struct ufs_hba *hba)
  2222. {
  2223. int i, retries, err = 0;
  2224. bool flag_res = 1;
  2225. for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
  2226. /* Set the fDeviceInit flag */
  2227. err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
  2228. QUERY_FLAG_IDN_FDEVICEINIT, NULL);
  2229. if (!err || err == -ETIMEDOUT)
  2230. break;
  2231. dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
  2232. }
  2233. if (err) {
  2234. dev_err(hba->dev,
  2235. "%s setting fDeviceInit flag failed with error %d\n",
  2236. __func__, err);
  2237. goto out;
  2238. }
  2239. /* poll for max. 100 iterations for fDeviceInit flag to clear */
  2240. for (i = 0; i < 100 && !err && flag_res; i++) {
  2241. for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
  2242. err = ufshcd_query_flag(hba,
  2243. UPIU_QUERY_OPCODE_READ_FLAG,
  2244. QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
  2245. if (!err || err == -ETIMEDOUT)
  2246. break;
  2247. dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
  2248. err);
  2249. }
  2250. }
  2251. if (err)
  2252. dev_err(hba->dev,
  2253. "%s reading fDeviceInit flag failed with error %d\n",
  2254. __func__, err);
  2255. else if (flag_res)
  2256. dev_err(hba->dev,
  2257. "%s fDeviceInit was not cleared by the device\n",
  2258. __func__);
  2259. out:
  2260. return err;
  2261. }
  2262. /**
  2263. * ufshcd_make_hba_operational - Make UFS controller operational
  2264. * @hba: per adapter instance
  2265. *
  2266. * To bring UFS host controller to operational state,
  2267. * 1. Enable required interrupts
  2268. * 2. Configure interrupt aggregation
2269. * 3. Program UTRL and UTMRL base addresses
  2270. * 4. Configure run-stop-registers
  2271. *
  2272. * Returns 0 on success, non-zero value on failure
  2273. */
  2274. static int ufshcd_make_hba_operational(struct ufs_hba *hba)
  2275. {
  2276. int err = 0;
  2277. u32 reg;
  2278. /* Enable required interrupts */
  2279. ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
  2280. /* Configure interrupt aggregation */
  2281. if (ufshcd_is_intr_aggr_allowed(hba))
  2282. ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
  2283. else
  2284. ufshcd_disable_intr_aggr(hba);
  2285. /* Configure UTRL and UTMRL base address registers */
  2286. ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
  2287. REG_UTP_TRANSFER_REQ_LIST_BASE_L);
  2288. ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
  2289. REG_UTP_TRANSFER_REQ_LIST_BASE_H);
  2290. ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
  2291. REG_UTP_TASK_REQ_LIST_BASE_L);
  2292. ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
  2293. REG_UTP_TASK_REQ_LIST_BASE_H);
  2294. /*
  2295. * UCRDY, UTMRLDY and UTRLRDY bits must be 1
  2296. * DEI, HEI bits must be 0
  2297. */
  2298. reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
  2299. if (!(ufshcd_get_lists_status(reg))) {
  2300. ufshcd_enable_run_stop_reg(hba);
  2301. } else {
  2302. dev_err(hba->dev,
  2303. "Host controller not ready to process requests");
  2304. err = -EIO;
  2305. goto out;
  2306. }
  2307. out:
  2308. return err;
  2309. }
  2310. /**
  2311. * ufshcd_hba_enable - initialize the controller
  2312. * @hba: per adapter instance
  2313. *
  2314. * The controller resets itself and controller firmware initialization
  2315. * sequence kicks off. When controller is ready it will set
  2316. * the Host Controller Enable bit to 1.
  2317. *
  2318. * Returns 0 on success, non-zero value on failure
  2319. */
  2320. static int ufshcd_hba_enable(struct ufs_hba *hba)
  2321. {
  2322. int retry;
  2323. /*
  2324. * msleep of 1 and 5 used in this function might result in msleep(20),
  2325. * but it was necessary to send the UFS FPGA to reset mode during
  2326. * development and testing of this driver. msleep can be changed to
  2327. * mdelay and retry count can be reduced based on the controller.
  2328. */
  2329. if (!ufshcd_is_hba_active(hba)) {
  2330. /* change controller state to "reset state" */
  2331. ufshcd_hba_stop(hba);
  2332. /*
  2333. * This delay is based on the testing done with UFS host
  2334. * controller FPGA. The delay can be changed based on the
  2335. * host controller used.
  2336. */
  2337. msleep(5);
  2338. }
  2339. /* UniPro link is disabled at this point */
  2340. ufshcd_set_link_off(hba);
  2341. ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
  2342. /* start controller initialization sequence */
  2343. ufshcd_hba_start(hba);
  2344. /*
  2345. * To initialize a UFS host controller HCE bit must be set to 1.
  2346. * During initialization the HCE bit value changes from 1->0->1.
  2347. * When the host controller completes initialization sequence
  2348. * it sets the value of HCE bit to 1. The same HCE bit is read back
  2349. * to check if the controller has completed initialization sequence.
  2350. * So without this delay the value HCE = 1, set in the previous
  2351. * instruction might be read back.
  2352. * This delay can be changed based on the controller.
  2353. */
  2354. msleep(1);
  2355. /* wait for the host controller to complete initialization */
  2356. retry = 10;
  2357. while (ufshcd_is_hba_active(hba)) {
  2358. if (retry) {
  2359. retry--;
  2360. } else {
  2361. dev_err(hba->dev,
  2362. "Controller enable failed\n");
  2363. return -EIO;
  2364. }
  2365. msleep(5);
  2366. }
  2367. /* enable UIC related interrupts */
  2368. ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
  2369. ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
  2370. return 0;
  2371. }
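/*
 * LCC here presumably refers to M-PHY Line Control Commands (named
 * after TX_LCC_ENABLE). Clearing TX_LCC_ENABLE on every connected TX
 * lane of the chosen side (local or peer) stops that side from
 * issuing them; this is used below to work around hosts with
 * UFSHCD_QUIRK_BROKEN_LCC.
 */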
  2372. static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
  2373. {
  2374. int tx_lanes, i, err = 0;
  2375. if (!peer)
  2376. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
  2377. &tx_lanes);
  2378. else
  2379. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
  2380. &tx_lanes);
  2381. for (i = 0; i < tx_lanes; i++) {
  2382. if (!peer)
  2383. err = ufshcd_dme_set(hba,
  2384. UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
  2385. UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
  2386. 0);
  2387. else
  2388. err = ufshcd_dme_peer_set(hba,
  2389. UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
  2390. UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
  2391. 0);
  2392. if (err) {
  2393. dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
  2394. __func__, peer, i, err);
  2395. break;
  2396. }
  2397. }
  2398. return err;
  2399. }
  2400. static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
  2401. {
  2402. return ufshcd_disable_tx_lcc(hba, true);
  2403. }
  2404. /**
  2405. * ufshcd_link_startup - Initialize unipro link startup
  2406. * @hba: per adapter instance
  2407. *
  2408. * Returns 0 for success, non-zero in case of failure
  2409. */
  2410. static int ufshcd_link_startup(struct ufs_hba *hba)
  2411. {
  2412. int ret;
  2413. int retries = DME_LINKSTARTUP_RETRIES;
  2414. do {
  2415. ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
  2416. ret = ufshcd_dme_link_startup(hba);
  2417. /* check if device is detected by inter-connect layer */
  2418. if (!ret && !ufshcd_is_device_present(hba)) {
  2419. dev_err(hba->dev, "%s: Device not present\n", __func__);
  2420. ret = -ENXIO;
  2421. goto out;
  2422. }
  2423. /*
  2424. * DME link lost indication is only received when link is up,
  2425. * but we can't be sure if the link is up until link startup
  2426. * succeeds. So reset the local Uni-Pro and try again.
  2427. */
  2428. if (ret && ufshcd_hba_enable(hba))
  2429. goto out;
  2430. } while (ret && retries--);
  2431. if (ret)
  2432. /* failed to get the link up... retire */
  2433. goto out;
  2434. if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
  2435. ret = ufshcd_disable_device_tx_lcc(hba);
  2436. if (ret)
  2437. goto out;
  2438. }
  2439. /* Include any host controller configuration via UIC commands */
  2440. ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
  2441. if (ret)
  2442. goto out;
  2443. ret = ufshcd_make_hba_operational(hba);
  2444. out:
  2445. if (ret)
  2446. dev_err(hba->dev, "link startup failed %d\n", ret);
  2447. return ret;
  2448. }
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send a NOP OUT UPIU and wait for the NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may not respond
 * with a NOP IN UPIU within the %NOP_OUT_TIMEOUT timeout, in which case we
 * retry sending the NOP OUT for up to %NOP_OUT_RETRIES iterations.
 *
 * Returns zero on success, non-zero error value on failure.
 */
  2459. static int ufshcd_verify_dev_init(struct ufs_hba *hba)
  2460. {
  2461. int err = 0;
  2462. int retries;
  2463. ufshcd_hold(hba, false);
  2464. mutex_lock(&hba->dev_cmd.lock);
  2465. for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
  2466. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
  2467. NOP_OUT_TIMEOUT);
  2468. if (!err || err == -ETIMEDOUT)
  2469. break;
  2470. dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
  2471. }
  2472. mutex_unlock(&hba->dev_cmd.lock);
  2473. ufshcd_release(hba);
  2474. if (err)
  2475. dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
  2476. return err;
  2477. }
/**
 * ufshcd_set_queue_depth - set lun queue depth
 * @sdev: pointer to SCSI device
 *
 * Read the bLUQueueDepth value and activate SCSI tagged command queueing.
 * For W-LUNs that do not provide a unit descriptor, the queue depth is set
 * to 1. For best-effort cases (bLUQueueDepth = 0) the queue depth is set to
 * the maximum number of requests the host can queue.
 */
  2487. static void ufshcd_set_queue_depth(struct scsi_device *sdev)
  2488. {
  2489. int ret = 0;
  2490. u8 lun_qdepth;
  2491. struct ufs_hba *hba;
  2492. hba = shost_priv(sdev->host);
  2493. lun_qdepth = hba->nutrs;
  2494. ret = ufshcd_read_unit_desc_param(hba,
  2495. ufshcd_scsi_to_upiu_lun(sdev->lun),
  2496. UNIT_DESC_PARAM_LU_Q_DEPTH,
  2497. &lun_qdepth,
  2498. sizeof(lun_qdepth));
	/* Some W-LUNs don't support the unit descriptor */
  2500. if (ret == -EOPNOTSUPP)
  2501. lun_qdepth = 1;
  2502. else if (!lun_qdepth)
  2503. /* eventually, we can figure out the real queue depth */
  2504. lun_qdepth = hba->nutrs;
  2505. else
  2506. lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
  2507. dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
  2508. __func__, lun_qdepth);
  2509. scsi_change_queue_depth(sdev, lun_qdepth);
  2510. }
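/*
 * Illustrative examples of the queue depth chosen above (a sketch only, not
 * used by the driver; values assume a host with hba->nutrs == 32 transfer
 * request slots):
 *
 *   bLUQueueDepth read from unit descriptor = 0  -> qdepth = 32 (host max)
 *   bLUQueueDepth read from unit descriptor = 16 -> qdepth = 16
 *   bLUQueueDepth read from unit descriptor = 64 -> qdepth = 32 (clamped)
 *   unit descriptor not supported (-EOPNOTSUPP)  -> qdepth = 1  (W-LUs)
 */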
/**
 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
 * @hba: per-adapter instance
 * @lun: UFS device lun id
 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
 *
 * Returns 0 in case of success and the write protect status is returned in
 * the @b_lu_write_protect parameter.
 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
 * Returns -EINVAL in case of invalid parameters passed to this function.
 */
  2522. static int ufshcd_get_lu_wp(struct ufs_hba *hba,
  2523. u8 lun,
  2524. u8 *b_lu_write_protect)
  2525. {
  2526. int ret;
  2527. if (!b_lu_write_protect)
  2528. ret = -EINVAL;
  2529. /*
  2530. * According to UFS device spec, RPMB LU can't be write
  2531. * protected so skip reading bLUWriteProtect parameter for
  2532. * it. For other W-LUs, UNIT DESCRIPTOR is not available.
  2533. */
  2534. else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
  2535. ret = -ENOTSUPP;
  2536. else
  2537. ret = ufshcd_read_unit_desc_param(hba,
  2538. lun,
  2539. UNIT_DESC_PARAM_LU_WR_PROTECT,
  2540. b_lu_write_protect,
  2541. sizeof(*b_lu_write_protect));
  2542. return ret;
  2543. }
  2544. /**
  2545. * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
  2546. * status
  2547. * @hba: per-adapter instance
  2548. * @sdev: pointer to SCSI device
  2549. *
  2550. */
  2551. static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
  2552. struct scsi_device *sdev)
  2553. {
  2554. if (hba->dev_info.f_power_on_wp_en &&
  2555. !hba->dev_info.is_lu_power_on_wp) {
  2556. u8 b_lu_write_protect;
  2557. if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
  2558. &b_lu_write_protect) &&
  2559. (b_lu_write_protect == UFS_LU_POWER_ON_WP))
  2560. hba->dev_info.is_lu_power_on_wp = true;
  2561. }
  2562. }
  2563. /**
  2564. * ufshcd_slave_alloc - handle initial SCSI device configurations
  2565. * @sdev: pointer to SCSI device
  2566. *
  2567. * Returns success
  2568. */
  2569. static int ufshcd_slave_alloc(struct scsi_device *sdev)
  2570. {
  2571. struct ufs_hba *hba;
  2572. hba = shost_priv(sdev->host);
  2573. /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
  2574. sdev->use_10_for_ms = 1;
  2575. /* allow SCSI layer to restart the device in case of errors */
  2576. sdev->allow_restart = 1;
  2577. /* REPORT SUPPORTED OPERATION CODES is not supported */
  2578. sdev->no_report_opcodes = 1;
  2579. /* WRITE_SAME command is not supported */
  2580. sdev->no_write_same = 1;
  2581. ufshcd_set_queue_depth(sdev);
  2582. ufshcd_get_lu_power_on_wp_status(hba, sdev);
  2583. return 0;
  2584. }
  2585. /**
  2586. * ufshcd_change_queue_depth - change queue depth
  2587. * @sdev: pointer to SCSI device
  2588. * @depth: required depth to set
  2589. *
  2590. * Change queue depth and make sure the max. limits are not crossed.
  2591. */
  2592. static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
  2593. {
  2594. struct ufs_hba *hba = shost_priv(sdev->host);
  2595. if (depth > hba->nutrs)
  2596. depth = hba->nutrs;
  2597. return scsi_change_queue_depth(sdev, depth);
  2598. }
  2599. /**
  2600. * ufshcd_slave_configure - adjust SCSI device configurations
  2601. * @sdev: pointer to SCSI device
  2602. */
  2603. static int ufshcd_slave_configure(struct scsi_device *sdev)
  2604. {
  2605. struct request_queue *q = sdev->request_queue;
  2606. blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
  2607. blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
  2608. return 0;
  2609. }
  2610. /**
  2611. * ufshcd_slave_destroy - remove SCSI device configurations
  2612. * @sdev: pointer to SCSI device
  2613. */
  2614. static void ufshcd_slave_destroy(struct scsi_device *sdev)
  2615. {
  2616. struct ufs_hba *hba;
  2617. hba = shost_priv(sdev->host);
  2618. /* Drop the reference as it won't be needed anymore */
  2619. if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
  2620. unsigned long flags;
  2621. spin_lock_irqsave(hba->host->host_lock, flags);
  2622. hba->sdev_ufs_device = NULL;
  2623. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2624. }
  2625. }
  2626. /**
  2627. * ufshcd_task_req_compl - handle task management request completion
  2628. * @hba: per adapter instance
  2629. * @index: index of the completed request
  2630. * @resp: task management service response
  2631. *
  2632. * Returns non-zero value on error, zero on success
  2633. */
  2634. static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
  2635. {
  2636. struct utp_task_req_desc *task_req_descp;
  2637. struct utp_upiu_task_rsp *task_rsp_upiup;
  2638. unsigned long flags;
  2639. int ocs_value;
  2640. int task_result;
  2641. spin_lock_irqsave(hba->host->host_lock, flags);
  2642. /* Clear completed tasks from outstanding_tasks */
  2643. __clear_bit(index, &hba->outstanding_tasks);
  2644. task_req_descp = hba->utmrdl_base_addr;
  2645. ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
  2646. if (ocs_value == OCS_SUCCESS) {
  2647. task_rsp_upiup = (struct utp_upiu_task_rsp *)
  2648. task_req_descp[index].task_rsp_upiu;
  2649. task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
  2650. task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
  2651. if (resp)
  2652. *resp = (u8)task_result;
  2653. } else {
  2654. dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
  2655. __func__, ocs_value);
  2656. }
  2657. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2658. return ocs_value;
  2659. }
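/*
 * Example of the response decoding above (a sketch, assuming the usual
 * MASK_TASK_RESPONSE == 0xFF00 definition): if the decoded header dword_1
 * of the task response UPIU is 0x00000800, the mask-and-shift yields
 * task_result == 0x08, i.e. UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED, which is
 * the value copied to *resp for the caller to inspect.
 */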
/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns the midlayer result value based on the SCSI command status
 */
  2667. static inline int
  2668. ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
  2669. {
  2670. int result = 0;
  2671. switch (scsi_status) {
  2672. case SAM_STAT_CHECK_CONDITION:
  2673. ufshcd_copy_sense_data(lrbp);
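		/* deliberate fall-through: CHECK CONDITION is still reported as DID_OK */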
  2674. case SAM_STAT_GOOD:
  2675. result |= DID_OK << 16 |
  2676. COMMAND_COMPLETE << 8 |
  2677. scsi_status;
  2678. break;
  2679. case SAM_STAT_TASK_SET_FULL:
  2680. case SAM_STAT_BUSY:
  2681. case SAM_STAT_TASK_ABORTED:
  2682. ufshcd_copy_sense_data(lrbp);
  2683. result |= scsi_status;
  2684. break;
  2685. default:
  2686. result |= DID_ERROR << 16;
  2687. break;
  2688. } /* end of switch */
  2689. return result;
  2690. }
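/*
 * Worked example of the result word built above (a sketch, using the common
 * definitions DID_OK == 0x00, DID_ERROR == 0x07, COMMAND_COMPLETE == 0x00):
 *
 *   SAM_STAT_GOOD (0x00)            -> result = 0x00000000
 *   SAM_STAT_CHECK_CONDITION (0x02) -> result = 0x00000002, sense data copied
 *   unknown status                  -> result = 0x00070000 (DID_ERROR << 16)
 *
 * The SCSI midlayer decodes the host byte from bits 16-23 and the SCSI
 * status from bits 0-7 of this value.
 */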
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
  2698. static inline int
  2699. ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  2700. {
  2701. int result = 0;
  2702. int scsi_status;
  2703. int ocs;
  2704. /* overall command status of utrd */
  2705. ocs = ufshcd_get_tr_ocs(lrbp);
  2706. switch (ocs) {
  2707. case OCS_SUCCESS:
  2708. result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  2709. switch (result) {
  2710. case UPIU_TRANSACTION_RESPONSE:
  2711. /*
  2712. * get the response UPIU result to extract
  2713. * the SCSI command status
  2714. */
  2715. result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
  2716. /*
  2717. * get the result based on SCSI status response
  2718. * to notify the SCSI midlayer of the command status
  2719. */
  2720. scsi_status = result & MASK_SCSI_STATUS;
  2721. result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
  2722. if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
  2723. schedule_work(&hba->eeh_work);
  2724. break;
  2725. case UPIU_TRANSACTION_REJECT_UPIU:
  2726. /* TODO: handle Reject UPIU Response */
  2727. result = DID_ERROR << 16;
  2728. dev_err(hba->dev,
  2729. "Reject UPIU not fully implemented\n");
  2730. break;
  2731. default:
  2732. result = DID_ERROR << 16;
  2733. dev_err(hba->dev,
  2734. "Unexpected request response code = %x\n",
  2735. result);
  2736. break;
  2737. }
  2738. break;
  2739. case OCS_ABORTED:
  2740. result |= DID_ABORT << 16;
  2741. break;
  2742. case OCS_INVALID_COMMAND_STATUS:
  2743. result |= DID_REQUEUE << 16;
  2744. break;
  2745. case OCS_INVALID_CMD_TABLE_ATTR:
  2746. case OCS_INVALID_PRDT_ATTR:
  2747. case OCS_MISMATCH_DATA_BUF_SIZE:
  2748. case OCS_MISMATCH_RESP_UPIU_SIZE:
  2749. case OCS_PEER_COMM_FAILURE:
  2750. case OCS_FATAL_ERROR:
  2751. default:
  2752. result |= DID_ERROR << 16;
  2753. dev_err(hba->dev,
  2754. "OCS error from controller = %x\n", ocs);
  2755. break;
  2756. } /* end of switch */
  2757. return result;
  2758. }
  2759. /**
  2760. * ufshcd_uic_cmd_compl - handle completion of uic command
  2761. * @hba: per adapter instance
  2762. * @intr_status: interrupt status generated by the controller
  2763. */
  2764. static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
  2765. {
  2766. if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
  2767. hba->active_uic_cmd->argument2 |=
  2768. ufshcd_get_uic_cmd_result(hba);
  2769. hba->active_uic_cmd->argument3 =
  2770. ufshcd_get_dme_attr_val(hba);
  2771. complete(&hba->active_uic_cmd->done);
  2772. }
  2773. if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
  2774. complete(hba->uic_async_done);
  2775. }
  2776. /**
  2777. * ufshcd_transfer_req_compl - handle SCSI and query command completion
  2778. * @hba: per adapter instance
  2779. */
  2780. static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
  2781. {
  2782. struct ufshcd_lrb *lrbp;
  2783. struct scsi_cmnd *cmd;
  2784. unsigned long completed_reqs;
  2785. u32 tr_doorbell;
  2786. int result;
  2787. int index;
	/*
	 * Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterwards allows us to handle all the completed requests.
	 * In order to prevent starvation of other interrupts, the DB is read
	 * only once after reset. The downside of this solution is the
	 * possibility of a false interrupt if the device completes another
	 * request after resetting aggregation and before reading the DB.
	 */
  2795. if (ufshcd_is_intr_aggr_allowed(hba))
  2796. ufshcd_reset_intr_aggr(hba);
  2797. tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  2798. completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
  2799. for_each_set_bit(index, &completed_reqs, hba->nutrs) {
  2800. lrbp = &hba->lrb[index];
  2801. cmd = lrbp->cmd;
  2802. if (cmd) {
  2803. result = ufshcd_transfer_rsp_status(hba, lrbp);
  2804. scsi_dma_unmap(cmd);
  2805. cmd->result = result;
  2806. /* Mark completed command as NULL in LRB */
  2807. lrbp->cmd = NULL;
  2808. clear_bit_unlock(index, &hba->lrb_in_use);
  2809. /* Do not touch lrbp after scsi done */
  2810. cmd->scsi_done(cmd);
  2811. __ufshcd_release(hba);
  2812. } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
  2813. if (hba->dev_cmd.complete)
  2814. complete(hba->dev_cmd.complete);
  2815. }
  2816. }
  2817. /* clear corresponding bits of completed commands */
  2818. hba->outstanding_reqs ^= completed_reqs;
  2819. ufshcd_clk_scaling_update_busy(hba);
  2820. /* we might have free'd some tags above */
  2821. wake_up(&hba->dev_cmd.tag_wq);
  2822. }
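/*
 * Worked example of the doorbell arithmetic above: with
 * hba->outstanding_reqs == 0b1011 (tags 0, 1 and 3 issued) and the transfer
 * request doorbell register now reading 0b0010 (only tag 1 still owned by
 * the controller), completed_reqs = 0b0010 ^ 0b1011 = 0b1001, so tags 0 and
 * 3 are completed here and then cleared from outstanding_reqs by the final
 * XOR.
 */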
  2823. /**
  2824. * ufshcd_disable_ee - disable exception event
  2825. * @hba: per-adapter instance
  2826. * @mask: exception event to disable
  2827. *
  2828. * Disables exception event in the device so that the EVENT_ALERT
  2829. * bit is not set.
  2830. *
  2831. * Returns zero on success, non-zero error value on failure.
  2832. */
  2833. static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
  2834. {
  2835. int err = 0;
  2836. u32 val;
  2837. if (!(hba->ee_ctrl_mask & mask))
  2838. goto out;
  2839. val = hba->ee_ctrl_mask & ~mask;
  2840. val &= 0xFFFF; /* 2 bytes */
  2841. err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  2842. QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
  2843. if (!err)
  2844. hba->ee_ctrl_mask &= ~mask;
  2845. out:
  2846. return err;
  2847. }
  2848. /**
  2849. * ufshcd_enable_ee - enable exception event
  2850. * @hba: per-adapter instance
  2851. * @mask: exception event to enable
  2852. *
  2853. * Enable corresponding exception event in the device to allow
  2854. * device to alert host in critical scenarios.
  2855. *
  2856. * Returns zero on success, non-zero error value on failure.
  2857. */
  2858. static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
  2859. {
  2860. int err = 0;
  2861. u32 val;
  2862. if (hba->ee_ctrl_mask & mask)
  2863. goto out;
  2864. val = hba->ee_ctrl_mask | mask;
  2865. val &= 0xFFFF; /* 2 bytes */
  2866. err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  2867. QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
  2868. if (!err)
  2869. hba->ee_ctrl_mask |= mask;
  2870. out:
  2871. return err;
  2872. }
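/*
 * Example of the attribute update above (a sketch): with ee_ctrl_mask
 * currently 0x0000 and mask == MASK_EE_URGENT_BKOPS, the value written to
 * the exception event control attribute (QUERY_ATTR_IDN_EE_CONTROL) is just
 * MASK_EE_URGENT_BKOPS truncated to 16 bits. ee_ctrl_mask only caches the
 * new value once the query succeeds, so a failed write leaves the s/w state
 * unchanged.
 */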
  2873. /**
  2874. * ufshcd_enable_auto_bkops - Allow device managed BKOPS
  2875. * @hba: per-adapter instance
  2876. *
  2877. * Allow device to manage background operations on its own. Enabling
  2878. * this might lead to inconsistent latencies during normal data transfers
  2879. * as the device is allowed to manage its own way of handling background
  2880. * operations.
  2881. *
  2882. * Returns zero on success, non-zero on failure.
  2883. */
  2884. static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
  2885. {
  2886. int err = 0;
  2887. if (hba->auto_bkops_enabled)
  2888. goto out;
  2889. err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
  2890. QUERY_FLAG_IDN_BKOPS_EN, NULL);
  2891. if (err) {
  2892. dev_err(hba->dev, "%s: failed to enable bkops %d\n",
  2893. __func__, err);
  2894. goto out;
  2895. }
  2896. hba->auto_bkops_enabled = true;
  2897. /* No need of URGENT_BKOPS exception from the device */
  2898. err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
  2899. if (err)
  2900. dev_err(hba->dev, "%s: failed to disable exception event %d\n",
  2901. __func__, err);
  2902. out:
  2903. return err;
  2904. }
/**
 * ufshcd_disable_auto_bkops - block device from doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of moving the device into a critical state where it is
 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impact.
 *
 * Returns zero on success, non-zero on failure.
 */
  2916. */
  2917. static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
  2918. {
  2919. int err = 0;
  2920. if (!hba->auto_bkops_enabled)
  2921. goto out;
  2922. /*
  2923. * If host assisted BKOPs is to be enabled, make sure
  2924. * urgent bkops exception is allowed.
  2925. */
  2926. err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
  2927. if (err) {
  2928. dev_err(hba->dev, "%s: failed to enable exception event %d\n",
  2929. __func__, err);
  2930. goto out;
  2931. }
  2932. err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
  2933. QUERY_FLAG_IDN_BKOPS_EN, NULL);
  2934. if (err) {
  2935. dev_err(hba->dev, "%s: failed to disable bkops %d\n",
  2936. __func__, err);
  2937. ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
  2938. goto out;
  2939. }
  2940. hba->auto_bkops_enabled = false;
  2941. out:
  2942. return err;
  2943. }
/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag back to its
 * default value, so the s/w tracking variables have to be updated as well.
 * This function changes the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
  2953. static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
  2954. {
  2955. if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
  2956. hba->auto_bkops_enabled = false;
  2957. hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
  2958. ufshcd_enable_auto_bkops(hba);
  2959. } else {
  2960. hba->auto_bkops_enabled = true;
  2961. hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
  2962. ufshcd_disable_auto_bkops(hba);
  2963. }
  2964. }
  2965. static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
  2966. {
  2967. return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
  2968. QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
  2969. }
/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 * @status: bkops_status value
 *
 * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
 * flag in the device to permit background operations if the device's
 * bkops_status is greater than or equal to the "status" argument passed to
 * this function; disable it otherwise.
 *
 * Returns 0 for success, non-zero in case of failure.
 *
 * NOTE: Callers of this function can check the "hba->auto_bkops_enabled"
 * flag to know whether auto bkops is enabled or disabled after this function
 * returns control to them.
 */
  2986. static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
  2987. enum bkops_status status)
  2988. {
  2989. int err;
  2990. u32 curr_status = 0;
  2991. err = ufshcd_get_bkops_status(hba, &curr_status);
  2992. if (err) {
  2993. dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
  2994. __func__, err);
  2995. goto out;
  2996. } else if (curr_status > BKOPS_STATUS_MAX) {
  2997. dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
  2998. __func__, curr_status);
  2999. err = -EINVAL;
  3000. goto out;
  3001. }
  3002. if (curr_status >= status)
  3003. err = ufshcd_enable_auto_bkops(hba);
  3004. else
  3005. err = ufshcd_disable_auto_bkops(hba);
  3006. out:
  3007. return err;
  3008. }
/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable the fBackgroundOpsEn flag in the device to permit background
 * operations.
 *
 * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative
 * error value for any other failure.
 */
  3019. static int ufshcd_urgent_bkops(struct ufs_hba *hba)
  3020. {
  3021. return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
  3022. }
  3023. static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
  3024. {
  3025. return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
  3026. QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
  3027. }
  3028. /**
  3029. * ufshcd_exception_event_handler - handle exceptions raised by device
  3030. * @work: pointer to work data
  3031. *
  3032. * Read bExceptionEventStatus attribute from the device and handle the
  3033. * exception event accordingly.
  3034. */
  3035. static void ufshcd_exception_event_handler(struct work_struct *work)
  3036. {
  3037. struct ufs_hba *hba;
  3038. int err;
  3039. u32 status = 0;
  3040. hba = container_of(work, struct ufs_hba, eeh_work);
  3041. pm_runtime_get_sync(hba->dev);
  3042. scsi_block_requests(hba->host);
  3043. err = ufshcd_get_ee_status(hba, &status);
  3044. if (err) {
  3045. dev_err(hba->dev, "%s: failed to get exception status %d\n",
  3046. __func__, err);
  3047. goto out;
  3048. }
  3049. status &= hba->ee_ctrl_mask;
  3050. if (status & MASK_EE_URGENT_BKOPS) {
  3051. err = ufshcd_urgent_bkops(hba);
  3052. if (err < 0)
  3053. dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
  3054. __func__, err);
  3055. }
  3056. out:
  3057. scsi_unblock_requests(hba->host);
  3058. pm_runtime_put_sync(hba->dev);
  3059. return;
  3060. }
  3061. /**
  3062. * ufshcd_err_handler - handle UFS errors that require s/w attention
  3063. * @work: pointer to work structure
  3064. */
  3065. static void ufshcd_err_handler(struct work_struct *work)
  3066. {
  3067. struct ufs_hba *hba;
  3068. unsigned long flags;
  3069. u32 err_xfer = 0;
  3070. u32 err_tm = 0;
  3071. int err = 0;
  3072. int tag;
  3073. hba = container_of(work, struct ufs_hba, eh_work);
  3074. pm_runtime_get_sync(hba->dev);
  3075. ufshcd_hold(hba, false);
  3076. spin_lock_irqsave(hba->host->host_lock, flags);
  3077. if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
  3078. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3079. goto out;
  3080. }
  3081. hba->ufshcd_state = UFSHCD_STATE_RESET;
  3082. ufshcd_set_eh_in_progress(hba);
  3083. /* Complete requests that have door-bell cleared by h/w */
  3084. ufshcd_transfer_req_compl(hba);
  3085. ufshcd_tmc_handler(hba);
  3086. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3087. /* Clear pending transfer requests */
  3088. for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
  3089. if (ufshcd_clear_cmd(hba, tag))
  3090. err_xfer |= 1 << tag;
  3091. /* Clear pending task management requests */
  3092. for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
  3093. if (ufshcd_clear_tm_cmd(hba, tag))
  3094. err_tm |= 1 << tag;
  3095. /* Complete the requests that are cleared by s/w */
  3096. spin_lock_irqsave(hba->host->host_lock, flags);
  3097. ufshcd_transfer_req_compl(hba);
  3098. ufshcd_tmc_handler(hba);
  3099. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3100. /* Fatal errors need reset */
  3101. if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
  3102. ((hba->saved_err & UIC_ERROR) &&
  3103. (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
  3104. err = ufshcd_reset_and_restore(hba);
  3105. if (err) {
  3106. dev_err(hba->dev, "%s: reset and restore failed\n",
  3107. __func__);
  3108. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  3109. }
		/*
		 * Inform the SCSI mid-layer that we did a reset so that it
		 * can handle Unit Attention properly.
		 */
  3114. scsi_report_bus_reset(hba->host, 0);
  3115. hba->saved_err = 0;
  3116. hba->saved_uic_err = 0;
  3117. }
  3118. ufshcd_clear_eh_in_progress(hba);
  3119. out:
  3120. scsi_unblock_requests(hba->host);
  3121. ufshcd_release(hba);
  3122. pm_runtime_put_sync(hba->dev);
  3123. }
  3124. /**
  3125. * ufshcd_update_uic_error - check and set fatal UIC error flags.
  3126. * @hba: per-adapter instance
  3127. */
  3128. static void ufshcd_update_uic_error(struct ufs_hba *hba)
  3129. {
  3130. u32 reg;
  3131. /* PA_INIT_ERROR is fatal and needs UIC reset */
  3132. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
  3133. if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
  3134. hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
	/* UIC NL/TL/DME errors need software retry */
  3136. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
  3137. if (reg)
  3138. hba->uic_error |= UFSHCD_UIC_NL_ERROR;
  3139. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
  3140. if (reg)
  3141. hba->uic_error |= UFSHCD_UIC_TL_ERROR;
  3142. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
  3143. if (reg)
  3144. hba->uic_error |= UFSHCD_UIC_DME_ERROR;
  3145. dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
  3146. __func__, hba->uic_error);
  3147. }
  3148. /**
  3149. * ufshcd_check_errors - Check for errors that need s/w attention
  3150. * @hba: per-adapter instance
  3151. */
  3152. static void ufshcd_check_errors(struct ufs_hba *hba)
  3153. {
  3154. bool queue_eh_work = false;
  3155. if (hba->errors & INT_FATAL_ERRORS)
  3156. queue_eh_work = true;
  3157. if (hba->errors & UIC_ERROR) {
  3158. hba->uic_error = 0;
  3159. ufshcd_update_uic_error(hba);
  3160. if (hba->uic_error)
  3161. queue_eh_work = true;
  3162. }
  3163. if (queue_eh_work) {
  3164. /* handle fatal errors only when link is functional */
  3165. if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
  3166. /* block commands from scsi mid-layer */
  3167. scsi_block_requests(hba->host);
  3168. /* transfer error masks to sticky bits */
  3169. hba->saved_err |= hba->errors;
  3170. hba->saved_uic_err |= hba->uic_error;
  3171. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  3172. schedule_work(&hba->eh_work);
  3173. }
  3174. }
  3175. /*
  3176. * if (!queue_eh_work) -
  3177. * Other errors are either non-fatal where host recovers
  3178. * itself without s/w intervention or errors that will be
  3179. * handled by the SCSI core layer.
  3180. */
  3181. }
  3182. /**
  3183. * ufshcd_tmc_handler - handle task management function completion
  3184. * @hba: per adapter instance
  3185. */
  3186. static void ufshcd_tmc_handler(struct ufs_hba *hba)
  3187. {
  3188. u32 tm_doorbell;
  3189. tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
  3190. hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
  3191. wake_up(&hba->tm_wq);
  3192. }
  3193. /**
  3194. * ufshcd_sl_intr - Interrupt service routine
  3195. * @hba: per adapter instance
  3196. * @intr_status: contains interrupts generated by the controller
  3197. */
  3198. static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  3199. {
  3200. hba->errors = UFSHCD_ERROR_MASK & intr_status;
  3201. if (hba->errors)
  3202. ufshcd_check_errors(hba);
  3203. if (intr_status & UFSHCD_UIC_MASK)
  3204. ufshcd_uic_cmd_compl(hba, intr_status);
  3205. if (intr_status & UTP_TASK_REQ_COMPL)
  3206. ufshcd_tmc_handler(hba);
  3207. if (intr_status & UTP_TRANSFER_REQ_COMPL)
  3208. ufshcd_transfer_req_compl(hba);
  3209. }
  3210. /**
  3211. * ufshcd_intr - Main interrupt service routine
  3212. * @irq: irq number
  3213. * @__hba: pointer to adapter instance
  3214. *
  3215. * Returns IRQ_HANDLED - If interrupt is valid
  3216. * IRQ_NONE - If invalid interrupt
  3217. */
  3218. static irqreturn_t ufshcd_intr(int irq, void *__hba)
  3219. {
  3220. u32 intr_status;
  3221. irqreturn_t retval = IRQ_NONE;
  3222. struct ufs_hba *hba = __hba;
  3223. spin_lock(hba->host->host_lock);
  3224. intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
  3225. if (intr_status) {
  3226. ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
  3227. ufshcd_sl_intr(hba, intr_status);
  3228. retval = IRQ_HANDLED;
  3229. }
  3230. spin_unlock(hba->host->host_lock);
  3231. return retval;
  3232. }
  3233. static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
  3234. {
  3235. int err = 0;
  3236. u32 mask = 1 << tag;
  3237. unsigned long flags;
  3238. if (!test_bit(tag, &hba->outstanding_tasks))
  3239. goto out;
  3240. spin_lock_irqsave(hba->host->host_lock, flags);
  3241. ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
  3242. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3243. /* poll for max. 1 sec to clear door bell register by h/w */
  3244. err = ufshcd_wait_for_register(hba,
  3245. REG_UTP_TASK_REQ_DOOR_BELL,
  3246. mask, 0, 1000, 1000);
  3247. out:
  3248. return err;
  3249. }
  3250. /**
  3251. * ufshcd_issue_tm_cmd - issues task management commands to controller
  3252. * @hba: per adapter instance
  3253. * @lun_id: LUN ID to which TM command is sent
  3254. * @task_id: task ID to which the TM command is applicable
  3255. * @tm_function: task management function opcode
  3256. * @tm_response: task management service response return value
  3257. *
  3258. * Returns non-zero value on error, zero on success.
  3259. */
  3260. static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
  3261. u8 tm_function, u8 *tm_response)
  3262. {
  3263. struct utp_task_req_desc *task_req_descp;
  3264. struct utp_upiu_task_req *task_req_upiup;
  3265. struct Scsi_Host *host;
  3266. unsigned long flags;
  3267. int free_slot;
  3268. int err;
  3269. int task_tag;
  3270. host = hba->host;
  3271. /*
  3272. * Get free slot, sleep if slots are unavailable.
  3273. * Even though we use wait_event() which sleeps indefinitely,
  3274. * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
  3275. */
  3276. wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
  3277. ufshcd_hold(hba, false);
  3278. spin_lock_irqsave(host->host_lock, flags);
  3279. task_req_descp = hba->utmrdl_base_addr;
  3280. task_req_descp += free_slot;
  3281. /* Configure task request descriptor */
  3282. task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
  3283. task_req_descp->header.dword_2 =
  3284. cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
  3285. /* Configure task request UPIU */
  3286. task_req_upiup =
  3287. (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
  3288. task_tag = hba->nutrs + free_slot;
  3289. task_req_upiup->header.dword_0 =
  3290. UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
  3291. lun_id, task_tag);
  3292. task_req_upiup->header.dword_1 =
  3293. UPIU_HEADER_DWORD(0, tm_function, 0, 0);
  3294. /*
  3295. * The host shall provide the same value for LUN field in the basic
  3296. * header and for Input Parameter.
  3297. */
  3298. task_req_upiup->input_param1 = cpu_to_be32(lun_id);
  3299. task_req_upiup->input_param2 = cpu_to_be32(task_id);
  3300. /* send command to the controller */
  3301. __set_bit(free_slot, &hba->outstanding_tasks);
  3302. ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
  3303. spin_unlock_irqrestore(host->host_lock, flags);
  3304. /* wait until the task management command is completed */
  3305. err = wait_event_timeout(hba->tm_wq,
  3306. test_bit(free_slot, &hba->tm_condition),
  3307. msecs_to_jiffies(TM_CMD_TIMEOUT));
  3308. if (!err) {
  3309. dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
  3310. __func__, tm_function);
  3311. if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
  3314. err = -ETIMEDOUT;
  3315. } else {
  3316. err = ufshcd_task_req_compl(hba, free_slot, tm_response);
  3317. }
  3318. clear_bit(free_slot, &hba->tm_condition);
  3319. ufshcd_put_tm_slot(hba, free_slot);
  3320. wake_up(&hba->tm_tag_wq);
  3321. ufshcd_release(hba);
  3322. return err;
  3323. }
  3324. /**
  3325. * ufshcd_eh_device_reset_handler - device reset handler registered to
  3326. * scsi layer.
  3327. * @cmd: SCSI command pointer
  3328. *
  3329. * Returns SUCCESS/FAILED
  3330. */
  3331. static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
  3332. {
  3333. struct Scsi_Host *host;
  3334. struct ufs_hba *hba;
  3335. unsigned int tag;
  3336. u32 pos;
  3337. int err;
  3338. u8 resp = 0xF;
  3339. struct ufshcd_lrb *lrbp;
  3340. unsigned long flags;
  3341. host = cmd->device->host;
  3342. hba = shost_priv(host);
  3343. tag = cmd->request->tag;
  3344. lrbp = &hba->lrb[tag];
  3345. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
  3346. if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  3347. if (!err)
  3348. err = resp;
  3349. goto out;
  3350. }
  3351. /* clear the commands that were pending for corresponding LUN */
  3352. for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
  3353. if (hba->lrb[pos].lun == lrbp->lun) {
  3354. err = ufshcd_clear_cmd(hba, pos);
  3355. if (err)
  3356. break;
  3357. }
  3358. }
  3359. spin_lock_irqsave(host->host_lock, flags);
  3360. ufshcd_transfer_req_compl(hba);
  3361. spin_unlock_irqrestore(host->host_lock, flags);
  3362. out:
  3363. if (!err) {
  3364. err = SUCCESS;
  3365. } else {
  3366. dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
  3367. err = FAILED;
  3368. }
  3369. return err;
  3370. }
/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in the device by sending the UFS_ABORT_TASK task
 * management command, and in the host controller by clearing the doorbell
 * register. There can be a race where the controller is still sending the
 * command to the device while the abort is being issued. To avoid that,
 * first issue UFS_QUERY_TASK to check whether the command has really been
 * issued and only then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
  3383. static int ufshcd_abort(struct scsi_cmnd *cmd)
  3384. {
  3385. struct Scsi_Host *host;
  3386. struct ufs_hba *hba;
  3387. unsigned long flags;
  3388. unsigned int tag;
  3389. int err = 0;
  3390. int poll_cnt;
  3391. u8 resp = 0xF;
  3392. struct ufshcd_lrb *lrbp;
  3393. u32 reg;
  3394. host = cmd->device->host;
  3395. hba = shost_priv(host);
  3396. tag = cmd->request->tag;
  3397. ufshcd_hold(hba, false);
  3398. /* If command is already aborted/completed, return SUCCESS */
  3399. if (!(test_bit(tag, &hba->outstanding_reqs)))
  3400. goto out;
  3401. reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  3402. if (!(reg & (1 << tag))) {
  3403. dev_err(hba->dev,
  3404. "%s: cmd was completed, but without a notifying intr, tag = %d",
  3405. __func__, tag);
  3406. }
  3407. lrbp = &hba->lrb[tag];
  3408. for (poll_cnt = 100; poll_cnt; poll_cnt--) {
  3409. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
  3410. UFS_QUERY_TASK, &resp);
  3411. if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
  3412. /* cmd pending in the device */
  3413. break;
  3414. } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  3415. /*
  3416. * cmd not pending in the device, check if it is
  3417. * in transition.
  3418. */
  3419. reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  3420. if (reg & (1 << tag)) {
  3421. /* sleep for max. 200us to stabilize */
  3422. usleep_range(100, 200);
  3423. continue;
  3424. }
  3425. /* command completed already */
  3426. goto out;
  3427. } else {
  3428. if (!err)
  3429. err = resp; /* service response error */
  3430. goto out;
  3431. }
  3432. }
  3433. if (!poll_cnt) {
  3434. err = -EBUSY;
  3435. goto out;
  3436. }
  3437. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
  3438. UFS_ABORT_TASK, &resp);
  3439. if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  3440. if (!err)
  3441. err = resp; /* service response error */
  3442. goto out;
  3443. }
  3444. err = ufshcd_clear_cmd(hba, tag);
  3445. if (err)
  3446. goto out;
  3447. scsi_dma_unmap(cmd);
  3448. spin_lock_irqsave(host->host_lock, flags);
  3449. __clear_bit(tag, &hba->outstanding_reqs);
  3450. hba->lrb[tag].cmd = NULL;
  3451. spin_unlock_irqrestore(host->host_lock, flags);
  3452. clear_bit_unlock(tag, &hba->lrb_in_use);
  3453. wake_up(&hba->dev_cmd.tag_wq);
  3454. out:
  3455. if (!err) {
  3456. err = SUCCESS;
  3457. } else {
  3458. dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
  3459. err = FAILED;
  3460. }
  3461. /*
  3462. * This ufshcd_release() corresponds to the original scsi cmd that got
  3463. * aborted here (as we won't get any IRQ for it).
  3464. */
  3465. ufshcd_release(hba);
  3466. return err;
  3467. }
  3468. /**
  3469. * ufshcd_host_reset_and_restore - reset and restore host controller
  3470. * @hba: per-adapter instance
  3471. *
  3472. * Note that host controller reset may issue DME_RESET to
  3473. * local and remote (device) Uni-Pro stack and the attributes
  3474. * are reset to default state.
  3475. *
  3476. * Returns zero on success, non-zero on failure
  3477. */
  3478. static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
  3479. {
  3480. int err;
  3481. unsigned long flags;
  3482. /* Reset the host controller */
  3483. spin_lock_irqsave(hba->host->host_lock, flags);
  3484. ufshcd_hba_stop(hba);
  3485. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3486. err = ufshcd_hba_enable(hba);
  3487. if (err)
  3488. goto out;
  3489. /* Establish the link again and restore the device */
  3490. err = ufshcd_probe_hba(hba);
  3491. if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
  3492. err = -EIO;
  3493. out:
  3494. if (err)
  3495. dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
  3496. return err;
  3497. }
  3498. /**
  3499. * ufshcd_reset_and_restore - reset and re-initialize host/device
  3500. * @hba: per-adapter instance
  3501. *
  3502. * Reset and recover device, host and re-establish link. This
  3503. * is helpful to recover the communication in fatal error conditions.
  3504. *
  3505. * Returns zero on success, non-zero on failure
  3506. */
  3507. static int ufshcd_reset_and_restore(struct ufs_hba *hba)
  3508. {
  3509. int err = 0;
  3510. unsigned long flags;
  3511. int retries = MAX_HOST_RESET_RETRIES;
  3512. do {
  3513. err = ufshcd_host_reset_and_restore(hba);
  3514. } while (err && --retries);
  3515. /*
  3516. * After reset the door-bell might be cleared, complete
  3517. * outstanding requests in s/w here.
  3518. */
  3519. spin_lock_irqsave(hba->host->host_lock, flags);
  3520. ufshcd_transfer_req_compl(hba);
  3521. ufshcd_tmc_handler(hba);
  3522. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3523. return err;
  3524. }
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
  3531. static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
  3532. {
  3533. int err;
  3534. unsigned long flags;
  3535. struct ufs_hba *hba;
  3536. hba = shost_priv(cmd->device->host);
  3537. ufshcd_hold(hba, false);
  3538. /*
  3539. * Check if there is any race with fatal error handling.
  3540. * If so, wait for it to complete. Even though fatal error
  3541. * handling does reset and restore in some cases, don't assume
  3542. * anything out of it. We are just avoiding race here.
  3543. */
  3544. do {
  3545. spin_lock_irqsave(hba->host->host_lock, flags);
  3546. if (!(work_pending(&hba->eh_work) ||
  3547. hba->ufshcd_state == UFSHCD_STATE_RESET))
  3548. break;
  3549. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3550. dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
  3551. flush_work(&hba->eh_work);
  3552. } while (1);
  3553. hba->ufshcd_state = UFSHCD_STATE_RESET;
  3554. ufshcd_set_eh_in_progress(hba);
  3555. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3556. err = ufshcd_reset_and_restore(hba);
  3557. spin_lock_irqsave(hba->host->host_lock, flags);
  3558. if (!err) {
  3559. err = SUCCESS;
  3560. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  3561. } else {
  3562. err = FAILED;
  3563. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  3564. }
  3565. ufshcd_clear_eh_in_progress(hba);
  3566. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3567. ufshcd_release(hba);
  3568. return err;
  3569. }
  3570. /**
  3571. * ufshcd_get_max_icc_level - calculate the ICC level
  3572. * @sup_curr_uA: max. current supported by the regulator
  3573. * @start_scan: row at the desc table to start scan from
  3574. * @buff: power descriptor buffer
  3575. *
  3576. * Returns calculated max ICC level for specific regulator
  3577. */
  3578. static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
  3579. {
  3580. int i;
  3581. int curr_uA;
  3582. u16 data;
  3583. u16 unit;
  3584. for (i = start_scan; i >= 0; i--) {
  3585. data = be16_to_cpu(*((u16 *)(buff + 2*i)));
  3586. unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
  3587. ATTR_ICC_LVL_UNIT_OFFSET;
  3588. curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
  3589. switch (unit) {
  3590. case UFSHCD_NANO_AMP:
  3591. curr_uA = curr_uA / 1000;
  3592. break;
  3593. case UFSHCD_MILI_AMP:
  3594. curr_uA = curr_uA * 1000;
  3595. break;
  3596. case UFSHCD_AMP:
  3597. curr_uA = curr_uA * 1000 * 1000;
  3598. break;
  3599. case UFSHCD_MICRO_AMP:
  3600. default:
  3601. break;
  3602. }
  3603. if (sup_curr_uA >= curr_uA)
  3604. break;
  3605. }
  3606. if (i < 0) {
  3607. i = 0;
  3608. pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
  3609. }
  3610. return (u32)i;
  3611. }
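/*
 * Example of the scan above (a sketch): each 16-bit power descriptor entry
 * encodes a unit in its upper bits and a value in its lower bits, and the
 * loop normalises everything to microamps before comparing, e.g. an entry
 * that encodes 100 mA is treated as 100000 uA and matches a regulator whose
 * sup_curr_uA is at least 100000. The scan walks from the highest ICC level
 * downwards and returns the first level the regulator can supply.
 */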
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
 * ICC level
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 * @len: length of desc_buf
 *
 * Returns the calculated ICC level; 0 if the regulator capabilities are not
 * initialized.
 */
  3621. static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
  3622. u8 *desc_buf, int len)
  3623. {
  3624. u32 icc_level = 0;
  3625. if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
  3626. !hba->vreg_info.vccq2) {
  3627. dev_err(hba->dev,
  3628. "%s: Regulator capability was not set, actvIccLevel=%d",
  3629. __func__, icc_level);
  3630. goto out;
  3631. }
  3632. if (hba->vreg_info.vcc)
  3633. icc_level = ufshcd_get_max_icc_level(
  3634. hba->vreg_info.vcc->max_uA,
  3635. POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
  3636. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
  3637. if (hba->vreg_info.vccq)
  3638. icc_level = ufshcd_get_max_icc_level(
  3639. hba->vreg_info.vccq->max_uA,
  3640. icc_level,
  3641. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
  3642. if (hba->vreg_info.vccq2)
  3643. icc_level = ufshcd_get_max_icc_level(
  3644. hba->vreg_info.vccq2->max_uA,
  3645. icc_level,
  3646. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
  3647. out:
  3648. return icc_level;
  3649. }
  3650. static void ufshcd_init_icc_levels(struct ufs_hba *hba)
  3651. {
  3652. int ret;
  3653. int buff_len = QUERY_DESC_POWER_MAX_SIZE;
  3654. u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
  3655. ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
  3656. if (ret) {
  3657. dev_err(hba->dev,
  3658. "%s: Failed reading power descriptor.len = %d ret = %d",
  3659. __func__, buff_len, ret);
  3660. return;
  3661. }
  3662. hba->init_prefetch_data.icc_level =
  3663. ufshcd_find_max_sup_active_icc_level(hba,
  3664. desc_buf, buff_len);
  3665. dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
  3666. __func__, hba->init_prefetch_data.icc_level);
  3667. ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  3668. QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
  3669. &hba->init_prefetch_data.icc_level);
  3670. if (ret)
  3671. dev_err(hba->dev,
  3672. "%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, hba->init_prefetch_data.icc_level, ret);
  3674. }
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * The UFS device specification requires UFS devices to support 4 well known
 * logical units:
 * "REPORT_LUNS" (address: 01h)
 * "UFS Device" (address: 50h)
 * "RPMB" (address: 44h)
 * "BOOT" (address: 30h)
 * UFS device power management needs to be controlled by the "POWER CONDITION"
 * field of the SSU (START STOP UNIT) command. This "power condition" field
 * only takes effect when it is sent to the "UFS device" well known logical
 * unit, hence we require a scsi_device instance to represent this logical
 * unit in order for the UFS host driver to send the SSU command for power
 * management.
 * We also require a scsi_device instance for the "RPMB" (Replay Protected
 * Memory Block) LU so that a user space process can control this LU. User
 * space may also want to have access to the BOOT LU.
 * This function adds scsi device instances for each of the well known LUs
 * (except the "REPORT LUNS" LU).
 *
 * Returns zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if it failed to add any of the required
 * W-LUs).
 */
  3699. static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
  3700. {
  3701. int ret = 0;
  3702. struct scsi_device *sdev_rpmb;
  3703. struct scsi_device *sdev_boot;
  3704. hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
  3705. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
  3706. if (IS_ERR(hba->sdev_ufs_device)) {
  3707. ret = PTR_ERR(hba->sdev_ufs_device);
  3708. hba->sdev_ufs_device = NULL;
  3709. goto out;
  3710. }
  3711. scsi_device_put(hba->sdev_ufs_device);
  3712. sdev_boot = __scsi_add_device(hba->host, 0, 0,
  3713. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
  3714. if (IS_ERR(sdev_boot)) {
  3715. ret = PTR_ERR(sdev_boot);
  3716. goto remove_sdev_ufs_device;
  3717. }
  3718. scsi_device_put(sdev_boot);
  3719. sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
  3720. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
  3721. if (IS_ERR(sdev_rpmb)) {
  3722. ret = PTR_ERR(sdev_rpmb);
  3723. goto remove_sdev_boot;
  3724. }
  3725. scsi_device_put(sdev_rpmb);
  3726. goto out;
  3727. remove_sdev_boot:
  3728. scsi_remove_device(sdev_boot);
  3729. remove_sdev_ufs_device:
  3730. scsi_remove_device(hba->sdev_ufs_device);
  3731. out:
  3732. return ret;
  3733. }
  3734. /**
  3735. * ufshcd_probe_hba - probe hba to detect device and initialize
  3736. * @hba: per-adapter instance
  3737. *
  3738. * Execute link-startup and verify device initialization
  3739. */
  3740. static int ufshcd_probe_hba(struct ufs_hba *hba)
  3741. {
  3742. int ret;
  3743. ret = ufshcd_link_startup(hba);
  3744. if (ret)
  3745. goto out;
  3746. ufshcd_init_pwr_info(hba);
  3747. /* UniPro link is active now */
  3748. ufshcd_set_link_active(hba);
  3749. ret = ufshcd_verify_dev_init(hba);
  3750. if (ret)
  3751. goto out;
  3752. ret = ufshcd_complete_dev_init(hba);
  3753. if (ret)
  3754. goto out;
  3755. /* UFS device is also active now */
  3756. ufshcd_set_ufs_dev_active(hba);
  3757. ufshcd_force_reset_auto_bkops(hba);
  3758. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  3759. hba->wlun_dev_clr_ua = true;
  3760. if (ufshcd_get_max_pwr_mode(hba)) {
  3761. dev_err(hba->dev,
  3762. "%s: Failed getting max supported power mode\n",
  3763. __func__);
  3764. } else {
  3765. ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
  3766. if (ret)
  3767. dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
  3768. __func__, ret);
  3769. }
  3770. /*
  3771. * If we are in error handling context or in power management callbacks
  3772. * context, no need to scan the host
  3773. */
  3774. if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
  3775. bool flag;
  3776. /* clear any previous UFS device information */
  3777. memset(&hba->dev_info, 0, sizeof(hba->dev_info));
  3778. if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
  3779. QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
  3780. hba->dev_info.f_power_on_wp_en = flag;
  3781. if (!hba->is_init_prefetch)
  3782. ufshcd_init_icc_levels(hba);
  3783. /* Add required well known logical units to scsi mid layer */
  3784. if (ufshcd_scsi_add_wlus(hba))
  3785. goto out;
  3786. scsi_scan_host(hba->host);
  3787. pm_runtime_put_sync(hba->dev);
  3788. }
  3789. if (!hba->is_init_prefetch)
  3790. hba->is_init_prefetch = true;
  3791. /* Resume devfreq after UFS device is detected */
  3792. if (ufshcd_is_clkscaling_enabled(hba))
  3793. devfreq_resume_device(hba->devfreq);
  3794. out:
  3795. /*
  3796. * If we failed to initialize the device or the device is not
  3797. * present, turn off the power/clocks etc.
  3798. */
  3799. if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
  3800. pm_runtime_put_sync(hba->dev);
  3801. ufshcd_hba_exit(hba);
  3802. }
  3803. return ret;
  3804. }
  3805. /**
  3806. * ufshcd_async_scan - asynchronous execution for probing hba
  3807. * @data: data pointer to pass to this function
  3808. * @cookie: cookie data
  3809. */
  3810. static void ufshcd_async_scan(void *data, async_cookie_t cookie)
  3811. {
  3812. struct ufs_hba *hba = (struct ufs_hba *)data;
  3813. ufshcd_probe_hba(hba);
  3814. }
  3815. static struct scsi_host_template ufshcd_driver_template = {
  3816. .module = THIS_MODULE,
  3817. .name = UFSHCD,
  3818. .proc_name = UFSHCD,
  3819. .queuecommand = ufshcd_queuecommand,
  3820. .slave_alloc = ufshcd_slave_alloc,
  3821. .slave_configure = ufshcd_slave_configure,
  3822. .slave_destroy = ufshcd_slave_destroy,
  3823. .change_queue_depth = ufshcd_change_queue_depth,
  3824. .eh_abort_handler = ufshcd_abort,
  3825. .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
  3826. .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
  3827. .this_id = -1,
  3828. .sg_tablesize = SG_ALL,
  3829. .cmd_per_lun = UFSHCD_CMD_PER_LUN,
  3830. .can_queue = UFSHCD_CAN_QUEUE,
  3831. .max_host_blocked = 1,
  3832. .track_queue_depth = 1,
  3833. };
  3834. static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
  3835. int ua)
  3836. {
  3837. int ret;
  3838. if (!vreg)
  3839. return 0;
  3840. ret = regulator_set_load(vreg->reg, ua);
  3841. if (ret < 0) {
  3842. dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
  3843. __func__, vreg->name, ua, ret);
  3844. }
  3845. return ret;
  3846. }
  3847. static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
  3848. struct ufs_vreg *vreg)
  3849. {
  3850. return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
  3851. }
  3852. static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
  3853. struct ufs_vreg *vreg)
  3854. {
  3855. return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
  3856. }
  3857. static int ufshcd_config_vreg(struct device *dev,
  3858. struct ufs_vreg *vreg, bool on)
  3859. {
  3860. int ret = 0;
  3861. struct regulator *reg;
  3862. const char *name;
  3863. int min_uV, uA_load;
  3864. BUG_ON(!vreg);
  3865. reg = vreg->reg;
  3866. name = vreg->name;
  3867. if (regulator_count_voltages(reg) > 0) {
  3868. min_uV = on ? vreg->min_uV : 0;
  3869. ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
  3870. if (ret) {
  3871. dev_err(dev, "%s: %s set voltage failed, err=%d\n",
  3872. __func__, name, ret);
  3873. goto out;
  3874. }
  3875. uA_load = on ? vreg->max_uA : 0;
  3876. ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
  3877. if (ret)
  3878. goto out;
  3879. }
  3880. out:
  3881. return ret;
  3882. }
  3883. static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
  3884. {
  3885. int ret = 0;
  3886. if (!vreg || vreg->enabled)
  3887. goto out;
  3888. ret = ufshcd_config_vreg(dev, vreg, true);
  3889. if (!ret)
  3890. ret = regulator_enable(vreg->reg);
  3891. if (!ret)
  3892. vreg->enabled = true;
  3893. else
  3894. dev_err(dev, "%s: %s enable failed, err=%d\n",
  3895. __func__, vreg->name, ret);
  3896. out:
  3897. return ret;
  3898. }
  3899. static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
  3900. {
  3901. int ret = 0;
  3902. if (!vreg || !vreg->enabled)
  3903. goto out;
  3904. ret = regulator_disable(vreg->reg);
  3905. if (!ret) {
  3906. /* ignore errors on applying disable config */
  3907. ufshcd_config_vreg(dev, vreg, false);
  3908. vreg->enabled = false;
  3909. } else {
  3910. dev_err(dev, "%s: %s disable failed, err=%d\n",
  3911. __func__, vreg->name, ret);
  3912. }
  3913. out:
  3914. return ret;
  3915. }
  3916. static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
  3917. {
  3918. int ret = 0;
  3919. struct device *dev = hba->dev;
  3920. struct ufs_vreg_info *info = &hba->vreg_info;
  3921. if (!info)
  3922. goto out;
  3923. ret = ufshcd_toggle_vreg(dev, info->vcc, on);
  3924. if (ret)
  3925. goto out;
  3926. ret = ufshcd_toggle_vreg(dev, info->vccq, on);
  3927. if (ret)
  3928. goto out;
  3929. ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
  3930. if (ret)
  3931. goto out;
  3932. out:
  3933. if (ret) {
  3934. ufshcd_toggle_vreg(dev, info->vccq2, false);
  3935. ufshcd_toggle_vreg(dev, info->vccq, false);
  3936. ufshcd_toggle_vreg(dev, info->vcc, false);
  3937. }
  3938. return ret;
  3939. }
  3940. static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
  3941. {
  3942. struct ufs_vreg_info *info = &hba->vreg_info;
  3943. if (info)
  3944. return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
  3945. return 0;
  3946. }
  3947. static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
  3948. {
  3949. int ret = 0;
  3950. if (!vreg)
  3951. goto out;
  3952. vreg->reg = devm_regulator_get(dev, vreg->name);
  3953. if (IS_ERR(vreg->reg)) {
  3954. ret = PTR_ERR(vreg->reg);
  3955. dev_err(dev, "%s: %s get failed, err=%d\n",
  3956. __func__, vreg->name, ret);
  3957. }
  3958. out:
  3959. return ret;
  3960. }
  3961. static int ufshcd_init_vreg(struct ufs_hba *hba)
  3962. {
  3963. int ret = 0;
  3964. struct device *dev = hba->dev;
  3965. struct ufs_vreg_info *info = &hba->vreg_info;
  3966. if (!info)
  3967. goto out;
  3968. ret = ufshcd_get_vreg(dev, info->vcc);
  3969. if (ret)
  3970. goto out;
  3971. ret = ufshcd_get_vreg(dev, info->vccq);
  3972. if (ret)
  3973. goto out;
  3974. ret = ufshcd_get_vreg(dev, info->vccq2);
  3975. out:
  3976. return ret;
  3977. }
  3978. static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
  3979. {
  3980. struct ufs_vreg_info *info = &hba->vreg_info;
  3981. if (info)
  3982. return ufshcd_get_vreg(hba->dev, info->vdd_hba);
  3983. return 0;
  3984. }
  3985. static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
  3986. bool skip_ref_clk)
  3987. {
  3988. int ret = 0;
  3989. struct ufs_clk_info *clki;
  3990. struct list_head *head = &hba->clk_list_head;
  3991. unsigned long flags;
  3992. if (!head || list_empty(head))
  3993. goto out;
  3994. list_for_each_entry(clki, head, list) {
  3995. if (!IS_ERR_OR_NULL(clki->clk)) {
  3996. if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
  3997. continue;
  3998. if (on && !clki->enabled) {
  3999. ret = clk_prepare_enable(clki->clk);
  4000. if (ret) {
  4001. dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
  4002. __func__, clki->name, ret);
  4003. goto out;
  4004. }
  4005. } else if (!on && clki->enabled) {
  4006. clk_disable_unprepare(clki->clk);
  4007. }
  4008. clki->enabled = on;
  4009. dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
  4010. clki->name, on ? "en" : "dis");
  4011. }
  4012. }
  4013. ret = ufshcd_vops_setup_clocks(hba, on);
  4014. out:
  4015. if (ret) {
  4016. list_for_each_entry(clki, head, list) {
  4017. if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
  4018. clk_disable_unprepare(clki->clk);
  4019. }
  4020. } else if (on) {
  4021. spin_lock_irqsave(hba->host->host_lock, flags);
  4022. hba->clk_gating.state = CLKS_ON;
  4023. spin_unlock_irqrestore(hba->host->host_lock, flags);
  4024. }
  4025. return ret;
  4026. }
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
	return __ufshcd_setup_clocks(hba, on, false);
}

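/**
 * ufshcd_init_clocks - acquire and set up the host controller clocks
 * @hba: per adapter instance
 *
 * Gets each clock named in hba->clk_list_head via devm_clk_get() and, when
 * a max_freq is specified, sets the clock to that rate and records it as
 * the current frequency. Returns 0 on success or a negative error code.
 */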
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct device *dev = hba->dev;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!clki->name)
			continue;

		clki->clk = devm_clk_get(dev, clki->name);
		if (IS_ERR(clki->clk)) {
			ret = PTR_ERR(clki->clk);
			dev_err(dev, "%s: %s clk get failed, %d\n",
					__func__, clki->name, ret);
			goto out;
		}

		if (clki->max_freq) {
			ret = clk_set_rate(clki->clk, clki->max_freq);
			if (ret) {
				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
					__func__, clki->name,
					clki->max_freq, ret);
				goto out;
			}
			clki->curr_freq = clki->max_freq;
		}
		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
out:
	return ret;
}

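/**
 * ufshcd_variant_hba_init - initialize the vendor specific (variant) driver
 * @hba: per adapter instance
 *
 * Invokes the variant init hook followed by its regulator setup hook. If the
 * regulator setup fails, the variant exit hook is called to undo the init.
 * Returns 0 if there is no variant driver or on success, otherwise an error.
 */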
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->vops)
		goto out;

	err = ufshcd_vops_init(hba);
	if (err)
		goto out;

	err = ufshcd_vops_setup_regulators(hba, true);
	if (err)
		goto out_exit;

	goto out;

out_exit:
	ufshcd_vops_exit(hba);
out:
	if (err)
		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
			__func__, ufshcd_get_var_name(hba), err);
	return err;
}

static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
	if (!hba->vops)
		return;

	ufshcd_vops_setup_clocks(hba, false);

	ufshcd_vops_setup_regulators(hba, false);

	ufshcd_vops_exit(hba);
}

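/**
 * ufshcd_hba_init - power up and initialize host controller resources
 * @hba: per adapter instance
 *
 * Brings up the host controller supply, clocks, UFS device supplies and the
 * variant driver in that order, unwinding on failure. Sets hba->is_powered
 * on success.
 */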
static int ufshcd_hba_init(struct ufs_hba *hba)
{
	int err;

	/*
	 * Handle host controller power separately from the UFS device power
	 * rails: the host controller can be power collapsed independently of
	 * the UFS device, so keeping the supplies separate makes that easier
	 * to control. Also, enable the host controller power before we go
	 * ahead with the rest of the initialization here.
	 */
	err = ufshcd_init_hba_vreg(hba);
	if (err)
		goto out;

	err = ufshcd_setup_hba_vreg(hba, true);
	if (err)
		goto out;

	err = ufshcd_init_clocks(hba);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_setup_clocks(hba, true);
	if (err)
		goto out_disable_hba_vreg;

	err = ufshcd_init_vreg(hba);
	if (err)
		goto out_disable_clks;

	err = ufshcd_setup_vreg(hba, true);
	if (err)
		goto out_disable_clks;

	err = ufshcd_variant_hba_init(hba);
	if (err)
		goto out_disable_vreg;

	hba->is_powered = true;
	goto out;

out_disable_vreg:
	ufshcd_setup_vreg(hba, false);
out_disable_clks:
	ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
	ufshcd_setup_hba_vreg(hba, false);
out:
	return err;
}

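/**
 * ufshcd_hba_exit - release the resources acquired by ufshcd_hba_init()
 * @hba: per adapter instance
 *
 * Tears down the variant driver, device supplies, clocks and host controller
 * supply, and clears hba->is_powered. No-op if the HBA is not powered.
 */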
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
	if (hba->is_powered) {
		ufshcd_variant_hba_exit(hba);
		ufshcd_setup_vreg(hba, false);
		ufshcd_setup_clocks(hba, false);
		ufshcd_setup_hba_vreg(hba, false);
		hba->is_powered = false;
	}
}

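/**
 * ufshcd_send_request_sense - issue REQUEST SENSE to clear unit attention
 * @hba: per adapter instance
 * @sdp: SCSI device to send the command to
 *
 * Allocates a temporary sense buffer and issues a REQUEST SENSE command to
 * @sdp so that a pending unit attention condition is cleared. Returns 0 on
 * success, -ENOMEM on allocation failure or the scsi_execute_req_flags()
 * result otherwise.
 */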
static int
ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
{
	unsigned char cmd[6] = {REQUEST_SENSE,
				0,
				0,
				0,
				SCSI_SENSE_BUFFERSIZE,
				0};
	char *buffer;
	int ret;

	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto out;
	}

	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
				SCSI_SENSE_BUFFERSIZE, NULL,
				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
	if (ret)
		pr_err("%s: failed with err %d\n", __func__, ret);

	kfree(buffer);
out:
	return ret;
}

/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns non-zero if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				     enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	sdp = hba->sdev_ufs_device;
	if (sdp) {
		ret = scsi_device_get(sdp);
		if (!ret && !scsi_device_online(sdp)) {
			ret = -ENODEV;
			scsi_device_put(sdp);
		}
	} else {
		ret = -ENODEV;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		return ret;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

	cmd[4] = pwr_mode << 4;

	/*
	 * Current function would be generally called from the power management
	 * callbacks hence set the REQ_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) & DRIVER_SENSE)
			scsi_print_sense_hdr(sdp, NULL, &sshdr);
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	scsi_device_put(sdp);
	hba->host->eh_noresume = 0;
	return ret;
}

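/**
 * ufshcd_link_state_transition - put the UniPro link in the requested state
 * @hba: per adapter instance
 * @req_link_state: requested link state (active, Hibern8 or off)
 * @check_for_bkops: when set, refuse to turn the link off while auto-bkops
 *		     is enabled, as that would also power down the device
 *
 * Enters Hibern8, or stops the host controller to take the link down, as
 * requested. Returns 0 on success or the Hibern8 entry error code.
 */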
static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		   (!check_for_bkops || (check_for_bkops &&
		   !hba->auto_bkops_enabled))) {
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}

static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
	 * some power.
	 *
	 * If UFS device and link are in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is
	 * anyway in a low power state which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}

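/**
 * ufshcd_vreg_set_hpm - bring the UFS device power rails back to high power
 * @hba: per adapter instance
 *
 * Reverses ufshcd_vreg_set_lpm(): re-enables the supplies that were turned
 * off and moves VCCQ/VCCQ2 out of low power mode, unwinding on failure.
 * Returns 0 on success or a negative error code.
 */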
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}

static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}

/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link are powered off.
 *
 * NOTE: UFS device & link must be active before we enter in this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
			req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto disable_clks;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto out;

	/* UFS device & link must be active before we enter in this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto out;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	       !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

	ufshcd_vreg_set_lpm(hba);

disable_clks:
	/*
	 * The clock scaling needs access to controller registers. Hence, wait
	 * for pending clock scaling work to be done before clocks are
	 * turned off.
	 */
	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space, call them while the
	 * host clocks are still ON.
	 */
	ret = ufshcd_vops_suspend(hba, pm_op);
	if (ret)
		goto set_link_active;

	ret = ufshcd_vops_setup_clocks(hba, false);
	if (ret)
		goto vops_resume;

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	hba->clk_gating.state = CLKS_OFF;
	/*
	 * Disable the host IRQ as no host controller transactions are
	 * expected until resume.
	 */
	ufshcd_disable_irq(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

vops_resume:
	ufshcd_vops_resume(hba, pm_op);
set_link_active:
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}

/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space, call them when the
	 * host clocks are ON.
	 */
	ret = ufshcd_vops_resume(hba, pm_op);
	if (ret)
		goto disable_vreg;

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
		ufshcd_enable_auto_bkops(hba);
	else
		/*
		 * If BKOPs operations are urgently needed at this moment then
		 * keep auto-bkops enabled or else disable it.
		 */
		ufshcd_urgent_bkops(hba);

	hba->clk_gating.is_suspended = false;

	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}

/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba || !hba->is_powered)
		return 0;

	if (pm_runtime_suspended(hba->dev)) {
		if (hba->rpm_lvl == hba->spm_lvl)
			/*
			 * There is possibility that device may still be in
			 * active state during the runtime suspend.
			 */
			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
				goto out;

		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seem to be different from what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes longer, we might optimize it in the
		 * future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);

/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	if (!hba)
		return -EINVAL;

	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		return 0;

	return ufshcd_resume(hba, UFS_SYSTEM_PM);
}
EXPORT_SYMBOL(ufshcd_system_resume);

/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		return 0;

	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state.
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So the following would be the possible power state after this function
 * returns successfully:
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba)
		return -EINVAL;

	if (!hba->is_powered)
		return 0;

	return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);

int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);

/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);

/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);

	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_remove_device(hba->devfreq);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
 * @hba: pointer to Host Bus Adapter (HBA)
 */
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}

/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);

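/**
 * ufshcd_scale_clks - scale the host controller clocks up or down
 * @hba: per adapter instance
 * @scale_up: true to move the clocks to max_freq, false to min_freq
 *
 * Notifies the variant driver before and after the change and sets each
 * listed clock to its maximum or minimum frequency as requested.
 * Returns 0 on success or a negative error code.
 */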
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	return ret;
}

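/**
 * ufshcd_devfreq_target - devfreq target callback for clock scaling
 * @dev: device handle registered with devfreq
 * @freq: requested frequency; UINT_MAX scales up, 0 scales down
 * @flags: devfreq flags (unused)
 *
 * Skips scaling while error handling is in progress or while clock gating
 * work is running, holds the clocks for the duration of the scaling and
 * then calls ufshcd_scale_clks(). Returns 0 or a negative error code.
 */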
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int err = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	bool release_clk_hold = false;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (ufshcd_is_clkgating_allowed(hba) &&
	    (hba->clk_gating.state != CLKS_ON)) {
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			/* hold the vote until the scaling work is completed */
			hba->clk_gating.active_reqs++;
			release_clk_hold = true;
			hba->clk_gating.state = CLKS_ON;
		} else {
			/*
			 * Clock gating work seems to be running in parallel
			 * hence skip scaling work to avoid deadlock between
			 * current scaling work and gating work.
			 */
			spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
			return 0;
		}
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	if (*freq == UINT_MAX)
		err = ufshcd_scale_clks(hba, true);
	else if (*freq == 0)
		err = ufshcd_scale_clks(hba, false);

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (release_clk_hold)
		__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	return err;
}

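/**
 * ufshcd_devfreq_get_dev_status - devfreq get_dev_status callback
 * @dev: device handle registered with devfreq
 * @stat: devfreq status to fill in
 *
 * Reports the total and busy time of the current monitoring window based on
 * the clk_scaling bookkeeping, then starts a new window. Returns 0, or
 * -EINVAL if clock scaling is not enabled.
 */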
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);
	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						   "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
					PTR_ERR(hba->devfreq));
			err = PTR_ERR(hba->devfreq);
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * The device-initialize-sequence hasn't been invoked yet.
	 * Set the device to power-off state
	 */
	ufshcd_set_ufs_dev_poweroff(hba);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);