bfa_svc.c 167 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
7677867796780678167826783678467856786678767886789679067916792679367946795679667976798679968006801680268036804680568066807680868096810681168126813681468156816681768186819682068216822682368246825682668276828682968306831683268336834683568366837683868396840684168426843684468456846684768486849685068516852685368546855685668576858685968606861686268636864686568666867686868696870687168726873687468756876687768786879688068816882688368846885688668876888688968906891689268936894689568966897689868996900690169026903690469056906690769086909691069116912691369146915691669176918691969206921692269236924692569266927692869296930693169326933693469356936693769386939694069416942694369446945694669476948694969506951695269536954695569566957695869596960696169626963696469656966696769686969697069716972697369746975697669776978697969806981698269836984698569866987698869896990699169926993699469956996699769986999700070017002700370047005700670077008700970107011701270137014701570167017701870197020702170227023702470257026702770287029703070317032703370347035703670377038703970407041704270437044704570467047704870497050705170527053705470557056705770587059706070617062
  1. /*
  2. * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  3. * All rights reserved
  4. * www.brocade.com
  5. *
  6. * Linux driver for Brocade Fibre Channel Host Bus Adapter.
  7. *
  8. * This program is free software; you can redistribute it and/or modify it
  9. * under the terms of the GNU General Public License (GPL) Version 2 as
  10. * published by the Free Software Foundation
  11. *
  12. * This program is distributed in the hope that it will be useful, but
  13. * WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * General Public License for more details.
  16. */
  17. #include "bfad_drv.h"
  18. #include "bfad_im.h"
  19. #include "bfa_plog.h"
  20. #include "bfa_cs.h"
  21. #include "bfa_modules.h"
  22. BFA_TRC_FILE(HAL, FCXP);
  23. BFA_MODULE(fcdiag);
  24. BFA_MODULE(fcxp);
  25. BFA_MODULE(sgpg);
  26. BFA_MODULE(lps);
  27. BFA_MODULE(fcport);
  28. BFA_MODULE(rport);
  29. BFA_MODULE(uf);
  30. /*
  31. * LPS related definitions
  32. */
  33. #define BFA_LPS_MIN_LPORTS (1)
  34. #define BFA_LPS_MAX_LPORTS (256)
  35. /*
  36. * Maximum Vports supported per physical port or vf.
  37. */
  38. #define BFA_LPS_MAX_VPORTS_SUPP_CB 255
  39. #define BFA_LPS_MAX_VPORTS_SUPP_CT 190
  40. /*
  41. * FC PORT related definitions
  42. */
/*
 * The port is considered disabled if corresponding physical port or IOC are
 * disabled explicitly
 */
/*
 * True when either the FC port itself or the owning IOC reports disabled;
 * i.e. an explicit disable at either level marks the whole port disabled.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
 * BFA port state machine events
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/* start port state machine */
	BFA_FCPORT_SM_STOP	= 2,	/* stop port state machine */
	BFA_FCPORT_SM_ENABLE	= 3,	/* enable port */
	BFA_FCPORT_SM_DISABLE	= 4,	/* disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/* firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/* firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN	= 7,	/* firmware linkdown event */
	BFA_FCPORT_SM_QRESUME	= 8,	/* CQ space available */
	BFA_FCPORT_SM_HWFAIL	= 9,	/* IOC h/w failure */
	BFA_FCPORT_SM_DPORTENABLE = 10,	/* enable dport */
	BFA_FCPORT_SM_DPORTDISABLE = 11,/* disable dport */
	BFA_FCPORT_SM_FAA_MISCONFIG = 12,	/* FAA misconfiguration */
	BFA_FCPORT_SM_DDPORTENABLE = 13,	/* enable ddport */
	BFA_FCPORT_SM_DDPORTDISABLE = 14,	/* disable ddport */
};
/*
 * BFA port link notification state machine events
 */
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/* linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/* linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/* done notification */
};
/*
 * RPORT related definitions
 */
/*
 * Deliver the rport offline notification: if (__rp)->bfa->fcs is set the
 * callback bfa_cb_rport_offline() is invoked synchronously; otherwise the
 * __bfa_cb_rport_offline completion is deferred via bfa_cb_queue() on the
 * rport's hcb_qe queue element.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
			__bfa_cb_rport_offline, (__rp));		\
	}								\
} while (0)
/*
 * Deliver the rport online notification: if (__rp)->bfa->fcs is set the
 * callback bfa_cb_rport_online() is invoked synchronously; otherwise the
 * __bfa_cb_rport_online completion is deferred via bfa_cb_queue() on the
 * rport's hcb_qe queue element.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
			__bfa_cb_rport_online, (__rp));			\
	}								\
} while (0)
  96. /*
  97. * forward declarations FCXP related functions
  98. */
  99. static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
  100. static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
  101. struct bfi_fcxp_send_rsp_s *fcxp_rsp);
  102. static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
  103. struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
  104. static void bfa_fcxp_qresume(void *cbarg);
  105. static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
  106. struct bfi_fcxp_send_req_s *send_req);
  107. /*
  108. * forward declarations for LPS functions
  109. */
  110. static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
  111. struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
  112. static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
  113. struct bfa_iocfc_cfg_s *cfg,
  114. struct bfa_pcidev_s *pcidev);
  115. static void bfa_lps_detach(struct bfa_s *bfa);
  116. static void bfa_lps_start(struct bfa_s *bfa);
  117. static void bfa_lps_stop(struct bfa_s *bfa);
  118. static void bfa_lps_iocdisable(struct bfa_s *bfa);
  119. static void bfa_lps_login_rsp(struct bfa_s *bfa,
  120. struct bfi_lps_login_rsp_s *rsp);
  121. static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
  122. static void bfa_lps_logout_rsp(struct bfa_s *bfa,
  123. struct bfi_lps_logout_rsp_s *rsp);
  124. static void bfa_lps_reqq_resume(void *lps_arg);
  125. static void bfa_lps_free(struct bfa_lps_s *lps);
  126. static void bfa_lps_send_login(struct bfa_lps_s *lps);
  127. static void bfa_lps_send_logout(struct bfa_lps_s *lps);
  128. static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
  129. static void bfa_lps_login_comp(struct bfa_lps_s *lps);
  130. static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
  131. static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
  132. /*
  133. * forward declaration for LPS state machine
  134. */
  135. static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
  136. static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
  137. static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
  138. event);
  139. static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
  140. static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
  141. enum bfa_lps_event event);
  142. static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
  143. static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
  144. event);
  145. /*
  146. * forward declaration for FC Port functions
  147. */
  148. static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
  149. static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
  150. static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
  151. static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
  152. static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
  153. static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
  154. static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
  155. enum bfa_port_linkstate event, bfa_boolean_t trunk);
  156. static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
  157. enum bfa_port_linkstate event);
  158. static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
  159. static void bfa_fcport_stats_get_timeout(void *cbarg);
  160. static void bfa_fcport_stats_clr_timeout(void *cbarg);
  161. static void bfa_trunk_iocdisable(struct bfa_s *bfa);
  162. /*
  163. * forward declaration for FC PORT state machine
  164. */
  165. static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
  166. enum bfa_fcport_sm_event event);
  167. static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
  168. enum bfa_fcport_sm_event event);
  169. static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
  170. enum bfa_fcport_sm_event event);
  171. static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
  172. enum bfa_fcport_sm_event event);
  173. static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
  174. enum bfa_fcport_sm_event event);
  175. static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
  176. enum bfa_fcport_sm_event event);
  177. static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
  178. enum bfa_fcport_sm_event event);
  179. static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
  180. enum bfa_fcport_sm_event event);
  181. static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
  182. enum bfa_fcport_sm_event event);
  183. static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
  184. enum bfa_fcport_sm_event event);
  185. static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
  186. enum bfa_fcport_sm_event event);
  187. static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
  188. enum bfa_fcport_sm_event event);
  189. static void bfa_fcport_sm_dport(struct bfa_fcport_s *fcport,
  190. enum bfa_fcport_sm_event event);
  191. static void bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
  192. enum bfa_fcport_sm_event event);
  193. static void bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
  194. enum bfa_fcport_sm_event event);
  195. static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
  196. enum bfa_fcport_ln_sm_event event);
  197. static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
  198. enum bfa_fcport_ln_sm_event event);
  199. static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
  200. enum bfa_fcport_ln_sm_event event);
  201. static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
  202. enum bfa_fcport_ln_sm_event event);
  203. static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
  204. enum bfa_fcport_ln_sm_event event);
  205. static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
  206. enum bfa_fcport_ln_sm_event event);
  207. static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
  208. enum bfa_fcport_ln_sm_event event);
/*
 * Map each FC port state-machine handler function to the externally
 * visible port state it represents (used for state reporting).
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	/* Note: both iocdown and iocfail report BFA_PORT_ST_IOCDOWN. */
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_dport), BFA_PORT_ST_DPORT},
	{BFA_SM(bfa_fcport_sm_ddport), BFA_PORT_ST_DDPORT},
	{BFA_SM(bfa_fcport_sm_faa_misconfig), BFA_PORT_ST_FAA_MISCONFIG},
};
  226. /*
  227. * forward declaration for RPORT related functions
  228. */
  229. static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
  230. static void bfa_rport_free(struct bfa_rport_s *rport);
  231. static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
  232. static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
  233. static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
  234. static void __bfa_cb_rport_online(void *cbarg,
  235. bfa_boolean_t complete);
  236. static void __bfa_cb_rport_offline(void *cbarg,
  237. bfa_boolean_t complete);
  238. /*
  239. * forward declaration for RPORT state machine
  240. */
  241. static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
  242. enum bfa_rport_event event);
  243. static void bfa_rport_sm_created(struct bfa_rport_s *rp,
  244. enum bfa_rport_event event);
  245. static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
  246. enum bfa_rport_event event);
  247. static void bfa_rport_sm_online(struct bfa_rport_s *rp,
  248. enum bfa_rport_event event);
  249. static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
  250. enum bfa_rport_event event);
  251. static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
  252. enum bfa_rport_event event);
  253. static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
  254. enum bfa_rport_event event);
  255. static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
  256. enum bfa_rport_event event);
  257. static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
  258. enum bfa_rport_event event);
  259. static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
  260. enum bfa_rport_event event);
  261. static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
  262. enum bfa_rport_event event);
  263. static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
  264. enum bfa_rport_event event);
  265. static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
  266. enum bfa_rport_event event);
  267. /*
  268. * PLOG related definitions
  269. */
/*
 * Validate a portlog record before it is stored.
 *
 * Returns 1 (invalid) when the log type is neither INT nor STRING, or
 * when a non-INT record claims more integer words than a record holds.
 * Returns 0 when the record is acceptable.
 *
 * NOTE(review): the second check intentionally(?) tests log_num_ints
 * only for non-INT records - confirm this is the intended bound.
 */
static int
plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
{
	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
		return 1;

	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
		return 1;

	return 0;
}
  281. static u64
  282. bfa_get_log_time(void)
  283. {
  284. u64 system_time = 0;
  285. struct timeval tv;
  286. do_gettimeofday(&tv);
  287. /* We are interested in seconds only. */
  288. system_time = tv.tv_sec;
  289. return system_time;
  290. }
/*
 * Append one record to the portlog ring buffer.
 *
 * The record is dropped when logging is disabled or when it fails
 * validation.  When the ring is full, the oldest entry is overwritten:
 * if advancing the tail lands on the head, the head is pushed forward.
 */
static void
bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
{
	u16 tail;
	struct bfa_plog_rec_s *pl_recp;

	if (plog->plog_enabled == 0)
		return;

	if (plkd_validate_logrec(pl_rec)) {
		WARN_ON(1);
		return;
	}

	tail = plog->tail;

	pl_recp = &(plog->plog_recs[tail]);
	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));

	/* Stamp the stored copy with the current time (seconds). */
	pl_recp->tv = bfa_get_log_time();
	BFA_PL_LOG_REC_INCR(plog->tail);

	/* Ring wrapped onto the oldest record: advance the head too. */
	if (plog->head == plog->tail)
		BFA_PL_LOG_REC_INCR(plog->head);
}
  310. void
  311. bfa_plog_init(struct bfa_plog_s *plog)
  312. {
  313. memset((char *)plog, 0, sizeof(struct bfa_plog_s));
  314. memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
  315. plog->head = plog->tail = 0;
  316. plog->plog_enabled = 1;
  317. }
  318. void
  319. bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  320. enum bfa_plog_eid event,
  321. u16 misc, char *log_str)
  322. {
  323. struct bfa_plog_rec_s lp;
  324. if (plog->plog_enabled) {
  325. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  326. lp.mid = mid;
  327. lp.eid = event;
  328. lp.log_type = BFA_PL_LOG_TYPE_STRING;
  329. lp.misc = misc;
  330. strlcpy(lp.log_entry.string_log, log_str,
  331. BFA_PL_STRING_LOG_SZ);
  332. lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
  333. bfa_plog_add(plog, &lp);
  334. }
  335. }
  336. void
  337. bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  338. enum bfa_plog_eid event,
  339. u16 misc, u32 *intarr, u32 num_ints)
  340. {
  341. struct bfa_plog_rec_s lp;
  342. u32 i;
  343. if (num_ints > BFA_PL_INT_LOG_SZ)
  344. num_ints = BFA_PL_INT_LOG_SZ;
  345. if (plog->plog_enabled) {
  346. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  347. lp.mid = mid;
  348. lp.eid = event;
  349. lp.log_type = BFA_PL_LOG_TYPE_INT;
  350. lp.misc = misc;
  351. for (i = 0; i < num_ints; i++)
  352. lp.log_entry.int_log[i] = intarr[i];
  353. lp.log_num_ints = (u8) num_ints;
  354. bfa_plog_add(plog, &lp);
  355. }
  356. }
  357. void
  358. bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  359. enum bfa_plog_eid event,
  360. u16 misc, struct fchs_s *fchdr)
  361. {
  362. struct bfa_plog_rec_s lp;
  363. u32 *tmp_int = (u32 *) fchdr;
  364. u32 ints[BFA_PL_INT_LOG_SZ];
  365. if (plog->plog_enabled) {
  366. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  367. ints[0] = tmp_int[0];
  368. ints[1] = tmp_int[1];
  369. ints[2] = tmp_int[4];
  370. bfa_plog_intarr(plog, mid, event, misc, ints, 3);
  371. }
  372. }
  373. void
  374. bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
  375. enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
  376. u32 pld_w0)
  377. {
  378. struct bfa_plog_rec_s lp;
  379. u32 *tmp_int = (u32 *) fchdr;
  380. u32 ints[BFA_PL_INT_LOG_SZ];
  381. if (plog->plog_enabled) {
  382. memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
  383. ints[0] = tmp_int[0];
  384. ints[1] = tmp_int[1];
  385. ints[2] = tmp_int[4];
  386. ints[3] = pld_w0;
  387. bfa_plog_intarr(plog, mid, event, misc, ints, 4);
  388. }
  389. }
  390. /*
  391. * fcxp_pvt BFA FCXP private functions
  392. */
/*
 * Carve the per-FCXP structure array out of the module's KVA block and
 * build the free pools.  The first half of the instances becomes the
 * request pool, the second half the response pool.
 */
static void
claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
{
	u16	i;
	struct bfa_fcxp_s *fcxp;

	fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);

	INIT_LIST_HEAD(&mod->fcxp_req_free_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_free_q);
	INIT_LIST_HEAD(&mod->fcxp_active_q);
	INIT_LIST_HEAD(&mod->fcxp_req_unused_q);
	INIT_LIST_HEAD(&mod->fcxp_rsp_unused_q);

	mod->fcxp_list = fcxp;

	for (i = 0; i < mod->num_fcxps; i++) {
		fcxp->fcxp_mod = mod;
		fcxp->fcxp_tag = i;

		/* First half -> request pool, second half -> response pool. */
		if (i < (mod->num_fcxps / 2)) {
			list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
			fcxp->req_rsp = BFA_TRUE;
		} else {
			list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
			fcxp->req_rsp = BFA_FALSE;
		}

		/* Arm the request-queue wait element for deferred sends. */
		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
		fcxp->reqq_waiting = BFA_FALSE;

		fcxp = fcxp + 1;
	}

	/* Advance the module KVA cursor past the claimed array. */
	bfa_mem_kva_curp(mod) = (void *)fcxp;
}
/*
 * Compute the DMA and KVA memory requirements for the configured number
 * of FCXPs and register them in @minfo.  No-op when no FCXPs are
 * configured.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	/*
	 * Per-FCXP payload: req + rsp both small buffers in minimal
	 * config, small req + large rsp otherwise.
	 */
	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	/* Spread the FCXP payloads over as many DMA segments as needed. */
	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
  454. static void
  455. bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
  456. struct bfa_pcidev_s *pcidev)
  457. {
  458. struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
  459. mod->bfa = bfa;
  460. mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
  461. /*
  462. * Initialize FCXP request and response payload sizes.
  463. */
  464. mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
  465. if (!cfg->drvcfg.min_cfg)
  466. mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
  467. INIT_LIST_HEAD(&mod->req_wait_q);
  468. INIT_LIST_HEAD(&mod->rsp_wait_q);
  469. claim_fcxps_mem(mod);
  470. }
/* Module detach hook: nothing to tear down for FCXP. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
/* Module start hook: nothing to do for FCXP. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
/* Module stop hook: nothing to do for FCXP. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
/*
 * IOC failure: recycle the unused pools and fail every active FCXP
 * back to its owner with BFA_STATUS_IOC_FAILURE.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_req_unused_q, &mod->fcxp_req_free_q);
	list_splice_tail_init(&mod->fcxp_rsp_unused_q, &mod->fcxp_rsp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			/* No caller context: complete inline and free. */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			/* Defer completion into the caller's context. */
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
  505. static struct bfa_fcxp_s *
  506. bfa_fcxp_get(struct bfa_fcxp_mod_s *fm, bfa_boolean_t req)
  507. {
  508. struct bfa_fcxp_s *fcxp;
  509. if (req)
  510. bfa_q_deq(&fm->fcxp_req_free_q, &fcxp);
  511. else
  512. bfa_q_deq(&fm->fcxp_rsp_free_q, &fcxp);
  513. if (fcxp)
  514. list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
  515. return fcxp;
  516. }
  517. static void
  518. bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
  519. struct bfa_s *bfa,
  520. u8 *use_ibuf,
  521. u32 *nr_sgles,
  522. bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
  523. bfa_fcxp_get_sglen_t *r_sglen_cbfn,
  524. struct list_head *r_sgpg_q,
  525. int n_sgles,
  526. bfa_fcxp_get_sgaddr_t sga_cbfn,
  527. bfa_fcxp_get_sglen_t sglen_cbfn)
  528. {
  529. WARN_ON(bfa == NULL);
  530. bfa_trc(bfa, fcxp->fcxp_tag);
  531. if (n_sgles == 0) {
  532. *use_ibuf = 1;
  533. } else {
  534. WARN_ON(*sga_cbfn == NULL);
  535. WARN_ON(*sglen_cbfn == NULL);
  536. *use_ibuf = 0;
  537. *r_sga_cbfn = sga_cbfn;
  538. *r_sglen_cbfn = sglen_cbfn;
  539. *nr_sgles = n_sgles;
  540. /*
  541. * alloc required sgpgs
  542. */
  543. if (n_sgles > BFI_SGE_INLINE)
  544. WARN_ON(1);
  545. }
  546. }
/*
 * Initialize an allocated FCXP: remember the caller context and wire
 * up both the request-side and response-side SG configurations.
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	       void *caller, struct bfa_s *bfa, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* Request side. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* Response side. */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
}
/*
 * Release an FCXP: if a caller is queued waiting for an FCXP of this
 * flavor, re-initialize the instance from the waiter's saved parameters
 * and deliver it through the waiter's alloc callback; otherwise move it
 * from the active list back to its free pool.
 */
static void
bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
{
	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
	struct bfa_fcxp_wqe_s *wqe;

	/* First serve any waiter of the matching flavor. */
	if (fcxp->req_rsp)
		bfa_q_deq(&mod->req_wait_q, &wqe);
	else
		bfa_q_deq(&mod->rsp_wait_q, &wqe);

	if (wqe) {
		bfa_trc(mod->bfa, fcxp->fcxp_tag);

		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
			wqe->nrsp_sgles, wqe->req_sga_cbfn,
			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
			wqe->rsp_sglen_cbfn);
		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
		return;
	}

	/* No waiter: return the instance to its free pool. */
	WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
	list_del(&fcxp->qe);

	if (fcxp->req_rsp)
		list_add_tail(&fcxp->qe, &mod->fcxp_req_free_q);
	else
		list_add_tail(&fcxp->qe, &mod->fcxp_rsp_free_q);
}
/*
 * No-op completion callback installed on discarded FCXPs (see
 * bfa_fcxp_discard()) and used when callers pass a NULL cbfn: the
 * response is intentionally dropped.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
  599. static void
  600. __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
  601. {
  602. struct bfa_fcxp_s *fcxp = cbarg;
  603. if (complete) {
  604. fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
  605. fcxp->rsp_status, fcxp->rsp_len,
  606. fcxp->residue_len, &fcxp->rsp_fchs);
  607. } else {
  608. bfa_fcxp_free(fcxp);
  609. }
  610. }
/*
 * Firmware completion of an FCXP send: byte-swap the response fields,
 * log the received frame, then complete to the caller - inline when
 * there is no caller context, otherwise via a deferred callback.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16			fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* Cache the response; complete in caller context. */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
  652. static void
  653. hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
  654. struct fchs_s *fchs)
  655. {
  656. /*
  657. * TODO: TX ox_id
  658. */
  659. if (reqlen > 0) {
  660. if (fcxp->use_ireqbuf) {
  661. u32 pld_w0 =
  662. *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
  663. bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
  664. BFA_PL_EID_TX,
  665. reqlen + sizeof(struct fchs_s), fchs,
  666. pld_w0);
  667. } else {
  668. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
  669. BFA_PL_EID_TX,
  670. reqlen + sizeof(struct fchs_s),
  671. fchs);
  672. }
  673. } else {
  674. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
  675. reqlen + sizeof(struct fchs_s), fchs);
  676. }
  677. }
  678. static void
  679. hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
  680. struct bfi_fcxp_send_rsp_s *fcxp_rsp)
  681. {
  682. if (fcxp_rsp->rsp_len > 0) {
  683. if (fcxp->use_irspbuf) {
  684. u32 pld_w0 =
  685. *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
  686. bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
  687. BFA_PL_EID_RX,
  688. (u16) fcxp_rsp->rsp_len,
  689. &fcxp_rsp->fchs, pld_w0);
  690. } else {
  691. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
  692. BFA_PL_EID_RX,
  693. (u16) fcxp_rsp->rsp_len,
  694. &fcxp_rsp->fchs);
  695. }
  696. } else {
  697. bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
  698. (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
  699. }
  700. }
/*
 * Handler to resume sending an fcxp when space is available in the CPE queue.
 */
static void
bfa_fcxp_qresume(void *cbarg)
{
	struct bfa_fcxp_s		*fcxp = cbarg;
	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
	struct bfi_fcxp_send_req_s	*send_req;

	fcxp->reqq_waiting = BFA_FALSE;
	/*
	 * NOTE(review): send_req is used unchecked; presumably this
	 * callback only fires once a request-queue entry is guaranteed
	 * to be available - confirm against the reqq resume path.
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	bfa_fcxp_queue(fcxp, send_req);
}
/*
 * Queue an fcxp send request to the firmware.
 */
  717. static void
  718. bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
  719. {
  720. struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
  721. struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
  722. struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
  723. struct bfa_rport_s *rport = reqi->bfa_rport;
  724. bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
  725. bfa_fn_lpu(bfa));
  726. send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
  727. if (rport) {
  728. send_req->rport_fw_hndl = rport->fw_handle;
  729. send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
  730. if (send_req->max_frmsz == 0)
  731. send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
  732. } else {
  733. send_req->rport_fw_hndl = 0;
  734. send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
  735. }
  736. send_req->vf_id = cpu_to_be16(reqi->vf_id);
  737. send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
  738. send_req->class = reqi->class;
  739. send_req->rsp_timeout = rspi->rsp_timeout;
  740. send_req->cts = reqi->cts;
  741. send_req->fchs = reqi->fchs;
  742. send_req->req_len = cpu_to_be32(reqi->req_tot_len);
  743. send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
  744. /*
  745. * setup req sgles
  746. */
  747. if (fcxp->use_ireqbuf == 1) {
  748. bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
  749. BFA_FCXP_REQ_PLD_PA(fcxp));
  750. } else {
  751. if (fcxp->nreq_sgles > 0) {
  752. WARN_ON(fcxp->nreq_sgles != 1);
  753. bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
  754. fcxp->req_sga_cbfn(fcxp->caller, 0));
  755. } else {
  756. WARN_ON(reqi->req_tot_len != 0);
  757. bfa_alen_set(&send_req->rsp_alen, 0, 0);
  758. }
  759. }
  760. /*
  761. * setup rsp sgles
  762. */
  763. if (fcxp->use_irspbuf == 1) {
  764. WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
  765. bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
  766. BFA_FCXP_RSP_PLD_PA(fcxp));
  767. } else {
  768. if (fcxp->nrsp_sgles > 0) {
  769. WARN_ON(fcxp->nrsp_sgles != 1);
  770. bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
  771. fcxp->rsp_sga_cbfn(fcxp->caller, 0));
  772. } else {
  773. WARN_ON(rspi->rsp_maxlen != 0);
  774. bfa_alen_set(&send_req->rsp_alen, 0, 0);
  775. }
  776. }
  777. hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
  778. bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
  779. bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
  780. bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
  781. }
  782. /*
  783. * Allocate an FCXP instance to send a response or to send a request
  784. * that has a response. Request/response buffers are allocated by caller.
  785. *
  786. * @param[in] bfa BFA bfa instance
  787. * @param[in] nreq_sgles Number of SG elements required for request
  788. * buffer. 0, if fcxp internal buffers are used.
  789. * Use bfa_fcxp_get_reqbuf() to get the
  790. * internal req buffer.
  791. * @param[in] req_sgles SG elements describing request buffer. Will be
  792. * copied in by BFA and hence can be freed on
  793. * return from this function.
  794. * @param[in] get_req_sga function ptr to be called to get a request SG
  795. * Address (given the sge index).
  796. * @param[in] get_req_sglen function ptr to be called to get a request SG
  797. * len (given the sge index).
  798. * @param[in] get_rsp_sga function ptr to be called to get a response SG
  799. * Address (given the sge index).
  800. * @param[in] get_rsp_sglen function ptr to be called to get a response SG
  801. * len (given the sge index).
  802. * @param[in] req Allocated FCXP is used to send req or rsp?
  803. * request - BFA_TRUE, response - BFA_FALSE
  804. *
  805. * @return FCXP instance. NULL on failure.
  806. */
  807. struct bfa_fcxp_s *
  808. bfa_fcxp_req_rsp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
  809. int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
  810. bfa_fcxp_get_sglen_t req_sglen_cbfn,
  811. bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
  812. bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
  813. {
  814. struct bfa_fcxp_s *fcxp = NULL;
  815. WARN_ON(bfa == NULL);
  816. fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa), req);
  817. if (fcxp == NULL)
  818. return NULL;
  819. bfa_trc(bfa, fcxp->fcxp_tag);
  820. bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
  821. req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
  822. return fcxp;
  823. }
  824. /*
  825. * Get the internal request buffer pointer
  826. *
  827. * @param[in] fcxp BFA fcxp pointer
  828. *
  829. * @return pointer to the internal request buffer
  830. */
  831. void *
  832. bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
  833. {
  834. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  835. void *reqbuf;
  836. WARN_ON(fcxp->use_ireqbuf != 1);
  837. reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
  838. mod->req_pld_sz + mod->rsp_pld_sz);
  839. return reqbuf;
  840. }
  841. u32
  842. bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
  843. {
  844. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  845. return mod->req_pld_sz;
  846. }
  847. /*
  848. * Get the internal response buffer pointer
  849. *
  850. * @param[in] fcxp BFA fcxp pointer
  851. *
  852. * @return pointer to the internal request buffer
  853. */
  854. void *
  855. bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
  856. {
  857. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  858. void *fcxp_buf;
  859. WARN_ON(fcxp->use_irspbuf != 1);
  860. fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
  861. mod->req_pld_sz + mod->rsp_pld_sz);
  862. /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
  863. return ((u8 *) fcxp_buf) + mod->req_pld_sz;
  864. }
  865. /*
  866. * Free the BFA FCXP
  867. *
  868. * @param[in] fcxp BFA fcxp pointer
  869. *
  870. * @return void
  871. */
  872. void
  873. bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
  874. {
  875. struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
  876. WARN_ON(fcxp == NULL);
  877. bfa_trc(mod->bfa, fcxp->fcxp_tag);
  878. bfa_fcxp_put(fcxp);
  879. }
  880. /*
  881. * Send a FCXP request
  882. *
  883. * @param[in] fcxp BFA fcxp pointer
  884. * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
  885. * @param[in] vf_id virtual Fabric ID
  886. * @param[in] lp_tag lport tag
  887. * @param[in] cts use Continuous sequence
  888. * @param[in] cos fc Class of Service
  889. * @param[in] reqlen request length, does not include FCHS length
  890. * @param[in] fchs fc Header Pointer. The header content will be copied
  891. * in by BFA.
  892. *
  893. * @param[in] cbfn call back function to be called on receiving
  894. * the response
  895. * @param[in] cbarg arg for cbfn
  896. * @param[in] rsp_timeout
  897. * response timeout
  898. *
 * @return void
  900. */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* A NULL cbfn is replaced by a no-op so completion never crashes. */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		/* bfa_fcxp_qresume() will queue the request later. */
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
  938. /*
  939. * Abort a BFA FCXP
  940. *
  941. * @param[in] fcxp BFA fcxp pointer
  942. *
  943. * @return void
  944. */
/*
 * Abort a pending FCXP - not implemented: traces, warns, and always
 * reports BFA_STATUS_OK.
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	WARN_ON(1);
	return BFA_STATUS_OK;
}
/*
 * No FCXP was available: park a wait element describing the request so
 * that bfa_fcxp_put() can hand the next freed FCXP of the matching
 * flavor straight to this waiter via @alloc_cbfn.
 */
void
bfa_fcxp_req_rsp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
			   bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
			   void *caller, int nreq_sgles,
			   int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
			   bfa_fcxp_get_sglen_t req_sglen_cbfn,
			   bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
			   bfa_fcxp_get_sglen_t rsp_sglen_cbfn, bfa_boolean_t req)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* Waiting is only legal when the matching free pool is empty. */
	if (req)
		WARN_ON(!list_empty(&mod->fcxp_req_free_q));
	else
		WARN_ON(!list_empty(&mod->fcxp_rsp_free_q));

	/* Save everything needed to init the FCXP once one frees up. */
	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	if (req)
		list_add_tail(&wqe->qe, &mod->req_wait_q);
	else
		list_add_tail(&wqe->qe, &mod->rsp_wait_q);
}
  981. void
  982. bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
  983. {
  984. struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
  985. WARN_ON(!bfa_q_is_on_q(&mod->req_wait_q, wqe) ||
  986. !bfa_q_is_on_q(&mod->rsp_wait_q, wqe));
  987. list_del(&wqe->qe);
  988. }
  989. void
  990. bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
  991. {
  992. /*
  993. * If waiting for room in request queue, cancel reqq wait
  994. * and free fcxp.
  995. */
  996. if (fcxp->reqq_waiting) {
  997. fcxp->reqq_waiting = BFA_FALSE;
  998. bfa_reqq_wcancel(&fcxp->reqq_wqe);
  999. bfa_fcxp_free(fcxp);
  1000. return;
  1001. }
  1002. fcxp->send_cbfn = bfa_fcxp_null_comp;
  1003. }
  1004. void
  1005. bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
  1006. {
  1007. switch (msg->mhdr.msg_id) {
  1008. case BFI_FCXP_I2H_SEND_RSP:
  1009. hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
  1010. break;
  1011. default:
  1012. bfa_trc(bfa, msg->mhdr.msg_id);
  1013. WARN_ON(1);
  1014. }
  1015. }
  1016. u32
  1017. bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
  1018. {
  1019. struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
  1020. return mod->rsp_pld_sz;
  1021. }
  1022. void
  1023. bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
  1024. {
  1025. struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
  1026. struct list_head *qe;
  1027. int i;
  1028. for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
  1029. if (i < ((mod->num_fcxps - num_fcxp_fw) / 2)) {
  1030. bfa_q_deq_tail(&mod->fcxp_req_free_q, &qe);
  1031. list_add_tail(qe, &mod->fcxp_req_unused_q);
  1032. } else {
  1033. bfa_q_deq_tail(&mod->fcxp_rsp_free_q, &qe);
  1034. list_add_tail(qe, &mod->fcxp_rsp_unused_q);
  1035. }
  1036. }
  1037. }
  1038. /*
  1039. * BFA LPS state machine functions
  1040. */
  1041. /*
  1042. * Init state -- no login
  1043. */
/*
 * Init state -- no login outstanding.  LOGIN either sends the request
 * immediately or waits for request-queue space; most other events are
 * benign and ignored.
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* Send now if the request queue has room, else wait. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* Never logged in: complete the logout right away. */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/*
		 * When topology is set to loop, bfa_lps_set_n2n_pid() sends
		 * this event. Ignore this event.
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1091. /*
  1092. * login is in progress -- awaiting response from firmware
  1093. */
/*
 * Login is in progress -- awaiting response from firmware.  On success
 * the lps goes online (and pushes the N2N PID to firmware when this is a
 * point-to-point port with an assigned PID); on failure it returns to init.
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);
			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* Notify the caller regardless of outcome. */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* PID recorded by caller; nothing to send mid-login. */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1139. /*
  1140. * login pending - awaiting space in request queue
  1141. */
/*
 * Login pending -- awaiting space in the request queue.  RESUME fires when
 * room becomes available; OFFLINE/DELETE must cancel the queue wait.
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		bfa_lps_send_login(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1168. /*
  1169. * login complete
  1170. */
/*
 * Login complete -- port services are online.  Handles logout, FCoE clear
 * virtual link, and (for N2N) pushing the assigned PID to firmware.
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* Send the logout now, or wait for request-queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* Send the PID update now, or wait for queue space. */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
/*
 * Online, waiting for request-queue space to send the N2N PID to firmware
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* Queue space available: send the deferred PID update. */
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/*
		 * Move to logowait; the pending reqq wait is kept, so the
		 * logout is sent when the queue-resume callback fires.
		 */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1245. /*
  1246. * logout in progress - awaiting firmware response
  1247. */
/*
 * Logout in progress -- awaiting firmware response.
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
	case BFA_LPS_SM_OFFLINE:
		/* Treat offline as logout done; complete to the caller. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1266. /*
  1267. * logout pending -- awaiting space in request queue
  1268. */
/*
 * Logout pending -- awaiting space in the request queue.
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* Abandon the logout; cancel the queue wait. */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
  1288. /*
  1289. * lps_pvt BFA LPS private functions
  1290. */
  1291. /*
  1292. * return memory requirement
  1293. */
  1294. static void
  1295. bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
  1296. struct bfa_s *bfa)
  1297. {
  1298. struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
  1299. if (cfg->drvcfg.min_cfg)
  1300. bfa_mem_kva_setup(minfo, lps_kva,
  1301. sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
  1302. else
  1303. bfa_mem_kva_setup(minfo, lps_kva,
  1304. sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
  1305. }
  1306. /*
  1307. * bfa module attach at initialization time
  1308. */
  1309. static void
  1310. bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
  1311. struct bfa_pcidev_s *pcidev)
  1312. {
  1313. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1314. struct bfa_lps_s *lps;
  1315. int i;
  1316. mod->num_lps = BFA_LPS_MAX_LPORTS;
  1317. if (cfg->drvcfg.min_cfg)
  1318. mod->num_lps = BFA_LPS_MIN_LPORTS;
  1319. else
  1320. mod->num_lps = BFA_LPS_MAX_LPORTS;
  1321. mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
  1322. bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
  1323. INIT_LIST_HEAD(&mod->lps_free_q);
  1324. INIT_LIST_HEAD(&mod->lps_active_q);
  1325. INIT_LIST_HEAD(&mod->lps_login_q);
  1326. for (i = 0; i < mod->num_lps; i++, lps++) {
  1327. lps->bfa = bfa;
  1328. lps->bfa_tag = (u8) i;
  1329. lps->reqq = BFA_REQQ_LPS;
  1330. bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
  1331. list_add_tail(&lps->qe, &mod->lps_free_q);
  1332. }
  1333. }
/* LPS module detach: no per-module teardown is required. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
/* LPS module start: nothing to do at start time. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
/* LPS module stop: nothing to do at stop time. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
  1346. /*
  1347. * IOC in disabled state -- consider all lps offline
  1348. */
/*
 * IOC in disabled state -- consider all lps offline.  Both the active and
 * the login-pending queues get the OFFLINE event, then any entries left on
 * the login queue are folded back into the active queue.
 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* Anything still on the login queue rejoins the active queue. */
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
  1365. /*
  1366. * Firmware login response
  1367. */
/*
 * Firmware login response: record the per-status details on the lps, move
 * it from the login queue back to the active queue, and feed FWRSP into
 * its state machine.
 */
static void
bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps;

	WARN_ON(rsp->bfa_tag >= mod->num_lps);
	lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);

	lps->status = rsp->status;
	switch (rsp->status) {
	case BFA_STATUS_OK:
		lps->fw_tag = rsp->fw_tag;
		lps->fport = rsp->f_port;
		/* For N2N (non-fabric) the PID was set by the caller. */
		if (lps->fport)
			lps->lp_pid = rsp->lp_pid;
		lps->npiv_en = rsp->npiv_en;
		lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
		lps->pr_pwwn = rsp->port_name;
		lps->pr_nwwn = rsp->node_name;
		lps->auth_req = rsp->auth_req;
		lps->lp_mac = rsp->lp_mac;
		lps->brcd_switch = rsp->brcd_switch;
		lps->fcf_mac = rsp->fcf_mac;

		break;

	case BFA_STATUS_FABRIC_RJT:
		/* Keep the LS_RJT reason/explanation for the caller. */
		lps->lsrjt_rsn = rsp->lsrjt_rsn;
		lps->lsrjt_expl = rsp->lsrjt_expl;

		break;

	case BFA_STATUS_EPROTOCOL:
		lps->ext_status = rsp->ext_status;

		break;

	case BFA_STATUS_VPORT_MAX:
		/* ext_status = number of further queued logins to fail. */
		if (rsp->ext_status)
			bfa_lps_no_res(lps, rsp->ext_status);
		break;

	default:
		/* Nothing to do with other status */
		break;
	}

	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_active_q);
	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
}
  1410. static void
  1411. bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
  1412. {
  1413. struct bfa_s *bfa = first_lps->bfa;
  1414. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1415. struct list_head *qe, *qe_next;
  1416. struct bfa_lps_s *lps;
  1417. bfa_trc(bfa, count);
  1418. qe = bfa_q_next(first_lps);
  1419. while (count && qe) {
  1420. qe_next = bfa_q_next(qe);
  1421. lps = (struct bfa_lps_s *)qe;
  1422. bfa_trc(bfa, lps->bfa_tag);
  1423. lps->status = first_lps->status;
  1424. list_del(&lps->qe);
  1425. list_add_tail(&lps->qe, &mod->lps_active_q);
  1426. bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
  1427. qe = qe_next;
  1428. count--;
  1429. }
  1430. }
  1431. /*
  1432. * Firmware logout response
  1433. */
  1434. static void
  1435. bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
  1436. {
  1437. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1438. struct bfa_lps_s *lps;
  1439. WARN_ON(rsp->bfa_tag >= mod->num_lps);
  1440. lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
  1441. bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
  1442. }
  1443. /*
  1444. * Firmware received a Clear virtual link request (for FCoE)
  1445. */
  1446. static void
  1447. bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
  1448. {
  1449. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1450. struct bfa_lps_s *lps;
  1451. lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
  1452. bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
  1453. }
  1454. /*
  1455. * Space is available in request queue, resume queueing request to firmware.
  1456. */
  1457. static void
  1458. bfa_lps_reqq_resume(void *lps_arg)
  1459. {
  1460. struct bfa_lps_s *lps = lps_arg;
  1461. bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
  1462. }
  1463. /*
  1464. * lps is freed -- triggered by vport delete
  1465. */
  1466. static void
  1467. bfa_lps_free(struct bfa_lps_s *lps)
  1468. {
  1469. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
  1470. lps->lp_pid = 0;
  1471. list_del(&lps->qe);
  1472. list_add_tail(&lps->qe, &mod->lps_free_q);
  1473. }
  1474. /*
  1475. * send login request to firmware
  1476. */
/*
 * Send login request to firmware.  Caller guarantees request-queue space
 * (hence the WARN_ON); the lps moves to the login queue until the response
 * arrives.
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	m->bfa_tag = lps->bfa_tag;
	m->alpa = lps->alpa;
	m->pdu_size = cpu_to_be16(lps->pdusz);
	m->pwwn = lps->pwwn;
	m->nwwn = lps->nwwn;
	m->fdisc = lps->fdisc;
	m->auth_en = lps->auth_en;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	/* Track the outstanding login on the login queue. */
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}
  1497. /*
  1498. * send logout request to firmware
  1499. */
/*
 * Send logout request to firmware.  Caller guarantees request-queue space.
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
  1512. /*
  1513. * send n2n pid set request to firmware
  1514. */
/*
 * Send the N2N PID-set request to firmware.  Caller guarantees
 * request-queue space.
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
  1527. /*
  1528. * Indirect login completion handler for non-fcs
  1529. */
  1530. static void
  1531. bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
  1532. {
  1533. struct bfa_lps_s *lps = arg;
  1534. if (!complete)
  1535. return;
  1536. if (lps->fdisc)
  1537. bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1538. else
  1539. bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1540. }
  1541. /*
  1542. * Login completion handler -- direct call for fcs, queue for others
  1543. */
  1544. static void
  1545. bfa_lps_login_comp(struct bfa_lps_s *lps)
  1546. {
  1547. if (!lps->bfa->fcs) {
  1548. bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
  1549. lps);
  1550. return;
  1551. }
  1552. if (lps->fdisc)
  1553. bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1554. else
  1555. bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
  1556. }
  1557. /*
  1558. * Indirect logout completion handler for non-fcs
  1559. */
  1560. static void
  1561. bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
  1562. {
  1563. struct bfa_lps_s *lps = arg;
  1564. if (!complete)
  1565. return;
  1566. if (lps->fdisc)
  1567. bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
  1568. else
  1569. bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
  1570. }
  1571. /*
  1572. * Logout completion handler -- direct call for fcs, queue for others
  1573. */
/*
 * Logout completion handler -- direct call for fcs, queue for others.
 * NOTE: in the direct (fcs) path only the FDISC case notifies; base-port
 * (FLOGI) logout has no synchronous completion callback here.
 */
static void
bfa_lps_logout_comp(struct bfa_lps_s *lps)
{
	if (!lps->bfa->fcs) {
		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
			lps);
		return;
	}
	if (lps->fdisc)
		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
}
  1585. /*
  1586. * Clear virtual link completion handler for non-fcs
  1587. */
  1588. static void
  1589. bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
  1590. {
  1591. struct bfa_lps_s *lps = arg;
  1592. if (!complete)
  1593. return;
  1594. /* Clear virtual link to base port will result in link down */
  1595. if (lps->fdisc)
  1596. bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
  1597. }
  1598. /*
  1599. * Received Clear virtual link event --direct call for fcs,
  1600. * queue for others
  1601. */
  1602. static void
  1603. bfa_lps_cvl_event(struct bfa_lps_s *lps)
  1604. {
  1605. if (!lps->bfa->fcs) {
  1606. bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
  1607. lps);
  1608. return;
  1609. }
  1610. /* Clear virtual link to base port will result in link down */
  1611. if (lps->fdisc)
  1612. bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
  1613. }
  1614. /*
  1615. * lps_public BFA LPS public functions
  1616. */
  1617. u32
  1618. bfa_lps_get_max_vport(struct bfa_s *bfa)
  1619. {
  1620. if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
  1621. return BFA_LPS_MAX_VPORTS_SUPP_CT;
  1622. else
  1623. return BFA_LPS_MAX_VPORTS_SUPP_CB;
  1624. }
/*
 * Allocate a lport service tag.
 */
struct bfa_lps_s *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps = NULL;

	/* Pop an lps off the free pool; NULL if the pool is exhausted. */
	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	/* Track it as active and start its state machine in init. */
	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}
  1640. /*
  1641. * Free lport service tag. This can be called anytime after an alloc.
  1642. * No need to wait for any pending login/logout completions.
  1643. */
/*
 * Free lport service tag. This can be called anytime after an alloc.
 * No need to wait for any pending login/logout completions; the state
 * machine releases the lps on the DELETE event.
 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
  1649. /*
  1650. * Initiate a lport login.
  1651. */
  1652. void
  1653. bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
  1654. wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
  1655. {
  1656. lps->uarg = uarg;
  1657. lps->alpa = alpa;
  1658. lps->pdusz = pdusz;
  1659. lps->pwwn = pwwn;
  1660. lps->nwwn = nwwn;
  1661. lps->fdisc = BFA_FALSE;
  1662. lps->auth_en = auth_en;
  1663. bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
  1664. }
  1665. /*
  1666. * Initiate a lport fdisc login.
  1667. */
  1668. void
  1669. bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
  1670. wwn_t nwwn)
  1671. {
  1672. lps->uarg = uarg;
  1673. lps->alpa = 0;
  1674. lps->pdusz = pdusz;
  1675. lps->pwwn = pwwn;
  1676. lps->nwwn = nwwn;
  1677. lps->fdisc = BFA_TRUE;
  1678. lps->auth_en = BFA_FALSE;
  1679. bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
  1680. }
/*
 * Initiate a lport FDISC logout.
 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* The state machine sends (or defers) the logout to firmware. */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
  1689. u8
  1690. bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
  1691. {
  1692. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1693. return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
  1694. }
  1695. /*
  1696. * Return lport services tag given the pid
  1697. */
  1698. u8
  1699. bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
  1700. {
  1701. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1702. struct bfa_lps_s *lps;
  1703. int i;
  1704. for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
  1705. if (lps->lp_pid == pid)
  1706. return lps->bfa_tag;
  1707. }
  1708. /* Return base port tag anyway */
  1709. return 0;
  1710. }
  1711. /*
  1712. * return port id assigned to the base lport
  1713. */
  1714. u32
  1715. bfa_lps_get_base_pid(struct bfa_s *bfa)
  1716. {
  1717. struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
  1718. return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
  1719. }
  1720. /*
  1721. * Set PID in case of n2n (which is assigned during PLOGI)
  1722. */
/*
 * Set PID in case of n2n (which is assigned during PLOGI): record it on
 * the lps, then let the state machine push it to firmware when possible.
 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
  1731. /*
  1732. * LPS firmware message class handler.
  1733. */
/*
 * LPS firmware message class handler: dispatch login/logout responses and
 * clear-virtual-link events to the appropriate handler.
 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u	msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unexpected message class member: trace and flag. */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Post an asynchronous event notification for an FC-port event.  Silently
 * drops the event when no AEN entry is available.
 */
static void
bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
{
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	struct bfa_aen_entry_s  *aen_entry;

	/* May leave aen_entry NULL when the pool is exhausted. */
	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	aen_entry->aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
	aen_entry->aen_data.port.pwwn = fcport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++fcport->bfa->bfa_aen_seq,
				  BFA_AEN_CAT_PORT, event);
}
  1769. /*
  1770. * FC PORT state machine functions
  1771. */
/*
 * Uninitialized state: the port waits for START (after IOC configuration)
 * before enabling; a persistent-disable configuration arrives as DISABLE.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No queue space yet: wait, then send on resume. */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Enable requested but waiting for request-queue space.  QRESUME retries
 * the enable; STOP/DISABLE/HWFAIL must first cancel the queue wait.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* Fabric-assigned-address misconfiguration detected. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Enable request sent to firmware -- awaiting the response or the first
 * link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable now, or wait for request-queue space. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* Fabric-assigned-address misconfiguration detected. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port enabled, link down.  LINKUP transitions to linkup with full logging
 * and AEN notification (plus FIP FCF discovery logging for FCoE).
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* Non-FC mode means FCoE: log FIP FCF discovery outcome. */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.attr.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.attr.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);

		/* If QoS is enabled and it is not online, send AEN */
		if (fcport->cfg.qos_enabled &&
		    fcport->qos_attr.state != BFA_QOS_ONLINE)
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Send disable now, or wait for request-queue space. */
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					 bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		/* Fabric-assigned-address misconfiguration detected. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is enabled and the physical link is up. Handles link loss,
 * administrative disable/stop and IOC failure, logging an AEN that
 * distinguishes a deliberate disable from lost fabric connectivity.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_disabling_qwait);

		/* Link goes down as a side effect of disabling the port. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Log both the offline and the disable transitions. */
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Disabled port: benign offline. Otherwise: connectivity loss. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa)) {
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
		} else {
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		}
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * A disable was requested but no request-queue slot was available; the
 * disable message will be sent once the queue resumes (QRESUME).
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: issue the deferred disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Stop while waiting: cancel the queue wait first. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable arrived before disable was sent: toggle instead. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_FAA_MISCONFIG:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
		bfa_sm_set_state(fcport, bfa_fcport_sm_faa_misconfig);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * A disable followed by an enable are both pending on request-queue
 * space: on resume, send the disable and then immediately the enable
 * (a disable/enable "toggle").
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send the queued disable, then chain the enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable already pending; nothing more to do. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable; back to plain disable wait. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * A disable request has been sent to firmware; waiting for the response
 * (FWRSP) before entering the disabled state.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Firmware acknowledged the disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable requested before disable completed. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is administratively disabled. Accepts an enable request or a
 * transition into diagnostic (dport/ddport) modes.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Queue-full falls back to the qwait variant of enabling. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
		/* Enter D-Port (diagnostic) mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_dport);
		break;

	case BFA_FCPORT_SM_DDPORTENABLE:
		/* Enter dynamic D-Port mode. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_ddport);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
  2259. static void
  2260. bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
  2261. enum bfa_fcport_sm_event event)
  2262. {
  2263. bfa_trc(fcport->bfa, event);
  2264. switch (event) {
  2265. case BFA_FCPORT_SM_START:
  2266. if (bfa_fcport_send_enable(fcport))
  2267. bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
  2268. else
  2269. bfa_sm_set_state(fcport,
  2270. bfa_fcport_sm_enabling_qwait);
  2271. break;
  2272. default:
  2273. /*
  2274. * Ignore all other events.
  2275. */
  2276. ;
  2277. }
  2278. }
  2279. /*
  2280. * Port is enabled. IOC is down/failed.
  2281. */
  2282. static void
  2283. bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
  2284. enum bfa_fcport_sm_event event)
  2285. {
  2286. bfa_trc(fcport->bfa, event);
  2287. switch (event) {
  2288. case BFA_FCPORT_SM_START:
  2289. if (bfa_fcport_send_enable(fcport))
  2290. bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
  2291. else
  2292. bfa_sm_set_state(fcport,
  2293. bfa_fcport_sm_enabling_qwait);
  2294. break;
  2295. default:
  2296. /*
  2297. * Ignore all events.
  2298. */
  2299. ;
  2300. }
  2301. }
  2302. /*
  2303. * Port is disabled. IOC is down/failed.
  2304. */
  2305. static void
  2306. bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
  2307. enum bfa_fcport_sm_event event)
  2308. {
  2309. bfa_trc(fcport->bfa, event);
  2310. switch (event) {
  2311. case BFA_FCPORT_SM_START:
  2312. bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
  2313. break;
  2314. case BFA_FCPORT_SM_ENABLE:
  2315. bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
  2316. break;
  2317. default:
  2318. /*
  2319. * Ignore all events.
  2320. */
  2321. ;
  2322. }
  2323. }
/*
 * Port is in D-Port (diagnostic loopback) mode. Normal enable/disable
 * requests are ignored until D-Port mode is turned off.
 */
static void
bfa_fcport_sm_dport(struct bfa_fcport_s *fcport, enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is dport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	case BFA_FCPORT_SM_DPORTDISABLE:
		/* Leaving D-Port mode returns the port to disabled. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * Port is in dynamic D-Port mode. A DISABLE or DDPORTDISABLE returns
 * the port to the disabled state; most other requests are ignored.
 */
static void
bfa_fcport_sm_ddport(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DISABLE:
	case BFA_FCPORT_SM_DDPORTDISABLE:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_DPORTDISABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port that is ddport
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
/*
 * FAA (Fabric Assigned Address) misconfiguration was detected; the port
 * is held down. Only an explicit disable, stop, or IOC failure moves it
 * out of this state.
 */
static void
bfa_fcport_sm_faa_misconfig(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_DPORTENABLE:
	case BFA_FCPORT_SM_ENABLE:
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore event for a port as there is FAA misconfig
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
  2414. /*
  2415. * Link state is down
  2416. */
  2417. static void
  2418. bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
  2419. enum bfa_fcport_ln_sm_event event)
  2420. {
  2421. bfa_trc(ln->fcport->bfa, event);
  2422. switch (event) {
  2423. case BFA_FCPORT_LN_SM_LINKUP:
  2424. bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
  2425. bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
  2426. break;
  2427. default:
  2428. bfa_sm_fault(ln->fcport->bfa, event);
  2429. }
  2430. }
/*
 * Link state is waiting for down notification
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* A link-up arrived before the down was delivered. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback delivered; settle in the down state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for down notification and there is a pending up
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The pending up was cancelled by another down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down delivered; now queue the pending up callback. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
  2470. /*
  2471. * Link state is up
  2472. */
  2473. static void
  2474. bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
  2475. enum bfa_fcport_ln_sm_event event)
  2476. {
  2477. bfa_trc(ln->fcport->bfa, event);
  2478. switch (event) {
  2479. case BFA_FCPORT_LN_SM_LINKDOWN:
  2480. bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
  2481. bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
  2482. break;
  2483. default:
  2484. bfa_sm_fault(ln->fcport->bfa, event);
  2485. }
  2486. }
/*
 * Link state is waiting for up notification
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Link dropped while the up callback is still in flight. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered; settle in the up state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification and there is a pending down
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Yet another flap: record the additional pending up. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up delivered; now queue the pending down callback. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
/*
 * Link state is waiting for up notification and there are pending down and up
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The trailing pending up was cancelled by another down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up delivered; queue the pending down, up still pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
  2546. static void
  2547. __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
  2548. {
  2549. struct bfa_fcport_ln_s *ln = cbarg;
  2550. if (complete)
  2551. ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
  2552. else
  2553. bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
  2554. }
/*
 * Send SCN notification to upper layers.
 * trunk - false if caller is fcport to ignore fcport event in trunked mode
 */
static void
bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
	bfa_boolean_t trunk)
{
	/* In trunked mode, per-fcport events are suppressed. */
	if (fcport->cfg.trunked && !trunk)
		return;

	switch (event) {
	case BFA_PORT_LINKUP:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
		break;
	case BFA_PORT_LINKDOWN:
		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
		break;
	default:
		/* Only link up/down are valid SCN events. */
		WARN_ON(1);
	}
}
/*
 * Deliver a link-state event to the upper layer: synchronously when FCS
 * is present, otherwise deferred through the callback queue.
 */
static void
bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
{
	struct bfa_fcport_s *fcport = ln->fcport;

	if (fcport->bfa->fcs) {
		/* FCS present: call back immediately and notify the SM. */
		fcport->event_cbfn(fcport->event_cbarg, event);
		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
	} else {
		/* Save the event; __bfa_cb_fcport_event delivers it later. */
		ln->ln_event = event;
		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
			__bfa_cb_fcport_event, ln);
	}
}
/* DMA area for port stats, rounded up to a cache line. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
				BFA_CACHELINE_SZ))

/*
 * Report the DMA memory this module needs (the stats buffer).
 */
static void
bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);

	bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
}
  2598. static void
  2599. bfa_fcport_qresume(void *cbarg)
  2600. {
  2601. struct bfa_fcport_s *fcport = cbarg;
  2602. bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
  2603. }
  2604. static void
  2605. bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
  2606. {
  2607. struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
  2608. fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
  2609. fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
  2610. fcport->stats = (union bfa_fcport_stats_u *)
  2611. bfa_mem_dma_virt(fcport_dma);
  2612. }
/*
 * Memory initialization.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport);

	/* Both the port SM and the link-notify SM start out down. */
	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	/* NOTE(review): do_gettimeofday is the legacy timeval API; only the
	 * seconds field is kept. */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;
	fcport->stats_dma_ready = BFA_FALSE;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;
	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
	port_cfg->qos_bw.high = BFA_QOS_BW_HIGH;
	port_cfg->qos_bw.med = BFA_QOS_BW_MED;
	port_cfg->qos_bw.low = BFA_QOS_BW_LOW;

	fcport->fec_state = BFA_FEC_OFFLINE;

	INIT_LIST_HEAD(&fcport->stats_pending_q);
	INIT_LIST_HEAD(&fcport->statsclr_pending_q);

	/* Wake the SM when request-queue space becomes available. */
	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
/*
 * Module detach hook: nothing to release for fcport.
 */
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
}
/*
 * Called when IOC is ready.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
/*
 * Called before IOC is stopped.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	/* Trunking is also torn down along with the port. */
	bfa_trunk_iocdisable(bfa);
}
/*
 * Called when IOC failure is detected.
 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
/*
 * Update loop info in fcport for SCN online
 */
static void
bfa_fcport_update_loop_info(struct bfa_fcport_s *fcport,
			struct bfa_fcport_loop_info_s *loop_info)
{
	fcport->myalpa = loop_info->myalpa;
	fcport->alpabm_valid = loop_info->alpabm_val;
	/* Copy the ALPA position-map bitmap reported by firmware. */
	memcpy(fcport->alpabm.alpa_bm,
		loop_info->alpabm.alpa_bm,
		sizeof(struct fc_alpabm_s));
}
/*
 * Cache link attributes (speed, topology, QoS, FEC, trunk, FCoE VLAN)
 * from the firmware link-state event on link up.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP) {
		/* Loop topology carries its own attribute block; done. */
		bfa_fcport_update_loop_info(fcport,
				&pevent->link_state.attr.loop_info);
		return;
	}

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.attr.vc_fcf.qos_vc_attr;

	if (fcport->cfg.bb_cr_enabled)
		fcport->bbcr_attr = pevent->link_state.attr.bbcr_attr;

	fcport->fec_state = pevent->link_state.fec_state;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan =
		be16_to_cpu(pevent->link_state.attr.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
  2725. static void
  2726. bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
  2727. {
  2728. fcport->speed = BFA_PORT_SPEED_UNKNOWN;
  2729. fcport->topology = BFA_PORT_TOPOLOGY_NONE;
  2730. fcport->fec_state = BFA_FEC_OFFLINE;
  2731. }
/*
 * Send port enable message to firmware.
 *
 * Returns BFA_TRUE when the request was queued, BFA_FALSE when the
 * request queue was full (caller must wait for QRESUME).
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
		bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes over the wire big-endian. */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	/* Tell firmware where to DMA port statistics. */
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
/*
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE when the request was queued, BFA_FALSE when the
 * request queue was full (caller must wait for QRESUME).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
		bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
  2800. static void
  2801. bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
  2802. {
  2803. fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
  2804. fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
  2805. bfa_trc(fcport->bfa, fcport->pwwn);
  2806. bfa_trc(fcport->bfa, fcport->nwwn);
  2807. }
  2808. static void
  2809. bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
  2810. struct bfa_qos_stats_s *s)
  2811. {
  2812. u32 *dip = (u32 *) d;
  2813. __be32 *sip = (__be32 *) s;
  2814. int i;
  2815. /* Now swap the 32 bit fields */
  2816. for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
  2817. dip[i] = be32_to_cpu(sip[i]);
  2818. }
/*
 * Convert firmware FCoE stats to host order. The counters are 64-bit
 * values stored as pairs of big-endian 32-bit words, so on little-endian
 * hosts the two words of each pair must also be exchanged.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

	/* Walk the struct two 32-bit words (one 64-bit counter) at a time. */
	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* Little endian: also swap the high/low word positions. */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
/*
 * Completion callback for a stats-get request: copy (and byte-swap) the
 * DMA'd stats into every pending requester's buffer, then drain the
 * pending queue. On cancellation (!complete) just reset the queue.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *)cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;
	union bfa_fcport_stats_u *ret;

	if (complete) {
		struct timeval tv;

		/* tv is only consumed below when stats_status == OK. */
		if (fcport->stats_status == BFA_STATUS_OK)
			do_gettimeofday(&tv);

		list_for_each_safe(qe, qen, &fcport->stats_pending_q) {
			bfa_q_deq(&fcport->stats_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			if (fcport->stats_status == BFA_STATUS_OK) {
				ret = (union bfa_fcport_stats_u *)cb->data;
				/* Swap FC QoS or FCoE stats */
				if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
					bfa_fcport_qos_stats_swap(&ret->fcqos,
							&fcport->stats->fcqos);
				else {
					bfa_fcport_fcoe_stats_swap(&ret->fcoe,
							&fcport->stats->fcoe);
					/* Seconds since the last stats reset. */
					ret->fcoe.secs_reset =
						tv.tv_sec - fcport->stats_reset_time;
				}
			}
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
					fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		/* Cancelled: discard the waiters and clear the status. */
		INIT_LIST_HEAD(&fcport->stats_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
/*
 * The stats-get request timed out: cancel any pending queue wait and
 * complete the waiters with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
}
/*
 * Queue a stats-get request to firmware; if the request queue is full,
 * arrange to retry this same function on queue resume.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		/* Queue full: re-invoke ourselves when space opens up. */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
/*
 * Completion callback for a statistics-clear operation.
 *
 * When @complete is BFA_TRUE, stamp a new stats-reset time and complete
 * every waiter queued on statsclr_pending_q with the final status.  When
 * BFA_FALSE (presumably a callback flush, e.g. on IOC disable - confirm
 * against callers), the pending queue is simply re-initialized.  In both
 * paths stats_status is reset to BFA_STATUS_OK for the next operation.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfa_cb_pending_q_s *cb;
	struct list_head *qe, *qen;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		/* drain the pending queue, completing each waiter */
		list_for_each_safe(qe, qen, &fcport->statsclr_pending_q) {
			bfa_q_deq(&fcport->statsclr_pending_q, &qe);
			cb = (struct bfa_cb_pending_q_s *)qe;
			bfa_cb_queue_status(fcport->bfa, &cb->hcb_qe,
						fcport->stats_status);
		}
		fcport->stats_status = BFA_STATUS_OK;
	} else {
		INIT_LIST_HEAD(&fcport->statsclr_pending_q);
		fcport->stats_status = BFA_STATUS_OK;
	}
}
/*
 * Timer callback: firmware did not acknowledge a stats-clear request
 * within BFA_FCPORT_STATS_TOV.  Mirrors bfa_fcport_stats_get_timeout().
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	/* the request never reached firmware; cancel the reqq wait */
	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
}
/*
 * Post a BFI_FCPORT_H2I_STATS_CLEAR_REQ to firmware.  Uses the same
 * queue-full wait/retry protocol as bfa_fcport_send_stats_get().
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!msg) {
		/* request queue full - wait and retry from resume */
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
/*
 * Handle trunk SCN event from firmware: update the cached trunk/link
 * attributes, log which links are up, and notify upper layers if the
 * trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bit i set when trunk link i is up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		   scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;

	/* copy per-link attributes from the SCN into the cached view */
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn = tlink->trunk_wwn;
		lattr->fctl = tlink->fctl;
		lattr->speed = tlink->speed;
		lattr->deskew = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			/* an up link defines port speed and p2p topology */
			fcport->speed = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* log which of the two trunk links came up */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
/*
 * IOC-disable handling for trunked ports: mark the trunk offline,
 * reset all cached per-link attributes, and tell upper layers the
 * link is down.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		/* wipe stale per-link attributes */
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
/*
 * Called to initialize port attributes from IOC hardware data:
 * WWNs, max frame size, receive BB credits and supported speed.
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* keep a maxfrsize already configured (e.g. from flash) */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* remember pre-boot-configuration disablement at the port module */
	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
/*
 * Firmware message handler: dispatches fcport i2h messages (enable /
 * disable responses, link events, trunk SCNs, stats responses, AENs)
 * into port state-machine events and cached attribute updates.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	/* stash the raw message so SM handlers can inspect it later */
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* ignore stale responses: tag must match our request */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			fcport->stats_dma_ready = BFA_TRUE;
			if (fcport->use_flash_cfg) {
				/* one-time adoption of the flash-resident
				 * port config; 16-bit fields are byte
				 * swapped from wire order */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->qos_attr.qos_bw =
					i2hmsg.penable_rsp->port_cfg.qos_bw;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			if (fcport->cfg.qos_enabled)
				fcport->qos_attr.state = BFA_QOS_OFFLINE;
			else
				fcport->qos_attr.state = BFA_QOS_DISABLED;

			fcport->qos_attr.qos_bw_op =
					i2hmsg.penable_rsp->port_cfg.qos_bw;

			if (fcport->cfg.bb_cr_enabled)
				fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
			else
				fcport->bbcr_attr.state = BFA_BBCR_DISABLED;

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (fcport->cfg.bb_cr_enabled)
			fcport->bbcr_attr.state = BFA_BBCR_OFFLINE;
		else
			fcport->bbcr_attr.state = BFA_BBCR_DISABLED;

		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else {
			/* FAA misconfig is a distinct linkdown reason */
			if (i2hmsg.event->link_state.linkstate_rsn ==
			    BFA_PORT_LINKSTATE_RSN_FAA_MISCONFIG)
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_FAA_MISCONFIG);
			else
				bfa_sm_send_event(fcport,
					BFA_FCPORT_SM_LINKDOWN);
		}
		fcport->qos_attr.qos_bw_op =
				i2hmsg.event->link_state.qos_attr.qos_bw_op;
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->stats_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		__bfa_cb_fcport_stats_get(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (list_empty(&fcport->statsclr_pending_q) ||
		    (fcport->stats_status == BFA_STATUS_ETIMER))
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		__bfa_cb_fcport_stats_clr(fcport, BFA_TRUE);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
  3184. /*
  3185. * Registered callback for port events.
  3186. */
  3187. void
  3188. bfa_fcport_event_register(struct bfa_s *bfa,
  3189. void (*cbfn) (void *cbarg,
  3190. enum bfa_port_linkstate event),
  3191. void *cbarg)
  3192. {
  3193. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3194. fcport->event_cbfn = cbfn;
  3195. fcport->event_cbarg = cbarg;
  3196. }
  3197. bfa_status_t
  3198. bfa_fcport_enable(struct bfa_s *bfa)
  3199. {
  3200. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3201. if (bfa_fcport_is_pbcdisabled(bfa))
  3202. return BFA_STATUS_PBC;
  3203. if (bfa_ioc_is_disabled(&bfa->ioc))
  3204. return BFA_STATUS_IOC_DISABLED;
  3205. if (fcport->diag_busy)
  3206. return BFA_STATUS_DIAG_BUSY;
  3207. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
  3208. return BFA_STATUS_OK;
  3209. }
  3210. bfa_status_t
  3211. bfa_fcport_disable(struct bfa_s *bfa)
  3212. {
  3213. if (bfa_fcport_is_pbcdisabled(bfa))
  3214. return BFA_STATUS_PBC;
  3215. if (bfa_ioc_is_disabled(&bfa->ioc))
  3216. return BFA_STATUS_IOC_DISABLED;
  3217. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
  3218. return BFA_STATUS_OK;
  3219. }
  3220. /* If PBC is disabled on port, return error */
  3221. bfa_status_t
  3222. bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
  3223. {
  3224. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3225. struct bfa_iocfc_s *iocfc = &bfa->iocfc;
  3226. struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
  3227. if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
  3228. bfa_trc(bfa, fcport->pwwn);
  3229. return BFA_STATUS_PBC;
  3230. }
  3231. return BFA_STATUS_OK;
  3232. }
  3233. /*
  3234. * Configure port speed.
  3235. */
  3236. bfa_status_t
  3237. bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
  3238. {
  3239. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3240. bfa_trc(bfa, speed);
  3241. if (fcport->cfg.trunked == BFA_TRUE)
  3242. return BFA_STATUS_TRUNK_ENABLED;
  3243. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  3244. (speed == BFA_PORT_SPEED_16GBPS))
  3245. return BFA_STATUS_UNSUPP_SPEED;
  3246. if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
  3247. bfa_trc(bfa, fcport->speed_sup);
  3248. return BFA_STATUS_UNSUPP_SPEED;
  3249. }
  3250. /* Port speed entered needs to be checked */
  3251. if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
  3252. /* For CT2, 1G is not supported */
  3253. if ((speed == BFA_PORT_SPEED_1GBPS) &&
  3254. (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
  3255. return BFA_STATUS_UNSUPP_SPEED;
  3256. /* Already checked for Auto Speed and Max Speed supp */
  3257. if (!(speed == BFA_PORT_SPEED_1GBPS ||
  3258. speed == BFA_PORT_SPEED_2GBPS ||
  3259. speed == BFA_PORT_SPEED_4GBPS ||
  3260. speed == BFA_PORT_SPEED_8GBPS ||
  3261. speed == BFA_PORT_SPEED_16GBPS ||
  3262. speed == BFA_PORT_SPEED_AUTO))
  3263. return BFA_STATUS_UNSUPP_SPEED;
  3264. } else {
  3265. if (speed != BFA_PORT_SPEED_10GBPS)
  3266. return BFA_STATUS_UNSUPP_SPEED;
  3267. }
  3268. fcport->cfg.speed = speed;
  3269. return BFA_STATUS_OK;
  3270. }
  3271. /*
  3272. * Get current speed.
  3273. */
  3274. enum bfa_port_speed
  3275. bfa_fcport_get_speed(struct bfa_s *bfa)
  3276. {
  3277. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3278. return fcport->speed;
  3279. }
  3280. /*
  3281. * Configure port topology.
  3282. */
  3283. bfa_status_t
  3284. bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
  3285. {
  3286. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3287. bfa_trc(bfa, topology);
  3288. bfa_trc(bfa, fcport->cfg.topology);
  3289. switch (topology) {
  3290. case BFA_PORT_TOPOLOGY_P2P:
  3291. break;
  3292. case BFA_PORT_TOPOLOGY_LOOP:
  3293. if ((bfa_fcport_is_qos_enabled(bfa) != BFA_FALSE) ||
  3294. (fcport->qos_attr.state != BFA_QOS_DISABLED))
  3295. return BFA_STATUS_ERROR_QOS_ENABLED;
  3296. if (fcport->cfg.ratelimit != BFA_FALSE)
  3297. return BFA_STATUS_ERROR_TRL_ENABLED;
  3298. if ((bfa_fcport_is_trunk_enabled(bfa) != BFA_FALSE) ||
  3299. (fcport->trunk.attr.state != BFA_TRUNK_DISABLED))
  3300. return BFA_STATUS_ERROR_TRUNK_ENABLED;
  3301. if ((bfa_fcport_get_speed(bfa) == BFA_PORT_SPEED_16GBPS) ||
  3302. (fcport->cfg.speed == BFA_PORT_SPEED_16GBPS))
  3303. return BFA_STATUS_UNSUPP_SPEED;
  3304. if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type))
  3305. return BFA_STATUS_LOOP_UNSUPP_MEZZ;
  3306. if (bfa_fcport_is_dport(bfa) != BFA_FALSE)
  3307. return BFA_STATUS_DPORT_ERR;
  3308. if (bfa_fcport_is_ddport(bfa) != BFA_FALSE)
  3309. return BFA_STATUS_DPORT_ERR;
  3310. break;
  3311. case BFA_PORT_TOPOLOGY_AUTO:
  3312. break;
  3313. default:
  3314. return BFA_STATUS_EINVAL;
  3315. }
  3316. fcport->cfg.topology = topology;
  3317. return BFA_STATUS_OK;
  3318. }
  3319. /*
  3320. * Get current topology.
  3321. */
  3322. enum bfa_port_topology
  3323. bfa_fcport_get_topology(struct bfa_s *bfa)
  3324. {
  3325. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3326. return fcport->topology;
  3327. }
  3328. /**
  3329. * Get config topology.
  3330. */
  3331. enum bfa_port_topology
  3332. bfa_fcport_get_cfg_topology(struct bfa_s *bfa)
  3333. {
  3334. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3335. return fcport->cfg.topology;
  3336. }
  3337. bfa_status_t
  3338. bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
  3339. {
  3340. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3341. bfa_trc(bfa, alpa);
  3342. bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
  3343. bfa_trc(bfa, fcport->cfg.hardalpa);
  3344. fcport->cfg.cfg_hardalpa = BFA_TRUE;
  3345. fcport->cfg.hardalpa = alpa;
  3346. return BFA_STATUS_OK;
  3347. }
  3348. bfa_status_t
  3349. bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
  3350. {
  3351. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3352. bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
  3353. bfa_trc(bfa, fcport->cfg.hardalpa);
  3354. fcport->cfg.cfg_hardalpa = BFA_FALSE;
  3355. return BFA_STATUS_OK;
  3356. }
  3357. bfa_boolean_t
  3358. bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
  3359. {
  3360. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3361. *alpa = fcport->cfg.hardalpa;
  3362. return fcport->cfg.cfg_hardalpa;
  3363. }
  3364. u8
  3365. bfa_fcport_get_myalpa(struct bfa_s *bfa)
  3366. {
  3367. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3368. return fcport->myalpa;
  3369. }
  3370. bfa_status_t
  3371. bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
  3372. {
  3373. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3374. bfa_trc(bfa, maxfrsize);
  3375. bfa_trc(bfa, fcport->cfg.maxfrsize);
  3376. /* with in range */
  3377. if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
  3378. return BFA_STATUS_INVLD_DFSZ;
  3379. /* power of 2, if not the max frame size of 2112 */
  3380. if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
  3381. return BFA_STATUS_INVLD_DFSZ;
  3382. fcport->cfg.maxfrsize = maxfrsize;
  3383. return BFA_STATUS_OK;
  3384. }
  3385. u16
  3386. bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
  3387. {
  3388. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3389. return fcport->cfg.maxfrsize;
  3390. }
  3391. u8
  3392. bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
  3393. {
  3394. if (bfa_fcport_get_topology(bfa) != BFA_PORT_TOPOLOGY_LOOP)
  3395. return (BFA_FCPORT_MOD(bfa))->cfg.rx_bbcredit;
  3396. else
  3397. return 0;
  3398. }
  3399. void
  3400. bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
  3401. {
  3402. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3403. fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
  3404. }
  3405. /*
  3406. * Get port attributes.
  3407. */
  3408. wwn_t
  3409. bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
  3410. {
  3411. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3412. if (node)
  3413. return fcport->nwwn;
  3414. else
  3415. return fcport->pwwn;
  3416. }
/*
 * Fill *attr with a snapshot of the port attributes: WWNs, speed,
 * topology, beacon state and a derived port state.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	attr->factorypwwn =  bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn =  bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));

	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;

	/* path TOV and queue depth live in the FCP-IM module */
	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);

	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);

	attr->fec_state = fcport->fec_state;

	/* PBC Disabled State: overrides the SM-derived port state */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
  3456. #define BFA_FCPORT_STATS_TOV 1000
/*
 * Fetch port statistics (FCQoS or FCoE).  Requests are queued; the
 * first requester triggers the firmware request and starts the timeout
 * timer, subsequent requesters simply wait for the same completion.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa) ||
	    !fcport->stats_dma_ready)
		return BFA_STATUS_IOC_NON_OP;

	/* a clear operation owns the stats area; do not interleave */
	if (!list_empty(&fcport->statsclr_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->stats_pending_q)) {
		/* first requester: fire the firmware request + timer */
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);
		bfa_fcport_send_stats_get(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_get_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		/* piggy-back on the request already in flight */
		list_add_tail(&cb->hcb_qe.qe, &fcport->stats_pending_q);

	return BFA_STATUS_OK;
}
/*
 * Reset port statistics (FCQoS or FCoE).  Same queueing scheme as
 * bfa_fcport_get_stats(), with the roles of the two pending queues
 * swapped.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, struct bfa_cb_pending_q_s *cb)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa) ||
	    !fcport->stats_dma_ready)
		return BFA_STATUS_IOC_NON_OP;

	/* a get operation owns the stats area; do not interleave */
	if (!list_empty(&fcport->stats_pending_q))
		return BFA_STATUS_DEVBUSY;

	if (list_empty(&fcport->statsclr_pending_q)) {
		/* first requester: fire the firmware request + timer */
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);
		bfa_fcport_send_stats_clear(fcport);
		bfa_timer_start(bfa, &fcport->timer,
				bfa_fcport_stats_clr_timeout,
				fcport, BFA_FCPORT_STATS_TOV);
	} else
		/* piggy-back on the request already in flight */
		list_add_tail(&cb->hcb_qe.qe, &fcport->statsclr_pending_q);

	return BFA_STATUS_OK;
}
  3501. /*
  3502. * Fetch port attributes.
  3503. */
  3504. bfa_boolean_t
  3505. bfa_fcport_is_disabled(struct bfa_s *bfa)
  3506. {
  3507. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3508. return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
  3509. BFA_PORT_ST_DISABLED;
  3510. }
  3511. bfa_boolean_t
  3512. bfa_fcport_is_dport(struct bfa_s *bfa)
  3513. {
  3514. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3515. return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
  3516. BFA_PORT_ST_DPORT);
  3517. }
  3518. bfa_boolean_t
  3519. bfa_fcport_is_ddport(struct bfa_s *bfa)
  3520. {
  3521. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3522. return (bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
  3523. BFA_PORT_ST_DDPORT);
  3524. }
  3525. bfa_status_t
  3526. bfa_fcport_set_qos_bw(struct bfa_s *bfa, struct bfa_qos_bw_s *qos_bw)
  3527. {
  3528. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3529. enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
  3530. bfa_trc(bfa, ioc_type);
  3531. if ((qos_bw->high == 0) || (qos_bw->med == 0) || (qos_bw->low == 0))
  3532. return BFA_STATUS_QOS_BW_INVALID;
  3533. if ((qos_bw->high + qos_bw->med + qos_bw->low) != 100)
  3534. return BFA_STATUS_QOS_BW_INVALID;
  3535. if ((qos_bw->med > qos_bw->high) || (qos_bw->low > qos_bw->med) ||
  3536. (qos_bw->low > qos_bw->high))
  3537. return BFA_STATUS_QOS_BW_INVALID;
  3538. if ((ioc_type == BFA_IOC_TYPE_FC) &&
  3539. (fcport->cfg.topology != BFA_PORT_TOPOLOGY_LOOP))
  3540. fcport->cfg.qos_bw = *qos_bw;
  3541. return BFA_STATUS_OK;
  3542. }
  3543. bfa_boolean_t
  3544. bfa_fcport_is_ratelim(struct bfa_s *bfa)
  3545. {
  3546. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3547. return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
  3548. }
  3549. /*
  3550. * Enable/Disable FAA feature in port config
  3551. */
  3552. void
  3553. bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
  3554. {
  3555. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3556. bfa_trc(bfa, state);
  3557. fcport->cfg.faa_state = state;
  3558. }
  3559. /*
  3560. * Get default minimum ratelim speed
  3561. */
  3562. enum bfa_port_speed
  3563. bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
  3564. {
  3565. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3566. bfa_trc(bfa, fcport->cfg.trl_def_speed);
  3567. return fcport->cfg.trl_def_speed;
  3568. }
  3569. void
  3570. bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
  3571. bfa_boolean_t link_e2e_beacon)
  3572. {
  3573. struct bfa_s *bfa = dev;
  3574. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3575. bfa_trc(bfa, beacon);
  3576. bfa_trc(bfa, link_e2e_beacon);
  3577. bfa_trc(bfa, fcport->beacon);
  3578. bfa_trc(bfa, fcport->link_e2e_beacon);
  3579. fcport->beacon = beacon;
  3580. fcport->link_e2e_beacon = link_e2e_beacon;
  3581. }
  3582. bfa_boolean_t
  3583. bfa_fcport_is_linkup(struct bfa_s *bfa)
  3584. {
  3585. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3586. return (!fcport->cfg.trunked &&
  3587. bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
  3588. (fcport->cfg.trunked &&
  3589. fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
  3590. }
  3591. bfa_boolean_t
  3592. bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
  3593. {
  3594. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3595. return fcport->cfg.qos_enabled;
  3596. }
  3597. bfa_boolean_t
  3598. bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
  3599. {
  3600. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3601. return fcport->cfg.trunked;
  3602. }
/*
 * Enable or disable BB credit recovery.  Only valid on FC adapters
 * (non-mezz, or Chinook mezz), and enabling is rejected while loop
 * topology, QoS or trunking is configured, or the speed setup cannot
 * sustain it.  A bb_scn of 0 or above BFA_BB_SCN_MAX falls back to the
 * default BFA_BB_SCN_DEF.
 */
bfa_status_t
bfa_fcport_cfg_bbcr(struct bfa_s *bfa, bfa_boolean_t on_off, u8 bb_scn)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, on_off);

	if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
		return BFA_STATUS_BBCR_FC_ONLY;

	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type) &&
		(bfa->ioc.attr->card_type != BFA_MFG_TYPE_CHINOOK))
		return BFA_STATUS_CMD_NOTSUPP_MEZZ;

	if (on_off) {
		if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
			return BFA_STATUS_TOPOLOGY_LOOP;

		if (fcport->cfg.qos_enabled)
			return BFA_STATUS_ERROR_QOS_ENABLED;

		if (fcport->cfg.trunked)
			return BFA_STATUS_TRUNK_ENABLED;

		/* a fixed speed below the adapter maximum is rejected */
		if ((fcport->cfg.speed != BFA_PORT_SPEED_AUTO) &&
			(fcport->cfg.speed < bfa_ioc_speed_sup(&bfa->ioc)))
			return BFA_STATUS_ERR_BBCR_SPEED_UNSUPPORT;

		if (bfa_ioc_speed_sup(&bfa->ioc) < BFA_PORT_SPEED_8GBPS)
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;

		if (fcport->cfg.bb_cr_enabled) {
			/* already on: changing bb_scn needs disable first */
			if (bb_scn != fcport->cfg.bb_scn)
				return BFA_STATUS_BBCR_CFG_NO_CHANGE;
			else
				return BFA_STATUS_NO_CHANGE;
		}

		if ((bb_scn == 0) || (bb_scn > BFA_BB_SCN_MAX))
			bb_scn = BFA_BB_SCN_DEF;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = bb_scn;
	} else {
		if (!fcport->cfg.bb_cr_enabled)
			return BFA_STATUS_NO_CHANGE;

		fcport->cfg.bb_cr_enabled = on_off;
		fcport->cfg.bb_scn = 0;
	}

	return BFA_STATUS_OK;
}
  3643. bfa_status_t
  3644. bfa_fcport_get_bbcr_attr(struct bfa_s *bfa,
  3645. struct bfa_bbcr_attr_s *bbcr_attr)
  3646. {
  3647. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
  3648. if (bfa_ioc_get_type(&fcport->bfa->ioc) != BFA_IOC_TYPE_FC)
  3649. return BFA_STATUS_BBCR_FC_ONLY;
  3650. if (fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP)
  3651. return BFA_STATUS_TOPOLOGY_LOOP;
  3652. *bbcr_attr = fcport->bbcr_attr;
  3653. return BFA_STATUS_OK;
  3654. }
  3655. void
  3656. bfa_fcport_dportenable(struct bfa_s *bfa)
  3657. {
  3658. /*
  3659. * Assume caller check for port is in disable state
  3660. */
  3661. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTENABLE);
  3662. bfa_port_set_dportenabled(&bfa->modules.port, BFA_TRUE);
  3663. }
  3664. void
  3665. bfa_fcport_dportdisable(struct bfa_s *bfa)
  3666. {
  3667. /*
  3668. * Assume caller check for port is in disable state
  3669. */
  3670. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DPORTDISABLE);
  3671. bfa_port_set_dportenabled(&bfa->modules.port, BFA_FALSE);
  3672. }
  3673. void
  3674. bfa_fcport_ddportenable(struct bfa_s *bfa)
  3675. {
  3676. /*
  3677. * Assume caller check for port is in disable state
  3678. */
  3679. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTENABLE);
  3680. }
  3681. void
  3682. bfa_fcport_ddportdisable(struct bfa_s *bfa)
  3683. {
  3684. /*
  3685. * Assume caller check for port is in disable state
  3686. */
  3687. bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DDPORTDISABLE);
  3688. }
/*
 * Rport State machine functions
 */

/*
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* any other event in this state is a driver bug */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport created in the driver; not yet known to firmware.  An online
 * event triggers the firmware-create handshake.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* qfull variant parks until request-queue space frees up */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* delete requested mid-handshake: finish after fw responds */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* offline requested mid-handshake: defer until fw responds */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available - send the deferred create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* nothing was sent to firmware; free directly */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* nothing was sent to firmware; go offline directly */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		/* tear down f/w rport; go to *_qfull if no reqq space */
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* QoS state change notification from firmware */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive big-endian; convert before comparing */
		qos_scn->old_qos_attr.qos_flow_id  =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about attributes that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* f/w acked the delete; rport is offline but still allocated */
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Request queue is full, awaiting queue resume to send delete request.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available again; retry the delete request */
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		/* nothing in f/w to clean up; free immediately */
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		/* re-create the f/w rport; *_qfull if no reqq space */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* already offline; just ack the caller */
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* f/w acked the delete; release the rport */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failed: no response will come; release the rport */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Rport is deleted, awaiting queue resume to send the f/w delete request.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available again; send the delete request */
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC failed: cancel the queue wait and release the rport */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; immediately issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; immediately issue the deferred offline */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade the pending offline to a pending delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
/*
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* f/w is gone; ack offline without sending anything */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered; re-create the f/w rport */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already in the failed state; ignore repeat notifications */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
  4081. /*
  4082. * bfa_rport_private BFA rport private functions
  4083. */
  4084. static void
  4085. __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
  4086. {
  4087. struct bfa_rport_s *rp = cbarg;
  4088. if (complete)
  4089. bfa_cb_rport_online(rp->rport_drv);
  4090. }
  4091. static void
  4092. __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
  4093. {
  4094. struct bfa_rport_s *rp = cbarg;
  4095. if (complete)
  4096. bfa_cb_rport_offline(rp->rport_drv);
  4097. }
  4098. static void
  4099. bfa_rport_qresume(void *cbarg)
  4100. {
  4101. struct bfa_rport_s *rp = cbarg;
  4102. bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
  4103. }
/*
 * Compute KVA memory needed by the rport module and register it in minfo.
 * Also clamps the configured rport count to the supported minimum.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	/* enforce the minimum number of rports */
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
/*
 * Attach-time initialization: carve the rport array out of the module's
 * KVA block and put every rport (except tag 0) on the free queue.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* num_rports must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is reserved: it is never placed on the
		 * free queue and so is never handed out.
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
static void
bfa_rport_detach(struct bfa_s *bfa)
{
	/* No per-module teardown required. */
}
static void
bfa_rport_start(struct bfa_s *bfa)
{
	/* No start-time work required for the rport module. */
}
static void
bfa_rport_stop(struct bfa_s *bfa)
{
	/* No stop-time work required for the rport module. */
}
/*
 * IOC disable handler: recycle unused rports and fail every active one
 * so its state machine can clean up.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* safe iteration: HWFAIL handling may unlink entries */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
  4172. static struct bfa_rport_s *
  4173. bfa_rport_alloc(struct bfa_rport_mod_s *mod)
  4174. {
  4175. struct bfa_rport_s *rport;
  4176. bfa_q_deq(&mod->rp_free_q, &rport);
  4177. if (rport)
  4178. list_add_tail(&rport->qe, &mod->rp_active_q);
  4179. return rport;
  4180. }
  4181. static void
  4182. bfa_rport_free(struct bfa_rport_s *rport)
  4183. {
  4184. struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
  4185. WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
  4186. list_del(&rport->qe);
  4187. list_add_tail(&rport->qe, &mod->rp_free_q);
  4188. }
/*
 * Send an rport create request to firmware.  Returns BFA_FALSE and
 * queues a resume-wait entry when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz goes on the wire big-endian */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
/*
 * Send an rport delete request to firmware.  Returns BFA_FALSE and
 * queues a resume-wait entry when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	/* fw_handle was returned by firmware in the create response */
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
/*
 * Send a set-speed request to firmware.  Unlike create/delete this is
 * best-effort: if the request queue is full the request is dropped
 * (only traced), not re-queued.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
  4261. /*
  4262. * bfa_rport_public
  4263. */
  4264. /*
  4265. * Rport interrupt processing.
  4266. */
/*
 * Rport interrupt processing: dispatch a firmware-to-host rport message
 * to the owning rport's state machine or to the driver SCN callbacks.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* remember the firmware handle for later delete/speed reqs */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_rport_set_lunmask(bfa, rp);
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_rport_unset_lunmask(bfa, rp);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* stash the raw message; the FSM parses it in sm_online */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	case BFI_RPORT_I2H_LIP_SCN_ONLINE:
		bfa_fcport_update_loop_info(BFA_FCPORT_MOD(bfa),
				&msg.lip_scn->loop_info);
		bfa_cb_rport_scn_online(bfa);
		break;

	case BFI_RPORT_I2H_LIP_SCN_OFFLINE:
		bfa_cb_rport_scn_offline(bfa);
		break;

	case BFI_RPORT_I2H_NO_DEV:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.lip_scn->bfa_handle);
		bfa_cb_rport_scn_no_dev(rp->rport_drv);
		break;

	default:
		/* unknown message id from firmware */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Firmware reconfigured with fewer rports: park the excess free rports
 * on the unused queue so they are not handed out.
 */
void
bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
		/* take from the tail so low tags remain available */
		bfa_q_deq_tail(&mod->rp_free_q, &qe);
		list_add_tail(qe, &mod->rp_unused_q);
	}
}
  4322. /*
  4323. * bfa_rport_api
  4324. */
/*
 * Allocate and create a BFA rport bound to the given driver rport.
 * Returns NULL when no free rport is available.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	memset(&rp->stats, 0, sizeof(rp->stats));

	/* a freshly allocated rport must be in the uninit state */
	WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
/*
 * Bring an rport online with the given login parameters.
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	/* warn on a zero PDU size, but recover below rather than fail */
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
  4354. void
  4355. bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
  4356. {
  4357. WARN_ON(speed == 0);
  4358. WARN_ON(speed == BFA_PORT_SPEED_AUTO);
  4359. if (rport) {
  4360. rport->rport_info.speed = speed;
  4361. bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
  4362. }
  4363. }
/*
 * Set Rport LUN Mask: mark LUN masking enabled on both the logical port
 * and the rport, then push the (lp, rport) WWN pair to the FCP-IM
 * lun-mask table.
 */
void
bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;
	u8 lp_tag = (u8)rp->rport_info.lp_tag;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* enable the mask flag on the logical port and the rport together */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
					rp->lun_mask = BFA_TRUE;
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn, rp->rport_tag, lp_tag);
}
/*
 * Unset Rport LUN mask: clear the masking flags and invalidate the
 * FCP-IM lun-mask table entry for this rport.
 */
void
bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp)
{
	struct bfa_lps_mod_s	*lps_mod = BFA_LPS_MOD(bfa);
	wwn_t	lp_wwn, rp_wwn;

	rp_wwn = ((struct bfa_fcs_rport_s *)rp->rport_drv)->pwwn;
	lp_wwn = (BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag))->pwwn;

	/* disable the mask flag on the logical port and the rport together */
	BFA_LPS_FROM_TAG(lps_mod, rp->rport_info.lp_tag)->lun_mask =
				rp->lun_mask = BFA_FALSE;
	/* invalid tags mark the table entry as stale */
	bfa_fcpim_lunmask_rp_update(bfa, lp_wwn, rp_wwn,
			BFA_RPORT_TAG_INVALID, BFA_LP_TAG_INVALID);
}
  4390. /*
  4391. * SGPG related functions
  4392. */
/*
 * Compute memory needed by the SGPG module.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s	*sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s	*sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s	*seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* clamp the configured SG page count to the supported range */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* size each DMA segment: full segments first, remainder last */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
/*
 * Attach-time initialization: pair each host-side bfa_sgpg_s (carved from
 * KVA) with an aligned firmware bfi_sgpg_s (carved from the DMA segments)
 * and queue the pairs on the free list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s	*mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s	*hsgpg;
	struct bfi_sgpg_s	*sgpg;
	u64	align_len;
	struct bfa_mem_dma_s	*seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* union lets the 64-bit phys address be stored as a bfi address */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* round the segment start up to the SGPG alignment */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
					     bfa_mem_dma_phys(seg_ptr);
		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			/* store the phys address in hardware byte order */
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* advance KVA pointer past the consumed hsgpg array */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
	/* No per-module teardown required. */
}
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
	/* No start-time work required for the SGPG module. */
}
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
	/* No stop-time work required for the SGPG module. */
}
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
	/* Nothing to fail over: SGPGs carry no in-flight firmware state. */
}
/*
 * Allocate nsgpgs SG pages from the free pool onto the caller's list.
 * All-or-nothing: returns BFA_STATUS_ENOMEM without taking any pages
 * when the pool cannot satisfy the whole request.
 */
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int i;

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		/* free_sgpgs was checked above, so the queue cannot be empty */
		WARN_ON(!hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
/*
 * Return nsgpg SG pages to the free pool, then hand freed pages to as
 * many queued waiters as possible (in FIFO order), invoking a waiter's
 * callback once its request is fully satisfied.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	mod->free_sgpgs += nsgpg;
	WARN_ON(mod->free_sgpgs > mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/*
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* give the waiter everything available, up to its need */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			/* request complete: dequeue and notify the waiter */
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
/*
 * Queue a wait entry for nsgpg SG pages.  Must only be called when the
 * pool cannot satisfy the request; whatever pages are free are handed
 * to this waiter immediately and the remainder is delivered later by
 * bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	/* caller must need more pages than are currently free */
	WARN_ON(nsgpg <= mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
/*
 * Cancel a pending SG page wait, returning any partially allocated
 * pages back to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	/* nsgpg_total != nsgpg means a partial allocation was handed over */
	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
  4562. void
  4563. bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
  4564. void *cbarg)
  4565. {
  4566. INIT_LIST_HEAD(&wqe->sgpg_q);
  4567. wqe->cbfn = cbfn;
  4568. wqe->cbarg = cbarg;
  4569. }
  4570. /*
  4571. * UF related functions
  4572. */
  4573. /*
  4574. *****************************************************************************
  4575. * Internal functions
  4576. *****************************************************************************
  4577. */
  4578. static void
  4579. __bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
  4580. {
  4581. struct bfa_uf_s *uf = cbarg;
  4582. struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
  4583. if (complete)
  4584. ufm->ufrecv(ufm->cbarg, uf);
  4585. }
  4586. static void
  4587. claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
  4588. {
  4589. struct bfi_uf_buf_post_s *uf_bp_msg;
  4590. u16 i;
  4591. u16 buf_len;
  4592. ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
  4593. uf_bp_msg = ufm->uf_buf_posts;
  4594. for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
  4595. i++, uf_bp_msg++) {
  4596. memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
  4597. uf_bp_msg->buf_tag = i;
  4598. buf_len = sizeof(struct bfa_uf_buf_s);
  4599. uf_bp_msg->buf_len = cpu_to_be16(buf_len);
  4600. bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
  4601. bfa_fn_lpu(ufm->bfa));
  4602. bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
  4603. }
  4604. /*
  4605. * advance pointer beyond consumed memory
  4606. */
  4607. bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
  4608. }
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s   *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		/* per-UF DMA buffer: kernel virtual and physical addresses */
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
/*
 * Claim the UF module's KVA memory.  Order matters: each helper
 * advances the module's KVA pointer past what it consumed.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
/*
 * Compute DMA and KVA memory needed by the UF module and register the
 * requirements in minfo.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* size each DMA segment: full segments first, remainder last */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory: one bfa_uf_s plus one pre-built post msg per UF */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
/*
 * Attach-time initialization of the UF module: set up the queues and
 * claim the module's KVA memory.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}
static void
bfa_uf_detach(struct bfa_s *bfa)
{
	/* No per-module teardown required. */
}
  4681. static struct bfa_uf_s *
  4682. bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
  4683. {
  4684. struct bfa_uf_s *uf;
  4685. bfa_q_deq(&uf_mod->uf_free_q, &uf);
  4686. return uf;
  4687. }
/* Return a UF buffer to the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
/*
 * Post one UF buffer to firmware by copying its pre-built buffer-post
 * message into the request queue.  On success the UF moves to the
 * posted queue; BFA_STATUS_FAILED means the request queue was full.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	/* message was fully built at attach time; just copy it in */
	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		    sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
  4707. static void
  4708. bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
  4709. {
  4710. struct bfa_uf_s *uf;
  4711. while ((uf = bfa_uf_get(uf_mod)) != NULL) {
  4712. if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
  4713. break;
  4714. }
  4715. }
/*
 * Handle a frame-received notification from firmware: locate the posted
 * UF buffer by tag, fix up endianness, log the frame, and deliver it to
 * the registered receive handler.
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;
	struct fchs_s *fchs;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	/* lengths arrive big-endian; convert in place */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	/* a valid frame must at least contain a full FC header */
	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	if (uf->data_len == sizeof(struct fchs_s)) {
		/* header only: nothing beyond the FC header to log */
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		/* log the header plus the first payload word */
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	/* deliver inline for FCS-owned instances, else via callback queue */
	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
/*
 * Module stop hook -- nothing to do for UF.
 */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
/*
 * IOC failure/disable handling: reclaim all UF buffers.
 *
 * Buffers parked on the unused queue and buffers outstanding with the
 * (no longer reachable) firmware are all returned to the free queue.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	/* Enqueue unused uf resources to free_q */
	list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
/*
 * Module start hook: hand all free UF buffers to firmware so it can
 * receive unsolicited frames.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
  4772. /*
 * Register handler for all unsolicited receive frames.
  4774. *
  4775. * @param[in] bfa BFA instance
  4776. * @param[in] ufrecv receive handler function
  4777. * @param[in] cbarg receive handler arg
  4778. */
  4779. void
  4780. bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
  4781. {
  4782. struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
  4783. ufm->ufrecv = ufrecv;
  4784. ufm->cbarg = cbarg;
  4785. }
  4786. /*
  4787. * Free an unsolicited frame back to BFA.
  4788. *
  4789. * @param[in] uf unsolicited frame to be freed
  4790. *
  4791. * @return None
  4792. */
  4793. void
  4794. bfa_uf_free(struct bfa_uf_s *uf)
  4795. {
  4796. bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
  4797. bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
  4798. }
  4799. /*
  4800. * uf_pub BFA uf module public functions
  4801. */
/*
 * UF interrupt/message handler: dispatch firmware-to-host UF messages.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* unexpected message id on the UF class */
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
/*
 * Reconfigure UF resources when firmware supports only @num_uf_fw
 * buffers: park the surplus free buffers on the unused queue.
 */
void
bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
{
	struct bfa_uf_mod_s	*mod = BFA_UF_MOD(bfa);
	struct list_head	*qe;
	int	i;

	for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
		bfa_q_deq_tail(&mod->uf_free_q, &qe);
		list_add_tail(qe, &mod->uf_unused_q);
	}
}
  4826. /*
 * D-port definitions and forward declarations
  4828. */
/* D-port test progress, tracked in dport->test_state */
enum bfa_dport_test_state_e {
	BFA_DPORT_ST_DISABLED	= 0,	/*!< dport is disabled */
	BFA_DPORT_ST_INP	= 1,	/*!< test in progress */
	BFA_DPORT_ST_COMP	= 2,	/*!< test complete successfully */
	BFA_DPORT_ST_NO_SFP	= 3,	/*!< sfp is not present */
	BFA_DPORT_ST_NOTSTART	= 4,	/*!< test not started; dport is enabled */
};
  4836. /*
  4837. * BFA DPORT state machine events
  4838. */
enum bfa_dport_sm_event {
	BFA_DPORT_SM_ENABLE	= 1,	/* dport enable event */
	BFA_DPORT_SM_DISABLE	= 2,	/* dport disable event */
	BFA_DPORT_SM_FWRSP	= 3,	/* fw enable/disable response */
	BFA_DPORT_SM_QRESUME	= 4,	/* CQ space available */
	BFA_DPORT_SM_HWFAIL	= 5,	/* IOC h/w failure */
	BFA_DPORT_SM_START	= 6,	/* re-start dport test */
	BFA_DPORT_SM_REQFAIL	= 7,	/* request failure */
	BFA_DPORT_SM_SCN	= 8,	/* state change notification from firmware */
};
  4849. static void bfa_dport_sm_disabled(struct bfa_dport_s *dport,
  4850. enum bfa_dport_sm_event event);
  4851. static void bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
  4852. enum bfa_dport_sm_event event);
  4853. static void bfa_dport_sm_enabling(struct bfa_dport_s *dport,
  4854. enum bfa_dport_sm_event event);
  4855. static void bfa_dport_sm_enabled(struct bfa_dport_s *dport,
  4856. enum bfa_dport_sm_event event);
  4857. static void bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
  4858. enum bfa_dport_sm_event event);
  4859. static void bfa_dport_sm_disabling(struct bfa_dport_s *dport,
  4860. enum bfa_dport_sm_event event);
  4861. static void bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
  4862. enum bfa_dport_sm_event event);
  4863. static void bfa_dport_sm_starting(struct bfa_dport_s *dport,
  4864. enum bfa_dport_sm_event event);
  4865. static void bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
  4866. enum bfa_dport_sm_event event);
  4867. static void bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
  4868. enum bfa_dport_sm_event event);
  4869. static void bfa_dport_qresume(void *cbarg);
  4870. static void bfa_dport_req_comp(struct bfa_dport_s *dport,
  4871. struct bfi_diag_dport_rsp_s *msg);
  4872. static void bfa_dport_scn(struct bfa_dport_s *dport,
  4873. struct bfi_diag_dport_scn_s *msg);
  4874. /*
  4875. * BFA fcdiag module
  4876. */
  4877. #define BFA_DIAG_QTEST_TOV 1000 /* msec */
  4878. /*
  4879. * Set port status to busy
  4880. */
  4881. static void
  4882. bfa_fcdiag_set_busy_status(struct bfa_fcdiag_s *fcdiag)
  4883. {
  4884. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(fcdiag->bfa);
  4885. if (fcdiag->lb.lock)
  4886. fcport->diag_busy = BFA_TRUE;
  4887. else
  4888. fcport->diag_busy = BFA_FALSE;
  4889. }
/*
 * Memory-requirement hook -- fcdiag claims no memory of its own; the
 * common DIAG attach (bfa_diag_attach) performs all memory claims.
 */
static void
bfa_fcdiag_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
}
/*
 * Module attach: initialize fcdiag state and the embedded d-port test
 * state machine (starts in the disabled state with a cleared result).
 */
static void
bfa_fcdiag_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	fcdiag->bfa = bfa;
	fcdiag->trcmod = bfa->trcmod;
	/* The common DIAG attach bfa_diag_attach() will do all memory claim */
	dport->bfa = bfa;
	bfa_sm_set_state(dport, bfa_dport_sm_disabled);
	/* resume callback fires when request-queue space frees up */
	bfa_reqq_winit(&dport->reqq_wait, bfa_dport_qresume, dport);
	dport->cbfn = NULL;
	dport->cbarg = NULL;
	dport->test_state = BFA_DPORT_ST_DISABLED;
	memset(&dport->result, 0, sizeof(struct bfa_diag_dport_result_s));
}
/*
 * IOC failure handling: fail any loopback test in progress with
 * BFA_STATUS_IOC_FAILURE and notify the d-port state machine.
 */
static void
bfa_fcdiag_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	struct bfa_dport_s *dport = &fcdiag->dport;

	bfa_trc(fcdiag, fcdiag->lb.lock);
	if (fcdiag->lb.lock) {
		fcdiag->lb.status = BFA_STATUS_IOC_FAILURE;
		fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
		fcdiag->lb.lock = 0;
		bfa_fcdiag_set_busy_status(fcdiag);
	}

	bfa_sm_send_event(dport, BFA_DPORT_SM_HWFAIL);
}
/*
 * Module detach hook -- nothing to undo for fcdiag.
 */
static void
bfa_fcdiag_detach(struct bfa_s *bfa)
{
}
/*
 * Module start hook -- nothing to do for fcdiag.
 */
static void
bfa_fcdiag_start(struct bfa_s *bfa)
{
}
/*
 * Module stop hook -- nothing to do for fcdiag.
 */
static void
bfa_fcdiag_stop(struct bfa_s *bfa)
{
}
/*
 * Queue-test timer expiry: firmware did not echo the test message in
 * time.  Record BFA_STATUS_ETIMER in the result, run the caller's
 * completion callback and release the queue-test lock.
 */
static void
bfa_fcdiag_queuetest_timeout(void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = cbarg;
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;

	bfa_trc(fcdiag, fcdiag->qtest.all);
	bfa_trc(fcdiag, fcdiag->qtest.count);

	fcdiag->qtest.timer_active = 0;

	res->status = BFA_STATUS_ETIMER;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	if (fcdiag->qtest.all)
		/* NOTE(review): stores the 'all' flag, not the failing
		 * queue number -- confirm intent */
		res->queue = fcdiag->qtest.all;

	bfa_trc(fcdiag, BFA_STATUS_ETIMER);
	fcdiag->qtest.status = BFA_STATUS_ETIMER;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
/*
 * Send one queue-test message, payload filled with QTEST_PAT_DEFAULT,
 * on the queue currently under test.  Returns BFA_STATUS_DEVBUSY when
 * no request-queue element is available.
 */
static bfa_status_t
bfa_fcdiag_queuetest_send(struct bfa_fcdiag_s *fcdiag)
{
	u32 i;
	struct bfi_diag_qtest_req_s *req;

	req = bfa_reqq_next(fcdiag->bfa, fcdiag->qtest.queue);
	if (!req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_QTEST,
			bfa_fn_lpu(fcdiag->bfa));

	for (i = 0; i < BFI_LMSG_PL_WSZ; i++)
		req->data[i] = QTEST_PAT_DEFAULT;

	bfa_trc(fcdiag, fcdiag->qtest.queue);
	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, fcdiag->qtest.queue, req->mh);
	return BFA_STATUS_OK;
}
/*
 * Queue-test response handler.
 *
 * The response payload is checked against the bit-inverted test
 * pattern (~QTEST_PAT_DEFAULT); any mismatch is reported as
 * BFA_STATUS_DATACORRUPTED.  While iterations remain on the current
 * queue (or, in "all" mode, more queues remain), the next test message
 * is sent and completion is deferred.  Otherwise the timer is stopped,
 * the result is finalized and the caller's callback is invoked.
 */
static void
bfa_fcdiag_queuetest_comp(struct bfa_fcdiag_s *fcdiag,
			bfi_diag_qtest_rsp_t *rsp)
{
	struct bfa_diag_qtest_result_s *res = fcdiag->qtest.result;
	bfa_status_t status = BFA_STATUS_OK;
	int i;

	/* Check timer, should still be active */
	if (!fcdiag->qtest.timer_active) {
		bfa_trc(fcdiag, fcdiag->qtest.timer_active);
		return;
	}

	/* update count */
	fcdiag->qtest.count--;

	/* Check result */
	for (i = 0; i < BFI_LMSG_PL_WSZ; i++) {
		if (rsp->data[i] != ~(QTEST_PAT_DEFAULT)) {
			res->status = BFA_STATUS_DATACORRUPTED;
			break;
		}
	}

	if (res->status == BFA_STATUS_OK) {
		if (fcdiag->qtest.count > 0) {
			/* more iterations on the same queue */
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		} else if (fcdiag->qtest.all > 0 &&
			fcdiag->qtest.queue < (BFI_IOC_MAX_CQS - 1)) {
			/* "all" mode: advance to the next queue */
			fcdiag->qtest.count = QTEST_CNT_DEFAULT;
			fcdiag->qtest.queue++;
			status = bfa_fcdiag_queuetest_send(fcdiag);
			if (status == BFA_STATUS_OK)
				return;
			else
				res->status = status;
		}
	}

	/* Stop timer when we comp all queue */
	if (fcdiag->qtest.timer_active) {
		bfa_timer_stop(&fcdiag->qtest.timer);
		fcdiag->qtest.timer_active = 0;
	}

	res->queue = fcdiag->qtest.queue;
	res->count = QTEST_CNT_DEFAULT - fcdiag->qtest.count;
	bfa_trc(fcdiag, res->count);
	bfa_trc(fcdiag, res->status);
	fcdiag->qtest.status = res->status;
	fcdiag->qtest.cbfn(fcdiag->qtest.cbarg, fcdiag->qtest.status);
	fcdiag->qtest.lock = 0;
}
/*
 * Loopback response handler: copy the big-endian firmware frame
 * counters into the caller's result, complete the callback, and drop
 * the loopback lock (which also clears the port's diag-busy flag).
 */
static void
bfa_fcdiag_loopback_comp(struct bfa_fcdiag_s *fcdiag,
			struct bfi_diag_lb_rsp_s *rsp)
{
	struct bfa_diag_loopback_result_s *res = fcdiag->lb.result;

	res->numtxmfrm = be32_to_cpu(rsp->res.numtxmfrm);
	res->numosffrm = be32_to_cpu(rsp->res.numosffrm);
	res->numrcvfrm = be32_to_cpu(rsp->res.numrcvfrm);
	res->badfrminf = be32_to_cpu(rsp->res.badfrminf);
	res->badfrmnum = be32_to_cpu(rsp->res.badfrmnum);
	res->status = rsp->res.status;
	fcdiag->lb.status = rsp->res.status;
	bfa_trc(fcdiag, fcdiag->lb.status);
	fcdiag->lb.cbfn(fcdiag->lb.cbarg, fcdiag->lb.status);
	fcdiag->lb.lock = 0;
	bfa_fcdiag_set_busy_status(fcdiag);
}
/*
 * Build and post a BFI_DIAG_H2I_LOOPBACK request from the caller's
 * loopback parameters.  Returns BFA_STATUS_DEVBUSY when the DIAG
 * request queue has no free element.
 */
static bfa_status_t
bfa_fcdiag_loopback_send(struct bfa_fcdiag_s *fcdiag,
			struct bfa_diag_loopback_s *loopback)
{
	struct bfi_diag_lb_req_s *lb_req;

	lb_req = bfa_reqq_next(fcdiag->bfa, BFA_REQQ_DIAG);
	if (!lb_req)
		return BFA_STATUS_DEVBUSY;

	/* build host command */
	bfi_h2i_set(lb_req->mh, BFI_MC_DIAG, BFI_DIAG_H2I_LOOPBACK,
			bfa_fn_lpu(fcdiag->bfa));

	lb_req->lb_mode = loopback->lb_mode;
	lb_req->speed = loopback->speed;
	lb_req->loopcnt = loopback->loopcnt;
	lb_req->pattern = loopback->pattern;

	/* ring door bell */
	bfa_reqq_produce(fcdiag->bfa, BFA_REQQ_DIAG, lb_req->mh);

	bfa_trc(fcdiag, loopback->lb_mode);
	bfa_trc(fcdiag, loopback->speed);
	bfa_trc(fcdiag, loopback->loopcnt);
	bfa_trc(fcdiag, loopback->pattern);
	return BFA_STATUS_OK;
}
  5065. /*
  5066. * cpe/rme intr handler
  5067. */
/*
 * DIAG I2H message dispatcher: routes loopback and queue-test
 * responses plus d-port responses/notifications to their handlers.
 */
void
bfa_fcdiag_intr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	switch (msg->mhdr.msg_id) {
	case BFI_DIAG_I2H_LOOPBACK:
		bfa_fcdiag_loopback_comp(fcdiag,
				(struct bfi_diag_lb_rsp_s *) msg);
		break;
	case BFI_DIAG_I2H_QTEST:
		bfa_fcdiag_queuetest_comp(fcdiag, (bfi_diag_qtest_rsp_t *)msg);
		break;
	case BFI_DIAG_I2H_DPORT:
		bfa_dport_req_comp(&fcdiag->dport,
				(struct bfi_diag_dport_rsp_s *)msg);
		break;
	case BFI_DIAG_I2H_DPORT_SCN:
		bfa_dport_scn(&fcdiag->dport,
				(struct bfi_diag_dport_scn_s *)msg);
		break;
	default:
		/* unexpected message id on the DIAG class */
		bfa_trc(fcdiag, msg->mhdr.msg_id);
		WARN_ON(1);
	}
}
  5093. /*
  5094. * Loopback test
  5095. *
  5096. * @param[in] *bfa - bfa data struct
  5097. * @param[in] opmode - port operation mode
  5098. * @param[in] speed - port speed
  5099. * @param[in] lpcnt - loop count
  5100. * @param[in] pat - pattern to build packet
  5101. * @param[in] *result - pt to bfa_diag_loopback_result_t data struct
  5102. * @param[in] cbfn - callback function
 * @param[in] cbarg - callback function arg
  5104. *
  5105. * @param[out]
  5106. */
/*
 * Validate and start a loopback test (parameters documented in the
 * block comment above).  Checks, in order: IOC operational, port not
 * PBC-disabled, port administratively disabled, speed supported by the
 * IOC type / ASIC / mezz card, port not in d-port mode, and no other
 * destructive diag command holding the loopback lock.
 */
bfa_status_t
bfa_fcdiag_loopback(struct bfa_s *bfa, enum bfa_port_opmode opmode,
		enum bfa_port_speed speed, u32 lpcnt, u32 pat,
		struct bfa_diag_loopback_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_diag_loopback_s loopback;
	struct bfa_port_attr_s attr;
	bfa_status_t status;
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);

	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* if port is PBC disabled, return error */
	if (bfa_fcport_is_pbcdisabled(bfa)) {
		bfa_trc(fcdiag, BFA_STATUS_PBC);
		return BFA_STATUS_PBC;
	}

	if (bfa_fcport_is_disabled(bfa) == BFA_FALSE) {
		bfa_trc(fcdiag, opmode);
		return BFA_STATUS_PORT_NOT_DISABLED;
	}

	/*
	 * Check if input speed is supported by the port mode
	 */
	if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
		if (!(speed == BFA_PORT_SPEED_1GBPS ||
		      speed == BFA_PORT_SPEED_2GBPS ||
		      speed == BFA_PORT_SPEED_4GBPS ||
		      speed == BFA_PORT_SPEED_8GBPS ||
		      speed == BFA_PORT_SPEED_16GBPS ||
		      speed == BFA_PORT_SPEED_AUTO)) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
		/* requested speed must not exceed what the port supports */
		bfa_fcport_get_attr(bfa, &attr);
		bfa_trc(fcdiag, attr.speed_supported);
		if (speed > attr.speed_supported)
			return BFA_STATUS_UNSUPP_SPEED;
	} else {
		/* non-FC IOC types accept only 10G here */
		if (speed != BFA_PORT_SPEED_10GBPS) {
			bfa_trc(fcdiag, speed);
			return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/*
	 * For CT2, 1G is not supported
	 */
	if ((speed == BFA_PORT_SPEED_1GBPS) &&
	    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id))) {
		bfa_trc(fcdiag, speed);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&bfa->ioc) == BFA_IOC_TYPE_FC) {
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	/* check to see if fcport is dport */
	if (bfa_fcport_is_dport(bfa)) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DPORT_ENABLED;
	}

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->lb.lock) {
		bfa_trc(fcdiag, fcdiag->lb.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* take the lock, record completion info, then fire the request */
	fcdiag->lb.lock = 1;
	loopback.lb_mode = opmode;
	loopback.speed = speed;
	loopback.loopcnt = lpcnt;
	loopback.pattern = pat;
	fcdiag->lb.result = result;
	fcdiag->lb.cbfn = cbfn;
	fcdiag->lb.cbarg = cbarg;
	memset(result, 0, sizeof(struct bfa_diag_loopback_result_s));
	bfa_fcdiag_set_busy_status(fcdiag);

	/* Send msg to fw */
	status = bfa_fcdiag_loopback_send(fcdiag, &loopback);
	return status;
}
  5198. /*
  5199. * DIAG queue test command
  5200. *
  5201. * @param[in] *bfa - bfa data struct
  5202. * @param[in] force - 1: don't do ioc op checking
  5203. * @param[in] queue - queue no. to test
  5204. * @param[in] *result - pt to bfa_diag_qtest_result_t data struct
  5205. * @param[in] cbfn - callback function
 * @param[in] *cbarg - callback function arg
  5207. *
  5208. * @param[out]
  5209. */
/*
 * Start the queue test (parameters documented in the block comment
 * above).  A queue number >= BFI_IOC_MAX_CQS selects "all" mode, which
 * tests every queue in turn starting from 0.  Each firmware round trip
 * is bounded by the BFA_DIAG_QTEST_TOV timer.
 */
bfa_status_t
bfa_fcdiag_queuetest(struct bfa_s *bfa, u32 force, u32 queue,
		struct bfa_diag_qtest_result_s *result, bfa_cb_diag_t cbfn,
		void *cbarg)
{
	struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
	bfa_status_t status;

	bfa_trc(fcdiag, force);
	bfa_trc(fcdiag, queue);

	if (!force && !bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_IOC_NON_OP;

	/* check to see if there is another destructive diag cmd running */
	if (fcdiag->qtest.lock) {
		bfa_trc(fcdiag, fcdiag->qtest.lock);
		return BFA_STATUS_DEVBUSY;
	}

	/* Initialization */
	fcdiag->qtest.lock = 1;
	fcdiag->qtest.cbfn = cbfn;
	fcdiag->qtest.cbarg = cbarg;
	fcdiag->qtest.result = result;
	fcdiag->qtest.count = QTEST_CNT_DEFAULT;

	/* Init test results */
	fcdiag->qtest.result->status = BFA_STATUS_OK;
	fcdiag->qtest.result->count = 0;

	/* send */
	if (queue < BFI_IOC_MAX_CQS) {
		/* single-queue mode */
		fcdiag->qtest.result->queue = (u8)queue;
		fcdiag->qtest.queue = (u8)queue;
		fcdiag->qtest.all = 0;
	} else {
		/* "all" mode: start at queue 0 */
		fcdiag->qtest.result->queue = 0;
		fcdiag->qtest.queue = 0;
		fcdiag->qtest.all = 1;
	}
	status = bfa_fcdiag_queuetest_send(fcdiag);

	/* Start a timer */
	if (status == BFA_STATUS_OK) {
		bfa_timer_start(bfa, &fcdiag->qtest.timer,
				bfa_fcdiag_queuetest_timeout, fcdiag,
				BFA_DIAG_QTEST_TOV);
		fcdiag->qtest.timer_active = 1;
	}
	return status;
}
  5255. /*
  5256. * DIAG PLB is running
  5257. *
  5258. * @param[in] *bfa - bfa data struct
  5259. *
  5260. * @param[out]
  5261. */
  5262. bfa_status_t
  5263. bfa_fcdiag_lb_is_running(struct bfa_s *bfa)
  5264. {
  5265. struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
  5266. return fcdiag->lb.lock ? BFA_STATUS_DIAG_BUSY : BFA_STATUS_OK;
  5267. }
  5268. /*
  5269. * D-port
  5270. */
  5271. #define bfa_dport_result_start(__dport, __mode) do { \
  5272. (__dport)->result.start_time = bfa_get_log_time(); \
  5273. (__dport)->result.status = DPORT_TEST_ST_INPRG; \
  5274. (__dport)->result.mode = (__mode); \
  5275. (__dport)->result.rp_pwwn = (__dport)->rp_pwwn; \
  5276. (__dport)->result.rp_nwwn = (__dport)->rp_nwwn; \
  5277. (__dport)->result.lpcnt = (__dport)->lpcnt; \
  5278. } while (0)
  5279. static bfa_boolean_t bfa_dport_send_req(struct bfa_dport_s *dport,
  5280. enum bfi_dport_req req);
  5281. static void
  5282. bfa_cb_fcdiag_dport(struct bfa_dport_s *dport, bfa_status_t bfa_status)
  5283. {
  5284. if (dport->cbfn != NULL) {
  5285. dport->cbfn(dport->cbarg, bfa_status);
  5286. dport->cbfn = NULL;
  5287. dport->cbarg = NULL;
  5288. }
  5289. }
/*
 * Disabled state: d-port is off.
 *
 * ENABLE kicks off the firmware enable request (qwait variant when the
 * request queue is full).  A DDPORT_ENABLE SCN switches directly to the
 * enabled state in dynamic d-port mode.
 */
static void
bfa_dport_sm_disabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_ENABLE:
		bfa_fcport_dportenable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_ENABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_enabling_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		/* Already disabled */
		break;

	case BFA_DPORT_SM_HWFAIL:
		/* ignore */
		break;

	case BFA_DPORT_SM_SCN:
		if (dport->i2hmsg.scn.state == BFI_DPORT_SCN_DDPORT_ENABLE) {
			bfa_fcport_ddportenable(dport->bfa);
			dport->dynamic = BFA_TRUE;
			dport->test_state = BFA_DPORT_ST_NOTSTART;
			bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		} else {
			/* only DDPORT_ENABLE is valid in this state */
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			WARN_ON(1);
		}
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Waiting for request-queue space to send the enable request.
 */
static void
bfa_dport_sm_enabling_qwait(struct bfa_dport_s *dport,
			enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* queue space available: retry the enable */
		bfa_sm_set_state(dport, bfa_dport_sm_enabling);
		bfa_dport_send_req(dport, BFI_DPORT_ENABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Enable request sent; waiting for the firmware response.  On FWRSP
 * the result is (re)initialized and the state moves to enabled;
 * REQFAIL rolls the fcport back to disabled.
 */
static void
bfa_dport_sm_enabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_AUTO);
		}
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_REQFAIL:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_fcport_dportdisable(dport->bfa);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * D-port enabled: accept a manual test START, a DISABLE request, and
 * firmware SCNs tracking test progress, SFP presence and dynamic
 * d-port teardown.
 */
static void
bfa_dport_sm_enabled(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_START:
		if (bfa_dport_send_req(dport, BFI_DPORT_START))
			bfa_sm_set_state(dport, bfa_dport_sm_starting);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_starting_qwait);
		break;

	case BFA_DPORT_SM_DISABLE:
		bfa_fcport_dportdisable(dport->bfa);
		if (bfa_dport_send_req(dport, BFI_DPORT_DISABLE))
			bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		else
			bfa_sm_set_state(dport, bfa_dport_sm_disabling_qwait);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_TESTCOMP:
			dport->test_state = BFA_DPORT_ST_COMP;
			break;

		case BFI_DPORT_SCN_TESTSTART:
			dport->test_state = BFA_DPORT_ST_INP;
			break;

		case BFI_DPORT_SCN_TESTSKIP:
		case BFI_DPORT_SCN_SUBTESTSTART:
			/* no state change */
			break;

		case BFI_DPORT_SCN_SFP_REMOVED:
			dport->test_state = BFA_DPORT_ST_NO_SFP;
			break;

		case BFI_DPORT_SCN_DDPORT_DISABLE:
			/* firmware requests dynamic d-port teardown */
			bfa_fcport_ddportdisable(dport->bfa);

			if (bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE))
				bfa_sm_set_state(dport,
					bfa_dport_sm_dynamic_disabling);
			else
				bfa_sm_set_state(dport,
					bfa_dport_sm_dynamic_disabling_qwait);
			break;

		case BFI_DPORT_SCN_FCPORT_DISABLE:
			bfa_fcport_ddportdisable(dport->bfa);

			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;
	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Waiting for request-queue space to send the disable request.
 */
static void
bfa_dport_sm_disabling_qwait(struct bfa_dport_s *dport,
			enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* queue space available: retry the disable */
		bfa_sm_set_state(dport, bfa_dport_sm_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		/* disable is effectively done once the IOC is down */
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Disable request sent; waiting for the firmware response.
 */
static void
bfa_dport_sm_disabling(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		dport->test_state = BFA_DPORT_ST_DISABLED;
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		/* disable is effectively done once the IOC is down */
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* no state change */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Waiting for request-queue space to send the test-start request.
 */
static void
bfa_dport_sm_starting_qwait(struct bfa_dport_s *dport,
			enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* queue space available: retry the start */
		bfa_sm_set_state(dport, bfa_dport_sm_starting);
		bfa_dport_send_req(dport, BFI_DPORT_START);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Test-start request sent; waiting for the firmware response.  Both
 * FWRSP and REQFAIL return to enabled (FWRSP additionally records the
 * new test state before falling through).
 */
static void
bfa_dport_sm_starting(struct bfa_dport_s *dport, enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_FWRSP:
		memset(&dport->result, 0,
				sizeof(struct bfa_diag_dport_result_s));
		if (dport->i2hmsg.rsp.status == BFA_STATUS_DPORT_INV_SFP) {
			dport->test_state = BFA_DPORT_ST_NO_SFP;
		} else {
			dport->test_state = BFA_DPORT_ST_INP;
			bfa_dport_result_start(dport, BFA_DPORT_OPMODE_MANU);
		}
		/* fall thru */

	case BFA_DPORT_SM_REQFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_enabled);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_FAILED);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Dynamic-disable request sent; waiting for the DDPORT_DISABLED SCN
 * from firmware, after which the fcport is re-enabled.
 */
static void
bfa_dport_sm_dynamic_disabling(struct bfa_dport_s *dport,
			       enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_SCN:
		switch (dport->i2hmsg.scn.state) {
		case BFI_DPORT_SCN_DDPORT_DISABLED:
			bfa_sm_set_state(dport, bfa_dport_sm_disabled);
			dport->dynamic = BFA_FALSE;
			/* restore normal port operation */
			bfa_fcport_enable(dport->bfa);
			break;

		default:
			bfa_trc(dport->bfa, dport->i2hmsg.scn.state);
			bfa_sm_fault(dport->bfa, event);
		}
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
/*
 * Waiting for request-queue space to send the dynamic-disable request.
 */
static void
bfa_dport_sm_dynamic_disabling_qwait(struct bfa_dport_s *dport,
			    enum bfa_dport_sm_event event)
{
	bfa_trc(dport->bfa, event);

	switch (event) {
	case BFA_DPORT_SM_QRESUME:
		/* queue space available: retry the dynamic disable */
		bfa_sm_set_state(dport, bfa_dport_sm_dynamic_disabling);
		bfa_dport_send_req(dport, BFI_DPORT_DYN_DISABLE);
		break;

	case BFA_DPORT_SM_HWFAIL:
		bfa_sm_set_state(dport, bfa_dport_sm_disabled);
		bfa_reqq_wcancel(&dport->reqq_wait);
		bfa_cb_fcdiag_dport(dport, BFA_STATUS_OK);
		break;

	case BFA_DPORT_SM_SCN:
		/* ignore */
		break;

	default:
		bfa_sm_fault(dport->bfa, event);
	}
}
  5565. static bfa_boolean_t
  5566. bfa_dport_send_req(struct bfa_dport_s *dport, enum bfi_dport_req req)
  5567. {
  5568. struct bfi_diag_dport_req_s *m;
  5569. /*
  5570. * check for room in queue to send request now
  5571. */
  5572. m = bfa_reqq_next(dport->bfa, BFA_REQQ_DIAG);
  5573. if (!m) {
  5574. bfa_reqq_wait(dport->bfa, BFA_REQQ_PORT, &dport->reqq_wait);
  5575. return BFA_FALSE;
  5576. }
  5577. bfi_h2i_set(m->mh, BFI_MC_DIAG, BFI_DIAG_H2I_DPORT,
  5578. bfa_fn_lpu(dport->bfa));
  5579. m->req = req;
  5580. if ((req == BFI_DPORT_ENABLE) || (req == BFI_DPORT_START)) {
  5581. m->lpcnt = cpu_to_be32(dport->lpcnt);
  5582. m->payload = cpu_to_be32(dport->payload);
  5583. }
  5584. /*
  5585. * queue I/O message to firmware
  5586. */
  5587. bfa_reqq_produce(dport->bfa, BFA_REQQ_DIAG, m->mh);
  5588. return BFA_TRUE;
  5589. }
/*
 * Request-queue resume callback: retry the pending d-port request by
 * feeding QRESUME to the state machine.
 */
static void
bfa_dport_qresume(void *cbarg)
{
	struct bfa_dport_s *dport = cbarg;

	bfa_sm_send_event(dport, BFA_DPORT_SM_QRESUME);
}
  5596. static void
  5597. bfa_dport_req_comp(struct bfa_dport_s *dport, struct bfi_diag_dport_rsp_s *msg)
  5598. {
  5599. msg->status = cpu_to_be32(msg->status);
  5600. dport->i2hmsg.rsp.status = msg->status;
  5601. dport->rp_pwwn = msg->pwwn;
  5602. dport->rp_nwwn = msg->nwwn;
  5603. if ((msg->status == BFA_STATUS_OK) ||
  5604. (msg->status == BFA_STATUS_DPORT_NO_SFP)) {
  5605. bfa_trc(dport->bfa, msg->status);
  5606. bfa_trc(dport->bfa, dport->rp_pwwn);
  5607. bfa_trc(dport->bfa, dport->rp_nwwn);
  5608. bfa_sm_send_event(dport, BFA_DPORT_SM_FWRSP);
  5609. } else {
  5610. bfa_trc(dport->bfa, msg->status);
  5611. bfa_sm_send_event(dport, BFA_DPORT_SM_REQFAIL);
  5612. }
  5613. bfa_cb_fcdiag_dport(dport, msg->status);
  5614. }
  5615. static bfa_boolean_t
  5616. bfa_dport_is_sending_req(struct bfa_dport_s *dport)
  5617. {
  5618. if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabling) ||
  5619. bfa_sm_cmp_state(dport, bfa_dport_sm_enabling_qwait) ||
  5620. bfa_sm_cmp_state(dport, bfa_dport_sm_disabling) ||
  5621. bfa_sm_cmp_state(dport, bfa_dport_sm_disabling_qwait) ||
  5622. bfa_sm_cmp_state(dport, bfa_dport_sm_starting) ||
  5623. bfa_sm_cmp_state(dport, bfa_dport_sm_starting_qwait)) {
  5624. return BFA_TRUE;
  5625. } else {
  5626. return BFA_FALSE;
  5627. }
  5628. }
  5629. static void
  5630. bfa_dport_scn(struct bfa_dport_s *dport, struct bfi_diag_dport_scn_s *msg)
  5631. {
  5632. int i;
  5633. uint8_t subtesttype;
  5634. bfa_trc(dport->bfa, msg->state);
  5635. dport->i2hmsg.scn.state = msg->state;
  5636. switch (dport->i2hmsg.scn.state) {
  5637. case BFI_DPORT_SCN_TESTCOMP:
  5638. dport->result.end_time = bfa_get_log_time();
  5639. bfa_trc(dport->bfa, dport->result.end_time);
  5640. dport->result.status = msg->info.testcomp.status;
  5641. bfa_trc(dport->bfa, dport->result.status);
  5642. dport->result.roundtrip_latency =
  5643. cpu_to_be32(msg->info.testcomp.latency);
  5644. dport->result.est_cable_distance =
  5645. cpu_to_be32(msg->info.testcomp.distance);
  5646. dport->result.buffer_required =
  5647. be16_to_cpu(msg->info.testcomp.numbuffer);
  5648. dport->result.frmsz = be16_to_cpu(msg->info.testcomp.frm_sz);
  5649. dport->result.speed = msg->info.testcomp.speed;
  5650. bfa_trc(dport->bfa, dport->result.roundtrip_latency);
  5651. bfa_trc(dport->bfa, dport->result.est_cable_distance);
  5652. bfa_trc(dport->bfa, dport->result.buffer_required);
  5653. bfa_trc(dport->bfa, dport->result.frmsz);
  5654. bfa_trc(dport->bfa, dport->result.speed);
  5655. for (i = DPORT_TEST_ELOOP; i < DPORT_TEST_MAX; i++) {
  5656. dport->result.subtest[i].status =
  5657. msg->info.testcomp.subtest_status[i];
  5658. bfa_trc(dport->bfa, dport->result.subtest[i].status);
  5659. }
  5660. break;
  5661. case BFI_DPORT_SCN_TESTSKIP:
  5662. case BFI_DPORT_SCN_DDPORT_ENABLE:
  5663. memset(&dport->result, 0,
  5664. sizeof(struct bfa_diag_dport_result_s));
  5665. break;
  5666. case BFI_DPORT_SCN_TESTSTART:
  5667. memset(&dport->result, 0,
  5668. sizeof(struct bfa_diag_dport_result_s));
  5669. dport->rp_pwwn = msg->info.teststart.pwwn;
  5670. dport->rp_nwwn = msg->info.teststart.nwwn;
  5671. dport->lpcnt = cpu_to_be32(msg->info.teststart.numfrm);
  5672. bfa_dport_result_start(dport, msg->info.teststart.mode);
  5673. break;
  5674. case BFI_DPORT_SCN_SUBTESTSTART:
  5675. subtesttype = msg->info.teststart.type;
  5676. dport->result.subtest[subtesttype].start_time =
  5677. bfa_get_log_time();
  5678. dport->result.subtest[subtesttype].status =
  5679. DPORT_TEST_ST_INPRG;
  5680. bfa_trc(dport->bfa, subtesttype);
  5681. bfa_trc(dport->bfa,
  5682. dport->result.subtest[subtesttype].start_time);
  5683. break;
  5684. case BFI_DPORT_SCN_SFP_REMOVED:
  5685. case BFI_DPORT_SCN_DDPORT_DISABLED:
  5686. case BFI_DPORT_SCN_DDPORT_DISABLE:
  5687. case BFI_DPORT_SCN_FCPORT_DISABLE:
  5688. dport->result.status = DPORT_TEST_ST_IDLE;
  5689. break;
  5690. default:
  5691. bfa_sm_fault(dport->bfa, msg->state);
  5692. }
  5693. bfa_sm_send_event(dport, BFA_DPORT_SM_SCN);
  5694. }
  5695. /*
  5696. * Dport enable
  5697. *
  5698. * @param[in] *bfa - bfa data struct
  5699. */
  5700. bfa_status_t
  5701. bfa_dport_enable(struct bfa_s *bfa, u32 lpcnt, u32 pat,
  5702. bfa_cb_diag_t cbfn, void *cbarg)
  5703. {
  5704. struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
  5705. struct bfa_dport_s *dport = &fcdiag->dport;
  5706. /*
  5707. * Dport is not support in MEZZ card
  5708. */
  5709. if (bfa_mfg_is_mezz(dport->bfa->ioc.attr->card_type)) {
  5710. bfa_trc(dport->bfa, BFA_STATUS_PBC);
  5711. return BFA_STATUS_CMD_NOTSUPP_MEZZ;
  5712. }
  5713. /*
  5714. * Dport is supported in CT2 or above
  5715. */
  5716. if (!(bfa_asic_id_ct2(dport->bfa->ioc.pcidev.device_id))) {
  5717. bfa_trc(dport->bfa, dport->bfa->ioc.pcidev.device_id);
  5718. return BFA_STATUS_FEATURE_NOT_SUPPORTED;
  5719. }
  5720. /*
  5721. * Check to see if IOC is down
  5722. */
  5723. if (!bfa_iocfc_is_operational(bfa))
  5724. return BFA_STATUS_IOC_NON_OP;
  5725. /* if port is PBC disabled, return error */
  5726. if (bfa_fcport_is_pbcdisabled(bfa)) {
  5727. bfa_trc(dport->bfa, BFA_STATUS_PBC);
  5728. return BFA_STATUS_PBC;
  5729. }
  5730. /*
  5731. * Check if port mode is FC port
  5732. */
  5733. if (bfa_ioc_get_type(&bfa->ioc) != BFA_IOC_TYPE_FC) {
  5734. bfa_trc(dport->bfa, bfa_ioc_get_type(&bfa->ioc));
  5735. return BFA_STATUS_CMD_NOTSUPP_CNA;
  5736. }
  5737. /*
  5738. * Check if port is in LOOP mode
  5739. */
  5740. if ((bfa_fcport_get_cfg_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP) ||
  5741. (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)) {
  5742. bfa_trc(dport->bfa, 0);
  5743. return BFA_STATUS_TOPOLOGY_LOOP;
  5744. }
  5745. /*
  5746. * Check if port is TRUNK mode
  5747. */
  5748. if (bfa_fcport_is_trunk_enabled(bfa)) {
  5749. bfa_trc(dport->bfa, 0);
  5750. return BFA_STATUS_ERROR_TRUNK_ENABLED;
  5751. }
  5752. /*
  5753. * Check if diag loopback is running
  5754. */
  5755. if (bfa_fcdiag_lb_is_running(bfa)) {
  5756. bfa_trc(dport->bfa, 0);
  5757. return BFA_STATUS_DIAG_BUSY;
  5758. }
  5759. /*
  5760. * Check to see if port is disable or in dport state
  5761. */
  5762. if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
  5763. (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
  5764. bfa_trc(dport->bfa, 0);
  5765. return BFA_STATUS_PORT_NOT_DISABLED;
  5766. }
  5767. /*
  5768. * Check if dport is in dynamic mode
  5769. */
  5770. if (dport->dynamic)
  5771. return BFA_STATUS_DDPORT_ERR;
  5772. /*
  5773. * Check if dport is busy
  5774. */
  5775. if (bfa_dport_is_sending_req(dport))
  5776. return BFA_STATUS_DEVBUSY;
  5777. /*
  5778. * Check if dport is already enabled
  5779. */
  5780. if (bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
  5781. bfa_trc(dport->bfa, 0);
  5782. return BFA_STATUS_DPORT_ENABLED;
  5783. }
  5784. bfa_trc(dport->bfa, lpcnt);
  5785. bfa_trc(dport->bfa, pat);
  5786. dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
  5787. dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
  5788. dport->cbfn = cbfn;
  5789. dport->cbarg = cbarg;
  5790. bfa_sm_send_event(dport, BFA_DPORT_SM_ENABLE);
  5791. return BFA_STATUS_OK;
  5792. }
  5793. /*
  5794. * Dport disable
  5795. *
  5796. * @param[in] *bfa - bfa data struct
  5797. */
  5798. bfa_status_t
  5799. bfa_dport_disable(struct bfa_s *bfa, bfa_cb_diag_t cbfn, void *cbarg)
  5800. {
  5801. struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
  5802. struct bfa_dport_s *dport = &fcdiag->dport;
  5803. if (bfa_ioc_is_disabled(&bfa->ioc))
  5804. return BFA_STATUS_IOC_DISABLED;
  5805. /* if port is PBC disabled, return error */
  5806. if (bfa_fcport_is_pbcdisabled(bfa)) {
  5807. bfa_trc(dport->bfa, BFA_STATUS_PBC);
  5808. return BFA_STATUS_PBC;
  5809. }
  5810. /*
  5811. * Check if dport is in dynamic mode
  5812. */
  5813. if (dport->dynamic) {
  5814. return BFA_STATUS_DDPORT_ERR;
  5815. }
  5816. /*
  5817. * Check to see if port is disable or in dport state
  5818. */
  5819. if ((bfa_fcport_is_disabled(bfa) == BFA_FALSE) &&
  5820. (bfa_fcport_is_dport(bfa) == BFA_FALSE)) {
  5821. bfa_trc(dport->bfa, 0);
  5822. return BFA_STATUS_PORT_NOT_DISABLED;
  5823. }
  5824. /*
  5825. * Check if dport is busy
  5826. */
  5827. if (bfa_dport_is_sending_req(dport))
  5828. return BFA_STATUS_DEVBUSY;
  5829. /*
  5830. * Check if dport is already disabled
  5831. */
  5832. if (bfa_sm_cmp_state(dport, bfa_dport_sm_disabled)) {
  5833. bfa_trc(dport->bfa, 0);
  5834. return BFA_STATUS_DPORT_DISABLED;
  5835. }
  5836. dport->cbfn = cbfn;
  5837. dport->cbarg = cbarg;
  5838. bfa_sm_send_event(dport, BFA_DPORT_SM_DISABLE);
  5839. return BFA_STATUS_OK;
  5840. }
  5841. /*
  5842. * Dport start -- restart dport test
  5843. *
  5844. * @param[in] *bfa - bfa data struct
  5845. */
  5846. bfa_status_t
  5847. bfa_dport_start(struct bfa_s *bfa, u32 lpcnt, u32 pat,
  5848. bfa_cb_diag_t cbfn, void *cbarg)
  5849. {
  5850. struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
  5851. struct bfa_dport_s *dport = &fcdiag->dport;
  5852. /*
  5853. * Check to see if IOC is down
  5854. */
  5855. if (!bfa_iocfc_is_operational(bfa))
  5856. return BFA_STATUS_IOC_NON_OP;
  5857. /*
  5858. * Check if dport is in dynamic mode
  5859. */
  5860. if (dport->dynamic)
  5861. return BFA_STATUS_DDPORT_ERR;
  5862. /*
  5863. * Check if dport is busy
  5864. */
  5865. if (bfa_dport_is_sending_req(dport))
  5866. return BFA_STATUS_DEVBUSY;
  5867. /*
  5868. * Check if dport is in enabled state.
  5869. * Test can only be restart when previous test has completed
  5870. */
  5871. if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
  5872. bfa_trc(dport->bfa, 0);
  5873. return BFA_STATUS_DPORT_DISABLED;
  5874. } else {
  5875. if (dport->test_state == BFA_DPORT_ST_NO_SFP)
  5876. return BFA_STATUS_DPORT_INV_SFP;
  5877. if (dport->test_state == BFA_DPORT_ST_INP)
  5878. return BFA_STATUS_DEVBUSY;
  5879. WARN_ON(dport->test_state != BFA_DPORT_ST_COMP);
  5880. }
  5881. bfa_trc(dport->bfa, lpcnt);
  5882. bfa_trc(dport->bfa, pat);
  5883. dport->lpcnt = (lpcnt) ? lpcnt : DPORT_ENABLE_LOOPCNT_DEFAULT;
  5884. dport->payload = (pat) ? pat : LB_PATTERN_DEFAULT;
  5885. dport->cbfn = cbfn;
  5886. dport->cbarg = cbarg;
  5887. bfa_sm_send_event(dport, BFA_DPORT_SM_START);
  5888. return BFA_STATUS_OK;
  5889. }
  5890. /*
  5891. * Dport show -- return dport test result
  5892. *
  5893. * @param[in] *bfa - bfa data struct
  5894. */
  5895. bfa_status_t
  5896. bfa_dport_show(struct bfa_s *bfa, struct bfa_diag_dport_result_s *result)
  5897. {
  5898. struct bfa_fcdiag_s *fcdiag = BFA_FCDIAG_MOD(bfa);
  5899. struct bfa_dport_s *dport = &fcdiag->dport;
  5900. /*
  5901. * Check to see if IOC is down
  5902. */
  5903. if (!bfa_iocfc_is_operational(bfa))
  5904. return BFA_STATUS_IOC_NON_OP;
  5905. /*
  5906. * Check if dport is busy
  5907. */
  5908. if (bfa_dport_is_sending_req(dport))
  5909. return BFA_STATUS_DEVBUSY;
  5910. /*
  5911. * Check if dport is in enabled state.
  5912. */
  5913. if (!bfa_sm_cmp_state(dport, bfa_dport_sm_enabled)) {
  5914. bfa_trc(dport->bfa, 0);
  5915. return BFA_STATUS_DPORT_DISABLED;
  5916. }
  5917. /*
  5918. * Check if there is SFP
  5919. */
  5920. if (dport->test_state == BFA_DPORT_ST_NO_SFP)
  5921. return BFA_STATUS_DPORT_INV_SFP;
  5922. memcpy(result, &dport->result, sizeof(struct bfa_diag_dport_result_s));
  5923. return BFA_STATUS_OK;
  5924. }