/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"

BFA_TRC_FILE(HAL, FCPIM);

/*
 * BFA ITNIM Related definitions
 */
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);
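
/*
 * Helper macros: look up an itnim from its firmware tag (the tag is masked
 * with num_itnims - 1 to index the itnim array), and add/remove an itnim on
 * the fcpim itnim queue. Removal also folds the itnim's statistics into the
 * module-wide deleted-itnim counters and sanity-checks that no I/O is still
 * queued against it.
 */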
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)	\
	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))

#define bfa_fcpim_additn(__itnim)	\
	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)

#define bfa_fcpim_delitn(__itnim) do {	\
	WARN_ON(!bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));	\
	bfa_itnim_update_del_itn_stats(__itnim);	\
	list_del(&(__itnim)->qe);	\
	WARN_ON(!list_empty(&(__itnim)->io_q));	\
	WARN_ON(!list_empty(&(__itnim)->io_cleanup_q));	\
	WARN_ON(!list_empty(&(__itnim)->pending_q));	\
} while (0)

#define bfa_itnim_online_cb(__itnim) do {	\
	if ((__itnim)->bfa->fcs)	\
		bfa_cb_itnim_online((__itnim)->ditn);	\
	else {	\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_online, (__itnim));	\
	}	\
} while (0)

#define bfa_itnim_offline_cb(__itnim) do {	\
	if ((__itnim)->bfa->fcs)	\
		bfa_cb_itnim_offline((__itnim)->ditn);	\
	else {	\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_offline, (__itnim));	\
	}	\
} while (0)

#define bfa_itnim_sler_cb(__itnim) do {	\
	if ((__itnim)->bfa->fcs)	\
		bfa_cb_itnim_sler((__itnim)->ditn);	\
	else {	\
		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
		__bfa_cb_itnim_sler, (__itnim));	\
	}	\
} while (0)

enum bfa_ioim_lm_ua_status {
	BFA_IOIM_LM_UA_RESET = 0,
	BFA_IOIM_LM_UA_SET = 1,
};

/*
 * itnim state machine event
 */
enum bfa_itnim_event {
	BFA_ITNIM_SM_CREATE = 1,	/* itnim is created */
	BFA_ITNIM_SM_ONLINE = 2,	/* itnim is online */
	BFA_ITNIM_SM_OFFLINE = 3,	/* itnim is offline */
	BFA_ITNIM_SM_FWRSP = 4,		/* firmware response */
	BFA_ITNIM_SM_DELETE = 5,	/* deleting an existing itnim */
	BFA_ITNIM_SM_CLEANUP = 6,	/* IO cleanup completion */
	BFA_ITNIM_SM_SLER = 7,		/* second level error recovery */
	BFA_ITNIM_SM_HWFAIL = 8,	/* IOC h/w failure event */
	BFA_ITNIM_SM_QRESUME = 9,	/* queue space available */
};

/*
 * BFA IOIM related definitions
 */
#define bfa_ioim_move_to_comp_q(__ioim) do {	\
	list_del(&(__ioim)->qe);	\
	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
} while (0)

#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {	\
	if ((__fcpim)->profile_comp)	\
		(__fcpim)->profile_comp(__ioim);	\
} while (0)

#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {	\
	if ((__fcpim)->profile_start)	\
		(__fcpim)->profile_start(__ioim);	\
} while (0)

/*
 * IO state machine events
 */
enum bfa_ioim_event {
	BFA_IOIM_SM_START = 1,		/* io start request from host */
	BFA_IOIM_SM_COMP_GOOD = 2,	/* io good comp, resource free */
	BFA_IOIM_SM_COMP = 3,		/* io comp, resource is free */
	BFA_IOIM_SM_COMP_UTAG = 4,	/* io comp, resource is free */
	BFA_IOIM_SM_DONE = 5,		/* io comp, resource not free */
	BFA_IOIM_SM_FREE = 6,		/* io resource is freed */
	BFA_IOIM_SM_ABORT = 7,		/* abort request from scsi stack */
	BFA_IOIM_SM_ABORT_COMP = 8,	/* abort from f/w */
	BFA_IOIM_SM_ABORT_DONE = 9,	/* abort completion from f/w */
	BFA_IOIM_SM_QRESUME = 10,	/* CQ space available to queue IO */
	BFA_IOIM_SM_SGALLOCED = 11,	/* SG page allocation successful */
	BFA_IOIM_SM_SQRETRY = 12,	/* sequence recovery retry */
	BFA_IOIM_SM_HCB = 13,		/* bfa callback complete */
	BFA_IOIM_SM_CLEANUP = 14,	/* IO cleanup from itnim */
	BFA_IOIM_SM_TMSTART = 15,	/* IO cleanup from tskim */
	BFA_IOIM_SM_TMDONE = 16,	/* IO cleanup from tskim */
	BFA_IOIM_SM_HWFAIL = 17,	/* IOC h/w failure event */
	BFA_IOIM_SM_IOTOV = 18,		/* ITN offline TOV */
};

/*
 * BFA TSKIM related definitions
 */

/*
 * task management completion handling
 */
#define bfa_tskim_qcomp(__tskim, __cbfn) do {	\
	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
	bfa_tskim_notify_comp(__tskim);	\
} while (0)

#define bfa_tskim_notify_comp(__tskim) do {	\
	if ((__tskim)->notify)	\
		bfa_itnim_tskdone((__tskim)->itnim);	\
} while (0)

enum bfa_tskim_event {
	BFA_TSKIM_SM_START = 1,		/* TM command start */
	BFA_TSKIM_SM_DONE = 2,		/* TM completion */
	BFA_TSKIM_SM_QRESUME = 3,	/* resume after qfull */
	BFA_TSKIM_SM_HWFAIL = 5,	/* IOC h/w failure event */
	BFA_TSKIM_SM_HCB = 6,		/* BFA callback completion */
	BFA_TSKIM_SM_IOS_DONE = 7,	/* IO and sub TM completions */
	BFA_TSKIM_SM_CLEANUP = 8,	/* TM cleanup on ITN offline */
	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/* TM abort completion */
	BFA_TSKIM_SM_UTAG = 10,		/* TM completion unknown tag */
};

/*
 * forward declaration for BFA ITNIM functions
 */
static void bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
static void bfa_itnim_cleanp_comp(void *itnim_cbarg);
static void bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
static void __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
static void bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov(void *itnim_arg);
static void bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
static void bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);

/*
 * forward declaration of ITNIM state machine
 */
static void bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);
static void bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event);

/*
 * forward declaration for BFA IOIM functions
 */
static bfa_boolean_t bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim);
static bfa_boolean_t bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
static void bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);

/*
 * forward declaration of BFA IO state machine
 */
static void bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);
static void bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
		enum bfa_ioim_event event);

/*
 * forward declaration for BFA TSKIM functions
 */
static void __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
		struct scsi_lun lun);
static void bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
static void bfa_tskim_cleanp_comp(void *tskim_cbarg);
static void bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
static void bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);

/*
 * forward declaration of BFA TSKIM state machine
 */
static void bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);
static void bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
		enum bfa_tskim_event event);

/*
 * BFA FCP Initiator Mode module
 */

/*
 * Compute and return memory needed by FCP(im) module.
 */
static void
bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	bfa_itnim_meminfo(cfg, km_len);

	/*
	 * IO memory
	 */
	*km_len += cfg->fwcfg.num_ioim_reqs *
		(sizeof(struct bfa_ioim_s) + sizeof(struct bfa_ioim_sp_s));

	/*
	 * task management command memory
	 */
	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
		cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	*km_len += cfg->fwcfg.num_tskim_reqs * sizeof(struct bfa_tskim_s);
}
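
/*
 * Attach-time initialization: wire up the fcpim back-pointers, cache the
 * firmware/driver configuration limits, and attach the itnim, tskim and
 * ioim sub-modules.
 */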
static void
bfa_fcpim_attach(struct bfa_fcp_mod_s *fcp, void *bfad,
		struct bfa_iocfc_cfg_s *cfg, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_s *bfa = fcp->bfa;

	bfa_trc(bfa, cfg->drvcfg.path_tov);
	bfa_trc(bfa, cfg->fwcfg.num_rports);
	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);

	fcpim->fcp = fcp;
	fcpim->bfa = bfa;
	fcpim->num_itnims = cfg->fwcfg.num_rports;
	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
	fcpim->path_tov = cfg->drvcfg.path_tov;
	fcpim->delay_comp = cfg->drvcfg.delay_comp;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;

	bfa_itnim_attach(fcpim);
	bfa_tskim_attach(fcpim);
	bfa_ioim_attach(fcpim);
}
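
/*
 * IOC disable handling: reclaim unused task-management resources back to
 * the free list and propagate the h/w failure event to every itnim.
 */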
static void
bfa_fcpim_iocdisable(struct bfa_fcp_mod_s *fcp)
{
	struct bfa_fcpim_s *fcpim = &fcp->fcpim;
	struct bfa_itnim_s *itnim;
	struct list_head *qe, *qen;

	/* Enqueue unused tskim resources to free_q */
	list_splice_tail_init(&fcpim->tskim_unused_q, &fcpim->tskim_free_q);

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_iocdisable(itnim);
	}
}

void
bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->path_tov = path_tov * 1000;
	if (fcpim->path_tov > BFA_FCPIM_PATHTOV_MAX)
		fcpim->path_tov = BFA_FCPIM_PATHTOV_MAX;
}

u16
bfa_fcpim_path_tov_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->path_tov / 1000;
}
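
/*
 * bfa_fcpim_add_stats() accumulates every counter of the right-hand stats
 * block into the left-hand block, one field at a time, using the helper
 * macro below.
 */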
#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
	(__l->__stats += __r->__stats)

void
bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
		struct bfa_itnim_iostats_s *rstats)
{
	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
	bfa_fcpim_add_iostats(lstats, rstats, onlines);
	bfa_fcpim_add_iostats(lstats, rstats, offlines);
	bfa_fcpim_add_iostats(lstats, rstats, creates);
	bfa_fcpim_add_iostats(lstats, rstats, deletes);
	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
}

bfa_status_t
bfa_fcpim_port_iostats(struct bfa_s *bfa,
		struct bfa_itnim_iostats_s *stats, u8 lp_tag)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;

	/* accumulate IO stats from itnim */
	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != lp_tag)
			continue;
		bfa_fcpim_add_stats(stats, &(itnim->stats));
	}
	return BFA_STATUS_OK;
}
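
/*
 * I/O profiling completion hook: bucket the request by transfer size and
 * fold the elapsed time (in jiffies) into that bucket's count, min, max
 * and running sum (used for the average).
 */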
void
bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
{
	struct bfa_itnim_latency_s *io_lat =
			&(ioim->itnim->ioprofile.io_latency);
	u32 val, idx;

	val = (u32)(jiffies - ioim->start_time);
	idx = bfa_ioim_get_index(scsi_bufflen((struct scsi_cmnd *)ioim->dio));
	bfa_itnim_ioprofile_update(ioim->itnim, idx);

	io_lat->count[idx]++;
	io_lat->min[idx] = (io_lat->min[idx] < val) ? io_lat->min[idx] : val;
	io_lat->max[idx] = (io_lat->max[idx] > val) ? io_lat->max[idx] : val;
	io_lat->avg[idx] += val;
}

void
bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
{
	ioim->start_time = jiffies;
}
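
/*
 * Enable I/O profiling: reset per-itnim statistics, record the caller's
 * start time and install the profiling hooks invoked at I/O start and
 * completion.
 */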
bfa_status_t
bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
{
	struct bfa_itnim_s *itnim;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
	struct list_head *qe, *qen;

	/* clear accumulated IO stats on each itnim before profiling starts */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}

	fcpim->io_profile = BFA_TRUE;
	fcpim->io_profile_start_time = time;
	fcpim->profile_comp = bfa_ioim_profile_comp;
	fcpim->profile_start = bfa_ioim_profile_start;
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_fcpim_profile_off(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	fcpim->io_profile = BFA_FALSE;
	fcpim->io_profile_start_time = 0;
	fcpim->profile_comp = NULL;
	fcpim->profile_start = NULL;
	return BFA_STATUS_OK;
}

u16
bfa_fcpim_qdepth_get(struct bfa_s *bfa)
{
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);

	return fcpim->q_depth;
}

/*
 * BFA ITNIM module state machine functions
 */
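
/*
 * Normal lifecycle, as implemented by the handlers below:
 * uninit -> created -> fwcreate[_qfull] -> online -> cleanup_offline ->
 * fwdelete[_qfull] -> offline, with iocdisable entered on IOC h/w failure
 * and delete_pending/deleting[_qfull] used when the itnim is deleted
 * mid-transition.
 */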

/*
 * Beginning/unallocated state - no events expected.
 */
static void
bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CREATE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
		itnim->is_online = BFA_FALSE;
		bfa_fcpim_additn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Beginning state, only online event expected.
 */
static void
bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware.
 */
static void
bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
		itnim->is_online = BFA_TRUE;
		bfa_itnim_iotov_online(itnim);
		bfa_itnim_online_cb(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
		break;
	case BFA_ITNIM_SM_OFFLINE:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		bfa_itnim_send_fwcreate(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Waiting for itnim create response from firmware, a delete is pending.
 */
static void
bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Online state - normal parking state.
 */
static void
bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_cleanup(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_cleanup(itnim);
		break;
	case BFA_ITNIM_SM_SLER:
		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_sler_cb(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		itnim->is_online = BFA_FALSE;
		bfa_itnim_iotov_start(itnim);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Second level error recovery is needed.
 */
static void
bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_OFFLINE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
		bfa_itnim_cleanup(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_cleanup(itnim);
		bfa_itnim_iotov_delete(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Going offline. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
		bfa_itnim_iotov_delete(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_SLER:
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Deleting itnim. Waiting for active IO cleanup.
 */
static void
bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_CLEANUP:
		if (bfa_itnim_send_fwdelete(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_iocdisable_cleanup(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
 */
static void
bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_itnim_offline_cb(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
		bfa_itnim_send_fwdelete(itnim);
		break;
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_itnim_offline_cb(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Offline state.
 */
static void
bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_DELETE:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_itnim_iotov_delete(itnim);
		bfa_fcpim_delitn(itnim);
		break;
	case BFA_ITNIM_SM_OFFLINE:
		bfa_itnim_offline_cb(itnim);
		break;
	case BFA_ITNIM_SM_ONLINE:
		if (bfa_itnim_send_fwcreate(itnim))
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
		else
			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Itnim is deleted, waiting for firmware response to delete.
 */
static void
bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_FWRSP:
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_fcpim_delitn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

static void
bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
		enum bfa_itnim_event event)
{
	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
	bfa_trc(itnim->bfa, event);

	switch (event) {
	case BFA_ITNIM_SM_QRESUME:
		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
		bfa_itnim_send_fwdelete(itnim);
		break;
	case BFA_ITNIM_SM_HWFAIL:
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
		bfa_reqq_wcancel(&itnim->reqq_wait);
		bfa_fcpim_delitn(itnim);
		break;
	default:
		bfa_sm_fault(itnim->bfa, event);
	}
}

/*
 * Initiate cleanup of all IOs on an IOC failure.
 */
static void
bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_tskim_iocdisable(tskim);
	}

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}

	/*
	 * For IO request in pending queue, we pretend an early timeout.
	 */
	list_for_each_safe(qe, qen, &itnim->pending_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_tov(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
		ioim = (struct bfa_ioim_s *) qe;
		bfa_ioim_iocdisable(ioim);
	}
}

/*
 * IO cleanup completion
 */
static void
bfa_itnim_cleanp_comp(void *itnim_cbarg)
{
	struct bfa_itnim_s *itnim = itnim_cbarg;

	bfa_stats(itnim, cleanup_comps);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
}

/*
 * Initiate cleanup of all IOs.
 */
static void
bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;
	struct bfa_tskim_s *tskim;
	struct list_head *qe, *qen;

	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);

	list_for_each_safe(qe, qen, &itnim->io_q) {
		ioim = (struct bfa_ioim_s *) qe;

		/*
		 * Move IO to a cleanup queue from active queue so that a later
		 * TM will not pick up this IO.
		 */
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);

		bfa_wc_up(&itnim->wc);
		bfa_ioim_cleanup(ioim);
	}

	list_for_each_safe(qe, qen, &itnim->tsk_q) {
		tskim = (struct bfa_tskim_s *) qe;
		bfa_wc_up(&itnim->wc);
		bfa_tskim_cleanup(tskim);
	}

	bfa_wc_wait(&itnim->wc);
}
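
/*
 * Deferred completion handlers queued through bfa_cb_queue() by the
 * bfa_itnim_*_cb macros above; the driver notification is issued only
 * when invoked with 'complete' set.
 */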
static void
__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_online(itnim->ditn);
}

static void
__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_offline(itnim->ditn);
}

static void
__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_itnim_s *itnim = cbarg;

	if (complete)
		bfa_cb_itnim_sler(itnim->ditn);
}

/*
 * Call to resume any I/O requests waiting for room in request queue.
 */
static void
bfa_itnim_qresume(void *cbarg)
{
	struct bfa_itnim_s *itnim = cbarg;

	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
}

/*
 * bfa_itnim_public
 */

void
bfa_itnim_iodone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
{
	bfa_wc_down(&itnim->wc);
}

void
bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len)
{
	/*
	 * ITN memory
	 */
	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
}
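
/*
 * Carve the itnim array out of the module's kernel virtual memory block,
 * initialize every itnim (queues, request-queue wait entry, latency
 * minimums) and advance the KVA cursor past the array.
 */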
void
bfa_itnim_attach(struct bfa_fcpim_s *fcpim)
{
	struct bfa_s *bfa = fcpim->bfa;
	struct bfa_fcp_mod_s *fcp = fcpim->fcp;
	struct bfa_itnim_s *itnim;
	int i, j;

	INIT_LIST_HEAD(&fcpim->itnim_q);

	itnim = (struct bfa_itnim_s *) bfa_mem_kva_curp(fcp);
	fcpim->itnim_arr = itnim;

	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
		memset(itnim, 0, sizeof(struct bfa_itnim_s));
		itnim->bfa = bfa;
		itnim->fcpim = fcpim;
		itnim->reqq = BFA_REQQ_QOS_LO;
		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
		itnim->iotov_active = BFA_FALSE;
		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);

		INIT_LIST_HEAD(&itnim->io_q);
		INIT_LIST_HEAD(&itnim->io_cleanup_q);
		INIT_LIST_HEAD(&itnim->pending_q);
		INIT_LIST_HEAD(&itnim->tsk_q);
		INIT_LIST_HEAD(&itnim->delay_comp_q);
		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
			itnim->ioprofile.io_latency.min[j] = ~0;
		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
	}

	bfa_mem_kva_curp(fcp) = (u8 *) itnim;
}

void
bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
{
	bfa_stats(itnim, ioc_disabled);
	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
}
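
/*
 * Build and post an ITN create request to firmware. If no request-queue
 * element is available, register a wait element and return BFA_FALSE so
 * the state machine parks in a *_qfull state until BFA_ITNIM_SM_QRESUME.
 */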
static bfa_boolean_t
bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_create_req_s *m;

	itnim->msg_no++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_CREATE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	m->class = FC_CLASS_3;
	m->seq_rec = itnim->seq_rec;
	m->msg_no = itnim->msg_no;
	bfa_stats(itnim, fw_create);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

static bfa_boolean_t
bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
{
	struct bfi_itn_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
	if (!m) {
		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_ITN, BFI_ITN_H2I_DELETE_REQ,
			bfa_fn_lpu(itnim->bfa));
	m->fw_handle = itnim->rport->fw_handle;
	bfa_stats(itnim, fw_delete);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(itnim->bfa, itnim->reqq, m->mh);
	return BFA_TRUE;
}

/*
 * Cleanup all pending failed inflight requests.
 */
static void
bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
{
	struct bfa_ioim_s *ioim;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
		ioim = (struct bfa_ioim_s *)qe;
		bfa_ioim_delayed_comp(ioim, iotov);
	}
}

/*
 * Start all pending IO requests.
 */
static void
bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	bfa_itnim_iotov_stop(itnim);

	/*
	 * Abort all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_FALSE);

	/*
	 * Start all pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &itnim->io_q);
		bfa_ioim_start(ioim);
	}
}

/*
 * Fail all pending IO requests
 */
static void
bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
{
	struct bfa_ioim_s *ioim;

	/*
	 * Fail all inflight IO requests in the queue
	 */
	bfa_itnim_delayed_comp(itnim, BFA_TRUE);

	/*
	 * Fail any pending IO requests.
	 */
	while (!list_empty(&itnim->pending_q)) {
		bfa_q_deq(&itnim->pending_q, &ioim);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_ioim_tov(ioim);
	}
}

/*
 * IO TOV timer callback. Fail any pending IO requests.
 */
static void
bfa_itnim_iotov(void *itnim_arg)
{
	struct bfa_itnim_s *itnim = itnim_arg;

	itnim->iotov_active = BFA_FALSE;

	bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	bfa_cb_itnim_tov(itnim->ditn);
}

/*
 * Start IO TOV timer for failing back pending IO requests in offline state.
 */
static void
bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
{
	if (itnim->fcpim->path_tov > 0) {
		itnim->iotov_active = BFA_TRUE;
		WARN_ON(!bfa_itnim_hold_io(itnim));
		bfa_timer_start(itnim->bfa, &itnim->timer,
			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
	}
}

/*
 * Stop IO TOV timer.
 */
static void
bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
{
	if (itnim->iotov_active) {
		itnim->iotov_active = BFA_FALSE;
		bfa_timer_stop(&itnim->timer);
	}
}

/*
 * Stop IO TOV timer and immediately fail back any pending IO requests.
 */
static void
bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
{
	bfa_boolean_t pathtov_active = BFA_FALSE;

	if (itnim->iotov_active)
		pathtov_active = BFA_TRUE;

	bfa_itnim_iotov_stop(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov_begin(itnim->ditn);
	bfa_itnim_iotov_cleanup(itnim);
	if (pathtov_active)
		bfa_cb_itnim_tov(itnim->ditn);
}
  1147. static void
  1148. bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
  1149. {
  1150. struct bfa_fcpim_s *fcpim = BFA_FCPIM(itnim->bfa);
  1151. fcpim->del_itn_stats.del_itn_iocomp_aborted +=
  1152. itnim->stats.iocomp_aborted;
  1153. fcpim->del_itn_stats.del_itn_iocomp_timedout +=
  1154. itnim->stats.iocomp_timedout;
  1155. fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
  1156. itnim->stats.iocom_sqer_needed;
  1157. fcpim->del_itn_stats.del_itn_iocom_res_free +=
  1158. itnim->stats.iocom_res_free;
  1159. fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
  1160. itnim->stats.iocom_hostabrts;
  1161. fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
  1162. fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
  1163. fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
  1164. }
  1165. /*
  1166. * bfa_itnim_public
  1167. */
  1168. /*
  1169. * Itnim interrupt processing.
  1170. */
  1171. void
  1172. bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  1173. {
  1174. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  1175. union bfi_itn_i2h_msg_u msg;
  1176. struct bfa_itnim_s *itnim;
  1177. bfa_trc(bfa, m->mhdr.msg_id);
  1178. msg.msg = m;
  1179. switch (m->mhdr.msg_id) {
  1180. case BFI_ITN_I2H_CREATE_RSP:
  1181. itnim = BFA_ITNIM_FROM_TAG(fcpim,
  1182. msg.create_rsp->bfa_handle);
  1183. WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
  1184. bfa_stats(itnim, create_comps);
  1185. bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
  1186. break;
  1187. case BFI_ITN_I2H_DELETE_RSP:
  1188. itnim = BFA_ITNIM_FROM_TAG(fcpim,
  1189. msg.delete_rsp->bfa_handle);
  1190. WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
  1191. bfa_stats(itnim, delete_comps);
  1192. bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
  1193. break;
  1194. case BFI_ITN_I2H_SLER_EVENT:
  1195. itnim = BFA_ITNIM_FROM_TAG(fcpim,
  1196. msg.sler_event->bfa_handle);
  1197. bfa_stats(itnim, sler_events);
  1198. bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
  1199. break;
  1200. default:
  1201. bfa_trc(bfa, m->mhdr.msg_id);
  1202. WARN_ON(1);
  1203. }
  1204. }
  1205. /*
  1206. * bfa_itnim_api
  1207. */
  1208. struct bfa_itnim_s *
  1209. bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
  1210. {
  1211. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  1212. struct bfa_itnim_s *itnim;
  1213. bfa_itn_create(bfa, rport, bfa_itnim_isr);
  1214. itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
  1215. WARN_ON(itnim->rport != rport);
  1216. itnim->ditn = ditn;
  1217. bfa_stats(itnim, creates);
  1218. bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
  1219. return itnim;
  1220. }
  1221. void
  1222. bfa_itnim_delete(struct bfa_itnim_s *itnim)
  1223. {
  1224. bfa_stats(itnim, deletes);
  1225. bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
  1226. }
  1227. void
  1228. bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
  1229. {
  1230. itnim->seq_rec = seq_rec;
  1231. bfa_stats(itnim, onlines);
  1232. bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
  1233. }
  1234. void
  1235. bfa_itnim_offline(struct bfa_itnim_s *itnim)
  1236. {
  1237. bfa_stats(itnim, offlines);
  1238. bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
  1239. }
  1240. /*
1241. * Return true if the itnim is considered offline, for holding off IO requests.
1242. * IO is not held if the itnim is being deleted.
  1243. */
  1244. bfa_boolean_t
  1245. bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
  1246. {
  1247. return itnim->fcpim->path_tov && itnim->iotov_active &&
  1248. (bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
  1249. bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
  1250. bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
  1251. bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
  1252. bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
  1253. bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
  1254. }
  1255. #define bfa_io_lat_clock_res_div HZ
  1256. #define bfa_io_lat_clock_res_mul 1000
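/*
 * Note: the latency samples appear to be kept in jiffies; consumers can
 * convert them to milliseconds as ticks * clock_res_mul / clock_res_div
 * (an interpretation based on the HZ/1000 pair above, not stated elsewhere
 * in this file).
 */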
  1257. bfa_status_t
  1258. bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
  1259. struct bfa_itnim_ioprofile_s *ioprofile)
  1260. {
  1261. struct bfa_fcpim_s *fcpim;
  1262. if (!itnim)
  1263. return BFA_STATUS_NO_FCPIM_NEXUS;
  1264. fcpim = BFA_FCPIM(itnim->bfa);
  1265. if (!fcpim->io_profile)
  1266. return BFA_STATUS_IOPROFILE_OFF;
  1267. itnim->ioprofile.index = BFA_IOBUCKET_MAX;
  1268. itnim->ioprofile.io_profile_start_time =
  1269. bfa_io_profile_start_time(itnim->bfa);
  1270. itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
  1271. itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
  1272. *ioprofile = itnim->ioprofile;
  1273. return BFA_STATUS_OK;
  1274. }
  1275. void
  1276. bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
  1277. {
  1278. int j;
  1279. if (!itnim)
  1280. return;
  1281. memset(&itnim->stats, 0, sizeof(itnim->stats));
  1282. memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
  1283. for (j = 0; j < BFA_IOBUCKET_MAX; j++)
  1284. itnim->ioprofile.io_latency.min[j] = ~0;
  1285. }
  1286. /*
  1287. * BFA IO module state machine functions
  1288. */
  1289. /*
  1290. * IO is not started (unallocated).
  1291. */
  1292. static void
  1293. bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1294. {
  1295. switch (event) {
  1296. case BFA_IOIM_SM_START:
  1297. if (!bfa_itnim_is_online(ioim->itnim)) {
  1298. if (!bfa_itnim_hold_io(ioim->itnim)) {
  1299. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1300. list_del(&ioim->qe);
  1301. list_add_tail(&ioim->qe,
  1302. &ioim->fcpim->ioim_comp_q);
  1303. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1304. __bfa_cb_ioim_pathtov, ioim);
  1305. } else {
  1306. list_del(&ioim->qe);
  1307. list_add_tail(&ioim->qe,
  1308. &ioim->itnim->pending_q);
  1309. }
  1310. break;
  1311. }
  1312. if (ioim->nsges > BFI_SGE_INLINE) {
  1313. if (!bfa_ioim_sgpg_alloc(ioim)) {
  1314. bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
  1315. return;
  1316. }
  1317. }
  1318. if (!bfa_ioim_send_ioreq(ioim)) {
  1319. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  1320. break;
  1321. }
  1322. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1323. break;
  1324. case BFA_IOIM_SM_IOTOV:
  1325. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1326. bfa_ioim_move_to_comp_q(ioim);
  1327. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1328. __bfa_cb_ioim_pathtov, ioim);
  1329. break;
  1330. case BFA_IOIM_SM_ABORT:
  1331. /*
  1332. * IO in pending queue can get abort requests. Complete abort
  1333. * requests immediately.
  1334. */
  1335. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1336. WARN_ON(!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
  1337. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1338. __bfa_cb_ioim_abort, ioim);
  1339. break;
  1340. default:
  1341. bfa_sm_fault(ioim->bfa, event);
  1342. }
  1343. }
  1344. /*
  1345. * IO is waiting for SG pages.
  1346. */
  1347. static void
  1348. bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1349. {
  1350. bfa_trc(ioim->bfa, ioim->iotag);
  1351. bfa_trc(ioim->bfa, event);
  1352. switch (event) {
  1353. case BFA_IOIM_SM_SGALLOCED:
  1354. if (!bfa_ioim_send_ioreq(ioim)) {
  1355. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  1356. break;
  1357. }
  1358. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1359. break;
  1360. case BFA_IOIM_SM_CLEANUP:
  1361. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1362. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  1363. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1364. ioim);
  1365. bfa_ioim_notify_cleanup(ioim);
  1366. break;
  1367. case BFA_IOIM_SM_ABORT:
  1368. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1369. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  1370. bfa_ioim_move_to_comp_q(ioim);
  1371. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1372. ioim);
  1373. break;
  1374. case BFA_IOIM_SM_HWFAIL:
  1375. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1376. bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
  1377. bfa_ioim_move_to_comp_q(ioim);
  1378. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1379. ioim);
  1380. break;
  1381. default:
  1382. bfa_sm_fault(ioim->bfa, event);
  1383. }
  1384. }
  1385. /*
  1386. * IO is active.
  1387. */
  1388. static void
  1389. bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1390. {
  1391. switch (event) {
  1392. case BFA_IOIM_SM_COMP_GOOD:
  1393. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1394. bfa_ioim_move_to_comp_q(ioim);
  1395. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1396. __bfa_cb_ioim_good_comp, ioim);
  1397. break;
  1398. case BFA_IOIM_SM_COMP:
  1399. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1400. bfa_ioim_move_to_comp_q(ioim);
  1401. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
  1402. ioim);
  1403. break;
  1404. case BFA_IOIM_SM_DONE:
  1405. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1406. bfa_ioim_move_to_comp_q(ioim);
  1407. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
  1408. ioim);
  1409. break;
  1410. case BFA_IOIM_SM_ABORT:
  1411. ioim->iosp->abort_explicit = BFA_TRUE;
  1412. ioim->io_cbfn = __bfa_cb_ioim_abort;
  1413. if (bfa_ioim_send_abort(ioim))
  1414. bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
  1415. else {
  1416. bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
  1417. bfa_stats(ioim->itnim, qwait);
  1418. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1419. &ioim->iosp->reqq_wait);
  1420. }
  1421. break;
  1422. case BFA_IOIM_SM_CLEANUP:
  1423. ioim->iosp->abort_explicit = BFA_FALSE;
  1424. ioim->io_cbfn = __bfa_cb_ioim_failed;
  1425. if (bfa_ioim_send_abort(ioim))
  1426. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1427. else {
  1428. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1429. bfa_stats(ioim->itnim, qwait);
  1430. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1431. &ioim->iosp->reqq_wait);
  1432. }
  1433. break;
  1434. case BFA_IOIM_SM_HWFAIL:
  1435. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1436. bfa_ioim_move_to_comp_q(ioim);
  1437. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1438. ioim);
  1439. break;
  1440. case BFA_IOIM_SM_SQRETRY:
  1441. if (bfa_ioim_maxretry_reached(ioim)) {
  1442. /* max retry reached, free IO */
  1443. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1444. bfa_ioim_move_to_comp_q(ioim);
  1445. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1446. __bfa_cb_ioim_failed, ioim);
  1447. break;
  1448. }
  1449. /* waiting for IO tag resource free */
  1450. bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
  1451. break;
  1452. default:
  1453. bfa_sm_fault(ioim->bfa, event);
  1454. }
  1455. }
  1456. /*
  1457. * IO is retried with new tag.
  1458. */
  1459. static void
  1460. bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1461. {
  1462. switch (event) {
  1463. case BFA_IOIM_SM_FREE:
  1464. /* abts and rrq done. Now retry the IO with new tag */
  1465. bfa_ioim_update_iotag(ioim);
  1466. if (!bfa_ioim_send_ioreq(ioim)) {
  1467. bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
  1468. break;
  1469. }
  1470. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1471. break;
  1472. case BFA_IOIM_SM_CLEANUP:
  1473. ioim->iosp->abort_explicit = BFA_FALSE;
  1474. ioim->io_cbfn = __bfa_cb_ioim_failed;
  1475. if (bfa_ioim_send_abort(ioim))
  1476. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1477. else {
  1478. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1479. bfa_stats(ioim->itnim, qwait);
  1480. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1481. &ioim->iosp->reqq_wait);
  1482. }
  1483. break;
  1484. case BFA_IOIM_SM_HWFAIL:
  1485. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1486. bfa_ioim_move_to_comp_q(ioim);
  1487. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
  1488. __bfa_cb_ioim_failed, ioim);
  1489. break;
  1490. case BFA_IOIM_SM_ABORT:
  1491. /* in this state IO abort is done.
  1492. * Waiting for IO tag resource free.
  1493. */
  1494. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1495. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1496. ioim);
  1497. break;
  1498. default:
  1499. bfa_sm_fault(ioim->bfa, event);
  1500. }
  1501. }
  1502. /*
  1503. * IO is being aborted, waiting for completion from firmware.
  1504. */
  1505. static void
  1506. bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1507. {
  1508. bfa_trc(ioim->bfa, ioim->iotag);
  1509. bfa_trc(ioim->bfa, event);
  1510. switch (event) {
  1511. case BFA_IOIM_SM_COMP_GOOD:
  1512. case BFA_IOIM_SM_COMP:
  1513. case BFA_IOIM_SM_DONE:
  1514. case BFA_IOIM_SM_FREE:
  1515. break;
  1516. case BFA_IOIM_SM_ABORT_DONE:
  1517. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1518. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1519. ioim);
  1520. break;
  1521. case BFA_IOIM_SM_ABORT_COMP:
  1522. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1523. bfa_ioim_move_to_comp_q(ioim);
  1524. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1525. ioim);
  1526. break;
  1527. case BFA_IOIM_SM_COMP_UTAG:
  1528. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1529. bfa_ioim_move_to_comp_q(ioim);
  1530. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1531. ioim);
  1532. break;
  1533. case BFA_IOIM_SM_CLEANUP:
  1534. WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
  1535. ioim->iosp->abort_explicit = BFA_FALSE;
  1536. if (bfa_ioim_send_abort(ioim))
  1537. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1538. else {
  1539. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1540. bfa_stats(ioim->itnim, qwait);
  1541. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  1542. &ioim->iosp->reqq_wait);
  1543. }
  1544. break;
  1545. case BFA_IOIM_SM_HWFAIL:
  1546. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1547. bfa_ioim_move_to_comp_q(ioim);
  1548. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1549. ioim);
  1550. break;
  1551. default:
  1552. bfa_sm_fault(ioim->bfa, event);
  1553. }
  1554. }
  1555. /*
  1556. * IO is being cleaned up (implicit abort), waiting for completion from
  1557. * firmware.
  1558. */
  1559. static void
  1560. bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1561. {
  1562. bfa_trc(ioim->bfa, ioim->iotag);
  1563. bfa_trc(ioim->bfa, event);
  1564. switch (event) {
  1565. case BFA_IOIM_SM_COMP_GOOD:
  1566. case BFA_IOIM_SM_COMP:
  1567. case BFA_IOIM_SM_DONE:
  1568. case BFA_IOIM_SM_FREE:
  1569. break;
  1570. case BFA_IOIM_SM_ABORT:
  1571. /*
  1572. * IO is already being aborted implicitly
  1573. */
  1574. ioim->io_cbfn = __bfa_cb_ioim_abort;
  1575. break;
  1576. case BFA_IOIM_SM_ABORT_DONE:
  1577. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1578. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1579. bfa_ioim_notify_cleanup(ioim);
  1580. break;
  1581. case BFA_IOIM_SM_ABORT_COMP:
  1582. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1583. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1584. bfa_ioim_notify_cleanup(ioim);
  1585. break;
  1586. case BFA_IOIM_SM_COMP_UTAG:
  1587. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1588. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1589. bfa_ioim_notify_cleanup(ioim);
  1590. break;
  1591. case BFA_IOIM_SM_HWFAIL:
  1592. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1593. bfa_ioim_move_to_comp_q(ioim);
  1594. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1595. ioim);
  1596. break;
  1597. case BFA_IOIM_SM_CLEANUP:
  1598. /*
  1599. * IO can be in cleanup state already due to TM command.
  1600. * 2nd cleanup request comes from ITN offline event.
  1601. */
  1602. break;
  1603. default:
  1604. bfa_sm_fault(ioim->bfa, event);
  1605. }
  1606. }
  1607. /*
  1608. * IO is waiting for room in request CQ
  1609. */
  1610. static void
  1611. bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1612. {
  1613. bfa_trc(ioim->bfa, ioim->iotag);
  1614. bfa_trc(ioim->bfa, event);
  1615. switch (event) {
  1616. case BFA_IOIM_SM_QRESUME:
  1617. bfa_sm_set_state(ioim, bfa_ioim_sm_active);
  1618. bfa_ioim_send_ioreq(ioim);
  1619. break;
  1620. case BFA_IOIM_SM_ABORT:
  1621. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1622. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1623. bfa_ioim_move_to_comp_q(ioim);
  1624. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1625. ioim);
  1626. break;
  1627. case BFA_IOIM_SM_CLEANUP:
  1628. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1629. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1630. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1631. ioim);
  1632. bfa_ioim_notify_cleanup(ioim);
  1633. break;
  1634. case BFA_IOIM_SM_HWFAIL:
  1635. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1636. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1637. bfa_ioim_move_to_comp_q(ioim);
  1638. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1639. ioim);
  1640. break;
  1641. default:
  1642. bfa_sm_fault(ioim->bfa, event);
  1643. }
  1644. }
  1645. /*
  1646. * Active IO is being aborted, waiting for room in request CQ.
  1647. */
  1648. static void
  1649. bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1650. {
  1651. bfa_trc(ioim->bfa, ioim->iotag);
  1652. bfa_trc(ioim->bfa, event);
  1653. switch (event) {
  1654. case BFA_IOIM_SM_QRESUME:
  1655. bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
  1656. bfa_ioim_send_abort(ioim);
  1657. break;
  1658. case BFA_IOIM_SM_CLEANUP:
  1659. WARN_ON(ioim->iosp->abort_explicit != BFA_TRUE);
  1660. ioim->iosp->abort_explicit = BFA_FALSE;
  1661. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
  1662. break;
  1663. case BFA_IOIM_SM_COMP_GOOD:
  1664. case BFA_IOIM_SM_COMP:
  1665. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1666. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1667. bfa_ioim_move_to_comp_q(ioim);
  1668. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1669. ioim);
  1670. break;
  1671. case BFA_IOIM_SM_DONE:
  1672. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1673. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1674. bfa_ioim_move_to_comp_q(ioim);
  1675. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
  1676. ioim);
  1677. break;
  1678. case BFA_IOIM_SM_HWFAIL:
  1679. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1680. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1681. bfa_ioim_move_to_comp_q(ioim);
  1682. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1683. ioim);
  1684. break;
  1685. default:
  1686. bfa_sm_fault(ioim->bfa, event);
  1687. }
  1688. }
  1689. /*
  1690. * Active IO is being cleaned up, waiting for room in request CQ.
  1691. */
  1692. static void
  1693. bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1694. {
  1695. bfa_trc(ioim->bfa, ioim->iotag);
  1696. bfa_trc(ioim->bfa, event);
  1697. switch (event) {
  1698. case BFA_IOIM_SM_QRESUME:
  1699. bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
  1700. bfa_ioim_send_abort(ioim);
  1701. break;
  1702. case BFA_IOIM_SM_ABORT:
  1703. /*
  1704. * IO is already being cleaned up implicitly
  1705. */
  1706. ioim->io_cbfn = __bfa_cb_ioim_abort;
  1707. break;
  1708. case BFA_IOIM_SM_COMP_GOOD:
  1709. case BFA_IOIM_SM_COMP:
  1710. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1711. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1712. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1713. bfa_ioim_notify_cleanup(ioim);
  1714. break;
  1715. case BFA_IOIM_SM_DONE:
  1716. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
  1717. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1718. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  1719. bfa_ioim_notify_cleanup(ioim);
  1720. break;
  1721. case BFA_IOIM_SM_HWFAIL:
  1722. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1723. bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
  1724. bfa_ioim_move_to_comp_q(ioim);
  1725. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
  1726. ioim);
  1727. break;
  1728. default:
  1729. bfa_sm_fault(ioim->bfa, event);
  1730. }
  1731. }
  1732. /*
  1733. * IO bfa callback is pending.
  1734. */
  1735. static void
  1736. bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1737. {
  1738. switch (event) {
  1739. case BFA_IOIM_SM_HCB:
  1740. bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
  1741. bfa_ioim_free(ioim);
  1742. break;
  1743. case BFA_IOIM_SM_CLEANUP:
  1744. bfa_ioim_notify_cleanup(ioim);
  1745. break;
  1746. case BFA_IOIM_SM_HWFAIL:
  1747. break;
  1748. default:
  1749. bfa_sm_fault(ioim->bfa, event);
  1750. }
  1751. }
  1752. /*
  1753. * IO bfa callback is pending. IO resource cannot be freed.
  1754. */
  1755. static void
  1756. bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1757. {
  1758. bfa_trc(ioim->bfa, ioim->iotag);
  1759. bfa_trc(ioim->bfa, event);
  1760. switch (event) {
  1761. case BFA_IOIM_SM_HCB:
  1762. bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
  1763. list_del(&ioim->qe);
  1764. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
  1765. break;
  1766. case BFA_IOIM_SM_FREE:
  1767. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1768. break;
  1769. case BFA_IOIM_SM_CLEANUP:
  1770. bfa_ioim_notify_cleanup(ioim);
  1771. break;
  1772. case BFA_IOIM_SM_HWFAIL:
  1773. bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
  1774. break;
  1775. default:
  1776. bfa_sm_fault(ioim->bfa, event);
  1777. }
  1778. }
  1779. /*
1780. * IO is completed, waiting for resource free from firmware.
  1781. */
  1782. static void
  1783. bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
  1784. {
  1785. bfa_trc(ioim->bfa, ioim->iotag);
  1786. bfa_trc(ioim->bfa, event);
  1787. switch (event) {
  1788. case BFA_IOIM_SM_FREE:
  1789. bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
  1790. bfa_ioim_free(ioim);
  1791. break;
  1792. case BFA_IOIM_SM_CLEANUP:
  1793. bfa_ioim_notify_cleanup(ioim);
  1794. break;
  1795. case BFA_IOIM_SM_HWFAIL:
  1796. break;
  1797. default:
  1798. bfa_sm_fault(ioim->bfa, event);
  1799. }
  1800. }
  1801. /*
1802. * This is called from bfa_fcpim_start after the driver has completed
1803. * bfa_init() with the flash read. Now invalidate the stale contents of the
1804. * lun mask, such as unit attention, rp tag and lp tag.
  1805. */
  1806. static void
  1807. bfa_ioim_lm_init(struct bfa_s *bfa)
  1808. {
  1809. struct bfa_lun_mask_s *lunm_list;
  1810. int i;
  1811. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1812. return;
  1813. lunm_list = bfa_get_lun_mask_list(bfa);
  1814. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1815. lunm_list[i].ua = BFA_IOIM_LM_UA_RESET;
  1816. lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
  1817. lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
  1818. }
  1819. }
  1820. static void
  1821. __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
  1822. {
  1823. struct bfa_ioim_s *ioim = cbarg;
  1824. if (!complete) {
  1825. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  1826. return;
  1827. }
  1828. bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
  1829. }
  1830. static void
  1831. __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
  1832. {
  1833. struct bfa_ioim_s *ioim = cbarg;
  1834. struct bfi_ioim_rsp_s *m;
  1835. u8 *snsinfo = NULL;
  1836. u8 sns_len = 0;
  1837. s32 residue = 0;
  1838. if (!complete) {
  1839. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  1840. return;
  1841. }
  1842. m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
  1843. if (m->io_status == BFI_IOIM_STS_OK) {
  1844. /*
  1845. * setup sense information, if present
  1846. */
  1847. if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
  1848. m->sns_len) {
  1849. sns_len = m->sns_len;
  1850. snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
  1851. ioim->iotag);
  1852. }
  1853. /*
  1854. * setup residue value correctly for normal completions
  1855. */
  1856. if (m->resid_flags == FCP_RESID_UNDER) {
  1857. residue = be32_to_cpu(m->residue);
  1858. bfa_stats(ioim->itnim, iocomp_underrun);
  1859. }
  1860. if (m->resid_flags == FCP_RESID_OVER) {
  1861. residue = be32_to_cpu(m->residue);
  1862. residue = -residue;
  1863. bfa_stats(ioim->itnim, iocomp_overrun);
  1864. }
  1865. }
  1866. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
  1867. m->scsi_status, sns_len, snsinfo, residue);
  1868. }
  1869. void
  1870. bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
  1871. u16 rp_tag, u8 lp_tag)
  1872. {
  1873. struct bfa_lun_mask_s *lun_list;
  1874. u8 i;
  1875. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1876. return;
  1877. lun_list = bfa_get_lun_mask_list(bfa);
  1878. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1879. if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
  1880. if ((lun_list[i].lp_wwn == lp_wwn) &&
  1881. (lun_list[i].rp_wwn == rp_wwn)) {
  1882. lun_list[i].rp_tag = rp_tag;
  1883. lun_list[i].lp_tag = lp_tag;
  1884. }
  1885. }
  1886. }
  1887. }
  1888. /*
  1889. * set UA for all active luns in LM DB
  1890. */
  1891. static void
  1892. bfa_ioim_lm_set_ua(struct bfa_s *bfa)
  1893. {
  1894. struct bfa_lun_mask_s *lunm_list;
  1895. int i;
  1896. lunm_list = bfa_get_lun_mask_list(bfa);
  1897. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1898. if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
  1899. continue;
  1900. lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
  1901. }
  1902. }
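/*
 * Enable or disable LUN masking. Enabling it marks every active entry with a
 * unit attention so the initiator re-evaluates the LUNs; the new state is
 * persisted through bfa_dconf_update().
 */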
  1903. bfa_status_t
  1904. bfa_fcpim_lunmask_update(struct bfa_s *bfa, u32 update)
  1905. {
  1906. struct bfa_lunmask_cfg_s *lun_mask;
  1907. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1908. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1909. return BFA_STATUS_FAILED;
  1910. if (bfa_get_lun_mask_status(bfa) == update)
  1911. return BFA_STATUS_NO_CHANGE;
  1912. lun_mask = bfa_get_lun_mask(bfa);
  1913. lun_mask->status = update;
  1914. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED)
  1915. bfa_ioim_lm_set_ua(bfa);
  1916. return bfa_dconf_update(bfa);
  1917. }
  1918. bfa_status_t
  1919. bfa_fcpim_lunmask_clear(struct bfa_s *bfa)
  1920. {
  1921. int i;
  1922. struct bfa_lun_mask_s *lunm_list;
  1923. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1924. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1925. return BFA_STATUS_FAILED;
  1926. lunm_list = bfa_get_lun_mask_list(bfa);
  1927. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1928. if (lunm_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE) {
  1929. if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID)
  1930. bfa_rport_unset_lunmask(bfa,
  1931. BFA_RPORT_FROM_TAG(bfa, lunm_list[i].rp_tag));
  1932. }
  1933. }
  1934. memset(lunm_list, 0, sizeof(struct bfa_lun_mask_s) * MAX_LUN_MASK_CFG);
  1935. return bfa_dconf_update(bfa);
  1936. }
  1937. bfa_status_t
  1938. bfa_fcpim_lunmask_query(struct bfa_s *bfa, void *buf)
  1939. {
  1940. struct bfa_lunmask_cfg_s *lun_mask;
  1941. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1942. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1943. return BFA_STATUS_FAILED;
  1944. lun_mask = bfa_get_lun_mask(bfa);
  1945. memcpy(buf, lun_mask, sizeof(struct bfa_lunmask_cfg_s));
  1946. return BFA_STATUS_OK;
  1947. }
  1948. bfa_status_t
  1949. bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
  1950. wwn_t rpwwn, struct scsi_lun lun)
  1951. {
  1952. struct bfa_lun_mask_s *lunm_list;
  1953. struct bfa_rport_s *rp = NULL;
  1954. int i, free_index = MAX_LUN_MASK_CFG + 1;
  1955. struct bfa_fcs_lport_s *port = NULL;
  1956. struct bfa_fcs_rport_s *rp_fcs;
  1957. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  1958. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  1959. return BFA_STATUS_FAILED;
  1960. port = bfa_fcs_lookup_port(&((struct bfad_s *)bfa->bfad)->bfa_fcs,
  1961. vf_id, *pwwn);
  1962. if (port) {
  1963. *pwwn = port->port_cfg.pwwn;
  1964. rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
  1965. if (rp_fcs)
  1966. rp = rp_fcs->bfa_rport;
  1967. }
  1968. lunm_list = bfa_get_lun_mask_list(bfa);
  1969. /* if entry exists */
  1970. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1971. if (lunm_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
  1972. free_index = i;
  1973. if ((lunm_list[i].lp_wwn == *pwwn) &&
  1974. (lunm_list[i].rp_wwn == rpwwn) &&
  1975. (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
  1976. scsilun_to_int((struct scsi_lun *)&lun)))
  1977. return BFA_STATUS_ENTRY_EXISTS;
  1978. }
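/*
 * free_index still holds the sentinel (MAX_LUN_MASK_CFG + 1) when no inactive
 * slot was found, i.e. the LUN mask table is full.
 */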
  1979. if (free_index > MAX_LUN_MASK_CFG)
  1980. return BFA_STATUS_MAX_ENTRY_REACHED;
  1981. if (rp) {
  1982. lunm_list[free_index].lp_tag = bfa_lps_get_tag_from_pid(bfa,
  1983. rp->rport_info.local_pid);
  1984. lunm_list[free_index].rp_tag = rp->rport_tag;
  1985. } else {
  1986. lunm_list[free_index].lp_tag = BFA_LP_TAG_INVALID;
  1987. lunm_list[free_index].rp_tag = BFA_RPORT_TAG_INVALID;
  1988. }
  1989. lunm_list[free_index].lp_wwn = *pwwn;
  1990. lunm_list[free_index].rp_wwn = rpwwn;
  1991. lunm_list[free_index].lun = lun;
  1992. lunm_list[free_index].state = BFA_IOIM_LUN_MASK_ACTIVE;
  1993. /* set for all luns in this rp */
  1994. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  1995. if ((lunm_list[i].lp_wwn == *pwwn) &&
  1996. (lunm_list[i].rp_wwn == rpwwn))
  1997. lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
  1998. }
  1999. return bfa_dconf_update(bfa);
  2000. }
  2001. bfa_status_t
  2002. bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
  2003. wwn_t rpwwn, struct scsi_lun lun)
  2004. {
  2005. struct bfa_lun_mask_s *lunm_list;
  2006. struct bfa_rport_s *rp = NULL;
  2007. struct bfa_fcs_lport_s *port = NULL;
  2008. struct bfa_fcs_rport_s *rp_fcs;
  2009. int i;
  2010. /* in min cfg lunm_list could be NULL but no commands should run. */
  2011. if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_MINCFG)
  2012. return BFA_STATUS_FAILED;
  2013. bfa_trc(bfa, bfa_get_lun_mask_status(bfa));
  2014. bfa_trc(bfa, *pwwn);
  2015. bfa_trc(bfa, rpwwn);
  2016. bfa_trc(bfa, scsilun_to_int((struct scsi_lun *)&lun));
  2017. if (*pwwn == 0) {
  2018. port = bfa_fcs_lookup_port(
  2019. &((struct bfad_s *)bfa->bfad)->bfa_fcs,
  2020. vf_id, *pwwn);
  2021. if (port) {
  2022. *pwwn = port->port_cfg.pwwn;
  2023. rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
  2024. if (rp_fcs)
  2025. rp = rp_fcs->bfa_rport;
  2026. }
  2027. }
  2028. lunm_list = bfa_get_lun_mask_list(bfa);
  2029. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  2030. if ((lunm_list[i].lp_wwn == *pwwn) &&
  2031. (lunm_list[i].rp_wwn == rpwwn) &&
  2032. (scsilun_to_int((struct scsi_lun *)&lunm_list[i].lun) ==
  2033. scsilun_to_int((struct scsi_lun *)&lun))) {
  2034. lunm_list[i].lp_wwn = 0;
  2035. lunm_list[i].rp_wwn = 0;
  2036. int_to_scsilun(0, &lunm_list[i].lun);
  2037. lunm_list[i].state = BFA_IOIM_LUN_MASK_INACTIVE;
  2038. if (lunm_list[i].rp_tag != BFA_RPORT_TAG_INVALID) {
  2039. lunm_list[i].rp_tag = BFA_RPORT_TAG_INVALID;
  2040. lunm_list[i].lp_tag = BFA_LP_TAG_INVALID;
  2041. }
  2042. return bfa_dconf_update(bfa);
  2043. }
  2044. }
  2045. /* set for all luns in this rp */
  2046. for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
  2047. if ((lunm_list[i].lp_wwn == *pwwn) &&
  2048. (lunm_list[i].rp_wwn == rpwwn))
  2049. lunm_list[i].ua = BFA_IOIM_LM_UA_SET;
  2050. }
  2051. return BFA_STATUS_ENTRY_NOT_EXISTS;
  2052. }
  2053. static void
  2054. __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
  2055. {
  2056. struct bfa_ioim_s *ioim = cbarg;
  2057. if (!complete) {
  2058. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  2059. return;
  2060. }
  2061. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
  2062. 0, 0, NULL, 0);
  2063. }
  2064. static void
  2065. __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
  2066. {
  2067. struct bfa_ioim_s *ioim = cbarg;
  2068. bfa_stats(ioim->itnim, path_tov_expired);
  2069. if (!complete) {
  2070. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  2071. return;
  2072. }
  2073. bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
  2074. 0, 0, NULL, 0);
  2075. }
  2076. static void
  2077. __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
  2078. {
  2079. struct bfa_ioim_s *ioim = cbarg;
  2080. if (!complete) {
  2081. bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
  2082. return;
  2083. }
  2084. bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
  2085. }
  2086. static void
  2087. bfa_ioim_sgpg_alloced(void *cbarg)
  2088. {
  2089. struct bfa_ioim_s *ioim = cbarg;
  2090. ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
  2091. list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
  2092. ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
  2093. bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
  2094. }
  2095. /*
  2096. * Send I/O request to firmware.
  2097. */
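/*
 * Layout sketch (derived from the code below): the request carries two inline
 * SG entries - the first data SGE and a PGDLEN entry that either points at the
 * first SG page (when nsges > BFI_SGE_INLINE) or terminates the list.
 * Remaining SG elements are packed into SG pages; each page ends with either a
 * LINK entry chaining to the next page or a PGDLEN trailer carrying the
 * cumulative page length.
 */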
  2098. static bfa_boolean_t
  2099. bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
  2100. {
  2101. struct bfa_itnim_s *itnim = ioim->itnim;
  2102. struct bfi_ioim_req_s *m;
  2103. static struct fcp_cmnd_s cmnd_z0 = { { { 0 } } };
  2104. struct bfi_sge_s *sge, *sgpge;
  2105. u32 pgdlen = 0;
  2106. u32 fcp_dl;
  2107. u64 addr;
  2108. struct scatterlist *sg;
  2109. struct bfa_sgpg_s *sgpg;
  2110. struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
  2111. u32 i, sge_id, pgcumsz;
  2112. enum dma_data_direction dmadir;
  2113. /*
  2114. * check for room in queue to send request now
  2115. */
  2116. m = bfa_reqq_next(ioim->bfa, ioim->reqq);
  2117. if (!m) {
  2118. bfa_stats(ioim->itnim, qwait);
  2119. bfa_reqq_wait(ioim->bfa, ioim->reqq,
  2120. &ioim->iosp->reqq_wait);
  2121. return BFA_FALSE;
  2122. }
  2123. /*
  2124. * build i/o request message next
  2125. */
  2126. m->io_tag = cpu_to_be16(ioim->iotag);
  2127. m->rport_hdl = ioim->itnim->rport->fw_handle;
  2128. m->io_timeout = 0;
  2129. sge = &m->sges[0];
  2130. sgpg = ioim->sgpg;
  2131. sge_id = 0;
  2132. sgpge = NULL;
  2133. pgcumsz = 0;
  2134. scsi_for_each_sg(cmnd, sg, ioim->nsges, i) {
  2135. if (i == 0) {
  2136. /* build inline IO SG element */
  2137. addr = bfa_sgaddr_le(sg_dma_address(sg));
  2138. sge->sga = *(union bfi_addr_u *) &addr;
  2139. pgdlen = sg_dma_len(sg);
  2140. sge->sg_len = pgdlen;
  2141. sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
  2142. BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
  2143. bfa_sge_to_be(sge);
  2144. sge++;
  2145. } else {
  2146. if (sge_id == 0)
  2147. sgpge = sgpg->sgpg->sges;
  2148. addr = bfa_sgaddr_le(sg_dma_address(sg));
  2149. sgpge->sga = *(union bfi_addr_u *) &addr;
  2150. sgpge->sg_len = sg_dma_len(sg);
  2151. pgcumsz += sgpge->sg_len;
  2152. /* set flags */
  2153. if (i < (ioim->nsges - 1) &&
  2154. sge_id < (BFI_SGPG_DATA_SGES - 1))
  2155. sgpge->flags = BFI_SGE_DATA;
  2156. else if (i < (ioim->nsges - 1))
  2157. sgpge->flags = BFI_SGE_DATA_CPL;
  2158. else
  2159. sgpge->flags = BFI_SGE_DATA_LAST;
  2160. bfa_sge_to_le(sgpge);
  2161. sgpge++;
  2162. if (i == (ioim->nsges - 1)) {
  2163. sgpge->flags = BFI_SGE_PGDLEN;
  2164. sgpge->sga.a32.addr_lo = 0;
  2165. sgpge->sga.a32.addr_hi = 0;
  2166. sgpge->sg_len = pgcumsz;
  2167. bfa_sge_to_le(sgpge);
  2168. } else if (++sge_id == BFI_SGPG_DATA_SGES) {
  2169. sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
  2170. sgpge->flags = BFI_SGE_LINK;
  2171. sgpge->sga = sgpg->sgpg_pa;
  2172. sgpge->sg_len = pgcumsz;
  2173. bfa_sge_to_le(sgpge);
  2174. sge_id = 0;
  2175. pgcumsz = 0;
  2176. }
  2177. }
  2178. }
  2179. if (ioim->nsges > BFI_SGE_INLINE) {
  2180. sge->sga = ioim->sgpg->sgpg_pa;
  2181. } else {
  2182. sge->sga.a32.addr_lo = 0;
  2183. sge->sga.a32.addr_hi = 0;
  2184. }
  2185. sge->sg_len = pgdlen;
  2186. sge->flags = BFI_SGE_PGDLEN;
  2187. bfa_sge_to_be(sge);
  2188. /*
  2189. * set up I/O command parameters
  2190. */
  2191. m->cmnd = cmnd_z0;
  2192. int_to_scsilun(cmnd->device->lun, &m->cmnd.lun);
  2193. dmadir = cmnd->sc_data_direction;
  2194. if (dmadir == DMA_TO_DEVICE)
  2195. m->cmnd.iodir = FCP_IODIR_WRITE;
  2196. else if (dmadir == DMA_FROM_DEVICE)
  2197. m->cmnd.iodir = FCP_IODIR_READ;
  2198. else
  2199. m->cmnd.iodir = FCP_IODIR_NONE;
  2200. m->cmnd.cdb = *(struct scsi_cdb_s *) cmnd->cmnd;
  2201. fcp_dl = scsi_bufflen(cmnd);
  2202. m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
  2203. /*
  2204. * set up I/O message header
  2205. */
  2206. switch (m->cmnd.iodir) {
  2207. case FCP_IODIR_READ:
  2208. bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_fn_lpu(ioim->bfa));
  2209. bfa_stats(itnim, input_reqs);
  2210. ioim->itnim->stats.rd_throughput += fcp_dl;
  2211. break;
  2212. case FCP_IODIR_WRITE:
  2213. bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_fn_lpu(ioim->bfa));
  2214. bfa_stats(itnim, output_reqs);
  2215. ioim->itnim->stats.wr_throughput += fcp_dl;
  2216. break;
  2217. case FCP_IODIR_RW:
  2218. bfa_stats(itnim, input_reqs);
  2219. bfa_stats(itnim, output_reqs);
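/*
 * Fall through !!! - RW requests use the generic IO message class below.
 */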
  2220. default:
  2221. bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
  2222. }
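/*
 * If the target needs sequence level error recovery or the transfer length is
 * not a multiple of a 32-bit word, fall back to the generic BFI_MC_IOIM_IO
 * message class (assumption: the READ/WRITE classes are only used for word
 * aligned transfers without SLER).
 */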
  2223. if (itnim->seq_rec ||
  2224. (scsi_bufflen(cmnd) & (sizeof(u32) - 1)))
  2225. bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_fn_lpu(ioim->bfa));
  2226. /*
  2227. * queue I/O message to firmware
  2228. */
  2229. bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
  2230. return BFA_TRUE;
  2231. }
  2232. /*
2233. * Set up any additional SG pages needed. The inline SG element is set up
2234. * at queuing time.
  2235. */
  2236. static bfa_boolean_t
  2237. bfa_ioim_sgpg_alloc(struct bfa_ioim_s *ioim)
  2238. {
  2239. u16 nsgpgs;
  2240. WARN_ON(ioim->nsges <= BFI_SGE_INLINE);
  2241. /*
  2242. * allocate SG pages needed
  2243. */
  2244. nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
  2245. if (!nsgpgs)
  2246. return BFA_TRUE;
  2247. if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
  2248. != BFA_STATUS_OK) {
  2249. bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
  2250. return BFA_FALSE;
  2251. }
  2252. ioim->nsgpgs = nsgpgs;
  2253. ioim->sgpg = bfa_q_first(&ioim->sgpg_q);
  2254. return BFA_TRUE;
  2255. }
  2256. /*
  2257. * Send I/O abort request to firmware.
  2258. */
  2259. static bfa_boolean_t
  2260. bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
  2261. {
  2262. struct bfi_ioim_abort_req_s *m;
  2263. enum bfi_ioim_h2i msgop;
  2264. /*
  2265. * check for room in queue to send request now
  2266. */
  2267. m = bfa_reqq_next(ioim->bfa, ioim->reqq);
  2268. if (!m)
  2269. return BFA_FALSE;
  2270. /*
  2271. * build i/o request message next
  2272. */
  2273. if (ioim->iosp->abort_explicit)
  2274. msgop = BFI_IOIM_H2I_IOABORT_REQ;
  2275. else
  2276. msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
  2277. bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_fn_lpu(ioim->bfa));
  2278. m->io_tag = cpu_to_be16(ioim->iotag);
  2279. m->abort_tag = ++ioim->abort_tag;
  2280. /*
  2281. * queue I/O message to firmware
  2282. */
  2283. bfa_reqq_produce(ioim->bfa, ioim->reqq, m->mh);
  2284. return BFA_TRUE;
  2285. }
  2286. /*
2287. * Called to resume any I/O requests waiting for room in the request queue.
  2288. */
  2289. static void
  2290. bfa_ioim_qresume(void *cbarg)
  2291. {
  2292. struct bfa_ioim_s *ioim = cbarg;
  2293. bfa_stats(ioim->itnim, qresumes);
  2294. bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
  2295. }
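/*
 * Cleanup bookkeeping: IOs cleaned up on behalf of a TM command decrement the
 * TM wait counter; otherwise the itnim is notified, and the IO is parked on
 * the itnim delay_comp_q when delayed completion with path TOV is in effect.
 */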
  2296. static void
  2297. bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
  2298. {
  2299. /*
  2300. * Move IO from itnim queue to fcpim global queue since itnim will be
  2301. * freed.
  2302. */
  2303. list_del(&ioim->qe);
  2304. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  2305. if (!ioim->iosp->tskim) {
  2306. if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
  2307. bfa_cb_dequeue(&ioim->hcb_qe);
  2308. list_del(&ioim->qe);
  2309. list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
  2310. }
  2311. bfa_itnim_iodone(ioim->itnim);
  2312. } else
  2313. bfa_wc_down(&ioim->iosp->tskim->wc);
  2314. }
  2315. static bfa_boolean_t
  2316. bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
  2317. {
  2318. if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
  2319. (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim))) ||
  2320. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort)) ||
  2321. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull)) ||
  2322. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb)) ||
  2323. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free)) ||
  2324. (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
  2325. return BFA_FALSE;
  2326. return BFA_TRUE;
  2327. }
  2328. void
  2329. bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
  2330. {
  2331. /*
2332. * If the path tov timer expired, fail back with PATHTOV status - these
2333. * IO requests are not normally retried by the IO stack.
2334. *
2335. * Otherwise the device came back online; fail the IO with normal failed
2336. * status so that the IO stack retries these failed IO requests.
  2337. */
  2338. if (iotov)
  2339. ioim->io_cbfn = __bfa_cb_ioim_pathtov;
  2340. else {
  2341. ioim->io_cbfn = __bfa_cb_ioim_failed;
  2342. bfa_stats(ioim->itnim, iocom_nexus_abort);
  2343. }
  2344. bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
  2345. /*
  2346. * Move IO to fcpim global queue since itnim will be
  2347. * freed.
  2348. */
  2349. list_del(&ioim->qe);
  2350. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  2351. }
  2352. /*
  2353. * Memory allocation and initialization.
  2354. */
  2355. void
  2356. bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
  2357. {
  2358. struct bfa_ioim_s *ioim;
  2359. struct bfa_fcp_mod_s *fcp = fcpim->fcp;
  2360. struct bfa_ioim_sp_s *iosp;
  2361. u16 i;
  2362. /*
  2363. * claim memory first
  2364. */
  2365. ioim = (struct bfa_ioim_s *) bfa_mem_kva_curp(fcp);
  2366. fcpim->ioim_arr = ioim;
  2367. bfa_mem_kva_curp(fcp) = (u8 *) (ioim + fcpim->fcp->num_ioim_reqs);
  2368. iosp = (struct bfa_ioim_sp_s *) bfa_mem_kva_curp(fcp);
  2369. fcpim->ioim_sp_arr = iosp;
  2370. bfa_mem_kva_curp(fcp) = (u8 *) (iosp + fcpim->fcp->num_ioim_reqs);
  2371. /*
  2372. * Initialize ioim free queues
  2373. */
  2374. INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
  2375. INIT_LIST_HEAD(&fcpim->ioim_comp_q);
  2376. for (i = 0; i < fcpim->fcp->num_ioim_reqs;
  2377. i++, ioim++, iosp++) {
  2378. /*
  2379. * initialize IOIM
  2380. */
  2381. memset(ioim, 0, sizeof(struct bfa_ioim_s));
  2382. ioim->iotag = i;
  2383. ioim->bfa = fcpim->bfa;
  2384. ioim->fcpim = fcpim;
  2385. ioim->iosp = iosp;
  2386. INIT_LIST_HEAD(&ioim->sgpg_q);
  2387. bfa_reqq_winit(&ioim->iosp->reqq_wait,
  2388. bfa_ioim_qresume, ioim);
  2389. bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
  2390. bfa_ioim_sgpg_alloced, ioim);
  2391. bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
  2392. }
  2393. }
  2394. void
  2395. bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  2396. {
  2397. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  2398. struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
  2399. struct bfa_ioim_s *ioim;
  2400. u16 iotag;
  2401. enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
  2402. iotag = be16_to_cpu(rsp->io_tag);
  2403. ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
  2404. WARN_ON(ioim->iotag != iotag);
  2405. bfa_trc(ioim->bfa, ioim->iotag);
  2406. bfa_trc(ioim->bfa, rsp->io_status);
  2407. bfa_trc(ioim->bfa, rsp->reuse_io_tag);
  2408. if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
  2409. ioim->iosp->comp_rspmsg = *m;
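/*
 * reuse_io_tag selects the completion path: when it is zero the IO takes the
 * DONE path and its tag is recycled only after a later BFI_IOIM_STS_RES_FREE,
 * otherwise the COMP path frees the IO right after the host callback
 * (interpretation based on the state machine handling above).
 */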
  2410. switch (rsp->io_status) {
  2411. case BFI_IOIM_STS_OK:
  2412. bfa_stats(ioim->itnim, iocomp_ok);
  2413. if (rsp->reuse_io_tag == 0)
  2414. evt = BFA_IOIM_SM_DONE;
  2415. else
  2416. evt = BFA_IOIM_SM_COMP;
  2417. break;
  2418. case BFI_IOIM_STS_TIMEDOUT:
  2419. bfa_stats(ioim->itnim, iocomp_timedout);
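/*
 * Fall through !!! - timed out IO requests are reported as aborted.
 */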
  2420. case BFI_IOIM_STS_ABORTED:
  2421. rsp->io_status = BFI_IOIM_STS_ABORTED;
  2422. bfa_stats(ioim->itnim, iocomp_aborted);
  2423. if (rsp->reuse_io_tag == 0)
  2424. evt = BFA_IOIM_SM_DONE;
  2425. else
  2426. evt = BFA_IOIM_SM_COMP;
  2427. break;
  2428. case BFI_IOIM_STS_PROTO_ERR:
  2429. bfa_stats(ioim->itnim, iocom_proto_err);
  2430. WARN_ON(!rsp->reuse_io_tag);
  2431. evt = BFA_IOIM_SM_COMP;
  2432. break;
  2433. case BFI_IOIM_STS_SQER_NEEDED:
  2434. bfa_stats(ioim->itnim, iocom_sqer_needed);
  2435. WARN_ON(rsp->reuse_io_tag != 0);
  2436. evt = BFA_IOIM_SM_SQRETRY;
  2437. break;
  2438. case BFI_IOIM_STS_RES_FREE:
  2439. bfa_stats(ioim->itnim, iocom_res_free);
  2440. evt = BFA_IOIM_SM_FREE;
  2441. break;
  2442. case BFI_IOIM_STS_HOST_ABORTED:
  2443. bfa_stats(ioim->itnim, iocom_hostabrts);
  2444. if (rsp->abort_tag != ioim->abort_tag) {
  2445. bfa_trc(ioim->bfa, rsp->abort_tag);
  2446. bfa_trc(ioim->bfa, ioim->abort_tag);
  2447. return;
  2448. }
  2449. if (rsp->reuse_io_tag)
  2450. evt = BFA_IOIM_SM_ABORT_COMP;
  2451. else
  2452. evt = BFA_IOIM_SM_ABORT_DONE;
  2453. break;
  2454. case BFI_IOIM_STS_UTAG:
  2455. bfa_stats(ioim->itnim, iocom_utags);
  2456. evt = BFA_IOIM_SM_COMP_UTAG;
  2457. break;
  2458. default:
  2459. WARN_ON(1);
  2460. }
  2461. bfa_sm_send_event(ioim, evt);
  2462. }
  2463. void
  2464. bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  2465. {
  2466. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  2467. struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
  2468. struct bfa_ioim_s *ioim;
  2469. u16 iotag;
  2470. iotag = be16_to_cpu(rsp->io_tag);
  2471. ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
  2472. WARN_ON(ioim->iotag != iotag);
  2473. bfa_ioim_cb_profile_comp(fcpim, ioim);
  2474. bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
  2475. }
  2476. /*
  2477. * Called by itnim to clean up IO while going offline.
  2478. */
  2479. void
  2480. bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
  2481. {
  2482. bfa_trc(ioim->bfa, ioim->iotag);
  2483. bfa_stats(ioim->itnim, io_cleanups);
  2484. ioim->iosp->tskim = NULL;
  2485. bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
  2486. }
  2487. void
  2488. bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
  2489. {
  2490. bfa_trc(ioim->bfa, ioim->iotag);
  2491. bfa_stats(ioim->itnim, io_tmaborts);
  2492. ioim->iosp->tskim = tskim;
  2493. bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
  2494. }
  2495. /*
  2496. * IOC failure handling.
  2497. */
  2498. void
  2499. bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
  2500. {
  2501. bfa_trc(ioim->bfa, ioim->iotag);
  2502. bfa_stats(ioim->itnim, io_iocdowns);
  2503. bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
  2504. }
  2505. /*
  2506. * IO offline TOV popped. Fail the pending IO.
  2507. */
  2508. void
  2509. bfa_ioim_tov(struct bfa_ioim_s *ioim)
  2510. {
  2511. bfa_trc(ioim->bfa, ioim->iotag);
  2512. bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
  2513. }
  2514. /*
  2515. * Allocate IOIM resource for initiator mode I/O request.
  2516. */
  2517. struct bfa_ioim_s *
  2518. bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
  2519. struct bfa_itnim_s *itnim, u16 nsges)
  2520. {
  2521. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  2522. struct bfa_ioim_s *ioim;
  2523. struct bfa_iotag_s *iotag = NULL;
  2524. /*
2525. * allocate IOIM resource
  2526. */
  2527. bfa_q_deq(&fcpim->fcp->iotag_ioim_free_q, &iotag);
  2528. if (!iotag) {
  2529. bfa_stats(itnim, no_iotags);
  2530. return NULL;
  2531. }
  2532. ioim = BFA_IOIM_FROM_TAG(fcpim, iotag->tag);
  2533. ioim->dio = dio;
  2534. ioim->itnim = itnim;
  2535. ioim->nsges = nsges;
  2536. ioim->nsgpgs = 0;
  2537. bfa_stats(itnim, total_ios);
  2538. fcpim->ios_active++;
  2539. list_add_tail(&ioim->qe, &itnim->io_q);
  2540. return ioim;
  2541. }
  2542. void
  2543. bfa_ioim_free(struct bfa_ioim_s *ioim)
  2544. {
  2545. struct bfa_fcpim_s *fcpim = ioim->fcpim;
  2546. struct bfa_iotag_s *iotag;
  2547. if (ioim->nsgpgs > 0)
  2548. bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
  2549. bfa_stats(ioim->itnim, io_comps);
  2550. fcpim->ios_active--;
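/*
 * Keep only the base tag bits before recycling; the upper bits are assumed to
 * carry the reuse/retry count updated by bfa_ioim_update_iotag() in the SQER
 * retry path.
 */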
  2551. ioim->iotag &= BFA_IOIM_IOTAG_MASK;
  2552. WARN_ON(!(ioim->iotag <
  2553. (fcpim->fcp->num_ioim_reqs + fcpim->fcp->num_fwtio_reqs)));
  2554. iotag = BFA_IOTAG_FROM_TAG(fcpim->fcp, ioim->iotag);
  2555. if (ioim->iotag < fcpim->fcp->num_ioim_reqs)
  2556. list_add_tail(&iotag->qe, &fcpim->fcp->iotag_ioim_free_q);
  2557. else
  2558. list_add_tail(&iotag->qe, &fcpim->fcp->iotag_tio_free_q);
  2559. list_del(&ioim->qe);
  2560. }
  2561. void
  2562. bfa_ioim_start(struct bfa_ioim_s *ioim)
  2563. {
  2564. bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
  2565. /*
  2566. * Obtain the queue over which this request has to be issued
  2567. */
  2568. ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
  2569. BFA_FALSE : bfa_itnim_get_reqq(ioim);
  2570. bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
  2571. }
  2572. /*
  2573. * Driver I/O abort request.
  2574. */
  2575. bfa_status_t
  2576. bfa_ioim_abort(struct bfa_ioim_s *ioim)
  2577. {
  2578. bfa_trc(ioim->bfa, ioim->iotag);
  2579. if (!bfa_ioim_is_abortable(ioim))
  2580. return BFA_STATUS_FAILED;
  2581. bfa_stats(ioim->itnim, io_aborts);
  2582. bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
  2583. return BFA_STATUS_OK;
  2584. }
  2585. /*
  2586. * BFA TSKIM state machine functions
  2587. */
  2588. /*
  2589. * Task management command beginning state.
  2590. */
  2591. static void
  2592. bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2593. {
  2594. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2595. switch (event) {
  2596. case BFA_TSKIM_SM_START:
  2597. bfa_sm_set_state(tskim, bfa_tskim_sm_active);
  2598. bfa_tskim_gather_ios(tskim);
  2599. /*
  2600. * If device is offline, do not send TM on wire. Just cleanup
  2601. * any pending IO requests and complete TM request.
  2602. */
  2603. if (!bfa_itnim_is_online(tskim->itnim)) {
  2604. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2605. tskim->tsk_status = BFI_TSKIM_STS_OK;
  2606. bfa_tskim_cleanup_ios(tskim);
  2607. return;
  2608. }
  2609. if (!bfa_tskim_send(tskim)) {
  2610. bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
  2611. bfa_stats(tskim->itnim, tm_qwait);
  2612. bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
  2613. &tskim->reqq_wait);
  2614. }
  2615. break;
  2616. default:
  2617. bfa_sm_fault(tskim->bfa, event);
  2618. }
  2619. }
  2620. /*
  2621. * TM command is active, awaiting completion from firmware to
  2622. * cleanup IO requests in TM scope.
  2623. */
  2624. static void
  2625. bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2626. {
  2627. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2628. switch (event) {
  2629. case BFA_TSKIM_SM_DONE:
  2630. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2631. bfa_tskim_cleanup_ios(tskim);
  2632. break;
  2633. case BFA_TSKIM_SM_CLEANUP:
  2634. bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
  2635. if (!bfa_tskim_send_abort(tskim)) {
  2636. bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
  2637. bfa_stats(tskim->itnim, tm_qwait);
  2638. bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
  2639. &tskim->reqq_wait);
  2640. }
  2641. break;
  2642. case BFA_TSKIM_SM_HWFAIL:
  2643. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2644. bfa_tskim_iocdisable_ios(tskim);
  2645. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2646. break;
  2647. default:
  2648. bfa_sm_fault(tskim->bfa, event);
  2649. }
  2650. }
  2651. /*
  2652. * An active TM is being cleaned up since ITN is offline. Awaiting cleanup
  2653. * completion event from firmware.
  2654. */
  2655. static void
  2656. bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2657. {
  2658. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2659. switch (event) {
  2660. case BFA_TSKIM_SM_DONE:
  2661. /*
  2662. * Ignore and wait for ABORT completion from firmware.
  2663. */
  2664. break;
  2665. case BFA_TSKIM_SM_UTAG:
  2666. case BFA_TSKIM_SM_CLEANUP_DONE:
  2667. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2668. bfa_tskim_cleanup_ios(tskim);
  2669. break;
  2670. case BFA_TSKIM_SM_HWFAIL:
  2671. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2672. bfa_tskim_iocdisable_ios(tskim);
  2673. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2674. break;
  2675. default:
  2676. bfa_sm_fault(tskim->bfa, event);
  2677. }
  2678. }
  2679. static void
  2680. bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2681. {
  2682. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2683. switch (event) {
  2684. case BFA_TSKIM_SM_IOS_DONE:
  2685. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2686. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
  2687. break;
  2688. case BFA_TSKIM_SM_CLEANUP:
  2689. /*
  2690. * Ignore, TM command completed on wire.
2691. * Notify TM completion on IO cleanup completion.
  2692. */
  2693. break;
  2694. case BFA_TSKIM_SM_HWFAIL:
  2695. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2696. bfa_tskim_iocdisable_ios(tskim);
  2697. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2698. break;
  2699. default:
  2700. bfa_sm_fault(tskim->bfa, event);
  2701. }
  2702. }
  2703. /*
  2704. * Task management command is waiting for room in request CQ
  2705. */
  2706. static void
  2707. bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2708. {
  2709. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2710. switch (event) {
  2711. case BFA_TSKIM_SM_QRESUME:
  2712. bfa_sm_set_state(tskim, bfa_tskim_sm_active);
  2713. bfa_tskim_send(tskim);
  2714. break;
  2715. case BFA_TSKIM_SM_CLEANUP:
  2716. /*
  2717. * No need to send TM on wire since ITN is offline.
  2718. */
  2719. bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
  2720. bfa_reqq_wcancel(&tskim->reqq_wait);
  2721. bfa_tskim_cleanup_ios(tskim);
  2722. break;
  2723. case BFA_TSKIM_SM_HWFAIL:
  2724. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2725. bfa_reqq_wcancel(&tskim->reqq_wait);
  2726. bfa_tskim_iocdisable_ios(tskim);
  2727. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2728. break;
  2729. default:
  2730. bfa_sm_fault(tskim->bfa, event);
  2731. }
  2732. }
  2733. /*
2734. * Task management command is active, awaiting room in the request CQ
2735. * to send the cleanup request.
  2736. */
  2737. static void
  2738. bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
  2739. enum bfa_tskim_event event)
  2740. {
  2741. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2742. switch (event) {
  2743. case BFA_TSKIM_SM_DONE:
  2744. bfa_reqq_wcancel(&tskim->reqq_wait);
  2745. /*
  2746. * Fall through !!!
  2747. */
  2748. case BFA_TSKIM_SM_QRESUME:
  2749. bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
  2750. bfa_tskim_send_abort(tskim);
  2751. break;
  2752. case BFA_TSKIM_SM_HWFAIL:
  2753. bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
  2754. bfa_reqq_wcancel(&tskim->reqq_wait);
  2755. bfa_tskim_iocdisable_ios(tskim);
  2756. bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
  2757. break;
  2758. default:
  2759. bfa_sm_fault(tskim->bfa, event);
  2760. }
  2761. }
  2762. /*
  2763. * BFA callback is pending
  2764. */
  2765. static void
  2766. bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
  2767. {
  2768. bfa_trc(tskim->bfa, tskim->tsk_tag << 16 | event);
  2769. switch (event) {
  2770. case BFA_TSKIM_SM_HCB:
  2771. bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
  2772. bfa_tskim_free(tskim);
  2773. break;
  2774. case BFA_TSKIM_SM_CLEANUP:
  2775. bfa_tskim_notify_comp(tskim);
  2776. break;
  2777. case BFA_TSKIM_SM_HWFAIL:
  2778. break;
  2779. default:
  2780. bfa_sm_fault(tskim->bfa, event);
  2781. }
  2782. }
  2783. static void
  2784. __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
  2785. {
  2786. struct bfa_tskim_s *tskim = cbarg;
  2787. if (!complete) {
  2788. bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
  2789. return;
  2790. }
  2791. bfa_stats(tskim->itnim, tm_success);
  2792. bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
  2793. }
  2794. static void
  2795. __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
  2796. {
  2797. struct bfa_tskim_s *tskim = cbarg;
  2798. if (!complete) {
  2799. bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
  2800. return;
  2801. }
  2802. bfa_stats(tskim->itnim, tm_failures);
  2803. bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
  2804. BFI_TSKIM_STS_FAILED);
  2805. }
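/*
 * TM scope check: a target reset covers every LUN, while task-set and LUN
 * scoped TM commands only match IO requests addressed to the same LUN.
 */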
  2806. static bfa_boolean_t
  2807. bfa_tskim_match_scope(struct bfa_tskim_s *tskim, struct scsi_lun lun)
  2808. {
  2809. switch (tskim->tm_cmnd) {
  2810. case FCP_TM_TARGET_RESET:
  2811. return BFA_TRUE;
  2812. case FCP_TM_ABORT_TASK_SET:
  2813. case FCP_TM_CLEAR_TASK_SET:
  2814. case FCP_TM_LUN_RESET:
  2815. case FCP_TM_CLEAR_ACA:
  2816. return !memcmp(&tskim->lun, &lun, sizeof(lun));
  2817. default:
  2818. WARN_ON(1);
  2819. }
  2820. return BFA_FALSE;
  2821. }
  2822. /*
  2823. * Gather affected IO requests and task management commands.
  2824. */
  2825. static void
  2826. bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
  2827. {
  2828. struct bfa_itnim_s *itnim = tskim->itnim;
  2829. struct bfa_ioim_s *ioim;
  2830. struct list_head *qe, *qen;
  2831. struct scsi_cmnd *cmnd;
  2832. struct scsi_lun scsilun;
  2833. INIT_LIST_HEAD(&tskim->io_q);
  2834. /*
  2835. * Gather any active IO requests first.
  2836. */
  2837. list_for_each_safe(qe, qen, &itnim->io_q) {
  2838. ioim = (struct bfa_ioim_s *) qe;
  2839. cmnd = (struct scsi_cmnd *) ioim->dio;
  2840. int_to_scsilun(cmnd->device->lun, &scsilun);
  2841. if (bfa_tskim_match_scope(tskim, scsilun)) {
  2842. list_del(&ioim->qe);
  2843. list_add_tail(&ioim->qe, &tskim->io_q);
  2844. }
  2845. }
  2846. /*
2847. * Fail back any pending IO requests immediately.
  2848. */
  2849. list_for_each_safe(qe, qen, &itnim->pending_q) {
  2850. ioim = (struct bfa_ioim_s *) qe;
  2851. cmnd = (struct scsi_cmnd *) ioim->dio;
  2852. int_to_scsilun(cmnd->device->lun, &scsilun);
  2853. if (bfa_tskim_match_scope(tskim, scsilun)) {
  2854. list_del(&ioim->qe);
  2855. list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
  2856. bfa_ioim_tov(ioim);
  2857. }
  2858. }
  2859. }
  2860. /*
  2861. * IO cleanup completion
  2862. */
  2863. static void
  2864. bfa_tskim_cleanp_comp(void *tskim_cbarg)
  2865. {
  2866. struct bfa_tskim_s *tskim = tskim_cbarg;
  2867. bfa_stats(tskim->itnim, tm_io_comps);
  2868. bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
  2869. }
  2870. /*
2871. * Clean up the IO requests gathered for this task management command.
  2872. */
  2873. static void
  2874. bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
  2875. {
  2876. struct bfa_ioim_s *ioim;
  2877. struct list_head *qe, *qen;
  2878. bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
  2879. list_for_each_safe(qe, qen, &tskim->io_q) {
  2880. ioim = (struct bfa_ioim_s *) qe;
  2881. bfa_wc_up(&tskim->wc);
  2882. bfa_ioim_cleanup_tm(ioim, tskim);
  2883. }
  2884. bfa_wc_wait(&tskim->wc);
  2885. }
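/*
 * Editorial note on the wait-counter pattern used above (based on the BFA
 * wait-counter helpers): bfa_wc_init() takes an initial reference,
 * bfa_wc_up() adds one reference per gathered IO, each bfa_tskim_iodone()
 * call drops one via bfa_wc_down(), and bfa_wc_wait() releases the initial
 * reference, so bfa_tskim_cleanp_comp() fires only after every outstanding
 * IO has finished its cleanup.
 */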
  2886. /*
  2887. * Send task management request to firmware.
  2888. */
  2889. static bfa_boolean_t
  2890. bfa_tskim_send(struct bfa_tskim_s *tskim)
  2891. {
  2892. struct bfa_itnim_s *itnim = tskim->itnim;
  2893. struct bfi_tskim_req_s *m;
  2894. /*
  2895. * check for room in queue to send request now
  2896. */
  2897. m = bfa_reqq_next(tskim->bfa, itnim->reqq);
  2898. if (!m)
  2899. return BFA_FALSE;
  2900. /*
  2901. * build i/o request message next
  2902. */
  2903. bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
  2904. bfa_fn_lpu(tskim->bfa));
  2905. m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
  2906. m->itn_fhdl = tskim->itnim->rport->fw_handle;
  2907. m->t_secs = tskim->tsecs;
  2908. m->lun = tskim->lun;
  2909. m->tm_flags = tskim->tm_cmnd;
  2910. /*
  2911. * queue I/O message to firmware
  2912. */
  2913. bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
  2914. return BFA_TRUE;
  2915. }
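/*
 * Illustrative sketch (assumption, not driver code): the calling pattern the
 * TM state machine uses when the request queue is full; the exact form lives
 * in the state machine earlier in this file, and bfa_tskim_sm_qfull refers
 * to the qfull state defined there.
 *
 *	if (!bfa_tskim_send(tskim)) {
 *		bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
 *		bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
 *			      &tskim->reqq_wait);
 *	}
 *
 * When queue space frees up, bfa_tskim_qresume() (further below) re-drives
 * the command with BFA_TSKIM_SM_QRESUME.
 */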
  2916. /*
  2917. * Send abort request to cleanup an active TM to firmware.
  2918. */
  2919. static bfa_boolean_t
  2920. bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
  2921. {
  2922. struct bfa_itnim_s *itnim = tskim->itnim;
  2923. struct bfi_tskim_abortreq_s *m;
  2924. /*
  2925. * check for room in queue to send request now
  2926. */
  2927. m = bfa_reqq_next(tskim->bfa, itnim->reqq);
  2928. if (!m)
  2929. return BFA_FALSE;
  2930. /*
  2931. * build i/o request message next
  2932. */
  2933. bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
  2934. bfa_fn_lpu(tskim->bfa));
  2935. m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
  2936. /*
  2937. * queue I/O message to firmware
  2938. */
  2939. bfa_reqq_produce(tskim->bfa, itnim->reqq, m->mh);
  2940. return BFA_TRUE;
  2941. }
  2942. /*
2943. * Resume a task management command that was waiting for room in the request queue.
  2944. */
  2945. static void
  2946. bfa_tskim_qresume(void *cbarg)
  2947. {
  2948. struct bfa_tskim_s *tskim = cbarg;
  2949. bfa_stats(tskim->itnim, tm_qresumes);
  2950. bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
  2951. }
  2952. /*
2953. * Cleanup IOs associated with a task management command on IOC failures.
  2954. */
  2955. static void
  2956. bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
  2957. {
  2958. struct bfa_ioim_s *ioim;
  2959. struct list_head *qe, *qen;
  2960. list_for_each_safe(qe, qen, &tskim->io_q) {
  2961. ioim = (struct bfa_ioim_s *) qe;
  2962. bfa_ioim_iocdisable(ioim);
  2963. }
  2964. }
  2965. /*
  2966. * Notification on completions from related ioim.
  2967. */
  2968. void
  2969. bfa_tskim_iodone(struct bfa_tskim_s *tskim)
  2970. {
  2971. bfa_wc_down(&tskim->wc);
  2972. }
  2973. /*
  2974. * Handle IOC h/w failure notification from itnim.
  2975. */
  2976. void
  2977. bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
  2978. {
  2979. tskim->notify = BFA_FALSE;
  2980. bfa_stats(tskim->itnim, tm_iocdowns);
  2981. bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
  2982. }
  2983. /*
  2984. * Cleanup TM command and associated IOs as part of ITNIM offline.
  2985. */
  2986. void
  2987. bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
  2988. {
  2989. tskim->notify = BFA_TRUE;
  2990. bfa_stats(tskim->itnim, tm_cleanups);
  2991. bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
  2992. }
  2993. /*
  2994. * Memory allocation and initialization.
  2995. */
  2996. void
  2997. bfa_tskim_attach(struct bfa_fcpim_s *fcpim)
  2998. {
  2999. struct bfa_tskim_s *tskim;
  3000. struct bfa_fcp_mod_s *fcp = fcpim->fcp;
  3001. u16 i;
  3002. INIT_LIST_HEAD(&fcpim->tskim_free_q);
  3003. INIT_LIST_HEAD(&fcpim->tskim_unused_q);
  3004. tskim = (struct bfa_tskim_s *) bfa_mem_kva_curp(fcp);
  3005. fcpim->tskim_arr = tskim;
  3006. for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
  3007. /*
  3008. * initialize TSKIM
  3009. */
  3010. memset(tskim, 0, sizeof(struct bfa_tskim_s));
  3011. tskim->tsk_tag = i;
  3012. tskim->bfa = fcpim->bfa;
  3013. tskim->fcpim = fcpim;
  3014. tskim->notify = BFA_FALSE;
  3015. bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
  3016. tskim);
  3017. bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
  3018. list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
  3019. }
  3020. bfa_mem_kva_curp(fcp) = (u8 *) tskim;
  3021. }
  3022. void
  3023. bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  3024. {
  3025. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  3026. struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
  3027. struct bfa_tskim_s *tskim;
  3028. u16 tsk_tag = be16_to_cpu(rsp->tsk_tag);
  3029. tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
  3030. WARN_ON(tskim->tsk_tag != tsk_tag);
  3031. tskim->tsk_status = rsp->tsk_status;
  3032. /*
  3033. * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
  3034. * requests. All other statuses are for normal completions.
  3035. */
  3036. if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
  3037. bfa_stats(tskim->itnim, tm_cleanup_comps);
  3038. bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
  3039. } else if (rsp->tsk_status == BFI_TSKIM_STS_UTAG) {
  3040. bfa_sm_send_event(tskim, BFA_TSKIM_SM_UTAG);
  3041. } else {
  3042. bfa_stats(tskim->itnim, tm_fw_rsps);
  3043. bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
  3044. }
  3045. }
  3046. struct bfa_tskim_s *
  3047. bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
  3048. {
  3049. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  3050. struct bfa_tskim_s *tskim;
  3051. bfa_q_deq(&fcpim->tskim_free_q, &tskim);
  3052. if (tskim)
  3053. tskim->dtsk = dtsk;
  3054. return tskim;
  3055. }
  3056. void
  3057. bfa_tskim_free(struct bfa_tskim_s *tskim)
  3058. {
  3059. WARN_ON(!bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
  3060. list_del(&tskim->qe);
  3061. list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
  3062. }
  3063. /*
  3064. * Start a task management command.
  3065. *
  3066. * @param[in] tskim BFA task management command instance
  3067. * @param[in] itnim i-t nexus for the task management command
  3068. * @param[in] lun lun, if applicable
  3069. * @param[in] tm_cmnd Task management command code.
  3070. * @param[in] t_secs Timeout in seconds
  3071. *
  3072. * @return None.
  3073. */
  3074. void
  3075. bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim,
  3076. struct scsi_lun lun,
  3077. enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
  3078. {
  3079. tskim->itnim = itnim;
  3080. tskim->lun = lun;
  3081. tskim->tm_cmnd = tm_cmnd;
  3082. tskim->tsecs = tsecs;
  3083. tskim->notify = BFA_FALSE;
  3084. bfa_stats(itnim, tm_cmnds);
  3085. list_add_tail(&tskim->qe, &itnim->tsk_q);
  3086. bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
  3087. }
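/*
 * Illustrative usage sketch (assumption, not driver code): issue a LUN reset
 * through the TM machinery. The bfa, itnim and dtsk variables and the
 * 60-second timeout are placeholders for whatever the caller (normally the
 * bfad layer) already holds.
 *
 *	struct bfa_tskim_s *tskim = bfa_tskim_alloc(bfa, dtsk);
 *	struct scsi_lun lun;
 *
 *	if (tskim) {
 *		int_to_scsilun(0, &lun);
 *		bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 60);
 *	}
 */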
  3088. void
  3089. bfa_tskim_res_recfg(struct bfa_s *bfa, u16 num_tskim_fw)
  3090. {
  3091. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  3092. struct list_head *qe;
  3093. int i;
  3094. for (i = 0; i < (fcpim->num_tskim_reqs - num_tskim_fw); i++) {
  3095. bfa_q_deq_tail(&fcpim->tskim_free_q, &qe);
  3096. list_add_tail(qe, &fcpim->tskim_unused_q);
  3097. }
  3098. }
  3099. /* BFA FCP module - parent module for fcpim */
  3100. BFA_MODULE(fcp);
  3101. static void
  3102. bfa_fcp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
  3103. struct bfa_s *bfa)
  3104. {
  3105. struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
  3106. struct bfa_mem_kva_s *fcp_kva = BFA_MEM_FCP_KVA(bfa);
  3107. struct bfa_mem_dma_s *seg_ptr;
  3108. u16 nsegs, idx, per_seg_ios, num_io_req;
  3109. u32 km_len = 0;
  3110. /*
3111. * Zero is an allowed config value for num_ioim_reqs and num_fwtio_reqs.
3112. * If the values are non-zero, clamp them to the supported range.
  3113. */
  3114. if (cfg->fwcfg.num_ioim_reqs &&
  3115. cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
  3116. cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
  3117. else if (cfg->fwcfg.num_ioim_reqs > BFA_IOIM_MAX)
  3118. cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
  3119. if (cfg->fwcfg.num_fwtio_reqs > BFA_FWTIO_MAX)
  3120. cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
  3121. num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
  3122. if (num_io_req > BFA_IO_MAX) {
  3123. if (cfg->fwcfg.num_ioim_reqs && cfg->fwcfg.num_fwtio_reqs) {
  3124. cfg->fwcfg.num_ioim_reqs = BFA_IO_MAX/2;
  3125. cfg->fwcfg.num_fwtio_reqs = BFA_IO_MAX/2;
  3126. } else if (cfg->fwcfg.num_fwtio_reqs)
  3127. cfg->fwcfg.num_fwtio_reqs = BFA_FWTIO_MAX;
  3128. else
  3129. cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MAX;
  3130. }
  3131. bfa_fcpim_meminfo(cfg, &km_len);
  3132. num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
  3133. km_len += num_io_req * sizeof(struct bfa_iotag_s);
  3134. km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itn_s);
  3135. /* dma memory */
  3136. nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
  3137. per_seg_ios = BFI_MEM_NREQS_SEG(BFI_IOIM_SNSLEN);
  3138. bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
  3139. if (num_io_req >= per_seg_ios) {
  3140. num_io_req -= per_seg_ios;
  3141. bfa_mem_dma_setup(minfo, seg_ptr,
  3142. per_seg_ios * BFI_IOIM_SNSLEN);
  3143. } else
  3144. bfa_mem_dma_setup(minfo, seg_ptr,
  3145. num_io_req * BFI_IOIM_SNSLEN);
  3146. }
  3147. /* kva memory */
  3148. bfa_mem_kva_setup(minfo, fcp_kva, km_len);
  3149. }
  3150. static void
  3151. bfa_fcp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
  3152. struct bfa_pcidev_s *pcidev)
  3153. {
  3154. struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
  3155. struct bfa_mem_dma_s *seg_ptr;
  3156. u16 idx, nsegs, num_io_req;
  3157. fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
  3158. fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs;
  3159. fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs;
  3160. fcp->num_itns = cfg->fwcfg.num_rports;
  3161. fcp->bfa = bfa;
  3162. /*
3163. * Set up the pool of snsbase addresses that is passed to the firmware
3164. * as part of bfi_iocfc_cfg_s.
  3165. */
  3166. num_io_req = (cfg->fwcfg.num_ioim_reqs + cfg->fwcfg.num_fwtio_reqs);
  3167. nsegs = BFI_MEM_DMA_NSEGS(num_io_req, BFI_IOIM_SNSLEN);
  3168. bfa_mem_dma_seg_iter(fcp, seg_ptr, nsegs, idx) {
  3169. if (!bfa_mem_dma_virt(seg_ptr))
  3170. break;
  3171. fcp->snsbase[idx].pa = bfa_mem_dma_phys(seg_ptr);
  3172. fcp->snsbase[idx].kva = bfa_mem_dma_virt(seg_ptr);
  3173. bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa);
  3174. }
  3175. fcp->throttle_update_required = 1;
  3176. bfa_fcpim_attach(fcp, bfad, cfg, pcidev);
  3177. bfa_iotag_attach(fcp);
  3178. fcp->itn_arr = (struct bfa_itn_s *) bfa_mem_kva_curp(fcp);
  3179. bfa_mem_kva_curp(fcp) = (u8 *)fcp->itn_arr +
  3180. (fcp->num_itns * sizeof(struct bfa_itn_s));
  3181. memset(fcp->itn_arr, 0,
  3182. (fcp->num_itns * sizeof(struct bfa_itn_s)));
  3183. }
  3184. static void
  3185. bfa_fcp_detach(struct bfa_s *bfa)
  3186. {
  3187. }
  3188. static void
  3189. bfa_fcp_start(struct bfa_s *bfa)
  3190. {
  3191. struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
  3192. /*
3193. * bfa_init() with flash read is complete. Now invalidate the stale
3194. * contents of the LUN mask, such as unit attention, rp tag and lp tag.
  3195. */
  3196. bfa_ioim_lm_init(fcp->bfa);
  3197. }
  3198. static void
  3199. bfa_fcp_stop(struct bfa_s *bfa)
  3200. {
  3201. }
  3202. static void
  3203. bfa_fcp_iocdisable(struct bfa_s *bfa)
  3204. {
  3205. struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
  3206. bfa_fcpim_iocdisable(fcp);
  3207. }
  3208. void
  3209. bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw)
  3210. {
  3211. struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa);
  3212. struct list_head *qe;
  3213. int i;
3214. /* Update the IO throttle value only once, at driver load time */
  3215. if (!mod->throttle_update_required)
  3216. return;
  3217. for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) {
  3218. bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe);
  3219. list_add_tail(qe, &mod->iotag_unused_q);
  3220. }
  3221. if (mod->num_ioim_reqs != num_ioim_fw) {
  3222. bfa_trc(bfa, mod->num_ioim_reqs);
  3223. bfa_trc(bfa, num_ioim_fw);
  3224. }
  3225. mod->max_ioim_reqs = max_ioim_fw;
  3226. mod->num_ioim_reqs = num_ioim_fw;
  3227. mod->throttle_update_required = 0;
  3228. }
  3229. void
  3230. bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
  3231. void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m))
  3232. {
  3233. struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
  3234. struct bfa_itn_s *itn;
  3235. itn = BFA_ITN_FROM_TAG(fcp, rport->rport_tag);
  3236. itn->isr = isr;
  3237. }
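/*
 * Illustrative sketch (assumption): how an rport-level consumer registers
 * its handler for ITN interrupt messages; my_itn_isr is a hypothetical name
 * used only for illustration.
 *
 *	static void my_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m);
 *
 *	bfa_itn_create(bfa, rport, my_itn_isr);
 *
 * bfa_itn_isr() below then dispatches firmware messages to the registered
 * handler based on the bfa_handle carried in the message.
 */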
  3238. /*
  3239. * Itn interrupt processing.
  3240. */
  3241. void
  3242. bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
  3243. {
  3244. struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
  3245. union bfi_itn_i2h_msg_u msg;
  3246. struct bfa_itn_s *itn;
  3247. msg.msg = m;
  3248. itn = BFA_ITN_FROM_TAG(fcp, msg.create_rsp->bfa_handle);
  3249. if (itn->isr)
  3250. itn->isr(bfa, m);
  3251. else
  3252. WARN_ON(1);
  3253. }
  3254. void
  3255. bfa_iotag_attach(struct bfa_fcp_mod_s *fcp)
  3256. {
  3257. struct bfa_iotag_s *iotag;
  3258. u16 num_io_req, i;
  3259. iotag = (struct bfa_iotag_s *) bfa_mem_kva_curp(fcp);
  3260. fcp->iotag_arr = iotag;
  3261. INIT_LIST_HEAD(&fcp->iotag_ioim_free_q);
  3262. INIT_LIST_HEAD(&fcp->iotag_tio_free_q);
  3263. INIT_LIST_HEAD(&fcp->iotag_unused_q);
  3264. num_io_req = fcp->num_ioim_reqs + fcp->num_fwtio_reqs;
  3265. for (i = 0; i < num_io_req; i++, iotag++) {
  3266. memset(iotag, 0, sizeof(struct bfa_iotag_s));
  3267. iotag->tag = i;
  3268. if (i < fcp->num_ioim_reqs)
  3269. list_add_tail(&iotag->qe, &fcp->iotag_ioim_free_q);
  3270. else
  3271. list_add_tail(&iotag->qe, &fcp->iotag_tio_free_q);
  3272. }
  3273. bfa_mem_kva_curp(fcp) = (u8 *) iotag;
  3274. }
  3275. /**
3276. * To send the config request, first try to use the throttle value from
3277. * flash. If it is 0, use the driver parameter instead. The result must be
3278. * min(flash_val, drv_val), because memory allocation was done based on the
3279. * driver's configured value.
  3280. */
  3281. u16
  3282. bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param)
  3283. {
  3284. u16 tmp;
  3285. struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa);
  3286. /*
3287. * If the throttle value from flash is already in effect after the driver
3288. * is loaded, then until the next load always return the current value
3289. * instead of the actual flash value.
  3290. */
  3291. if (!fcp->throttle_update_required)
  3292. return (u16)fcp->num_ioim_reqs;
  3293. tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0;
  3294. if (!tmp || (tmp > drv_cfg_param))
  3295. tmp = drv_cfg_param;
  3296. return tmp;
  3297. }
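/*
 * Worked example for the rule above (values are assumptions for
 * illustration): with drv_cfg_param == 1024,
 *
 *	flash value 0 or dconf data invalid  ->  1024 (driver value used)
 *	flash value  256                     ->   256 (flash value, below cap)
 *	flash value 2048                     ->  1024 (capped at driver value)
 *
 * and once a throttle value is already in effect
 * (throttle_update_required == 0), the current num_ioim_reqs is returned
 * regardless of the flash contents.
 */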
  3298. bfa_status_t
  3299. bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value)
  3300. {
  3301. if (!bfa_dconf_get_min_cfg(bfa)) {
  3302. BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value;
  3303. BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1;
  3304. return BFA_STATUS_OK;
  3305. }
  3306. return BFA_STATUS_FAILED;
  3307. }
  3308. u16
  3309. bfa_fcpim_read_throttle(struct bfa_s *bfa)
  3310. {
  3311. struct bfa_throttle_cfg_s *throttle_cfg =
  3312. &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg);
  3313. return ((!bfa_dconf_get_min_cfg(bfa)) ?
  3314. ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0);
  3315. }
  3316. bfa_status_t
  3317. bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value)
  3318. {
3319. /* In min-cfg mode, no commands should run. */
  3320. if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
  3321. (!bfa_dconf_read_data_valid(bfa)))
  3322. return BFA_STATUS_FAILED;
  3323. bfa_fcpim_write_throttle(bfa, value);
  3324. return bfa_dconf_update(bfa);
  3325. }
  3326. bfa_status_t
  3327. bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf)
  3328. {
  3329. struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa);
  3330. struct bfa_defs_fcpim_throttle_s throttle;
  3331. if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) ||
  3332. (!bfa_dconf_read_data_valid(bfa)))
  3333. return BFA_STATUS_FAILED;
  3334. memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s));
  3335. throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs);
  3336. throttle.cfg_value = bfa_fcpim_read_throttle(bfa);
  3337. if (!throttle.cfg_value)
  3338. throttle.cfg_value = throttle.cur_value;
  3339. throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs);
  3340. memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s));
  3341. return BFA_STATUS_OK;
  3342. }
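/*
 * Illustrative usage sketch (assumption, not driver code): persist a new
 * throttle value and read back the effective configuration; the value 512
 * is a placeholder.
 *
 *	struct bfa_defs_fcpim_throttle_s throttle;
 *
 *	if (bfa_fcpim_throttle_set(bfa, 512) == BFA_STATUS_OK &&
 *	    bfa_fcpim_throttle_get(bfa, &throttle) == BFA_STATUS_OK) {
 *		...
 *	}
 *
 * After this, throttle.cfg_value reflects the newly written value, while
 * throttle.cur_value changes only after the next driver load.
 */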