megaraid_sas_fusion.c 89 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
2944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085
  1. /*
  2. * Linux MegaRAID driver for SAS based RAID controllers
  3. *
  4. * Copyright (c) 2009-2013 LSI Corporation
  5. * Copyright (c) 2013-2014 Avago Technologies
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version 2
  10. * of the License, or (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  19. *
  20. * FILE: megaraid_sas_fusion.c
  21. *
  22. * Authors: Avago Technologies
  23. * Sumant Patro
  24. * Adam Radford
  25. * Kashyap Desai <kashyap.desai@avagotech.com>
  26. * Sumit Saxena <sumit.saxena@avagotech.com>
  27. *
  28. * Send feedback to: megaraidlinux.pdl@avagotech.com
  29. *
  30. * Mail to: Avago Technologies, 350 West Trimble Road, Building 90,
  31. * San Jose, California 95131
  32. */
  33. #include <linux/kernel.h>
  34. #include <linux/types.h>
  35. #include <linux/pci.h>
  36. #include <linux/list.h>
  37. #include <linux/moduleparam.h>
  38. #include <linux/module.h>
  39. #include <linux/spinlock.h>
  40. #include <linux/interrupt.h>
  41. #include <linux/delay.h>
  42. #include <linux/uio.h>
  43. #include <linux/uaccess.h>
  44. #include <linux/fs.h>
  45. #include <linux/compat.h>
  46. #include <linux/blkdev.h>
  47. #include <linux/mutex.h>
  48. #include <linux/poll.h>
  49. #include <scsi/scsi.h>
  50. #include <scsi/scsi_cmnd.h>
  51. #include <scsi/scsi_device.h>
  52. #include <scsi/scsi_host.h>
  53. #include <scsi/scsi_dbg.h>
  54. #include <linux/dmi.h>
  55. #include "megaraid_sas_fusion.h"
  56. #include "megaraid_sas.h"
  57. extern void megasas_free_cmds(struct megasas_instance *instance);
  58. extern struct megasas_cmd *megasas_get_cmd(struct megasas_instance
  59. *instance);
  60. extern void
  61. megasas_complete_cmd(struct megasas_instance *instance,
  62. struct megasas_cmd *cmd, u8 alt_status);
  63. int
  64. wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
  65. int seconds);
  66. void
  67. megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd);
  68. int megasas_alloc_cmds(struct megasas_instance *instance);
  69. int
  70. megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs);
  71. int
  72. megasas_issue_polled(struct megasas_instance *instance,
  73. struct megasas_cmd *cmd);
  74. void
  75. megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
  76. int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
  77. void megaraid_sas_kill_hba(struct megasas_instance *instance);
  78. extern u32 megasas_dbg_lvl;
  79. void megasas_sriov_heartbeat_handler(unsigned long instance_addr);
  80. int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
  81. int initial);
  82. void megasas_start_timer(struct megasas_instance *instance,
  83. struct timer_list *timer,
  84. void *fn, unsigned long interval);
  85. extern struct megasas_mgmt_info megasas_mgmt_info;
  86. extern int resetwaittime;
  87. /**
  88. * megasas_enable_intr_fusion - Enables interrupts
  89. * @regs: MFI register set
  90. */
  91. void
  92. megasas_enable_intr_fusion(struct megasas_instance *instance)
  93. {
  94. struct megasas_register_set __iomem *regs;
  95. regs = instance->reg_set;
  96. instance->mask_interrupts = 0;
  97. /* For Thunderbolt/Invader also clear intr on enable */
  98. writel(~0, &regs->outbound_intr_status);
  99. readl(&regs->outbound_intr_status);
  100. writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
  101. /* Dummy readl to force pci flush */
  102. readl(&regs->outbound_intr_mask);
  103. }
  104. /**
  105. * megasas_disable_intr_fusion - Disables interrupt
  106. * @regs: MFI register set
  107. */
  108. void
  109. megasas_disable_intr_fusion(struct megasas_instance *instance)
  110. {
  111. u32 mask = 0xFFFFFFFF;
  112. u32 status;
  113. struct megasas_register_set __iomem *regs;
  114. regs = instance->reg_set;
  115. instance->mask_interrupts = 1;
  116. writel(mask, &regs->outbound_intr_mask);
  117. /* Dummy readl to force pci flush */
  118. status = readl(&regs->outbound_intr_mask);
  119. }
  120. int
  121. megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs)
  122. {
  123. u32 status;
  124. /*
  125. * Check if it is our interrupt
  126. */
  127. status = readl(&regs->outbound_intr_status);
  128. if (status & 1) {
  129. writel(status, &regs->outbound_intr_status);
  130. readl(&regs->outbound_intr_status);
  131. return 1;
  132. }
  133. if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
  134. return 0;
  135. return 1;
  136. }
  137. /**
  138. * megasas_get_cmd_fusion - Get a command from the free pool
  139. * @instance: Adapter soft state
  140. *
  141. * Returns a blk_tag indexed mpt frame
  142. */
  143. inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
  144. *instance, u32 blk_tag)
  145. {
  146. struct fusion_context *fusion;
  147. fusion = instance->ctrl_context;
  148. return fusion->cmd_list[blk_tag];
  149. }
  150. /**
  151. * megasas_return_cmd_fusion - Return a cmd to free command pool
  152. * @instance: Adapter soft state
  153. * @cmd: Command packet to be returned to free command pool
  154. */
  155. inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
  156. struct megasas_cmd_fusion *cmd)
  157. {
  158. cmd->scmd = NULL;
  159. memset(cmd->io_request, 0, sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
  160. }
  161. /**
  162. * megasas_fire_cmd_fusion - Sends command to the FW
  163. */
  164. static void
  165. megasas_fire_cmd_fusion(struct megasas_instance *instance,
  166. union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
  167. {
  168. #if defined(writeq) && defined(CONFIG_64BIT)
  169. u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
  170. le32_to_cpu(req_desc->u.low));
  171. writeq(req_data, &instance->reg_set->inbound_low_queue_port);
  172. #else
  173. unsigned long flags;
  174. spin_lock_irqsave(&instance->hba_lock, flags);
  175. writel(le32_to_cpu(req_desc->u.low),
  176. &instance->reg_set->inbound_low_queue_port);
  177. writel(le32_to_cpu(req_desc->u.high),
  178. &instance->reg_set->inbound_high_queue_port);
  179. mmiowb();
  180. spin_unlock_irqrestore(&instance->hba_lock, flags);
  181. #endif
  182. }
  183. /**
  184. * megasas_teardown_frame_pool_fusion - Destroy the cmd frame DMA pool
  185. * @instance: Adapter soft state
  186. */
  187. static void megasas_teardown_frame_pool_fusion(
  188. struct megasas_instance *instance)
  189. {
  190. int i;
  191. struct fusion_context *fusion = instance->ctrl_context;
  192. u16 max_cmd = instance->max_fw_cmds;
  193. struct megasas_cmd_fusion *cmd;
  194. if (!fusion->sg_dma_pool || !fusion->sense_dma_pool) {
  195. dev_err(&instance->pdev->dev, "dma pool is null. SG Pool %p, "
  196. "sense pool : %p\n", fusion->sg_dma_pool,
  197. fusion->sense_dma_pool);
  198. return;
  199. }
  200. /*
  201. * Return all frames to pool
  202. */
  203. for (i = 0; i < max_cmd; i++) {
  204. cmd = fusion->cmd_list[i];
  205. if (cmd->sg_frame)
  206. pci_pool_free(fusion->sg_dma_pool, cmd->sg_frame,
  207. cmd->sg_frame_phys_addr);
  208. if (cmd->sense)
  209. pci_pool_free(fusion->sense_dma_pool, cmd->sense,
  210. cmd->sense_phys_addr);
  211. }
  212. /*
  213. * Now destroy the pool itself
  214. */
  215. pci_pool_destroy(fusion->sg_dma_pool);
  216. pci_pool_destroy(fusion->sense_dma_pool);
  217. fusion->sg_dma_pool = NULL;
  218. fusion->sense_dma_pool = NULL;
  219. }
  220. /**
  221. * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool
  222. * @instance: Adapter soft state
  223. */
  224. void
  225. megasas_free_cmds_fusion(struct megasas_instance *instance)
  226. {
  227. int i;
  228. struct fusion_context *fusion = instance->ctrl_context;
  229. u32 max_cmds, req_sz, reply_sz, io_frames_sz;
  230. req_sz = fusion->request_alloc_sz;
  231. reply_sz = fusion->reply_alloc_sz;
  232. io_frames_sz = fusion->io_frames_alloc_sz;
  233. max_cmds = instance->max_fw_cmds;
  234. /* Free descriptors and request Frames memory */
  235. if (fusion->req_frames_desc)
  236. dma_free_coherent(&instance->pdev->dev, req_sz,
  237. fusion->req_frames_desc,
  238. fusion->req_frames_desc_phys);
  239. if (fusion->reply_frames_desc) {
  240. pci_pool_free(fusion->reply_frames_desc_pool,
  241. fusion->reply_frames_desc,
  242. fusion->reply_frames_desc_phys);
  243. pci_pool_destroy(fusion->reply_frames_desc_pool);
  244. }
  245. if (fusion->io_request_frames) {
  246. pci_pool_free(fusion->io_request_frames_pool,
  247. fusion->io_request_frames,
  248. fusion->io_request_frames_phys);
  249. pci_pool_destroy(fusion->io_request_frames_pool);
  250. }
  251. /* Free the Fusion frame pool */
  252. megasas_teardown_frame_pool_fusion(instance);
  253. /* Free all the commands in the cmd_list */
  254. for (i = 0; i < max_cmds; i++)
  255. kfree(fusion->cmd_list[i]);
  256. /* Free the cmd_list buffer itself */
  257. kfree(fusion->cmd_list);
  258. fusion->cmd_list = NULL;
  259. }
  260. /**
  261. * megasas_create_frame_pool_fusion - Creates DMA pool for cmd frames
  262. * @instance: Adapter soft state
  263. *
  264. */
  265. static int megasas_create_frame_pool_fusion(struct megasas_instance *instance)
  266. {
  267. int i;
  268. u32 max_cmd;
  269. struct fusion_context *fusion;
  270. struct megasas_cmd_fusion *cmd;
  271. fusion = instance->ctrl_context;
  272. max_cmd = instance->max_fw_cmds;
  273. /*
  274. * Use DMA pool facility provided by PCI layer
  275. */
  276. fusion->sg_dma_pool = pci_pool_create("sg_pool_fusion", instance->pdev,
  277. instance->max_chain_frame_sz,
  278. 4, 0);
  279. if (!fusion->sg_dma_pool) {
  280. dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup request pool fusion\n");
  281. return -ENOMEM;
  282. }
  283. fusion->sense_dma_pool = pci_pool_create("sense pool fusion",
  284. instance->pdev,
  285. SCSI_SENSE_BUFFERSIZE, 64, 0);
  286. if (!fusion->sense_dma_pool) {
  287. dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool fusion\n");
  288. pci_pool_destroy(fusion->sg_dma_pool);
  289. fusion->sg_dma_pool = NULL;
  290. return -ENOMEM;
  291. }
  292. /*
  293. * Allocate and attach a frame to each of the commands in cmd_list
  294. */
  295. for (i = 0; i < max_cmd; i++) {
  296. cmd = fusion->cmd_list[i];
  297. cmd->sg_frame = pci_pool_alloc(fusion->sg_dma_pool,
  298. GFP_KERNEL,
  299. &cmd->sg_frame_phys_addr);
  300. cmd->sense = pci_pool_alloc(fusion->sense_dma_pool,
  301. GFP_KERNEL, &cmd->sense_phys_addr);
  302. /*
  303. * megasas_teardown_frame_pool_fusion() takes care of freeing
  304. * whatever has been allocated
  305. */
  306. if (!cmd->sg_frame || !cmd->sense) {
  307. dev_printk(KERN_DEBUG, &instance->pdev->dev, "pci_pool_alloc failed\n");
  308. megasas_teardown_frame_pool_fusion(instance);
  309. return -ENOMEM;
  310. }
  311. }
  312. return 0;
  313. }
  314. /**
  315. * megasas_alloc_cmds_fusion - Allocates the command packets
  316. * @instance: Adapter soft state
  317. *
  318. *
  319. * Each frame has a 32-bit field called context. This context is used to get
  320. * back the megasas_cmd_fusion from the frame when a frame gets completed
  321. * In this driver, the 32 bit values are the indices into an array cmd_list.
  322. * This array is used only to look up the megasas_cmd_fusion given the context.
  323. * The free commands themselves are maintained in a linked list called cmd_pool.
  324. *
  325. * cmds are formed in the io_request and sg_frame members of the
  326. * megasas_cmd_fusion. The context field is used to get a request descriptor
  327. * and is used as SMID of the cmd.
  328. * SMID value range is from 1 to max_fw_cmds.
  329. */
  330. int
  331. megasas_alloc_cmds_fusion(struct megasas_instance *instance)
  332. {
  333. int i, j, count;
  334. u32 max_cmd, io_frames_sz;
  335. struct fusion_context *fusion;
  336. struct megasas_cmd_fusion *cmd;
  337. union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
  338. u32 offset;
  339. dma_addr_t io_req_base_phys;
  340. u8 *io_req_base;
  341. fusion = instance->ctrl_context;
  342. max_cmd = instance->max_fw_cmds;
  343. fusion->req_frames_desc =
  344. dma_alloc_coherent(&instance->pdev->dev,
  345. fusion->request_alloc_sz,
  346. &fusion->req_frames_desc_phys, GFP_KERNEL);
  347. if (!fusion->req_frames_desc) {
  348. dev_err(&instance->pdev->dev, "Could not allocate memory for "
  349. "request_frames\n");
  350. goto fail_req_desc;
  351. }
  352. count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
  353. fusion->reply_frames_desc_pool =
  354. pci_pool_create("reply_frames pool", instance->pdev,
  355. fusion->reply_alloc_sz * count, 16, 0);
  356. if (!fusion->reply_frames_desc_pool) {
  357. dev_err(&instance->pdev->dev, "Could not allocate memory for "
  358. "reply_frame pool\n");
  359. goto fail_reply_desc;
  360. }
  361. fusion->reply_frames_desc =
  362. pci_pool_alloc(fusion->reply_frames_desc_pool, GFP_KERNEL,
  363. &fusion->reply_frames_desc_phys);
  364. if (!fusion->reply_frames_desc) {
  365. dev_err(&instance->pdev->dev, "Could not allocate memory for "
  366. "reply_frame pool\n");
  367. pci_pool_destroy(fusion->reply_frames_desc_pool);
  368. goto fail_reply_desc;
  369. }
  370. reply_desc = fusion->reply_frames_desc;
  371. for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
  372. reply_desc->Words = cpu_to_le64(ULLONG_MAX);
  373. io_frames_sz = fusion->io_frames_alloc_sz;
  374. fusion->io_request_frames_pool =
  375. pci_pool_create("io_request_frames pool", instance->pdev,
  376. fusion->io_frames_alloc_sz, 16, 0);
  377. if (!fusion->io_request_frames_pool) {
  378. dev_err(&instance->pdev->dev, "Could not allocate memory for "
  379. "io_request_frame pool\n");
  380. goto fail_io_frames;
  381. }
  382. fusion->io_request_frames =
  383. pci_pool_alloc(fusion->io_request_frames_pool, GFP_KERNEL,
  384. &fusion->io_request_frames_phys);
  385. if (!fusion->io_request_frames) {
  386. dev_err(&instance->pdev->dev, "Could not allocate memory for "
  387. "io_request_frames frames\n");
  388. pci_pool_destroy(fusion->io_request_frames_pool);
  389. goto fail_io_frames;
  390. }
  391. /*
  392. * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
  393. * Allocate the dynamic array first and then allocate individual
  394. * commands.
  395. */
  396. fusion->cmd_list = kzalloc(sizeof(struct megasas_cmd_fusion *)
  397. * max_cmd, GFP_KERNEL);
  398. if (!fusion->cmd_list) {
  399. dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory. Could not alloc "
  400. "memory for cmd_list_fusion\n");
  401. goto fail_cmd_list;
  402. }
  403. max_cmd = instance->max_fw_cmds;
  404. for (i = 0; i < max_cmd; i++) {
  405. fusion->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd_fusion),
  406. GFP_KERNEL);
  407. if (!fusion->cmd_list[i]) {
  408. dev_err(&instance->pdev->dev, "Could not alloc cmd list fusion\n");
  409. for (j = 0; j < i; j++)
  410. kfree(fusion->cmd_list[j]);
  411. kfree(fusion->cmd_list);
  412. fusion->cmd_list = NULL;
  413. goto fail_cmd_list;
  414. }
  415. }
  416. /* The first 256 bytes (SMID 0) is not used. Don't add to cmd list */
  417. io_req_base = fusion->io_request_frames +
  418. MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
  419. io_req_base_phys = fusion->io_request_frames_phys +
  420. MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
  421. /*
  422. * Add all the commands to command pool (fusion->cmd_pool)
  423. */
  424. /* SMID 0 is reserved. Set SMID/index from 1 */
  425. for (i = 0; i < max_cmd; i++) {
  426. cmd = fusion->cmd_list[i];
  427. offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
  428. memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
  429. cmd->index = i + 1;
  430. cmd->scmd = NULL;
  431. cmd->sync_cmd_idx = (i >= instance->max_scsi_cmds) ?
  432. (i - instance->max_scsi_cmds) :
  433. (u32)ULONG_MAX; /* Set to Invalid */
  434. cmd->instance = instance;
  435. cmd->io_request =
  436. (struct MPI2_RAID_SCSI_IO_REQUEST *)
  437. (io_req_base + offset);
  438. memset(cmd->io_request, 0,
  439. sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
  440. cmd->io_request_phys_addr = io_req_base_phys + offset;
  441. }
  442. /*
  443. * Create a frame pool and assign one frame to each cmd
  444. */
  445. if (megasas_create_frame_pool_fusion(instance)) {
  446. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
  447. megasas_free_cmds_fusion(instance);
  448. goto fail_req_desc;
  449. }
  450. return 0;
  451. fail_cmd_list:
  452. pci_pool_free(fusion->io_request_frames_pool, fusion->io_request_frames,
  453. fusion->io_request_frames_phys);
  454. pci_pool_destroy(fusion->io_request_frames_pool);
  455. fail_io_frames:
  456. dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
  457. fusion->reply_frames_desc,
  458. fusion->reply_frames_desc_phys);
  459. pci_pool_free(fusion->reply_frames_desc_pool,
  460. fusion->reply_frames_desc,
  461. fusion->reply_frames_desc_phys);
  462. pci_pool_destroy(fusion->reply_frames_desc_pool);
  463. fail_reply_desc:
  464. dma_free_coherent(&instance->pdev->dev, fusion->request_alloc_sz,
  465. fusion->req_frames_desc,
  466. fusion->req_frames_desc_phys);
  467. fail_req_desc:
  468. return -ENOMEM;
  469. }
  470. /**
  471. * wait_and_poll - Issues a polling command
  472. * @instance: Adapter soft state
  473. * @cmd: Command packet to be issued
  474. *
  475. * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
  476. */
  477. int
  478. wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
  479. int seconds)
  480. {
  481. int i;
  482. struct megasas_header *frame_hdr = &cmd->frame->hdr;
  483. struct fusion_context *fusion;
  484. u32 msecs = seconds * 1000;
  485. fusion = instance->ctrl_context;
  486. /*
  487. * Wait for cmd_status to change
  488. */
  489. for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
  490. rmb();
  491. msleep(20);
  492. }
  493. if (frame_hdr->cmd_status == 0xff)
  494. return -ETIME;
  495. return (frame_hdr->cmd_status == MFI_STAT_OK) ?
  496. 0 : 1;
  497. }
  498. /**
  499. * megasas_ioc_init_fusion - Initializes the FW
  500. * @instance: Adapter soft state
  501. *
  502. * Issues the IOC Init cmd
  503. */
  504. int
  505. megasas_ioc_init_fusion(struct megasas_instance *instance)
  506. {
  507. struct megasas_init_frame *init_frame;
  508. struct MPI2_IOC_INIT_REQUEST *IOCInitMessage;
  509. dma_addr_t ioc_init_handle;
  510. struct megasas_cmd *cmd;
  511. u8 ret;
  512. struct fusion_context *fusion;
  513. union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
  514. int i;
  515. struct megasas_header *frame_hdr;
  516. const char *sys_info;
  517. MFI_CAPABILITIES *drv_ops;
  518. fusion = instance->ctrl_context;
  519. cmd = megasas_get_cmd(instance);
  520. if (!cmd) {
  521. dev_err(&instance->pdev->dev, "Could not allocate cmd for INIT Frame\n");
  522. ret = 1;
  523. goto fail_get_cmd;
  524. }
  525. IOCInitMessage =
  526. dma_alloc_coherent(&instance->pdev->dev,
  527. sizeof(struct MPI2_IOC_INIT_REQUEST),
  528. &ioc_init_handle, GFP_KERNEL);
  529. if (!IOCInitMessage) {
  530. dev_err(&instance->pdev->dev, "Could not allocate memory for "
  531. "IOCInitMessage\n");
  532. ret = 1;
  533. goto fail_fw_init;
  534. }
  535. memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
  536. IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
  537. IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
  538. IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
  539. IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
  540. IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
  541. IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
  542. IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys);
  543. IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
  544. IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
  545. init_frame = (struct megasas_init_frame *)cmd->frame;
  546. memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
  547. frame_hdr = &cmd->frame->hdr;
  548. frame_hdr->cmd_status = 0xFF;
  549. frame_hdr->flags = cpu_to_le16(
  550. le16_to_cpu(frame_hdr->flags) |
  551. MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
  552. init_frame->cmd = MFI_CMD_INIT;
  553. init_frame->cmd_status = 0xFF;
  554. drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
  555. /* driver support Extended MSIX */
  556. if (fusion->adapter_type == INVADER_SERIES)
  557. drv_ops->mfi_capabilities.support_additional_msix = 1;
  558. /* driver supports HA / Remote LUN over Fast Path interface */
  559. drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
  560. drv_ops->mfi_capabilities.support_max_255lds = 1;
  561. drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
  562. drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
  563. if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
  564. drv_ops->mfi_capabilities.support_ext_io_size = 1;
  565. /* Convert capability to LE32 */
  566. cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
  567. sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
  568. if (instance->system_info_buf && sys_info) {
  569. memcpy(instance->system_info_buf->systemId, sys_info,
  570. strlen(sys_info) > 64 ? 64 : strlen(sys_info));
  571. instance->system_info_buf->systemIdLength =
  572. strlen(sys_info) > 64 ? 64 : strlen(sys_info);
  573. init_frame->system_info_lo = instance->system_info_h;
  574. init_frame->system_info_hi = 0;
  575. }
  576. init_frame->queue_info_new_phys_addr_hi =
  577. cpu_to_le32(upper_32_bits(ioc_init_handle));
  578. init_frame->queue_info_new_phys_addr_lo =
  579. cpu_to_le32(lower_32_bits(ioc_init_handle));
  580. init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
  581. req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
  582. req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
  583. req_desc.MFAIo.RequestFlags =
  584. (MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
  585. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  586. /*
  587. * disable the intr before firing the init frame
  588. */
  589. instance->instancet->disable_intr(instance);
  590. for (i = 0; i < (10 * 1000); i += 20) {
  591. if (readl(&instance->reg_set->doorbell) & 1)
  592. msleep(20);
  593. else
  594. break;
  595. }
  596. megasas_fire_cmd_fusion(instance, &req_desc);
  597. wait_and_poll(instance, cmd, MFI_POLL_TIMEOUT_SECS);
  598. frame_hdr = &cmd->frame->hdr;
  599. if (frame_hdr->cmd_status != 0) {
  600. ret = 1;
  601. goto fail_fw_init;
  602. }
  603. dev_err(&instance->pdev->dev, "Init cmd success\n");
  604. ret = 0;
  605. fail_fw_init:
  606. megasas_return_cmd(instance, cmd);
  607. if (IOCInitMessage)
  608. dma_free_coherent(&instance->pdev->dev,
  609. sizeof(struct MPI2_IOC_INIT_REQUEST),
  610. IOCInitMessage, ioc_init_handle);
  611. fail_get_cmd:
  612. return ret;
  613. }
  614. /**
  615. * megasas_sync_pd_seq_num - JBOD SEQ MAP
  616. * @instance: Adapter soft state
  617. * @pend: set to 1, if it is pended jbod map.
  618. *
  619. * Issue Jbod map to the firmware. If it is pended command,
  620. * issue command and return. If it is first instance of jbod map
  621. * issue and receive command.
  622. */
  623. int
  624. megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
  625. int ret = 0;
  626. u32 pd_seq_map_sz;
  627. struct megasas_cmd *cmd;
  628. struct megasas_dcmd_frame *dcmd;
  629. struct fusion_context *fusion = instance->ctrl_context;
  630. struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
  631. dma_addr_t pd_seq_h;
  632. pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
  633. pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
  634. pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
  635. (sizeof(struct MR_PD_CFG_SEQ) *
  636. (MAX_PHYSICAL_DEVICES - 1));
  637. cmd = megasas_get_cmd(instance);
  638. if (!cmd) {
  639. dev_err(&instance->pdev->dev,
  640. "Could not get mfi cmd. Fail from %s %d\n",
  641. __func__, __LINE__);
  642. return -ENOMEM;
  643. }
  644. dcmd = &cmd->frame->dcmd;
  645. memset(pd_sync, 0, pd_seq_map_sz);
  646. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  647. dcmd->cmd = MFI_CMD_DCMD;
  648. dcmd->cmd_status = 0xFF;
  649. dcmd->sge_count = 1;
  650. dcmd->timeout = 0;
  651. dcmd->pad_0 = 0;
  652. dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
  653. dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
  654. dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(pd_seq_h);
  655. dcmd->sgl.sge32[0].length = cpu_to_le32(pd_seq_map_sz);
  656. if (pend) {
  657. dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
  658. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
  659. instance->jbod_seq_cmd = cmd;
  660. instance->instancet->issue_dcmd(instance, cmd);
  661. return 0;
  662. }
  663. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
  664. /* Below code is only for non pended DCMD */
  665. if (instance->ctrl_context && !instance->mask_interrupts)
  666. ret = megasas_issue_blocked_cmd(instance, cmd, 60);
  667. else
  668. ret = megasas_issue_polled(instance, cmd);
  669. if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
  670. dev_warn(&instance->pdev->dev,
  671. "driver supports max %d JBOD, but FW reports %d\n",
  672. MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
  673. ret = -EINVAL;
  674. }
  675. if (!ret)
  676. instance->pd_seq_map_id++;
  677. megasas_return_cmd(instance, cmd);
  678. return ret;
  679. }
  680. /*
  681. * megasas_get_ld_map_info - Returns FW's ld_map structure
  682. * @instance: Adapter soft state
  683. * @pend: Pend the command or not
  684. * Issues an internal command (DCMD) to get the FW's controller PD
  685. * list structure. This information is mainly used to find out SYSTEM
  686. * supported by the FW.
  687. * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
  688. * dcmd.mbox.b[0] - number of LDs being sync'd
  689. * dcmd.mbox.b[1] - 0 - complete command immediately.
  690. * - 1 - pend till config change
  691. * dcmd.mbox.b[2] - 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
  692. * - 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
  693. * uses extended struct MR_FW_RAID_MAP_EXT
  694. */
  695. static int
  696. megasas_get_ld_map_info(struct megasas_instance *instance)
  697. {
  698. int ret = 0;
  699. struct megasas_cmd *cmd;
  700. struct megasas_dcmd_frame *dcmd;
  701. void *ci;
  702. dma_addr_t ci_h = 0;
  703. u32 size_map_info;
  704. struct fusion_context *fusion;
  705. cmd = megasas_get_cmd(instance);
  706. if (!cmd) {
  707. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
  708. return -ENOMEM;
  709. }
  710. fusion = instance->ctrl_context;
  711. if (!fusion) {
  712. megasas_return_cmd(instance, cmd);
  713. return -ENXIO;
  714. }
  715. dcmd = &cmd->frame->dcmd;
  716. size_map_info = fusion->current_map_sz;
  717. ci = (void *) fusion->ld_map[(instance->map_id & 1)];
  718. ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
  719. if (!ci) {
  720. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
  721. megasas_return_cmd(instance, cmd);
  722. return -ENOMEM;
  723. }
  724. memset(ci, 0, fusion->max_map_sz);
  725. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  726. #if VD_EXT_DEBUG
  727. dev_dbg(&instance->pdev->dev,
  728. "%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
  729. __func__, cpu_to_le32(size_map_info));
  730. #endif
  731. dcmd->cmd = MFI_CMD_DCMD;
  732. dcmd->cmd_status = 0xFF;
  733. dcmd->sge_count = 1;
  734. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ);
  735. dcmd->timeout = 0;
  736. dcmd->pad_0 = 0;
  737. dcmd->data_xfer_len = cpu_to_le32(size_map_info);
  738. dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
  739. dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
  740. dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
  741. if (instance->ctrl_context && !instance->mask_interrupts)
  742. ret = megasas_issue_blocked_cmd(instance, cmd,
  743. MEGASAS_BLOCKED_CMD_TIMEOUT);
  744. else
  745. ret = megasas_issue_polled(instance, cmd);
  746. megasas_return_cmd(instance, cmd);
  747. return ret;
  748. }
  749. u8
  750. megasas_get_map_info(struct megasas_instance *instance)
  751. {
  752. struct fusion_context *fusion = instance->ctrl_context;
  753. fusion->fast_path_io = 0;
  754. if (!megasas_get_ld_map_info(instance)) {
  755. if (MR_ValidateMapInfo(instance)) {
  756. fusion->fast_path_io = 1;
  757. return 0;
  758. }
  759. }
  760. return 1;
  761. }
  762. /*
  763. * megasas_sync_map_info - Returns FW's ld_map structure
  764. * @instance: Adapter soft state
  765. *
  766. * Issues an internal command (DCMD) to get the FW's controller PD
  767. * list structure. This information is mainly used to find out SYSTEM
  768. * supported by the FW.
  769. */
  770. int
  771. megasas_sync_map_info(struct megasas_instance *instance)
  772. {
  773. int ret = 0, i;
  774. struct megasas_cmd *cmd;
  775. struct megasas_dcmd_frame *dcmd;
  776. u32 size_sync_info, num_lds;
  777. struct fusion_context *fusion;
  778. struct MR_LD_TARGET_SYNC *ci = NULL;
  779. struct MR_DRV_RAID_MAP_ALL *map;
  780. struct MR_LD_RAID *raid;
  781. struct MR_LD_TARGET_SYNC *ld_sync;
  782. dma_addr_t ci_h = 0;
  783. u32 size_map_info;
  784. cmd = megasas_get_cmd(instance);
  785. if (!cmd) {
  786. dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
  787. return -ENOMEM;
  788. }
  789. fusion = instance->ctrl_context;
  790. if (!fusion) {
  791. megasas_return_cmd(instance, cmd);
  792. return 1;
  793. }
  794. map = fusion->ld_drv_map[instance->map_id & 1];
  795. num_lds = le16_to_cpu(map->raidMap.ldCount);
  796. dcmd = &cmd->frame->dcmd;
  797. size_sync_info = sizeof(struct MR_LD_TARGET_SYNC) *num_lds;
  798. memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
  799. ci = (struct MR_LD_TARGET_SYNC *)
  800. fusion->ld_map[(instance->map_id - 1) & 1];
  801. memset(ci, 0, fusion->max_map_sz);
  802. ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
  803. ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
  804. for (i = 0; i < num_lds; i++, ld_sync++) {
  805. raid = MR_LdRaidGet(i, map);
  806. ld_sync->targetId = MR_GetLDTgtId(i, map);
  807. ld_sync->seqNum = raid->seqNum;
  808. }
  809. size_map_info = fusion->current_map_sz;
  810. dcmd->cmd = MFI_CMD_DCMD;
  811. dcmd->cmd_status = 0xFF;
  812. dcmd->sge_count = 1;
  813. dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE);
  814. dcmd->timeout = 0;
  815. dcmd->pad_0 = 0;
  816. dcmd->data_xfer_len = cpu_to_le32(size_map_info);
  817. dcmd->mbox.b[0] = num_lds;
  818. dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
  819. dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
  820. dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
  821. dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
  822. instance->map_update_cmd = cmd;
  823. instance->instancet->issue_dcmd(instance, cmd);
  824. return ret;
  825. }
  826. /*
  827. * meagasas_display_intel_branding - Display branding string
  828. * @instance: per adapter object
  829. *
  830. * Return nothing.
  831. */
  832. static void
  833. megasas_display_intel_branding(struct megasas_instance *instance)
  834. {
  835. if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
  836. return;
  837. switch (instance->pdev->device) {
  838. case PCI_DEVICE_ID_LSI_INVADER:
  839. switch (instance->pdev->subsystem_device) {
  840. case MEGARAID_INTEL_RS3DC080_SSDID:
  841. dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
  842. instance->host->host_no,
  843. MEGARAID_INTEL_RS3DC080_BRANDING);
  844. break;
  845. case MEGARAID_INTEL_RS3DC040_SSDID:
  846. dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
  847. instance->host->host_no,
  848. MEGARAID_INTEL_RS3DC040_BRANDING);
  849. break;
  850. case MEGARAID_INTEL_RS3SC008_SSDID:
  851. dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
  852. instance->host->host_no,
  853. MEGARAID_INTEL_RS3SC008_BRANDING);
  854. break;
  855. case MEGARAID_INTEL_RS3MC044_SSDID:
  856. dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
  857. instance->host->host_no,
  858. MEGARAID_INTEL_RS3MC044_BRANDING);
  859. break;
  860. default:
  861. break;
  862. }
  863. break;
  864. case PCI_DEVICE_ID_LSI_FURY:
  865. switch (instance->pdev->subsystem_device) {
  866. case MEGARAID_INTEL_RS3WC080_SSDID:
  867. dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
  868. instance->host->host_no,
  869. MEGARAID_INTEL_RS3WC080_BRANDING);
  870. break;
  871. case MEGARAID_INTEL_RS3WC040_SSDID:
  872. dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
  873. instance->host->host_no,
  874. MEGARAID_INTEL_RS3WC040_BRANDING);
  875. break;
  876. default:
  877. break;
  878. }
  879. break;
  880. case PCI_DEVICE_ID_LSI_CUTLASS_52:
  881. case PCI_DEVICE_ID_LSI_CUTLASS_53:
  882. switch (instance->pdev->subsystem_device) {
  883. case MEGARAID_INTEL_RMS3BC160_SSDID:
  884. dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
  885. instance->host->host_no,
  886. MEGARAID_INTEL_RMS3BC160_BRANDING);
  887. break;
  888. default:
  889. break;
  890. }
  891. break;
  892. default:
  893. break;
  894. }
  895. }
  896. /**
  897. * megasas_init_adapter_fusion - Initializes the FW
  898. * @instance: Adapter soft state
  899. *
  900. * This is the main function for initializing firmware.
  901. */
  902. u32
  903. megasas_init_adapter_fusion(struct megasas_instance *instance)
  904. {
  905. struct megasas_register_set __iomem *reg_set;
  906. struct fusion_context *fusion;
  907. u32 max_cmd, scratch_pad_2;
  908. int i = 0, count;
  909. fusion = instance->ctrl_context;
  910. reg_set = instance->reg_set;
  911. /*
  912. * Get various operational parameters from status register
  913. */
  914. instance->max_fw_cmds =
  915. instance->instancet->read_fw_status_reg(reg_set) & 0x00FFFF;
  916. instance->max_fw_cmds = min(instance->max_fw_cmds, (u16)1008);
  917. /*
  918. * Reduce the max supported cmds by 1. This is to ensure that the
  919. * reply_q_sz (1 more than the max cmd that driver may send)
  920. * does not exceed max cmds that the FW can support
  921. */
  922. instance->max_fw_cmds = instance->max_fw_cmds-1;
  923. /*
  924. * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
  925. */
  926. instance->max_mfi_cmds =
  927. MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
  928. max_cmd = instance->max_fw_cmds;
  929. fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
  930. fusion->request_alloc_sz =
  931. sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
  932. fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)
  933. *(fusion->reply_q_depth);
  934. fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
  935. (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE *
  936. (max_cmd + 1)); /* Extra 1 for SMID 0 */
  937. scratch_pad_2 = readl(&instance->reg_set->outbound_scratch_pad_2);
  938. /* If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
  939. * Firmware support extended IO chain frame which is 4 times more than
  940. * legacy Firmware.
  941. * Legacy Firmware - Frame size is (8 * 128) = 1K
  942. * 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
  943. */
  944. if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
  945. instance->max_chain_frame_sz =
  946. ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
  947. MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
  948. else
  949. instance->max_chain_frame_sz =
  950. ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
  951. MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
  952. if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
  953. dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
  954. instance->max_chain_frame_sz,
  955. MEGASAS_CHAIN_FRAME_SZ_MIN);
  956. instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
  957. }
  958. fusion->max_sge_in_main_msg =
  959. (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
  960. - offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
  961. fusion->max_sge_in_chain =
  962. instance->max_chain_frame_sz
  963. / sizeof(union MPI2_SGE_IO_UNION);
  964. instance->max_num_sge =
  965. rounddown_pow_of_two(fusion->max_sge_in_main_msg
  966. + fusion->max_sge_in_chain - 2);
  967. /* Used for pass thru MFI frame (DCMD) */
  968. fusion->chain_offset_mfi_pthru =
  969. offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
  970. fusion->chain_offset_io_request =
  971. (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
  972. sizeof(union MPI2_SGE_IO_UNION))/16;
  973. count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
  974. for (i = 0 ; i < count; i++)
  975. fusion->last_reply_idx[i] = 0;
  976. /*
  977. * For fusion adapters, 3 commands for IOCTL and 5 commands
  978. * for driver's internal DCMDs.
  979. */
  980. instance->max_scsi_cmds = instance->max_fw_cmds -
  981. (MEGASAS_FUSION_INTERNAL_CMDS +
  982. MEGASAS_FUSION_IOCTL_CMDS);
  983. sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
  984. /*
  985. * Allocate memory for descriptors
  986. * Create a pool of commands
  987. */
  988. if (megasas_alloc_cmds(instance))
  989. goto fail_alloc_mfi_cmds;
  990. if (megasas_alloc_cmds_fusion(instance))
  991. goto fail_alloc_cmds;
  992. if (megasas_ioc_init_fusion(instance))
  993. goto fail_ioc_init;
  994. megasas_display_intel_branding(instance);
  995. if (megasas_get_ctrl_info(instance)) {
  996. dev_err(&instance->pdev->dev,
  997. "Could not get controller info. Fail from %s %d\n",
  998. __func__, __LINE__);
  999. goto fail_ioc_init;
  1000. }
  1001. instance->flag_ieee = 1;
  1002. fusion->fast_path_io = 0;
  1003. fusion->drv_map_pages = get_order(fusion->drv_map_sz);
  1004. for (i = 0; i < 2; i++) {
  1005. fusion->ld_map[i] = NULL;
  1006. fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
  1007. fusion->drv_map_pages);
  1008. if (!fusion->ld_drv_map[i]) {
  1009. dev_err(&instance->pdev->dev, "Could not allocate "
  1010. "memory for local map info for %d pages\n",
  1011. fusion->drv_map_pages);
  1012. if (i == 1)
  1013. free_pages((ulong)fusion->ld_drv_map[0],
  1014. fusion->drv_map_pages);
  1015. goto fail_ioc_init;
  1016. }
  1017. memset(fusion->ld_drv_map[i], 0,
  1018. ((1 << PAGE_SHIFT) << fusion->drv_map_pages));
  1019. }
  1020. for (i = 0; i < 2; i++) {
  1021. fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
  1022. fusion->max_map_sz,
  1023. &fusion->ld_map_phys[i],
  1024. GFP_KERNEL);
  1025. if (!fusion->ld_map[i]) {
  1026. dev_err(&instance->pdev->dev, "Could not allocate memory "
  1027. "for map info\n");
  1028. goto fail_map_info;
  1029. }
  1030. }
  1031. if (!megasas_get_map_info(instance))
  1032. megasas_sync_map_info(instance);
  1033. return 0;
  1034. fail_map_info:
  1035. if (i == 1)
  1036. dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
  1037. fusion->ld_map[0], fusion->ld_map_phys[0]);
  1038. fail_ioc_init:
  1039. megasas_free_cmds_fusion(instance);
  1040. fail_alloc_cmds:
  1041. megasas_free_cmds(instance);
  1042. fail_alloc_mfi_cmds:
  1043. return 1;
  1044. }
  1045. /**
  1046. * map_cmd_status - Maps FW cmd status to OS cmd status
  1047. * @cmd : Pointer to cmd
  1048. * @status : status of cmd returned by FW
  1049. * @ext_status : ext status of cmd returned by FW
  1050. */
  1051. void
  1052. map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status)
  1053. {
  1054. switch (status) {
  1055. case MFI_STAT_OK:
  1056. cmd->scmd->result = DID_OK << 16;
  1057. break;
  1058. case MFI_STAT_SCSI_IO_FAILED:
  1059. case MFI_STAT_LD_INIT_IN_PROGRESS:
  1060. cmd->scmd->result = (DID_ERROR << 16) | ext_status;
  1061. break;
  1062. case MFI_STAT_SCSI_DONE_WITH_ERROR:
  1063. cmd->scmd->result = (DID_OK << 16) | ext_status;
  1064. if (ext_status == SAM_STAT_CHECK_CONDITION) {
  1065. memset(cmd->scmd->sense_buffer, 0,
  1066. SCSI_SENSE_BUFFERSIZE);
  1067. memcpy(cmd->scmd->sense_buffer, cmd->sense,
  1068. SCSI_SENSE_BUFFERSIZE);
  1069. cmd->scmd->result |= DRIVER_SENSE << 24;
  1070. }
  1071. break;
  1072. case MFI_STAT_LD_OFFLINE:
  1073. case MFI_STAT_DEVICE_NOT_FOUND:
  1074. cmd->scmd->result = DID_BAD_TARGET << 16;
  1075. break;
  1076. case MFI_STAT_CONFIG_SEQ_MISMATCH:
  1077. cmd->scmd->result = DID_IMM_RETRY << 16;
  1078. break;
  1079. default:
  1080. dev_printk(KERN_DEBUG, &cmd->instance->pdev->dev, "FW status %#x\n", status);
  1081. cmd->scmd->result = DID_ERROR << 16;
  1082. break;
  1083. }
  1084. }
  1085. /**
  1086. * megasas_make_sgl_fusion - Prepares 32-bit SGL
  1087. * @instance: Adapter soft state
  1088. * @scp: SCSI command from the mid-layer
  1089. * @sgl_ptr: SGL to be filled in
  1090. * @cmd: cmd we are working on
  1091. *
  1092. * If successful, this function returns the number of SG elements.
  1093. */
  1094. static int
  1095. megasas_make_sgl_fusion(struct megasas_instance *instance,
  1096. struct scsi_cmnd *scp,
  1097. struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
  1098. struct megasas_cmd_fusion *cmd)
  1099. {
  1100. int i, sg_processed, sge_count;
  1101. struct scatterlist *os_sgl;
  1102. struct fusion_context *fusion;
  1103. fusion = instance->ctrl_context;
  1104. if (fusion->adapter_type == INVADER_SERIES) {
  1105. struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
  1106. sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
  1107. sgl_ptr_end->Flags = 0;
  1108. }
  1109. sge_count = scsi_dma_map(scp);
  1110. BUG_ON(sge_count < 0);
  1111. if (sge_count > instance->max_num_sge || !sge_count)
  1112. return sge_count;
  1113. scsi_for_each_sg(scp, os_sgl, sge_count, i) {
  1114. sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
  1115. sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
  1116. sgl_ptr->Flags = 0;
  1117. if (fusion->adapter_type == INVADER_SERIES)
  1118. if (i == sge_count - 1)
  1119. sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
  1120. sgl_ptr++;
  1121. sg_processed = i + 1;
  1122. if ((sg_processed == (fusion->max_sge_in_main_msg - 1)) &&
  1123. (sge_count > fusion->max_sge_in_main_msg)) {
  1124. struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
  1125. if (fusion->adapter_type == INVADER_SERIES) {
  1126. if ((le16_to_cpu(cmd->io_request->IoFlags) &
  1127. MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
  1128. MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
  1129. cmd->io_request->ChainOffset =
  1130. fusion->
  1131. chain_offset_io_request;
  1132. else
  1133. cmd->io_request->ChainOffset = 0;
  1134. } else
  1135. cmd->io_request->ChainOffset =
  1136. fusion->chain_offset_io_request;
  1137. sg_chain = sgl_ptr;
  1138. /* Prepare chain element */
  1139. sg_chain->NextChainOffset = 0;
  1140. if (fusion->adapter_type == INVADER_SERIES)
  1141. sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
  1142. else
  1143. sg_chain->Flags =
  1144. (IEEE_SGE_FLAGS_CHAIN_ELEMENT |
  1145. MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
  1146. sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
  1147. sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
  1148. sgl_ptr =
  1149. (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
  1150. memset(sgl_ptr, 0, instance->max_chain_frame_sz);
  1151. }
  1152. }
  1153. return sge_count;
  1154. }
  1155. /**
  1156. * megasas_set_pd_lba - Sets PD LBA
  1157. * @cdb: CDB
  1158. * @cdb_len: cdb length
  1159. * @start_blk: Start block of IO
  1160. *
  1161. * Used to set the PD LBA in CDB for FP IOs
  1162. */
  1163. void
  1164. megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
  1165. struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
  1166. struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
  1167. {
  1168. struct MR_LD_RAID *raid;
  1169. u32 ld;
  1170. u64 start_blk = io_info->pdBlock;
  1171. u8 *cdb = io_request->CDB.CDB32;
  1172. u32 num_blocks = io_info->numBlocks;
  1173. u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
  1174. /* Check if T10 PI (DIF) is enabled for this LD */
  1175. ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
  1176. raid = MR_LdRaidGet(ld, local_map_ptr);
  1177. if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
  1178. memset(cdb, 0, sizeof(io_request->CDB.CDB32));
  1179. cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
  1180. cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
  1181. if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
  1182. cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
  1183. else
  1184. cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
  1185. cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
  1186. /* LBA */
  1187. cdb[12] = (u8)((start_blk >> 56) & 0xff);
  1188. cdb[13] = (u8)((start_blk >> 48) & 0xff);
  1189. cdb[14] = (u8)((start_blk >> 40) & 0xff);
  1190. cdb[15] = (u8)((start_blk >> 32) & 0xff);
  1191. cdb[16] = (u8)((start_blk >> 24) & 0xff);
  1192. cdb[17] = (u8)((start_blk >> 16) & 0xff);
  1193. cdb[18] = (u8)((start_blk >> 8) & 0xff);
  1194. cdb[19] = (u8)(start_blk & 0xff);
  1195. /* Logical block reference tag */
  1196. io_request->CDB.EEDP32.PrimaryReferenceTag =
  1197. cpu_to_be32(ref_tag);
  1198. io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
  1199. io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
  1200. /* Transfer length */
  1201. cdb[28] = (u8)((num_blocks >> 24) & 0xff);
  1202. cdb[29] = (u8)((num_blocks >> 16) & 0xff);
  1203. cdb[30] = (u8)((num_blocks >> 8) & 0xff);
  1204. cdb[31] = (u8)(num_blocks & 0xff);
  1205. /* set SCSI IO EEDPFlags */
  1206. if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
  1207. io_request->EEDPFlags = cpu_to_le16(
  1208. MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
  1209. MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
  1210. MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
  1211. MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
  1212. MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
  1213. } else {
  1214. io_request->EEDPFlags = cpu_to_le16(
  1215. MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
  1216. MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
  1217. }
  1218. io_request->Control |= cpu_to_le32((0x4 << 26));
  1219. io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
  1220. } else {
  1221. /* Some drives don't support 16/12 byte CDB's, convert to 10 */
  1222. if (((cdb_len == 12) || (cdb_len == 16)) &&
  1223. (start_blk <= 0xffffffff)) {
  1224. if (cdb_len == 16) {
  1225. opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
  1226. flagvals = cdb[1];
  1227. groupnum = cdb[14];
  1228. control = cdb[15];
  1229. } else {
  1230. opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
  1231. flagvals = cdb[1];
  1232. groupnum = cdb[10];
  1233. control = cdb[11];
  1234. }
  1235. memset(cdb, 0, sizeof(io_request->CDB.CDB32));
  1236. cdb[0] = opcode;
  1237. cdb[1] = flagvals;
  1238. cdb[6] = groupnum;
  1239. cdb[9] = control;
  1240. /* Transfer length */
  1241. cdb[8] = (u8)(num_blocks & 0xff);
  1242. cdb[7] = (u8)((num_blocks >> 8) & 0xff);
  1243. io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
  1244. cdb_len = 10;
  1245. } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
  1246. /* Convert to 16 byte CDB for large LBA's */
  1247. switch (cdb_len) {
  1248. case 6:
  1249. opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
  1250. control = cdb[5];
  1251. break;
  1252. case 10:
  1253. opcode =
  1254. cdb[0] == READ_10 ? READ_16 : WRITE_16;
  1255. flagvals = cdb[1];
  1256. groupnum = cdb[6];
  1257. control = cdb[9];
  1258. break;
  1259. case 12:
  1260. opcode =
  1261. cdb[0] == READ_12 ? READ_16 : WRITE_16;
  1262. flagvals = cdb[1];
  1263. groupnum = cdb[10];
  1264. control = cdb[11];
  1265. break;
  1266. }
  1267. memset(cdb, 0, sizeof(io_request->CDB.CDB32));
  1268. cdb[0] = opcode;
  1269. cdb[1] = flagvals;
  1270. cdb[14] = groupnum;
  1271. cdb[15] = control;
  1272. /* Transfer length */
  1273. cdb[13] = (u8)(num_blocks & 0xff);
  1274. cdb[12] = (u8)((num_blocks >> 8) & 0xff);
  1275. cdb[11] = (u8)((num_blocks >> 16) & 0xff);
  1276. cdb[10] = (u8)((num_blocks >> 24) & 0xff);
  1277. io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
  1278. cdb_len = 16;
  1279. }
  1280. /* Normal case, just load LBA here */
  1281. switch (cdb_len) {
  1282. case 6:
  1283. {
  1284. u8 val = cdb[1] & 0xE0;
  1285. cdb[3] = (u8)(start_blk & 0xff);
  1286. cdb[2] = (u8)((start_blk >> 8) & 0xff);
  1287. cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
  1288. break;
  1289. }
  1290. case 10:
  1291. cdb[5] = (u8)(start_blk & 0xff);
  1292. cdb[4] = (u8)((start_blk >> 8) & 0xff);
  1293. cdb[3] = (u8)((start_blk >> 16) & 0xff);
  1294. cdb[2] = (u8)((start_blk >> 24) & 0xff);
  1295. break;
  1296. case 12:
  1297. cdb[5] = (u8)(start_blk & 0xff);
  1298. cdb[4] = (u8)((start_blk >> 8) & 0xff);
  1299. cdb[3] = (u8)((start_blk >> 16) & 0xff);
  1300. cdb[2] = (u8)((start_blk >> 24) & 0xff);
  1301. break;
  1302. case 16:
  1303. cdb[9] = (u8)(start_blk & 0xff);
  1304. cdb[8] = (u8)((start_blk >> 8) & 0xff);
  1305. cdb[7] = (u8)((start_blk >> 16) & 0xff);
  1306. cdb[6] = (u8)((start_blk >> 24) & 0xff);
  1307. cdb[5] = (u8)((start_blk >> 32) & 0xff);
  1308. cdb[4] = (u8)((start_blk >> 40) & 0xff);
  1309. cdb[3] = (u8)((start_blk >> 48) & 0xff);
  1310. cdb[2] = (u8)((start_blk >> 56) & 0xff);
  1311. break;
  1312. }
  1313. }
  1314. }
  1315. /**
  1316. * megasas_build_ldio_fusion - Prepares IOs to devices
  1317. * @instance: Adapter soft state
  1318. * @scp: SCSI command
  1319. * @cmd: Command to be prepared
  1320. *
  1321. * Prepares the io_request and chain elements (sg_frame) for IO
  1322. * The IO can be for PD (Fast Path) or LD
  1323. */
  1324. void
  1325. megasas_build_ldio_fusion(struct megasas_instance *instance,
  1326. struct scsi_cmnd *scp,
  1327. struct megasas_cmd_fusion *cmd)
  1328. {
  1329. u8 fp_possible;
  1330. u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
  1331. struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
  1332. union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
  1333. struct IO_REQUEST_INFO io_info;
  1334. struct fusion_context *fusion;
  1335. struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
  1336. u8 *raidLUN;
  1337. device_id = MEGASAS_DEV_INDEX(scp);
  1338. fusion = instance->ctrl_context;
  1339. io_request = cmd->io_request;
  1340. io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id);
  1341. io_request->RaidContext.status = 0;
  1342. io_request->RaidContext.exStatus = 0;
  1343. req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
  1344. start_lba_lo = 0;
  1345. start_lba_hi = 0;
  1346. fp_possible = 0;
  1347. /*
  1348. * 6-byte READ(0x08) or WRITE(0x0A) cdb
  1349. */
  1350. if (scp->cmd_len == 6) {
  1351. datalength = (u32) scp->cmnd[4];
  1352. start_lba_lo = ((u32) scp->cmnd[1] << 16) |
  1353. ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
  1354. start_lba_lo &= 0x1FFFFF;
  1355. }
  1356. /*
  1357. * 10-byte READ(0x28) or WRITE(0x2A) cdb
  1358. */
  1359. else if (scp->cmd_len == 10) {
  1360. datalength = (u32) scp->cmnd[8] |
  1361. ((u32) scp->cmnd[7] << 8);
  1362. start_lba_lo = ((u32) scp->cmnd[2] << 24) |
  1363. ((u32) scp->cmnd[3] << 16) |
  1364. ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
  1365. }
  1366. /*
  1367. * 12-byte READ(0xA8) or WRITE(0xAA) cdb
  1368. */
  1369. else if (scp->cmd_len == 12) {
  1370. datalength = ((u32) scp->cmnd[6] << 24) |
  1371. ((u32) scp->cmnd[7] << 16) |
  1372. ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
  1373. start_lba_lo = ((u32) scp->cmnd[2] << 24) |
  1374. ((u32) scp->cmnd[3] << 16) |
  1375. ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
  1376. }
  1377. /*
  1378. * 16-byte READ(0x88) or WRITE(0x8A) cdb
  1379. */
  1380. else if (scp->cmd_len == 16) {
  1381. datalength = ((u32) scp->cmnd[10] << 24) |
  1382. ((u32) scp->cmnd[11] << 16) |
  1383. ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
  1384. start_lba_lo = ((u32) scp->cmnd[6] << 24) |
  1385. ((u32) scp->cmnd[7] << 16) |
  1386. ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
  1387. start_lba_hi = ((u32) scp->cmnd[2] << 24) |
  1388. ((u32) scp->cmnd[3] << 16) |
  1389. ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
  1390. }
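/*
 * io_info is filled from the decoded CDB and handed to
 * MR_BuildRaidContext(), which consults the RAID map to decide
 * whether this IO can be sent down the fast path directly to a
 * physical device.
 */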
  1391. memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
  1392. io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
  1393. io_info.numBlocks = datalength;
  1394. io_info.ldTgtId = device_id;
  1395. io_request->DataLength = cpu_to_le32(scsi_bufflen(scp));
  1396. if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
  1397. io_info.isRead = 1;
  1398. local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
  1399. if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
  1400. instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
  1401. io_request->RaidContext.regLockFlags = 0;
  1402. fp_possible = 0;
  1403. } else {
  1404. if (MR_BuildRaidContext(instance, &io_info,
  1405. &io_request->RaidContext,
  1406. local_map_ptr, &raidLUN))
  1407. fp_possible = io_info.fpOkForIo;
  1408. }
  1409. /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
  1410. id by default, not CPU group id, otherwise all MSI-X queues won't
  1411. be utilized */
  1412. cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
  1413. raw_smp_processor_id() % instance->msix_vectors : 0;
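/*
 * Fast path: the request is issued as a plain SCSI IO straight to the
 * physical device handle derived from the RAID map. Otherwise the
 * request is sent as an LD IO and the firmware performs the RAID
 * translation itself.
 */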
  1414. if (fp_possible) {
  1415. megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
  1416. local_map_ptr, start_lba_lo);
  1417. io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
  1418. cmd->request_desc->SCSIIO.RequestFlags =
  1419. (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY
  1420. << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1421. if (fusion->adapter_type == INVADER_SERIES) {
  1422. if (io_request->RaidContext.regLockFlags ==
  1423. REGION_TYPE_UNUSED)
  1424. cmd->request_desc->SCSIIO.RequestFlags =
  1425. (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
  1426. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1427. io_request->RaidContext.Type = MPI2_TYPE_CUDA;
  1428. io_request->RaidContext.nseg = 0x1;
  1429. io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
  1430. io_request->RaidContext.regLockFlags |=
  1431. (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
  1432. MR_RL_FLAGS_SEQ_NUM_ENABLE);
  1433. }
  1434. if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
  1435. (io_info.isRead)) {
  1436. io_info.devHandle =
  1437. get_updated_dev_handle(instance,
  1438. &fusion->load_balance_info[device_id],
  1439. &io_info);
  1440. scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
  1441. cmd->pd_r1_lb = io_info.pd_after_lb;
  1442. } else
  1443. scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
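/*
 * If the target physical device reports more than one valid device
 * handle, alternate between the two handles on successive fast-path
 * IOs, presumably to spread traffic across both paths.
 */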
  1444. if ((raidLUN[0] == 1) &&
  1445. (local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
  1446. instance->dev_handle = !(instance->dev_handle);
  1447. io_info.devHandle =
  1448. local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
  1449. }
  1450. cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
  1451. io_request->DevHandle = io_info.devHandle;
  1452. /* populate the LUN field */
  1453. memcpy(io_request->LUN, raidLUN, 8);
  1454. } else {
  1455. io_request->RaidContext.timeoutValue =
  1456. cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
  1457. cmd->request_desc->SCSIIO.RequestFlags =
  1458. (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
  1459. << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1460. if (fusion->adapter_type == INVADER_SERIES) {
  1461. if (io_request->RaidContext.regLockFlags ==
  1462. REGION_TYPE_UNUSED)
  1463. cmd->request_desc->SCSIIO.RequestFlags =
  1464. (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
  1465. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1466. io_request->RaidContext.Type = MPI2_TYPE_CUDA;
  1467. io_request->RaidContext.regLockFlags |=
  1468. (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
  1469. MR_RL_FLAGS_SEQ_NUM_ENABLE);
  1470. io_request->RaidContext.nseg = 0x1;
  1471. }
  1472. io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
  1473. io_request->DevHandle = cpu_to_le16(device_id);
  1474. } /* Not FP */
  1475. }
  1476. /**
  1477. * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
  1478. * @instance: Adapter soft state
* @scmd: SCSI command
  1480. * @cmd: Command to be prepared
  1481. *
  1482. * Prepares the io_request frame for non-rw io cmds for vd.
  1483. */
  1484. static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
  1485. struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
  1486. {
  1487. u32 device_id;
  1488. struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
  1489. u16 pd_index = 0;
  1490. struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
  1491. struct fusion_context *fusion = instance->ctrl_context;
  1492. u8 span, physArm;
  1493. __le16 devHandle;
  1494. u32 ld, arRef, pd;
  1495. struct MR_LD_RAID *raid;
  1496. struct RAID_CONTEXT *pRAID_Context;
  1497. u8 fp_possible = 1;
  1498. io_request = cmd->io_request;
  1499. device_id = MEGASAS_DEV_INDEX(scmd);
  1500. pd_index = MEGASAS_PD_INDEX(scmd);
  1501. local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
  1502. io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
  1503. /* get RAID_Context pointer */
  1504. pRAID_Context = &io_request->RaidContext;
  1505. /* Check with FW team */
  1506. pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
  1507. pRAID_Context->regLockRowLBA = 0;
  1508. pRAID_Context->regLockLength = 0;
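/*
 * Non-RW commands may use the fast path only when fast-path IO is
 * enabled, the target maps to a valid LD in the current RAID map,
 * and the LD reports fpNonRWCapable; otherwise fall back to the
 * firmware LD IO path.
 */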
  1509. if (fusion->fast_path_io && (
  1510. device_id < instance->fw_supported_vd_count)) {
  1511. ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
  1512. if (ld >= instance->fw_supported_vd_count - 1)
  1513. fp_possible = 0;
  1514. raid = MR_LdRaidGet(ld, local_map_ptr);
  1515. if (!(raid->capability.fpNonRWCapable))
  1516. fp_possible = 0;
  1517. } else
  1518. fp_possible = 0;
  1519. if (!fp_possible) {
  1520. io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
  1521. io_request->DevHandle = cpu_to_le16(device_id);
  1522. io_request->LUN[1] = scmd->device->lun;
  1523. pRAID_Context->timeoutValue =
  1524. cpu_to_le16 (scmd->request->timeout / HZ);
  1525. cmd->request_desc->SCSIIO.RequestFlags =
  1526. (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
  1527. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1528. } else {
  1529. /* set RAID context values */
  1530. pRAID_Context->configSeqNum = raid->seqNum;
  1531. pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ;
  1532. pRAID_Context->timeoutValue = cpu_to_le16(raid->fpIoTimeoutForLd);
  1533. /* get the DevHandle for the PD (since this is
  1534. fpNonRWCapable, this is a single disk RAID0) */
  1535. span = physArm = 0;
  1536. arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
  1537. pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
  1538. devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
  1539. /* build request descriptor */
  1540. cmd->request_desc->SCSIIO.RequestFlags =
  1541. (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
  1542. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1543. cmd->request_desc->SCSIIO.DevHandle = devHandle;
  1544. /* populate the LUN field */
  1545. memcpy(io_request->LUN, raid->LUN, 8);
  1546. /* build the raidScsiIO structure */
  1547. io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
  1548. io_request->DevHandle = devHandle;
  1549. }
  1550. }
  1551. /**
  1552. * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
  1553. * @instance: Adapter soft state
* @scmd: SCSI command
* @cmd: Command to be prepared
* @fp_possible: true to attempt fast path IO, false to force the firmware path
  1557. *
  1558. * Prepares the io_request frame for rw/non-rw io cmds for syspds
  1559. */
  1560. static void
  1561. megasas_build_syspd_fusion(struct megasas_instance *instance,
  1562. struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd, u8 fp_possible)
  1563. {
  1564. u32 device_id;
  1565. struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
  1566. u16 pd_index = 0;
  1567. u16 os_timeout_value;
  1568. u16 timeout_limit;
  1569. struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
  1570. struct RAID_CONTEXT *pRAID_Context;
  1571. struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
  1572. struct fusion_context *fusion = instance->ctrl_context;
  1573. pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
  1574. device_id = MEGASAS_DEV_INDEX(scmd);
  1575. pd_index = MEGASAS_PD_INDEX(scmd);
  1576. os_timeout_value = scmd->request->timeout / HZ;
  1577. io_request = cmd->io_request;
  1578. /* get RAID_Context pointer */
  1579. pRAID_Context = &io_request->RaidContext;
  1580. pRAID_Context->regLockFlags = 0;
  1581. pRAID_Context->regLockRowLBA = 0;
  1582. pRAID_Context->regLockLength = 0;
  1583. io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
  1584. io_request->LUN[1] = scmd->device->lun;
  1585. pRAID_Context->RAIDFlags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
  1586. << MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
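/*
 * Three cases follow: (1) FW supports JBOD sequence numbers and the
 * device is a disk - take the device handle from the PD sequence map,
 * (2) classic fast-path IO - take the current device handle from the
 * RAID map, (3) otherwise force the IO through the firmware path by
 * using the invalid handle 0xFFFF.
 */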
  1587. /* If FW supports PD sequence number */
  1588. if (instance->use_seqnum_jbod_fp &&
  1589. instance->pd_list[pd_index].driveType == TYPE_DISK) {
  1590. /* TgtId must be incremented by 255 as jbod seq number is index
  1591. * below raid map
  1592. */
  1593. pRAID_Context->VirtualDiskTgtId =
  1594. cpu_to_le16(device_id + (MAX_PHYSICAL_DEVICES - 1));
  1595. pRAID_Context->configSeqNum = pd_sync->seq[pd_index].seqNum;
  1596. io_request->DevHandle = pd_sync->seq[pd_index].devHandle;
  1597. pRAID_Context->regLockFlags |=
  1598. (MR_RL_FLAGS_SEQ_NUM_ENABLE|MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
  1599. pRAID_Context->Type = MPI2_TYPE_CUDA;
  1600. pRAID_Context->nseg = 0x1;
  1601. } else if (fusion->fast_path_io) {
  1602. pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
  1603. pRAID_Context->configSeqNum = 0;
  1604. local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
  1605. io_request->DevHandle =
  1606. local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
  1607. } else {
  1608. /* Want to send all IO via FW path */
  1609. pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
  1610. pRAID_Context->configSeqNum = 0;
  1611. io_request->DevHandle = cpu_to_le16(0xFFFF);
  1612. }
  1613. cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
  1614. cmd->request_desc->SCSIIO.MSIxIndex =
  1615. instance->msix_vectors ?
  1616. (raw_smp_processor_id() % instance->msix_vectors) : 0;
  1617. if (!fp_possible) {
  1618. /* system pd firmware path */
  1619. io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
  1620. cmd->request_desc->SCSIIO.RequestFlags =
  1621. (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
  1622. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1623. pRAID_Context->timeoutValue = cpu_to_le16(os_timeout_value);
  1624. pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id);
  1625. } else {
  1626. if (os_timeout_value)
  1627. os_timeout_value++;
  1628. /* system pd Fast Path */
  1629. io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
  1630. timeout_limit = (scmd->device->type == TYPE_DISK) ?
  1631. 255 : 0xFFFF;
  1632. pRAID_Context->timeoutValue =
  1633. cpu_to_le16((os_timeout_value > timeout_limit) ?
  1634. timeout_limit : os_timeout_value);
  1635. if (fusion->adapter_type == INVADER_SERIES)
  1636. io_request->IoFlags |=
  1637. cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
  1638. cmd->request_desc->SCSIIO.RequestFlags =
  1639. (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
  1640. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  1641. }
  1642. }
  1643. /**
  1644. * megasas_build_io_fusion - Prepares IOs to devices
  1645. * @instance: Adapter soft state
  1646. * @scp: SCSI command
  1647. * @cmd: Command to be prepared
  1648. *
  1649. * Invokes helper functions to prepare request frames
  1650. * and sets flags appropriate for IO/Non-IO cmd
  1651. */
  1652. int
  1653. megasas_build_io_fusion(struct megasas_instance *instance,
  1654. struct scsi_cmnd *scp,
  1655. struct megasas_cmd_fusion *cmd)
  1656. {
  1657. u16 sge_count;
  1658. u8 cmd_type;
  1659. struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
  1660. /* Zero out some fields so they don't get reused */
  1661. memset(io_request->LUN, 0x0, 8);
  1662. io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
  1663. io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
  1664. io_request->EEDPFlags = 0;
  1665. io_request->Control = 0;
  1666. io_request->EEDPBlockSize = 0;
  1667. io_request->ChainOffset = 0;
  1668. io_request->RaidContext.RAIDFlags = 0;
  1669. io_request->RaidContext.Type = 0;
  1670. io_request->RaidContext.nseg = 0;
  1671. memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
  1672. /*
* Just the CDB length; the rest of the IoFlags are zero.
* This will be modified for FP in build_ldio_fusion.
  1675. */
  1676. io_request->IoFlags = cpu_to_le16(scp->cmd_len);
  1677. switch (cmd_type = megasas_cmd_type(scp)) {
  1678. case READ_WRITE_LDIO:
  1679. megasas_build_ldio_fusion(instance, scp, cmd);
  1680. break;
  1681. case NON_READ_WRITE_LDIO:
  1682. megasas_build_ld_nonrw_fusion(instance, scp, cmd);
  1683. break;
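/*
 * System PD commands: with secure JBOD support, non-RW PD commands
 * are forced through the firmware path (fp_possible = 0); all other
 * syspd commands may attempt the fast path.
 */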
  1684. case READ_WRITE_SYSPDIO:
  1685. case NON_READ_WRITE_SYSPDIO:
  1686. if (instance->secure_jbod_support &&
  1687. (cmd_type == NON_READ_WRITE_SYSPDIO))
  1688. megasas_build_syspd_fusion(instance, scp, cmd, 0);
  1689. else
  1690. megasas_build_syspd_fusion(instance, scp, cmd, 1);
  1691. break;
  1692. default:
  1693. break;
  1694. }
  1695. /*
  1696. * Construct SGL
  1697. */
  1698. sge_count =
  1699. megasas_make_sgl_fusion(instance, scp,
  1700. (struct MPI25_IEEE_SGE_CHAIN64 *)
  1701. &io_request->SGL, cmd);
  1702. if (sge_count > instance->max_num_sge) {
  1703. dev_err(&instance->pdev->dev, "Error. sge_count (0x%x) exceeds "
  1704. "max (0x%x) allowed\n", sge_count,
  1705. instance->max_num_sge);
  1706. return 1;
  1707. }
/* numSGE stores the lower 8 bits of sge_count.
* numSGEExt stores the upper 8 bits of sge_count.
  1710. */
  1711. io_request->RaidContext.numSGE = sge_count;
  1712. io_request->RaidContext.numSGEExt = (u8)(sge_count >> 8);
  1713. io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
  1714. if (scp->sc_data_direction == PCI_DMA_TODEVICE)
  1715. io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
  1716. else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
  1717. io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
  1718. io_request->SGLOffset0 =
  1719. offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
  1720. io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr);
  1721. io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
  1722. cmd->scmd = scp;
  1723. scp->SCp.ptr = (char *)cmd;
  1724. return 0;
  1725. }
  1726. union MEGASAS_REQUEST_DESCRIPTOR_UNION *
  1727. megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
  1728. {
  1729. u8 *p;
  1730. struct fusion_context *fusion;
  1731. if (index >= instance->max_fw_cmds) {
dev_err(&instance->pdev->dev, "Invalid SMID (0x%x) request for "
  1733. "descriptor for scsi%d\n", index,
  1734. instance->host->host_no);
  1735. return NULL;
  1736. }
  1737. fusion = instance->ctrl_context;
  1738. p = fusion->req_frames_desc
  1739. +sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *index;
  1740. return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
  1741. }
  1742. /**
* megasas_build_and_issue_cmd_fusion - Main routine for building and
* issuing a non-IOCTL cmd
  1745. * @instance: Adapter soft state
  1746. * @scmd: pointer to scsi cmd from OS
  1747. */
  1748. static u32
  1749. megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
  1750. struct scsi_cmnd *scmd)
  1751. {
  1752. struct megasas_cmd_fusion *cmd;
  1753. union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
  1754. u32 index;
  1755. struct fusion_context *fusion;
  1756. fusion = instance->ctrl_context;
  1757. cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
  1758. index = cmd->index;
  1759. req_desc = megasas_get_request_descriptor(instance, index-1);
  1760. if (!req_desc)
  1761. return 1;
  1762. req_desc->Words = 0;
  1763. cmd->request_desc = req_desc;
  1764. if (megasas_build_io_fusion(instance, scmd, cmd)) {
  1765. megasas_return_cmd_fusion(instance, cmd);
  1766. dev_err(&instance->pdev->dev, "Error building command\n");
  1767. cmd->request_desc = NULL;
  1768. return 1;
  1769. }
  1770. req_desc = cmd->request_desc;
  1771. req_desc->SCSIIO.SMID = cpu_to_le16(index);
  1772. if (cmd->io_request->ChainOffset != 0 &&
  1773. cmd->io_request->ChainOffset != 0xF)
  1774. dev_err(&instance->pdev->dev, "The chain offset value is not "
  1775. "correct : %x\n", cmd->io_request->ChainOffset);
  1776. /*
  1777. * Issue the command to the FW
  1778. */
  1779. atomic_inc(&instance->fw_outstanding);
  1780. megasas_fire_cmd_fusion(instance, req_desc);
  1781. return 0;
  1782. }
  1783. /**
* complete_cmd_fusion - Completes command
* @instance: Adapter soft state
* @MSIxIndex: MSI-x reply queue index
*
* Completes all commands that are in the reply descriptor queue.
  1787. */
  1788. int
  1789. complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex)
  1790. {
  1791. union MPI2_REPLY_DESCRIPTORS_UNION *desc;
  1792. struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
  1793. struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
  1794. struct fusion_context *fusion;
  1795. struct megasas_cmd *cmd_mfi;
  1796. struct megasas_cmd_fusion *cmd_fusion;
  1797. u16 smid, num_completed;
  1798. u8 reply_descript_type;
  1799. u32 status, extStatus, device_id;
  1800. union desc_value d_val;
  1801. struct LD_LOAD_BALANCE_INFO *lbinfo;
  1802. int threshold_reply_count = 0;
  1803. struct scsi_cmnd *scmd_local = NULL;
  1804. fusion = instance->ctrl_context;
  1805. if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR)
  1806. return IRQ_HANDLED;
  1807. desc = fusion->reply_frames_desc;
  1808. desc += ((MSIxIndex * fusion->reply_alloc_sz)/
  1809. sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) +
  1810. fusion->last_reply_idx[MSIxIndex];
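/*
 * Each MSI-x vector owns its own slice of the reply descriptor area;
 * last_reply_idx[MSIxIndex] is the next descriptor to be consumed
 * for this vector.
 */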
  1811. reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
  1812. d_val.word = desc->Words;
  1813. reply_descript_type = reply_desc->ReplyFlags &
  1814. MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
  1815. if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
  1816. return IRQ_NONE;
  1817. num_completed = 0;
  1818. while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
  1819. d_val.u.high != cpu_to_le32(UINT_MAX)) {
  1820. smid = le16_to_cpu(reply_desc->SMID);
  1821. cmd_fusion = fusion->cmd_list[smid - 1];
  1822. scsi_io_req =
  1823. (struct MPI2_RAID_SCSI_IO_REQUEST *)
  1824. cmd_fusion->io_request;
  1825. if (cmd_fusion->scmd)
  1826. cmd_fusion->scmd->SCp.ptr = NULL;
  1827. scmd_local = cmd_fusion->scmd;
  1828. status = scsi_io_req->RaidContext.status;
  1829. extStatus = scsi_io_req->RaidContext.exStatus;
  1830. switch (scsi_io_req->Function) {
  1831. case MPI2_FUNCTION_SCSI_IO_REQUEST: /*Fast Path IO.*/
  1832. /* Update load balancing info */
  1833. device_id = MEGASAS_DEV_INDEX(scmd_local);
  1834. lbinfo = &fusion->load_balance_info[device_id];
  1835. if (cmd_fusion->scmd->SCp.Status &
  1836. MEGASAS_LOAD_BALANCE_FLAG) {
  1837. atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
  1838. cmd_fusion->scmd->SCp.Status &=
  1839. ~MEGASAS_LOAD_BALANCE_FLAG;
  1840. }
  1841. if (reply_descript_type ==
  1842. MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
  1843. if (megasas_dbg_lvl == 5)
  1844. dev_err(&instance->pdev->dev, "\nFAST Path "
  1845. "IO Success\n");
  1846. }
  1847. /* Fall thru and complete IO */
  1848. case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
  1849. /* Map the FW Cmd Status */
  1850. map_cmd_status(cmd_fusion, status, extStatus);
  1851. scsi_io_req->RaidContext.status = 0;
  1852. scsi_io_req->RaidContext.exStatus = 0;
  1853. megasas_return_cmd_fusion(instance, cmd_fusion);
  1854. scsi_dma_unmap(scmd_local);
  1855. scmd_local->scsi_done(scmd_local);
  1856. atomic_dec(&instance->fw_outstanding);
  1857. break;
  1858. case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
  1859. cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
  1860. /* Poll mode. Dummy free.
  1861. * In case of Interrupt mode, caller has reverse check.
  1862. */
  1863. if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
  1864. cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
  1865. megasas_return_cmd(instance, cmd_mfi);
  1866. } else
  1867. megasas_complete_cmd(instance, cmd_mfi, DID_OK);
  1868. break;
  1869. }
  1870. fusion->last_reply_idx[MSIxIndex]++;
  1871. if (fusion->last_reply_idx[MSIxIndex] >=
  1872. fusion->reply_q_depth)
  1873. fusion->last_reply_idx[MSIxIndex] = 0;
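/* Mark the descriptor as unused so it is skipped on the next pass. */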
  1874. desc->Words = cpu_to_le64(ULLONG_MAX);
  1875. num_completed++;
  1876. threshold_reply_count++;
  1877. /* Get the next reply descriptor */
  1878. if (!fusion->last_reply_idx[MSIxIndex])
  1879. desc = fusion->reply_frames_desc +
  1880. ((MSIxIndex * fusion->reply_alloc_sz)/
  1881. sizeof(union MPI2_REPLY_DESCRIPTORS_UNION));
  1882. else
  1883. desc++;
  1884. reply_desc =
  1885. (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
  1886. d_val.word = desc->Words;
  1887. reply_descript_type = reply_desc->ReplyFlags &
  1888. MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
  1889. if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
  1890. break;
  1891. /*
  1892. * Write to reply post host index register after completing threshold
  1893. * number of reply counts and still there are more replies in reply queue
  1894. * pending to be completed
  1895. */
  1896. if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
  1897. if (fusion->adapter_type == INVADER_SERIES)
  1898. writel(((MSIxIndex & 0x7) << 24) |
  1899. fusion->last_reply_idx[MSIxIndex],
  1900. instance->reply_post_host_index_addr[MSIxIndex/8]);
  1901. else
  1902. writel((MSIxIndex << 24) |
  1903. fusion->last_reply_idx[MSIxIndex],
  1904. instance->reply_post_host_index_addr[0]);
  1905. threshold_reply_count = 0;
  1906. }
  1907. }
  1908. if (!num_completed)
  1909. return IRQ_NONE;
  1910. wmb();
  1911. if (fusion->adapter_type == INVADER_SERIES)
  1912. writel(((MSIxIndex & 0x7) << 24) |
  1913. fusion->last_reply_idx[MSIxIndex],
  1914. instance->reply_post_host_index_addr[MSIxIndex/8]);
  1915. else
  1916. writel((MSIxIndex << 24) |
  1917. fusion->last_reply_idx[MSIxIndex],
  1918. instance->reply_post_host_index_addr[0]);
  1919. megasas_check_and_restore_queue_depth(instance);
  1920. return IRQ_HANDLED;
  1921. }
  1922. /**
  1923. * megasas_complete_cmd_dpc_fusion - Completes command
* @instance_addr: Adapter soft state address (passed as unsigned long)
  1925. *
  1926. * Tasklet to complete cmds
  1927. */
  1928. void
  1929. megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
  1930. {
  1931. struct megasas_instance *instance =
  1932. (struct megasas_instance *)instance_addr;
  1933. unsigned long flags;
  1934. u32 count, MSIxIndex;
  1935. count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
/* If we have already declared adapter dead, do not complete cmds */
  1937. spin_lock_irqsave(&instance->hba_lock, flags);
  1938. if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
  1939. spin_unlock_irqrestore(&instance->hba_lock, flags);
  1940. return;
  1941. }
  1942. spin_unlock_irqrestore(&instance->hba_lock, flags);
  1943. for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++)
  1944. complete_cmd_fusion(instance, MSIxIndex);
  1945. }
  1946. /**
* megasas_isr_fusion - isr entry point
* @irq: IRQ number
* @devp: megasas_irq_context for this vector (instance and MSI-x index)
  1948. */
  1949. irqreturn_t megasas_isr_fusion(int irq, void *devp)
  1950. {
  1951. struct megasas_irq_context *irq_context = devp;
  1952. struct megasas_instance *instance = irq_context->instance;
  1953. u32 mfiStatus, fw_state, dma_state;
  1954. if (instance->mask_interrupts)
  1955. return IRQ_NONE;
  1956. if (!instance->msix_vectors) {
  1957. mfiStatus = instance->instancet->clear_intr(instance->reg_set);
  1958. if (!mfiStatus)
  1959. return IRQ_NONE;
  1960. }
  1961. /* If we are resetting, bail */
  1962. if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
  1963. instance->instancet->clear_intr(instance->reg_set);
  1964. return IRQ_HANDLED;
  1965. }
  1966. if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) {
  1967. instance->instancet->clear_intr(instance->reg_set);
  1968. /* If we didn't complete any commands, check for FW fault */
  1969. fw_state = instance->instancet->read_fw_status_reg(
  1970. instance->reg_set) & MFI_STATE_MASK;
  1971. dma_state = instance->instancet->read_fw_status_reg
  1972. (instance->reg_set) & MFI_STATE_DMADONE;
  1973. if (instance->crash_dump_drv_support &&
  1974. instance->crash_dump_app_support) {
  1975. /* Start collecting crash, if DMA bit is done */
  1976. if ((fw_state == MFI_STATE_FAULT) && dma_state)
  1977. schedule_work(&instance->crash_init);
  1978. else if (fw_state == MFI_STATE_FAULT)
  1979. schedule_work(&instance->work_init);
  1980. } else if (fw_state == MFI_STATE_FAULT) {
  1981. dev_warn(&instance->pdev->dev, "Iop2SysDoorbellInt"
  1982. "for scsi%d\n", instance->host->host_no);
  1983. schedule_work(&instance->work_init);
  1984. }
  1985. }
  1986. return IRQ_HANDLED;
  1987. }
  1988. /**
* build_mpt_mfi_pass_thru - builds a cmd for MFI Pass thru
* @instance: Adapter soft state
* @mfi_cmd: megasas_cmd pointer
  1992. *
  1993. */
  1994. u8
  1995. build_mpt_mfi_pass_thru(struct megasas_instance *instance,
  1996. struct megasas_cmd *mfi_cmd)
  1997. {
  1998. struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
  1999. struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
  2000. struct megasas_cmd_fusion *cmd;
  2001. struct fusion_context *fusion;
  2002. struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
  2003. fusion = instance->ctrl_context;
  2004. cmd = megasas_get_cmd_fusion(instance,
  2005. instance->max_scsi_cmds + mfi_cmd->index);
  2006. /* Save the smid. To be used for returning the cmd */
  2007. mfi_cmd->context.smid = cmd->index;
  2008. /*
  2009. * For cmds where the flag is set, store the flag and check
  2010. * on completion. For cmds with this flag, don't call
  2011. * megasas_complete_cmd
  2012. */
  2013. if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
  2014. mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
  2015. io_req = cmd->io_request;
  2016. if (fusion->adapter_type == INVADER_SERIES) {
  2017. struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
  2018. (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
  2019. sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
  2020. sgl_ptr_end->Flags = 0;
  2021. }
  2022. mpi25_ieee_chain =
  2023. (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
  2024. io_req->Function = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
  2025. io_req->SGLOffset0 = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
  2026. SGL) / 4;
  2027. io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
  2028. mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
  2029. mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
  2030. MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
  2031. mpi25_ieee_chain->Length = cpu_to_le32(instance->max_chain_frame_sz);
  2032. return 0;
  2033. }
  2034. /**
* build_mpt_cmd - Calls helper function to build an MFI Pass thru cmd
  2036. * @instance: Adapter soft state
  2037. * @cmd: mfi cmd to build
  2038. *
  2039. */
  2040. union MEGASAS_REQUEST_DESCRIPTOR_UNION *
  2041. build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
  2042. {
  2043. union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
  2044. u16 index;
  2045. if (build_mpt_mfi_pass_thru(instance, cmd)) {
  2046. dev_err(&instance->pdev->dev, "Couldn't build MFI pass thru cmd\n");
  2047. return NULL;
  2048. }
  2049. index = cmd->context.smid;
  2050. req_desc = megasas_get_request_descriptor(instance, index - 1);
  2051. if (!req_desc)
  2052. return NULL;
  2053. req_desc->Words = 0;
  2054. req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
  2055. MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
  2056. req_desc->SCSIIO.SMID = cpu_to_le16(index);
  2057. return req_desc;
  2058. }
  2059. /**
  2060. * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
  2061. * @instance: Adapter soft state
  2062. * @cmd: mfi cmd pointer
  2063. *
  2064. */
  2065. void
  2066. megasas_issue_dcmd_fusion(struct megasas_instance *instance,
  2067. struct megasas_cmd *cmd)
  2068. {
  2069. union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
  2070. req_desc = build_mpt_cmd(instance, cmd);
  2071. if (!req_desc) {
  2072. dev_err(&instance->pdev->dev, "Couldn't issue MFI pass thru cmd\n");
  2073. return;
  2074. }
  2075. megasas_fire_cmd_fusion(instance, req_desc);
  2076. }
  2077. /**
  2078. * megasas_release_fusion - Reverses the FW initialization
  2079. * @instance: Adapter soft state
  2080. */
  2081. void
  2082. megasas_release_fusion(struct megasas_instance *instance)
  2083. {
  2084. megasas_free_cmds(instance);
  2085. megasas_free_cmds_fusion(instance);
  2086. iounmap(instance->reg_set);
  2087. pci_release_selected_regions(instance->pdev, 1<<instance->bar);
  2088. }
  2089. /**
  2090. * megasas_read_fw_status_reg_fusion - returns the current FW status value
  2091. * @regs: MFI register set
  2092. */
  2093. static u32
  2094. megasas_read_fw_status_reg_fusion(struct megasas_register_set __iomem *regs)
  2095. {
  2096. return readl(&(regs)->outbound_scratch_pad);
  2097. }
  2098. /**
* megasas_alloc_host_crash_buffer - Allocate host buffers for Crash dump collection from Firmware
* @instance: Controller's soft instance
*
* The number of buffers actually allocated is recorded in instance->drv_buf_alloc.
  2102. */
  2103. static void
  2104. megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
  2105. {
  2106. unsigned int i;
  2107. instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
  2108. for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
  2109. instance->crash_buf[i] = (void *)__get_free_pages(GFP_KERNEL,
  2110. instance->crash_buf_pages);
  2111. if (!instance->crash_buf[i]) {
  2112. dev_info(&instance->pdev->dev, "Firmware crash dump "
  2113. "memory allocation failed at index %d\n", i);
  2114. break;
  2115. }
  2116. memset(instance->crash_buf[i], 0,
  2117. ((1 << PAGE_SHIFT) << instance->crash_buf_pages));
  2118. }
  2119. instance->drv_buf_alloc = i;
  2120. }
  2121. /**
* megasas_free_host_crash_buffer - Free host buffers allocated for Crash dump collection from Firmware
  2123. * @instance: Controller's soft instance
  2124. */
  2125. void
  2126. megasas_free_host_crash_buffer(struct megasas_instance *instance)
  2127. {
unsigned int i;
  2130. for (i = 0; i < instance->drv_buf_alloc; i++) {
  2131. if (instance->crash_buf[i])
  2132. free_pages((ulong)instance->crash_buf[i],
  2133. instance->crash_buf_pages);
  2134. }
  2135. instance->drv_buf_index = 0;
  2136. instance->drv_buf_alloc = 0;
  2137. instance->fw_crash_state = UNAVAILABLE;
  2138. instance->fw_crash_buffer_size = 0;
  2139. }
  2140. /**
  2141. * megasas_adp_reset_fusion - For controller reset
* @instance: Adapter soft state
* @regs: MFI register set
  2143. */
  2144. static int
  2145. megasas_adp_reset_fusion(struct megasas_instance *instance,
  2146. struct megasas_register_set __iomem *regs)
  2147. {
  2148. u32 host_diag, abs_state, retry;
  2149. /* Now try to reset the chip */
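/*
 * Writing this key sequence to the write-sequence register unlocks
 * the host diagnostic register; the DRWE bit checked below confirms
 * the unlock succeeded.
 */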
  2150. writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
  2151. writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
  2152. writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
  2153. writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
  2154. writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
  2155. writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
  2156. writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
  2157. /* Check that the diag write enable (DRWE) bit is on */
  2158. host_diag = readl(&instance->reg_set->fusion_host_diag);
  2159. retry = 0;
  2160. while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
  2161. msleep(100);
  2162. host_diag = readl(&instance->reg_set->fusion_host_diag);
  2163. if (retry++ == 100) {
  2164. dev_warn(&instance->pdev->dev,
  2165. "Host diag unlock failed from %s %d\n",
  2166. __func__, __LINE__);
  2167. break;
  2168. }
  2169. }
  2170. if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
  2171. return -1;
  2172. /* Send chip reset command */
  2173. writel(host_diag | HOST_DIAG_RESET_ADAPTER,
  2174. &instance->reg_set->fusion_host_diag);
  2175. msleep(3000);
  2176. /* Make sure reset adapter bit is cleared */
  2177. host_diag = readl(&instance->reg_set->fusion_host_diag);
  2178. retry = 0;
  2179. while (host_diag & HOST_DIAG_RESET_ADAPTER) {
  2180. msleep(100);
  2181. host_diag = readl(&instance->reg_set->fusion_host_diag);
  2182. if (retry++ == 1000) {
  2183. dev_warn(&instance->pdev->dev,
  2184. "Diag reset adapter never cleared %s %d\n",
  2185. __func__, __LINE__);
  2186. break;
  2187. }
  2188. }
  2189. if (host_diag & HOST_DIAG_RESET_ADAPTER)
  2190. return -1;
  2191. abs_state = instance->instancet->read_fw_status_reg(instance->reg_set)
  2192. & MFI_STATE_MASK;
  2193. retry = 0;
  2194. while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
  2195. msleep(100);
  2196. abs_state = instance->instancet->
  2197. read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
  2198. }
  2199. if (abs_state <= MFI_STATE_FW_INIT) {
  2200. dev_warn(&instance->pdev->dev,
  2201. "fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
  2202. abs_state, __func__, __LINE__);
  2203. return -1;
  2204. }
  2205. return 0;
  2206. }
  2207. /**
  2208. * megasas_check_reset_fusion - For controller reset check
* @instance: Adapter soft state
* @regs: MFI register set
  2210. */
  2211. static int
  2212. megasas_check_reset_fusion(struct megasas_instance *instance,
  2213. struct megasas_register_set __iomem *regs)
  2214. {
  2215. return 0;
  2216. }
  2217. /* This function waits for outstanding commands on fusion to complete */
  2218. int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
  2219. int iotimeout, int *convert)
  2220. {
  2221. int i, outstanding, retval = 0, hb_seconds_missed = 0;
  2222. u32 fw_state;
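/*
 * Poll for up to resetwaittime seconds: bail out immediately if the
 * FW is faulted or if SR-IOV heartbeat handling demands a reset, and
 * periodically kick the completion tasklet so pending replies are
 * drained while waiting for fw_outstanding to reach zero.
 */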
  2223. for (i = 0; i < resetwaittime; i++) {
  2224. /* Check if firmware is in fault state */
  2225. fw_state = instance->instancet->read_fw_status_reg(
  2226. instance->reg_set) & MFI_STATE_MASK;
  2227. if (fw_state == MFI_STATE_FAULT) {
  2228. dev_warn(&instance->pdev->dev, "Found FW in FAULT state,"
  2229. " will reset adapter scsi%d.\n",
  2230. instance->host->host_no);
  2231. retval = 1;
  2232. goto out;
  2233. }
  2234. /* If SR-IOV VF mode & heartbeat timeout, don't wait */
  2235. if (instance->requestorId && !iotimeout) {
  2236. retval = 1;
  2237. goto out;
  2238. }
  2239. /* If SR-IOV VF mode & I/O timeout, check for HB timeout */
  2240. if (instance->requestorId && iotimeout) {
  2241. if (instance->hb_host_mem->HB.fwCounter !=
  2242. instance->hb_host_mem->HB.driverCounter) {
  2243. instance->hb_host_mem->HB.driverCounter =
  2244. instance->hb_host_mem->HB.fwCounter;
  2245. hb_seconds_missed = 0;
  2246. } else {
  2247. hb_seconds_missed++;
  2248. if (hb_seconds_missed ==
  2249. (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
  2250. dev_warn(&instance->pdev->dev, "SR-IOV:"
  2251. " Heartbeat never completed "
  2252. " while polling during I/O "
  2253. " timeout handling for "
  2254. "scsi%d.\n",
  2255. instance->host->host_no);
  2256. *convert = 1;
  2257. retval = 1;
  2258. goto out;
  2259. }
  2260. }
  2261. }
  2262. outstanding = atomic_read(&instance->fw_outstanding);
  2263. if (!outstanding)
  2264. goto out;
  2265. if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
  2266. dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
  2267. "commands to complete for scsi%d\n", i,
  2268. outstanding, instance->host->host_no);
  2269. megasas_complete_cmd_dpc_fusion(
  2270. (unsigned long)instance);
  2271. }
  2272. msleep(1000);
  2273. }
  2274. if (atomic_read(&instance->fw_outstanding)) {
  2275. dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
  2276. "will reset adapter scsi%d.\n",
  2277. instance->host->host_no);
  2278. *convert = 1;
  2279. retval = 1;
  2280. }
  2281. out:
  2282. return retval;
  2283. }
  2284. void megasas_reset_reply_desc(struct megasas_instance *instance)
  2285. {
  2286. int i, count;
  2287. struct fusion_context *fusion;
  2288. union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
  2289. fusion = instance->ctrl_context;
  2290. count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
  2291. for (i = 0 ; i < count ; i++)
  2292. fusion->last_reply_idx[i] = 0;
  2293. reply_desc = fusion->reply_frames_desc;
  2294. for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++)
  2295. reply_desc->Words = cpu_to_le64(ULLONG_MAX);
  2296. }
  2297. /*
  2298. * megasas_refire_mgmt_cmd : Re-fire management commands
  2299. * @instance: Controller's soft instance
  2300. */
  2301. void megasas_refire_mgmt_cmd(struct megasas_instance *instance)
  2302. {
  2303. int j;
  2304. struct megasas_cmd_fusion *cmd_fusion;
  2305. struct fusion_context *fusion;
  2306. struct megasas_cmd *cmd_mfi;
  2307. union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
  2308. u16 smid;
  2309. fusion = instance->ctrl_context;
  2310. /* Re-fire management commands.
* Do not traverse the complete MPT frame pool. Start from max_scsi_cmds.
  2312. */
  2313. for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
  2314. cmd_fusion = fusion->cmd_list[j];
  2315. cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
  2316. smid = le16_to_cpu(cmd_mfi->context.smid);
  2317. if (!smid)
  2318. continue;
  2319. req_desc = megasas_get_request_descriptor
  2320. (instance, smid - 1);
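/*
 * RAID map and JBOD map DCMDs are not re-fired; they are returned
 * instead, since the reset path re-reads those maps afterwards.
 */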
  2321. if (req_desc && ((cmd_mfi->frame->dcmd.opcode !=
  2322. cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) &&
  2323. (cmd_mfi->frame->dcmd.opcode !=
  2324. cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO))))
  2325. megasas_fire_cmd_fusion(instance, req_desc);
  2326. else
  2327. megasas_return_cmd(instance, cmd_mfi);
  2328. }
  2329. }
  2330. /* Check for a second path that is currently UP */
  2331. int megasas_check_mpio_paths(struct megasas_instance *instance,
  2332. struct scsi_cmnd *scmd)
  2333. {
  2334. int i, j, retval = (DID_RESET << 16);
  2335. if (instance->mpio && instance->requestorId) {
  2336. for (i = 0 ; i < MAX_MGMT_ADAPTERS ; i++)
  2337. for (j = 0 ; j < MAX_LOGICAL_DRIVES; j++)
  2338. if (megasas_mgmt_info.instance[i] &&
  2339. (megasas_mgmt_info.instance[i] != instance) &&
  2340. megasas_mgmt_info.instance[i]->mpio &&
  2341. megasas_mgmt_info.instance[i]->requestorId
  2342. &&
  2343. (megasas_mgmt_info.instance[i]->ld_ids[j]
  2344. == scmd->device->id)) {
  2345. retval = (DID_NO_CONNECT << 16);
  2346. goto out;
  2347. }
  2348. }
  2349. out:
  2350. return retval;
  2351. }
  2352. /* Core fusion reset function */
  2353. int megasas_reset_fusion(struct Scsi_Host *shost, int iotimeout)
  2354. {
  2355. int retval = SUCCESS, i, convert = 0;
  2356. struct megasas_instance *instance;
  2357. struct megasas_cmd_fusion *cmd_fusion;
  2358. struct fusion_context *fusion;
  2359. u32 abs_state, status_reg, reset_adapter;
  2360. u32 io_timeout_in_crash_mode = 0;
  2361. struct scsi_cmnd *scmd_local = NULL;
  2362. instance = (struct megasas_instance *)shost->hostdata;
  2363. fusion = instance->ctrl_context;
  2364. mutex_lock(&instance->reset_mutex);
  2365. if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
  2366. dev_warn(&instance->pdev->dev, "Hardware critical error, "
  2367. "returning FAILED for scsi%d.\n",
  2368. instance->host->host_no);
  2369. mutex_unlock(&instance->reset_mutex);
  2370. return FAILED;
  2371. }
  2372. status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
  2373. abs_state = status_reg & MFI_STATE_MASK;
  2374. /* IO timeout detected, forcibly put FW in FAULT state */
  2375. if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
  2376. instance->crash_dump_app_support && iotimeout) {
  2377. dev_info(&instance->pdev->dev, "IO timeout is detected, "
  2378. "forcibly FAULT Firmware\n");
  2379. instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
  2380. status_reg = readl(&instance->reg_set->doorbell);
  2381. writel(status_reg | MFI_STATE_FORCE_OCR,
  2382. &instance->reg_set->doorbell);
  2383. readl(&instance->reg_set->doorbell);
  2384. mutex_unlock(&instance->reset_mutex);
  2385. do {
  2386. ssleep(3);
  2387. io_timeout_in_crash_mode++;
  2388. dev_dbg(&instance->pdev->dev, "waiting for [%d] "
  2389. "seconds for crash dump collection and OCR "
  2390. "to be done\n", (io_timeout_in_crash_mode * 3));
  2391. } while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
  2392. (io_timeout_in_crash_mode < 80));
  2393. if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
  2394. dev_info(&instance->pdev->dev, "OCR done for IO "
  2395. "timeout case\n");
  2396. retval = SUCCESS;
  2397. } else {
  2398. dev_info(&instance->pdev->dev, "Controller is not "
  2399. "operational after 240 seconds wait for IO "
  2400. "timeout case in FW crash dump mode\n do "
  2401. "OCR/kill adapter\n");
  2402. retval = megasas_reset_fusion(shost, 0);
  2403. }
  2404. return retval;
  2405. }
  2406. if (instance->requestorId && !instance->skip_heartbeat_timer_del)
  2407. del_timer_sync(&instance->sriov_heartbeat_timer);
  2408. set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
  2409. instance->adprecovery = MEGASAS_ADPRESET_SM_POLLING;
  2410. instance->instancet->disable_intr(instance);
  2411. msleep(1000);
  2412. /* First try waiting for commands to complete */
  2413. if (megasas_wait_for_outstanding_fusion(instance, iotimeout,
  2414. &convert)) {
  2415. instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
  2416. dev_warn(&instance->pdev->dev, "resetting fusion "
  2417. "adapter scsi%d.\n", instance->host->host_no);
  2418. if (convert)
  2419. iotimeout = 0;
  2420. /* Now return commands back to the OS */
  2421. for (i = 0 ; i < instance->max_scsi_cmds; i++) {
  2422. cmd_fusion = fusion->cmd_list[i];
  2423. scmd_local = cmd_fusion->scmd;
  2424. if (cmd_fusion->scmd) {
  2425. scmd_local->result =
  2426. megasas_check_mpio_paths(instance,
  2427. scmd_local);
  2428. megasas_return_cmd_fusion(instance, cmd_fusion);
  2429. scsi_dma_unmap(scmd_local);
  2430. scmd_local->scsi_done(scmd_local);
  2431. atomic_dec(&instance->fw_outstanding);
  2432. }
  2433. }
  2434. status_reg = instance->instancet->read_fw_status_reg(
  2435. instance->reg_set);
  2436. abs_state = status_reg & MFI_STATE_MASK;
  2437. reset_adapter = status_reg & MFI_RESET_ADAPTER;
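/*
 * Give up on online controller reset (and kill the adapter) if the
 * firmware has online controller reset disabled, or if it is in the
 * FAULT state without requesting an adapter reset.
 */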
  2438. if (instance->disableOnlineCtrlReset ||
  2439. (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
  2440. /* Reset not supported, kill adapter */
  2441. dev_warn(&instance->pdev->dev, "Reset not supported"
  2442. ", killing adapter scsi%d.\n",
  2443. instance->host->host_no);
  2444. megaraid_sas_kill_hba(instance);
  2445. instance->skip_heartbeat_timer_del = 1;
  2446. retval = FAILED;
  2447. goto out;
  2448. }
  2449. /* Let SR-IOV VF & PF sync up if there was a HB failure */
  2450. if (instance->requestorId && !iotimeout) {
  2451. msleep(MEGASAS_OCR_SETTLE_TIME_VF);
  2452. /* Look for a late HB update after VF settle time */
  2453. if (abs_state == MFI_STATE_OPERATIONAL &&
  2454. (instance->hb_host_mem->HB.fwCounter !=
  2455. instance->hb_host_mem->HB.driverCounter)) {
  2456. instance->hb_host_mem->HB.driverCounter =
  2457. instance->hb_host_mem->HB.fwCounter;
  2458. dev_warn(&instance->pdev->dev, "SR-IOV:"
  2459. "Late FW heartbeat update for "
  2460. "scsi%d.\n",
  2461. instance->host->host_no);
  2462. } else {
  2463. /* In VF mode, first poll for FW ready */
  2464. for (i = 0;
  2465. i < (MEGASAS_RESET_WAIT_TIME * 1000);
  2466. i += 20) {
  2467. status_reg =
  2468. instance->instancet->
  2469. read_fw_status_reg(
  2470. instance->reg_set);
  2471. abs_state = status_reg &
  2472. MFI_STATE_MASK;
  2473. if (abs_state == MFI_STATE_READY) {
  2474. dev_warn(&instance->pdev->dev,
"SR-IOV: FW was found "
  2476. "to be in ready state "
  2477. "for scsi%d.\n",
  2478. instance->host->host_no);
  2479. break;
  2480. }
  2481. msleep(20);
  2482. }
  2483. if (abs_state != MFI_STATE_READY) {
  2484. dev_warn(&instance->pdev->dev, "SR-IOV: "
  2485. "FW not in ready state after %d"
  2486. " seconds for scsi%d, status_reg = "
  2487. "0x%x.\n",
  2488. MEGASAS_RESET_WAIT_TIME,
  2489. instance->host->host_no,
  2490. status_reg);
  2491. megaraid_sas_kill_hba(instance);
  2492. instance->skip_heartbeat_timer_del = 1;
  2493. instance->adprecovery =
  2494. MEGASAS_HW_CRITICAL_ERROR;
  2495. retval = FAILED;
  2496. goto out;
  2497. }
  2498. }
  2499. }
  2500. /* Now try to reset the chip */
  2501. for (i = 0; i < MEGASAS_FUSION_MAX_RESET_TRIES; i++) {
  2502. if (instance->instancet->adp_reset
  2503. (instance, instance->reg_set))
  2504. continue;
  2505. /* Wait for FW to become ready */
  2506. if (megasas_transition_to_ready(instance, 1)) {
  2507. dev_warn(&instance->pdev->dev, "Failed to "
  2508. "transition controller to ready "
  2509. "for scsi%d.\n",
  2510. instance->host->host_no);
  2511. continue;
  2512. }
  2513. megasas_reset_reply_desc(instance);
  2514. if (megasas_ioc_init_fusion(instance)) {
  2515. dev_warn(&instance->pdev->dev,
  2516. "megasas_ioc_init_fusion() failed!"
  2517. " for scsi%d\n",
  2518. instance->host->host_no);
  2519. continue;
  2520. }
  2521. megasas_refire_mgmt_cmd(instance);
  2522. if (megasas_get_ctrl_info(instance)) {
  2523. dev_info(&instance->pdev->dev,
  2524. "Failed from %s %d\n",
  2525. __func__, __LINE__);
  2526. megaraid_sas_kill_hba(instance);
  2527. retval = FAILED;
  2528. }
  2529. /* Reset load balance info */
  2530. memset(fusion->load_balance_info, 0,
  2531. sizeof(struct LD_LOAD_BALANCE_INFO)
  2532. *MAX_LOGICAL_DRIVES_EXT);
  2533. if (!megasas_get_map_info(instance))
  2534. megasas_sync_map_info(instance);
  2535. megasas_setup_jbod_map(instance);
  2536. clear_bit(MEGASAS_FUSION_IN_RESET,
  2537. &instance->reset_flags);
  2538. instance->instancet->enable_intr(instance);
  2539. instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
  2540. /* Restart SR-IOV heartbeat */
  2541. if (instance->requestorId) {
  2542. if (!megasas_sriov_start_heartbeat(instance, 0))
  2543. megasas_start_timer(instance,
  2544. &instance->sriov_heartbeat_timer,
  2545. megasas_sriov_heartbeat_handler,
  2546. MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
  2547. else
  2548. instance->skip_heartbeat_timer_del = 1;
  2549. }
  2550. /* Adapter reset completed successfully */
  2551. dev_warn(&instance->pdev->dev, "Reset "
  2552. "successful for scsi%d.\n",
  2553. instance->host->host_no);
  2554. if (instance->crash_dump_drv_support &&
  2555. instance->crash_dump_app_support)
  2556. megasas_set_crash_dump_params(instance,
  2557. MR_CRASH_BUF_TURN_ON);
  2558. else
  2559. megasas_set_crash_dump_params(instance,
  2560. MR_CRASH_BUF_TURN_OFF);
  2561. retval = SUCCESS;
  2562. goto out;
  2563. }
  2564. /* Reset failed, kill the adapter */
  2565. dev_warn(&instance->pdev->dev, "Reset failed, killing "
  2566. "adapter scsi%d.\n", instance->host->host_no);
  2567. megaraid_sas_kill_hba(instance);
  2568. instance->skip_heartbeat_timer_del = 1;
  2569. retval = FAILED;
  2570. } else {
  2571. /* For VF: Restart HB timer if we didn't OCR */
  2572. if (instance->requestorId) {
  2573. megasas_start_timer(instance,
  2574. &instance->sriov_heartbeat_timer,
  2575. megasas_sriov_heartbeat_handler,
  2576. MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
  2577. }
  2578. clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
  2579. instance->instancet->enable_intr(instance);
  2580. instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
  2581. }
  2582. out:
  2583. clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
  2584. mutex_unlock(&instance->reset_mutex);
  2585. return retval;
  2586. }
  2587. /* Fusion Crash dump collection work queue */
  2588. void megasas_fusion_crash_dump_wq(struct work_struct *work)
  2589. {
  2590. struct megasas_instance *instance =
  2591. container_of(work, struct megasas_instance, crash_init);
  2592. u32 status_reg;
  2593. u8 partial_copy = 0;
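/*
 * Each invocation of this work item copies one CRASH_DMA_BUF_SIZE
 * chunk from the FW DMA crash buffer into a host buffer and
 * acknowledges it through the outbound scratch pad register; once the
 * FW reports MFI_STATE_CRASH_DUMP_DONE the dump is marked available
 * (partial if the driver ran out of host buffers).
 */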
  2594. status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
  2595. /*
  2596. * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
  2597. * to host crash buffers
  2598. */
  2599. if (instance->drv_buf_index == 0) {
  2600. /* Buffer is already allocated for old Crash dump.
  2601. * Do OCR and do not wait for crash dump collection
  2602. */
  2603. if (instance->drv_buf_alloc) {
  2604. dev_info(&instance->pdev->dev, "earlier crash dump is "
  2605. "not yet copied by application, ignoring this "
  2606. "crash dump and initiating OCR\n");
  2607. status_reg |= MFI_STATE_CRASH_DUMP_DONE;
  2608. writel(status_reg,
  2609. &instance->reg_set->outbound_scratch_pad);
  2610. readl(&instance->reg_set->outbound_scratch_pad);
  2611. return;
  2612. }
  2613. megasas_alloc_host_crash_buffer(instance);
  2614. dev_info(&instance->pdev->dev, "Number of host crash buffers "
  2615. "allocated: %d\n", instance->drv_buf_alloc);
  2616. }
  2617. /*
* If the driver has already allocated the maximum number of host
* buffers and FW has more crash dump data, the driver will
* ignore the remaining data.
  2621. */
  2622. if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
  2623. dev_info(&instance->pdev->dev, "Driver is done copying "
  2624. "the buffer: %d\n", instance->drv_buf_alloc);
  2625. status_reg |= MFI_STATE_CRASH_DUMP_DONE;
  2626. partial_copy = 1;
  2627. } else {
  2628. memcpy(instance->crash_buf[instance->drv_buf_index],
  2629. instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
  2630. instance->drv_buf_index++;
  2631. status_reg &= ~MFI_STATE_DMADONE;
  2632. }
  2633. if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
  2634. dev_info(&instance->pdev->dev, "Crash Dump is available,number "
  2635. "of copied buffers: %d\n", instance->drv_buf_index);
  2636. instance->fw_crash_buffer_size = instance->drv_buf_index;
  2637. instance->fw_crash_state = AVAILABLE;
  2638. instance->drv_buf_index = 0;
  2639. writel(status_reg, &instance->reg_set->outbound_scratch_pad);
  2640. readl(&instance->reg_set->outbound_scratch_pad);
  2641. if (!partial_copy)
  2642. megasas_reset_fusion(instance->host, 0);
  2643. } else {
  2644. writel(status_reg, &instance->reg_set->outbound_scratch_pad);
  2645. readl(&instance->reg_set->outbound_scratch_pad);
  2646. }
  2647. }
  2648. /* Fusion OCR work queue */
  2649. void megasas_fusion_ocr_wq(struct work_struct *work)
  2650. {
  2651. struct megasas_instance *instance =
  2652. container_of(work, struct megasas_instance, work_init);
  2653. megasas_reset_fusion(instance->host, 0);
  2654. }
  2655. struct megasas_instance_template megasas_instance_template_fusion = {
  2656. .enable_intr = megasas_enable_intr_fusion,
  2657. .disable_intr = megasas_disable_intr_fusion,
  2658. .clear_intr = megasas_clear_intr_fusion,
  2659. .read_fw_status_reg = megasas_read_fw_status_reg_fusion,
  2660. .adp_reset = megasas_adp_reset_fusion,
  2661. .check_reset = megasas_check_reset_fusion,
  2662. .service_isr = megasas_isr_fusion,
  2663. .tasklet = megasas_complete_cmd_dpc_fusion,
  2664. .init_adapter = megasas_init_adapter_fusion,
  2665. .build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
  2666. .issue_dcmd = megasas_issue_dcmd_fusion,
  2667. };