/*
 * bnx2i_iscsi.c: QLogic NetXtreme II iSCSI driver.
 *
 * Copyright (c) 2006 - 2013 Broadcom Corporation
 * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
 * Copyright (c) 2007, 2008 Mike Christie
 * Copyright (c) 2014, QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
 * Previously Maintained by: Eddie Wai (eddie.wai@broadcom.com)
 * Maintained by: QLogic-Storage-Upstream@qlogic.com
 */

#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libiscsi.h>
#include "bnx2i.h"

struct scsi_transport_template *bnx2i_scsi_xport_template;
struct iscsi_transport bnx2i_iscsi_transport;
static struct scsi_host_template bnx2i_host_template;

/*
 * Global endpoint resource info
 */
static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */

DECLARE_PER_CPU(struct bnx2i_percpu_s, bnx2i_percpu);

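/**
 * bnx2i_adapter_ready - checks if the adapter can accept new connections
 * @hba: pointer to adapter instance
 *
 * returns 0 when the adapter is up and its link is up, -EPERM otherwise
 */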
static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
{
	int retval = 0;

	if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
	    test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
		retval = -EPERM;
	return retval;
}

/**
 * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
 * @cmd: iscsi cmd struct pointer
 * @buf_off: absolute buffer offset
 * @start_bd_off: u32 pointer to return the offset within the BD
 *		indicated by 'start_bd_idx' on which 'buf_off' falls
 * @start_bd_idx: index of the BD on which 'buf_off' falls
 *
 * identifies & marks various bd info for scsi command's imm data,
 * unsolicited data and the first solicited data seq.
 */
static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
				       u32 *start_bd_off, u32 *start_bd_idx)
{
	struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
	u32 cur_offset = 0;
	u32 cur_bd_idx = 0;

	if (buf_off) {
		while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
			cur_offset += bd_tbl->buffer_length;
			cur_bd_idx++;
			bd_tbl++;
		}
	}

	*start_bd_off = buf_off - cur_offset;
	*start_bd_idx = cur_bd_idx;
}

/**
 * bnx2i_setup_write_cmd_bd_info - sets up BD various information
 * @task: transport layer's cmd struct pointer
 *
 * identifies & marks various bd info for scsi command's immediate data,
 * unsolicited data and first solicited data seq which includes BD start
 * index & BD buf off. This function takes into account iscsi parameters
 * such as whether immediate data and unsolicited data are supported on
 * this connection.
 */
static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	u32 start_bd_offset;
	u32 start_bd_idx;
	u32 buffer_offset = 0;
	u32 cmd_len = cmd->req.total_data_transfer_length;

	/* if ImmediateData is turned off & InitialR2T is turned on,
	 * there will be no immediate or unsolicited data, just return.
	 */
	if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
		return;

	/* Immediate data */
	buffer_offset += task->imm_count;
	if (task->imm_count == cmd_len)
		return;

	if (iscsi_task_has_unsol_data(task)) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		cmd->req.ud_buffer_offset = start_bd_offset;
		cmd->req.ud_start_bd_index = start_bd_idx;
		buffer_offset += task->unsol_r2t.data_length;
	}

	if (buffer_offset != cmd_len) {
		bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
					   &start_bd_offset, &start_bd_idx);
		if ((start_bd_offset > task->conn->session->first_burst) ||
		    (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
			int i = 0;

			iscsi_conn_printk(KERN_ALERT, task->conn,
					  "bnx2i- error, buf offset 0x%x "
					  "bd_valid %d use_sg %d\n",
					  buffer_offset, cmd->io_tbl.bd_valid,
					  scsi_sg_count(cmd->scsi_cmd));
			for (i = 0; i < cmd->io_tbl.bd_valid; i++)
				iscsi_conn_printk(KERN_ALERT, task->conn,
						  "bnx2i err, bd[%d]: len %x\n",
						  i, cmd->io_tbl.bd_tbl[i].\
						  buffer_length);
		}
		cmd->req.sd_buffer_offset = start_bd_offset;
		cmd->req.sd_start_bd_index = start_bd_idx;
	}
}

/**
 * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
 * @hba: adapter instance
 * @cmd: iscsi cmd struct pointer
 *
 * map SG list
 */
static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;
	struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
	struct scatterlist *sg;
	int byte_count = 0;
	int bd_count = 0;
	int sg_count;
	int sg_len;
	u64 addr;
	int i;

	BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);

	sg_count = scsi_dma_map(sc);

	scsi_for_each_sg(sc, sg, sg_count, i) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
		bd[bd_count].buffer_addr_hi = addr >> 32;
		bd[bd_count].buffer_length = sg_len;
		bd[bd_count].flags = 0;
		if (bd_count == 0)
			bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;

		byte_count += sg_len;
		bd_count++;
	}

	if (bd_count)
		bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;

	BUG_ON(byte_count != scsi_bufflen(sc));
	return bd_count;
}

/**
 * bnx2i_iscsi_map_sg_list - maps SG list
 * @cmd: iscsi cmd struct pointer
 *
 * creates BD list table for the command
 */
static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
{
	int bd_count;

	bd_count = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
	if (!bd_count) {
		struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;

		bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
		bd[0].buffer_length = bd[0].flags = 0;
	}
	cmd->io_tbl.bd_valid = bd_count;
}

/**
 * bnx2i_iscsi_unmap_sg_list - unmaps SG list
 * @cmd: iscsi cmd struct pointer
 *
 * unmap IO buffers and invalidate the BD table
 */
void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
{
	struct scsi_cmnd *sc = cmd->scsi_cmd;

	if (cmd->io_tbl.bd_valid && sc) {
		scsi_dma_unmap(sc);
		cmd->io_tbl.bd_valid = 0;
	}
}

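/**
 * bnx2i_setup_cmd_wqe_template - initializes a command's SQ WQE template
 * @cmd: iscsi cmd struct pointer
 *
 * clears the request structure and presets a placeholder opcode along with
 * the DMA address of the command's BD table
 */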
static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
{
	memset(&cmd->req, 0x00, sizeof(cmd->req));
	cmd->req.op_code = 0xFF;
	cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
	cmd->req.bd_list_addr_hi =
		(u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
}

/**
 * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
 * @hba: pointer to adapter instance
 * @bnx2i_conn: pointer to iscsi connection
 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
 *
 * update iscsi cid table entry with connection pointer. This enables
 * driver to quickly get hold of connection structure pointer in
 * completion/interrupt thread using iscsi context ID
 */
static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
					struct bnx2i_conn *bnx2i_conn,
					u32 iscsi_cid)
{
	if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "conn bind - entry #%d not free\n",
				  iscsi_cid);
		return -EBUSY;
	}

	hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
	return 0;
}

/**
 * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
 */
struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
					  u16 iscsi_cid)
{
	if (!hba->cid_que.conn_cid_tbl) {
		printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
		return NULL;

	} else if (iscsi_cid >= hba->max_active_conns) {
		printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
		return NULL;
	}
	return hba->cid_que.conn_cid_tbl[iscsi_cid];
}

/**
 * bnx2i_alloc_iscsi_cid - allocates a iscsi_cid from free pool
 * @hba: pointer to adapter instance
 */
static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
{
	int idx;

	if (!hba->cid_que.cid_free_cnt)
		return -1;

	idx = hba->cid_que.cid_q_cons_idx;
	hba->cid_que.cid_q_cons_idx++;
	if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_cons_idx = 0;

	hba->cid_que.cid_free_cnt--;
	return hba->cid_que.cid_que[idx];
}

/**
 * bnx2i_free_iscsi_cid - returns 'iscsi_cid' back to the free pool
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to free
 */
static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
{
	int idx;

	if (iscsi_cid == (u16) -1)
		return;

	hba->cid_que.cid_free_cnt++;

	idx = hba->cid_que.cid_q_prod_idx;
	hba->cid_que.cid_que[idx] = iscsi_cid;
	hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
	hba->cid_que.cid_q_prod_idx++;
	if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
		hba->cid_que.cid_q_prod_idx = 0;
}

/**
 * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
 * @hba: pointer to adapter instance
 *
 * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
 * and initialize table attributes
 */
static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
{
	int mem_size;
	int i;

	mem_size = hba->max_active_conns * sizeof(u32);
	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;

	hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
	if (!hba->cid_que.cid_que_base)
		return -ENOMEM;

	mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
	mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
	hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
	if (!hba->cid_que.conn_cid_tbl) {
		kfree(hba->cid_que.cid_que_base);
		hba->cid_que.cid_que_base = NULL;
		return -ENOMEM;
	}

	hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
	hba->cid_que.cid_q_prod_idx = 0;
	hba->cid_que.cid_q_cons_idx = 0;
	hba->cid_que.cid_q_max_idx = hba->max_active_conns;
	hba->cid_que.cid_free_cnt = hba->max_active_conns;

	for (i = 0; i < hba->max_active_conns; i++) {
		hba->cid_que.cid_que[i] = i;
		hba->cid_que.conn_cid_tbl[i] = NULL;
	}
	return 0;
}

/**
 * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
 * @hba: pointer to adapter instance
 */
static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
{
	kfree(hba->cid_que.cid_que_base);
	hba->cid_que.cid_que_base = NULL;

	kfree(hba->cid_que.conn_cid_tbl);
	hba->cid_que.conn_cid_tbl = NULL;
}

/**
 * bnx2i_alloc_ep - allocates ep structure from global pool
 * @hba: pointer to adapter instance
 *
 * routine allocates a free endpoint structure from global pool and
 * a tcp port to be used for this connection. Global resource lock,
 * 'bnx2i_resc_lock' is held while accessing shared global data structures
 */
static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
{
	struct iscsi_endpoint *ep;
	struct bnx2i_endpoint *bnx2i_ep;
	u32 ec_div;

	ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
	if (!ep) {
		printk(KERN_ERR "bnx2i: Could not allocate ep\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	bnx2i_ep->cls_ep = ep;
	INIT_LIST_HEAD(&bnx2i_ep->link);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->ep_iscsi_cid = (u16) -1;
	bnx2i_ep->hba = hba;
	bnx2i_ep->hba_age = hba->age;

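	/* derive the CQ event coalescing shift as log2 of event_coal_div */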
	ec_div = event_coal_div;
	while (ec_div >>= 1)
		bnx2i_ep->ec_shift += 1;

	hba->ofld_conns_active++;
	init_waitqueue_head(&bnx2i_ep->ofld_wait);
	return ep;
}

/**
 * bnx2i_free_ep - free endpoint
 * @ep: pointer to iscsi endpoint structure
 */
static void bnx2i_free_ep(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
	unsigned long flags;

	spin_lock_irqsave(&bnx2i_resc_lock, flags);
	bnx2i_ep->state = EP_STATE_IDLE;
	bnx2i_ep->hba->ofld_conns_active--;

	if (bnx2i_ep->ep_iscsi_cid != (u16) -1)
		bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);

	if (bnx2i_ep->conn) {
		bnx2i_ep->conn->ep = NULL;
		bnx2i_ep->conn = NULL;
	}

	bnx2i_ep->hba = NULL;
	spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
	iscsi_destroy_endpoint(ep);
}

/**
 * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 * @cmd: iscsi command structure
 */
static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
			   struct bnx2i_cmd *cmd)
{
	struct io_bdt *io = &cmd->io_tbl;
	struct iscsi_bd *bd;

	io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
					ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
					&io->bd_tbl_dma, GFP_KERNEL);
	if (!io->bd_tbl) {
		iscsi_session_printk(KERN_ERR, session, "Could not "
				     "allocate bdt.\n");
		return -ENOMEM;
	}
	io->bd_valid = 0;
	return 0;
}

/**
 * bnx2i_destroy_cmd_pool - destroys iscsi command pool and release BD table
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 */
static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
				   struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		if (cmd->io_tbl.bd_tbl)
			dma_free_coherent(&hba->pcidev->dev,
					  ISCSI_MAX_BDS_PER_CMD *
					  sizeof(struct iscsi_bd),
					  cmd->io_tbl.bd_tbl,
					  cmd->io_tbl.bd_tbl_dma);
	}
}

/**
 * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
 * @hba: adapter instance pointer
 * @session: iscsi session pointer
 */
static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
				struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_task *task = session->cmds[i];
		struct bnx2i_cmd *cmd = task->dd_data;

		task->hdr = &cmd->hdr;
		task->hdr_max = sizeof(struct iscsi_hdr);

		if (bnx2i_alloc_bdt(hba, session, cmd))
			goto free_bdts;
	}
	return 0;

free_bdts:
	bnx2i_destroy_cmd_pool(hba, session);
	return -ENOMEM;
}

/**
 * bnx2i_setup_mp_bdt - allocate BD table resources
 * @hba: pointer to adapter structure
 *
 * Allocate memory for dummy buffer and associated BD
 * table to be used by middle path (MP) requests
 */
static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
{
	int rc = 0;
	struct iscsi_bd *mp_bdt;
	u64 addr;

	hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
					    &hba->mp_bd_dma, GFP_KERNEL);
	if (!hba->mp_bd_tbl) {
		printk(KERN_ERR "unable to allocate Middle Path BDT\n");
		rc = -1;
		goto out;
	}

	hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
					       CNIC_PAGE_SIZE,
					       &hba->dummy_buf_dma, GFP_KERNEL);
	if (!hba->dummy_buffer) {
		printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
		rc = -1;
		goto out;
	}

	mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
	addr = (unsigned long) hba->dummy_buf_dma;
	mp_bdt->buffer_addr_lo = addr & 0xffffffff;
	mp_bdt->buffer_addr_hi = addr >> 32;
	mp_bdt->buffer_length = CNIC_PAGE_SIZE;
	mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
	return rc;
}

/**
 * bnx2i_free_mp_bdt - releases middle path (MP) BD table resources
 * @hba: pointer to adapter instance
 *
 * free MP dummy buffer and associated BD table
 */
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
	if (hba->mp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  hba->mp_bd_tbl, hba->mp_bd_dma);
		hba->mp_bd_tbl = NULL;
	}
	if (hba->dummy_buffer) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  hba->dummy_buffer, hba->dummy_buf_dma);
		hba->dummy_buffer = NULL;
	}
	return;
}

/**
 * bnx2i_drop_session - notifies iscsid of connection error.
 * @cls_session: pointer to iscsi cls session
 *
 * This notifies iscsid that there is an error, so it can initiate
 * recovery.
 *
 * This relies on caller using the iscsi class iterator so the object
 * is refcounted and does not disappear from under us.
 */
void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}

/**
 * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_destroy_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * EP destroy queue manager
 */
static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);

	return 0;
}

/**
 * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_ofld_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * pending conn offload completion queue manager
 */
static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
				  struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
	return 0;
}

/**
 * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep = NULL;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
	return ep;
}

/**
 * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
 * @hba: pointer to adapter instance
 * @iscsi_cid: iscsi context ID to find
 */
struct bnx2i_endpoint *
bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
{
	struct list_head *list;
	struct list_head *tmp;
	struct bnx2i_endpoint *ep = NULL;

	read_lock_bh(&hba->ep_rdwr_lock);
	list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
		ep = (struct bnx2i_endpoint *)list;

		if (ep->ep_iscsi_cid == iscsi_cid)
			break;
		ep = NULL;
	}
	read_unlock_bh(&hba->ep_rdwr_lock);

	if (!ep)
		printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);

	return ep;
}

/**
 * bnx2i_ep_active_list_add - add an entry to ep active list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * current active conn queue manager
 */
static void bnx2i_ep_active_list_add(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_add_tail(&ep->link, &hba->ep_active_list);
	write_unlock_bh(&hba->ep_rdwr_lock);
}

/**
 * bnx2i_ep_active_list_del - deletes an entry from the ep active list
 * @hba: pointer to adapter instance
 * @ep: pointer to endpoint (transport identifier) structure
 *
 * current active conn queue manager
 */
static void bnx2i_ep_active_list_del(struct bnx2i_hba *hba,
				     struct bnx2i_endpoint *ep)
{
	write_lock_bh(&hba->ep_rdwr_lock);
	list_del_init(&ep->link);
	write_unlock_bh(&hba->ep_rdwr_lock);
}

/**
 * bnx2i_setup_host_queue_size - assigns shost->can_queue param
 * @hba: pointer to adapter instance
 * @shost: scsi host pointer
 *
 * Initializes 'can_queue' parameter based on how many outstanding commands
 * the device can handle. Each device 5708/5709/57710 has different
 * capabilities
 */
static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
					struct Scsi_Host *shost)
{
	if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
	else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
	else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
	else
		shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
}

/**
 * bnx2i_alloc_hba - allocate and init adapter instance
 * @cnic: cnic device pointer
 *
 * allocate & initialize adapter structure and call other
 * support routines to do per adapter initialization
 */
struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
{
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;

	shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
	if (!shost)
		return NULL;
	shost->dma_boundary = cnic->pcidev->dma_mask;
	shost->transportt = bnx2i_scsi_xport_template;
	shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
	shost->max_channel = 0;
	shost->max_lun = 512;
	shost->max_cmd_len = 16;

	hba = iscsi_host_priv(shost);
	hba->shost = shost;
	hba->netdev = cnic->netdev;
	/* Get PCI related information and update hba struct members */
	hba->pcidev = cnic->pcidev;
	pci_dev_get(hba->pcidev);
	hba->pci_did = hba->pcidev->device;
	hba->pci_vid = hba->pcidev->vendor;
	hba->pci_sdid = hba->pcidev->subsystem_device;
	hba->pci_svid = hba->pcidev->subsystem_vendor;
	hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
	hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);

	bnx2i_identify_device(hba, cnic);
	bnx2i_setup_host_queue_size(hba, shost);

	hba->reg_base = pci_resource_start(hba->pcidev, 0);
	if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
		hba->regview = pci_iomap(hba->pcidev, 0, BNX2_MQ_CONFIG2);
		if (!hba->regview)
			goto ioreg_map_err;
	} else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		hba->regview = pci_iomap(hba->pcidev, 0, 4096);
		if (!hba->regview)
			goto ioreg_map_err;
	}

	if (bnx2i_setup_mp_bdt(hba))
		goto mp_bdt_mem_err;

	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_active_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	rwlock_init(&hba->ep_rdwr_lock);

	hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;

	/* different values for 5708/5709/57710 */
	hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;

	if (bnx2i_setup_free_cid_que(hba))
		goto cid_que_err;

	/* SQ/RQ/CQ size can be changed via sysfs interface */
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
	} else {	/* 5706/5708/5709 */
		if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
			hba->max_sqes = sq_size;
		else
			hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
	}

	hba->max_rqes = rq_size;
	hba->max_cqes = hba->max_sqes + rq_size;
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
			hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
	} else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
		hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;

	hba->num_ccell = hba->max_sqes / 2;

	spin_lock_init(&hba->lock);
	mutex_init(&hba->net_dev_lock);
	init_waitqueue_head(&hba->eh_wait);
	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
		hba->hba_shutdown_tmo = 30 * HZ;
		hba->conn_teardown_tmo = 20 * HZ;
		hba->conn_ctx_destroy_tmo = 6 * HZ;
	} else {	/* 5706/5708/5709 */
		hba->hba_shutdown_tmo = 20 * HZ;
		hba->conn_teardown_tmo = 10 * HZ;
		hba->conn_ctx_destroy_tmo = 2 * HZ;
	}

#ifdef CONFIG_32BIT
	spin_lock_init(&hba->stat_lock);
#endif
	memset(&hba->stats, 0, sizeof(struct iscsi_stats_info));

	if (iscsi_host_add(shost, &hba->pcidev->dev))
		goto free_dump_mem;
	return hba;

free_dump_mem:
	bnx2i_release_free_cid_que(hba);
cid_que_err:
	bnx2i_free_mp_bdt(hba);
mp_bdt_mem_err:
	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);
		hba->regview = NULL;
	}
ioreg_map_err:
	pci_dev_put(hba->pcidev);
	scsi_host_put(shost);
	return NULL;
}

/**
 * bnx2i_free_hba - releases hba structure and resources held by the adapter
 * @hba: pointer to adapter instance
 *
 * free adapter structure and call various cleanup routines.
 */
void bnx2i_free_hba(struct bnx2i_hba *hba)
{
	struct Scsi_Host *shost = hba->shost;

	iscsi_host_remove(shost);
	INIT_LIST_HEAD(&hba->ep_ofld_list);
	INIT_LIST_HEAD(&hba->ep_active_list);
	INIT_LIST_HEAD(&hba->ep_destroy_list);
	pci_dev_put(hba->pcidev);

	if (hba->regview) {
		pci_iounmap(hba->pcidev, hba->regview);
		hba->regview = NULL;
	}
	bnx2i_free_mp_bdt(hba);
	bnx2i_release_free_cid_que(hba);
	iscsi_host_free(shost);
}

/**
 * bnx2i_conn_free_login_resources - free DMA resources used for login process
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Login related resources, mostly BDT & payload DMA memory is freed
 */
static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  bnx2i_conn->gen_pdu.resp_bd_tbl,
				  bnx2i_conn->gen_pdu.resp_bd_dma);
		bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_bd_tbl) {
		dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				  bnx2i_conn->gen_pdu.req_bd_tbl,
				  bnx2i_conn->gen_pdu.req_bd_dma);
		bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
	}

	if (bnx2i_conn->gen_pdu.resp_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.resp_buf,
				  bnx2i_conn->gen_pdu.resp_dma_addr);
		bnx2i_conn->gen_pdu.resp_buf = NULL;
	}

	if (bnx2i_conn->gen_pdu.req_buf) {
		dma_free_coherent(&hba->pcidev->dev,
				  ISCSI_DEF_MAX_RECV_SEG_LEN,
				  bnx2i_conn->gen_pdu.req_buf,
				  bnx2i_conn->gen_pdu.req_dma_addr);
		bnx2i_conn->gen_pdu.req_buf = NULL;
	}
}

/**
 * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
 * @hba: pointer to adapter instance
 * @bnx2i_conn: iscsi connection pointer
 *
 * Mgmt task DMA resources are allocated in this routine.
 */
static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
					    struct bnx2i_conn *bnx2i_conn)
{
	/* Allocate memory for login request/response buffers */
	bnx2i_conn->gen_pdu.req_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.req_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_buf == NULL)
		goto login_req_buf_failure;

	bnx2i_conn->gen_pdu.req_buf_size = 0;
	bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;

	bnx2i_conn->gen_pdu.resp_buf =
		dma_alloc_coherent(&hba->pcidev->dev,
				   ISCSI_DEF_MAX_RECV_SEG_LEN,
				   &bnx2i_conn->gen_pdu.resp_dma_addr,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_buf == NULL)
		goto login_resp_buf_failure;

	bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;

	bnx2i_conn->gen_pdu.req_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
		goto login_req_bd_tbl_failure;

	bnx2i_conn->gen_pdu.resp_bd_tbl =
		dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
				   &bnx2i_conn->gen_pdu.resp_bd_dma,
				   GFP_KERNEL);
	if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
		goto login_resp_bd_tbl_failure;

	return 0;

login_resp_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
			  bnx2i_conn->gen_pdu.req_bd_tbl,
			  bnx2i_conn->gen_pdu.req_bd_dma);
	bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

login_req_bd_tbl_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.resp_buf,
			  bnx2i_conn->gen_pdu.resp_dma_addr);
	bnx2i_conn->gen_pdu.resp_buf = NULL;

login_resp_buf_failure:
	dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
			  bnx2i_conn->gen_pdu.req_buf,
			  bnx2i_conn->gen_pdu.req_dma_addr);
	bnx2i_conn->gen_pdu.req_buf = NULL;

login_req_buf_failure:
	iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
			  "login resource alloc failed!!\n");
	return -ENOMEM;
}

/**
 * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
 * @bnx2i_conn: iscsi connection pointer
 *
 * Fills in the request and response BD tables before shipping requests
 * to cnic for PDUs prepared by the 'iscsid' daemon
 */
static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
{
	struct iscsi_bd *bd_tbl;

	bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;

	bd_tbl->buffer_addr_hi =
		(u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
	bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
				bnx2i_conn->gen_pdu.req_buf;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;

	bd_tbl = (struct iscsi_bd  *) bnx2i_conn->gen_pdu.resp_bd_tbl;
	bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
	bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
	bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
	bd_tbl->reserved0 = 0;
	bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
			ISCSI_BD_FIRST_IN_BD_CHAIN;
}

/**
 * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
 * @task: transport layer task pointer
 *
 * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI login,
 * Nop-out and Logout requests flow through this path.
 */
static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
{
	struct bnx2i_cmd *cmd = task->dd_data;
	struct bnx2i_conn *bnx2i_conn = cmd->conn;
	int rc = 0;
	char *buf;
	int data_len;

	bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		bnx2i_send_iscsi_login(bnx2i_conn, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		data_len = bnx2i_conn->gen_pdu.req_buf_size;
		buf = bnx2i_conn->gen_pdu.req_buf;
		if (data_len)
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     buf, data_len, 1);
		else
			rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
						     NULL, 0, 1);
		break;
	case ISCSI_OP_LOGOUT:
		rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
		break;
	case ISCSI_OP_TEXT:
		rc = bnx2i_send_iscsi_text(bnx2i_conn, task);
		break;
	default:
		iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
				  "send_gen: unsupported op 0x%x\n",
				  task->hdr->opcode);
	}
	return rc;
}

/**********************************************************************
 *		SCSI-ML Interface
 **********************************************************************/

/**
 * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
 * @sc: SCSI-ML command pointer
 * @cmd: iscsi cmd pointer
 */
static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
{
	u32 dword;
	int lpcnt;
	u8 *srcp;
	u32 *dstp;
	u32 scsi_lun[2];

	int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
	cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
	cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);

	lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
	srcp = (u8 *) sc->cmnd;
	dstp = (u32 *) cmd->req.cdb;
	while (lpcnt--) {
		memcpy(&dword, (const void *) srcp, 4);
		*dstp = cpu_to_be32(dword);
		srcp += 4;
		dstp++;
	}

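	/* copy any CDB bytes left over after the whole-dword copies above */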
	if (sc->cmd_len & 0x3) {
		dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
		*dstp = cpu_to_be32(dword);
	}
}

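/**
 * bnx2i_cleanup_task - releases resources held by an iscsi task
 * @task: transport layer command structure pointer
 *
 * issues a firmware cleanup request for tasks aborted via TMF and unmaps
 * the task's SG list so the DMA buffers can be reclaimed
 */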
static void bnx2i_cleanup_task(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;

	/*
	 * mgmt task or cmd was never sent to us to transmit.
	 */
	if (!task->sc || task->state == ISCSI_TASK_PENDING)
		return;
	/*
	 * need to clean-up task context to claim dma buffers
	 */
	if (task->state == ISCSI_TASK_ABRT_TMF) {
		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

		spin_unlock_bh(&conn->session->back_lock);
		spin_unlock_bh(&conn->session->frwd_lock);
		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
		spin_lock_bh(&conn->session->frwd_lock);
		spin_lock_bh(&conn->session->back_lock);
	}
	bnx2i_iscsi_unmap_sg_list(task->dd_data);
}

/**
 * bnx2i_mtask_xmit - transmit mtask to chip for further processing
 * @conn: transport layer conn structure pointer
 * @task: transport layer command structure pointer
 */
static int
bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct bnx2i_hba *hba = bnx2i_conn->hba;
	struct bnx2i_cmd *cmd = task->dd_data;

	memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);

	bnx2i_setup_cmd_wqe_template(cmd);
	bnx2i_conn->gen_pdu.req_buf_size = task->data_count;

	/* Tx PDU/data length count */
	ADD_STATS_64(hba, tx_pdus, 1);
	ADD_STATS_64(hba, tx_bytes, task->data_count);

	if (task->data_count) {
		memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
		       task->data_count);
		bnx2i_conn->gen_pdu.req_wr_ptr =
			bnx2i_conn->gen_pdu.req_buf + task->data_count;
	}
	cmd->conn = conn->dd_data;
	cmd->scsi_cmd = NULL;
	return bnx2i_iscsi_send_generic_request(task);
}

/**
 * bnx2i_task_xmit - transmit iscsi command to chip for further processing
 * @task: transport layer command structure pointer
 *
 * maps SG buffers and send request to chip/firmware in the form of SQ WQE
 */
static int bnx2i_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *session = conn->session;
	struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct bnx2i_cmd *cmd = task->dd_data;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;

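	/* refuse the command if accepting it would exceed the SQ depth */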
	if (atomic_read(&bnx2i_conn->ep->num_active_cmds) + 1 >
	    hba->max_sqes)
		return -ENOMEM;

	/*
	 * If there is no scsi_cmnd this must be a mgmt task
	 */
	if (!sc)
		return bnx2i_mtask_xmit(conn, task);

	bnx2i_setup_cmd_wqe_template(cmd);
	cmd->req.op_code = ISCSI_OP_SCSI_CMD;
	cmd->conn = bnx2i_conn;
	cmd->scsi_cmd = sc;
	cmd->req.total_data_transfer_length = scsi_bufflen(sc);
	cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);

	bnx2i_iscsi_map_sg_list(cmd);
	bnx2i_cpy_scsi_cdb(sc, cmd);

	cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
		bnx2i_setup_write_cmd_bd_info(task);
	} else {
		if (scsi_bufflen(sc))
			cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
		cmd->req.itt = task->itt |
			(ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
	}

	cmd->req.num_bds = cmd->io_tbl.bd_valid;
	if (!cmd->io_tbl.bd_valid) {
		cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
		cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
		cmd->req.num_bds = 1;
	}

	bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
	return 0;
}

/**
 * bnx2i_session_create - create a new iscsi session
 * @ep: pointer to iscsi endpoint
 * @cmds_max: max commands supported
 * @qdepth: scsi queue depth to support
 * @initial_cmdsn: initial iscsi CMDSN to be used for this session
 *
 * Creates a new iSCSI session instance on given device.
 */
static struct iscsi_cls_session *
bnx2i_session_create(struct iscsi_endpoint *ep,
		     uint16_t cmds_max, uint16_t qdepth,
		     uint32_t initial_cmdsn)
{
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct bnx2i_hba *hba;
	struct bnx2i_endpoint *bnx2i_ep;

	if (!ep) {
		printk(KERN_ERR "bnx2i: missing ep.\n");
		return NULL;
	}

	bnx2i_ep = ep->dd_data;
	shost = bnx2i_ep->hba->shost;
	hba = iscsi_host_priv(shost);
	if (bnx2i_adapter_ready(hba))
		return NULL;

	/*
	 * user can override hw limit as long as it is within
	 * the min/max.
	 */
	if (cmds_max > hba->max_sqes)
		cmds_max = hba->max_sqes;
	else if (cmds_max < BNX2I_SQ_WQES_MIN)
		cmds_max = BNX2I_SQ_WQES_MIN;

	cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
					  cmds_max, 0, sizeof(struct bnx2i_cmd),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
		goto session_teardown;
	return cls_session;

session_teardown:
	iscsi_session_teardown(cls_session);
	return NULL;
}

/**
 * bnx2i_session_destroy - destroys iscsi session
 * @cls_session: pointer to iscsi cls session
 *
 * Destroys previously created iSCSI session instance and releases
 * all resources held by it
 */
static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);

	bnx2i_destroy_cmd_pool(hba, session);
	iscsi_session_teardown(cls_session);
}

/**
 * bnx2i_conn_create - create iscsi connection instance
 * @cls_session: pointer to iscsi cls session
 * @cid: iscsi cid as per rfc (not NX2's CID terminology)
 *
 * Creates a new iSCSI connection instance for a given session
 */
static struct iscsi_cls_conn *
bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_conn *bnx2i_conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;

	cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
				    cid);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;

	bnx2i_conn = conn->dd_data;
	bnx2i_conn->cls_conn = cls_conn;
	bnx2i_conn->hba = hba;

	atomic_set(&bnx2i_conn->work_cnt, 0);

	/* 'ep' ptr will be assigned in bind() call */
	bnx2i_conn->ep = NULL;
	init_completion(&bnx2i_conn->cmd_cleanup_cmpl);

	if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
		iscsi_conn_printk(KERN_ALERT, conn,
				  "conn_new: login resc alloc failed!!\n");
		goto free_conn;
	}

	return cls_conn;

free_conn:
	iscsi_conn_teardown(cls_conn);
	return NULL;
}

/**
 * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
 * @cls_session: pointer to iscsi cls session
 * @cls_conn: pointer to iscsi cls conn
 * @transport_fd: 64-bit EP handle
 * @is_leading: leading connection on this session?
 *
 * Binds together iSCSI session instance, iSCSI connection instance
 * and the TCP connection. This routine returns error code if
 * the TCP connection does not belong to the device the iSCSI
 * sess/conn is bound to
 */
static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
			   struct iscsi_cls_conn *cls_conn,
			   uint64_t transport_fd, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	struct bnx2i_endpoint *bnx2i_ep;
	struct iscsi_endpoint *ep;
	int ret_code;

	ep = iscsi_lookup_endpoint(transport_fd);
	if (!ep)
		return -EINVAL;
	/*
	 * Forcefully terminate all in progress connection recovery at the
	 * earliest, either in bind(), send_pdu(LOGIN), or conn_start()
	 */
	if (bnx2i_adapter_ready(hba))
		return -EIO;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
	    (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
		/* Peer disconnect via FIN or RST */
		return -EINVAL;

	if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
		return -EINVAL;

	if (bnx2i_ep->hba != hba) {
		/* Error - TCP connection does not belong to this device
		 */
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "conn bind, ep=0x%p (%s) does not",
				  bnx2i_ep, bnx2i_ep->hba->netdev->name);
		iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
				  "belong to hba (%s)\n",
				  hba->netdev->name);
		return -EEXIST;
	}
	bnx2i_ep->conn = bnx2i_conn;
	bnx2i_conn->ep = bnx2i_ep;
	bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
	bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;

	ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
						bnx2i_ep->ep_iscsi_cid);

	/* 5706/5708/5709 FW takes RQ as full when initiated, but for 57710
	 * driver needs to explicitly replenish RQ index during setup.
	 */
	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		bnx2i_put_rq_buf(bnx2i_conn, 0);

	bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
	return ret_code;
}

/**
 * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
 * @cls_conn: pointer to iscsi cls conn
 *
 * Destroy an iSCSI connection instance and release memory resources held by
 * this connection
 */
static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;
	struct Scsi_Host *shost;
	struct bnx2i_hba *hba;
	struct bnx2i_work *work, *tmp;
	unsigned cpu = 0;
	struct bnx2i_percpu_s *p;

	shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
	hba = iscsi_host_priv(shost);

	bnx2i_conn_free_login_resources(hba, bnx2i_conn);

	if (atomic_read(&bnx2i_conn->work_cnt)) {
		for_each_online_cpu(cpu) {
			p = &per_cpu(bnx2i_percpu, cpu);
			spin_lock_bh(&p->p_work_lock);
			list_for_each_entry_safe(work, tmp,
						 &p->work_list, list) {
				if (work->session == conn->session &&
				    work->bnx2i_conn == bnx2i_conn) {
					list_del_init(&work->list);
					kfree(work);
					if (!atomic_dec_and_test(
							&bnx2i_conn->work_cnt))
						break;
				}
			}
			spin_unlock_bh(&p->p_work_lock);
		}
	}

	iscsi_conn_teardown(cls_conn);
}


/**
 * bnx2i_ep_get_param - return iscsi ep parameter to caller
 * @ep: pointer to iscsi endpoint
 * @param: parameter type identifier
 * @buf: buffer pointer
 *
 * returns iSCSI ep parameters
 */
static int bnx2i_ep_get_param(struct iscsi_endpoint *ep,
			      enum iscsi_param param, char *buf)
{
	struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
	struct bnx2i_hba *hba = bnx2i_ep->hba;
	int len = -ENOTCONN;

	if (!hba)
		return -ENOTCONN;

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
		mutex_lock(&hba->net_dev_lock);
		if (bnx2i_ep->cm_sk)
			len = sprintf(buf, "%hu\n", bnx2i_ep->cm_sk->dst_port);
		mutex_unlock(&hba->net_dev_lock);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		mutex_lock(&hba->net_dev_lock);
		if (bnx2i_ep->cm_sk)
			len = sprintf(buf, "%pI4\n", &bnx2i_ep->cm_sk->dst_ip);
		mutex_unlock(&hba->net_dev_lock);
		break;
	default:
		return -ENOSYS;
	}

	return len;
}

/**
 * bnx2i_host_get_param - returns host (adapter) related parameters
 * @shost: scsi host pointer
 * @param: parameter type identifier
 * @buf: buffer pointer
 */
static int bnx2i_host_get_param(struct Scsi_Host *shost,
				enum iscsi_host_param param, char *buf)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	int len = 0;

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", hba->netdev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS: {
		struct list_head *active_list = &hba->ep_active_list;
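		/* Report the source IP of the first endpoint on the
		 * adapter's active list, if one exists.
		 */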
		read_lock_bh(&hba->ep_rdwr_lock);
		if (!list_empty(&hba->ep_active_list)) {
			struct bnx2i_endpoint *bnx2i_ep;
			struct cnic_sock *csk;

			bnx2i_ep = list_first_entry(active_list,
						    struct bnx2i_endpoint,
						    link);
			csk = bnx2i_ep->cm_sk;
			if (test_bit(SK_F_IPV6, &csk->flags))
				len = sprintf(buf, "%pI6\n", csk->src_ip);
			else
				len = sprintf(buf, "%pI4\n", csk->src_ip);
		}
		read_unlock_bh(&hba->ep_rdwr_lock);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}

/**
 * bnx2i_conn_start - completes iscsi connection migration to FFP
 * @cls_conn: pointer to iscsi cls conn
 *
 * last call in FFP migration to handover iscsi conn to the driver
 */
static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct bnx2i_conn *bnx2i_conn = conn->dd_data;

	bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
	bnx2i_update_iscsi_conn(conn);

	/*
	 * this should normally not sleep for a long time so it should
	 * not disrupt the caller.
	 */
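	/* Arm a one-second guard timer so the wait below is bounded even
	 * if the CNIC never signals completion of the context update;
	 * bnx2i_ep_ofld_timer() wakes the waiter on expiry.
	 */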
	bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
	bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
	add_timer(&bnx2i_conn->ep->ofld_timer);
	/* update iSCSI context for this conn, wait for CNIC to complete */
	wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
			bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_conn->ep->ofld_timer);

	iscsi_conn_start(cls_conn);
	return 0;
}

/**
 * bnx2i_conn_get_stats - returns iSCSI stats
 * @cls_conn: pointer to iscsi cls conn
 * @stats: pointer to iscsi statistic struct
 */
static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
				 struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
	stats->custom_length = 1;
}

/**
 * bnx2i_check_route - checks if target IP route belongs to one of the NX2 devices
 * @dst_addr: target IP address
 *
 * check if route resolves to BNX2 device
 */
static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic = NULL;

	hba = get_adapter_list_head();
	if (hba && hba->cnic)
		cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
	if (!cnic) {
		printk(KERN_ALERT "bnx2i: no route, "
		       "can't connect using cnic\n");
		goto no_nx2_route;
	}
	hba = bnx2i_find_hba_for_cnic(cnic);
	if (!hba)
		goto no_nx2_route;

	if (bnx2i_adapter_ready(hba)) {
		printk(KERN_ALERT "bnx2i: check route, hba not found\n");
		goto no_nx2_route;
	}
	if (hba->netdev->mtu > hba->mtu_supported) {
		printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
		       hba->netdev->name, hba->netdev->mtu);
		printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
		       hba->mtu_supported);
		goto no_nx2_route;
	}
	return hba;
no_nx2_route:
	return NULL;
}

/**
 * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
 * @hba: pointer to adapter instance
 * @ep: endpoint (transport identifier) structure
 *
 * destroys cm_sock structure and on chip iscsi context
 */
static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
				struct bnx2i_endpoint *ep)
{
	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) && ep->cm_sk)
		hba->cnic->cm_destroy(ep->cm_sk);

	if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
	    ep->state == EP_STATE_DISCONN_TIMEDOUT) {
		if (ep->conn && ep->conn->cls_conn &&
		    ep->conn->cls_conn->dd_data) {
			struct iscsi_conn *conn = ep->conn->cls_conn->dd_data;

			/* Must suspend all rx queue activity for this ep */
			set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
		}
		/* CONN_DISCONNECT timeout may or may not be an issue depending
		 * on what transpired in the TCP layer; different targets behave
		 * differently
		 */
		printk(KERN_ALERT "bnx2i (%s): - WARN - CONN_DISCON timed out, "
				"please submit GRC Dump, NW/PCIe trace, "
				"driver msgs to developers for analysis\n",
				hba->netdev->name);
	}

	ep->state = EP_STATE_CLEANUP_START;
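	/* Bound the context-destroy wait below with the adapter's
	 * configured conn_ctx_destroy_tmo timeout.
	 */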
	init_timer(&ep->ofld_timer);
	ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies;
	ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	ep->ofld_timer.data = (unsigned long) ep;
	add_timer(&ep->ofld_timer);

	bnx2i_ep_destroy_list_add(hba, ep);

	/* destroy iSCSI context, wait for it to complete */
	if (bnx2i_send_conn_destroy(hba, ep))
		ep->state = EP_STATE_CLEANUP_CMPL;

	wait_event_interruptible(ep->ofld_wait,
				 (ep->state != EP_STATE_CLEANUP_START));

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&ep->ofld_timer);

	bnx2i_ep_destroy_list_del(hba, ep);

	if (ep->state != EP_STATE_CLEANUP_CMPL)
		/* should never happen */
		printk(KERN_ALERT "bnx2i - conn destroy failed\n");

	return 0;
}

/**
 * bnx2i_ep_connect - establish TCP connection to target portal
 * @shost: scsi host
 * @dst_addr: target IP address
 * @non_blocking: blocking or non-blocking call
 *
 * this routine initiates the TCP/IP connection by invoking Option-2 i/f
 * with l5_core and the CNIC. This is a multi-step process of resolving
 * route to target, creating an iSCSI connection context, handshaking with
 * CNIC module to create/initialize the socket struct and finally
 * sending down option-2 request to complete TCP 3-way handshake
 */
static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
					       struct sockaddr *dst_addr,
					       int non_blocking)
{
	u32 iscsi_cid = BNX2I_CID_RESERVED;
	struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
	struct sockaddr_in6 *desti6;
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_hba *hba;
	struct cnic_dev *cnic;
	struct cnic_sockaddr saddr;
	struct iscsi_endpoint *ep;
	int rc = 0;

	if (shost) {
		/* driver is given scsi host to work with */
		hba = iscsi_host_priv(shost);
	} else
		/*
		 * check if the given destination can be reached through
		 * an iSCSI-capable NetXtreme2 device
		 */
		hba = bnx2i_check_route(dst_addr);

	if (!hba) {
		rc = -EINVAL;
		goto nohba;
	}

	mutex_lock(&hba->net_dev_lock);
	if (bnx2i_adapter_ready(hba) || !hba->cid_que.cid_free_cnt) {
		rc = -EPERM;
		goto check_busy;
	}
	cnic = hba->cnic;
	ep = bnx2i_alloc_ep(hba);
	if (!ep) {
		rc = -ENOMEM;
		goto check_busy;
	}
	bnx2i_ep = ep->dd_data;

	atomic_set(&bnx2i_ep->num_active_cmds, 0);
	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
	if (iscsi_cid == -1) {
		printk(KERN_ALERT "bnx2i (%s): alloc_ep - unable to allocate "
			"iscsi cid\n", hba->netdev->name);
		rc = -ENOMEM;
		bnx2i_free_ep(ep);
		goto check_busy;
	}

	bnx2i_ep->hba_age = hba->age;
	rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
	if (rc != 0) {
		printk(KERN_ALERT "bnx2i (%s): ep_conn - alloc QP resc error"
			"\n", hba->netdev->name);
		rc = -ENOMEM;
		goto qp_resc_err;
	}

	bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
	bnx2i_ep->state = EP_STATE_OFLD_START;
	bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
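	/* Arm a 2-second guard timer for the connection offload request
	 * issued below; bnx2i_ep_ofld_timer() wakes the waiter on expiry.
	 */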
	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) {
		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
			rc = -EBUSY;
		} else
			rc = -ENOSPC;
		printk(KERN_ALERT "bnx2i (%s): unable to send conn offld kwqe"
			"\n", hba->netdev->name);
		bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
		goto conn_failed;
	}

	/* Wait for CNIC hardware to setup conn context and return 'cid' */
	wait_event_interruptible(bnx2i_ep->ofld_wait,
				 bnx2i_ep->state != EP_STATE_OFLD_START);

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);

	bnx2i_ep_ofld_list_del(hba, bnx2i_ep);

	if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
		if (bnx2i_ep->state == EP_STATE_OFLD_FAILED_CID_BUSY) {
			printk(KERN_ALERT "bnx2i (%s): iscsi cid %d is busy\n",
				hba->netdev->name, bnx2i_ep->ep_iscsi_cid);
			rc = -EBUSY;
		} else
			rc = -ENOSPC;
		goto conn_failed;
	}

	rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
			     iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
	if (rc) {
		rc = -EINVAL;
		/* Need to terminate and cleanup the connection */
		goto release_ep;
	}
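	/* Size the offloaded socket's TCP send/receive buffers at 256KB
	 * and disable TCP timestamps on it.
	 */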
	bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
	bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
	clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);

	memset(&saddr, 0, sizeof(saddr));
	if (dst_addr->sa_family == AF_INET) {
		desti = (struct sockaddr_in *) dst_addr;
		saddr.remote.v4 = *desti;
		saddr.local.v4.sin_family = desti->sin_family;
	} else if (dst_addr->sa_family == AF_INET6) {
		desti6 = (struct sockaddr_in6 *) dst_addr;
		saddr.remote.v6 = *desti6;
		saddr.local.v6.sin6_family = desti6->sin6_family;
	}

	bnx2i_ep->timestamp = jiffies;
	bnx2i_ep->state = EP_STATE_CONNECT_START;
	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
		rc = -EINVAL;
		goto conn_failed;
	} else
		rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
	if (rc)
		goto release_ep;

	bnx2i_ep_active_list_add(hba, bnx2i_ep);

	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
		goto del_active_ep;

	mutex_unlock(&hba->net_dev_lock);
	return ep;

del_active_ep:
	bnx2i_ep_active_list_del(hba, bnx2i_ep);
release_ep:
	if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return ERR_PTR(rc);
	}
conn_failed:
	bnx2i_free_qp_resc(hba, bnx2i_ep);
qp_resc_err:
	bnx2i_free_ep(ep);
check_busy:
	mutex_unlock(&hba->net_dev_lock);
nohba:
	return ERR_PTR(rc);
}

/**
 * bnx2i_ep_poll - polls for TCP connection establishment
 * @ep: TCP connection (endpoint) handle
 * @timeout_ms: timeout value in milliseconds
 *
 * polls for TCP connect request to complete
 */
static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct bnx2i_endpoint *bnx2i_ep;
	int rc = 0;

	bnx2i_ep = ep->dd_data;
	if ((bnx2i_ep->state == EP_STATE_IDLE) ||
	    (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
	    (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
		return -1;
	if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
		return 1;
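	/* Sleep until the connect attempt reaches a terminal state
	 * (offload failed, connect failed, or connect complete) or the
	 * caller-supplied timeout expires.
	 */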
	rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
					      ((bnx2i_ep->state ==
						EP_STATE_OFLD_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_FAILED) ||
					       (bnx2i_ep->state ==
						EP_STATE_CONNECT_COMPL)),
					      msecs_to_jiffies(timeout_ms));
	if (bnx2i_ep->state == EP_STATE_OFLD_FAILED)
		rc = -1;

	if (rc > 0)
		return 1;
	else if (!rc)
		return 0;	/* timeout */
	else
		return rc;
}

/**
 * bnx2i_ep_tcp_conn_active - check EP state transition
 * @bnx2i_ep: endpoint pointer
 *
 * check if underlying TCP connection is active
 */
static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
{
	int ret;
	int cnic_dev_10g = 0;

	if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
		cnic_dev_10g = 1;

	switch (bnx2i_ep->state) {
	case EP_STATE_CLEANUP_FAILED:
	case EP_STATE_OFLD_FAILED:
	case EP_STATE_DISCONN_TIMEDOUT:
		ret = 0;
		break;
	case EP_STATE_CONNECT_START:
	case EP_STATE_CONNECT_FAILED:
	case EP_STATE_CONNECT_COMPL:
	case EP_STATE_ULP_UPDATE_START:
	case EP_STATE_ULP_UPDATE_COMPL:
	case EP_STATE_TCP_FIN_RCVD:
	case EP_STATE_LOGOUT_SENT:
	case EP_STATE_LOGOUT_RESP_RCVD:
	case EP_STATE_ULP_UPDATE_FAILED:
		ret = 1;
		break;
	case EP_STATE_TCP_RST_RCVD:
		if (cnic_dev_10g)
			ret = 0;
		else
			ret = 1;
		break;
	default:
		ret = 0;
	}

	return ret;
}

/**
 * bnx2i_hw_ep_disconnect - executes TCP connection teardown process in the hw
 * @bnx2i_ep: TCP connection (bnx2i endpoint) handle
 *
 * executes TCP connection teardown process
 */
int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep)
{
	struct bnx2i_hba *hba = bnx2i_ep->hba;
	struct cnic_dev *cnic;
	struct iscsi_session *session = NULL;
	struct iscsi_conn *conn = NULL;
	int ret = 0;
	int close = 0;
	int close_ret = 0;

	if (!hba)
		return 0;

	cnic = hba->cnic;
	if (!cnic)
		return 0;

	if (bnx2i_ep->state == EP_STATE_IDLE ||
	    bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
		return 0;

	if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
		goto destroy_conn;

	if (bnx2i_ep->conn) {
		conn = bnx2i_ep->conn->cls_conn->dd_data;
		session = conn->session;
	}

	init_timer(&bnx2i_ep->ofld_timer);
	bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies;
	bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
	bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
	add_timer(&bnx2i_ep->ofld_timer);

	if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
		goto out;
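	/* Prefer a graceful TCP close only if the target already sent a
	 * FIN or the iSCSI logout handshake completed; otherwise abort
	 * the offloaded connection.
	 */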
	if (session) {
		spin_lock_bh(&session->frwd_lock);
		if (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD) {
			if (session->state == ISCSI_STATE_LOGGING_OUT) {
				if (bnx2i_ep->state == EP_STATE_LOGOUT_SENT) {
					/* Logout sent, but no resp */
					printk(KERN_ALERT "bnx2i (%s): WARNING"
						" logout response was not "
						"received!\n",
						bnx2i_ep->hba->netdev->name);
				} else if (bnx2i_ep->state ==
					   EP_STATE_LOGOUT_RESP_RCVD)
					close = 1;
			}
		} else
			close = 1;

		spin_unlock_bh(&session->frwd_lock);
	}

	bnx2i_ep->state = EP_STATE_DISCONN_START;

	if (close)
		close_ret = cnic->cm_close(bnx2i_ep->cm_sk);
	else
		close_ret = cnic->cm_abort(bnx2i_ep->cm_sk);

	if (close_ret)
		printk(KERN_ALERT "bnx2i (%s): close/abort(%d) returned %d\n",
			bnx2i_ep->hba->netdev->name, close, close_ret);
	else
		/* wait for option-2 conn teardown */
		wait_event_interruptible(bnx2i_ep->ofld_wait,
				((bnx2i_ep->state != EP_STATE_DISCONN_START)
				&& (bnx2i_ep->state != EP_STATE_TCP_FIN_RCVD)));

	if (signal_pending(current))
		flush_signals(current);
	del_timer_sync(&bnx2i_ep->ofld_timer);

destroy_conn:
	bnx2i_ep_active_list_del(hba, bnx2i_ep);
	if (bnx2i_tear_down_conn(hba, bnx2i_ep))
		return -EINVAL;
out:
	bnx2i_ep->state = EP_STATE_IDLE;
	return ret;
}

/**
 * bnx2i_ep_disconnect - executes TCP connection teardown process
 * @ep: TCP connection (iscsi endpoint) handle
 *
 * executes TCP connection teardown process
 */
static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct bnx2i_endpoint *bnx2i_ep;
	struct bnx2i_conn *bnx2i_conn = NULL;
	struct iscsi_conn *conn = NULL;
	struct bnx2i_hba *hba;

	bnx2i_ep = ep->dd_data;

	/* driver should not attempt connection cleanup until TCP_CONNECT
	 * completes either successfully or fails. Timeout is 9-secs, so
	 * wait for it to complete
	 */
	while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
		!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
		msleep(250);

	if (bnx2i_ep->conn) {
		bnx2i_conn = bnx2i_ep->conn;
		conn = bnx2i_conn->cls_conn->dd_data;
		iscsi_suspend_queue(conn);
	}
	hba = bnx2i_ep->hba;

	mutex_lock(&hba->net_dev_lock);

	if (bnx2i_ep->state == EP_STATE_DISCONN_TIMEDOUT)
		goto out;

	if (bnx2i_ep->state == EP_STATE_IDLE)
		goto free_resc;

	if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
	    (bnx2i_ep->hba_age != hba->age)) {
		bnx2i_ep_active_list_del(hba, bnx2i_ep);
		goto free_resc;
	}

	/* Do all chip cleanup here */
	if (bnx2i_hw_ep_disconnect(bnx2i_ep)) {
		mutex_unlock(&hba->net_dev_lock);
		return;
	}
free_resc:
	bnx2i_free_qp_resc(hba, bnx2i_ep);

	if (bnx2i_conn)
		bnx2i_conn->ep = NULL;

	bnx2i_free_ep(ep);
out:
	mutex_unlock(&hba->net_dev_lock);

	wake_up_interruptible(&hba->eh_wait);
}

/**
 * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
 * @shost: scsi host pointer
 * @params: pointer to buffer containing the iscsi path message
 */
static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
{
	struct bnx2i_hba *hba = iscsi_host_priv(shost);
	char *buf = (char *) params;
	u16 len = sizeof(*params);

	/* handled by cnic driver */
	hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
				     len);

	return 0;
}

static umode_t bnx2i_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
		case ISCSI_PARAM_BOOT_ROOT:
		case ISCSI_PARAM_BOOT_NIC:
		case ISCSI_PARAM_BOOT_TARGET:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

/*
 * 'Scsi_Host_Template' structure and 'iscsi_transport' structure template
 * used while registering with the scsi host and iSCSI transport module.
 */
static struct scsi_host_template bnx2i_host_template = {
	.module			= THIS_MODULE,
	.name			= "QLogic Offload iSCSI Initiator",
	.proc_name		= "bnx2i",
	.queuecommand		= iscsi_queuecommand,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_recover_target,
	.change_queue_depth	= scsi_change_queue_depth,
	.target_alloc		= iscsi_target_alloc,
	.can_queue		= 2048,
	.max_sectors		= 127,
	.cmd_per_lun		= 128,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.sg_tablesize		= ISCSI_MAX_BDS_PER_CMD,
	.shost_attrs		= bnx2i_dev_attributes,
	.track_queue_depth	= 1,
};

struct iscsi_transport bnx2i_iscsi_transport = {
	.owner			= THIS_MODULE,
	.name			= "bnx2i",
	.caps			= CAP_RECOVERY_L0 | CAP_HDRDGST |
				  CAP_MULTI_R2T | CAP_DATADGST |
				  CAP_DATA_PATH_OFFLOAD |
				  CAP_TEXT_NEGO,
	.create_session		= bnx2i_session_create,
	.destroy_session	= bnx2i_session_destroy,
	.create_conn		= bnx2i_conn_create,
	.bind_conn		= bnx2i_conn_bind,
	.destroy_conn		= bnx2i_conn_destroy,
	.attr_is_visible	= bnx2i_attr_is_visible,
	.set_param		= iscsi_set_param,
	.get_conn_param		= iscsi_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.get_host_param		= bnx2i_host_get_param,
	.start_conn		= bnx2i_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.xmit_task		= bnx2i_task_xmit,
	.get_stats		= bnx2i_conn_get_stats,
	/* TCP connect - disconnect - option-2 interface calls */
	.get_ep_param		= bnx2i_ep_get_param,
	.ep_connect		= bnx2i_ep_connect,
	.ep_poll		= bnx2i_ep_poll,
	.ep_disconnect		= bnx2i_ep_disconnect,
	.set_path		= bnx2i_nl_set_path,
	/* Error recovery timeout call */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
	.cleanup_task		= bnx2i_cleanup_task,
};