/*
 * bnx2fc_els.c: QLogic Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 * and responses.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2015 QLogic Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */
#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
                             void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
                              void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
                               void *data, u32 data_len,
                               void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
                               struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);
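
/*
 * bnx2fc_rrq_compl() - completion callback for an outstanding RRQ.
 *
 * Drops the reference held on the original (aborted) IO request. If the
 * RRQ itself timed out, the request is pulled off the active queue and a
 * firmware cleanup is issued so that the late completion is discarded.
 */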
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
        struct bnx2fc_cmd *orig_io_req;
        struct bnx2fc_cmd *rrq_req;
        int rc = 0;

        BUG_ON(!cb_arg);
        rrq_req = cb_arg->io_req;

        orig_io_req = cb_arg->aborted_io_req;
        BUG_ON(!orig_io_req);
        BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
                       orig_io_req->xid, rrq_req->xid);

        kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

        if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
                /*
                 * The ELS request timed out: clean up the IO with the FW
                 * and drop the completion. Remove from active_cmd_queue.
                 */
                BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
                               rrq_req->xid);

                if (rrq_req->on_active_queue) {
                        list_del_init(&rrq_req->link);
                        rrq_req->on_active_queue = 0;
                        rc = bnx2fc_initiate_cleanup(rrq_req);
                        BUG_ON(rc);
                }
        }
        kfree(cb_arg);
}
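
/*
 * bnx2fc_send_rrq() - send an RRQ (Reinstate Recovery Qualifier) ELS for
 * an aborted exchange. Retries on -ENOMEM every 20ms for up to 10
 * seconds; if the request ultimately fails, the reference on the
 * original IO request is dropped here instead of in the completion.
 */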
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{
        struct fc_els_rrq rrq;
        struct bnx2fc_rport *tgt = aborted_io_req->tgt;
        struct fc_lport *lport = tgt->rdata->local_port;
        struct bnx2fc_els_cb_arg *cb_arg = NULL;
        u32 sid = tgt->sid;
        u32 r_a_tov = lport->r_a_tov;
        unsigned long start = jiffies;
        int rc;

        BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
                       aborted_io_req->xid);
        memset(&rrq, 0, sizeof(rrq));

        cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
        if (!cb_arg) {
                printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
                rc = -ENOMEM;
                goto rrq_err;
        }

        cb_arg->aborted_io_req = aborted_io_req;

        rrq.rrq_cmd = ELS_RRQ;
        hton24(rrq.rrq_s_id, sid);
        rrq.rrq_ox_id = htons(aborted_io_req->xid);
        rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
        rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
                                 bnx2fc_rrq_compl, cb_arg,
                                 r_a_tov);
        if (rc == -ENOMEM) {
                if (time_after(jiffies, start + (10 * HZ))) {
                        BNX2FC_ELS_DBG("rrq Failed\n");
                        rc = FAILED;
                        goto rrq_err;
                }
                msleep(20);
                goto retry_rrq;
        }
rrq_err:
        if (rc) {
                BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
                               aborted_io_req->xid);
                kfree(cb_arg);
                spin_lock_bh(&tgt->tgt_lock);
                kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
                spin_unlock_bh(&tgt->tgt_lock);
        }
        return rc;
}
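
/*
 * bnx2fc_l2_els_compl() - completion callback for ADISC/LOGO/RLS issued
 * on behalf of libfc. Rebuilds a contiguous buffer (FC header followed
 * by the response payload) and passes it to
 * bnx2fc_process_l2_frame_compl() along with the original libfc OX_ID,
 * so the response can be matched back to the libfc exchange.
 */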
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
        struct bnx2fc_cmd *els_req;
        struct bnx2fc_rport *tgt;
        struct bnx2fc_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
        unsigned char *buf;
        void *resp_buf;
        u32 resp_len, hdr_len;
        u16 l2_oxid;
        int frame_len;
        int rc = 0;

        l2_oxid = cb_arg->l2_oxid;
        BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

        els_req = cb_arg->io_req;
        if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
                /*
                 * The ELS request timed out: clean up the IO with the FW
                 * and drop the completion. libfc will handle the ELS timeout.
                 */
                if (els_req->on_active_queue) {
                        list_del_init(&els_req->link);
                        els_req->on_active_queue = 0;
                        rc = bnx2fc_initiate_cleanup(els_req);
                        BUG_ON(rc);
                }
                goto free_arg;
        }

        tgt = els_req->tgt;
        mp_req = &(els_req->mp_req);
        fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
        if (!buf) {
                printk(KERN_ERR PFX "Unable to alloc mp buf\n");
                goto free_arg;
        }
        hdr_len = sizeof(*fc_hdr);
        if (hdr_len + resp_len > PAGE_SIZE) {
                printk(KERN_ERR PFX "l2_els_compl: resp len is "
                                    "beyond page size\n");
                goto free_buf;
        }
        memcpy(buf, fc_hdr, hdr_len);
        memcpy(buf + hdr_len, resp_buf, resp_len);
        frame_len = hdr_len + resp_len;

        bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
        kfree(buf);
free_arg:
        kfree(cb_arg);
}
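
/* Send an ADISC (payload already built by libfc) via the offloaded ELS path. */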
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
        struct fc_els_adisc *adisc;
        struct fc_frame_header *fh;
        struct bnx2fc_els_cb_arg *cb_arg;
        struct fc_lport *lport = tgt->rdata->local_port;
        u32 r_a_tov = lport->r_a_tov;
        int rc;

        fh = fc_frame_header_get(fp);
        cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
        if (!cb_arg) {
                printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
                return -ENOMEM;
        }

        cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

        BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
        adisc = fc_frame_payload_get(fp, sizeof(*adisc));
        /* adisc is initialized by libfc */
        rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
                                 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
        if (rc)
                kfree(cb_arg);
        return rc;
}
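
/* Send a LOGO (payload already built by libfc) via the offloaded ELS path. */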
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
        struct fc_els_logo *logo;
        struct fc_frame_header *fh;
        struct bnx2fc_els_cb_arg *cb_arg;
        struct fc_lport *lport = tgt->rdata->local_port;
        u32 r_a_tov = lport->r_a_tov;
        int rc;

        fh = fc_frame_header_get(fp);
        cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
        if (!cb_arg) {
                printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
                return -ENOMEM;
        }

        cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

        BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
        logo = fc_frame_payload_get(fp, sizeof(*logo));
        /* logo is initialized by libfc */
        rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
                                 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
        if (rc)
                kfree(cb_arg);
        return rc;
}
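
/* Send an RLS (payload already built by libfc) via the offloaded ELS path. */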
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
        struct fc_els_rls *rls;
        struct fc_frame_header *fh;
        struct bnx2fc_els_cb_arg *cb_arg;
        struct fc_lport *lport = tgt->rdata->local_port;
        u32 r_a_tov = lport->r_a_tov;
        int rc;

        fh = fc_frame_header_get(fp);
        cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
        if (!cb_arg) {
                printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
                return -ENOMEM;
        }

        cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

        rls = fc_frame_payload_get(fp, sizeof(*rls));
        /* rls is initialized by libfc */
        rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
                                 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
        if (rc)
                kfree(cb_arg);
        return rc;
}
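
/*
 * bnx2fc_srr_compl() - completion callback for SRR. On timeout, the SRR
 * exchange is ABTSed and the SRR is resent up to SRR_RETRY_COUNT times
 * before the original IO itself is aborted. An LS_RJT response likewise
 * aborts the original IO. Called with tgt_lock held; the lock is
 * dropped around bnx2fc_send_srr().
 */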
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
        struct bnx2fc_mp_req *mp_req;
        struct fc_frame_header *fc_hdr, *fh;
        struct bnx2fc_cmd *srr_req;
        struct bnx2fc_cmd *orig_io_req;
        struct fc_frame *fp;
        unsigned char *buf;
        void *resp_buf;
        u32 resp_len, hdr_len;
        u8 opcode;
        int rc = 0;

        orig_io_req = cb_arg->aborted_io_req;
        srr_req = cb_arg->io_req;
        if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
                /* SRR timed out */
                BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
                              "orig_io - 0x%x\n",
                              orig_io_req->xid);
                rc = bnx2fc_initiate_abts(srr_req);
                if (rc != SUCCESS) {
                        BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
                                      "failed. issue cleanup\n");
                        bnx2fc_initiate_cleanup(srr_req);
                }
                if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
                    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
                        BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
                                      orig_io_req->xid, orig_io_req->req_flags);
                        goto srr_compl_done;
                }
                orig_io_req->srr_retry++;
                if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
                        struct bnx2fc_rport *tgt = orig_io_req->tgt;

                        spin_unlock_bh(&tgt->tgt_lock);
                        rc = bnx2fc_send_srr(orig_io_req,
                                             orig_io_req->srr_offset,
                                             orig_io_req->srr_rctl);
                        spin_lock_bh(&tgt->tgt_lock);
                        if (!rc)
                                goto srr_compl_done;
                }

                rc = bnx2fc_initiate_abts(orig_io_req);
                if (rc != SUCCESS) {
                        BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
                                      "failed xid = 0x%x. issue cleanup\n",
                                      orig_io_req->xid);
                        bnx2fc_initiate_cleanup(orig_io_req);
                }
                goto srr_compl_done;
        }
        if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
            test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
                BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
                              orig_io_req->xid, orig_io_req->req_flags);
                goto srr_compl_done;
        }

        mp_req = &(srr_req->mp_req);
        fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        resp_buf = mp_req->resp_buf;

        hdr_len = sizeof(*fc_hdr);
        buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
        if (!buf) {
                printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
                goto srr_compl_done;
        }
        memcpy(buf, fc_hdr, hdr_len);
        memcpy(buf + hdr_len, resp_buf, resp_len);

        fp = fc_frame_alloc(NULL, resp_len);
        if (!fp) {
                printk(KERN_ERR PFX "fc_frame_alloc failure\n");
                goto free_buf;
        }

        fh = (struct fc_frame_header *) fc_frame_header_get(fp);
        /* Copy FC Frame header and payload into the frame */
        memcpy(fh, buf, hdr_len + resp_len);

        opcode = fc_frame_payload_op(fp);
        switch (opcode) {
        case ELS_LS_ACC:
                BNX2FC_IO_DBG(srr_req, "SRR success\n");
                break;
        case ELS_LS_RJT:
                BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
                rc = bnx2fc_initiate_abts(orig_io_req);
                if (rc != SUCCESS) {
                        BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
                                      "failed xid = 0x%x. issue cleanup\n",
                                      orig_io_req->xid);
                        bnx2fc_initiate_cleanup(orig_io_req);
                }
                break;
        default:
                BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
                              opcode);
                break;
        }
        fc_frame_free(fp);
free_buf:
        kfree(buf);
srr_compl_done:
        kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
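
/*
 * bnx2fc_rec_compl() - completion callback for REC (Read Exchange
 * Concise). On timeout, the REC is ABTSed and retried up to
 * REC_RETRY_COUNT times before the original IO is aborted. An LS_RJT
 * with a "cannot find OX_ID/RX_ID" explanation means the command never
 * reached the target, so the original IO is cleaned up and reposted on
 * a fresh bnx2fc_cmd. On LS_ACC, if sequence initiative is with us, the
 * relative offset and e_stat from the accept decide whether recovery is
 * done via sequence cleanup, an SRR, or a full abort. Called with
 * tgt_lock held; the lock is dropped around bnx2fc_send_rec() and
 * bnx2fc_send_srr().
 */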
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
        struct bnx2fc_cmd *orig_io_req, *new_io_req;
        struct bnx2fc_cmd *rec_req;
        struct bnx2fc_mp_req *mp_req;
        struct fc_frame_header *fc_hdr, *fh;
        struct fc_els_ls_rjt *rjt;
        struct fc_els_rec_acc *acc;
        struct bnx2fc_rport *tgt;
        struct fcoe_err_report_entry *err_entry;
        struct scsi_cmnd *sc_cmd;
        enum fc_rctl r_ctl;
        unsigned char *buf;
        void *resp_buf;
        struct fc_frame *fp;
        u8 opcode;
        u32 offset;
        u32 e_stat;
        u32 resp_len, hdr_len;
        int rc = 0;
        bool send_seq_clnp = false;
        bool abort_io = false;

        BNX2FC_MISC_DBG("Entered rec_compl callback\n");
        rec_req = cb_arg->io_req;
        orig_io_req = cb_arg->aborted_io_req;
        BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
        tgt = orig_io_req->tgt;

        /* Handle REC timeout case */
        if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
                BNX2FC_IO_DBG(rec_req, "timed out, abort "
                              "orig_io - 0x%x\n",
                              orig_io_req->xid);
                /* The ELS request timed out; send ABTS for the ELS */
                rc = bnx2fc_initiate_abts(rec_req);
                if (rc != SUCCESS) {
                        BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
                                      "failed. issue cleanup\n");
                        bnx2fc_initiate_cleanup(rec_req);
                }
                orig_io_req->rec_retry++;
                /*
                 * REC timed out: retry it, and once retries are
                 * exhausted, send ABTS for the orig IO req.
                 */
                if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
                        spin_unlock_bh(&tgt->tgt_lock);
                        rc = bnx2fc_send_rec(orig_io_req);
                        spin_lock_bh(&tgt->tgt_lock);
                        if (!rc)
                                goto rec_compl_done;
                }

                rc = bnx2fc_initiate_abts(orig_io_req);
                if (rc != SUCCESS) {
                        BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
                                      "failed xid = 0x%x. issue cleanup\n",
                                      orig_io_req->xid);
                        bnx2fc_initiate_cleanup(orig_io_req);
                }
                goto rec_compl_done;
        }

        if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
                BNX2FC_IO_DBG(rec_req, "completed "
                              "orig_io - 0x%x\n",
                              orig_io_req->xid);
                goto rec_compl_done;
        }
        if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
                BNX2FC_IO_DBG(rec_req, "abts in prog "
                              "orig_io - 0x%x\n",
                              orig_io_req->xid);
                goto rec_compl_done;
        }

        mp_req = &(rec_req->mp_req);
        fc_hdr = &(mp_req->resp_fc_hdr);
        resp_len = mp_req->resp_len;
        acc = resp_buf = mp_req->resp_buf;

        hdr_len = sizeof(*fc_hdr);

        buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
        if (!buf) {
                printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
                goto rec_compl_done;
        }
        memcpy(buf, fc_hdr, hdr_len);
        memcpy(buf + hdr_len, resp_buf, resp_len);

        fp = fc_frame_alloc(NULL, resp_len);
        if (!fp) {
                printk(KERN_ERR PFX "fc_frame_alloc failure\n");
                goto free_buf;
        }

        fh = (struct fc_frame_header *) fc_frame_header_get(fp);
        /* Copy FC Frame header and payload into the frame */
        memcpy(fh, buf, hdr_len + resp_len);

        opcode = fc_frame_payload_op(fp);
        if (opcode == ELS_LS_RJT) {
                BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
                if ((rjt->er_reason == ELS_RJT_LOGIC ||
                     rjt->er_reason == ELS_RJT_UNAB) &&
                    rjt->er_explan == ELS_EXPL_OXID_RXID) {
                        BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
                        new_io_req = bnx2fc_cmd_alloc(tgt);
                        if (!new_io_req)
                                goto abort_io;
                        new_io_req->sc_cmd = orig_io_req->sc_cmd;
                        /* cleanup orig_io_req that is with the FW */
                        set_bit(BNX2FC_FLAG_CMD_LOST,
                                &orig_io_req->req_flags);
                        bnx2fc_initiate_cleanup(orig_io_req);
                        /* Post a new IO req with the same sc_cmd */
                        BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
                        rc = bnx2fc_post_io_req(tgt, new_io_req);
                        if (!rc)
                                goto free_frame;
                        BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
                }
abort_io:
                rc = bnx2fc_initiate_abts(orig_io_req);
                if (rc != SUCCESS) {
                        BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
                                      "failed. issue cleanup\n");
                        bnx2fc_initiate_cleanup(orig_io_req);
                }
        } else if (opcode == ELS_LS_ACC) {
                /* REVISIT: Check if the exchange is already aborted */
                offset = ntohl(acc->reca_fc4value);
                e_stat = ntohl(acc->reca_e_stat);
                if (e_stat & ESB_ST_SEQ_INIT) {
                        BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
                        goto free_frame;
                }
                BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
                              e_stat, offset);
                /* Seq initiative is with us */
                err_entry = (struct fcoe_err_report_entry *)
                            &orig_io_req->err_entry;
                sc_cmd = orig_io_req->sc_cmd;
                if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
                        /* SCSI WRITE command */
                        if (offset == orig_io_req->data_xfer_len) {
                                BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
                                /* FCP_RSP lost */
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                offset = 0;
                        } else {
                                /* start transmitting from offset */
                                BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
                                send_seq_clnp = true;
                                r_ctl = FC_RCTL_DD_DATA_DESC;
                                if (bnx2fc_initiate_seq_cleanup(orig_io_req,
                                                                offset, r_ctl))
                                        abort_io = true;
                                /* XFER_RDY */
                        }
                } else {
                        /* SCSI READ command */
                        if (err_entry->data.rx_buf_off ==
                                        orig_io_req->data_xfer_len) {
                                /* FCP_RSP lost */
                                BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
                                r_ctl = FC_RCTL_DD_CMD_STATUS;
                                offset = 0;
                        } else {
                                /* request retransmission from this offset */
                                send_seq_clnp = true;
                                offset = err_entry->data.rx_buf_off;
                                BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
                                /* FCP_DATA lost */
                                r_ctl = FC_RCTL_DD_SOL_DATA;
                                if (bnx2fc_initiate_seq_cleanup(orig_io_req,
                                                                offset, r_ctl))
                                        abort_io = true;
                        }
                }
                if (abort_io) {
                        rc = bnx2fc_initiate_abts(orig_io_req);
                        if (rc != SUCCESS) {
                                BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
                                              " failed. issue cleanup\n");
                                bnx2fc_initiate_cleanup(orig_io_req);
                        }
                } else if (!send_seq_clnp) {
                        BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
                        spin_unlock_bh(&tgt->tgt_lock);
                        rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
                        spin_lock_bh(&tgt->tgt_lock);

                        if (rc) {
                                BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
                                              " IO will abort\n");
                        }
                }
        }
free_frame:
        fc_frame_free(fp);
free_buf:
        kfree(buf);
rec_compl_done:
        kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
        kfree(cb_arg);
}
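
/*
 * bnx2fc_send_rec() - send a REC ELS to query the state of an exchange
 * that hit an error. Takes an extra reference on the original IO
 * request, which bnx2fc_rec_compl() drops when it finishes.
 */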
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
        struct fc_els_rec rec;
        struct bnx2fc_rport *tgt = orig_io_req->tgt;
        struct fc_lport *lport = tgt->rdata->local_port;
        struct bnx2fc_els_cb_arg *cb_arg = NULL;
        u32 sid = tgt->sid;
        u32 r_a_tov = lport->r_a_tov;
        int rc;

        BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
        memset(&rec, 0, sizeof(rec));

        cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
        if (!cb_arg) {
                printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
                rc = -ENOMEM;
                goto rec_err;
        }
        kref_get(&orig_io_req->refcount);

        cb_arg->aborted_io_req = orig_io_req;

        rec.rec_cmd = ELS_REC;
        hton24(rec.rec_s_id, sid);
        rec.rec_ox_id = htons(orig_io_req->xid);
        rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

        rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
                                 bnx2fc_rec_compl, cb_arg,
                                 r_a_tov);
rec_err:
        if (rc) {
                BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
                spin_lock_bh(&tgt->tgt_lock);
                kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
                spin_unlock_bh(&tgt->tgt_lock);
                kfree(cb_arg);
        }
        return rc;
}
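
/*
 * bnx2fc_send_srr() - send a Sequence Retransmission Request. SRR is an
 * FC-4 link service carried on type FCP rather than a plain ELS (see
 * the FC header setup in bnx2fc_initiate_els()). The offset and r_ctl
 * are saved on the original IO so a timed-out SRR can be retried with
 * identical parameters.
 */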
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
        struct fcp_srr srr;
        struct bnx2fc_rport *tgt = orig_io_req->tgt;
        struct fc_lport *lport = tgt->rdata->local_port;
        struct bnx2fc_els_cb_arg *cb_arg = NULL;
        u32 r_a_tov = lport->r_a_tov;
        int rc;

        BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
        memset(&srr, 0, sizeof(srr));

        cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
        if (!cb_arg) {
                printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
                rc = -ENOMEM;
                goto srr_err;
        }
        kref_get(&orig_io_req->refcount);

        cb_arg->aborted_io_req = orig_io_req;

        srr.srr_op = ELS_SRR;
        srr.srr_ox_id = htons(orig_io_req->xid);
        srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
        srr.srr_rel_off = htonl(offset);
        srr.srr_r_ctl = r_ctl;
        orig_io_req->srr_offset = offset;
        orig_io_req->srr_rctl = r_ctl;

        rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
                                 bnx2fc_srr_compl, cb_arg,
                                 r_a_tov);
srr_err:
        if (rc) {
                BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
                spin_lock_bh(&tgt->tgt_lock);
                kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
                spin_unlock_bh(&tgt->tgt_lock);
                kfree(cb_arg);
        } else
                set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

        return rc;
}
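
/*
 * bnx2fc_initiate_els() - common transmit path for all offloaded ELS
 * requests: validates rport/lport/session state, allocates a
 * middle-path (MP) request, copies in the payload, builds the FC header
 * (SRR goes out as an FC-4 request on type FCP), initializes the task
 * context, arms the optional timer, posts the request on the send queue
 * and rings the doorbell. On failure the caller still owns cb_arg and
 * is responsible for freeing it.
 */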
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
                               void *data, u32 data_len,
                               void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
                               struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
        struct fcoe_port *port = tgt->port;
        struct bnx2fc_interface *interface = port->priv;
        struct fc_rport *rport = tgt->rport;
        struct fc_lport *lport = port->lport;
        struct bnx2fc_cmd *els_req;
        struct bnx2fc_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
        struct fcoe_task_ctx_entry *task;
        struct fcoe_task_ctx_entry *task_page;
        int rc = 0;
        int task_idx, index;
        u32 did, sid;
        u16 xid;

        rc = fc_remote_port_chkready(rport);
        if (rc) {
                printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
                rc = -EINVAL;
                goto els_err;
        }
        if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
                printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
                rc = -EINVAL;
                goto els_err;
        }
        if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
                printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
                rc = -EINVAL;
                goto els_err;
        }
        els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
        if (!els_req) {
                rc = -ENOMEM;
                goto els_err;
        }

        els_req->sc_cmd = NULL;
        els_req->port = port;
        els_req->tgt = tgt;
        els_req->cb_func = cb_func;
        cb_arg->io_req = els_req;
        els_req->cb_arg = cb_arg;
        els_req->data_xfer_len = data_len;

        mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
        rc = bnx2fc_init_mp_req(els_req);
        if (rc == FAILED) {
                printk(KERN_ERR PFX "ELS MP request init failed\n");
                spin_lock_bh(&tgt->tgt_lock);
                kref_put(&els_req->refcount, bnx2fc_cmd_release);
                spin_unlock_bh(&tgt->tgt_lock);
                rc = -ENOMEM;
                goto els_err;
        } else {
                /* rc SUCCESS */
                rc = 0;
        }

        /* Set the data_xfer_len to the size of ELS payload */
        mp_req->req_len = data_len;
        els_req->data_xfer_len = mp_req->req_len;

        /* Fill ELS Payload */
        if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
                memcpy(mp_req->req_buf, data, data_len);
        } else {
                printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
                els_req->cb_func = NULL;
                els_req->cb_arg = NULL;
                spin_lock_bh(&tgt->tgt_lock);
                kref_put(&els_req->refcount, bnx2fc_cmd_release);
                spin_unlock_bh(&tgt->tgt_lock);
                rc = -EINVAL;
        }

        if (rc)
                goto els_err;

        /* Fill FC header */
        fc_hdr = &(mp_req->req_fc_hdr);

        did = tgt->rport->port_id;
        sid = tgt->sid;

        if (op == ELS_SRR)
                __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
                                 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
                                 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
        else
                __fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
                                 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
                                 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

        /* Obtain exchange id */
        xid = els_req->xid;
        task_idx = xid / BNX2FC_TASKS_PER_PAGE;
        index = xid % BNX2FC_TASKS_PER_PAGE;

        /* Initialize task context for this IO request */
        task_page = (struct fcoe_task_ctx_entry *)
                        interface->hba->task_ctx[task_idx];
        task = &(task_page[index]);
        bnx2fc_init_mp_task(els_req, task);

        spin_lock_bh(&tgt->tgt_lock);

        if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
                printk(KERN_ERR PFX "initiate_els.. session not ready\n");
                els_req->cb_func = NULL;
                els_req->cb_arg = NULL;
                kref_put(&els_req->refcount, bnx2fc_cmd_release);
                spin_unlock_bh(&tgt->tgt_lock);
                return -EINVAL;
        }

        if (timer_msec)
                bnx2fc_cmd_timer_set(els_req, timer_msec);
        bnx2fc_add_2_sq(tgt, xid);

        els_req->on_active_queue = 1;
        list_add_tail(&els_req->link, &tgt->els_queue);

        /* Ring doorbell */
        bnx2fc_ring_doorbell(tgt);
        spin_unlock_bh(&tgt->tgt_lock);

els_err:
        return rc;
}
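
/*
 * bnx2fc_process_els_compl() - interrupt-path completion of an
 * offloaded ELS. BNX2FC_FLAG_ELS_DONE resolves the race against the
 * timeout handler; the response FC header is copied out of the task
 * context one big-endian u64 at a time before the per-request callback
 * runs.
 */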
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
                              struct fcoe_task_ctx_entry *task, u8 num_rq)
{
        struct bnx2fc_mp_req *mp_req;
        struct fc_frame_header *fc_hdr;
        u64 *hdr;
        u64 *temp_hdr;

        BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x "
                       "cmd_type = %d\n", els_req->xid, els_req->cmd_type);

        if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
                             &els_req->req_flags)) {
                BNX2FC_ELS_DBG("Timer context finished processing this "
                               "els - 0x%x\n", els_req->xid);
                /* This IO doesn't receive cleanup completion */
                kref_put(&els_req->refcount, bnx2fc_cmd_release);
                return;
        }

        /* Cancel the timeout_work, as we received the response */
        if (cancel_delayed_work(&els_req->timeout_work))
                kref_put(&els_req->refcount,
                         bnx2fc_cmd_release); /* drop timer hold */

        if (els_req->on_active_queue) {
                list_del_init(&els_req->link);
                els_req->on_active_queue = 0;
        }

        mp_req = &(els_req->mp_req);
        fc_hdr = &(mp_req->resp_fc_hdr);

        hdr = (u64 *)fc_hdr;
        temp_hdr = (u64 *)
                &task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
        hdr[0] = cpu_to_be64(temp_hdr[0]);
        hdr[1] = cpu_to_be64(temp_hdr[1]);
        hdr[2] = cpu_to_be64(temp_hdr[2]);

        mp_req->resp_len =
                task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

        /* Parse ELS response */
        if ((els_req->cb_func) && (els_req->cb_arg)) {
                els_req->cb_func(els_req->cb_arg);
                els_req->cb_arg = NULL;
        }

        kref_put(&els_req->refcount, bnx2fc_cmd_release);
}
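
/*
 * bnx2fc_flogi_resp() - FLOGI/FDISC response handler installed by
 * bnx2fc_elsct_send(). Until FIP has granted a MAC, the response is fed
 * to fcoe_ctlr_recv_flogi() (and an LS_RJT terminates the vport); once
 * a MAC is granted it is programmed via fip->update_mac() before the
 * frame is handed back to libfc.
 */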
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
                              void *arg)
{
        struct fcoe_ctlr *fip = arg;
        struct fc_exch *exch = fc_seq_exch(seq);
        struct fc_lport *lport = exch->lp;
        u8 *mac;
        u8 op;

        if (IS_ERR(fp))
                goto done;

        mac = fr_cb(fp)->granted_mac;
        if (is_zero_ether_addr(mac)) {
                op = fc_frame_payload_op(fp);
                if (lport->vport) {
                        if (op == ELS_LS_RJT) {
                                printk(KERN_ERR PFX "bnx2fc_flogi_resp is LS_RJT\n");
                                fc_vport_terminate(lport->vport);
                                fc_frame_free(fp);
                                return;
                        }
                }
                fcoe_ctlr_recv_flogi(fip, lport, fp);
        }
        if (!is_zero_ether_addr(mac))
                fip->update_mac(lport, mac);
done:
        fc_lport_flogi_resp(seq, fp, lport);
}
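
/*
 * bnx2fc_logo_resp() - fabric LOGO response handler; clears the
 * FIP-granted MAC before passing the response to libfc.
 */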
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
                             void *arg)
{
        struct fcoe_ctlr *fip = arg;
        struct fc_exch *exch = fc_seq_exch(seq);
        struct fc_lport *lport = exch->lp;
        static u8 zero_mac[ETH_ALEN] = { 0 };

        if (!IS_ERR(fp))
                fip->update_mac(lport, zero_mac);
        fc_lport_logo_resp(seq, fp, lport);
}
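
/*
 * bnx2fc_elsct_send() - ELS/CT transmit hook the driver registers with
 * libfc (outside this file). Intercepts FLOGI/FDISC and fabric LOGO so
 * the FIP controller can track the granted MAC address; all other
 * requests go straight through to fc_elsct_send().
 */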
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
                                 struct fc_frame *fp, unsigned int op,
                                 void (*resp)(struct fc_seq *,
                                              struct fc_frame *,
                                              void *),
                                 void *arg, u32 timeout)
{
        struct fcoe_port *port = lport_priv(lport);
        struct bnx2fc_interface *interface = port->priv;
        struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
        struct fc_frame_header *fh = fc_frame_header_get(fp);

        switch (op) {
        case ELS_FLOGI:
        case ELS_FDISC:
                return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
                                     fip, timeout);
        case ELS_LOGO:
                /* only hook onto fabric logouts, not port logouts */
                if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
                        break;
                return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
                                     fip, timeout);
        }
        return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}