ql4_bsg.c

/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2011-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_bsg.h"
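
/**
 * qla4xxx_read_flash - copy a region of adapter flash to the reply payload
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Reads reply_payload.payload_len bytes of flash at the offset given in
 * vendor_cmd[1] into a DMA buffer and copies the data into the job's
 * reply payload scatterlist.
 **/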
static int
qla4xxx_read_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t offset = 0;
	uint32_t length = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_READING;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	length = bsg_job->reply_payload.payload_len;

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    flash, length);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
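
/**
 * qla4xxx_update_flash - write the request payload to adapter flash
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Copies the request payload into a DMA buffer and writes it to flash at
 * the offset given in vendor_cmd[1], using the options in vendor_cmd[2].
 **/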
static int
qla4xxx_update_flash(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t length = 0;
	uint32_t offset = 0;
	uint32_t options = 0;
	dma_addr_t flash_dma;
	uint8_t *flash = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (ha->flash_state != QLFLASH_WAITING) {
		ql4_printk(KERN_ERR, ha, "%s: another flash operation "
			   "active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	ha->flash_state = QLFLASH_WRITING;
	length = bsg_job->request_payload.payload_len;
	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
				   GFP_KERNEL);
	if (!flash) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, flash, length);

	rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
leave:
	ha->flash_state = QLFLASH_WAITING;
	return rval;
}
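
/**
 * qla4xxx_get_acb_state - return the IP state for an ACB/IP index pair
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Issues a get-IP-state mailbox command for the ACB index in vendor_cmd[1]
 * and the IP index in vendor_cmd[2], returning the mailbox status
 * registers in the reply payload.
 **/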
static int
qla4xxx_get_acb_state(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t status[MBOX_REG_COUNT];
	uint32_t acb_idx;
	uint32_t ip_idx;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	if (bsg_job->reply_payload.payload_len < sizeof(status)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
			   __func__, bsg_job->reply_payload.payload_len);
		rval = -EINVAL;
		goto leave;
	}

	acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];

	rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    status, sizeof(status));
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
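
/**
 * qla4xxx_read_nvram - copy a region of adapter NVRAM to the reply payload
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Valid only for 4010/4022/4032 adapters; the offset comes from
 * vendor_cmd[1] and offset+len is bounds-checked against the NVRAM size.
 **/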
static int
qla4xxx_read_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 40xx adapters are supported */
	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    nvram, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
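
/**
 * qla4xxx_update_nvram - write the request payload to adapter NVRAM
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Valid only for 4010/4022/4032 adapters; the offset comes from
 * vendor_cmd[1] and offset+len is bounds-checked against the NVRAM size.
 **/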
static int
qla4xxx_update_nvram(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t offset = 0;
	uint32_t len = 0;
	uint32_t total_len = 0;
	dma_addr_t nvram_dma;
	uint8_t *nvram = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->request_payload.payload_len;
	total_len = offset + len;

	/* total len should not be greater than max NVRAM size */
	if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
	    ((is_qla4022(ha) || is_qla4032(ha)) &&
	     total_len > QL40X2_NVRAM_SIZE)) {
		ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
			   " nvram size, offset=%d len=%d\n",
			   __func__, offset, len);
		goto leave;
	}

	nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
				   GFP_KERNEL);
	if (!nvram) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			  bsg_job->request_payload.sg_cnt, nvram, len);

	rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
leave:
	return rval;
}
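
/**
 * qla4xxx_restore_defaults - restore factory default settings
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Restores factory defaults for the region in vendor_cmd[1], with
 * field0/field1 taken from vendor_cmd[2] and vendor_cmd[3].
 **/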
static int
qla4xxx_restore_defaults(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t region = 0;
	uint32_t field0 = 0;
	uint32_t field1 = 0;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
	field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];

	rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: restore factory defaults failed\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else
		bsg_reply->result = DID_OK << 16;

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
leave:
	return rval;
}
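
/**
 * qla4xxx_bsg_get_acb - copy the address control block to the reply payload
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Fetches the ACB of the type given in vendor_cmd[1] into a DMA buffer
 * and copies it into the job's reply payload.
 **/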
static int
qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint32_t acb_type = 0;
	uint32_t len = 0;
	dma_addr_t acb_dma;
	uint8_t *acb = NULL;
	int rval = -EINVAL;

	bsg_reply->reply_payload_rcv_len = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto leave;

	/* Only 4022 and above adapters are supported */
	if (is_qla4010(ha))
		goto leave;

	if (ql4xxx_reset_active(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
		rval = -EBUSY;
		goto leave;
	}

	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	len = bsg_job->reply_payload.payload_len;
	if (len < sizeof(struct addr_ctrl_blk)) {
		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
			   __func__, len);
		rval = -EINVAL;
		goto leave;
	}

	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
	if (!acb) {
		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
			   "data\n", __func__);
		rval = -ENOMEM;
		goto leave;
	}

	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
	if (rval) {
		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
		bsg_reply->result = DID_ERROR << 16;
		rval = -EIO;
	} else {
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
					    bsg_job->reply_payload.sg_cnt,
					    acb, len);
		bsg_reply->result = DID_OK << 16;
	}

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
leave:
	return rval;
}
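
/**
 * ql4xxx_execute_diag_cmd - run a diagnostic mailbox command
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Issues the mailbox command passed in vendor_cmd[1..] and appends the
 * mailbox status registers to the bsg reply for the application.
 **/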
static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_diag_mem_test;
	}

	bsg_reply->reply_payload_rcv_len = 0;
	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

exit_diag_mem_test:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));

	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
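
/**
 * qla4_83xx_wait_for_loopback_config_comp - wait for port config completion
 * @ha: pointer to adapter structure
 * @wait_for_link: also wait for a LINK UP notification when set
 *
 * Waits for the IDC Complete notification (extending the timeout once if
 * idc_extend_tmo is set) and, optionally, for the LINK UP notification.
 **/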
static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha,
						   int wait_for_link)
{
	int status = QLA_SUCCESS;

	if (!wait_for_completion_timeout(&ha->idc_comp, (IDC_COMP_TOV * HZ))) {
		ql4_printk(KERN_INFO, ha, "%s: IDC Complete notification not received, Waiting for another %d timeout",
			   __func__, ha->idc_extend_tmo);
		if (ha->idc_extend_tmo) {
			if (!wait_for_completion_timeout(&ha->idc_comp,
						(ha->idc_extend_tmo * HZ))) {
				ha->notify_idc_comp = 0;
				ha->notify_link_up_comp = 0;
				ql4_printk(KERN_WARNING, ha, "%s: Aborting: IDC Complete notification not received",
					   __func__);
				status = QLA_ERROR;
				goto exit_wait;
			} else {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: IDC Complete notification received\n",
						  __func__));
			}
		}
	} else {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: IDC Complete notification received\n",
				  __func__));
	}
	ha->notify_idc_comp = 0;

	if (wait_for_link) {
		if (!wait_for_completion_timeout(&ha->link_up_comp,
						 (IDC_COMP_TOV * HZ))) {
			ha->notify_link_up_comp = 0;
			ql4_printk(KERN_WARNING, ha, "%s: Aborting: LINK UP notification not received",
				   __func__);
			status = QLA_ERROR;
			goto exit_wait;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: LINK UP notification received\n",
					  __func__));
		}
		ha->notify_link_up_comp = 0;
	}

exit_wait:
	return status;
}
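
/**
 * qla4_83xx_pre_loopback_config - set up the port for a loopback test
 * @ha: pointer to adapter structure
 * @mbox_cmd: mailbox command array for the requested diag test
 *
 * Enables internal or external loopback in the port config, disables
 * DCBX, and arms the IDC/link-up completion notifications.
 **/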
static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha,
					 uint32_t *mbox_cmd)
{
	uint32_t config = 0;
	int status = QLA_SUCCESS;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_pre_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Default port config=%08X\n",
			  __func__, config));

	if ((config & ENABLE_INTERNAL_LOOPBACK) ||
	    (config & ENABLE_EXTERNAL_LOOPBACK)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback diagnostics already in progress. Invalid request\n",
			   __func__);
		goto exit_pre_loopback_config;
	}

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config |= ENABLE_INTERNAL_LOOPBACK;

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config |= ENABLE_EXTERNAL_LOOPBACK;

	config &= ~ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: New port config=%08X\n",
			  __func__, config));

	ha->notify_idc_comp = 1;
	ha->notify_link_up_comp = 1;

	/* get the link state */
	qla4xxx_get_firmware_state(ha);

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ha->notify_idc_comp = 0;
		ha->notify_link_up_comp = 0;
		goto exit_pre_loopback_config;
	}
exit_pre_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}
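
/**
 * qla4_83xx_post_loopback_config - restore the port after a loopback test
 * @ha: pointer to adapter structure
 * @mbox_cmd: mailbox command array for the completed diag test
 *
 * Clears the loopback bits, re-enables DCBX, and schedules an adapter
 * reset if the port config cannot be restored.
 **/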
static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha,
					  uint32_t *mbox_cmd)
{
	int status = QLA_SUCCESS;
	uint32_t config = 0;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	status = qla4_83xx_get_port_config(ha, &config);
	if (status != QLA_SUCCESS)
		goto exit_post_loopback_config;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: port config=%08X\n", __func__,
			  config));

	if (mbox_cmd[1] == QL_DIAG_CMD_TEST_INT_LOOPBACK)
		config &= ~ENABLE_INTERNAL_LOOPBACK;
	else if (mbox_cmd[1] == QL_DIAG_CMD_TEST_EXT_LOOPBACK)
		config &= ~ENABLE_EXTERNAL_LOOPBACK;

	config |= ENABLE_DCBX;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Restore default port config=%08X\n", __func__,
			  config));

	ha->notify_idc_comp = 1;
	if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP)
		ha->notify_link_up_comp = 1;

	status = qla4_83xx_set_port_config(ha, &config);
	if (status != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "%s: Scheduling adapter reset\n",
			   __func__);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		clear_bit(AF_LOOPBACK, &ha->flags);
		goto exit_post_loopback_config;
	}
exit_post_loopback_config:
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: status = %s\n", __func__,
			  STATUS(status)));
	return status;
}
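
/**
 * qla4xxx_execute_diag_loopback_cmd - run a loopback diagnostic
 * @bsg_job: iscsi_bsg_job to handle
 *
 * On 83xx/84xx adapters the mailbox command is bracketed by the pre- and
 * post-loopback port configuration steps above; the mailbox status
 * registers are returned to the application in the bsg reply.
 **/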
static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	uint8_t *rsp_ptr = NULL;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int wait_for_link = 1;
	int status = QLA_ERROR;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	bsg_reply->reply_payload_rcv_len = 0;

	if (test_bit(AF_LOOPBACK, &ha->flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Loopback Diagnostics already in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
		ql4_printk(KERN_INFO, ha, "%s: Adapter reset in progress. Invalid Request\n",
			   __func__);
		bsg_reply->result = DID_ERROR << 16;
		goto exit_loopback_cmd;
	}

	memcpy(mbox_cmd, &bsg_req->rqst_data.h_vendor.vendor_cmd[1],
	       sizeof(uint32_t) * MBOX_REG_COUNT);

	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_pre_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto restore;
		}
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_cmd[0], mbox_cmd[1], mbox_cmd[2],
			  mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6],
			  mbox_cmd[7]));

	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
					 &mbox_sts[0]);

	if (status == QLA_SUCCESS)
		bsg_reply->result = DID_OK << 16;
	else
		bsg_reply->result = DID_ERROR << 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n",
			  __func__, mbox_sts[0], mbox_sts[1], mbox_sts[2],
			  mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6],
			  mbox_sts[7]));

	/* Send mbox_sts to application */
	bsg_job->reply_len = sizeof(struct iscsi_bsg_reply) + sizeof(mbox_sts);
	rsp_ptr = ((uint8_t *)bsg_reply) + sizeof(struct iscsi_bsg_reply);
	memcpy(rsp_ptr, mbox_sts, sizeof(mbox_sts));

restore:
	if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_post_loopback_config(ha, mbox_cmd);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_ERROR << 16;
			goto exit_loopback_cmd;
		}

		/* for pre_loopback_config() wait for LINK UP only
		 * if PHY LINK is UP */
		if (!(ha->addl_fw_state & FW_ADDSTATE_LINK_UP))
			wait_for_link = 0;

		status = qla4_83xx_wait_for_loopback_config_comp(ha,
								 wait_for_link);
		if (status != QLA_SUCCESS) {
			bsg_reply->result = DID_TIME_OUT << 16;
			goto exit_loopback_cmd;
		}
	}
exit_loopback_cmd:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: bsg_reply->result = x%x, status = %s\n",
			  __func__, bsg_reply->result, STATUS(status)));
	bsg_job_done(bsg_job, bsg_reply->result,
		     bsg_reply->reply_payload_rcv_len);
}
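
/**
 * qla4xxx_execute_diag_test - dispatch a QLISCSI_VND_DIAG_TEST request
 * @bsg_job: iscsi_bsg_job to handle
 *
 * Routes memory/flash diag tests and LED config commands to
 * ql4xxx_execute_diag_cmd() and loopback tests to
 * qla4xxx_execute_diag_loopback_cmd().
 **/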
static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	uint32_t diag_cmd;
	int rval = -EINVAL;

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: in\n", __func__));

	diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
	if (diag_cmd == MBOX_CMD_DIAG_TEST) {
		switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) {
		case QL_DIAG_CMD_TEST_DDR_SIZE:
		case QL_DIAG_CMD_TEST_DDR_RW:
		case QL_DIAG_CMD_TEST_ONCHIP_MEM_RW:
		case QL_DIAG_CMD_TEST_NVRAM:
		case QL_DIAG_CMD_TEST_FLASH_ROM:
		case QL_DIAG_CMD_TEST_DMA_XFER:
		case QL_DIAG_CMD_SELF_DDR_RW:
		case QL_DIAG_CMD_SELF_ONCHIP_MEM_RW:
			/* Execute diag test for adapter RAM/FLASH */
			ql4xxx_execute_diag_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to Application */
			rval = QLA_SUCCESS;
			break;

		case QL_DIAG_CMD_TEST_INT_LOOPBACK:
		case QL_DIAG_CMD_TEST_EXT_LOOPBACK:
			/* Execute diag test for Network */
			qla4xxx_execute_diag_loopback_cmd(bsg_job);
			/* Always return success as we want to send bsg_reply
			 * to Application */
			rval = QLA_SUCCESS;
			break;
		default:
			ql4_printk(KERN_ERR, ha, "%s: Invalid diag test: 0x%x\n",
				   __func__,
				   bsg_req->rqst_data.h_vendor.vendor_cmd[2]);
		}
	} else if ((diag_cmd == MBOX_CMD_SET_LED_CONFIG) ||
		   (diag_cmd == MBOX_CMD_GET_LED_CONFIG)) {
		ql4xxx_execute_diag_cmd(bsg_job);
		rval = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Invalid diag cmd: 0x%x\n",
			   __func__, diag_cmd);
	}

	return rval;
}

/**
 * qla4xxx_process_vendor_specific - handle vendor specific bsg request
 * @bsg_job: iscsi_bsg_job to handle
 **/
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
	case QLISCSI_VND_READ_FLASH:
		return qla4xxx_read_flash(bsg_job);

	case QLISCSI_VND_UPDATE_FLASH:
		return qla4xxx_update_flash(bsg_job);

	case QLISCSI_VND_GET_ACB_STATE:
		return qla4xxx_get_acb_state(bsg_job);

	case QLISCSI_VND_READ_NVRAM:
		return qla4xxx_read_nvram(bsg_job);

	case QLISCSI_VND_UPDATE_NVRAM:
		return qla4xxx_update_nvram(bsg_job);

	case QLISCSI_VND_RESTORE_DEFAULTS:
		return qla4xxx_restore_defaults(bsg_job);

	case QLISCSI_VND_GET_ACB:
		return qla4xxx_bsg_get_acb(bsg_job);

	case QLISCSI_VND_DIAG_TEST:
		return qla4xxx_execute_diag_test(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
			   "0x%x\n", __func__,
			   bsg_req->rqst_data.h_vendor.vendor_cmd[0]);
		bsg_reply->result = (DID_ERROR << 16);
		bsg_reply->reply_payload_rcv_len = 0;
		bsg_job_done(bsg_job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
		return -ENOSYS;
	}
}

/**
 * qla4xxx_bsg_request - handle bsg request from ISCSI transport
 * @bsg_job: iscsi_bsg_job to handle
 */
int qla4xxx_bsg_request(struct bsg_job *bsg_job)
{
	struct iscsi_bsg_request *bsg_req = bsg_job->request;
	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
	struct scsi_qla_host *ha = to_qla_host(host);

	switch (bsg_req->msgcode) {
	case ISCSI_BSG_HST_VENDOR:
		return qla4xxx_process_vendor_specific(bsg_job);

	default:
		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
			   __func__, bsg_req->msgcode);
	}
	return -ENOSYS;
}