/*
 * snic_disc.c : SNIC target discovery
 */
  1. /*
  2. * Copyright 2014 Cisco Systems, Inc. All rights reserved.
  3. *
  4. * This program is free software; you may redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; version 2 of the License.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  9. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  10. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  11. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  12. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  13. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  14. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  15. * SOFTWARE.
  16. */
  17. #include <linux/errno.h>
  18. #include <linux/mempool.h>
  19. #include <scsi/scsi_tcq.h>
  20. #include "snic_disc.h"
  21. #include "snic.h"
  22. #include "snic_io.h"
  23. /* snic target types */
  24. static const char * const snic_tgt_type_str[] = {
  25. [SNIC_TGT_DAS] = "DAS",
  26. [SNIC_TGT_SAN] = "SAN",
  27. };
  28. static inline const char *
  29. snic_tgt_type_to_str(int typ)
  30. {
  31. return ((typ > SNIC_TGT_NONE && typ <= SNIC_TGT_SAN) ?
  32. snic_tgt_type_str[typ] : "Unknown");
  33. }
  34. static const char * const snic_tgt_state_str[] = {
  35. [SNIC_TGT_STAT_INIT] = "INIT",
  36. [SNIC_TGT_STAT_ONLINE] = "ONLINE",
  37. [SNIC_TGT_STAT_OFFLINE] = "OFFLINE",
  38. [SNIC_TGT_STAT_DEL] = "DELETION IN PROGRESS",
  39. };
  40. const char *
  41. snic_tgt_state_to_str(int state)
  42. {
  43. return ((state >= SNIC_TGT_STAT_INIT && state <= SNIC_TGT_STAT_DEL) ?
  44. snic_tgt_state_str[state] : "UNKNOWN");
  45. }
/*
 * snic_report_tgt_init : initializes a REPORT_TGTS request descriptor.
 *
 * Encodes the firmware I/O header as an untagged request (SCSI_NO_TAG)
 * and attaches a single-entry scatter-gather list pointing at the
 * DMA-mapped response buffer the firmware fills with target IDs.
 *
 * @req        : host request descriptor to fill in
 * @hid        : host id encoded into the I/O header
 * @buf        : virtual address of the response buffer (not referenced in
 *               the body; kept for interface symmetry with the caller)
 * @len        : response buffer length in bytes
 * @rsp_buf_pa : DMA (bus) address of the response buffer
 * @ctx        : driver context (rqi pointer) echoed back on completion
 */
static void
snic_report_tgt_init(struct snic_host_req *req, u32 hid, u8 *buf, u32 len,
		     dma_addr_t rsp_buf_pa, ulong ctx)
{
	struct snic_sg_desc *sgd = NULL;

	/* Untagged request: 1 SG entry; ctx returned in the completion. */
	snic_io_hdr_enc(&req->hdr, SNIC_REQ_REPORT_TGTS, 0, SCSI_NO_TAG, hid,
			1, ctx);

	req->u.rpt_tgts.sg_cnt = cpu_to_le16(1);
	sgd = req_to_sgl(req);
	sgd[0].addr = cpu_to_le64(rsp_buf_pa);
	sgd[0].len = cpu_to_le32(len);
	sgd[0]._resvd = 0;
	/* The SG list lives inside the request descriptor itself. */
	req->u.rpt_tgts.sg_addr = cpu_to_le64((ulong)sgd);
}
/*
 * snic_queue_report_tgt_req : Queues report target request.
 *
 * Allocates an untagged request and a DMA-able response buffer, builds a
 * REPORT_TGTS descriptor and posts it on the WQ. On success, ownership of
 * the buffer passes to the completion path (snic_report_tgt_cmpl_handler),
 * which finds it via rqi->sge_va.
 *
 * Returns 0 on success; -ENOMEM, -EINVAL or a queueing error otherwise.
 */
static int
snic_queue_report_tgt_req(struct snic *snic)
{
	struct snic_req_info *rqi = NULL;
	u32 ntgts, buf_len = 0;
	u8 *buf = NULL;
	dma_addr_t pa = 0;
	int ret = 0;

	rqi = snic_req_init(snic, 1);	/* request with one SG entry */
	if (!rqi) {
		ret = -ENOMEM;
		goto error;
	}

	/* Size the buffer by fw limit capped to the midlayer's max_id. */
	if (snic->fwinfo.max_tgts)
		ntgts = min_t(u32, snic->fwinfo.max_tgts, snic->shost->max_id);
	else
		ntgts = snic->shost->max_id;

	/* Allocate Response Buffer */
	SNIC_BUG_ON(ntgts == 0);
	buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;

	buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
	if (!buf) {
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");

		ret = -ENOMEM;
		goto error;
	}

	SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);

	pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(snic->pdev, pa)) {
		kfree(buf);
		snic_req_free(snic, rqi);
		SNIC_HOST_ERR(snic->shost,
			      "Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
			      buf);

		ret = -EINVAL;
		goto error;
	}

	SNIC_BUG_ON(pa == 0);

	/* Stash the virtual addr so the completion can unmap/free it. */
	rqi->sge_va = (ulong) buf;

	snic_report_tgt_init(rqi->req,
			     snic->config.hid,
			     buf,
			     buf_len,
			     pa,
			     (ulong)rqi);

	snic_handle_untagged_req(snic, rqi);

	ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
	if (ret) {
		/* Full unwind: unmap, free buffer, release the request. */
		pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
		kfree(buf);
		rqi->sge_va = 0;
		snic_release_untagged_req(snic, rqi);
		SNIC_HOST_ERR(snic->shost, "Queuing Report Tgts Failed.\n");

		goto error;
	}

	SNIC_DISC_DBG(snic->shost, "Report Targets Issued.\n");

	return ret;

error:
	SNIC_HOST_ERR(snic->shost,
		      "Queuing Report Targets Failed, err = %d\n",
		      ret);

	return ret;
} /* end of snic_queue_report_tgt_req */
/*
 * snic_scsi_scan_tgt : scan_work handler; calls into SML to scan the
 * target's LUNs, then clears the target's SCAN_PENDING flag under the
 * host lock.
 */
static void
snic_scsi_scan_tgt(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, scan_work);
	struct Scsi_Host *shost = dev_to_shost(&tgt->dev);
	unsigned long flags;

	SNIC_HOST_INFO(shost, "Scanning Target id 0x%x\n", tgt->id);
	scsi_scan_target(&tgt->dev,
			 tgt->channel,
			 tgt->scsi_tgt_id,
			 SCAN_WILD_CARD,	/* all LUNs */
			 1);

	spin_lock_irqsave(shost->host_lock, flags);
	tgt->flags &= ~SNIC_TGT_SCAN_PENDING;
	spin_unlock_irqrestore(shost->host_lock, flags);
} /* end of snic_scsi_scan_tgt */
  147. /*
  148. * snic_tgt_lookup :
  149. */
  150. static struct snic_tgt *
  151. snic_tgt_lookup(struct snic *snic, struct snic_tgt_id *tgtid)
  152. {
  153. struct list_head *cur, *nxt;
  154. struct snic_tgt *tgt = NULL;
  155. list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
  156. tgt = list_entry(cur, struct snic_tgt, list);
  157. if (tgt->id == le32_to_cpu(tgtid->tgt_id))
  158. return tgt;
  159. tgt = NULL;
  160. }
  161. return tgt;
  162. } /* end of snic_tgt_lookup */
  163. /*
  164. * snic_tgt_dev_release : Called on dropping last ref for snic_tgt object
  165. */
  166. void
  167. snic_tgt_dev_release(struct device *dev)
  168. {
  169. struct snic_tgt *tgt = dev_to_tgt(dev);
  170. SNIC_HOST_INFO(snic_tgt_to_shost(tgt),
  171. "Target Device ID %d (%s) Permanently Deleted.\n",
  172. tgt->id,
  173. dev_name(dev));
  174. SNIC_BUG_ON(!list_empty(&tgt->list));
  175. kfree(tgt);
  176. }
/*
 * snic_tgt_del : work function to delete snic_tgt
 *
 * The ordering here is deliberate: finish any pending scan, block new
 * IOs, abort outstanding ones, unblock to flush, and only then tear the
 * target out of the SML device tree.
 */
static void
snic_tgt_del(struct work_struct *work)
{
	struct snic_tgt *tgt = container_of(work, struct snic_tgt, del_work);
	struct Scsi_Host *shost = snic_tgt_to_shost(tgt);

	/* Let an in-flight scan of this target complete first. */
	if (tgt->flags & SNIC_TGT_SCAN_PENDING)
		scsi_flush_work(shost);

	/* Block IOs on child devices, stops new IOs */
	scsi_target_block(&tgt->dev);

	/* Cleanup IOs */
	snic_tgt_scsi_abort_io(tgt);

	/* Unblock IOs now, to flush if there are any. */
	scsi_target_unblock(&tgt->dev, SDEV_TRANSPORT_OFFLINE);

	/* Delete SCSI Target and sdevs */
	scsi_remove_target(&tgt->dev); /* ?? */
	device_del(&tgt->dev);
	/* May drop the last ref, freeing tgt via snic_tgt_dev_release(). */
	put_device(&tgt->dev);
} /* end of snic_tgt_del */
  198. /* snic_tgt_create: checks for existence of snic_tgt, if it doesn't
  199. * it creates one.
  200. */
  201. static struct snic_tgt *
  202. snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid)
  203. {
  204. struct snic_tgt *tgt = NULL;
  205. unsigned long flags;
  206. int ret;
  207. tgt = snic_tgt_lookup(snic, tgtid);
  208. if (tgt) {
  209. /* update the information if required */
  210. return tgt;
  211. }
  212. tgt = kzalloc(sizeof(*tgt), GFP_KERNEL);
  213. if (!tgt) {
  214. SNIC_HOST_ERR(snic->shost, "Failure to allocate snic_tgt.\n");
  215. ret = -ENOMEM;
  216. return tgt;
  217. }
  218. INIT_LIST_HEAD(&tgt->list);
  219. tgt->id = le32_to_cpu(tgtid->tgt_id);
  220. tgt->channel = 0;
  221. SNIC_BUG_ON(le16_to_cpu(tgtid->tgt_type) > SNIC_TGT_SAN);
  222. tgt->tdata.typ = le16_to_cpu(tgtid->tgt_type);
  223. /*
  224. * Plugging into SML Device Tree
  225. */
  226. tgt->tdata.disc_id = 0;
  227. tgt->state = SNIC_TGT_STAT_INIT;
  228. device_initialize(&tgt->dev);
  229. tgt->dev.parent = get_device(&snic->shost->shost_gendev);
  230. tgt->dev.release = snic_tgt_dev_release;
  231. INIT_WORK(&tgt->scan_work, snic_scsi_scan_tgt);
  232. INIT_WORK(&tgt->del_work, snic_tgt_del);
  233. switch (tgt->tdata.typ) {
  234. case SNIC_TGT_DAS:
  235. dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
  236. snic->shost->host_no, tgt->channel, tgt->id);
  237. break;
  238. case SNIC_TGT_SAN:
  239. dev_set_name(&tgt->dev, "snic_san_tgt:%d:%d-%d",
  240. snic->shost->host_no, tgt->channel, tgt->id);
  241. break;
  242. default:
  243. SNIC_HOST_INFO(snic->shost, "Target type Unknown Detected.\n");
  244. dev_set_name(&tgt->dev, "snic_das_tgt:%d:%d-%d",
  245. snic->shost->host_no, tgt->channel, tgt->id);
  246. break;
  247. }
  248. spin_lock_irqsave(snic->shost->host_lock, flags);
  249. list_add_tail(&tgt->list, &snic->disc.tgt_list);
  250. tgt->scsi_tgt_id = snic->disc.nxt_tgt_id++;
  251. tgt->state = SNIC_TGT_STAT_ONLINE;
  252. spin_unlock_irqrestore(snic->shost->host_lock, flags);
  253. SNIC_HOST_INFO(snic->shost,
  254. "Tgt %d, type = %s detected. Adding..\n",
  255. tgt->id, snic_tgt_type_to_str(tgt->tdata.typ));
  256. ret = device_add(&tgt->dev);
  257. if (ret) {
  258. SNIC_HOST_ERR(snic->shost,
  259. "Snic Tgt: device_add, with err = %d\n",
  260. ret);
  261. put_device(&snic->shost->shost_gendev);
  262. kfree(tgt);
  263. tgt = NULL;
  264. return tgt;
  265. }
  266. SNIC_HOST_INFO(snic->shost, "Scanning %s.\n", dev_name(&tgt->dev));
  267. scsi_queue_work(snic->shost, &tgt->scan_work);
  268. return tgt;
  269. } /* end of snic_tgt_create */
/* Handler for discovery
 *
 * Workqueue handler (snic->tgt_work) for a REPORT_TGTS response: creates
 * a snic_tgt for each reported target id, then frees the response buffer
 * (disc.rtgt_info). If another discovery request arrived in the meantime
 * (disc.req_cnt != 0), this result is dropped and discovery restarts.
 */
void
snic_handle_tgt_disc(struct work_struct *work)
{
	struct snic *snic = container_of(work, struct snic, tgt_work);
	struct snic_tgt_id *tgtid = NULL;
	struct snic_tgt *tgt = NULL;
	unsigned long flags;
	int i;

	/* Host is being torn down: just free the response buffer. */
	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->in_remove) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);
		kfree(snic->disc.rtgt_info);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	mutex_lock(&snic->disc.mutex);
	/* Discover triggered during disc in progress */
	if (snic->disc.req_cnt) {
		snic->disc.state = SNIC_DISC_DONE;
		snic->disc.req_cnt = 0;
		mutex_unlock(&snic->disc.mutex);
		kfree(snic->disc.rtgt_info);
		snic->disc.rtgt_info = NULL;

		SNIC_HOST_INFO(snic->shost, "tgt_disc: Discovery restart.\n");
		/* Start Discovery Again */
		snic_disc_start(snic);

		return;
	}

	tgtid = (struct snic_tgt_id *)snic->disc.rtgt_info;
	SNIC_BUG_ON(snic->disc.rtgt_cnt == 0 || tgtid == NULL);

	for (i = 0; i < snic->disc.rtgt_cnt; i++) {
		tgt = snic_tgt_create(snic, &tgtid[i]);
		if (!tgt) {
			int buf_sz = snic->disc.rtgt_cnt * sizeof(*tgtid);

			SNIC_HOST_ERR(snic->shost, "Failed to create tgt.\n");
			snic_hex_dump("rpt_tgt_rsp", (char *)tgtid, buf_sz);
			break;
		}
	}

	/* tgtid still points at the buffer; it is freed after unlock. */
	snic->disc.rtgt_info = NULL;
	snic->disc.state = SNIC_DISC_DONE;
	mutex_unlock(&snic->disc.mutex);

	SNIC_HOST_INFO(snic->shost, "Discovery Completed.\n");

	kfree(tgtid);
} /* end of snic_handle_tgt_disc */
/*
 * snic_report_tgt_cmpl_handler : completion handler for REPORT_TGTS.
 *
 * Recovers the originating request (rqi) from the encoded context,
 * unmaps the DMA response buffer, and either hands the buffer off to
 * tgt_work for target creation or frees it when no targets were found.
 *
 * Returns 0 when processing was queued, 1 when no targets were reported.
 */
int
snic_report_tgt_cmpl_handler(struct snic *snic, struct snic_fw_req *fwreq)
{
	u8 typ, cmpl_stat;
	u32 cmnd_id, hid, tgt_cnt = 0;
	ulong ctx;
	struct snic_req_info *rqi = NULL;
	struct snic_tgt_id *tgtid;
	int i, ret = 0;

	/* ctx carries the rqi pointer encoded at request-build time. */
	snic_io_hdr_dec(&fwreq->hdr, &typ, &cmpl_stat, &cmnd_id, &hid, &ctx);
	rqi = (struct snic_req_info *) ctx;
	tgtid = (struct snic_tgt_id *) rqi->sge_va;

	tgt_cnt = le32_to_cpu(fwreq->u.rpt_tgts_cmpl.tgt_cnt);
	if (tgt_cnt == 0) {
		SNIC_HOST_ERR(snic->shost, "No Targets Found on this host.\n");
		ret = 1;

		goto end;
	}

	/* printing list of targets here */
	SNIC_HOST_INFO(snic->shost, "Target Count = %d\n", tgt_cnt);

	SNIC_BUG_ON(tgt_cnt > snic->fwinfo.max_tgts);

	for (i = 0; i < tgt_cnt; i++)
		SNIC_HOST_INFO(snic->shost,
			       "Tgt id = 0x%x\n",
			       le32_to_cpu(tgtid[i].tgt_id));

	/*
	 * Queue work for further processing,
	 * Response Buffer Memory is freed after creating targets
	 */
	snic->disc.rtgt_cnt = tgt_cnt;
	snic->disc.rtgt_info = (u8 *) tgtid;
	queue_work(snic_glob->event_q, &snic->tgt_work);
	ret = 0;

end:
	/* Unmap Response Buffer */
	snic_pci_unmap_rsp_buf(snic, rqi);
	if (ret)
		kfree(tgtid);	/* buffer was not handed off to tgt_work */

	rqi->sge_va = 0;
	snic_release_untagged_req(snic, rqi);

	return ret;
} /* end of snic_report_tgt_cmpl_handler */
  358. /* Discovery init fn */
  359. void
  360. snic_disc_init(struct snic_disc *disc)
  361. {
  362. INIT_LIST_HEAD(&disc->tgt_list);
  363. mutex_init(&disc->mutex);
  364. disc->disc_id = 0;
  365. disc->nxt_tgt_id = 0;
  366. disc->state = SNIC_DISC_INIT;
  367. disc->req_cnt = 0;
  368. disc->rtgt_cnt = 0;
  369. disc->rtgt_info = NULL;
  370. disc->cb = NULL;
  371. } /* end of snic_disc_init */
  372. /* Discovery, uninit fn */
  373. void
  374. snic_disc_term(struct snic *snic)
  375. {
  376. struct snic_disc *disc = &snic->disc;
  377. mutex_lock(&disc->mutex);
  378. if (disc->req_cnt) {
  379. disc->req_cnt = 0;
  380. SNIC_SCSI_DBG(snic->shost, "Terminating Discovery.\n");
  381. }
  382. mutex_unlock(&disc->mutex);
  383. }
  384. /*
  385. * snic_disc_start: Discovery Start ...
  386. */
  387. int
  388. snic_disc_start(struct snic *snic)
  389. {
  390. struct snic_disc *disc = &snic->disc;
  391. int ret = 0;
  392. SNIC_SCSI_DBG(snic->shost, "Discovery Start.\n");
  393. mutex_lock(&disc->mutex);
  394. if (disc->state == SNIC_DISC_PENDING) {
  395. disc->req_cnt++;
  396. mutex_unlock(&disc->mutex);
  397. return ret;
  398. }
  399. disc->state = SNIC_DISC_PENDING;
  400. mutex_unlock(&disc->mutex);
  401. ret = snic_queue_report_tgt_req(snic);
  402. if (ret)
  403. SNIC_HOST_INFO(snic->shost, "Discovery Failed, err=%d.\n", ret);
  404. return ret;
  405. } /* end of snic_disc_start */
  406. /*
  407. * snic_disc_work :
  408. */
  409. void
  410. snic_handle_disc(struct work_struct *work)
  411. {
  412. struct snic *snic = container_of(work, struct snic, disc_work);
  413. int ret = 0;
  414. SNIC_HOST_INFO(snic->shost, "disc_work: Discovery\n");
  415. ret = snic_disc_start(snic);
  416. if (ret)
  417. goto disc_err;
  418. disc_err:
  419. SNIC_HOST_ERR(snic->shost,
  420. "disc_work: Discovery Failed w/ err = %d\n",
  421. ret);
  422. } /* end of snic_disc_work */
/*
 * snic_tgt_del_all : cleanup all snic targets
 * Called on unbinding the interface
 *
 * Unlinks every target under host_lock (nested inside disc.mutex) and
 * queues its del_work; flushes scan work before releasing the mutex.
 */
void
snic_tgt_del_all(struct snic *snic)
{
	struct snic_tgt *tgt = NULL;
	struct list_head *cur, *nxt;
	unsigned long flags;

	mutex_lock(&snic->disc.mutex);
	spin_lock_irqsave(snic->shost->host_lock, flags);

	/* _safe iterator: entries are unlinked while walking the list. */
	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
		tgt = list_entry(cur, struct snic_tgt, list);
		tgt->state = SNIC_TGT_STAT_DEL;
		list_del_init(&tgt->list);
		SNIC_HOST_INFO(snic->shost, "Tgt %d q'ing for del\n", tgt->id);
		queue_work(snic_glob->event_q, &tgt->del_work);
		tgt = NULL;
	}
	spin_unlock_irqrestore(snic->shost->host_lock, flags);

	/* Drain any queued scan work before dropping the mutex. */
	scsi_flush_work(snic->shost);
	mutex_unlock(&snic->disc.mutex);
} /* end of snic_tgt_del_all */