snic_main.c

/*
 * Copyright 2014 Cisco Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "snic.h"
#include "snic_fwint.h"

#define PCI_DEVICE_ID_CISCO_SNIC	0x0046

/* Supported devices by snic module */
static struct pci_device_id snic_id_table[] = {
	{PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) },
	{ 0, }	/* end of table */
};

unsigned int snic_log_level = 0x0;
module_param(snic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels");

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
unsigned int snic_trace_max_pages = 16;
module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(snic_trace_max_pages,
		 "Total allocated memory pages for snic trace buffer");
#endif

unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH;
module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN");

/*
 * snic_slave_alloc : callback function to SCSI Mid Layer, called on
 * scsi device initialization.
 */
static int
snic_slave_alloc(struct scsi_device *sdev)
{
	struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev));

	if (!tgt || snic_tgt_chkready(tgt))
		return -ENXIO;

	return 0;
}

/*
 * snic_slave_configure : callback function to SCSI Mid Layer, called
 * after slave_alloc to configure the scsi device (queue depth, timeout).
 */
static int
snic_slave_configure(struct scsi_device *sdev)
{
	struct snic *snic = shost_priv(sdev->host);
	u32 qdepth = 0, max_ios = 0;
	int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ;

	/* Set Queue Depth */
	max_ios = snic_max_qdepth;
	qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH);
	scsi_change_queue_depth(sdev, qdepth);

	if (snic->fwinfo.io_tmo > 1)
		tmo = snic->fwinfo.io_tmo * HZ;

	/* FW requires extended timeouts */
	blk_queue_rq_timeout(sdev->request_queue, tmo);

	return 0;
}
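
/*
 * snic_change_queue_depth : called when the LUN queue depth is changed
 * (e.g. via sysfs); clamps the requested depth to SNIC_MAX_QUEUE_DEPTH
 * and returns the depth actually set.
 */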
static int
snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	int qsz = 0;

	qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);
	scsi_change_queue_depth(sdev, qsz);
	SNIC_INFO("QDepth Changed to %d\n", sdev->queue_depth);

	return sdev->queue_depth;
}

static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,
	.use_clustering = ENABLE_CLUSTERING,
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,
	.shost_attrs = snic_attrs,
	.track_queue_depth = 1,
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};

/*
 * snic_handle_link_event : Handles link events such as link up/down/error
 */
void
snic_handle_link_event(struct snic *snic)
{
	unsigned long flags;

	spin_lock_irqsave(&snic->snic_lock, flags);
	if (snic->stop_link_events) {
		spin_unlock_irqrestore(&snic->snic_lock, flags);

		return;
	}
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	queue_work(snic_glob->event_q, &snic->link_work);
} /* end of snic_handle_link_event */

/*
 * snic_notify_set : sets notification area
 * This notification area is used to receive events from fw.
 * Note: snic supports only MSI-X interrupts, so svnic_dev_notify_set can
 * be called directly.
 */
static int
snic_notify_set(struct snic *snic)
{
	int ret = 0;
	enum vnic_dev_intr_mode intr_mode;

	intr_mode = svnic_dev_get_intr_mode(snic->vdev);

	if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) {
		ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY);
	} else {
		SNIC_HOST_ERR(snic->shost,
			      "Interrupt mode should be setup before devcmd notify set %d\n",
			      intr_mode);
		ret = -1;
	}

	return ret;
} /* end of snic_notify_set */

/*
 * snic_dev_wait : polls vnic open status.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
	      int (*start)(struct vnic_dev *, int),
	      int (*finished)(struct vnic_dev *, int *),
	      int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible takes a long time
	 * to wake up, which results in skipping the retry. The retry
	 * counter ensures we retry at least two times.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;

		schedule_timeout_uninterruptible(HZ / 10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */

/*
 * snic_cleanup : called by snic_remove
 * Stops the snic device and masks all interrupts. Completed CQ entries
 * are drained; posted WQ/RQ/Copy-WQ entries are cleaned up.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */

static void
snic_iounmap(struct snic *snic)
{
	if (snic->bar0.vaddr)
		iounmap(snic->bar0.vaddr);
}

/*
 * snic_vdev_open_done : polls for svnic_dev_open cmd completion.
 */
static int
snic_vdev_open_done(struct vnic_dev *vdev, int *done)
{
	struct snic *snic = svnic_dev_priv(vdev);
	int ret;
	int nretries = 5;
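
	/* Poll the open-done devcmd a bounded number of times before giving up */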
	do {
		ret = svnic_dev_open_done(vdev, done);
		if (ret == 0)
			break;

		SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n");
	} while (nretries--);

	return ret;
} /* end of snic_vdev_open_done */

/*
 * snic_add_host : registers scsi host with ML
 */
static int
snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev)
{
	int ret = 0;

	ret = scsi_add_host(shost, &pdev->dev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "snic: scsi_add_host failed. %d\n",
			      ret);

		return ret;
	}

	SNIC_BUG_ON(shost->work_q != NULL);
	snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d",
		 shost->host_no);
	shost->work_q = create_singlethread_workqueue(shost->work_q_name);
	if (!shost->work_q) {
		SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");

		ret = -ENOMEM;
	}

	return ret;
} /* end of snic_add_host */

static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}
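
/*
 * snic_get_state/snic_set_state : read and update the adapter state
 * (e.g. SNIC_INIT, SNIC_ONLINE, SNIC_OFFLINE); state transitions are logged.
 */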
int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}

void
snic_set_state(struct snic *snic, enum snic_state state)
{
	SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n",
		       snic_state_to_str(snic_get_state(snic)),
		       snic_state_to_str(state));

	atomic_set(&snic->state, state);
}

/*
 * snic_probe : Initialize the snic interface.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate SCSI Host and setup association between host and snic
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}
	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init */
	ret = snic_stats_debugfs_init(snic);
	if (ret) {
		SNIC_HOST_ERR(snic->shost,
			      "Failed to initialize debugfs stats\n");
		snic_stats_debugfs_remove(snic);
	}
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI Controller on system for DMA addressing
	 * limitation for the device. Try 43-bit first, and
	 * fall back to 32-bit.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
	if (ret) {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);

			goto err_rel_regions;
		}

		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	} else {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
				      ret);

			goto err_rel_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /* defined in scsi_cmnd.h */

	snic_get_res_counts(snic);

	/*
	 * Assumption: Only MSIx is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_notify_unset;
	}

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	snic_set_state(snic, SNIC_ONLINE);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;
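
/* Error unwind: release the resources acquired above, in reverse order */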
err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	snic_del_host(snic->shost);

err_notify_unset:
	svnic_dev_notify_unset(snic->vdev);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */

/*
 * snic_remove : invoked on unbinding the interface to clean up the
 * resources allocated in snic_probe on initialization.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue.
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device and masks all interrupts. Completed
	 * CQ entries are drained; posted WQ/RQ/Copy-WQ entries are
	 * cleaned up.
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (contiguous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */

struct snic_global *snic_glob;

/*
 * snic_global_data_init : Initialize SNIC Global Data
 * Note: all global lists and variables should be part of the global data;
 * this helps in debugging.
 */
static int
snic_global_data_init(void)
{
	int ret = 0;
	struct kmem_cache *cachep;
	ssize_t len = 0;

	snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL);

	if (!snic_glob) {
		SNIC_ERR("Failed to allocate Global Context.\n");

		ret = -ENOMEM;
		goto gdi_end;
	}

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Debugfs related Initialization */
	/* Create debugfs entries for snic */
	ret = snic_debugfs_init();
	if (ret < 0) {
		SNIC_ERR("Failed to create debugfs dir for tracing and stats.\n");
		snic_debugfs_term();
		/* continue even if it fails */
	}

	/* Trace related Initialization */
	/* Allocate memory for trace buffer */
	ret = snic_trc_init();
	if (ret < 0) {
		SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n");
		snic_trc_free();
		/* continue even if it fails */
	}
#endif

	INIT_LIST_HEAD(&snic_glob->snic_list);
	spin_lock_init(&snic_glob->snic_list_lock);

	/* Create a cache for allocation of snic_host_req+default size ESGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl);
	cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic default sgl slab\n");
		ret = -ENOMEM;

		goto err_dflt_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep;

	/* Create a cache for allocation of max size Extended SGLs */
	len = sizeof(struct snic_req_info);
	len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl);
	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic max sgl slab\n");
		ret = -ENOMEM;

		goto err_max_req_slab;
	}
	snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep;
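
	/* Cache for task-management requests: a bare snic_host_req, no SGL */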
	len = sizeof(struct snic_host_req);
	cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN,
				   SLAB_HWCACHE_ALIGN, NULL);
	if (!cachep) {
		SNIC_ERR("Failed to create snic tm req slab\n");
		ret = -ENOMEM;

		goto err_tmreq_slab;
	}
	snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep;

	/* snic_event queue */
	snic_glob->event_q = create_singlethread_workqueue("snic_event_wq");
	if (!snic_glob->event_q) {
		SNIC_ERR("snic event queue create failed\n");
		ret = -ENOMEM;

		goto err_eventq;
	}

	return ret;

err_eventq:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);

err_tmreq_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);

err_max_req_slab:
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

err_dflt_req_slab:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_trc_free();
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;

gdi_end:
	return ret;
} /* end of snic_glob_init */

/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */

static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};

static int __init
snic_init_module(void)
{
	int ret = 0;

#ifndef __x86_64__
	SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n");
	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
#endif

	SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION);

	ret = snic_global_data_init();
	if (ret) {
		SNIC_ERR("Failed to Initialize Global Data.\n");

		return ret;
	}

	ret = pci_register_driver(&snic_driver);
	if (ret < 0) {
		SNIC_ERR("PCI driver register error\n");

		goto err_pci_reg;
	}

	return ret;

err_pci_reg:
	snic_global_data_cleanup();

	return ret;
}

static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}

module_init(snic_init_module);
module_exit(snic_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");