am53c974.c

/*
 * AMD am53c974 driver.
 * Copyright (c) 2014 Hannes Reinecke, SUSE Linux GmbH
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME "am53c974"
#define DRV_MODULE_VERSION "1.00"

static bool am53c974_debug;
static bool am53c974_fenab = true;

#define esp_dma_log(f, a...)						\
	do {								\
		if (am53c974_debug)					\
			shost_printk(KERN_DEBUG, esp->host, f, ##a);	\
	} while (0)
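
/*
 * Registers of the on-chip DMA engine. These are register indices,
 * not byte offsets: the MMIO accessors below scale each index by
 * four, so e.g. ESP_DMA_CMD (0x10) lives at byte offset 0x40.
 */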
#define ESP_DMA_CMD	0x10
#define ESP_DMA_STC	0x11
#define ESP_DMA_SPA	0x12
#define ESP_DMA_WBC	0x13
#define ESP_DMA_WAC	0x14
#define ESP_DMA_STATUS	0x15
#define ESP_DMA_SMDLA	0x16
#define ESP_DMA_WMAC	0x17

#define ESP_DMA_CMD_IDLE	0x00
#define ESP_DMA_CMD_BLAST	0x01
#define ESP_DMA_CMD_ABORT	0x02
#define ESP_DMA_CMD_START	0x03
#define ESP_DMA_CMD_MASK	0x03
#define ESP_DMA_CMD_DIAG	0x04
#define ESP_DMA_CMD_MDL		0x10
#define ESP_DMA_CMD_INTE_P	0x20
#define ESP_DMA_CMD_INTE_D	0x40
#define ESP_DMA_CMD_DIR		0x80

#define ESP_DMA_STAT_PWDN	0x01
#define ESP_DMA_STAT_ERROR	0x02
#define ESP_DMA_STAT_ABORT	0x04
#define ESP_DMA_STAT_DONE	0x08
#define ESP_DMA_STAT_SCSIINT	0x10
#define ESP_DMA_STAT_BCMPLT	0x20

/* EEPROM is accessed with 16-bit values */
#define DC390_EEPROM_READ	0x80
#define DC390_EEPROM_LEN	0x40

/*
 * DC390 EEPROM
 *
 * 8 * 4 bytes of per-device options
 * followed by HBA specific options
 */

/* Per-device options */
#define DC390_EE_MODE1		0x00
#define DC390_EE_SPEED		0x01

/* HBA-specific options */
#define DC390_EE_ADAPT_SCSI_ID	0x40
#define DC390_EE_MODE2		0x41
#define DC390_EE_DELAY		0x42
#define DC390_EE_TAG_CMD_NUM	0x43

#define DC390_EE_MODE1_PARITY_CHK	0x01
#define DC390_EE_MODE1_SYNC_NEGO	0x02
#define DC390_EE_MODE1_EN_DISC		0x04
#define DC390_EE_MODE1_SEND_START	0x08
#define DC390_EE_MODE1_TCQ		0x10

#define DC390_EE_MODE2_MORE_2DRV	0x01
#define DC390_EE_MODE2_GREATER_1G	0x02
#define DC390_EE_MODE2_RST_SCSI_BUS	0x04
#define DC390_EE_MODE2_ACTIVE_NEGATION	0x08
#define DC390_EE_MODE2_NO_SEEK		0x10
#define DC390_EE_MODE2_LUN_CHECK	0x20

struct pci_esp_priv {
	struct esp *esp;
	u8 dma_status;
};

static void pci_esp_dma_drain(struct esp *esp);

static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
{
	struct pci_dev *pdev = esp->dev;

	return pci_get_drvdata(pdev);
}
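
/*
 * MMIO accessors. esp->regs points at BAR 0, which holds both the
 * ESP core registers and the DMA engine registers (the probe routine
 * sets esp->dma_regs to the same mapping).
 */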
static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	iowrite8(val, esp->regs + (reg * 4UL));
}

static u8 pci_esp_read8(struct esp *esp, unsigned long reg)
{
	return ioread8(esp->regs + (reg * 4UL));
}

static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
{
	iowrite32(val, esp->regs + (reg * 4UL));
}

static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
				     size_t sz, int dir)
{
	return pci_map_single(esp->dev, buf, sz, dir);
}

static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			  int num_sg, int dir)
{
	return pci_map_sg(esp->dev, sg, num_sg, dir);
}

static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				 size_t sz, int dir)
{
	pci_unmap_single(esp->dev, addr, sz, dir);
}

static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			     int num_sg, int dir)
{
	pci_unmap_sg(esp->dev, sg, num_sg, dir);
}
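
/*
 * Called by the esp_scsi core from its interrupt handler; any of the
 * ERROR, ABORT, DONE or SCSIINT bits in the DMA status register
 * means this interrupt is ours.
 */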
static int pci_esp_irq_pending(struct esp *esp)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);

	pep->dma_status = pci_esp_read8(esp, ESP_DMA_STATUS);
	esp_dma_log("dma intr dreg[%02x]\n", pep->dma_status);

	if (pep->dma_status & (ESP_DMA_STAT_ERROR |
			       ESP_DMA_STAT_ABORT |
			       ESP_DMA_STAT_DONE |
			       ESP_DMA_STAT_SCSIINT))
		return 1;

	return 0;
}

static void pci_esp_reset_dma(struct esp *esp)
{
	/* Nothing to do ? */
}
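
/*
 * If the target changes phase while bytes are still sitting in the
 * chip's FIFO, they have to be flushed (BLAST) to memory before the
 * command can complete; that is what the drain below implements.
 */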
static void pci_esp_dma_drain(struct esp *esp)
{
	u8 resid;
	int lim = 1000;

	if ((esp->sreg & ESP_STAT_PMASK) == ESP_DOP ||
	    (esp->sreg & ESP_STAT_PMASK) == ESP_DIP)
		/* Data-In or Data-Out, nothing to be done */
		return;

	while (--lim > 0) {
		resid = pci_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES;
		if (resid <= 1)
			break;
		cpu_relax();
	}

	/*
	 * When there is a residual BCMPLT will never be set
	 * (obviously). But we still have to issue the BLAST
	 * command, otherwise the data will not be transferred.
	 * But we'll never know when the BLAST operation is
	 * finished. So check for some time and give up eventually.
	 */
	lim = 1000;
	pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_BLAST, ESP_DMA_CMD);
	while (pci_esp_read8(esp, ESP_DMA_STATUS) & ESP_DMA_STAT_BCMPLT) {
		if (--lim == 0)
			break;
		cpu_relax();
	}
	pci_esp_write8(esp, ESP_DMA_CMD_DIR | ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
	esp_dma_log("DMA blast done (%d tries, %d bytes left)\n", lim, resid);

	/* BLAST residual handling is currently untested */
	if (WARN_ON_ONCE(resid == 1)) {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->flags |= ESP_CMD_FLAG_RESIDUAL;
	}
}

static void pci_esp_dma_invalidate(struct esp *esp)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);

	esp_dma_log("invalidate DMA\n");

	pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
	pep->dma_status = 0;
}

static int pci_esp_dma_error(struct esp *esp)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);

	if (pep->dma_status & ESP_DMA_STAT_ERROR) {
		u8 dma_cmd = pci_esp_read8(esp, ESP_DMA_CMD);

		if ((dma_cmd & ESP_DMA_CMD_MASK) == ESP_DMA_CMD_START)
			pci_esp_write8(esp, ESP_DMA_CMD_ABORT, ESP_DMA_CMD);

		return 1;
	}
	if (pep->dma_status & ESP_DMA_STAT_ABORT) {
		pci_esp_write8(esp, ESP_DMA_CMD_IDLE, ESP_DMA_CMD);
		pep->dma_status = pci_esp_read8(esp, ESP_DMA_CMD);
		return 1;
	}
	return 0;
}
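
/*
 * Starting a transfer means programming the length twice: into the
 * ESP core's transfer counter (TCLOW/TCMED, plus TCHI when FENAB is
 * set) and into the DMA engine's STC register, then pointing SPA at
 * the buffer and issuing the DMA START command.
 */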
static void pci_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				 u32 dma_count, int write, u8 cmd)
{
	struct pci_esp_priv *pep = pci_esp_get_priv(esp);
	u32 val = 0;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	pep->dma_status = 0;

	/* Set DMA engine to IDLE */
	if (write)
		/* DMA write direction logic is inverted */
		val |= ESP_DMA_CMD_DIR;
	pci_esp_write8(esp, ESP_DMA_CMD_IDLE | val, ESP_DMA_CMD);

	pci_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	pci_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->config2 & ESP_CONFIG2_FENAB)
		pci_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);

	pci_esp_write32(esp, esp_count, ESP_DMA_STC);
	pci_esp_write32(esp, addr, ESP_DMA_SPA);

	esp_dma_log("start dma addr[%x] count[%d:%d]\n",
		    addr, esp_count, dma_count);

	scsi_esp_cmd(esp, cmd);
	/* Send DMA Start command */
	pci_esp_write8(esp, ESP_DMA_CMD_START | val, ESP_DMA_CMD);
}

static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	int dma_limit = 16;
	u32 base, end;

	/*
	 * If CONFIG2_FENAB is set we can
	 * handle up to 24 bit addresses
	 */
	if (esp->config2 & ESP_CONFIG2_FENAB)
		dma_limit = 24;

	if (dma_len > (1U << dma_limit))
		dma_len = (1U << dma_limit);

	/*
	 * Prevent crossing a 24-bit address boundary.
	 */
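	/*
	 * Worked example (illustrative values): for dma_addr 0x00fffff0
	 * and dma_len 0x100, base is 0xfffff0 and end would be
	 * 0x10000f0; end gets clipped to 0x1000000, so this request is
	 * limited to 0x10 bytes and the remainder must be transferred
	 * separately.
	 */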
	base = dma_addr & ((1U << 24) - 1U);
	end = base + dma_len;
	if (end > (1U << 24))
		end = (1U << 24);
	dma_len = end - base;

	return dma_len;
}

static const struct esp_driver_ops pci_esp_ops = {
	.esp_write8		= pci_esp_write8,
	.esp_read8		= pci_esp_read8,
	.map_single		= pci_esp_map_single,
	.map_sg			= pci_esp_map_sg,
	.unmap_single		= pci_esp_unmap_single,
	.unmap_sg		= pci_esp_unmap_sg,
	.irq_pending		= pci_esp_irq_pending,
	.reset_dma		= pci_esp_reset_dma,
	.dma_drain		= pci_esp_dma_drain,
	.dma_invalidate		= pci_esp_dma_invalidate,
	.send_dma_cmd		= pci_esp_send_dma_cmd,
	.dma_error		= pci_esp_dma_error,
	.dma_length_limit	= pci_esp_dma_length_limit,
};

/*
 * Read DC-390 eeprom
 */
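/*
 * The EEPROM is bit-banged through PCI config space: register 0xc0
 * appears to arm the interface for each word, the select/clock/data
 * lines are toggled via register 0x80, and each result bit is
 * sampled indirectly by reading back config offset 0x00 (0x22, the
 * low byte of the AMD vendor ID, indicates a set bit). The
 * udelay(160) calls pace the EEPROM's timing requirements.
 */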
static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
{
	u8 carry_flag = 1, j = 0x80, bval;
	int i;

	for (i = 0; i < 9; i++) {
		if (carry_flag) {
			pci_write_config_byte(pdev, 0x80, 0x40);
			bval = 0xc0;
		} else
			bval = 0x80;

		udelay(160);
		pci_write_config_byte(pdev, 0x80, bval);
		udelay(160);
		pci_write_config_byte(pdev, 0x80, 0);
		udelay(160);

		carry_flag = (cmd & j) ? 1 : 0;
		j >>= 1;
	}
}

static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
{
	int i;
	u16 wval = 0;
	u8 bval;

	for (i = 0; i < 16; i++) {
		wval <<= 1;

		pci_write_config_byte(pdev, 0x80, 0x80);
		udelay(160);
		pci_write_config_byte(pdev, 0x80, 0x40);
		udelay(160);
		pci_read_config_byte(pdev, 0x00, &bval);

		if (bval == 0x22)
			wval |= 1;
	}
	return wval;
}

static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
{
	u8 cmd = DC390_EEPROM_READ, i;

	for (i = 0; i < DC390_EEPROM_LEN; i++) {
		pci_write_config_byte(pdev, 0xc0, 0);
		udelay(160);

		dc390_eeprom_prepare_read(pdev, cmd++);
		*ptr++ = dc390_eeprom_get_data(pdev);

		pci_write_config_byte(pdev, 0x80, 0);
		pci_write_config_byte(pdev, 0x80, 0);
		udelay(160);
	}
}
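
/*
 * All DC390_EEPROM_LEN words are summed; a correctly programmed
 * Tekram EEPROM adds up to 0x1234, which is used as the validity
 * check below.
 */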
static void dc390_check_eeprom(struct esp *esp)
{
	u8 EEbuf[128];
	u16 *ptr = (u16 *)EEbuf, wval = 0;
	int i;

	dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);

	for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
		wval += *ptr;

	/* no Tekram EEprom found */
	if (wval != 0x1234) {
		struct pci_dev *pdev = esp->dev;

		dev_printk(KERN_INFO, &pdev->dev,
			   "No valid Tekram EEprom found\n");
		return;
	}
	esp->scsi_id = EEbuf[DC390_EE_ADAPT_SCSI_ID];
	esp->num_tags = 2 << EEbuf[DC390_EE_TAG_CMD_NUM];
	if (EEbuf[DC390_EE_MODE2] & DC390_EE_MODE2_ACTIVE_NEGATION)
		esp->config4 |= ESP_CONFIG4_RADE | ESP_CONFIG4_RAE;
}

static int pci_esp_probe_one(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct scsi_host_template *hostt = &scsi_esp_template;
	int err = -ENODEV;
	struct Scsi_Host *shost;
	struct esp *esp;
	struct pci_esp_priv *pep;

	if (pci_enable_device(pdev)) {
		dev_printk(KERN_INFO, &pdev->dev, "cannot enable device\n");
		return -ENODEV;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "failed to set 32bit DMA mask\n");
		goto fail_disable_device;
	}

	shost = scsi_host_alloc(hostt, sizeof(struct esp));
	if (!shost) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "failed to allocate scsi host\n");
		err = -ENOMEM;
		goto fail_disable_device;
	}

	pep = kzalloc(sizeof(struct pci_esp_priv), GFP_KERNEL);
	if (!pep) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "failed to allocate esp_priv\n");
		err = -ENOMEM;
		goto fail_host_alloc;
	}

	esp = shost_priv(shost);
	esp->host = shost;
	esp->dev = pdev;
	esp->ops = &pci_esp_ops;
	/*
	 * The am53c974 HBA has a design flaw of generating
	 * spurious DMA completion interrupts when using
	 * DMA for command submission.
	 */
	esp->flags |= ESP_FLAG_USE_FIFO;
	/*
	 * Enable CONFIG2_FENAB to allow for large DMA transfers
	 */
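	/*
	 * (FENAB also extends the transfer counter to 24 bits; see the
	 * ESP_TCHI write in pci_esp_send_dma_cmd() and the 24-bit limit
	 * in pci_esp_dma_length_limit().)
	 */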
	if (am53c974_fenab)
		esp->config2 |= ESP_CONFIG2_FENAB;

	pep->esp = esp;

	if (pci_request_regions(pdev, DRV_MODULE_NAME)) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "pci memory selection failed\n");
		goto fail_priv_alloc;
	}

	esp->regs = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!esp->regs) {
		dev_printk(KERN_ERR, &pdev->dev, "pci I/O map failed\n");
		err = -EINVAL;
		goto fail_release_regions;
	}
	esp->dma_regs = esp->regs;

	pci_set_master(pdev);

	esp->command_block = pci_alloc_consistent(pdev, 16,
						  &esp->command_block_dma);
	if (!esp->command_block) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to allocate command block\n");
		err = -ENOMEM;
		goto fail_unmap_regs;
	}

	pci_set_drvdata(pdev, pep);

	err = request_irq(pdev->irq, scsi_esp_intr, IRQF_SHARED,
			  DRV_MODULE_NAME, esp);
	if (err < 0) {
		dev_printk(KERN_ERR, &pdev->dev, "failed to register IRQ\n");
		goto fail_unmap_command_block;
	}

	esp->scsi_id = 7;
	dc390_check_eeprom(esp);

	shost->this_id = esp->scsi_id;
	shost->max_id = 8;
	shost->irq = pdev->irq;
	shost->io_port = pci_resource_start(pdev, 0);
	shost->n_io_port = pci_resource_len(pdev, 0);
	shost->unique_id = shost->io_port;
	esp->scsi_id_mask = (1 << esp->scsi_id);
	/* Assume 40MHz clock */
	esp->cfreq = 40000000;

	err = scsi_esp_register(esp, &pdev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(pdev->irq, esp);
fail_unmap_command_block:
	pci_set_drvdata(pdev, NULL);
	pci_free_consistent(pdev, 16, esp->command_block,
			    esp->command_block_dma);
fail_unmap_regs:
	pci_iounmap(pdev, esp->regs);
fail_release_regions:
	pci_release_regions(pdev);
fail_priv_alloc:
	kfree(pep);
fail_host_alloc:
	scsi_host_put(shost);
fail_disable_device:
	pci_disable_device(pdev);

	return err;
}
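
/*
 * Teardown releases resources in roughly the reverse order of
 * pci_esp_probe_one().
 */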
static void pci_esp_remove_one(struct pci_dev *pdev)
{
	struct pci_esp_priv *pep = pci_get_drvdata(pdev);
	struct esp *esp = pep->esp;

	scsi_esp_unregister(esp);
	free_irq(pdev->irq, esp);
	pci_set_drvdata(pdev, NULL);
	pci_free_consistent(pdev, 16, esp->command_block,
			    esp->command_block_dma);
	pci_iounmap(pdev, esp->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(pep);

	scsi_host_put(esp->host);
}

static struct pci_device_id am53c974_pci_tbl[] = {
	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_SCSI,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, am53c974_pci_tbl);

static struct pci_driver am53c974_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= am53c974_pci_tbl,
	.probe		= pci_esp_probe_one,
	.remove		= pci_esp_remove_one,
};

static int __init am53c974_module_init(void)
{
	return pci_register_driver(&am53c974_driver);
}

static void __exit am53c974_module_exit(void)
{
	pci_unregister_driver(&am53c974_driver);
}

MODULE_DESCRIPTION("AM53C974 SCSI driver");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_ALIAS("tmscsim");

module_param(am53c974_debug, bool, 0644);
MODULE_PARM_DESC(am53c974_debug, "Enable debugging");

module_param(am53c974_fenab, bool, 0444);
MODULE_PARM_DESC(am53c974_fenab, "Enable 24-bit DMA transfer sizes");

module_init(am53c974_module_init);
module_exit(am53c974_module_exit);