qla_tmpl.c

/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

/* note default template is in big endian */
static const uint32_t ql27xx_fwdt_default_template[] = {
	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x04010000, 0x14000000, 0x00000000,
	0x02000000, 0x44000000, 0x09010000, 0x10000000,
	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
	0x00000000, 0x02000000, 0x00600000, 0x00000000,
	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
	0x00000000, 0x02000000, 0x01000000, 0x00000200,
	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
	0x02000000, 0x02000000, 0x00001000, 0x00000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x01000000, 0x07010000, 0x18000000,
	0x00000000, 0x02000000, 0x00000000, 0x02000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
	0x00000000, 0x02000000, 0x00000000, 0xff000000,
	0x10000000, 0x00000000, 0x00000080,
};

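/* Return the mapped ISP24xx-compatible register block for this host. */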
static inline void __iomem *
qla27xx_isp_reg(struct scsi_qla_host *vha)
{
	return &vha->hw->iobase->isp24;
}

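/*
 * Dump-buffer insertion helpers. Values are stored little-endian; when
 * buf is NULL nothing is written, but *len still advances, so the same
 * template walk can also be used to compute the required dump size.
 */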
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

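/*
 * Register capture helpers: read a register of the given width from the
 * mapped window and record it as a 32-bit value in the dump. On a sizing
 * pass (buf == NULL) the hardware is not touched.
 */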
static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	__iomem void *window = (void __iomem *)reg + offset;

	if (buf) {
		WRT_REG_DWORD(window, data);
	}
}

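/*
 * Select a register window by writing IOBASE_ADDR, then capture 'count'
 * registers of 'width' bytes at 'offset', recording the window address
 * alongside each captured value.
 */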
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

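/*
 * Flag a template entry as skipped in the captured dump so post-processing
 * tools know it carries no data.
 */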
static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
	ql_dbg(ql_dbg_misc + ql_dbg_verbose, NULL, 0xd011,
	    "Skipping entry %d\n", ent->hdr.entry_type);
}

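/*
 * Template entry handlers. Each qla27xx_fwdt_entry_tNNN() captures one
 * entry type into the dump buffer, or merely accounts for its size when
 * buf is NULL. A handler returns true only to terminate the template
 * walk, as the end-of-template entry (t255) does.
 */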
static int
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);
	return false;
}

static int
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);
	/* terminate */
	return true;
}

static int
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
	    ent->t256.reg_count, ent->t256.reg_width, buf, len);
	return false;
}

static int
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);
	return false;
}

static int
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
	    ent->t258.reg_count, ent->t258.reg_width, buf, len);
	return false;
}

static int
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);
	return false;
}

static int
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t260.pci_offset, buf, len);
	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
	return false;
}

static int
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
	return false;
}

static int
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords;
	ulong start;
	ulong end;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
	start = ent->t262.start_addr;
	end = ent->t262.end_addr;

	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
		;
	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = end;
	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%x end=%x)\n", __func__,
		    ent->t262.start_addr, ent->t262.end_addr);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return false;
}

static int
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (!test_bit(i, vha->hw->req_qid_map))
				continue;

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (!test_bit(i, vha->hw->rsp_qid_map))
				continue;

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf)
		ent->t263.num_queues = count;

	return false;
}

static int
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}
	return false;
}

static int
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(reg, vha->hw);
	return false;
}

static int
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_soft_reset(vha->hw);
	return false;
}

static int
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);
	return false;
}

static int
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_TRACE) {
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
	}
	return false;
}

static int
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);
	return false;
}

static int
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong dwords = ent->t270.count;
	ulong addr = ent->t270.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(reg, 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}
	return false;
}

static int
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = ent->t271.addr;
	ulong data = ent->t271.data;

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	qla27xx_write_reg(reg, 0xc4, data, buf);
	qla27xx_write_reg(reg, 0xc0, addr, buf);
	return false;
}

static int
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t272.count;
	ulong start = ent->t272.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
	return false;
}

static int
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t273.count;
	ulong addr = ent->t273.addr;
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}
	return false;
}

static int
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc, vha, 0xd212,
	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (!test_bit(i, vha->hw->req_qid_map))
				continue;

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (!test_bit(i, vha->hw->rsp_qid_map))
				continue;

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf)
		ent->t274.num_queues = count;

	if (!count)
		qla27xx_skip_entry(ent, buf);

	return false;
}

static int
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);

	ql_dbg(ql_dbg_misc, vha, 0xd213,
	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
	if (!ent->t275.length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + ent->t275.length > ent->hdr.entry_size) {
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
done:
	return false;
}

static int
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: type %x [%lx]\n", __func__, ent->hdr.entry_type, *len);
	qla27xx_skip_entry(ent, buf);
	return false;
}

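/*
 * Dispatch table mapping entry types to their handlers. The list is kept
 * sorted by type and terminated by a catch-all entry; qla27xx_find_entry()
 * depends on both properties.
 */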
struct qla27xx_fwdt_entry_call {
	uint type;
	int (*call)(
	    struct scsi_qla_host *,
	    struct qla27xx_fwdt_entry *,
	    void *,
	    ulong *);
};

static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
	{ ENTRY_TYPE_NOP,	 qla27xx_fwdt_entry_t0    },
	{ ENTRY_TYPE_TMP_END,	 qla27xx_fwdt_entry_t255  },
	{ ENTRY_TYPE_RD_IOB_T1,	 qla27xx_fwdt_entry_t256  },
	{ ENTRY_TYPE_WR_IOB_T1,	 qla27xx_fwdt_entry_t257  },
	{ ENTRY_TYPE_RD_IOB_T2,	 qla27xx_fwdt_entry_t258  },
	{ ENTRY_TYPE_WR_IOB_T2,	 qla27xx_fwdt_entry_t259  },
	{ ENTRY_TYPE_RD_PCI,	 qla27xx_fwdt_entry_t260  },
	{ ENTRY_TYPE_WR_PCI,	 qla27xx_fwdt_entry_t261  },
	{ ENTRY_TYPE_RD_RAM,	 qla27xx_fwdt_entry_t262  },
	{ ENTRY_TYPE_GET_QUEUE,	 qla27xx_fwdt_entry_t263  },
	{ ENTRY_TYPE_GET_FCE,	 qla27xx_fwdt_entry_t264  },
	{ ENTRY_TYPE_PSE_RISC,	 qla27xx_fwdt_entry_t265  },
	{ ENTRY_TYPE_RST_RISC,	 qla27xx_fwdt_entry_t266  },
	{ ENTRY_TYPE_DIS_INTR,	 qla27xx_fwdt_entry_t267  },
	{ ENTRY_TYPE_GET_HBUF,	 qla27xx_fwdt_entry_t268  },
	{ ENTRY_TYPE_SCRATCH,	 qla27xx_fwdt_entry_t269  },
	{ ENTRY_TYPE_RDREMREG,	 qla27xx_fwdt_entry_t270  },
	{ ENTRY_TYPE_WRREMREG,	 qla27xx_fwdt_entry_t271  },
	{ ENTRY_TYPE_RDREMRAM,	 qla27xx_fwdt_entry_t272  },
	{ ENTRY_TYPE_PCICFG,	 qla27xx_fwdt_entry_t273  },
	{ ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274  },
	{ ENTRY_TYPE_WRITE_BUF,	 qla27xx_fwdt_entry_t275  },
	{ -1,			 qla27xx_fwdt_entry_other }
};

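/*
 * Look up the handler for an entry type; unrecognized types fall back to
 * qla27xx_fwdt_entry_other(), which skips the entry.
 */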
static inline int (*qla27xx_find_entry(uint type))
	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
{
	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

static inline void *
qla27xx_next_entry(void *p)
{
	struct qla27xx_fwdt_entry *ent = p;

	return p + ent->hdr.entry_size;
}

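/*
 * Walk every entry in the dump template and dispatch its handler. With a
 * real buffer this captures the firmware dump in place; with buf == NULL
 * it only accumulates the required dump length into *len.
 */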
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
	ulong count = tmp->entry_count;

	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %lx\n", __func__, count);
	while (count--) {
		if (qla27xx_find_entry(ent->hdr.entry_type)(vha, ent, buf, len))
			break;
		ent = qla27xx_next_entry(ent);
	}

	if (count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: residual count (%lx)\n", __func__, count);

	if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end (%lx)\n", __func__, count);

	ql_dbg(ql_dbg_misc, vha, 0xd01b,
	    "%s: len=%lx\n", __func__, *len);

	if (buf) {
		ql_log(ql_log_warn, vha, 0xd015,
		    "Firmware dump saved to temp buffer (%ld/%p)\n",
		    vha->host_no, vha->hw->fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5);
	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

static void
qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
	struct scsi_qla_host *vha)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
	    vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
	    vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(tmp, vha);
}

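/*
 * Sum the template as 32-bit words into a 64-bit accumulator, fold the
 * carries back in, and return the complement; a template whose checksum
 * field is valid verifies to zero.
 */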
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	uint32_t *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	while (size--)
		sum += *buf++;

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
}

static void
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(vha->hw->fw_dump, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, tmp, &len);
		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = 1;
	}
}

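/*
 * Size the eventual firmware dump by walking the template without a
 * destination buffer.
 */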
ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

ulong
qla27xx_fwdt_template_default_size(void)
{
	return sizeof(ql27xx_fwdt_default_template);
}

const void *
qla27xx_fwdt_template_default(void)
{
	return ql27xx_fwdt_default_template;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__, tmp->template_type);
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

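/*
 * Firmware dump entry point for ISP27xx parts: takes the hardware lock
 * unless the caller already holds it, then captures a dump if a buffer
 * and a valid template are available and no previous dump is pending.
 */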
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump)
		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
	else if (!vha->hw->fw_dump_template)
		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
	else if (vha->hw->fw_dumped)
		ql_log(ql_log_warn, vha, 0xd300,
		    "Firmware has been previously dumped (%p),"
		    " -- ignoring request\n", vha->hw->fw_dump);
	else
		qla27xx_execute_fwdt_template(vha);

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}