/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET  (cpu_to_le32(-1))

struct qed_ptt {
        struct list_head        list_entry;
        unsigned int            idx;
        struct pxp_ptt_entry    pxp;
};

struct qed_ptt_pool {
        struct list_head        free_list;
        spinlock_t              lock; /* ptt synchronized access */
        struct qed_ptt          ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_ATOMIC);
        int i;

        if (!p_pool)
                return -ENOMEM;

        INIT_LIST_HEAD(&p_pool->free_list);
        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;

                /* The first RESERVED_PTT_MAX entries stay off the free
                 * list; they are handed out only via qed_get_reserved_ptt().
                 */
                if (i >= RESERVED_PTT_MAX)
                        list_add(&p_pool->ptts[i].list_entry,
                                 &p_pool->free_list);
        }

        p_hwfn->p_ptt_pool = p_pool;
        spin_lock_init(&p_pool->lock);

        return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        int i;

        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
                p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
        }
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
        kfree(p_hwfn->p_ptt_pool);
        p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        unsigned int i;

        /* Take the free PTT from the list */
        for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
                spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

                if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
                        p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
                                                 struct qed_ptt, list_entry);
                        list_del(&p_ptt->list_entry);

                        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

                        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                                   "allocated ptt %d\n", p_ptt->idx);

                        return p_ptt;
                }

                spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
                usleep_range(1000, 2000);
        }

        DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
        return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt)
{
        spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
        list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}

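/* Usage sketch (illustrative only; the register offset, value and error
 * handling are assumptions, not taken from this file): a caller brackets
 * register access between qed_ptt_acquire() and qed_ptt_release():
 *
 *      struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *      if (!p_ptt)
 *              return -EBUSY;
 *      qed_wr(p_hwfn, p_ptt, reg_offset, val);
 *      val = qed_rd(p_hwfn, p_ptt, reg_offset);
 *      qed_ptt_release(p_hwfn, p_ptt);
 */
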
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt)
{
        /* The HW is using DWORDS and we need to translate it to Bytes */
        return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
        return PXP_PF_WINDOW_ADMIN_PER_PF_START +
               p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
        return PXP_EXTERNAL_BAR_PF_WINDOW_START +
               p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u32 new_hw_addr)
{
        u32 prev_hw_addr;

        prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        if (new_hw_addr == prev_hw_addr)
                return;

        /* Update the PTT entry in the admin window */
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "Updating PTT entry %d to offset 0x%x\n",
                   p_ptt->idx, new_hw_addr);

        /* The HW is using DWORDS and the address is in Bytes */
        p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, offset),
               le32_to_cpu(p_ptt->pxp.offset));
}

static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       u32 hw_addr)
{
        u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        u32 offset;

        offset = hw_addr - win_hw_addr;

        /* Verify the address is within the window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
                qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
                offset = 0;
        }

        return qed_ptt_get_bar_addr(p_ptt) + offset;
}

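/* Worked example (illustrative numbers; the window size is
 * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE and 0x1000 is only assumed here):
 * if the PTT window currently starts at GRC address 0x50000, a request for
 * hw_addr 0x50010 is served in place at offset 0x10 into the BAR window,
 * while hw_addr 0x60000 falls outside the window, so qed_ptt_set_win()
 * re-points the window to 0x60000 and the returned offset restarts at 0.
 */
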
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
                                     enum reserved_ptts ptt_idx)
{
        if (ptt_idx >= RESERVED_PTT_MAX) {
                DP_NOTICE(p_hwfn,
                          "Requested PTT %d is out of range\n", ptt_idx);
                return NULL;
        }

        return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
            struct qed_ptt *p_ptt,
            u32 hw_addr, u32 val)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

        REG_WR(p_hwfn, bar_addr, val);
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
           struct qed_ptt *p_ptt,
           u32 hw_addr)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
        u32 val = REG_RD(p_hwfn, bar_addr);

        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

        return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          void *addr,
                          u32 hw_addr,
                          size_t n,
                          bool to_device)
{
        u32 dw_count, *host_addr, hw_offset;
        size_t quota, done = 0;
        u32 __iomem *reg_addr;

        while (done < n) {
                quota = min_t(size_t, n - done,
                              PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

                qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
                hw_offset = qed_ptt_get_bar_addr(p_ptt);

                dw_count = quota / 4;
                host_addr = (u32 *)((u8 *)addr + done);
                reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);

                if (to_device)
                        while (dw_count--)
                                DIRECT_REG_WR(reg_addr++, *host_addr++);
                else
                        while (dw_count--)
                                *host_addr++ = DIRECT_REG_RD(reg_addr++);

                done += quota;
        }
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     void *dest, u32 hw_addr, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, dest %p, size %lu\n",
                   hw_addr, dest, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt,
                   u32 hw_addr, void *src, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, src %p, size %lu\n",
                   hw_addr, src, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}

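/* Sketch of a typical call (the buffer and GRC offset are assumed for
 * illustration): copy 16 bytes out of the GRC into a host buffer.
 *
 *      u32 buf[4];
 *
 *      qed_memcpy_from(p_hwfn, p_ptt, buf, grc_offset, sizeof(buf));
 */
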
void qed_fid_pretend(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt,
                     u16 fid)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        /* Every pretend undoes previous pretends, including
         * previous port pretend.
         */
        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);
        p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u8 port_id)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);
        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn,
                        struct qed_ptt *p_ptt)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);
        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

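/* Usage sketch (illustrative; the port id is an assumption): a port pretend
 * redirects GRC accesses made through this PTT to another port's register
 * context until it is undone.
 *
 *      qed_port_pretend(p_hwfn, p_ptt, 1);
 *      ... access the other port's registers via qed_rd()/qed_wr() ...
 *      qed_port_unpretend(p_hwfn, p_ptt);
 */
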
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
                            const u8 is_src_type_grc,
                            const u8 is_dst_type_grc,
                            struct qed_dmae_params *p_params)
{
        u32 opcode = 0;
        u16 opcodeB = 0;

        /* Whether the source is the PCIe or the GRC.
         * 0- The source is the PCIe
         * 1- The source is the GRC.
         */
        opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
                                   : DMAE_CMD_SRC_MASK_PCIE) <<
                  DMAE_CMD_SRC_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
                   DMAE_CMD_SRC_PF_ID_SHIFT);

        /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
        opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
                                   : DMAE_CMD_DST_MASK_PCIE) <<
                  DMAE_CMD_DST_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
                   DMAE_CMD_DST_PF_ID_SHIFT);

        /* Whether to write a completion word to the completion destination:
         * 0-Do not write a completion word
         * 1-Write the completion word
         */
        opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);

        if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
                opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

        opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
        opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

        /* reset source address in next go */
        opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
                   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

        /* reset dest address in next go */
        opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
                   DMAE_CMD_DST_ADDR_RESET_SHIFT);

        opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
                    DMAE_CMD_SRC_VF_ID_SHIFT);
        opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
                    DMAE_CMD_DST_VF_ID_SHIFT);

        p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
        p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
        /* All the DMAE 'go' registers form an array in internal memory */
        return DMAE_REG_GO_C0 + (idx << 2);
}

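/* Worked example: the 'go' registers are consecutive 32-bit registers, so
 * channel 0 maps to DMAE_REG_GO_C0, channel 1 to DMAE_REG_GO_C0 + 4, and
 * channel 3 to DMAE_REG_GO_C0 + 12 (idx << 2 is idx * sizeof(u32)).
 */
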
static int
qed_dmae_post_command(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt)
{
        struct dmae_cmd *command = p_hwfn->dmae_info.p_dmae_cmd;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;
        int qed_status = 0;

        /* verify address is not NULL */
        if ((((command->dst_addr_lo == 0) && (command->dst_addr_hi == 0)) ||
             ((command->src_addr_lo == 0) && (command->src_addr_hi == 0)))) {
                DP_NOTICE(p_hwfn,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                          idx_cmd,
                          le32_to_cpu(command->opcode),
                          le16_to_cpu(command->opcode_b),
                          le16_to_cpu(command->length),
                          le32_to_cpu(command->src_addr_hi),
                          le32_to_cpu(command->src_addr_lo),
                          le32_to_cpu(command->dst_addr_hi),
                          le32_to_cpu(command->dst_addr_lo));

                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn,
                   NETIF_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
                   le32_to_cpu(command->opcode),
                   le16_to_cpu(command->opcode_b),
                   le16_to_cpu(command->length),
                   le32_to_cpu(command->src_addr_hi),
                   le32_to_cpu(command->src_addr_lo),
                   le32_to_cpu(command->dst_addr_hi),
                   le32_to_cpu(command->dst_addr_lo));

        /* Copy the command to DMAE - this must be done before every call,
         * in case the source/dest addresses were not reset.
         * The first 9 DWs are the command registers, the 10th DW is the
         * GO register, and the rest are result registers
         * (which are read only by the client).
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
                           *(((u32 *)command) + i) : 0;

                qed_wr(p_hwfn, p_ptt,
                       DMAE_REG_CMD_MEM +
                       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
                       (i * sizeof(u32)), data);
        }

        qed_wr(p_hwfn, p_ptt,
               qed_dmae_idx_to_go_cmd(idx_cmd),
               DMAE_GO_VALUE);

        return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
        dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
        struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
        u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
        u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

        *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32),
                                     p_addr,
                                     GFP_KERNEL);
        if (!*p_comp) {
                DP_NOTICE(p_hwfn, "Failed to allocate `p_completion_word'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
        *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    sizeof(struct dmae_cmd),
                                    p_addr, GFP_KERNEL);
        if (!*p_cmd) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct dmae_cmd'\n");
                goto err;
        }

        p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32) * DMAE_MAX_RW_SIZE,
                                     p_addr, GFP_KERNEL);
        if (!*p_buff) {
                DP_NOTICE(p_hwfn, "Failed to allocate `intermediate_buffer'\n");
                goto err;
        }

        p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

        return 0;

err:
        qed_dmae_info_free(p_hwfn);
        return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
        dma_addr_t p_phys;

        /* Just make sure no one is in the middle */
        mutex_lock(&p_hwfn->dmae_info.mutex);

        if (p_hwfn->dmae_info.p_completion_word) {
                p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32),
                                  p_hwfn->dmae_info.p_completion_word,
                                  p_phys);
                p_hwfn->dmae_info.p_completion_word = NULL;
        }

        if (p_hwfn->dmae_info.p_dmae_cmd) {
                p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(struct dmae_cmd),
                                  p_hwfn->dmae_info.p_dmae_cmd,
                                  p_phys);
                p_hwfn->dmae_info.p_dmae_cmd = NULL;
        }

        if (p_hwfn->dmae_info.p_intermediate_buffer) {
                p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32) * DMAE_MAX_RW_SIZE,
                                  p_hwfn->dmae_info.p_intermediate_buffer,
                                  p_phys);
                p_hwfn->dmae_info.p_intermediate_buffer = NULL;
        }

        mutex_unlock(&p_hwfn->dmae_info.mutex);
}

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
        u32 wait_cnt = 0;
        u32 wait_cnt_limit = 10000;
        int qed_status = 0;

        barrier();
        while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
                udelay(DMAE_MIN_WAIT_TIME);
                if (++wait_cnt > wait_cnt_limit) {
                        DP_NOTICE(p_hwfn->cdev,
                                  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
                                  *p_hwfn->dmae_info.p_completion_word,
                                  DMAE_COMPLETION_VAL);
                        qed_status = -EBUSY;
                        break;
                }

                /* to sync the completion_word since we are not
                 * using the volatile keyword for p_completion_word
                 */
                barrier();
        }

        if (qed_status == 0)
                *p_hwfn->dmae_info.p_completion_word = 0;

        return qed_status;
}

static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          u64 src_addr,
                                          u64 dst_addr,
                                          u8 src_type,
                                          u8 dst_type,
                                          u32 length)
{
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        int qed_status = 0;

        switch (src_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
                break;
        /* for virtual source addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
                memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
                       (void *)(uintptr_t)src_addr,
                       length * sizeof(u32));
                break;
        default:
                return -EINVAL;
        }

        switch (dst_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
                break;
        /* for virtual destination addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
                break;
        default:
                return -EINVAL;
        }

        cmd->length = cpu_to_le16((u16)length);

        qed_dmae_post_command(p_hwfn, p_ptt);

        qed_status = qed_dmae_operation_wait(p_hwfn);
        if (qed_status) {
                DP_NOTICE(p_hwfn,
                          "qed_dmae_execute_sub_operation: wait failed. source_addr 0x%llx, dst_addr 0x%llx, size_in_dwords 0x%x\n",
                          src_addr,
                          dst_addr,
                          length);
                return qed_status;
        }

        if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
                memcpy((void *)(uintptr_t)(dst_addr),
                       &p_hwfn->dmae_info.p_intermediate_buffer[0],
                       length * sizeof(u32));

        return 0;
}

static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    u64 src_addr, u64 dst_addr,
                                    u8 src_type, u8 dst_type,
                                    u32 size_in_dwords,
                                    struct qed_dmae_params *p_params)
{
        dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
        u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        u64 src_addr_split = 0, dst_addr_split = 0;
        u16 length_limit = DMAE_MAX_RW_SIZE;
        int qed_status = 0;
        u32 offset = 0;

        qed_dmae_opcode(p_hwfn,
                        (src_type == QED_DMAE_ADDRESS_GRC),
                        (dst_type == QED_DMAE_ADDRESS_GRC),
                        p_params);

        cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
        cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
        cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

        /* Split the transfer into chunks of at most DMAE_MAX_RW_SIZE
         * dwords: cnt_split full chunks plus a possible length_mod tail.
         */
        cnt_split = size_in_dwords / length_limit;
        length_mod = size_in_dwords % length_limit;

        src_addr_split = src_addr;
        dst_addr_split = dst_addr;

        for (i = 0; i <= cnt_split; i++) {
                offset = length_limit * i;

                /* GRC addresses advance in dwords, host addresses in bytes */
                if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
                        if (src_type == QED_DMAE_ADDRESS_GRC)
                                src_addr_split = src_addr + offset;
                        else
                                src_addr_split = src_addr + (offset * 4);
                }

                if (dst_type == QED_DMAE_ADDRESS_GRC)
                        dst_addr_split = dst_addr + offset;
                else
                        dst_addr_split = dst_addr + (offset * 4);

                length_cur = (cnt_split == i) ? length_mod : length_limit;

                /* might be zero on last iteration */
                if (!length_cur)
                        continue;

                qed_status = qed_dmae_execute_sub_operation(p_hwfn,
                                                            p_ptt,
                                                            src_addr_split,
                                                            dst_addr_split,
                                                            src_type,
                                                            dst_type,
                                                            length_cur);
                if (qed_status) {
                        DP_NOTICE(p_hwfn,
                                  "qed_dmae_execute_sub_operation failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
                                  qed_status,
                                  src_addr,
                                  dst_addr,
                                  length_cur);
                        break;
                }
        }

        return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u64 source_addr,
                      u32 grc_addr,
                      u32 size_in_dwords,
                      u32 flags)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
        int rc;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = flags;

        mutex_lock(&p_hwfn->dmae_info.mutex);

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                      grc_addr_in_dw,
                                      QED_DMAE_ADDRESS_HOST_VIRT,
                                      QED_DMAE_ADDRESS_GRC,
                                      size_in_dwords, &params);

        mutex_unlock(&p_hwfn->dmae_info.mutex);

        return rc;
}

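/* Usage sketch (illustrative; the buffer and GRC destination are
 * assumptions): DMA eight dwords from a host buffer into the GRC. The
 * source is passed as a virtual address, so the intermediate-buffer path
 * of qed_dmae_execute_sub_operation() is used.
 *
 *      u32 data[8];
 *
 *      rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)data,
 *                             grc_dest_addr, ARRAY_SIZE(data), 0);
 */
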
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
                  enum protocol_type proto,
                  union qed_qm_pq_params *p_params)
{
        u16 pq_id = 0;

        if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
            !p_params) {
                DP_NOTICE(p_hwfn,
                          "Protocol %d received NULL PQ params\n",
                          proto);
                return 0;
        }

        switch (proto) {
        case PROTOCOLID_CORE:
                if (p_params->core.tc == LB_TC)
                        pq_id = p_hwfn->qm_info.pure_lb_pq;
                else
                        pq_id = p_hwfn->qm_info.offload_pq;
                break;
        case PROTOCOLID_ETH:
                pq_id = p_params->eth.tc;
                break;
        default:
                pq_id = 0;
        }

        pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

        return pq_id;
}
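
/* Usage sketch (illustrative; the traffic class value is assumed): look up
 * the physical queue id for Ethernet traffic on TC 0. The result is the
 * protocol-relative id offset by CM_TX_PQ_BASE and the hw-function's PQ
 * resource base.
 *
 *      union qed_qm_pq_params pq_params;
 *      u16 pq_id;
 *
 *      memset(&pq_params, 0, sizeof(pq_params));
 *      pq_params.eth.tc = 0;
 *      pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, &pq_params);
 */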