qed_mcp.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/types.h>
  9. #include <asm/byteorder.h>
  10. #include <linux/delay.h>
  11. #include <linux/errno.h>
  12. #include <linux/kernel.h>
  13. #include <linux/mutex.h>
  14. #include <linux/slab.h>
  15. #include <linux/string.h>
  16. #include "qed.h"
  17. #include "qed_hsi.h"
  18. #include "qed_hw.h"
  19. #include "qed_mcp.h"
  20. #include "qed_reg_addr.h"
  21. #define CHIP_MCP_RESP_ITER_US 10
  22. #define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
  23. #define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
  24. #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
  25. qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
  26. _val)
  27. #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
  28. qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
  29. #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
  30. DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
  31. offsetof(struct public_drv_mb, _field), _val)
  32. #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
  33. DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
  34. offsetof(struct public_drv_mb, _field))
  35. #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
  36. DRV_ID_PDA_COMP_VER_SHIFT)
  37. #define MCP_BYTES_PER_MBIT_SHIFT 17
  38. bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
  39. {
  40. if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
  41. return false;
  42. return true;
  43. }
  44. void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
  45. struct qed_ptt *p_ptt)
  46. {
  47. u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
  48. PUBLIC_PORT);
  49. u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);
  50. p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
  51. MFW_PORT(p_hwfn));
  52. DP_VERBOSE(p_hwfn, QED_MSG_SP,
  53. "port_addr = 0x%x, port_id 0x%02x\n",
  54. p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
  55. }
  56. void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
  57. struct qed_ptt *p_ptt)
  58. {
  59. u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
  60. u32 tmp, i;
  61. if (!p_hwfn->mcp_info->public_base)
  62. return;
  63. for (i = 0; i < length; i++) {
  64. tmp = qed_rd(p_hwfn, p_ptt,
  65. p_hwfn->mcp_info->mfw_mb_addr +
  66. (i << 2) + sizeof(u32));
  67. /* The MB data is actually BE; Need to force it to cpu */
  68. ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
  69. be32_to_cpu((__force __be32)tmp);
  70. }
  71. }
  72. int qed_mcp_free(struct qed_hwfn *p_hwfn)
  73. {
  74. if (p_hwfn->mcp_info) {
  75. kfree(p_hwfn->mcp_info->mfw_mb_cur);
  76. kfree(p_hwfn->mcp_info->mfw_mb_shadow);
  77. }
  78. kfree(p_hwfn->mcp_info);
  79. return 0;
  80. }
  81. static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
  82. struct qed_ptt *p_ptt)
  83. {
  84. struct qed_mcp_info *p_info = p_hwfn->mcp_info;
  85. u32 drv_mb_offsize, mfw_mb_offsize;
  86. u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
  87. p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
  88. if (!p_info->public_base)
  89. return 0;
  90. p_info->public_base |= GRCBASE_MCP;
  91. /* Calculate the driver and MFW mailbox address */
  92. drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
  93. SECTION_OFFSIZE_ADDR(p_info->public_base,
  94. PUBLIC_DRV_MB));
  95. p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
  96. DP_VERBOSE(p_hwfn, QED_MSG_SP,
  97. "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
  98. drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
  99. /* Set the MFW MB address */
  100. mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
  101. SECTION_OFFSIZE_ADDR(p_info->public_base,
  102. PUBLIC_MFW_MB));
  103. p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
  104. p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
  105. /* Get the current driver mailbox sequence before sending
  106. * the first command
  107. */
  108. p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
  109. DRV_MSG_SEQ_NUMBER_MASK;
  110. /* Get current FW pulse sequence */
  111. p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
  112. DRV_PULSE_SEQ_MASK;
  113. p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
  114. return 0;
  115. }
  116. int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
  117. struct qed_ptt *p_ptt)
  118. {
  119. struct qed_mcp_info *p_info;
  120. u32 size;
  121. /* Allocate mcp_info structure */
  122. p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
  123. if (!p_hwfn->mcp_info)
  124. goto err;
  125. p_info = p_hwfn->mcp_info;
  126. if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
  127. DP_NOTICE(p_hwfn, "MCP is not initialized\n");
  128. /* Do not free mcp_info here, since public_base indicate that
  129. * the MCP is not initialized
  130. */
  131. return 0;
  132. }
  133. size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
  134. p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
  135. p_info->mfw_mb_shadow =
  136. kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
  137. p_info->mfw_mb_length), GFP_ATOMIC);
  138. if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
  139. goto err;
  140. /* Initialize the MFW mutex */
  141. mutex_init(&p_info->mutex);
  142. return 0;
  143. err:
  144. DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
  145. qed_mcp_free(p_hwfn);
  146. return -ENOMEM;
  147. }
  148. int qed_mcp_reset(struct qed_hwfn *p_hwfn,
  149. struct qed_ptt *p_ptt)
  150. {
  151. u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
  152. u8 delay = CHIP_MCP_RESP_ITER_US;
  153. u32 org_mcp_reset_seq, cnt = 0;
  154. int rc = 0;
  155. /* Set drv command along with the updated sequence */
  156. org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
  157. DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
  158. (DRV_MSG_CODE_MCP_RESET | seq));
  159. do {
  160. /* Wait for MFW response */
  161. udelay(delay);
  162. /* Give the FW up to 500 second (50*1000*10usec) */
  163. } while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
  164. MISCS_REG_GENERIC_POR_0)) &&
  165. (cnt++ < QED_MCP_RESET_RETRIES));
  166. if (org_mcp_reset_seq !=
  167. qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
  168. DP_VERBOSE(p_hwfn, QED_MSG_SP,
  169. "MCP was reset after %d usec\n", cnt * delay);
  170. } else {
  171. DP_ERR(p_hwfn, "Failed to reset MCP\n");
  172. rc = -EAGAIN;
  173. }
  174. return rc;
  175. }
/* Send a single command through the driver mailbox and busy-wait for
 * the MFW's matching response.  Callers serialize mailbox access via
 * the mcp mutex (see qed_mcp_cmd()); this helper assumes exclusive
 * ownership of the mailbox.
 *
 * @cmd:	 DRV_MSG_CODE_* opcode
 * @param:	 opcode-specific parameter
 * @o_mcp_resp:	 out - FW response code, masked to FW_MSG_CODE_MASK
 *		 (0 on timeout)
 * @o_mcp_param: out - FW response parameter (valid only on success)
 *
 * Returns 0 on success, -EAGAIN if the MFW never echoed our sequence.
 */
static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence.
	 * NOTE(review): actual_mb_seq is read but never used afterwards.
	 */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 second (500 * 1000 * 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	/* NOTE(review): cnt * delay is in usec, not ms as the format
	 * string claims.
	 */
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		/* FW BUG! */
		DP_ERR(p_hwfn, "MFW failed to respond!\n");
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}
	return rc;
}
  229. int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
  230. struct qed_ptt *p_ptt,
  231. u32 cmd,
  232. u32 param,
  233. u32 *o_mcp_resp,
  234. u32 *o_mcp_param)
  235. {
  236. int rc = 0;
  237. /* MCP not initialized */
  238. if (!qed_mcp_is_init(p_hwfn)) {
  239. DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
  240. return -EBUSY;
  241. }
  242. /* Lock Mutex to ensure only single thread is
  243. * accessing the MCP at one time
  244. */
  245. mutex_lock(&p_hwfn->mcp_info->mutex);
  246. rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
  247. o_mcp_resp, o_mcp_param);
  248. /* Release Mutex */
  249. mutex_unlock(&p_hwfn->mcp_info->mutex);
  250. return rc;
  251. }
  252. static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
  253. struct qed_hwfn *p_hwfn,
  254. struct qed_ptt *p_ptt)
  255. {
  256. u32 i;
  257. /* Copy version string to MCP */
  258. for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
  259. DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
  260. *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
  261. }
  262. int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
  263. struct qed_ptt *p_ptt,
  264. u32 *p_load_code)
  265. {
  266. struct qed_dev *cdev = p_hwfn->cdev;
  267. u32 param;
  268. int rc;
  269. if (!qed_mcp_is_init(p_hwfn)) {
  270. DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
  271. return -EBUSY;
  272. }
  273. /* Save driver's version to shmem */
  274. qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);
  275. DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
  276. p_hwfn->mcp_info->drv_mb_seq,
  277. p_hwfn->mcp_info->drv_pulse_seq);
  278. /* Load Request */
  279. rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
  280. (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
  281. cdev->drv_type),
  282. p_load_code, &param);
  283. /* if mcp fails to respond we must abort */
  284. if (rc) {
  285. DP_ERR(p_hwfn, "MCP response failure, aborting\n");
  286. return rc;
  287. }
  288. /* If MFW refused (e.g. other port is in diagnostic mode) we
  289. * must abort. This can happen in the following cases:
  290. * - Other port is in diagnostic mode
  291. * - Previously loaded function on the engine is not compliant with
  292. * the requester.
  293. * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
  294. * -
  295. */
  296. if (!(*p_load_code) ||
  297. ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
  298. ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
  299. ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
  300. DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
  301. return -EBUSY;
  302. }
  303. return 0;
  304. }
/* Process a link-state notification from the MFW, or reset the cached
 * state when @b_reset is true: refresh mcp_info->link_output from the
 * port's link_status dword in shmem and notify the core via
 * qed_link_update().
 */
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u32 status = 0;

	/* Cached state is zeroed in both the update and the reset path */
	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port,
					  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	p_link->link_up = !!(status & LINK_STATUS_LINK_UP);

	/* Decode the negotiated speed/duplex; only 1000THD clears
	 * full_duplex below.
	 */
	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
	/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		/* Unknown speed indication - report the link as down */
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	/* Correct speed according to bandwidth allocation */
	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
		p_link->speed = p_link->speed *
				p_hwfn->mcp_info->func_info.bandwidth_max /
				100;
		/* Program the PF rate limiter to the scaled speed */
		qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			       p_link->speed);
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configured MAX bandwidth to be %08x Mb/sec\n",
			   p_link->speed);
	}

	/* Autonegotiation state */
	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	/* Collect the link partner's advertised speeds */
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	/* Flow-control results and the partner's advertised pause modes */
	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	/* Propagate the new state to the qed core / protocol driver */
	qed_link_update(p_hwfn);
}
/* Write the PHY configuration derived from the cached link_input
 * parameters into shmem and ask the MFW to bring the link up
 * (INIT_PHY) or down (LINK_RESET).
 *
 * Returns 0 on success, -EBUSY when the MCP is not initialized, or the
 * mailbox error code when the MFW did not respond.
 */
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	u32 param = 0, reply = 0, cmd;
	struct pmm_phy_cfg phy_cfg;
	int rc = 0;
	u32 i;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	/* A forced speed is only meaningful when autoneg is off */
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* Write the requested configuration to the union_data area of
	 * the driver mailbox, dword by dword.
	 */
	for (i = 0; i < sizeof(phy_cfg); i += 4)
		qed_wr(p_hwfn, p_ptt,
		       p_hwfn->mcp_info->drv_mb_addr +
		       offsetof(struct public_drv_mb, union_data) + i,
		       ((u32 *)&phy_cfg)[i >> 2]);

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
		   p_hwfn->mcp_info->drv_mb_seq,
		   p_hwfn->mcp_info->drv_pulse_seq);

	/* Issue the link command to the MFW */
	rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}
/* Handle an attention indication from the MFW: re-read the message
 * mailbox, dispatch each message that changed since the last snapshot,
 * ACK all messages back to the MFW, and refresh the shadow copy.
 *
 * Returns 0 when at least one known message was handled, -EINVAL when
 * nothing changed or an unimplemented message was seen.
 */
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones.
	 * NOTE(review): the loop bound is mfw_mb_length while the ACK
	 * loop below iterates MFW_DRV_MSG_MAX_DWORDS(mfw_mb_length)
	 * entries - this assumes i indexes individual messages; verify
	 * against the mfw_mb_cur element type in qed_mcp.h.
	 */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The message index identifies the event type */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything; the ACK area follows the length dword and the
	 * message dwords in the mailbox layout.
	 */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow.
	 * NOTE(review): the copy length is mfw_mb_length (bytes?) -
	 * confirm it matches the MFW_DRV_MSG_MAX_DWORDS()-sized buffers
	 * allocated in qed_mcp_cmd_init().
	 */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
  519. int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
  520. u32 *p_mfw_ver)
  521. {
  522. struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
  523. struct qed_ptt *p_ptt;
  524. u32 global_offsize;
  525. p_ptt = qed_ptt_acquire(p_hwfn);
  526. if (!p_ptt)
  527. return -EBUSY;
  528. global_offsize = qed_rd(p_hwfn, p_ptt,
  529. SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
  530. public_base,
  531. PUBLIC_GLOBAL));
  532. *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
  533. SECTION_ADDR(global_offsize, 0) +
  534. offsetof(struct public_global, mfw_ver));
  535. qed_ptt_release(p_hwfn, p_ptt);
  536. return 0;
  537. }
  538. int qed_mcp_get_media_type(struct qed_dev *cdev,
  539. u32 *p_media_type)
  540. {
  541. struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
  542. struct qed_ptt *p_ptt;
  543. if (!qed_mcp_is_init(p_hwfn)) {
  544. DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
  545. return -EBUSY;
  546. }
  547. *p_media_type = MEDIA_UNSPECIFIED;
  548. p_ptt = qed_ptt_acquire(p_hwfn);
  549. if (!p_ptt)
  550. return -EBUSY;
  551. *p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
  552. offsetof(struct public_port, media_type));
  553. qed_ptt_release(p_hwfn, p_ptt);
  554. return 0;
  555. }
  556. static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
  557. struct qed_ptt *p_ptt,
  558. struct public_func *p_data,
  559. int pfid)
  560. {
  561. u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
  562. PUBLIC_FUNC);
  563. u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
  564. u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
  565. u32 i, size;
  566. memset(p_data, 0, sizeof(*p_data));
  567. size = min_t(u32, sizeof(*p_data),
  568. QED_SECTION_SIZE(mfw_path_offsize));
  569. for (i = 0; i < size / sizeof(u32); i++)
  570. ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
  571. func_addr + (i << 2));
  572. return size;
  573. }
  574. static int
  575. qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
  576. struct public_func *p_info,
  577. enum qed_pci_personality *p_proto)
  578. {
  579. int rc = 0;
  580. switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
  581. case FUNC_MF_CFG_PROTOCOL_ETHERNET:
  582. *p_proto = QED_PCI_ETH;
  583. break;
  584. default:
  585. rc = -EINVAL;
  586. }
  587. return rc;
  588. }
/* Populate p_hwfn->mcp_info->func_info from this PF's public_func
 * section in shmem: pause policy, personality, min/max bandwidth
 * (clamped to [1, 100] in multi-function mode), MAC address, FCoE
 * WWNs and outer VLAN.
 *
 * Returns -EINVAL when the personality is unsupported, 0 otherwise.
 */
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
			       MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	/* Bandwidth limits are only meaningful in multi-function mode */
	if (p_hwfn->cdev->mf_mode != SF) {
		info->bandwidth_min = (shmem_info.config &
				       FUNC_MF_CFG_MIN_BW_MASK) >>
				      FUNC_MF_CFG_MIN_BW_SHIFT;
		if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
			DP_INFO(p_hwfn,
				"bandwidth minimum out of bounds [%02x]. Set to 1\n",
				info->bandwidth_min);
			info->bandwidth_min = 1;
		}

		info->bandwidth_max = (shmem_info.config &
				       FUNC_MF_CFG_MAX_BW_MASK) >>
				      FUNC_MF_CFG_MAX_BW_SHIFT;
		if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
			DP_INFO(p_hwfn,
				"bandwidth maximum out of bounds [%02x]. Set to 100\n",
				info->bandwidth_max);
			info->bandwidth_max = 100;
		}
	}

	/* Reassemble the MAC from the two shmem words: upper holds the
	 * first 2 bytes, lower holds the remaining 4.
	 */
	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	/* FCoE world-wide names, each split across two shmem words */
	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return 0;
}
  649. struct qed_mcp_link_params
  650. *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
  651. {
  652. if (!p_hwfn || !p_hwfn->mcp_info)
  653. return NULL;
  654. return &p_hwfn->mcp_info->link_input;
  655. }
  656. struct qed_mcp_link_state
  657. *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
  658. {
  659. if (!p_hwfn || !p_hwfn->mcp_info)
  660. return NULL;
  661. return &p_hwfn->mcp_info->link_output;
  662. }
  663. struct qed_mcp_link_capabilities
  664. *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
  665. {
  666. if (!p_hwfn || !p_hwfn->mcp_info)
  667. return NULL;
  668. return &p_hwfn->mcp_info->link_capabilities;
  669. }
  670. int qed_mcp_drain(struct qed_hwfn *p_hwfn,
  671. struct qed_ptt *p_ptt)
  672. {
  673. u32 resp = 0, param = 0;
  674. int rc;
  675. rc = qed_mcp_cmd(p_hwfn, p_ptt,
  676. DRV_MSG_CODE_NIG_DRAIN, 100,
  677. &resp, &param);
  678. /* Wait for the drain to complete before returning */
  679. msleep(120);
  680. return rc;
  681. }
  682. int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
  683. struct qed_ptt *p_ptt,
  684. u32 *p_flash_size)
  685. {
  686. u32 flash_size;
  687. flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
  688. flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
  689. MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
  690. flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
  691. *p_flash_size = flash_size;
  692. return 0;
  693. }
  694. int
  695. qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
  696. struct qed_ptt *p_ptt,
  697. struct qed_mcp_drv_version *p_ver)
  698. {
  699. int rc = 0;
  700. u32 param = 0, reply = 0, i;
  701. if (!qed_mcp_is_init(p_hwfn)) {
  702. DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
  703. return -EBUSY;
  704. }
  705. DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
  706. p_ver->version);
  707. /* Copy version string to shmem */
  708. for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
  709. DRV_MB_WR(p_hwfn, p_ptt,
  710. union_data.drv_version.name[i * sizeof(u32)],
  711. *(u32 *)&p_ver->name[i * sizeof(u32)]);
  712. }
  713. rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
  714. &param);
  715. if (rc) {
  716. DP_ERR(p_hwfn, "MCP response failure, aborting\n");
  717. return rc;
  718. }
  719. return 0;
  720. }