/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"

#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
static u32 pxp_global_win[] = {
        0,
        0,
        0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */
        0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
        0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
        0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
        0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
        0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
        0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
        0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
        0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
        0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
        0,
        0,
        0,
        0,
        0,
        0,
        0,
};
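
/* Publish the firmware-generated internal RAM offsets (IRO) table so the
 * rest of the driver can look up internal RAM locations by logical index.
 */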
void qed_init_iro_array(struct qed_dev *cdev)
{
        cdev->iro_arr = iro_arr;
}

/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
        int i;

        for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
                p_hwfn->rt_data[i].b_valid = false;
}

void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
        p_hwfn->rt_data[rt_offset].init_val = val;
        p_hwfn->rt_data[rt_offset].b_valid = true;
}

void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, u32 rt_offset,
                           u32 *val, size_t size)
{
        size_t i;

        for (i = 0; i < size / sizeof(u32); i++) {
                p_hwfn->rt_data[rt_offset + i].init_val = val[i];
                p_hwfn->rt_data[rt_offset + i].b_valid = true;
        }
}
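
/* Illustrative sketch (not part of the driver): a hw-function init flow
 * typically shadows a runtime value with the helpers above and later lets
 * qed_init_run() flush it to the chip when the firmware init commands
 * reference that offset.  SOME_RT_OFFSET stands in for a real *_RT_OFFSET
 * constant from the HSI headers:
 *
 *      qed_init_store_rt_reg(p_hwfn, SOME_RT_OFFSET, val);
 *      ...
 *      rc = qed_init_run(p_hwfn, p_ptt, phase, phase_id, modes);
 */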
static void qed_init_rt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                        u32 addr, u32 rt_offset, u32 size)
{
        struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset;
        u32 i;

        for (i = 0; i < size; i++) {
                if (!rt_data[i].b_valid)
                        continue;
                qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val);
        }
}
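
/* Allocate the shadow runtime-register array consumed by qed_init_rt() */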
int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_rt_data *rt_data;

        rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC);
        if (!rt_data)
                return -ENOMEM;

        p_hwfn->rt_data = rt_data;

        return 0;
}

void qed_init_free(struct qed_hwfn *p_hwfn)
{
        kfree(p_hwfn->rt_data);
        p_hwfn->rt_data = NULL;
}

static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
                               struct qed_ptt *p_ptt, u32 addr,
                               u32 dmae_data_offset, u32 size, const u32 *buf,
                               bool b_must_dmae, bool b_can_dmae)
{
        int rc = 0;

        /* Perform DMAE only for lengthy enough sections or for wide-bus */
        if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
                const u32 *data = buf + dmae_data_offset;
                u32 i;

                for (i = 0; i < size; i++)
                        qed_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
        } else {
                rc = qed_dmae_host2grc(p_hwfn, p_ptt,
                                       (uintptr_t)(buf + dmae_data_offset),
                                       addr, size, 0);
        }

        return rc;
}
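
/* Zero a GRC region with a single DMAE transaction; the
 * QED_DMAE_FLAG_RW_REPL_SRC flag asks the engine to replicate the (zeroed)
 * source buffer across the whole fill_count range.
 */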
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                              u32 addr, u32 fill, u32 fill_count)
{
        static u32 zero_buffer[DMAE_MAX_RW_SIZE];

        memset(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

        /* invoke the DMAE virtual/physical buffer API with
         * 1. DMAE init channel
         * 2. addr,
         * 3. zero_buffer (source of the data),
         * 4. fill_count
         */
        return qed_dmae_host2grc(p_hwfn, p_ptt,
                                 (uintptr_t)(&zero_buffer[0]),
                                 addr, fill_count,
                                 QED_DMAE_FLAG_RW_REPL_SRC);
}

static void qed_init_fill(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                          u32 addr, u32 fill, u32 fill_count)
{
        u32 i;

        for (i = 0; i < fill_count; i++, addr += sizeof(u32))
                qed_wr(p_hwfn, p_ptt, addr, fill);
}
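
/* Handle an array-sourced write: the firmware buffer describes the data as
 * either a zipped blob, a repeated pattern or a plain (standard) array.
 */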
static int qed_init_cmd_array(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                              struct init_write_op *cmd, bool b_must_dmae,
                              bool b_can_dmae)
{
        u32 data = le32_to_cpu(cmd->data);
        u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
        u32 dmae_array_offset = le32_to_cpu(cmd->args.array_offset);
        u32 offset, output_len, input_len, max_size;
        struct qed_dev *cdev = p_hwfn->cdev;
        union init_array_hdr *hdr;
        const u32 *array_data;
        int rc = 0;
        u32 size;

        array_data = cdev->fw_data->arr_data;

        hdr = (union init_array_hdr *)(array_data + dmae_array_offset);
        data = le32_to_cpu(hdr->raw.data);
        switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
        case INIT_ARR_ZIPPED:
                offset = dmae_array_offset + 1;
                input_len = GET_FIELD(data,
                                      INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
                max_size = MAX_ZIPPED_SIZE * 4;
                memset(p_hwfn->unzip_buf, 0, max_size);

                output_len = qed_unzip_data(p_hwfn, input_len,
                                            (u8 *)&array_data[offset],
                                            max_size, (u8 *)p_hwfn->unzip_buf);
                if (output_len) {
                        rc = qed_init_array_dmae(p_hwfn, p_ptt, addr, 0,
                                                 output_len,
                                                 p_hwfn->unzip_buf,
                                                 b_must_dmae, b_can_dmae);
                } else {
                        DP_NOTICE(p_hwfn, "Failed to unzip dmae data\n");
                        rc = -EINVAL;
                }
                break;
        case INIT_ARR_PATTERN:
        {
                u32 repeats = GET_FIELD(data,
                                        INIT_ARRAY_PATTERN_HDR_REPETITIONS);
                u32 i;

                size = GET_FIELD(data, INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);
                for (i = 0; i < repeats; i++, addr += size << 2) {
                        rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
                                                 dmae_array_offset + 1,
                                                 size, array_data,
                                                 b_must_dmae, b_can_dmae);
                        if (rc)
                                break;
                }
                break;
        }
        case INIT_ARR_STANDARD:
                size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
                rc = qed_init_array_dmae(p_hwfn, p_ptt, addr,
                                         dmae_array_offset + 1,
                                         size, array_data,
                                         b_must_dmae, b_can_dmae);
                break;
        }

        return rc;
}
/* init_ops write command */
static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                           struct init_write_op *cmd, bool b_can_dmae)
{
        u32 data = le32_to_cpu(cmd->data);
        u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
        bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
        union init_write_args *arg = &cmd->args;
        int rc = 0;

        /* Sanitize */
        if (b_must_dmae && !b_can_dmae) {
                DP_NOTICE(p_hwfn,
                          "Need to write to %08x for Wide-bus but DMAE isn't allowed\n",
                          addr);
                return -EINVAL;
        }

        switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
        case INIT_SRC_INLINE:
                qed_wr(p_hwfn, p_ptt, addr, le32_to_cpu(arg->inline_val));
                break;
        case INIT_SRC_ZEROS:
                if (b_must_dmae ||
                    (b_can_dmae && (le32_to_cpu(arg->zeros_count) >= 64)))
                        rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0,
                                                le32_to_cpu(arg->zeros_count));
                else
                        qed_init_fill(p_hwfn, p_ptt, addr, 0,
                                      le32_to_cpu(arg->zeros_count));
                break;
        case INIT_SRC_ARRAY:
                rc = qed_init_cmd_array(p_hwfn, p_ptt, cmd,
                                        b_must_dmae, b_can_dmae);
                break;
        case INIT_SRC_RUNTIME:
                qed_init_rt(p_hwfn, p_ptt, addr,
                            le16_to_cpu(arg->runtime.offset),
                            le16_to_cpu(arg->runtime.size));
                break;
        }

        return rc;
}
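
/* Comparison callbacks used by the polled read command below */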
static inline bool comp_eq(u32 val, u32 expected_val)
{
        return val == expected_val;
}

static inline bool comp_and(u32 val, u32 expected_val)
{
        return (val & expected_val) == expected_val;
}

static inline bool comp_or(u32 val, u32 expected_val)
{
        return (val | expected_val) > 0;
}

/* init_ops read/poll commands */
static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                            struct init_read_op *cmd)
{
        u32 data = le32_to_cpu(cmd->op_data);
        u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
        bool (*comp_check)(u32 val, u32 expected_val);
        u32 delay = QED_INIT_POLL_PERIOD_US, val;

        val = qed_rd(p_hwfn, p_ptt, addr);

        data = le32_to_cpu(cmd->op_data);
        if (GET_FIELD(data, INIT_READ_OP_POLL)) {
                int i;

                switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) {
                case INIT_COMPARISON_EQ:
                        comp_check = comp_eq;
                        break;
                case INIT_COMPARISON_OR:
                        comp_check = comp_or;
                        break;
                case INIT_COMPARISON_AND:
                        comp_check = comp_and;
                        break;
                default:
                        comp_check = NULL;
                        DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
                               data);
                        return;
                }

                for (i = 0;
                     i < QED_INIT_MAX_POLL_COUNT &&
                     !comp_check(val, le32_to_cpu(cmd->expected_val));
                     i++) {
                        udelay(delay);
                        val = qed_rd(p_hwfn, p_ptt, addr);
                }

                if (i == QED_INIT_MAX_POLL_COUNT)
                        DP_ERR(p_hwfn,
                               "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
                               addr, le32_to_cpu(cmd->expected_val),
                               val, data);
        }
}
/* init_ops callbacks entry point */
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                            struct init_callback_op *p_cmd)
{
        DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
}
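
/* Recursively evaluate the firmware modes-tree expression at *offset;
 * returns 1 when the given modes bitmask satisfies it.
 */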
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
                                  u16 *offset, int modes)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        const u8 *modes_tree_buf;
        u8 arg1, arg2, tree_val;

        modes_tree_buf = cdev->fw_data->modes_tree_buf;
        tree_val = modes_tree_buf[(*offset)++];
        switch (tree_val) {
        case INIT_MODE_OP_NOT:
                return qed_init_cmd_mode_match(p_hwfn, offset, modes) ^ 1;
        case INIT_MODE_OP_OR:
                arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                return arg1 | arg2;
        case INIT_MODE_OP_AND:
                arg1 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                arg2 = qed_init_cmd_mode_match(p_hwfn, offset, modes);
                return arg1 & arg2;
        default:
                tree_val -= MAX_INIT_MODE_OPS;
                return (modes & (1 << tree_val)) ? 1 : 0;
        }
}
static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
                             struct init_if_mode_op *p_cmd, int modes)
{
        u16 offset = le16_to_cpu(p_cmd->modes_buf_offset);

        if (qed_init_cmd_mode_match(p_hwfn, &offset, modes))
                return 0;
        else
                return GET_FIELD(le32_to_cpu(p_cmd->op_data),
                                 INIT_IF_MODE_OP_CMD_OFFSET);
}

static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
                              struct init_if_phase_op *p_cmd,
                              u32 phase, u32 phase_id)
{
        u32 data = le32_to_cpu(p_cmd->phase_data);
        u32 op_data = le32_to_cpu(p_cmd->op_data);

        if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
              (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
               GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
                return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
        else
                return 0;
}
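
/* init_ops main entry point - walk the firmware init_ops array and execute
 * each command for the requested phase/phase_id under the given modes.
 */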
int qed_init_run(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                 int phase, int phase_id, int modes)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u32 cmd_num, num_init_ops;
        union init_op *init_ops;
        bool b_dmae = false;
        int rc = 0;

        num_init_ops = cdev->fw_data->init_ops_size;
        init_ops = cdev->fw_data->init_ops;

        p_hwfn->unzip_buf = kzalloc(MAX_ZIPPED_SIZE * 4, GFP_ATOMIC);
        if (!p_hwfn->unzip_buf) {
                DP_NOTICE(p_hwfn, "Failed to allocate unzip buffer\n");
                return -ENOMEM;
        }

        for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
                union init_op *cmd = &init_ops[cmd_num];
                u32 data = le32_to_cpu(cmd->raw.op_data);

                switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
                case INIT_OP_WRITE:
                        rc = qed_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
                                             b_dmae);
                        break;
                case INIT_OP_READ:
                        qed_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
                        break;
                case INIT_OP_IF_MODE:
                        cmd_num += qed_init_cmd_mode(p_hwfn, &cmd->if_mode,
                                                     modes);
                        break;
                case INIT_OP_IF_PHASE:
                        cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
                                                      phase, phase_id);
                        b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
                        break;
                case INIT_OP_DELAY:
                        /* qed_init_run is always invoked from
                         * sleep-able context
                         */
                        udelay(le32_to_cpu(cmd->delay.delay));
                        break;
                case INIT_OP_CALLBACK:
                        qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
                        break;
                }

                if (rc)
                        break;
        }

        kfree(p_hwfn->unzip_buf);

        return rc;
}
void qed_gtt_init(struct qed_hwfn *p_hwfn)
{
        u32 gtt_base;
        u32 i;

        /* Set the global windows */
        gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

        for (i = 0; i < ARRAY_SIZE(pxp_global_win); i++)
                if (pxp_global_win[i])
                        REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
                               pxp_global_win[i]);
}
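
/* Parse the firmware binary blob: record where the init commands, the init
 * values array and the modes tree live inside the image.
 */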
int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
        struct qed_fw_data *fw = cdev->fw_data;
        struct bin_buffer_hdr *buf_hdr;
        u32 offset, len;

        if (!data) {
                DP_NOTICE(cdev, "Invalid fw data\n");
                return -EINVAL;
        }

        buf_hdr = (struct bin_buffer_hdr *)data;

        offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
        fw->init_ops = (union init_op *)(data + offset);

        offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
        fw->arr_data = (u32 *)(data + offset);

        offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
        fw->modes_tree_buf = (u8 *)(data + offset);

        len = buf_hdr[BIN_BUF_INIT_CMD].length;
        fw->init_ops_size = len / sizeof(struct init_raw_op);

        return 0;
}