qed_cxt.c 21 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847
  1. /* QLogic qed NIC Driver
  2. * Copyright (c) 2015 QLogic Corporation
  3. *
  4. * This software is available under the terms of the GNU General Public License
  5. * (GPL) Version 2, available from the file COPYING in the main directory of
  6. * this source tree.
  7. */
  8. #include <linux/types.h>
  9. #include <linux/bitops.h>
  10. #include <linux/dma-mapping.h>
  11. #include <linux/errno.h>
  12. #include <linux/kernel.h>
  13. #include <linux/list.h>
  14. #include <linux/log2.h>
  15. #include <linux/pci.h>
  16. #include <linux/slab.h>
  17. #include <linux/string.h>
  18. #include <linux/bitops.h>
  19. #include "qed.h"
  20. #include "qed_cxt.h"
  21. #include "qed_dev_api.h"
  22. #include "qed_hsi.h"
  23. #include "qed_hw.h"
  24. #include "qed_init_ops.h"
  25. #include "qed_reg_addr.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4

/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */

/* Doorbell-Queue constants */
#define DQ_RANGE_SHIFT 4
#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)

/* ILT constants */
/* Default HW page-size selector; selects 32K pages (see below). */
#define ILT_DEFAULT_HW_P_SIZE 3
/* ILT page size in bytes for a given HW selector: 4K << hw_p_size. */
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
/* Build the runtime-array offset name for a client's boundary register. */
#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET

/* ILT entry structure - physical address is stored >> 12 (4K aligned),
 * with a separate valid bit at position 52.
 */
#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
#define ILT_ENTRY_PHY_ADDR_SHIFT 0
#define ILT_ENTRY_VALID_MASK 0x1ULL
#define ILT_ENTRY_VALID_SHIFT 52
/* Each 64-bit ILT entry occupies two 32-bit runtime registers. */
#define ILT_ENTRY_IN_REGS 2
#define ILT_REG_SIZE_IN_BYTES 4
/* connection context union - a CID's context is one of these layouts,
 * depending on the protocol that owns it.
 */
union conn_context {
	struct core_conn_context core_ctx;
	struct eth_conn_context eth_ctx;
};

/* Size of a single connection context, aligned per-device requirements. */
#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
/* PF per protocol configuration object */
struct qed_conn_type_cfg {
	u32 cid_count;	/* number of CIDs configured for this protocol */
	u32 cid_start;	/* first absolute CID of this protocol's range */
};

/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define CDUC_BLK (0)

enum ilt_clients {
	ILT_CLI_CDUC,
	ILT_CLI_QM,
	ILT_CLI_MAX
};

/* A runtime-array register offset together with the value to program. */
struct ilt_cfg_pair {
	u32 reg;
	u32 val;
};

struct qed_ilt_cli_blk {
	u32 total_size; /* 0 means not active */
	u32 real_size_in_page;	/* usable bytes per ILT page (whole elements) */
	u32 start_line;		/* absolute first ILT line of the block */
};

struct qed_ilt_client_cfg {
	bool active;

	/* ILT boundaries */
	struct ilt_cfg_pair first;
	struct ilt_cfg_pair last;
	struct ilt_cfg_pair p_size;

	/* ILT client blocks for PF */
	struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
	u32 pf_total_lines;
};

/* Per Path -
 * ILT shadow table
 * Protocol acquired CID lists
 * PF start line in ILT
 */
struct qed_dma_mem {
	dma_addr_t p_phys;
	void *p_virt;
	size_t size;
};

/* Bitmap of acquired CIDs for one protocol; bit i <=> CID start_cid + i. */
struct qed_cid_acquired_map {
	u32 start_cid;
	u32 max_count;
	unsigned long *cid_map;
};

struct qed_cxt_mngr {
	/* Per protocol configuration */
	struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];

	/* computed ILT structure */
	struct qed_ilt_client_cfg clients[ILT_CLI_MAX];

	/* Acquired CIDs */
	struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];

	/* ILT shadow table */
	struct qed_dma_mem *ilt_shadow;
	u32 pf_start_line;	/* absolute first ILT line owned by this PF */
};
  111. static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
  112. {
  113. u32 type, pf_cids = 0;
  114. for (type = 0; type < MAX_CONN_TYPES; type++)
  115. pf_cids += p_mngr->conn_cfg[type].cid_count;
  116. return pf_cids;
  117. }
  118. static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
  119. struct qed_qm_iids *iids)
  120. {
  121. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  122. int type;
  123. for (type = 0; type < MAX_CONN_TYPES; type++)
  124. iids->cids += p_mngr->conn_cfg[type].cid_count;
  125. DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
  126. }
  127. /* set the iids count per protocol */
  128. static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
  129. enum protocol_type type,
  130. u32 cid_count)
  131. {
  132. struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
  133. struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
  134. p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
  135. }
  136. static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
  137. struct qed_ilt_cli_blk *p_blk,
  138. u32 start_line, u32 total_size,
  139. u32 elem_size)
  140. {
  141. u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
  142. /* verify thatits called only once for each block */
  143. if (p_blk->total_size)
  144. return;
  145. p_blk->total_size = total_size;
  146. p_blk->real_size_in_page = 0;
  147. if (elem_size)
  148. p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
  149. p_blk->start_line = start_line;
  150. }
  151. static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
  152. struct qed_ilt_client_cfg *p_cli,
  153. struct qed_ilt_cli_blk *p_blk,
  154. u32 *p_line, enum ilt_clients client_id)
  155. {
  156. if (!p_blk->total_size)
  157. return;
  158. if (!p_cli->active)
  159. p_cli->first.val = *p_line;
  160. p_cli->active = true;
  161. *p_line += DIV_ROUND_UP(p_blk->total_size,
  162. p_blk->real_size_in_page);
  163. p_cli->last.val = *p_line - 1;
  164. DP_VERBOSE(p_hwfn, QED_MSG_ILT,
  165. "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x [Real %08x] Start line %d\n",
  166. client_id, p_cli->first.val,
  167. p_cli->last.val, p_blk->total_size,
  168. p_blk->real_size_in_page, p_blk->start_line);
  169. }
/* Compute the ILT layout for this PF: lay out consecutive ILT lines for
 * the CDUC (connection-context) and QM clients starting at the PF's first
 * ILT line.  Returns 0 on success, -EINVAL if the layout exceeds the ILT
 * lines reserved for this PF.
 */
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	struct qed_ilt_client_cfg *p_cli;
	struct qed_ilt_cli_blk *p_blk;
	u32 curr_line, total, pf_cids;
	struct qed_qm_iids qm_iids;

	memset(&qm_iids, 0, sizeof(qm_iids));

	p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "hwfn [%d] - Set context manager starting line to be 0x%08x\n",
		   p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);

	/* CDUC */
	p_cli = &p_mngr->clients[ILT_CLI_CDUC];
	curr_line = p_mngr->pf_start_line;
	p_cli->pf_total_lines = 0;

	/* get the counters for the CDUC and QM clients */
	pf_cids = qed_cxt_cdu_iids(p_mngr);

	p_blk = &p_cli->pf_blks[CDUC_BLK];
	total = pf_cids * CONN_CXT_SIZE(p_hwfn);

	qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
			     total, CONN_CXT_SIZE(p_hwfn));
	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* QM */
	p_cli = &p_mngr->clients[ILT_CLI_QM];
	p_blk = &p_cli->pf_blks[0];

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	/* NOTE(review): total is multiplied by 0x1000 below, which implies
	 * qed_qm_pf_mem_size() returns a size in 4K units - confirm against
	 * its definition.
	 */
	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
				   p_hwfn->qm_info.num_pqs, 0);

	DP_VERBOSE(p_hwfn, QED_MSG_ILT,
		   "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
		   qm_iids.cids, p_hwfn->qm_info.num_pqs, total);

	qed_ilt_cli_blk_fill(p_cli, p_blk,
			     curr_line, total * 0x1000,
			     QM_PQ_ELEMENT_SIZE);

	qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
	p_cli->pf_total_lines = curr_line - p_blk->start_line;

	/* Reject layouts that overflow the PF's ILT line budget. */
	if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
	    RESC_NUM(p_hwfn, QED_ILT)) {
		DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
		       curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
		return -EINVAL;
	}

	return 0;
}
  216. #define for_each_ilt_valid_client(pos, clients) \
  217. for (pos = 0; pos < ILT_CLI_MAX; pos++)
  218. /* Total number of ILT lines used by this PF */
  219. static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
  220. {
  221. u32 size = 0;
  222. u32 i;
  223. for_each_ilt_valid_client(i, ilt_clients) {
  224. if (!ilt_clients[i].active)
  225. continue;
  226. size += (ilt_clients[i].last.val -
  227. ilt_clients[i].first.val + 1);
  228. }
  229. return size;
  230. }
  231. static void qed_ilt_shadow_free(struct qed_hwfn *p_hwfn)
  232. {
  233. struct qed_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
  234. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  235. u32 ilt_size, i;
  236. ilt_size = qed_cxt_ilt_shadow_size(p_cli);
  237. for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
  238. struct qed_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
  239. if (p_dma->p_virt)
  240. dma_free_coherent(&p_hwfn->cdev->pdev->dev,
  241. p_dma->size, p_dma->p_virt,
  242. p_dma->p_phys);
  243. p_dma->p_virt = NULL;
  244. }
  245. kfree(p_mngr->ilt_shadow);
  246. }
  247. static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
  248. struct qed_ilt_cli_blk *p_blk,
  249. enum ilt_clients ilt_client,
  250. u32 start_line_offset)
  251. {
  252. struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
  253. u32 lines, line, sz_left;
  254. if (!p_blk->total_size)
  255. return 0;
  256. sz_left = p_blk->total_size;
  257. lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
  258. line = p_blk->start_line + start_line_offset -
  259. p_hwfn->p_cxt_mngr->pf_start_line;
  260. for (; lines; lines--) {
  261. dma_addr_t p_phys;
  262. void *p_virt;
  263. u32 size;
  264. size = min_t(u32, sz_left,
  265. p_blk->real_size_in_page);
  266. p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
  267. size,
  268. &p_phys,
  269. GFP_KERNEL);
  270. if (!p_virt)
  271. return -ENOMEM;
  272. memset(p_virt, 0, size);
  273. ilt_shadow[line].p_phys = p_phys;
  274. ilt_shadow[line].p_virt = p_virt;
  275. ilt_shadow[line].size = size;
  276. DP_VERBOSE(p_hwfn, QED_MSG_ILT,
  277. "ILT shadow: Line [%d] Physical 0x%llx Virtual %p Size %d\n",
  278. line, (u64)p_phys, p_virt, size);
  279. sz_left -= size;
  280. line++;
  281. }
  282. return 0;
  283. }
  284. static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
  285. {
  286. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  287. struct qed_ilt_client_cfg *clients = p_mngr->clients;
  288. struct qed_ilt_cli_blk *p_blk;
  289. u32 size, i, j;
  290. int rc;
  291. size = qed_cxt_ilt_shadow_size(clients);
  292. p_mngr->ilt_shadow = kcalloc(size, sizeof(struct qed_dma_mem),
  293. GFP_KERNEL);
  294. if (!p_mngr->ilt_shadow) {
  295. DP_NOTICE(p_hwfn, "Failed to allocate ilt shadow table\n");
  296. rc = -ENOMEM;
  297. goto ilt_shadow_fail;
  298. }
  299. DP_VERBOSE(p_hwfn, QED_MSG_ILT,
  300. "Allocated 0x%x bytes for ilt shadow\n",
  301. (u32)(size * sizeof(struct qed_dma_mem)));
  302. for_each_ilt_valid_client(i, clients) {
  303. if (!clients[i].active)
  304. continue;
  305. for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
  306. p_blk = &clients[i].pf_blks[j];
  307. rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
  308. if (rc != 0)
  309. goto ilt_shadow_fail;
  310. }
  311. }
  312. return 0;
  313. ilt_shadow_fail:
  314. qed_ilt_shadow_free(p_hwfn);
  315. return rc;
  316. }
  317. static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
  318. {
  319. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  320. u32 type;
  321. for (type = 0; type < MAX_CONN_TYPES; type++) {
  322. kfree(p_mngr->acquired[type].cid_map);
  323. p_mngr->acquired[type].max_count = 0;
  324. p_mngr->acquired[type].start_cid = 0;
  325. }
  326. }
  327. static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
  328. {
  329. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  330. u32 start_cid = 0;
  331. u32 type;
  332. for (type = 0; type < MAX_CONN_TYPES; type++) {
  333. u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
  334. u32 size;
  335. if (cid_cnt == 0)
  336. continue;
  337. size = DIV_ROUND_UP(cid_cnt,
  338. sizeof(unsigned long) * BITS_PER_BYTE) *
  339. sizeof(unsigned long);
  340. p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
  341. if (!p_mngr->acquired[type].cid_map)
  342. goto cid_map_fail;
  343. p_mngr->acquired[type].max_count = cid_cnt;
  344. p_mngr->acquired[type].start_cid = start_cid;
  345. p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
  346. DP_VERBOSE(p_hwfn, QED_MSG_CXT,
  347. "Type %08x start: %08x count %08x\n",
  348. type, p_mngr->acquired[type].start_cid,
  349. p_mngr->acquired[type].max_count);
  350. start_cid += cid_cnt;
  351. }
  352. return 0;
  353. cid_map_fail:
  354. qed_cid_map_free(p_hwfn);
  355. return -ENOMEM;
  356. }
  357. int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
  358. {
  359. struct qed_cxt_mngr *p_mngr;
  360. u32 i;
  361. p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC);
  362. if (!p_mngr) {
  363. DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n");
  364. return -ENOMEM;
  365. }
  366. /* Initialize ILT client registers */
  367. p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
  368. p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
  369. p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
  370. p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
  371. p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
  372. p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
  373. /* default ILT page size for all clients is 32K */
  374. for (i = 0; i < ILT_CLI_MAX; i++)
  375. p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
  376. /* Set the cxt mangr pointer priori to further allocations */
  377. p_hwfn->p_cxt_mngr = p_mngr;
  378. return 0;
  379. }
  380. int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
  381. {
  382. int rc;
  383. /* Allocate the ILT shadow table */
  384. rc = qed_ilt_shadow_alloc(p_hwfn);
  385. if (rc) {
  386. DP_NOTICE(p_hwfn, "Failed to allocate ilt memory\n");
  387. goto tables_alloc_fail;
  388. }
  389. /* Allocate and initialize the acquired cids bitmaps */
  390. rc = qed_cid_map_alloc(p_hwfn);
  391. if (rc) {
  392. DP_NOTICE(p_hwfn, "Failed to allocate cid maps\n");
  393. goto tables_alloc_fail;
  394. }
  395. return 0;
  396. tables_alloc_fail:
  397. qed_cxt_mngr_free(p_hwfn);
  398. return rc;
  399. }
  400. void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
  401. {
  402. if (!p_hwfn->p_cxt_mngr)
  403. return;
  404. qed_cid_map_free(p_hwfn);
  405. qed_ilt_shadow_free(p_hwfn);
  406. kfree(p_hwfn->p_cxt_mngr);
  407. p_hwfn->p_cxt_mngr = NULL;
  408. }
  409. void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
  410. {
  411. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  412. int type;
  413. /* Reset acquired cids */
  414. for (type = 0; type < MAX_CONN_TYPES; type++) {
  415. u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
  416. if (cid_cnt == 0)
  417. continue;
  418. memset(p_mngr->acquired[type].cid_map, 0,
  419. DIV_ROUND_UP(cid_cnt,
  420. sizeof(unsigned long) * BITS_PER_BYTE) *
  421. sizeof(unsigned long));
  422. }
  423. }
/* CDU Common */
#define CDUC_CXT_SIZE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT

#define CDUC_CXT_SIZE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)

#define CDUC_BLOCK_WASTE_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT

#define CDUC_BLOCK_WASTE_MASK \
	(CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)

#define CDUC_NCIB_SHIFT \
	CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT

#define CDUC_NCIB_MASK \
	(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)

/* Program the CDU address parameters: connection-context size, the bytes
 * wasted at the end of each ILT page, and the number of contexts per page.
 */
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
	u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;

	/* CDUC - connection configuration */
	page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	cxt_size = CONN_CXT_SIZE(p_hwfn);
	/* Contexts do not straddle pages; the remainder is block waste. */
	elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
	block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;

	SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
	SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
	SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);

	STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
}
/* Fill the QM PF runtime-init parameter block from the hwfn's QM info
 * and invoke the QM runtime initialization.
 */
void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_iids iids;

	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);

	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_first_pf = p_hwfn->first_on_engine;
	params.num_pf_cids = iids.cids;
	params.start_pq = qm_info->start_pq;
	params.num_pf_pqs = qm_info->num_pqs;
	/* NOTE(review): start_vport is filled from num_vports; verify this
	 * is intentional and not meant to be a starting vport index.
	 */
	params.start_vport = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;

	qed_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, &params);
}
  472. /* CM PF */
  473. static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
  474. {
  475. union qed_qm_pq_params pq_params;
  476. u16 pq;
  477. /* XCM pure-LB queue */
  478. memset(&pq_params, 0, sizeof(pq_params));
  479. pq_params.core.tc = LB_TC;
  480. pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
  481. STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq);
  482. return 0;
  483. }
  484. /* DQ PF */
  485. static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
  486. {
  487. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  488. u32 dq_pf_max_cid = 0;
  489. dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
  490. STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
  491. dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
  492. STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
  493. dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
  494. STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
  495. dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
  496. STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
  497. dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
  498. STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
  499. /* 5 - PF */
  500. dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
  501. STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
  502. }
  503. static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
  504. {
  505. struct qed_ilt_client_cfg *ilt_clients;
  506. int i;
  507. ilt_clients = p_hwfn->p_cxt_mngr->clients;
  508. for_each_ilt_valid_client(i, ilt_clients) {
  509. if (!ilt_clients[i].active)
  510. continue;
  511. STORE_RT_REG(p_hwfn,
  512. ilt_clients[i].first.reg,
  513. ilt_clients[i].first.val);
  514. STORE_RT_REG(p_hwfn,
  515. ilt_clients[i].last.reg,
  516. ilt_clients[i].last.val);
  517. STORE_RT_REG(p_hwfn,
  518. ilt_clients[i].p_size.reg,
  519. ilt_clients[i].p_size.val);
  520. }
  521. }
/* ILT (PSWRQ2) PF */
/* Write one 64-bit ILT entry (valid bit + 4K-aligned physical address)
 * into the runtime array for every ILT line of every active client.
 */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
	struct qed_ilt_client_cfg *clients;
	struct qed_cxt_mngr *p_mngr;
	struct qed_dma_mem *p_shdw;
	u32 line, rt_offst, i;

	qed_ilt_bounds_init(p_hwfn);

	p_mngr = p_hwfn->p_cxt_mngr;
	p_shdw = p_mngr->ilt_shadow;
	clients = p_hwfn->p_cxt_mngr->clients;

	for_each_ilt_valid_client(i, clients) {
		if (!clients[i].active)
			continue;

		/* Client's 1st val and RT array are absolute, ILT shadows'
		 * lines are relative.
		 */
		line = clients[i].first.val - p_mngr->pf_start_line;
		rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
			   clients[i].first.val * ILT_ENTRY_IN_REGS;

		for (; line <= clients[i].last.val - p_mngr->pf_start_line;
		     line++, rt_offst += ILT_ENTRY_IN_REGS) {
			u64 ilt_hw_entry = 0;

			/* p_virt could be NULL incase of dynamic
			 * allocation; such lines get a zero (invalid) entry.
			 */
			if (p_shdw[line].p_virt) {
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
				SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
					  (p_shdw[line].p_phys >> 12));

				DP_VERBOSE(p_hwfn, QED_MSG_ILT,
					   "Setting RT[0x%08x] from ILT[0x%08x] [Client is %d] to Physical addr: 0x%llx\n",
					   rt_offst, line, i,
					   (u64)(p_shdw[line].p_phys >> 12));
			}

			STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
		}
	}
}
/* Engine-common context HW init: currently only the CDU parameters. */
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
	qed_cdu_init_common(p_hwfn);
}
/* Per-PF context HW init: QM, CM, DQ and ILT runtime configuration. */
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
{
	qed_qm_init_pf(p_hwfn);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_ilt_init_pf(p_hwfn);
}
  572. int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
  573. enum protocol_type type,
  574. u32 *p_cid)
  575. {
  576. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  577. u32 rel_cid;
  578. if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
  579. DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
  580. return -EINVAL;
  581. }
  582. rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
  583. p_mngr->acquired[type].max_count);
  584. if (rel_cid >= p_mngr->acquired[type].max_count) {
  585. DP_NOTICE(p_hwfn, "no CID available for protocol %d\n",
  586. type);
  587. return -EINVAL;
  588. }
  589. __set_bit(rel_cid, p_mngr->acquired[type].cid_map);
  590. *p_cid = rel_cid + p_mngr->acquired[type].start_cid;
  591. return 0;
  592. }
  593. static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
  594. u32 cid,
  595. enum protocol_type *p_type)
  596. {
  597. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  598. struct qed_cid_acquired_map *p_map;
  599. enum protocol_type p;
  600. u32 rel_cid;
  601. /* Iterate over protocols and find matching cid range */
  602. for (p = 0; p < MAX_CONN_TYPES; p++) {
  603. p_map = &p_mngr->acquired[p];
  604. if (!p_map->cid_map)
  605. continue;
  606. if (cid >= p_map->start_cid &&
  607. cid < p_map->start_cid + p_map->max_count)
  608. break;
  609. }
  610. *p_type = p;
  611. if (p == MAX_CONN_TYPES) {
  612. DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
  613. return false;
  614. }
  615. rel_cid = cid - p_map->start_cid;
  616. if (!test_bit(rel_cid, p_map->cid_map)) {
  617. DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
  618. return false;
  619. }
  620. return true;
  621. }
  622. void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
  623. u32 cid)
  624. {
  625. struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
  626. enum protocol_type type;
  627. bool b_acquired;
  628. u32 rel_cid;
  629. /* Test acquired and find matching per-protocol map */
  630. b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
  631. if (!b_acquired)
  632. return;
  633. rel_cid = cid - p_mngr->acquired[type].start_cid;
  634. __clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
  635. }
/* Resolve p_info->iid to its protocol type and a virtual pointer to its
 * connection context within the ILT shadow.  Returns -EINVAL when the
 * CID was never acquired or its shadow page is not allocated.
 */
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
			 struct qed_cxt_info *p_info)
{
	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
	enum protocol_type type;
	bool b_acquired;

	/* Test acquired and find matching per-protocol map */
	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
	if (!b_acquired)
		return -EINVAL;

	/* set the protocol type */
	p_info->type = type;

	/* compute context virtual pointer: iid / contexts-per-page gives
	 * the shadow line, iid % contexts-per-page the offset within it.
	 */
	hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
	conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
	cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
	line = p_info->iid / cxts_per_p;

	/* Make sure context is allocated (dynamic allocation) */
	if (!p_mngr->ilt_shadow[line].p_virt)
		return -EINVAL;

	p_info->p_cxt = p_mngr->ilt_shadow[line].p_virt +
			p_info->iid % cxts_per_p * conn_cxt_size;

	DP_VERBOSE(p_hwfn, (QED_MSG_ILT | QED_MSG_CXT),
		   "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
		   p_info->iid / cxts_per_p, p_info->p_cxt, p_info->iid);

	return 0;
}
  664. int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
  665. {
  666. struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
  667. /* Set the number of required CORE connections */
  668. u32 core_cids = 1; /* SPQ */
  669. qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
  670. qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
  671. p_params->num_cons);
  672. return 0;
  673. }