nic_main.c

/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nic"
#define DRV_VERSION	"1.0"

struct nicpf {
	struct pci_dev		*pdev;
	u8			node;
	unsigned int		flags;
	u8			num_vf_en;	/* No of VF enabled */
	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem		*reg_base;	/* Register start address */
	u8			num_sqs_en;	/* Secondary qsets enabled */
	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg	pkind;
#define NIC_SET_VF_LMAC_MAP(bgx, lmac)		(((bgx & 0xF) << 4) | (lmac & 0xF))
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
	u8			vf_lmac_map[MAX_LMAC];
	struct delayed_work	dwork;
	struct workqueue_struct	*check_link;
	u8			link[MAX_LMAC];
	u8			duplex[MAX_LMAC];
	u32			speed[MAX_LMAC];
	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rss_ind_tbl_size;
	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];

	/* MSI-X */
	bool			msix_enabled;
	u8			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
};
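
/* First-pass ThunderX silicon is identified by a PCI revision ID below 8.
 * A few things differ on it, e.g. which mailbox register triggers the VF
 * interrupt and whether the MPI registers are used; see
 * nic_send_msg_to_vf() and nic_config_cpi().
 */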
static inline bool pass1_silicon(struct nicpf *nic)
{
	return nic->pdev->revision < 8;
}

/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nic_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation. All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver. The readq()/writeq() functions add
 * explicit ordering operations which in this case are redundant, and only
 * add overhead.
 */

/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	/* Enable mailbox interrupt for all 128 VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
}
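
/* Clear a single VF's bit in one of the two 64-VF mailbox interrupt
 * registers; @mbx_reg selects the low or high register.
 */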
static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}
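
/* Each VF owns a fixed mailbox window in PF register space; return the
 * offset of VF @vf's window.
 */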
static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}

/* Send a mailbox message to VF
 * @vf: VF to which this message is to be sent
 * @mbx: message to be sent
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}

/* Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;
	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* NACKs VF's mailbox message to signal that the PF was not able to
 * complete the requested action
 * @vf: VF to which NACK is to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Flush all in flight receive packets to memory and
 * bring down an active RQ
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx)
		mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	else
		mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		dev_err(&nic->pdev->dev,
			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}
	new_frs += ETH_HLEN;
	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
	return 0;
}

/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	u64 lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}

/* Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
						NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16 bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);
	}
}

#define BGX0_BLOCK 8
#define BGX1_BLOCK 9
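
/* One-time setup of the NIC block: enable it, turn on backpressure, put
 * both BGX interfaces in TNS bypass mode, and program the default PKIND,
 * the minimum Tx padding, the interrupt timer and the VLAN stripping rule.
 */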
static void nic_init_hw(struct nicpf *nic)
{
	int i;
	u64 cqm_cfg;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);

	/* Disable TNS mode on both interfaces */
	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		      (1ULL << 63) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | BGX1_BLOCK);

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
			      *(u64 *)&nic->pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);

	/* Check if HW expected value is higher (could be in future chips) */
	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}

/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8  qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}

/* Responds to VF with its RSS indirection table size */
static void nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *msg;

	msg = (u64 *)&mbx;

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Receive side scaling configuration
 * configure:
 * - RSS index
 * - indir table i.e. hash::RQ mapping
 * - no of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8  qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/* 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;

	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;
	if (sq->sqs_mode)
		tl4 += vnic * 8;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}

/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}

/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}

/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int sqs;

	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
		if (!nic->sqs_used[sqs])
			nic->sqs_used[sqs] = true;
		else
			continue;
		return sqs + nic->num_vf_en;
	}
	return -1;
}

/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}
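
/* Enable or disable internal loopback on the BGX LMAC mapped to the
 * requesting VF.
 */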
static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id >= MAX_LMAC)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}
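
/* Record the VF's enabled state and, for VFs that own an LMAC (the
 * primary Qset VFs), enable or disable Rx/Tx on that LMAC accordingly.
 */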
static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
	int bgx, lmac;

	nic->vf_enabled[vf] = enable;

	if (vf >= nic->num_vf_en)
		return;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < MAX_LMAC) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if it's a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		if (vf >= nic->num_vf_en)
			break;
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (!ret)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = false;
}
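
/* Walk the 64 VF bits of one mailbox interrupt register, service each
 * pending VF's message and then clear its interrupt bit.
 */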
static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
	u64 intr;
	u8  vf, vf_per_mbx_reg = 64;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
				vf + (mbx * vf_per_mbx_reg));
			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
}

static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;

	nic_mbx_intr_handler(nic, 0);

	return IRQ_HANDLED;
}

static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;

	nic_mbx_intr_handler(nic, 1);

	return IRQ_HANDLED;
}
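
/* Allocate all of the PF's MSI-X vectors; the mailbox handlers are
 * attached to them in nic_register_interrupts().
 */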
static int nic_enable_msix(struct nicpf *nic)
{
	int i, ret;

	nic->num_vec = NIC_PF_MSIX_VECTORS;

	for (i = 0; i < nic->num_vec; i++)
		nic->msix_entries[i].entry = i;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		dev_err(&nic->pdev->dev,
			"Request for #%d msix vectors failed\n",
			nic->num_vec);
		return ret;
	}

	nic->msix_enabled = 1;
	return 0;
}

static void nic_disable_msix(struct nicpf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}
}

static int nic_register_interrupts(struct nicpf *nic)
{
	int ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret)
		return ret;

	/* Register mailbox interrupt handlers */
	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
			  nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
	if (ret)
		goto fail;

	nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;

	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
			  nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
	if (ret)
		goto fail;

	nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return 0;

fail:
	dev_err(&nic->pdev->dev, "Request irq failed\n");
	nic_free_all_interrupts(nic);
	return ret;
}

static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}
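
/* Work out how many of the remaining SR-IOV VFs can be handed out as
 * secondary Qsets: bounded by the device's total VF count and by the
 * per-VF secondary Qset cap, which depends on whether this is a
 * multi-node system.
 */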
static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
	u16 total_vf;

	/* Check if it's a multi-node environment */
	if (nr_node_ids > 1)
		sqs_per_vf = MAX_SQS_PER_VF;

	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}
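
/* Enable SR-IOV: expose one VF per usable LMAC plus however many extra
 * VFs can serve as secondary Qsets.
 */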
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
	int pos = 0;
	int vf_en;
	int err;
	u16 total_vf_cnt;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
		return -ENODEV;
	}

	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
	if (total_vf_cnt < nic->num_vf_en)
		nic->num_vf_en = total_vf_cnt;

	if (!total_vf_cnt)
		return 0;

	vf_en = nic->num_vf_en;
	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
	vf_en += nic->num_sqs_en;

	err = pci_enable_sriov(pdev, vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			vf_en);
		nic->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 vf_en);

	nic->flags |= NIC_SRIOV_ENABLED;
	return 0;
}

/* Poll for BGX LMAC link status and update corresponding VF
 * if there is a change; valid only if internal L2 switch
 * is not present, otherwise VF link is always treated as up
 */
static void nic_poll_for_link(struct work_struct *work)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	u8 vf, bgx, lmac;

	nic = container_of(work, struct nicpf, dwork.work);

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_enabled[vf])
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
}
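
/* PCI probe: enable the device, map the PF register BAR, initialise the
 * NIC block, set up the VF::LMAC mapping, register the mailbox
 * interrupts, enable SR-IOV and start the link-status poller.
 */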
static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	int err;

	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nic->node = nic_get_node_id(pdev);

	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	/* Register a physical link status poll fn() */
	nic->check_link = alloc_workqueue("check_link_status",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!nic->check_link) {
		err = -ENOMEM;
		goto err_disable_sriov;
	}

	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
	queue_delayed_work(nic->check_link, &nic->dwork, 0);

	return 0;

err_disable_sriov:
	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);
err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
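
/* PCI remove: undo everything done in nic_probe() in reverse order */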
static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (!nic)
		return;

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	if (nic->check_link) {
		/* Destroy work Queue */
		cancel_delayed_work_sync(&nic->dwork);
		destroy_workqueue(nic->check_link);
	}

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver nic_driver = {
	.name = DRV_NAME,
	.id_table = nic_id_table,
	.probe = nic_probe,
	.remove = nic_remove,
};

static int __init nic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nic_driver);
}

static void __exit nic_cleanup_module(void)
{
	pci_unregister_driver(&nic_driver);
}

module_init(nic_init_module);
module_exit(nic_cleanup_module);