fm10k_pf.c

/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */
#include "fm10k_pf.h"
#include "fm10k_vf.h"

/**
 * fm10k_reset_hw_pf - PF hardware reset
 * @hw: pointer to hardware structure
 *
 * This function should return the hardware to a state similar to the
 * one it is in after being powered on.
 **/
static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
{
	s32 err;
	u32 reg;
	u16 i;

	/* Disable interrupts */
	fm10k_write_reg(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(ALL));

	/* Lock ITR2 reg 0 into itself and disable interrupt moderation */
	fm10k_write_reg(hw, FM10K_ITR2(0), 0);
	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);

	/* We assume here Tx and Rx queue 0 are owned by the PF */

	/* Shut off VF access to their queues forcing them to queue 0 */
	for (i = 0; i < FM10K_TQMAP_TABLE_SIZE; i++) {
		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
	}

	/* shut down all rings */
	err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
	if (err)
		return err;

	/* Verify that DMA is no longer active */
	reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
	if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
		return FM10K_ERR_DMA_PENDING;

	/* verify the switch is ready for reset */
	reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
	if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
		goto out;

	/* Initiate data path reset */
	reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
	fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);

	/* Flush write and allow 100us for reset to complete */
	fm10k_write_flush(hw);
	udelay(FM10K_RESET_TIMEOUT);

	/* Verify we made it out of reset */
	reg = fm10k_read_reg(hw, FM10K_IP);
	if (!(reg & FM10K_IP_NOTINRESET))
		err = FM10K_ERR_RESET_FAILED;

out:
	return err;
}
/**
 * fm10k_is_ari_hierarchy_pf - Indicate ARI hierarchy support
 * @hw: pointer to hardware structure
 *
 * Looks at the ARI hierarchy bit to determine whether ARI is supported or not.
 **/
static bool fm10k_is_ari_hierarchy_pf(struct fm10k_hw *hw)
{
	u16 sriov_ctrl = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_SRIOV_CTRL);

	return !!(sriov_ctrl & FM10K_PCIE_SRIOV_CTRL_VFARI);
}

/**
 * fm10k_init_hw_pf - PF hardware initialization
 * @hw: pointer to hardware structure
 *
 **/
static s32 fm10k_init_hw_pf(struct fm10k_hw *hw)
{
	u32 dma_ctrl, txqctl;
	u16 i;

	/* Establish default VSI as valid */
	fm10k_write_reg(hw, FM10K_DGLORTDEC(fm10k_dglort_default), 0);
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_default),
			FM10K_DGLORTMAP_ANY);

	/* Invalidate all other GLORT entries */
	for (i = 1; i < FM10K_DGLORT_COUNT; i++)
		fm10k_write_reg(hw, FM10K_DGLORTMAP(i), FM10K_DGLORTMAP_NONE);

	/* reset ITR2(0) to point to itself */
	fm10k_write_reg(hw, FM10K_ITR2(0), 0);

	/* reset VF ITR2(0) to point to 0 so it avoids the PF registers */
	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), 0);

	/* loop through all PF ITR2 registers pointing them to the previous */
	for (i = 1; i < FM10K_ITR_REG_COUNT_PF; i++)
		fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);

	/* Enable interrupt moderator if not already enabled */
	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);

	/* compute the default txqctl configuration */
	txqctl = FM10K_TXQCTL_PF | FM10K_TXQCTL_UNLIMITED_BW |
		 (hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT);

	for (i = 0; i < FM10K_MAX_QUEUES; i++) {
		/* configure rings for 256 Queue / 32 Descriptor cache mode */
		fm10k_write_reg(hw, FM10K_TQDLOC(i),
				(i * FM10K_TQDLOC_BASE_32_DESC) |
				FM10K_TQDLOC_SIZE_32_DESC);
		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);

		/* configure rings to provide TPH processing hints */
		fm10k_write_reg(hw, FM10K_TPH_TXCTRL(i),
				FM10K_TPH_TXCTRL_DESC_TPHEN |
				FM10K_TPH_TXCTRL_DESC_RROEN |
				FM10K_TPH_TXCTRL_DESC_WROEN |
				FM10K_TPH_TXCTRL_DATA_RROEN);
		fm10k_write_reg(hw, FM10K_TPH_RXCTRL(i),
				FM10K_TPH_RXCTRL_DESC_TPHEN |
				FM10K_TPH_RXCTRL_DESC_RROEN |
				FM10K_TPH_RXCTRL_DATA_WROEN |
				FM10K_TPH_RXCTRL_HDR_WROEN);
	}

	/* set max hold interval to align with 1.024 usec in all modes */
	switch (hw->bus.speed) {
	case fm10k_bus_speed_2500:
		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN1;
		break;
	case fm10k_bus_speed_5000:
		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN2;
		break;
	case fm10k_bus_speed_8000:
		dma_ctrl = FM10K_DMA_CTRL_MAX_HOLD_1US_GEN3;
		break;
	default:
		dma_ctrl = 0;
		break;
	}

	/* Configure TSO flags */
	fm10k_write_reg(hw, FM10K_DTXTCPFLGL, FM10K_TSO_FLAGS_LOW);
	fm10k_write_reg(hw, FM10K_DTXTCPFLGH, FM10K_TSO_FLAGS_HI);

	/* Enable DMA engine
	 * Set Rx Descriptor size to 32
	 * Set Minimum MSS to 64
	 * Set Maximum number of Rx queues to 256 / 32 Descriptor
	 */
	dma_ctrl |= FM10K_DMA_CTRL_TX_ENABLE | FM10K_DMA_CTRL_RX_ENABLE |
		    FM10K_DMA_CTRL_RX_DESC_SIZE | FM10K_DMA_CTRL_MINMSS_64 |
		    FM10K_DMA_CTRL_32_DESC;
	fm10k_write_reg(hw, FM10K_DMA_CTRL, dma_ctrl);

	/* record maximum queue count, we limit ourselves to 128 */
	hw->mac.max_queues = FM10K_MAX_QUEUES_PF;

	/* We support either 64 VFs or 7 VFs depending on if we have ARI */
	hw->iov.total_vfs = fm10k_is_ari_hierarchy_pf(hw) ? 64 : 7;

	return 0;
}
/**
 * fm10k_update_vlan_pf - Update status of VLAN ID in VLAN filter table
 * @hw: pointer to hardware structure
 * @vid: VLAN ID to add to table
 * @vsi: Index indicating VF ID or PF ID in table
 * @set: Indicates if this is a set or clear operation
 *
 * This function adds or removes the corresponding VLAN ID from the VLAN
 * filter table for the corresponding function.  In addition to the
 * standard set/clear that supports one bit a multi-bit write is
 * supported to set 64 bits at a time.
 **/
static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
{
	u32 vlan_table, reg, mask, bit, len;

	/* verify the VSI index is valid */
	if (vsi > FM10K_VLAN_TABLE_VSI_MAX)
		return FM10K_ERR_PARAM;

	/* VLAN multi-bit write:
	 * The multi-bit write has several parts to it.
	 *    3                   2                   1                   0
	 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	 * | RSVD0 |       Length        |C|RSVD0|        VLAN ID          |
	 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	 *
	 * VLAN ID: Vlan Starting value
	 * RSVD0: Reserved section, must be 0
	 * C: Flag field, 0 is set, 1 is clear (Used in VF VLAN message)
	 * Length: Number of times to repeat the bit being set
	 */
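	/* Worked example (illustrative note added here, not part of the
	 * original comment): a request of vid = (3 << 16) | 100 decodes to
	 * len = 3 and a starting VLAN ID of 100, so the loop below walks
	 * bits 100 through 103 of this VSI's table in a single call.
	 */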
	len = vid >> 16;
	vid = (vid << 17) >> 17;

	/* verify the reserved 0 fields are 0 */
	if (len >= FM10K_VLAN_TABLE_VID_MAX || vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* Loop through the table updating all required VLANs */
	for (reg = FM10K_VLAN_TABLE(vsi, vid / 32), bit = vid % 32;
	     len < FM10K_VLAN_TABLE_VID_MAX;
	     len -= 32 - bit, reg++, bit = 0) {
		/* record the initial state of the register */
		vlan_table = fm10k_read_reg(hw, reg);

		/* truncate mask if we are at the start or end of the run */
		mask = (~(u32)0 >> ((len < 31) ? 31 - len : 0)) << bit;

		/* make necessary modifications to the register */
		mask &= set ? ~vlan_table : vlan_table;
		if (mask)
			fm10k_write_reg(hw, reg, vlan_table ^ mask);
	}

	return 0;
}
/**
 * fm10k_read_mac_addr_pf - Read device MAC address
 * @hw: pointer to the HW structure
 *
 * Reads the device MAC address from the SM_AREA and stores the value.
 **/
static s32 fm10k_read_mac_addr_pf(struct fm10k_hw *hw)
{
	u8 perm_addr[ETH_ALEN];
	u32 serial_num;
	int i;
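	/* Layout example (illustrative note added here, not upstream text):
	 * the MAC address is split across the two SM_AREA words.  Assuming
	 * SM_AREA(1) = 0xA0B1C2FF and SM_AREA(0) = 0xFFD3E4F5, both of the
	 * all-1's checks below pass and the recovered address is
	 * a0:b1:c2:d3:e4:f5.
	 */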
	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(1));

	/* last byte should be all 1's */
	if ((~serial_num) << 24)
		return FM10K_ERR_INVALID_MAC_ADDR;

	perm_addr[0] = (u8)(serial_num >> 24);
	perm_addr[1] = (u8)(serial_num >> 16);
	perm_addr[2] = (u8)(serial_num >> 8);

	serial_num = fm10k_read_reg(hw, FM10K_SM_AREA(0));

	/* first byte should be all 1's */
	if ((~serial_num) >> 24)
		return FM10K_ERR_INVALID_MAC_ADDR;

	perm_addr[3] = (u8)(serial_num >> 16);
	perm_addr[4] = (u8)(serial_num >> 8);
	perm_addr[5] = (u8)(serial_num);

	for (i = 0; i < ETH_ALEN; i++) {
		hw->mac.perm_addr[i] = perm_addr[i];
		hw->mac.addr[i] = perm_addr[i];
	}

	return 0;
}
/**
 * fm10k_glort_valid_pf - Validate that the provided glort is valid
 * @hw: pointer to the HW structure
 * @glort: base glort to be validated
 *
 * This function will return an error if the provided glort is invalid
 **/
bool fm10k_glort_valid_pf(struct fm10k_hw *hw, u16 glort)
{
	glort &= hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT;

	return glort == (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE);
}
/**
 * fm10k_update_xc_addr_pf - Update device addresses
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @mac: MAC address to add/remove from table
 * @vid: VLAN ID to add/remove from table
 * @add: Indicates if this is an add or remove operation
 * @flags: flags field to indicate add and secure
 *
 * This function generates a message to the Switch API requesting
 * that the given logical port add/remove the given L2 MAC/VLAN address.
 **/
static s32 fm10k_update_xc_addr_pf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add, u8 flags)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	struct fm10k_mac_update mac_update;
	u32 msg[5];

	/* clear set bit from VLAN ID */
	vid &= ~FM10K_VLAN_CLEAR;

	/* if glort or vlan are not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort) || vid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* record fields */
	mac_update.mac_lower = cpu_to_le32(((u32)mac[2] << 24) |
					   ((u32)mac[3] << 16) |
					   ((u32)mac[4] << 8) |
					   ((u32)mac[5]));
	mac_update.mac_upper = cpu_to_le16(((u32)mac[0] << 8) |
					   ((u32)mac[1]));
	mac_update.vlan = cpu_to_le16(vid);
	mac_update.glort = cpu_to_le16(glort);
	mac_update.action = add ? 0 : 1;
	mac_update.flags = flags;

	/* populate mac_update fields */
	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_UPDATE_MAC_FWD_RULE);
	fm10k_tlv_attr_put_le_struct(msg, FM10K_PF_ATTR_ID_MAC_UPDATE,
				     &mac_update, sizeof(mac_update));

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
 * fm10k_update_uc_addr_pf - Update device unicast addresses
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @mac: MAC address to add/remove from table
 * @vid: VLAN ID to add/remove from table
 * @add: Indicates if this is an add or remove operation
 * @flags: flags field to indicate add and secure
 *
 * This function is used to add or remove unicast addresses for
 * the PF.
 **/
static s32 fm10k_update_uc_addr_pf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add, u8 flags)
{
	/* verify MAC address is valid */
	if (!is_valid_ether_addr(mac))
		return FM10K_ERR_PARAM;

	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, flags);
}

/**
 * fm10k_update_mc_addr_pf - Update device multicast addresses
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @mac: MAC address to add/remove from table
 * @vid: VLAN ID to add/remove from table
 * @add: Indicates if this is an add or remove operation
 *
 * This function is used to add or remove multicast MAC addresses for
 * the PF.
 **/
static s32 fm10k_update_mc_addr_pf(struct fm10k_hw *hw, u16 glort,
				   const u8 *mac, u16 vid, bool add)
{
	/* verify multicast address is valid */
	if (!is_multicast_ether_addr(mac))
		return FM10K_ERR_PARAM;

	return fm10k_update_xc_addr_pf(hw, glort, mac, vid, add, 0);
}
/**
 * fm10k_update_xcast_mode_pf - Request update of multicast mode
 * @hw: pointer to hardware structure
 * @glort: base resource tag for this request
 * @mode: integer value indicating mode being requested
 *
 * This function will attempt to request a higher mode for the port
 * so that it can enable either multicast, multicast promiscuous, or
 * promiscuous mode of operation.
 **/
static s32 fm10k_update_xcast_mode_pf(struct fm10k_hw *hw, u16 glort, u8 mode)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[3], xcast_mode;

	if (mode > FM10K_XCAST_MODE_NONE)
		return FM10K_ERR_PARAM;

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* write xcast mode as a single u32 value,
	 * lower 16 bits: glort
	 * upper 16 bits: mode
	 */
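	/* e.g. (illustrative note, not upstream text) a request for glort
	 * 0x0040 with mode m is packed as ((u32)m << 16) | 0x0040 before
	 * being placed into the TLV attribute below.
	 */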
	xcast_mode = ((u32)mode << 16) | glort;

	/* generate message requesting to change xcast mode */
	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_XCAST_MODES);
	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_XCAST_MODE, xcast_mode);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
 * fm10k_update_int_moderator_pf - Update interrupt moderator linked list
 * @hw: pointer to hardware structure
 *
 * This function walks through the MSI-X vector table to determine the
 * number of active interrupts and based on that information updates the
 * interrupt moderator linked list.
 **/
static void fm10k_update_int_moderator_pf(struct fm10k_hw *hw)
{
	u32 i;

	/* Disable interrupt moderator */
	fm10k_write_reg(hw, FM10K_INT_CTRL, 0);

	/* loop through PF vectors from last to first looking for an enabled one */
	for (i = FM10K_ITR_REG_COUNT_PF - 1; i; i--) {
		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
			break;
	}

	/* always reset VFITR2[0] to point to last enabled PF vector */
	fm10k_write_reg(hw, FM10K_ITR2(FM10K_ITR_REG_COUNT_PF), i);

	/* reset ITR2[0] to point to last enabled PF vector */
	if (!hw->iov.num_vfs)
		fm10k_write_reg(hw, FM10K_ITR2(0), i);

	/* Enable interrupt moderator */
	fm10k_write_reg(hw, FM10K_INT_CTRL, FM10K_INT_CTRL_ENABLEMODERATOR);
}
/**
 * fm10k_update_lport_state_pf - Notify the switch of a change in port state
 * @hw: pointer to the HW structure
 * @glort: base resource tag for this request
 * @count: number of logical ports being updated
 * @enable: boolean value indicating enable or disable
 *
 * This function is used to add/remove a logical port from the switch.
 **/
static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
				       u16 count, bool enable)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[3], lport_msg;

	/* do nothing if we are being asked to create or destroy 0 ports */
	if (!count)
		return 0;

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* construct the lport message from the 2 pieces of data we have */
	lport_msg = ((u32)count << 16) | glort;

	/* generate lport create/delete message */
	fm10k_tlv_msg_init(msg, enable ? FM10K_PF_MSG_ID_LPORT_CREATE :
					 FM10K_PF_MSG_ID_LPORT_DELETE);
	fm10k_tlv_attr_put_u32(msg, FM10K_PF_ATTR_ID_PORT, lport_msg);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
 * fm10k_configure_dglort_map_pf - Configures GLORT entry and queues
 * @hw: pointer to hardware structure
 * @dglort: pointer to dglort configuration structure
 *
 * Reads the configuration structure contained in dglort_cfg and uses
 * that information to then populate a DGLORTMAP/DEC entry and the queues
 * to which it has been assigned.
 **/
static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
					 struct fm10k_dglort_cfg *dglort)
{
	u16 glort, queue_count, vsi_count, pc_count;
	u16 vsi, queue, pc, q_idx;
	u32 txqctl, dglortdec, dglortmap;

	/* verify the dglort pointer */
	if (!dglort)
		return FM10K_ERR_PARAM;

	/* verify the dglort values */
	if ((dglort->idx > 7) || (dglort->rss_l > 7) || (dglort->pc_l > 3) ||
	    (dglort->vsi_l > 6) || (dglort->vsi_b > 64) ||
	    (dglort->queue_l > 8) || (dglort->queue_b >= 256))
		return FM10K_ERR_PARAM;

	/* determine count of VSIs and queues */
	queue_count = 1 << (dglort->rss_l + dglort->pc_l);
	vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
	glort = dglort->glort;
	q_idx = dglort->queue_b;

	/* configure SGLORT for queues */
	for (vsi = 0; vsi < vsi_count; vsi++, glort++) {
		for (queue = 0; queue < queue_count; queue++, q_idx++) {
			if (q_idx >= FM10K_MAX_QUEUES)
				break;

			fm10k_write_reg(hw, FM10K_TX_SGLORT(q_idx), glort);
			fm10k_write_reg(hw, FM10K_RX_SGLORT(q_idx), glort);
		}
	}

	/* determine count of PCs and queues */
	queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
	pc_count = 1 << dglort->pc_l;

	/* configure PC for Tx queues */
	for (pc = 0; pc < pc_count; pc++) {
		q_idx = pc + dglort->queue_b;
		for (queue = 0; queue < queue_count; queue++) {
			if (q_idx >= FM10K_MAX_QUEUES)
				break;

			txqctl = fm10k_read_reg(hw, FM10K_TXQCTL(q_idx));
			txqctl &= ~FM10K_TXQCTL_PC_MASK;
			txqctl |= pc << FM10K_TXQCTL_PC_SHIFT;
			fm10k_write_reg(hw, FM10K_TXQCTL(q_idx), txqctl);

			q_idx += pc_count;
		}
	}

	/* configure DGLORTDEC */
	dglortdec = ((u32)(dglort->rss_l) << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) |
		    ((u32)(dglort->queue_b) << FM10K_DGLORTDEC_QBASE_SHIFT) |
		    ((u32)(dglort->pc_l) << FM10K_DGLORTDEC_PCLENGTH_SHIFT) |
		    ((u32)(dglort->vsi_b) << FM10K_DGLORTDEC_VSIBASE_SHIFT) |
		    ((u32)(dglort->vsi_l) << FM10K_DGLORTDEC_VSILENGTH_SHIFT) |
		    ((u32)(dglort->queue_l));
	if (dglort->inner_rss)
		dglortdec |= FM10K_DGLORTDEC_INNERRSS_ENABLE;

	/* configure DGLORTMAP */
	dglortmap = (dglort->idx == fm10k_dglort_default) ?
			FM10K_DGLORTMAP_ANY : FM10K_DGLORTMAP_ZERO;
	dglortmap <<= dglort->vsi_l + dglort->queue_l + dglort->shared_l;
	dglortmap |= dglort->glort;

	/* write values to hardware */
	fm10k_write_reg(hw, FM10K_DGLORTDEC(dglort->idx), dglortdec);
	fm10k_write_reg(hw, FM10K_DGLORTMAP(dglort->idx), dglortmap);

	return 0;
}
u16 fm10k_queues_per_pool(struct fm10k_hw *hw)
{
	u16 num_pools = hw->iov.num_pools;

	return (num_pools > 32) ? 2 : (num_pools > 16) ? 4 : (num_pools > 8) ?
	       8 : FM10K_MAX_QUEUES_POOL;
}

u16 fm10k_vf_queue_index(struct fm10k_hw *hw, u16 vf_idx)
{
	u16 num_vfs = hw->iov.num_vfs;
	u16 vf_q_idx = FM10K_MAX_QUEUES;

	vf_q_idx -= fm10k_queues_per_pool(hw) * (num_vfs - vf_idx);

	return vf_q_idx;
}
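/* Worked example (added note, not upstream text): with 32 VFs each pool
 * gets 4 queues per the ladder above, so fm10k_vf_queue_index(hw, 0)
 * yields 256 - 4 * 32 = 128; VF pools are carved from the top of the 256
 * hardware queues while the first 128 stay with the PF.
 */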
static u16 fm10k_vectors_per_pool(struct fm10k_hw *hw)
{
	u16 num_pools = hw->iov.num_pools;

	return (num_pools > 32) ? 8 : (num_pools > 16) ? 16 :
	       FM10K_MAX_VECTORS_POOL;
}

static u16 fm10k_vf_vector_index(struct fm10k_hw *hw, u16 vf_idx)
{
	u16 vf_v_idx = FM10K_MAX_VECTORS_PF;

	vf_v_idx += fm10k_vectors_per_pool(hw) * vf_idx;

	return vf_v_idx;
}
/**
 * fm10k_iov_assign_resources_pf - Assign pool resources for virtualization
 * @hw: pointer to the HW structure
 * @num_vfs: number of VFs to be allocated
 * @num_pools: number of virtualization pools to be allocated
 *
 * Allocates queues and traffic classes to virtualization entities to prepare
 * the PF for SR-IOV and VMDq
 **/
static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
					 u16 num_pools)
{
	u16 qmap_stride, qpp, vpp, vf_q_idx, vf_q_idx0, qmap_idx;
	u32 vid = hw->mac.default_vid << FM10K_TXQCTL_VID_SHIFT;
	int i, j;

	/* hardware only supports up to 64 pools */
	if (num_pools > 64)
		return FM10K_ERR_PARAM;

	/* the number of VFs cannot exceed the number of pools */
	if ((num_vfs > num_pools) || (num_vfs > hw->iov.total_vfs))
		return FM10K_ERR_PARAM;

	/* record number of virtualization entities */
	hw->iov.num_vfs = num_vfs;
	hw->iov.num_pools = num_pools;

	/* determine qmap offsets and counts */
	qmap_stride = (num_vfs > 8) ? 32 : 256;
	qpp = fm10k_queues_per_pool(hw);
	vpp = fm10k_vectors_per_pool(hw);

	/* calculate starting index for queues */
	vf_q_idx = fm10k_vf_queue_index(hw, 0);
	qmap_idx = 0;
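	/* Sizing note (added here, not upstream text): each VF owns a
	 * qmap_stride-sized window of TQMAP/RQMAP entries, 32 entries when
	 * more than 8 VFs are in use and 256 otherwise; any leftover
	 * entries are pointed back at queue 0 by the loop at the end of
	 * this function.
	 */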
	/* establish TCs with -1 credits and no quanta to prevent transmit */
	for (i = 0; i < num_vfs; i++) {
		fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(i), 0);
		fm10k_write_reg(hw, FM10K_TC_RATE(i), 0);
		fm10k_write_reg(hw, FM10K_TC_CREDIT(i),
				FM10K_TC_CREDIT_CREDIT_MASK);
	}

	/* zero out all mbmem registers */
	for (i = FM10K_VFMBMEM_LEN * num_vfs; i--;)
		fm10k_write_reg(hw, FM10K_MBMEM(i), 0);

	/* clear event notification of VF FLR */
	fm10k_write_reg(hw, FM10K_PFVFLREC(0), ~0);
	fm10k_write_reg(hw, FM10K_PFVFLREC(1), ~0);

	/* loop through unallocated rings assigning them back to PF */
	for (i = FM10K_MAX_QUEUES_PF; i < vf_q_idx; i++) {
		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
		fm10k_write_reg(hw, FM10K_TXQCTL(i), FM10K_TXQCTL_PF |
				FM10K_TXQCTL_UNLIMITED_BW | vid);
		fm10k_write_reg(hw, FM10K_RXQCTL(i), FM10K_RXQCTL_PF);
	}

	/* PF should have already updated VFITR2[0] */

	/* update all ITR registers to flow to VFITR2[0] */
	for (i = FM10K_ITR_REG_COUNT_PF + 1; i < FM10K_ITR_REG_COUNT; i++) {
		if (!(i & (vpp - 1)))
			fm10k_write_reg(hw, FM10K_ITR2(i), i - vpp);
		else
			fm10k_write_reg(hw, FM10K_ITR2(i), i - 1);
	}

	/* update PF ITR2[0] to reference the last vector */
	fm10k_write_reg(hw, FM10K_ITR2(0),
			fm10k_vf_vector_index(hw, num_vfs - 1));

	/* loop through rings populating rings and TCs */
	for (i = 0; i < num_vfs; i++) {
		/* record index for VF queue 0 for use in end of loop */
		vf_q_idx0 = vf_q_idx;

		for (j = 0; j < qpp; j++, qmap_idx++, vf_q_idx++) {
			/* assign VF and locked TC to queues */
			fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
			fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx),
					(i << FM10K_TXQCTL_TC_SHIFT) | i |
					FM10K_TXQCTL_VF | vid);
			fm10k_write_reg(hw, FM10K_RXDCTL(vf_q_idx),
					FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
					FM10K_RXDCTL_DROP_ON_EMPTY);
			fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
					FM10K_RXQCTL_VF |
					(i << FM10K_RXQCTL_VF_SHIFT));

			/* map queue pair to VF */
			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx);
		}

		/* repeat the first ring for all of the remaining VF rings */
		for (; j < qmap_stride; j++, qmap_idx++) {
			fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx0);
			fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), vf_q_idx0);
		}
	}

	/* loop through remaining indexes assigning all to queue 0 */
	while (qmap_idx < FM10K_TQMAP_TABLE_SIZE) {
		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx), 0);
		qmap_idx++;
	}

	return 0;
}
/**
 * fm10k_iov_configure_tc_pf - Configure the shaping group for VF
 * @hw: pointer to the HW structure
 * @vf_idx: index of VF receiving GLORT
 * @rate: Rate indicated in Mb/s
 *
 * Configures the TC for a given VF to allow only up to a given number
 * of Mb/s of outgoing Tx throughput.
 **/
static s32 fm10k_iov_configure_tc_pf(struct fm10k_hw *hw, u16 vf_idx, int rate)
{
	/* configure defaults */
	u32 interval = FM10K_TC_RATE_INTERVAL_4US_GEN3;
	u32 tc_rate = FM10K_TC_RATE_QUANTA_MASK;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* set interval to align with 4.096 usec in all modes */
	switch (hw->bus.speed) {
	case fm10k_bus_speed_2500:
		interval = FM10K_TC_RATE_INTERVAL_4US_GEN1;
		break;
	case fm10k_bus_speed_5000:
		interval = FM10K_TC_RATE_INTERVAL_4US_GEN2;
		break;
	default:
		break;
	}

	if (rate) {
		if (rate > FM10K_VF_TC_MAX || rate < FM10K_VF_TC_MIN)
			return FM10K_ERR_PARAM;

		/* The quanta is measured in Bytes per 4.096 or 8.192 usec
		 * The rate is provided in Mbits per second
		 * To translate from rate to quanta we need to multiply the
		 * rate by 8.192 usec and divide by 8 bits/byte.  To avoid
		 * dealing with floating point we can round the values up
		 * to the nearest whole number ratio which gives us 128 / 125.
		 */
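		/* Worked example (added note, not upstream text): for
		 * rate = 1000 Mb/s, tc_rate = 1000 * 128 / 125 = 1024, i.e.
		 * 1024 bytes every 8.192 usec, which is 1 Gb/s.  Rates below
		 * 4 Gb/s keep that value and double the interval; higher
		 * rates halve tc_rate against the 4.096 usec interval.
		 */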
		tc_rate = (rate * 128) / 125;

		/* try to keep the rate limiting accurate by increasing
		 * the number of credits and interval for rates less than 4Gb/s
		 */
		if (rate < 4000)
			interval <<= 1;
		else
			tc_rate >>= 1;
	}

	/* update rate limiter with new values */
	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), tc_rate | interval);
	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);
	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx), FM10K_TC_MAXCREDIT_64K);

	return 0;
}
/**
 * fm10k_iov_assign_int_moderator_pf - Add VF interrupts to moderator list
 * @hw: pointer to the HW structure
 * @vf_idx: index of VF receiving GLORT
 *
 * Update the interrupt moderator linked list to include any MSI-X
 * interrupts which the VF has enabled in the MSI-X vector table.
 **/
static s32 fm10k_iov_assign_int_moderator_pf(struct fm10k_hw *hw, u16 vf_idx)
{
	u16 vf_v_idx, vf_v_limit, i;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* determine vector offset and count */
	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);

	/* search for first vector that is not masked */
	for (i = vf_v_limit - 1; i > vf_v_idx; i--) {
		if (!fm10k_read_reg(hw, FM10K_MSIX_VECTOR_MASK(i)))
			break;
	}

	/* reset linked list so it now includes our active vectors */
	if (vf_idx == (hw->iov.num_vfs - 1))
		fm10k_write_reg(hw, FM10K_ITR2(0), i);
	else
		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), i);

	return 0;
}
/**
 * fm10k_iov_assign_default_mac_vlan_pf - Assign a MAC and VLAN to VF
 * @hw: pointer to the HW structure
 * @vf_info: pointer to VF information structure
 *
 * Assign a MAC address and default VLAN to a VF and notify it of the update
 **/
static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
						struct fm10k_vf_info *vf_info)
{
	u16 qmap_stride, queues_per_pool, vf_q_idx, timeout, qmap_idx, i;
	u32 msg[4], txdctl, txqctl, tdbal = 0, tdbah = 0;
	s32 err = 0;
	u16 vf_idx, vf_vid;

	/* verify vf is in range */
	if (!vf_info || vf_info->vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* determine qmap offsets and counts */
	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
	queues_per_pool = fm10k_queues_per_pool(hw);

	/* calculate starting index for queues */
	vf_idx = vf_info->vf_idx;
	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);
	qmap_idx = qmap_stride * vf_idx;

	/* MAP Tx queue back to 0 temporarily, and disable it */
	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
	fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);

	/* determine correct default VLAN ID */
	if (vf_info->pf_vid)
		vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
	else
		vf_vid = vf_info->sw_vid;

	/* generate MAC_ADDR request */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_MAC_VLAN);
	fm10k_tlv_attr_put_mac_vlan(msg, FM10K_MAC_VLAN_MSG_DEFAULT_MAC,
				    vf_info->mac, vf_vid);

	/* load onto outgoing mailbox, ignore any errors on enqueue */
	if (vf_info->mbx.ops.enqueue_tx)
		vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);

	/* verify ring has been disabled before modifying base address registers */
	txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
	for (timeout = 0; txdctl & FM10K_TXDCTL_ENABLE; timeout++) {
		/* limit ourselves to a 1ms timeout */
		if (timeout == 10) {
			err = FM10K_ERR_DMA_PENDING;
			goto err_out;
		}

		usleep_range(100, 200);
		txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(vf_q_idx));
	}

	/* Update base address registers to contain MAC address */
	if (is_valid_ether_addr(vf_info->mac)) {
		tdbal = (((u32)vf_info->mac[3]) << 24) |
			(((u32)vf_info->mac[4]) << 16) |
			(((u32)vf_info->mac[5]) << 8);

		tdbah = (((u32)0xFF) << 24) |
			(((u32)vf_info->mac[0]) << 16) |
			(((u32)vf_info->mac[1]) << 8) |
			((u32)vf_info->mac[2]);
	}
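	/* Layout note (added here, not upstream text): the VF's MAC is
	 * stashed in the otherwise-unused base address registers, e.g.
	 * 00:11:22:33:44:55 becomes tdbal = 0x33445500 and
	 * tdbah = 0xFF001122; the 0xFF marker mirrors the all-1's check
	 * used by fm10k_read_mac_addr_pf above.
	 */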
	/* Record the base address into queue 0 */
	fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx), tdbal);
	fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx), tdbah);

err_out:
	/* configure Queue control register */
	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) &
		 FM10K_TXQCTL_VID_MASK;
	txqctl |= (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
		  FM10K_TXQCTL_VF | vf_idx;

	/* assign VID */
	for (i = 0; i < queues_per_pool; i++)
		fm10k_write_reg(hw, FM10K_TXQCTL(vf_q_idx + i), txqctl);

	/* restore the queue back to VF ownership */
	fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
	return err;
}
/**
 * fm10k_iov_reset_resources_pf - Reassign queues and interrupts to a VF
 * @hw: pointer to the HW structure
 * @vf_info: pointer to VF information structure
 *
 * Reassign the interrupts and queues to a VF following an FLR
 **/
static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
					struct fm10k_vf_info *vf_info)
{
	u16 qmap_stride, queues_per_pool, vf_q_idx, qmap_idx;
	u32 tdbal = 0, tdbah = 0, txqctl, rxqctl;
	u16 vf_v_idx, vf_v_limit, vf_vid;
	u8 vf_idx = vf_info->vf_idx;
	int i;

	/* verify vf is in range */
	if (vf_idx >= hw->iov.num_vfs)
		return FM10K_ERR_PARAM;

	/* clear event notification of VF FLR */
	fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));

	/* force timeout and then disconnect the mailbox */
	vf_info->mbx.timeout = 0;
	if (vf_info->mbx.ops.disconnect)
		vf_info->mbx.ops.disconnect(hw, &vf_info->mbx);

	/* determine vector offset and count */
	vf_v_idx = fm10k_vf_vector_index(hw, vf_idx);
	vf_v_limit = vf_v_idx + fm10k_vectors_per_pool(hw);

	/* determine qmap offsets and counts */
	qmap_stride = (hw->iov.num_vfs > 8) ? 32 : 256;
	queues_per_pool = fm10k_queues_per_pool(hw);
	qmap_idx = qmap_stride * vf_idx;

	/* make all the queues inaccessible to the VF */
	for (i = qmap_idx; i < (qmap_idx + qmap_stride); i++) {
		fm10k_write_reg(hw, FM10K_TQMAP(i), 0);
		fm10k_write_reg(hw, FM10K_RQMAP(i), 0);
	}

	/* calculate starting index for queues */
	vf_q_idx = fm10k_vf_queue_index(hw, vf_idx);

	/* determine correct default VLAN ID */
	if (vf_info->pf_vid)
		vf_vid = vf_info->pf_vid;
	else
		vf_vid = vf_info->sw_vid;

	/* configure Queue control register */
	txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
		 (vf_idx << FM10K_TXQCTL_TC_SHIFT) |
		 FM10K_TXQCTL_VF | vf_idx;
	rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);

	/* stop further DMA and reset queue ownership back to VF */
	for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
		fm10k_write_reg(hw, FM10K_TXDCTL(i), 0);
		fm10k_write_reg(hw, FM10K_TXQCTL(i), txqctl);
		fm10k_write_reg(hw, FM10K_RXDCTL(i),
				FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
				FM10K_RXDCTL_DROP_ON_EMPTY);
		fm10k_write_reg(hw, FM10K_RXQCTL(i), rxqctl);
	}

	/* reset TC with -1 credits and no quanta to prevent transmit */
	fm10k_write_reg(hw, FM10K_TC_MAXCREDIT(vf_idx), 0);
	fm10k_write_reg(hw, FM10K_TC_RATE(vf_idx), 0);
	fm10k_write_reg(hw, FM10K_TC_CREDIT(vf_idx),
			FM10K_TC_CREDIT_CREDIT_MASK);

	/* update our first entry in the table based on previous VF */
	if (!vf_idx)
		hw->mac.ops.update_int_moderator(hw);
	else
		hw->iov.ops.assign_int_moderator(hw, vf_idx - 1);

	/* reset linked list so it now includes our active vectors */
	if (vf_idx == (hw->iov.num_vfs - 1))
		fm10k_write_reg(hw, FM10K_ITR2(0), vf_v_idx);
	else
		fm10k_write_reg(hw, FM10K_ITR2(vf_v_limit), vf_v_idx);

	/* link remaining vectors so that next points to previous */
	for (vf_v_idx++; vf_v_idx < vf_v_limit; vf_v_idx++)
		fm10k_write_reg(hw, FM10K_ITR2(vf_v_idx), vf_v_idx - 1);

	/* zero out MBMEM, VLAN_TABLE, RETA, RSSRK, and MRQC registers */
	for (i = FM10K_VFMBMEM_LEN; i--;)
		fm10k_write_reg(hw, FM10K_MBMEM_VF(vf_idx, i), 0);
	for (i = FM10K_VLAN_TABLE_SIZE; i--;)
		fm10k_write_reg(hw, FM10K_VLAN_TABLE(vf_info->vsi, i), 0);
	for (i = FM10K_RETA_SIZE; i--;)
		fm10k_write_reg(hw, FM10K_RETA(vf_info->vsi, i), 0);
	for (i = FM10K_RSSRK_SIZE; i--;)
		fm10k_write_reg(hw, FM10K_RSSRK(vf_info->vsi, i), 0);
	fm10k_write_reg(hw, FM10K_MRQC(vf_info->vsi), 0);

	/* Update base address registers to contain MAC address */
	if (is_valid_ether_addr(vf_info->mac)) {
		tdbal = (((u32)vf_info->mac[3]) << 24) |
			(((u32)vf_info->mac[4]) << 16) |
			(((u32)vf_info->mac[5]) << 8);
		tdbah = (((u32)0xFF) << 24) |
			(((u32)vf_info->mac[0]) << 16) |
			(((u32)vf_info->mac[1]) << 8) |
			((u32)vf_info->mac[2]);
	}

	/* map queue pairs back to VF from last to first */
	for (i = queues_per_pool; i--;) {
		fm10k_write_reg(hw, FM10K_TDBAL(vf_q_idx + i), tdbal);
		fm10k_write_reg(hw, FM10K_TDBAH(vf_q_idx + i), tdbah);
		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx + i);
		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx + i);
	}

	/* repeat the first ring for all the remaining VF rings */
	for (i = queues_per_pool; i < qmap_stride; i++) {
		fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx + i), vf_q_idx);
		fm10k_write_reg(hw, FM10K_RQMAP(qmap_idx + i), vf_q_idx);
	}

	return 0;
}
/**
 * fm10k_iov_set_lport_pf - Assign and enable a logical port for a given VF
 * @hw: pointer to hardware structure
 * @vf_info: pointer to VF information structure
 * @lport_idx: Logical port offset from the hardware glort
 * @flags: Set of capability flags to extend port beyond basic functionality
 *
 * This function allows enabling a VF port by assigning it a GLORT and
 * setting the flags so that it can enable an Rx mode.
 **/
static s32 fm10k_iov_set_lport_pf(struct fm10k_hw *hw,
				  struct fm10k_vf_info *vf_info,
				  u16 lport_idx, u8 flags)
{
	u16 glort = (hw->mac.dglort_map + lport_idx) & FM10K_DGLORTMAP_NONE;

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	vf_info->vf_flags = flags | FM10K_VF_FLAG_NONE_CAPABLE;
	vf_info->glort = glort;

	return 0;
}

/**
 * fm10k_iov_reset_lport_pf - Disable a logical port for a given VF
 * @hw: pointer to hardware structure
 * @vf_info: pointer to VF information structure
 *
 * This function disables a VF port by stripping it of a GLORT and
 * setting the flags so that it cannot enable any Rx mode.
 **/
static void fm10k_iov_reset_lport_pf(struct fm10k_hw *hw,
				     struct fm10k_vf_info *vf_info)
{
	u32 msg[1];

	/* need to disable the port if it is already enabled */
	if (FM10K_VF_FLAG_ENABLED(vf_info)) {
		/* notify switch that this port has been disabled */
		fm10k_update_lport_state_pf(hw, vf_info->glort, 1, false);

		/* generate port state response to notify VF it is not ready */
		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
		vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
	}

	/* clear flags and glort if it exists */
	vf_info->vf_flags = 0;
	vf_info->glort = 0;
}
/**
 * fm10k_iov_update_stats_pf - Updates hardware related statistics for VFs
 * @hw: pointer to hardware structure
 * @q: stats for all queues of a VF
 * @vf_idx: index of VF
 *
 * This function collects queue stats for VFs.
 **/
static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
				      struct fm10k_hw_stats_q *q,
				      u16 vf_idx)
{
	u32 idx, qpp;

	/* get stats for all of the queues */
	qpp = fm10k_queues_per_pool(hw);
	idx = fm10k_vf_queue_index(hw, vf_idx);
	fm10k_update_hw_stats_q(hw, q, idx, qpp);
}

static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
					 struct fm10k_vf_info *vf_info,
					 u64 timestamp)
{
	u32 msg[4];

	/* generate a 1588 timestamp message for the VF */
	fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
	fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);

	return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
}
/**
 * fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a default handler for MSI-X requests from the VF.  The
 * assumption is that in this case it is acceptable to just directly
 * hand off the message from the VF to the underlying shared code.
 **/
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
			  struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	u8 vf_idx = vf_info->vf_idx;

	return hw->iov.ops.assign_int_moderator(hw, vf_idx);
}
/**
 * fm10k_iov_select_vid - Select correct default VID
 * @vf_info: pointer to VF information structure
 * @vid: VID to correct
 *
 * Will report an error if VID is out of range. For VID = 0, it will return
 * either the pf_vid or sw_vid depending on which one is set.
 */
static inline s32 fm10k_iov_select_vid(struct fm10k_vf_info *vf_info, u16 vid)
{
	if (!vid)
		return vf_info->pf_vid ? vf_info->pf_vid : vf_info->sw_vid;
	else if (vf_info->pf_vid && vid != vf_info->pf_vid)
		return FM10K_ERR_PARAM;
	else
		return vid;
}
/**
 * fm10k_iov_msg_mac_vlan_pf - Message handler for MAC/VLAN request from VF
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a default handler for MAC/VLAN requests from the VF.
 * The assumption is that in this case it is acceptable to just directly
 * hand off the message from the VF to the underlying shared code.
 **/
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *hw, u32 **results,
			      struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		/* verify upper 16 bits are zero */
		if (vid >> 16)
			return FM10K_ERR_PARAM;

		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vid);
		if (err < 0)
			return err;
		else
			vid = err;

		/* update VSI info for VF in regards to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    memcmp(mac, vf_info->mac, ETH_ALEN))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;
		else
			vlan = err;

		/* notify switch of request for new unicast address */
		err = hw->mac.ops.update_uc_addr(hw, vf_info->glort,
						 mac, vlan, set, 0);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;
		else
			vlan = err;

		/* notify switch of request for new multicast address */
		err = hw->mac.ops.update_mc_addr(hw, vf_info->glort,
						 mac, vlan, set);
	}

	return err;
}
/**
 * fm10k_iov_supported_xcast_mode_pf - Determine best match for xcast mode
 * @vf_info: VF info structure containing capability flags
 * @mode: Requested xcast mode
 *
 * This function outputs the mode that most closely matches the requested
 * mode.  If no modes match it will request we disable the port
 **/
static u8 fm10k_iov_supported_xcast_mode_pf(struct fm10k_vf_info *vf_info,
					    u8 mode)
{
	u8 vf_flags = vf_info->vf_flags;

	/* match up mode to capabilities as best as possible */
	switch (mode) {
	case FM10K_XCAST_MODE_PROMISC:
		if (vf_flags & FM10K_VF_FLAG_PROMISC_CAPABLE)
			return FM10K_XCAST_MODE_PROMISC;
		/* fall through */
	case FM10K_XCAST_MODE_ALLMULTI:
		if (vf_flags & FM10K_VF_FLAG_ALLMULTI_CAPABLE)
			return FM10K_XCAST_MODE_ALLMULTI;
		/* fall through */
	case FM10K_XCAST_MODE_MULTI:
		if (vf_flags & FM10K_VF_FLAG_MULTI_CAPABLE)
			return FM10K_XCAST_MODE_MULTI;
		/* fall through */
	case FM10K_XCAST_MODE_NONE:
		if (vf_flags & FM10K_VF_FLAG_NONE_CAPABLE)
			return FM10K_XCAST_MODE_NONE;
		/* fall through */
	default:
		break;
	}

	/* disable interface as it should not be able to request any */
	return FM10K_XCAST_MODE_DISABLE;
}
/**
 * fm10k_iov_msg_lport_state_pf - Message handler for port state requests
 * @hw: Pointer to hardware structure
 * @results: Pointer array to message, results[0] is pointer to message
 * @mbx: Pointer to mailbox information structure
 *
 * This function is a default handler for port state requests.  The port
 * state requests for now are basic and consist of enabling or disabling
 * the port.
 **/
s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
				 struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	u32 *result;
	s32 err = 0;
	u32 msg[2];
	u8 mode = 0;

	/* verify VF is allowed to enable even minimal mode */
	if (!(vf_info->vf_flags & FM10K_VF_FLAG_NONE_CAPABLE))
		return FM10K_ERR_PARAM;

	if (!!results[FM10K_LPORT_STATE_MSG_XCAST_MODE]) {
		result = results[FM10K_LPORT_STATE_MSG_XCAST_MODE];

		/* XCAST mode update requested */
		err = fm10k_tlv_attr_get_u8(result, &mode);
		if (err)
			return FM10K_ERR_PARAM;

		/* prep for possible demotion depending on capabilities */
		mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);

		/* if mode is not currently enabled, enable it */
		if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
			fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);

		/* swap mode back to a bit flag */
		mode = FM10K_VF_FLAG_SET_MODE(mode);
	} else if (!results[FM10K_LPORT_STATE_MSG_DISABLE]) {
		/* need to disable the port if it is already enabled */
		if (FM10K_VF_FLAG_ENABLED(vf_info))
			err = fm10k_update_lport_state_pf(hw, vf_info->glort,
							  1, false);

		/* we need to clear VF_FLAG_ENABLED flags in order to ensure
		 * that we actually re-enable the LPORT state below. Note that
		 * this has no impact if the VF is already disabled, as the
		 * flags are already cleared.
		 */
		if (!err)
			vf_info->vf_flags = FM10K_VF_FLAG_CAPABLE(vf_info);

		/* when enabling the port we should reset the rate limiters */
		hw->iov.ops.configure_tc(hw, vf_info->vf_idx, vf_info->rate);

		/* set mode for minimal functionality */
		mode = FM10K_VF_FLAG_SET_MODE_NONE;

		/* generate port state response to notify VF it is ready */
		fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_LPORT_STATE);
		fm10k_tlv_attr_put_bool(msg, FM10K_LPORT_STATE_MSG_READY);
		mbx->ops.enqueue_tx(hw, mbx, msg);
	}

	/* if enable state toggled note the update */
	if (!err && (!FM10K_VF_FLAG_ENABLED(vf_info) != !mode))
		err = fm10k_update_lport_state_pf(hw, vf_info->glort, 1,
						  !!mode);

	/* if state change succeeded, then update our stored state */
	mode |= FM10K_VF_FLAG_CAPABLE(vf_info);
	if (!err)
		vf_info->vf_flags = mode;

	return err;
}
const struct fm10k_msg_data fm10k_iov_msg_data_pf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
/**
 * fm10k_update_hw_stats_pf - Updates hardware related statistics of PF
 * @hw: pointer to hardware structure
 * @stats: pointer to the stats structure to update
 *
 * This function collects and aggregates global and per queue hardware
 * statistics.
 **/
static void fm10k_update_hw_stats_pf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
	u32 id, id_prev;

	/* Use Tx queue 0 as a canary to detect a reset */
	id = fm10k_read_reg(hw, FM10K_TXQCTL(0));

	/* Read Global Statistics */
	do {
		timeout = fm10k_read_hw_stats_32b(hw, FM10K_STATS_TIMEOUT,
						  &stats->timeout);
		ur = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UR, &stats->ur);
		ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
		um = fm10k_read_hw_stats_32b(hw, FM10K_STATS_UM, &stats->um);
		xec = fm10k_read_hw_stats_32b(hw, FM10K_STATS_XEC, &stats->xec);
		vlan_drop = fm10k_read_hw_stats_32b(hw, FM10K_STATS_VLAN_DROP,
						    &stats->vlan_drop);
		loopback_drop = fm10k_read_hw_stats_32b(hw,
							FM10K_STATS_LOOPBACK_DROP,
							&stats->loopback_drop);
		nodesc_drop = fm10k_read_hw_stats_32b(hw,
						      FM10K_STATS_NODESC_DROP,
						      &stats->nodesc_drop);

		/* if value has not changed then we have consistent data */
		id_prev = id;
		id = fm10k_read_reg(hw, FM10K_TXQCTL(0));
	} while ((id ^ id_prev) & FM10K_TXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id &= FM10K_TXQCTL_ID_MASK;
	id |= FM10K_STAT_VALID;

	/* Update Global Statistics */
	if (stats->stats_idx == id) {
		stats->timeout.count += timeout;
		stats->ur.count += ur;
		stats->ca.count += ca;
		stats->um.count += um;
		stats->xec.count += xec;
		stats->vlan_drop.count += vlan_drop;
		stats->loopback_drop.count += loopback_drop;
		stats->nodesc_drop.count += nodesc_drop;
	}
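	/* Note: if the recorded owner ID above did not match, the deltas read
	 * in this pass are intentionally discarded; only the bases below are
	 * re-captured so counting restarts cleanly from the new ID.
	 */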
	/* Update bases and record current PF id */
	fm10k_update_hw_base_32b(&stats->timeout, timeout);
	fm10k_update_hw_base_32b(&stats->ur, ur);
	fm10k_update_hw_base_32b(&stats->ca, ca);
	fm10k_update_hw_base_32b(&stats->um, um);
	fm10k_update_hw_base_32b(&stats->xec, xec);
	fm10k_update_hw_base_32b(&stats->vlan_drop, vlan_drop);
	fm10k_update_hw_base_32b(&stats->loopback_drop, loopback_drop);
	fm10k_update_hw_base_32b(&stats->nodesc_drop, nodesc_drop);
	stats->stats_idx = id;

	/* Update Queue Statistics */
	fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
}
/**
 * fm10k_rebind_hw_stats_pf - Resets base for hardware statistics of PF
 * @hw: pointer to hardware structure
 * @stats: pointer to the stats structure to update
 *
 * This function resets the base for global and per queue hardware
 * statistics.
 **/
static void fm10k_rebind_hw_stats_pf(struct fm10k_hw *hw,
				     struct fm10k_hw_stats *stats)
{
	/* Unbind Global Statistics */
	fm10k_unbind_hw_stats_32b(&stats->timeout);
	fm10k_unbind_hw_stats_32b(&stats->ur);
	fm10k_unbind_hw_stats_32b(&stats->ca);
	fm10k_unbind_hw_stats_32b(&stats->um);
	fm10k_unbind_hw_stats_32b(&stats->xec);
	fm10k_unbind_hw_stats_32b(&stats->vlan_drop);
	fm10k_unbind_hw_stats_32b(&stats->loopback_drop);
	fm10k_unbind_hw_stats_32b(&stats->nodesc_drop);

	/* Unbind Queue Statistics */
	fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);

	/* Reinitialize bases for all stats */
	fm10k_update_hw_stats_pf(hw, stats);
}
/**
 * fm10k_set_dma_mask_pf - Configures PhyAddrSpace to limit DMA to system
 * @hw: pointer to hardware structure
 * @dma_mask: 64 bit DMA mask required for platform
 *
 * This function sets the PHYADDR.PhyAddrSpace bits for the endpoint in order
 * to limit the access to memory beyond what is physically in the system.
 **/
static void fm10k_set_dma_mask_pf(struct fm10k_hw *hw, u64 dma_mask)
{
	/* we need to write the upper 32 bits of DMA mask to PhyAddrSpace */
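	/* For example, a 48-bit DMA mask (0x0000FFFFFFFFFFFF) results in
	 * 0x0000FFFF being written to the PHYADDR register.
	 */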
	u32 phyaddr = (u32)(dma_mask >> 32);

	fm10k_write_reg(hw, FM10K_PHYADDR, phyaddr);
}
/**
 * fm10k_get_fault_pf - Record a fault in one of the interface units
 * @hw: pointer to hardware structure
 * @type: fault type register offset
 * @fault: pointer to memory location to record the fault
 *
 * Record the fault register contents to the fault data structure and
 * clear the entry from the register.
 *
 * Returns ERR_PARAM if invalid register is specified or no error is present.
 **/
static s32 fm10k_get_fault_pf(struct fm10k_hw *hw, int type,
			      struct fm10k_fault *fault)
{
	u32 func;

	/* verify the fault register is in range and is aligned */
	switch (type) {
	case FM10K_PCA_FAULT:
	case FM10K_THI_FAULT:
	case FM10K_FUM_FAULT:
		break;
	default:
		return FM10K_ERR_PARAM;
	}

	/* only service faults that are valid */
	func = fm10k_read_reg(hw, type + FM10K_FAULT_FUNC);
	if (!(func & FM10K_FAULT_FUNC_VALID))
		return FM10K_ERR_PARAM;

	/* read remaining fields, OR-ing in the low address word so the
	 * high word read above is not discarded
	 */
	fault->address = fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_HI);
	fault->address <<= 32;
	fault->address |= fm10k_read_reg(hw, type + FM10K_FAULT_ADDR_LO);
	fault->specinfo = fm10k_read_reg(hw, type + FM10K_FAULT_SPECINFO);

	/* clear valid bit to allow for next error */
	fm10k_write_reg(hw, type + FM10K_FAULT_FUNC, FM10K_FAULT_FUNC_VALID);

	/* Record which function triggered the error */
	if (func & FM10K_FAULT_FUNC_PF)
		fault->func = 0;
	else
		fault->func = 1 + ((func & FM10K_FAULT_FUNC_VF_MASK) >>
				   FM10K_FAULT_FUNC_VF_SHIFT);
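	/* e.g. a fault raised by the PF is recorded as func 0, while a fault
	 * raised by VF n is recorded as func n + 1
	 */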
	/* record fault type */
	fault->type = func & FM10K_FAULT_FUNC_TYPE_MASK;

	return 0;
}
/**
 * fm10k_request_lport_map_pf - Request LPORT map from the switch API
 * @hw: pointer to hardware structure
 *
 * This function generates an LPORT map request message and loads it onto
 * the outgoing mailbox so the switch manager can reply with the map.
 **/
static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	u32 msg[1];

	/* issue request asking for LPORT map */
	fm10k_tlv_msg_init(msg, FM10K_PF_MSG_ID_LPORT_MAP);

	/* load onto outgoing mailbox */
	return mbx->ops.enqueue_tx(hw, mbx, msg);
}
/**
 * fm10k_get_host_state_pf - Returns the state of the switch and mailbox
 * @hw: pointer to hardware structure
 * @switch_ready: pointer to boolean value that will record switch state
 *
 * This function will check the DMA_CTRL2 register and mailbox in order
 * to determine if the switch is ready for the PF to begin requesting
 * addresses and mapping traffic to the local interface.
 **/
static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
	s32 ret_val = 0;
	u32 dma_ctrl2;

	/* verify the switch is ready for interaction */
	dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
	if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
		goto out;

	/* retrieve generic host state info */
	ret_val = fm10k_get_host_state_generic(hw, switch_ready);
	if (ret_val)
		goto out;

	/* interface cannot receive traffic without logical ports */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		ret_val = fm10k_request_lport_map_pf(hw);

out:
	return ret_val;
}
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
	FM10K_TLV_ATTR_LAST
};
/**
 * fm10k_msg_lport_map_pf - Message handler for lport_map message from SM
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler configures the lport mapping based on the reply from the
 * switch API.
 **/
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *hw, u32 **results,
			   struct fm10k_mbx_info *mbx)
{
	u16 glort, mask;
	u32 dglort_map;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_LPORT_MAP],
				     &dglort_map);
	if (err)
		return err;

	/* extract values out of the header */
	glort = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_GLORT);
	mask = FM10K_MSG_HDR_FIELD_GET(dglort_map, LPORT_MAP_MASK);

	/* verify mask is set and none of the masked bits in glort are set */
	if (!mask || (glort & ~mask))
		return FM10K_ERR_PARAM;
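	/* e.g. glort 0x0040 with mask 0x003F fails the check above because
	 * the glort has a bit set outside of the mask
	 */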
	/* verify the mask is contiguous, and that it is 1's followed by 0's */
	if (((~(mask - 1) & mask) + mask) & FM10K_DGLORTMAP_NONE)
		return FM10K_ERR_PARAM;

	/* record the glort, mask, and port count */
	hw->mac.dglort_map = dglort_map;

	return 0;
}
const struct fm10k_tlv_attr fm10k_update_pvid_msg_attr[] = {
	FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_UPDATE_PVID),
	FM10K_TLV_ATTR_LAST
};

/**
 * fm10k_msg_update_pvid_pf - Message handler for port VLAN message from SM
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler configures the default VLAN for the PF
 **/
s32 fm10k_msg_update_pvid_pf(struct fm10k_hw *hw, u32 **results,
			     struct fm10k_mbx_info *mbx)
{
	u16 glort, pvid;
	u32 pvid_update;
	s32 err;

	err = fm10k_tlv_attr_get_u32(results[FM10K_PF_ATTR_ID_UPDATE_PVID],
				     &pvid_update);
	if (err)
		return err;

	/* extract values from the pvid update */
	glort = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_GLORT);
	pvid = FM10K_MSG_HDR_FIELD_GET(pvid_update, UPDATE_PVID_PVID);

	/* if glort is not valid return error */
	if (!fm10k_glort_valid_pf(hw, glort))
		return FM10K_ERR_PARAM;

	/* verify VID is valid */
	if (pvid >= FM10K_VLAN_TABLE_VID_MAX)
		return FM10K_ERR_PARAM;

	/* record the port VLAN ID value */
	hw->mac.default_vid = pvid;

	return 0;
}
/**
 * fm10k_record_global_table_data - Move global table data to swapi table info
 * @from: pointer to source table data structure
 * @to: pointer to destination table info structure
 *
 * This function will copy table_data to the table_info contained in
 * the hw struct.
 **/
static void fm10k_record_global_table_data(struct fm10k_global_table_data *from,
					   struct fm10k_swapi_table_info *to)
{
	/* convert from le32 struct to CPU byte ordered values */
	to->used = le32_to_cpu(from->used);
	to->avail = le32_to_cpu(from->avail);
}
const struct fm10k_tlv_attr fm10k_err_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
				 sizeof(struct fm10k_swapi_error)),
	FM10K_TLV_ATTR_LAST
};
/**
 * fm10k_msg_err_pf - Message handler for error reply
 * @hw: Pointer to hardware structure
 * @results: pointer array containing parsed data
 * @mbx: Pointer to mailbox information structure
 *
 * This handler will capture the data for any error replies to previous
 * messages that the PF has sent.
 **/
s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
		     struct fm10k_mbx_info *mbx)
{
	struct fm10k_swapi_error err_msg;
	s32 err;

	/* extract structure from message */
	err = fm10k_tlv_attr_get_le_struct(results[FM10K_PF_ATTR_ID_ERR],
					   &err_msg, sizeof(err_msg));
	if (err)
		return err;

	/* record table status */
	fm10k_record_global_table_data(&err_msg.mac, &hw->swapi.mac);
	fm10k_record_global_table_data(&err_msg.nexthop, &hw->swapi.nexthop);
	fm10k_record_global_table_data(&err_msg.ffu, &hw->swapi.ffu);

	/* record SW API status value */
	hw->swapi.status = le32_to_cpu(err_msg.status);

	return 0;
}
const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
	FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
				 sizeof(struct fm10k_swapi_1588_timestamp)),
	FM10K_TLV_ATTR_LAST
};

/* currently there is no shared 1588 timestamp handler */
/**
 * fm10k_adjust_systime_pf - Adjust systime frequency
 * @hw: pointer to hardware structure
 * @ppb: adjustment rate in parts per billion
 *
 * This function will adjust the SYSTIME_CFG register contained in BAR 4
 * if this function is supported for BAR 4 access. The adjustment amount
 * is based on the parts per billion value provided and adjusted to a
 * value based on parts per 2^48 clock cycles.
 *
 * If adjustment is not supported or the requested value is too large
 * we will return an error.
 **/
static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
{
	u64 systime_adjust;

	/* if sw_addr is not set we don't have switch register access */
	if (!hw->sw_addr)
		return ppb ? FM10K_ERR_PARAM : 0;

	/* we must convert the value from parts per billion to parts per
	 * 2^48 cycles. In addition I have opted to only use the 30 most
	 * significant bits of the adjustment value, as the 8 least
	 * significant bits are located in another register and represent
	 * a value significantly less than a part per billion. The result
	 * of dropping the 8 least significant bits is that the adjustment
	 * value is effectively multiplied by 2^8 when we write it.
	 *
	 * As a result of all this the math for this breaks down as follows:
	 *	ppb / 10^9 == adjust * 2^8 / 2^48
	 * If we solve this for adjust, and simplify it comes out as:
	 *	ppb * 2^31 / 5^9 == adjust
	 */
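	/* Worked example (for illustration only): ppb = 1000 gives
	 * 1000 * 2^31 / 5^9 = 2147483648000 / 1953125, which truncates to
	 * 1099511 once do_div() is applied below.
	 */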
	systime_adjust = (ppb < 0) ? -ppb : ppb;
	systime_adjust <<= 31;
	do_div(systime_adjust, 1953125);

	/* verify the requested adjustment value is in range */
	if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
		return FM10K_ERR_PARAM;

	if (ppb > 0)
		systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;

	fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);

	return 0;
}
/**
 * fm10k_read_systime_pf - Reads value of systime registers
 * @hw: pointer to the hardware structure
 *
 * Function reads the content of 2 registers, combined to represent a 64 bit
 * value measured in nanoseconds. In order to guarantee the value is accurate
 * we check the 32 most significant bits both before and after reading the
 * 32 least significant bits to verify they didn't change as we were reading
 * the registers.
 **/
static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
{
	u32 systime_l, systime_h, systime_tmp;

	systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
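	/* retry until the high word is stable across the low word read, so
	 * a rollover of the low word between reads cannot produce a torn
	 * 64-bit value
	 */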
	do {
		systime_tmp = systime_h;
		systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
		systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
	} while (systime_tmp != systime_h);

	return ((u64)systime_h << 32) | systime_l;
}
static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
	FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
	FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_msg_lport_map_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
	FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
	FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_msg_update_pvid_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};
static struct fm10k_mac_ops mac_ops_pf = {
	.get_bus_info = &fm10k_get_bus_info_generic,
	.reset_hw = &fm10k_reset_hw_pf,
	.init_hw = &fm10k_init_hw_pf,
	.start_hw = &fm10k_start_hw_generic,
	.stop_hw = &fm10k_stop_hw_generic,
	.update_vlan = &fm10k_update_vlan_pf,
	.read_mac_addr = &fm10k_read_mac_addr_pf,
	.update_uc_addr = &fm10k_update_uc_addr_pf,
	.update_mc_addr = &fm10k_update_mc_addr_pf,
	.update_xcast_mode = &fm10k_update_xcast_mode_pf,
	.update_int_moderator = &fm10k_update_int_moderator_pf,
	.update_lport_state = &fm10k_update_lport_state_pf,
	.update_hw_stats = &fm10k_update_hw_stats_pf,
	.rebind_hw_stats = &fm10k_rebind_hw_stats_pf,
	.configure_dglort_map = &fm10k_configure_dglort_map_pf,
	.set_dma_mask = &fm10k_set_dma_mask_pf,
	.get_fault = &fm10k_get_fault_pf,
	.get_host_state = &fm10k_get_host_state_pf,
	.adjust_systime = &fm10k_adjust_systime_pf,
	.read_systime = &fm10k_read_systime_pf,
};
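/* Illustrative only: callers do not invoke the PF routines above directly,
 * they reach them through the function pointer tables, e.g. (assuming a
 * populated struct fm10k_hw *hw and a struct fm10k_hw_stats stats)
 *
 *	hw->mac.ops.update_hw_stats(hw, &stats);
 *	u64 now = hw->mac.ops.read_systime(hw);
 */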
static struct fm10k_iov_ops iov_ops_pf = {
	.assign_resources = &fm10k_iov_assign_resources_pf,
	.configure_tc = &fm10k_iov_configure_tc_pf,
	.assign_int_moderator = &fm10k_iov_assign_int_moderator_pf,
	.assign_default_mac_vlan = &fm10k_iov_assign_default_mac_vlan_pf,
	.reset_resources = &fm10k_iov_reset_resources_pf,
	.set_lport = &fm10k_iov_set_lport_pf,
	.reset_lport = &fm10k_iov_reset_lport_pf,
	.update_stats = &fm10k_iov_update_stats_pf,
	.report_timestamp = &fm10k_iov_report_timestamp_pf,
};
static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
{
	fm10k_get_invariants_generic(hw);

	return fm10k_sm_mbx_init(hw, &hw->mbx, fm10k_msg_data_pf);
}

struct fm10k_info fm10k_pf_info = {
	.mac = fm10k_mac_pf,
	.get_invariants = &fm10k_get_invariants_pf,
	.mac_ops = &mac_ops_pf,
	.iov_ops = &iov_ops_pf,
};