cxgb4_dcb.c

/*
 * Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
 *
 * Written by Anish Bhatt (anish@chelsio.com)
 *	      Casey Leedom (leedom@chelsio.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */
#include "cxgb4.h"

/* DCBx version control
 */
static const char * const dcb_ver_array[] = {
	"Unknown",
	"DCBx-CIN",
	"DCBx-CEE 1.01",
	"DCBx-IEEE",
	"", "", "",
	"Auto Negotiated"
};
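
/* dcb_ver_array[] is indexed by the FW_PORT_DCB_VER_* values that end up in
 * port_dcb_info.dcb_version (see cxgb4_dcb_handle_fw_update() below); the
 * empty strings pad the gap so that the final entry, "Auto Negotiated",
 * presumably lines up with FW_PORT_DCB_VER_AUTO.
 */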

static inline bool cxgb4_dcb_state_synced(enum cxgb4_dcb_state state)
{
	if (state == CXGB4_DCB_STATE_FW_ALLSYNCED ||
	    state == CXGB4_DCB_STATE_HOST)
		return true;
	else
		return false;
}

/* Initialize a port's Data Center Bridging state.  Typically used after a
 * Link Down event.
 */
void cxgb4_dcb_state_init(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	int version_temp = dcb->dcb_version;

	memset(dcb, 0, sizeof(struct port_dcb_info));
	dcb->state = CXGB4_DCB_STATE_START;
	if (version_temp)
		dcb->dcb_version = version_temp;

	netdev_dbg(dev, "%s: Initializing DCB state for port[%d]\n",
		   __func__, pi->port_id);
}

void cxgb4_dcb_version_init(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	/* Any writes here are only done on kernels that explicitly need
	 * a specific version, say < 2.6.38, which only supports CEE.
	 */
	dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
}

static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	struct port_dcb_info *dcb = &pi->dcb;
	struct dcb_app app;
	int i, err;

	/* zero priority implies remove */
	app.priority = 0;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		/* Check if app list is exhausted */
		if (!dcb->app_priority[i].protocolid)
			break;

		app.protocol = dcb->app_priority[i].protocolid;

		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
			app.priority = dcb->app_priority[i].user_prio_map;
			app.selector = dcb->app_priority[i].sel_field + 1;
			err = dcb_ieee_delapp(dev, &app);
		} else {
			app.selector = !!(dcb->app_priority[i].sel_field);
			err = dcb_setapp(dev, &app);
		}

		if (err) {
			dev_err(adap->pdev_dev,
				"Failed DCB Clear %s Application Priority: sel=%d, prot=%d, err=%d\n",
				dcb_ver_array[dcb->dcb_version], app.selector,
				app.protocol, -err);
			break;
		}
	}
}

/* Finite State machine for Data Center Bridging.
 */
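/* A rough summary of the state machine below, derived from the switch
 * statement that follows (states on the left, CXGB4_DCB_INPUT_* inputs on
 * the arrows):
 *
 *	START         --FW_DISABLED-->   HOST           (host lldp agent manages DCB)
 *	START         --FW_ENABLED-->    FW_INCOMPLETE
 *	START         --FW_ALLSYNCED-->  FW_ALLSYNCED
 *	FW_INCOMPLETE --FW_ALLSYNCED-->  FW_ALLSYNCED    (DCB enabled, linkwatch kicked)
 *	FW_ALLSYNCED  --FW_INCOMPLETE--> FW_INCOMPLETE   (renegotiation; app table cleaned up)
 *
 * All other inputs either leave the state unchanged or are reported as
 * illegal.
 */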
void cxgb4_dcb_state_fsm(struct net_device *dev,
			 enum cxgb4_dcb_state_input transition_to)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	struct adapter *adap = pi->adapter;
	enum cxgb4_dcb_state current_state = dcb->state;

	netdev_dbg(dev, "%s: State change from %d to %d for %s\n",
		   __func__, dcb->state, transition_to, dev->name);

	switch (current_state) {
	case CXGB4_DCB_STATE_START: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_DISABLED: {
			/* we're going to use Host DCB */
			dcb->state = CXGB4_DCB_STATE_HOST;
			dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
			break;
		}

		case CXGB4_DCB_INPUT_FW_ENABLED: {
			/* we're going to use Firmware DCB */
			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
			dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
			if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
				dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
			else
				dcb->supported |= DCB_CAP_DCBX_VER_CEE;
			break;
		}

		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
			/* expected transition */
			break;
		}

		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
			dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	case CXGB4_DCB_STATE_FW_INCOMPLETE: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_ENABLED: {
			/* we're already in firmware DCB mode */
			break;
		}

		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
			/* we're already incomplete */
			break;
		}

		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
			dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
			dcb->enabled = 1;
			linkwatch_fire_event(dev);
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	case CXGB4_DCB_STATE_FW_ALLSYNCED: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_ENABLED: {
			/* we're already in firmware DCB mode */
			break;
		}

		case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
			/* We were successfully running with firmware DCB but
			 * now it's telling us that it's in an "incomplete"
			 * state.  We need to reset back to a ground state
			 * of incomplete.
			 */
			cxgb4_dcb_cleanup_apps(dev);
			cxgb4_dcb_state_init(dev);
			dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
			dcb->supported = CXGB4_DCBX_FW_SUPPORT;
			linkwatch_fire_event(dev);
			break;
		}

		case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
			/* we're already all sync'ed
			 * this is only applicable for IEEE or
			 * when another VI already completed negotiation
			 */
			dcb->enabled = 1;
			linkwatch_fire_event(dev);
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	case CXGB4_DCB_STATE_HOST: {
		switch (transition_to) {
		case CXGB4_DCB_INPUT_FW_DISABLED: {
			/* we're already in Host DCB mode */
			break;
		}

		default:
			goto bad_state_input;
		}
		break;
	}

	default:
		goto bad_state_transition;
	}
	return;

bad_state_input:
	dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: illegal input symbol %d\n",
		transition_to);
	return;

bad_state_transition:
	dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: bad state transition, state = %d, input = %d\n",
		current_state, transition_to);
}

/* Handle a DCB/DCBX update message from the firmware.
 */
void cxgb4_dcb_handle_fw_update(struct adapter *adap,
				const struct fw_port_cmd *pcmd)
{
	const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
	int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	struct port_info *pi = netdev_priv(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	int dcb_type = pcmd->u.dcb.pgid.type;
	int dcb_running_version;

	/* Handle Firmware DCB Control messages separately since they drive
	 * our state machine.
	 */
	if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
		enum cxgb4_dcb_state_input input =
			((pcmd->u.dcb.control.all_syncd_pkd &
			  FW_PORT_CMD_ALL_SYNCD_F)
			 ? CXGB4_DCB_STATE_FW_ALLSYNCED
			 : CXGB4_DCB_STATE_FW_INCOMPLETE);

		if (dcb->dcb_version != FW_PORT_DCB_VER_UNKNOWN) {
			dcb_running_version = FW_PORT_CMD_DCB_VERSION_G(
				be16_to_cpu(
				pcmd->u.dcb.control.dcb_version_to_app_state));
			if (dcb_running_version == FW_PORT_DCB_VER_CEE1D01 ||
			    dcb_running_version == FW_PORT_DCB_VER_IEEE) {
				dcb->dcb_version = dcb_running_version;
				dev_warn(adap->pdev_dev, "Interface %s is running %s\n",
					 dev->name,
					 dcb_ver_array[dcb->dcb_version]);
			} else {
				dev_warn(adap->pdev_dev,
					 "Something screwed up, requested firmware for %s, but firmware returned %s instead\n",
					 dcb_ver_array[dcb->dcb_version],
					 dcb_ver_array[dcb_running_version]);
				dcb->dcb_version = FW_PORT_DCB_VER_UNKNOWN;
			}
		}

		cxgb4_dcb_state_fsm(dev, input);
		return;
	}

	/* It's weird, and almost certainly an error, to get Firmware DCB
	 * messages when we haven't yet been told whether we're going to be
	 * doing Host or Firmware DCB; and even worse when we've been told
	 * that we're doing Host DCB!
	 */
	if (dcb->state == CXGB4_DCB_STATE_START ||
	    dcb->state == CXGB4_DCB_STATE_HOST) {
		dev_err(adap->pdev_dev, "Receiving Firmware DCB messages in State %d\n",
			dcb->state);
		return;
	}

	/* Now handle the general Firmware DCB update messages ...
	 */
	switch (dcb_type) {
	case FW_PORT_DCB_TYPE_PGID:
		dcb->pgid = be32_to_cpu(fwdcb->pgid.pgid);
		dcb->msgs |= CXGB4_DCB_FW_PGID;
		break;

	case FW_PORT_DCB_TYPE_PGRATE:
		dcb->pg_num_tcs_supported = fwdcb->pgrate.num_tcs_supported;
		memcpy(dcb->pgrate, &fwdcb->pgrate.pgrate,
		       sizeof(dcb->pgrate));
		memcpy(dcb->tsa, &fwdcb->pgrate.tsa,
		       sizeof(dcb->tsa));
		dcb->msgs |= CXGB4_DCB_FW_PGRATE;
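		/* IEEE DCBX negotiation does not appear to produce the same
		 * ALL_SYNCD control message that CEE does, so IEEE_FAUX_SYNC()
		 * (a macro from the driver's DCB header) is used after key
		 * parameters arrive to push the state machine toward
		 * FW_ALLSYNCED when the negotiated version is IEEE; compare
		 * the CXGB4_DCB_INPUT_FW_ALLSYNCED handling in the FSM above.
		 */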
		if (dcb->msgs & CXGB4_DCB_FW_PGID)
			IEEE_FAUX_SYNC(dev, dcb);
		break;

	case FW_PORT_DCB_TYPE_PRIORATE:
		memcpy(dcb->priorate, &fwdcb->priorate.strict_priorate,
		       sizeof(dcb->priorate));
		dcb->msgs |= CXGB4_DCB_FW_PRIORATE;
		break;

	case FW_PORT_DCB_TYPE_PFC:
		dcb->pfcen = fwdcb->pfc.pfcen;
		dcb->pfc_num_tcs_supported = fwdcb->pfc.max_pfc_tcs;
		dcb->msgs |= CXGB4_DCB_FW_PFC;
		IEEE_FAUX_SYNC(dev, dcb);
		break;

	case FW_PORT_DCB_TYPE_APP_ID: {
		const struct fw_port_app_priority *fwap = &fwdcb->app_priority;
		int idx = fwap->idx;
		struct app_priority *ap = &dcb->app_priority[idx];
		struct dcb_app app = {
			.protocol = be16_to_cpu(fwap->protocolid),
		};
		int err;

		/* Convert the app selector from firmware format to the
		 * format expected by dcbnl.
		 */
		if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
			app.selector = (fwap->sel_field + 1);
			app.priority = ffs(fwap->user_prio_map) - 1;
			err = dcb_ieee_setapp(dev, &app);
			IEEE_FAUX_SYNC(dev, dcb);
		} else {
			/* Default is CEE */
			app.selector = !!(fwap->sel_field);
			app.priority = fwap->user_prio_map;
			err = dcb_setapp(dev, &app);
		}

		if (err)
			dev_err(adap->pdev_dev,
				"Failed DCB Set Application Priority: sel=%d, prot=%d, prio=%d, err=%d\n",
				app.selector, app.protocol, app.priority,
				-err);

		ap->user_prio_map = fwap->user_prio_map;
		ap->sel_field = fwap->sel_field;
		ap->protocolid = be16_to_cpu(fwap->protocolid);
		dcb->msgs |= CXGB4_DCB_FW_APP_ID;
		break;
	}

	default:
		dev_err(adap->pdev_dev, "Unknown DCB update type received %x\n",
			dcb_type);
		break;
	}
}

/* Data Center Bridging netlink operations.
 */

/* Get current DCB enabled/disabled state.
 */
static u8 cxgb4_getstate(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);

	return pi->dcb.enabled;
}

/* Set DCB enabled/disabled.
 */
static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
{
	struct port_info *pi = netdev2pinfo(dev);

	/* If DCBx is host-managed, dcb is enabled by outside lldp agents */
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
		pi->dcb.enabled = enabled;
		return 0;
	}

	/* Firmware doesn't provide any mechanism to control the DCB state.
	 */
	if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
		return 1;

	return 0;
}

static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
			     u8 *prio_type, u8 *pgid, u8 *bw_per,
			     u8 *up_tc_map, int local)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	*prio_type = *pgid = *bw_per = *up_tc_map = 0;

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return;
	}

	*pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	*bw_per = pcmd.u.dcb.pgrate.pgrate[*pgid];
	*up_tc_map = (1 << tc);

	/* prio_type is link strict */
	if (*pgid != 0xF)
		*prio_type = 0x2;
}
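
/* The firmware packs the eight per-TC Priority Group IDs into a single
 * 32-bit PGID word, four bits per traffic class with TC 0 in the
 * most-significant nibble.  That is why the TX/RX wrappers below pass
 * (7 - tc) and why cxgb4_setpgtccfg_tx() masks and sets a nibble at
 * (fw_tc * 4).  For example, with pgid = 0x01234567, TC 0 belongs to
 * Priority Group 0 and TC 7 to Priority Group 7.
 */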
static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
				u8 *prio_type, u8 *pgid, u8 *bw_per,
				u8 *up_tc_map)
{
	/* tc 0 is written at MSB position */
	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
				up_tc_map, 1);
}

static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
				u8 *prio_type, u8 *pgid, u8 *bw_per,
				u8 *up_tc_map)
{
	/* tc 0 is written at MSB position */
	return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
				up_tc_map, 0);
}

static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
				u8 prio_type, u8 pgid, u8 bw_per,
				u8 up_tc_map)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int fw_tc = 7 - tc;
	u32 _pgid;
	int err;

	if (pgid == DCB_ATTR_VALUE_UNDEFINED)
		return;
	if (bw_per == DCB_ATTR_VALUE_UNDEFINED)
		return;

	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return;
	}

	_pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
	_pgid &= ~(0xF << (fw_tc * 4));
	_pgid |= pgid << (fw_tc * 4);
	pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB write PGID failed with %d\n",
			-err);
		return;
	}

	memset(&pcmd, 0, sizeof(struct fw_port_cmd));

	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS)
		dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
			-err);
}

static void cxgb4_getpgbwgcfg(struct net_device *dev, int pgid, u8 *bw_per,
			      int local)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	*bw_per = pcmd.u.dcb.pgrate.pgrate[pgid];
}

static void cxgb4_getpgbwgcfg_tx(struct net_device *dev, int pgid, u8 *bw_per)
{
	return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 1);
}

static void cxgb4_getpgbwgcfg_rx(struct net_device *dev, int pgid, u8 *bw_per)
{
	return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 0);
}

static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid,
				 u8 bw_per)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return;
	}

	pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS)
		dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
			-err);
}

/* Return whether the specified Traffic Class Priority has Priority Pause
 * Frames enabled.
 */
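/* Note on bit ordering: the firmware keeps the PFC enable map with
 * priority 0 in the most-significant bit of the byte, so the CEE
 * per-priority accessors below shift by (7 - priority), while the IEEE
 * and peer paths convert the whole byte at once with bitswap_1()
 * (a bit-reversal helper provided elsewhere in the driver).
 */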
static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	if (!cxgb4_dcb_state_synced(dcb->state) ||
	    priority >= CXGB4_MAX_PRIORITY)
		*pfccfg = 0;
	else
		*pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
}

/* Enable/disable Priority Pause Frames for the specified Traffic Class
 * Priority.
 */
static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int err;

	if (!cxgb4_dcb_state_synced(pi->dcb.state) ||
	    priority >= CXGB4_MAX_PRIORITY)
		return;

	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC;
	pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;

	if (pfccfg)
		pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
	else
		pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB PFC write failed with %d\n", -err);
		return;
	}

	pi->dcb.pfcen = pcmd.u.dcb.pfc.pfcen;
}

static u8 cxgb4_setall(struct net_device *dev)
{
	return 0;
}

/* Return DCB capabilities.
 */
static u8 cxgb4_getcap(struct net_device *dev, int cap_id, u8 *caps)
{
	struct port_info *pi = netdev2pinfo(dev);

	switch (cap_id) {
	case DCB_CAP_ATTR_PG:
	case DCB_CAP_ATTR_PFC:
		*caps = true;
		break;

	case DCB_CAP_ATTR_PG_TCS:
		/* 8 priorities for PG represented by bitmap */
		*caps = 0x80;
		break;

	case DCB_CAP_ATTR_PFC_TCS:
		/* 8 priorities for PFC represented by bitmap */
		*caps = 0x80;
		break;

	case DCB_CAP_ATTR_GSP:
		*caps = true;
		break;

	case DCB_CAP_ATTR_UP2TC:
	case DCB_CAP_ATTR_BCN:
		*caps = false;
		break;

	case DCB_CAP_ATTR_DCBX:
		*caps = pi->dcb.supported;
		break;

	default:
		*caps = false;
	}

	return 0;
}

/* Return the number of Traffic Classes for the indicated Traffic Class ID.
 */
static int cxgb4_getnumtcs(struct net_device *dev, int tcs_id, u8 *num)
{
	struct port_info *pi = netdev2pinfo(dev);

	switch (tcs_id) {
	case DCB_NUMTCS_ATTR_PG:
		if (pi->dcb.msgs & CXGB4_DCB_FW_PGRATE)
			*num = pi->dcb.pg_num_tcs_supported;
		else
			*num = 0x8;
		break;

	case DCB_NUMTCS_ATTR_PFC:
		*num = 0x8;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* Set the number of Traffic Classes supported for the indicated Traffic Class
 * ID.
 */
static int cxgb4_setnumtcs(struct net_device *dev, int tcs_id, u8 num)
{
	/* Setting the number of Traffic Classes isn't supported.
	 */
	return -ENOSYS;
}

/* Return whether Priority Flow Control is enabled. */
static u8 cxgb4_getpfcstate(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);

	if (!cxgb4_dcb_state_synced(pi->dcb.state))
		return false;

	return pi->dcb.pfcen != 0;
}

/* Enable/disable Priority Flow Control. */
static void cxgb4_setpfcstate(struct net_device *dev, u8 state)
{
	/* We can't enable/disable Priority Flow Control but we also can't
	 * return an error ...
	 */
}

/* Return the Application User Priority Map associated with the specified
 * Application ID.
 */
static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
			  int peer)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i;

	if (!cxgb4_dcb_state_synced(pi->dcb.state))
		return 0;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		struct fw_port_cmd pcmd;
		int err;

		if (peer)
			INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
		else
			INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);

		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = i;
		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB APP read failed with %d\n",
				-err);
			return err;
		}
		if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id)
			if (pcmd.u.dcb.app_priority.sel_field == app_idtype)
				return pcmd.u.dcb.app_priority.user_prio_map;

		/* exhausted app list */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;
	}

	return -EEXIST;
}

/* Return the Application User Priority Map associated with the specified
 * Application ID.
 */
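/* Note on selectors: the firmware's sel_field effectively follows the IEEE
 * selector numbering minus one (see the "sel_field + 1" conversions in the
 * IEEE paths), so for CEE lookups DCB_APP_IDTYPE_ETHTYPE is passed through
 * as 0 and anything else (i.e. DCB_APP_IDTYPE_PORTNUM) is mapped to 3.
 */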
static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
{
	/* Convert app_idtype to firmware format before querying */
	return __cxgb4_getapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
			      app_idtype : 3, app_id, 0);
}

/* Write a new Application User Priority Map for the specified Application ID
 */
static int __cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
			  u8 app_prio)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i, err;

	if (!cxgb4_dcb_state_synced(pi->dcb.state))
		return -EINVAL;

	/* DCB info gets thrown away on link up */
	if (!netif_carrier_ok(dev))
		return -ENOLINK;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = i;
		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
				-err);
			return err;
		}
		if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) {
			/* overwrite existing app table */
			pcmd.u.dcb.app_priority.protocolid = 0;
			break;
		}
		/* find first empty slot */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;
	}

	if (i == CXGB4_MAX_DCBX_APP_SUPPORTED) {
		/* no empty slots available */
		dev_err(adap->pdev_dev, "DCB app table full\n");
		return -EBUSY;
	}

	/* write out new app table entry */
	INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
	if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
		pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY_F);

	pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
	pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id);
	pcmd.u.dcb.app_priority.sel_field = app_idtype;
	pcmd.u.dcb.app_priority.user_prio_map = app_prio;
	pcmd.u.dcb.app_priority.idx = i;

	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB app table write failed with %d\n",
			-err);
		return err;
	}

	return 0;
}

/* Priority for CEE inside dcb_app is a bitmask, with 0 being an invalid value */
static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
			u8 app_prio)
{
	int ret;
	struct dcb_app app = {
		.selector = app_idtype,
		.protocol = app_id,
		.priority = app_prio,
	};

	if (app_idtype != DCB_APP_IDTYPE_ETHTYPE &&
	    app_idtype != DCB_APP_IDTYPE_PORTNUM)
		return -EINVAL;

	/* Convert app_idtype to a format that firmware understands */
	ret = __cxgb4_setapp(dev, app_idtype == DCB_APP_IDTYPE_ETHTYPE ?
			     app_idtype : 3, app_id, app_prio);
	if (ret)
		return ret;

	return dcb_setapp(dev, &app);
}

/* Return whether IEEE Data Center Bridging has been negotiated.
 */
static inline int
cxgb4_ieee_negotiation_complete(struct net_device *dev,
				enum cxgb4_dcb_fw_msgs dcb_subtype)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	if (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED)
		if (dcb_subtype && !(dcb->msgs & dcb_subtype))
			return 0;

	return (cxgb4_dcb_state_synced(dcb->state) &&
		(dcb->supported & DCB_CAP_DCBX_VER_IEEE));
}

static int cxgb4_ieee_read_ets(struct net_device *dev, struct ieee_ets *ets,
			       int local)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;
	struct adapter *adap = pi->adapter;
	uint32_t tc_info;
	struct fw_port_cmd pcmd;
	int i, bwg, err;

	if (!(dcb->msgs & (CXGB4_DCB_FW_PGID | CXGB4_DCB_FW_PGRATE)))
		return 0;

	ets->ets_cap = dcb->pg_num_tcs_supported;

	if (local) {
		ets->willing = 1;
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	} else {
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
	}

	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return err;
	}

	tc_info = be32_to_cpu(pcmd.u.dcb.pgid.pgid);

	if (local)
		INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
	else
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);

	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return err;
	}

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		bwg = (tc_info >> ((7 - i) * 4)) & 0xF;
		ets->prio_tc[i] = bwg;
		ets->tc_tx_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
		ets->tc_rx_bw[i] = ets->tc_tx_bw[i];
		ets->tc_tsa[i] = pcmd.u.dcb.pgrate.tsa[i];
	}

	return 0;
}

static int cxgb4_ieee_get_ets(struct net_device *dev, struct ieee_ets *ets)
{
	return cxgb4_ieee_read_ets(dev, ets, 1);
}

/* We reuse this for peer PFC as well, since PFC can't be enabled in only one
 * direction.
 */
static int cxgb4_ieee_get_pfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct port_dcb_info *dcb = &pi->dcb;

	memset(pfc, 0, sizeof(struct ieee_pfc));

	if (!(dcb->msgs & CXGB4_DCB_FW_PFC))
		return 0;

	pfc->pfc_cap = dcb->pfc_num_tcs_supported;
	pfc->pfc_en = bitswap_1(dcb->pfcen);

	return 0;
}

static int cxgb4_ieee_peer_ets(struct net_device *dev, struct ieee_ets *ets)
{
	return cxgb4_ieee_read_ets(dev, ets, 0);
}

/* Fill in the Application User Priority Map associated with the
 * specified Application.
 * Priority for IEEE dcb_app is an integer, with 0 being a valid value.
 */
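/* The firmware reports user_prio_map as a bitmask of user priorities, so
 * ffs(prio) - 1 below converts it into the single (lowest) priority value
 * that the IEEE dcb_app interface expects.
 */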
static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
{
	int prio;

	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
		return -EINVAL;
	if (!(app->selector && app->protocol))
		return -EINVAL;

	/* Try querying firmware first, use firmware format */
	prio = __cxgb4_getapp(dev, app->selector - 1, app->protocol, 0);

	if (prio < 0)
		prio = dcb_ieee_getapp_mask(dev, app);

	app->priority = ffs(prio) - 1;
	return 0;
}

/* Write a new Application User Priority Map for the specified Application ID.
 * Priority for IEEE dcb_app is an integer, with 0 being a valid value.
 */
static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
{
	int ret;

	if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
		return -EINVAL;
	if (!(app->selector && app->protocol))
		return -EINVAL;

	if (!(app->selector > IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
	      app->selector < IEEE_8021QAZ_APP_SEL_ANY))
		return -EINVAL;

	/* change selector to a format that firmware understands */
	ret = __cxgb4_setapp(dev, app->selector - 1, app->protocol,
			     (1 << app->priority));
	if (ret)
		return ret;

	return dcb_ieee_setapp(dev, app);
}

/* Return our DCBX parameters.
 */
static u8 cxgb4_getdcbx(struct net_device *dev)
{
	struct port_info *pi = netdev2pinfo(dev);

	/* This is already set by cxgb4_set_dcb_caps, so just return it */
	return pi->dcb.supported;
}

/* Set our DCBX parameters.
 */
static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
{
	struct port_info *pi = netdev2pinfo(dev);

	/* Filter out requests which exceed our capabilities.
	 */
	if ((dcb_request & (CXGB4_DCBX_FW_SUPPORT | CXGB4_DCBX_HOST_SUPPORT))
	    != dcb_request)
		return 1;

	/* Can't enable DCB if we haven't successfully negotiated it.
	 */
	if (!cxgb4_dcb_state_synced(pi->dcb.state))
		return 1;

	/* There's currently no mechanism to allow for the firmware DCBX
	 * negotiation to be changed from the Host Driver.  If the caller
	 * requests exactly the same parameters that we already have then
	 * we'll allow them to be successfully "set" ...
	 */
	if (dcb_request != pi->dcb.supported)
		return 1;

	pi->dcb.supported = dcb_request;
	return 0;
}

static int cxgb4_getpeer_app(struct net_device *dev,
			     struct dcb_peer_app_info *info, u16 *app_count)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i, err = 0;

	if (!cxgb4_dcb_state_synced(pi->dcb.state))
		return 1;

	info->willing = 0;
	info->error = 0;

	*app_count = 0;
	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = *app_count;
		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
				-err);
			return err;
		}

		/* find first empty slot */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;
	}
	*app_count = i;
	return err;
}

static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	int i, err = 0;

	if (!cxgb4_dcb_state_synced(pi->dcb.state))
		return 1;

	for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
		INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
		pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
		pcmd.u.dcb.app_priority.idx = i;
		err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
		if (err != FW_PORT_DCB_CFG_SUCCESS) {
			dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
				-err);
			return err;
		}

		/* find first empty slot */
		if (!pcmd.u.dcb.app_priority.protocolid)
			break;

		table[i].selector = (pcmd.u.dcb.app_priority.sel_field + 1);
		table[i].protocol =
			be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
		table[i].priority =
			ffs(pcmd.u.dcb.app_priority.user_prio_map) - 1;
	}
	return err;
}

/* Return Priority Group information.
 */
static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
{
	struct fw_port_cmd pcmd;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = pi->adapter;
	u32 pgid;
	int i, err;

	/* We're always "willing" -- the Switch Fabric always dictates the
	 * DCBX parameters to us.
	 */
	pg->willing = true;

	INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
		return err;
	}
	pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);

	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
		pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;

	INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
	pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
	err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
	if (err != FW_PORT_DCB_CFG_SUCCESS) {
		dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
			-err);
		return err;
	}

	for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
		pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];

	pg->tcs_supported = pcmd.u.dcb.pgrate.num_tcs_supported;

	return 0;
}

/* Return Priority Flow Control information.
 */
static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
{
	struct port_info *pi = netdev2pinfo(dev);

	cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));

	/* Firmware sends this to us in a format that is a bit-flipped version
	 * of the spec; correct it before we send it to the host.  This is
	 * handled by bit shifting in other uses of pfcen.
	 */
	pfc->pfc_en = bitswap_1(pi->dcb.pfcen);

	pfc->tcs_supported = pi->dcb.pfc_num_tcs_supported;

	return 0;
}

const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
	.ieee_getets		= cxgb4_ieee_get_ets,
	.ieee_getpfc		= cxgb4_ieee_get_pfc,
	.ieee_getapp		= cxgb4_ieee_getapp,
	.ieee_setapp		= cxgb4_ieee_setapp,
	.ieee_peer_getets	= cxgb4_ieee_peer_ets,
	.ieee_peer_getpfc	= cxgb4_ieee_get_pfc,

	/* CEE std */
	.getstate		= cxgb4_getstate,
	.setstate		= cxgb4_setstate,
	.getpgtccfgtx		= cxgb4_getpgtccfg_tx,
	.getpgbwgcfgtx		= cxgb4_getpgbwgcfg_tx,
	.getpgtccfgrx		= cxgb4_getpgtccfg_rx,
	.getpgbwgcfgrx		= cxgb4_getpgbwgcfg_rx,
	.setpgtccfgtx		= cxgb4_setpgtccfg_tx,
	.setpgbwgcfgtx		= cxgb4_setpgbwgcfg_tx,
	.setpfccfg		= cxgb4_setpfccfg,
	.getpfccfg		= cxgb4_getpfccfg,
	.setall			= cxgb4_setall,
	.getcap			= cxgb4_getcap,
	.getnumtcs		= cxgb4_getnumtcs,
	.setnumtcs		= cxgb4_setnumtcs,
	.getpfcstate		= cxgb4_getpfcstate,
	.setpfcstate		= cxgb4_setpfcstate,
	.getapp			= cxgb4_getapp,
	.setapp			= cxgb4_setapp,

	/* DCBX configuration */
	.getdcbx		= cxgb4_getdcbx,
	.setdcbx		= cxgb4_setdcbx,

	/* peer apps */
	.peer_getappinfo	= cxgb4_getpeer_app,
	.peer_getapptable	= cxgb4_getpeerapp_tbl,

	/* CEE peer */
	.cee_peer_getpg		= cxgb4_cee_peer_getpg,
	.cee_peer_getpfc	= cxgb4_cee_peer_getpfc,
};
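
/* These operations are wired up by the main driver by pointing
 * net_device->dcbnl_ops at this table in the cxgb4 port-setup path,
 * e.g. something along the lines of:
 *
 *	netdev->dcbnl_ops = &cxgb4_dcb_ops;
 */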