/* drivers/net/ethernet/mellanox/mlx4/en_ethtool.c */
  1. /*
  2. * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. *
  32. */
  33. #include <linux/kernel.h>
  34. #include <linux/ethtool.h>
  35. #include <linux/netdevice.h>
  36. #include <linux/mlx4/driver.h>
  37. #include <linux/mlx4/device.h>
  38. #include <linux/in.h>
  39. #include <net/ip.h>
  40. #include <linux/bitmap.h>
  41. #include "mlx4_en.h"
  42. #include "en_port.h"
  43. #define EN_ETHTOOL_QP_ATTACH (1ull << 63)
  44. #define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff)
  45. #define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff)
  46. static int mlx4_en_moderation_update(struct mlx4_en_priv *priv)
  47. {
  48. int i;
  49. int err = 0;
  50. for (i = 0; i < priv->tx_ring_num; i++) {
  51. priv->tx_cq[i]->moder_cnt = priv->tx_frames;
  52. priv->tx_cq[i]->moder_time = priv->tx_usecs;
  53. if (priv->port_up) {
  54. err = mlx4_en_set_cq_moder(priv, priv->tx_cq[i]);
  55. if (err)
  56. return err;
  57. }
  58. }
  59. if (priv->adaptive_rx_coal)
  60. return 0;
  61. for (i = 0; i < priv->rx_ring_num; i++) {
  62. priv->rx_cq[i]->moder_cnt = priv->rx_frames;
  63. priv->rx_cq[i]->moder_time = priv->rx_usecs;
  64. priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
  65. if (priv->port_up) {
  66. err = mlx4_en_set_cq_moder(priv, priv->rx_cq[i]);
  67. if (err)
  68. return err;
  69. }
  70. }
  71. return err;
  72. }
/* ethtool_ops::get_drvinfo — report driver name/version, firmware version,
 * and the PCI bus address of the underlying device.
 */
static void
mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
		sizeof(drvinfo->version));
	/* fw_ver packs three fields into one u64: <major:32><minor:16><sub:16> */
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%d",
		 (u16) (mdev->dev->caps.fw_ver >> 32),
		 (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
		 (u16) (mdev->dev->caps.fw_ver & 0xffff));
	strlcpy(drvinfo->bus_info, pci_name(mdev->dev->persist->pdev),
		sizeof(drvinfo->bus_info));
}
/* Names for the ETH_SS_PRIV_FLAGS string set.  The index order presumably
 * matches the MLX4_EN_PRIV_FLAGS_* bit positions — confirm against mlx4_en.h
 * before reordering.
 */
static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
	"blueflame",
	"phv-bit"
};
/* Names for the ETH_SS_STATS string set.  The groups below are emitted in
 * the same order that mlx4_en_get_ethtool_stats() walks the counter
 * structures (main, port, pf, flow, packet stats), and the entries inside
 * each group must match the field order of the corresponding stats struct
 * — presumably declared in mlx4_en_stats.h; confirm there before editing.
 */
static const char main_strings[][ETH_GSTRING_LEN] = {
	/* main statistics */
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",

	/* port statistics */
	"tso_packets",
	"xmit_more",
	"queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
	"rx_csum_good", "rx_csum_none", "rx_csum_complete", "tx_chksum_offload",

	/* pf statistics */
	"pf_rx_packets",
	"pf_rx_bytes",
	"pf_tx_packets",
	"pf_tx_bytes",

	/* priority flow control statistics rx */
	"rx_pause_prio_0", "rx_pause_duration_prio_0",
	"rx_pause_transition_prio_0",
	"rx_pause_prio_1", "rx_pause_duration_prio_1",
	"rx_pause_transition_prio_1",
	"rx_pause_prio_2", "rx_pause_duration_prio_2",
	"rx_pause_transition_prio_2",
	"rx_pause_prio_3", "rx_pause_duration_prio_3",
	"rx_pause_transition_prio_3",
	"rx_pause_prio_4", "rx_pause_duration_prio_4",
	"rx_pause_transition_prio_4",
	"rx_pause_prio_5", "rx_pause_duration_prio_5",
	"rx_pause_transition_prio_5",
	"rx_pause_prio_6", "rx_pause_duration_prio_6",
	"rx_pause_transition_prio_6",
	"rx_pause_prio_7", "rx_pause_duration_prio_7",
	"rx_pause_transition_prio_7",

	/* flow control statistics rx */
	"rx_pause", "rx_pause_duration", "rx_pause_transition",

	/* priority flow control statistics tx */
	"tx_pause_prio_0", "tx_pause_duration_prio_0",
	"tx_pause_transition_prio_0",
	"tx_pause_prio_1", "tx_pause_duration_prio_1",
	"tx_pause_transition_prio_1",
	"tx_pause_prio_2", "tx_pause_duration_prio_2",
	"tx_pause_transition_prio_2",
	"tx_pause_prio_3", "tx_pause_duration_prio_3",
	"tx_pause_transition_prio_3",
	"tx_pause_prio_4", "tx_pause_duration_prio_4",
	"tx_pause_transition_prio_4",
	"tx_pause_prio_5", "tx_pause_duration_prio_5",
	"tx_pause_transition_prio_5",
	"tx_pause_prio_6", "tx_pause_duration_prio_6",
	"tx_pause_transition_prio_6",
	"tx_pause_prio_7", "tx_pause_duration_prio_7",
	"tx_pause_transition_prio_7",

	/* flow control statistics tx */
	"tx_pause", "tx_pause_duration", "tx_pause_transition",

	/* packet statistics */
	"rx_multicast_packets",
	"rx_broadcast_packets",
	"rx_jabbers",
	"rx_in_range_length_error",
	"rx_out_range_length_error",
	"tx_multicast_packets",
	"tx_broadcast_packets",
	"rx_prio_0_packets", "rx_prio_0_bytes",
	"rx_prio_1_packets", "rx_prio_1_bytes",
	"rx_prio_2_packets", "rx_prio_2_bytes",
	"rx_prio_3_packets", "rx_prio_3_bytes",
	"rx_prio_4_packets", "rx_prio_4_bytes",
	"rx_prio_5_packets", "rx_prio_5_bytes",
	"rx_prio_6_packets", "rx_prio_6_bytes",
	"rx_prio_7_packets", "rx_prio_7_bytes",
	"rx_novlan_packets", "rx_novlan_bytes",
	"tx_prio_0_packets", "tx_prio_0_bytes",
	"tx_prio_1_packets", "tx_prio_1_bytes",
	"tx_prio_2_packets", "tx_prio_2_bytes",
	"tx_prio_3_packets", "tx_prio_3_bytes",
	"tx_prio_4_packets", "tx_prio_4_bytes",
	"tx_prio_5_packets", "tx_prio_5_bytes",
	"tx_prio_6_packets", "tx_prio_6_bytes",
	"tx_prio_7_packets", "tx_prio_7_bytes",
	"tx_novlan_packets", "tx_novlan_bytes",
};
/* Names for the ETH_SS_TEST string set.  The last two tests are only
 * exposed when the device advertises MLX4_DEV_CAP_FLAG_UC_LOOPBACK (see
 * mlx4_en_get_sset_count() and mlx4_en_get_strings()).
 */
static const char mlx4_en_test_names[][ETH_GSTRING_LEN] = {
	"Interrupt Test",
	"Link Test",
	"Speed Test",
	"Register Test",
	"Loopback Test",
};
  183. static u32 mlx4_en_get_msglevel(struct net_device *dev)
  184. {
  185. return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
  186. }
  187. static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
  188. {
  189. ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
  190. }
  191. static void mlx4_en_get_wol(struct net_device *netdev,
  192. struct ethtool_wolinfo *wol)
  193. {
  194. struct mlx4_en_priv *priv = netdev_priv(netdev);
  195. int err = 0;
  196. u64 config = 0;
  197. u64 mask;
  198. if ((priv->port < 1) || (priv->port > 2)) {
  199. en_err(priv, "Failed to get WoL information\n");
  200. return;
  201. }
  202. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  203. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  204. if (!(priv->mdev->dev->caps.flags & mask)) {
  205. wol->supported = 0;
  206. wol->wolopts = 0;
  207. return;
  208. }
  209. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  210. if (err) {
  211. en_err(priv, "Failed to get WoL information\n");
  212. return;
  213. }
  214. if (config & MLX4_EN_WOL_MAGIC)
  215. wol->supported = WAKE_MAGIC;
  216. else
  217. wol->supported = 0;
  218. if (config & MLX4_EN_WOL_ENABLED)
  219. wol->wolopts = WAKE_MAGIC;
  220. else
  221. wol->wolopts = 0;
  222. }
  223. static int mlx4_en_set_wol(struct net_device *netdev,
  224. struct ethtool_wolinfo *wol)
  225. {
  226. struct mlx4_en_priv *priv = netdev_priv(netdev);
  227. u64 config = 0;
  228. int err = 0;
  229. u64 mask;
  230. if ((priv->port < 1) || (priv->port > 2))
  231. return -EOPNOTSUPP;
  232. mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
  233. MLX4_DEV_CAP_FLAG_WOL_PORT2;
  234. if (!(priv->mdev->dev->caps.flags & mask))
  235. return -EOPNOTSUPP;
  236. if (wol->supported & ~WAKE_MAGIC)
  237. return -EINVAL;
  238. err = mlx4_wol_read(priv->mdev->dev, &config, priv->port);
  239. if (err) {
  240. en_err(priv, "Failed to get WoL info, unable to modify\n");
  241. return err;
  242. }
  243. if (wol->wolopts & WAKE_MAGIC) {
  244. config |= MLX4_EN_WOL_DO_MODIFY | MLX4_EN_WOL_ENABLED |
  245. MLX4_EN_WOL_MAGIC;
  246. } else {
  247. config &= ~(MLX4_EN_WOL_ENABLED | MLX4_EN_WOL_MAGIC);
  248. config |= MLX4_EN_WOL_DO_MODIFY;
  249. }
  250. err = mlx4_wol_write(priv->mdev->dev, config, priv->port);
  251. if (err)
  252. en_err(priv, "Failed to set WoL information\n");
  253. return err;
  254. }
/* Walks a statistics-selection bitmap: when the bitmap is non-empty only
 * the counters whose bit is set are reported; when it is empty, every
 * counter is reported (see bitmap_iterator_init()/bitmap_iterator_test()).
 */
struct bitmap_iterator {
	unsigned long *stats_bitmap;	/* selection bitmap being walked */
	unsigned int count;		/* number of counters to report */
	unsigned int iterator;		/* current bit position */
	bool advance_array; /* if set, test bits; if clear, report everything */
};
  261. static inline void bitmap_iterator_init(struct bitmap_iterator *h,
  262. unsigned long *stats_bitmap,
  263. int count)
  264. {
  265. h->iterator = 0;
  266. h->advance_array = !bitmap_empty(stats_bitmap, count);
  267. h->count = h->advance_array ? bitmap_weight(stats_bitmap, count)
  268. : count;
  269. h->stats_bitmap = stats_bitmap;
  270. }
  271. static inline int bitmap_iterator_test(struct bitmap_iterator *h)
  272. {
  273. return !h->advance_array ? 1 : test_bit(h->iterator, h->stats_bitmap);
  274. }
  275. static inline int bitmap_iterator_inc(struct bitmap_iterator *h)
  276. {
  277. return h->iterator++;
  278. }
  279. static inline unsigned int
  280. bitmap_iterator_count(struct bitmap_iterator *h)
  281. {
  282. return h->count;
  283. }
/* ethtool_ops::get_sset_count — number of entries in each string set.
 * The ETH_SS_STATS count must stay in sync with mlx4_en_get_strings() and
 * mlx4_en_get_ethtool_stats(): bitmap-selected counters, plus 2 per TX
 * ring, plus 2 (or 5 with CONFIG_NET_RX_BUSY_POLL) per RX ring.
 */
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	switch (sset) {
	case ETH_SS_STATS:
		return bitmap_iterator_count(&it) +
			(priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_RX_BUSY_POLL
			(priv->rx_ring_num * 5);
#else
			(priv->rx_ring_num * 2);
#endif
	case ETH_SS_TEST:
		/* The last two self-tests require UC loopback support */
		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
	case ETH_SS_PRIV_FLAGS:
		return ARRAY_SIZE(mlx4_en_priv_flags);
	default:
		return -EOPNOTSUPP;
	}
}
/* ethtool_ops::get_ethtool_stats — copy the bitmap-selected counters and
 * then the per-ring TX/RX counters into @data.  Group order and per-group
 * field order must match main_strings[] and the counts reported by
 * mlx4_en_get_sset_count().
 */
static void mlx4_en_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	/* Snapshot under stats_lock so a concurrent update cannot tear it */
	spin_lock_bh(&priv->stats_lock);

	/* Each loop treats its stats struct as a flat array of counters;
	 * the iterator advances once per candidate and only set bits emit. */
	for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->stats)[i];

	for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->port_stats)[i];

	for (i = 0; i < NUM_PF_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((unsigned long *)&priv->pf_stats)[i];

	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_RX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((u64 *)&priv->rx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_RX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->rx_flowstats)[i];

	for (i = 0; i < NUM_FLOW_PRIORITY_STATS_TX;
	     i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] =
				((u64 *)&priv->tx_priority_flowstats)[i];

	for (i = 0; i < NUM_FLOW_STATS_TX; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((u64 *)&priv->tx_flowstats)[i];

	for (i = 0; i < NUM_PKT_STATS; i++, bitmap_iterator_inc(&it))
		if (bitmap_iterator_test(&it))
			data[index++] = ((unsigned long *)&priv->pkstats)[i];

	/* Per-ring counters are always reported, never bitmap-filtered */
	for (i = 0; i < priv->tx_ring_num; i++) {
		data[index++] = priv->tx_ring[i]->packets;
		data[index++] = priv->tx_ring[i]->bytes;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		data[index++] = priv->rx_ring[i]->packets;
		data[index++] = priv->rx_ring[i]->bytes;
#ifdef CONFIG_NET_RX_BUSY_POLL
		data[index++] = priv->rx_ring[i]->yields;
		data[index++] = priv->rx_ring[i]->misses;
		data[index++] = priv->rx_ring[i]->cleaned;
#endif
	}
	spin_unlock_bh(&priv->stats_lock);
}
/* ethtool_ops::self_test — delegate to the driver's extended self-test;
 * results land in @buf, failure flags in etest->flags.
 */
static void mlx4_en_self_test(struct net_device *dev,
			      struct ethtool_test *etest, u64 *buf)
{
	mlx4_en_ex_selftest(dev, &etest->flags, buf);
}
/* ethtool_ops::get_strings — emit the names for a string set.  For
 * ETH_SS_STATS the emission order must mirror mlx4_en_get_ethtool_stats()
 * exactly: `strings` indexes main_strings[] (advancing on every candidate
 * counter) while `index` only advances for counters selected by the bitmap.
 */
static void mlx4_en_get_strings(struct net_device *dev,
				uint32_t stringset, uint8_t *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int index = 0;
	int i, strings = 0;
	struct bitmap_iterator it;

	bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);

	switch (stringset) {
	case ETH_SS_TEST:
		/* The last two tests are only listed with UC loopback support */
		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
		break;

	case ETH_SS_STATS:
		/* Add main counters */
		for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PORT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PF_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		for (i = 0; i < NUM_PKT_STATS; i++, strings++,
		     bitmap_iterator_inc(&it))
			if (bitmap_iterator_test(&it))
				strcpy(data + (index++) * ETH_GSTRING_LEN,
				       main_strings[strings]);

		/* Per-ring names are always present, never bitmap-filtered */
		for (i = 0; i < priv->tx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"tx%d_bytes", i);
		}
		for (i = 0; i < priv->rx_ring_num; i++) {
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_packets", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_bytes", i);
#ifdef CONFIG_NET_RX_BUSY_POLL
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_napi_yield", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_misses", i);
			sprintf(data + (index++) * ETH_GSTRING_LEN,
				"rx%d_cleaned", i);
#endif
		}
		break;

	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
			strcpy(data + i * ETH_GSTRING_LEN,
			       mlx4_en_priv_flags[i]);
		break;
	}
}
  436. static u32 mlx4_en_autoneg_get(struct net_device *dev)
  437. {
  438. struct mlx4_en_priv *priv = netdev_priv(dev);
  439. struct mlx4_en_dev *mdev = priv->mdev;
  440. u32 autoneg = AUTONEG_DISABLE;
  441. if ((mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP) &&
  442. (priv->port_state.flags & MLX4_EN_PORT_ANE))
  443. autoneg = AUTONEG_ENABLE;
  444. return autoneg;
  445. }
/* Map the PTYS capability mask to an ethtool SUPPORTED_* port type.
 * The checks are ordered: twisted-pair first, then fibre, then backplane;
 * the first matching group wins.  Returns 0 if no known mode is set.
 */
static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return SUPPORTED_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return SUPPORTED_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return SUPPORTED_Backplane;
	}
	return 0;
}
/* Map the operational (or, when the link is down, the capability) PTYS
 * mask to an ethtool PORT_* type.  Group order matters because some modes
 * overlap conceptually: TP, then fibre (SR/SGMII), then direct-attach
 * copper (CR), then backplane (reported as PORT_NONE).
 */
static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg)
{
	u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_oper);

	if (!eth_proto) /* link down */
		eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap);

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T)
			 | MLX4_PROT_MASK(MLX4_1000BASE_T)
			 | MLX4_PROT_MASK(MLX4_100BASE_TX))) {
		return PORT_TP;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_SR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_SR4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) {
		return PORT_FIBRE;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR)
			 | MLX4_PROT_MASK(MLX4_56GBASE_CR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_CR4))) {
		return PORT_DA;
	}

	if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_40GBASE_KR4)
			 | MLX4_PROT_MASK(MLX4_20GBASE_KR2)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KR)
			 | MLX4_PROT_MASK(MLX4_10GBASE_KX4)
			 | MLX4_PROT_MASK(MLX4_1000BASE_KX))) {
		return PORT_NONE;
	}
	return PORT_OTHER;
}
/* Number of bits in the PTYS eth_proto_* fields, i.e. the number of
 * distinct mlx4 link modes that can be expressed.
 */
#define MLX4_LINK_MODES_SZ \
	(FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8)

/* Column selector into ptys2ethtool_map[]: which ethtool view to use. */
enum ethtool_report {
	SUPPORTED = 0,
	ADVERTISED = 1,
	SPEED = 2
};
/* Translates mlx4 link mode to equivalent ethtool Link modes/speed.
 * Row index is the mlx4 link-mode bit number; columns are selected by
 * enum ethtool_report (SUPPORTED mask, ADVERTISED mask, SPEED value).
 * Unlisted rows are zero-initialized and contribute nothing.
 */
static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = {
	[MLX4_100BASE_TX] = {
		SUPPORTED_100baseT_Full,
		ADVERTISED_100baseT_Full,
		SPEED_100
		},

	[MLX4_1000BASE_T] = {
		SUPPORTED_1000baseT_Full,
		ADVERTISED_1000baseT_Full,
		SPEED_1000
		},
	[MLX4_1000BASE_CX_SGMII] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
		},
	[MLX4_1000BASE_KX] = {
		SUPPORTED_1000baseKX_Full,
		ADVERTISED_1000baseKX_Full,
		SPEED_1000
		},

	[MLX4_10GBASE_T] = {
		SUPPORTED_10000baseT_Full,
		ADVERTISED_10000baseT_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_CX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_KX4] = {
		SUPPORTED_10000baseKX4_Full,
		ADVERTISED_10000baseKX4_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_KR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_CR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
		},
	[MLX4_10GBASE_SR] = {
		SUPPORTED_10000baseKR_Full,
		ADVERTISED_10000baseKR_Full,
		SPEED_10000
		},

	[MLX4_20GBASE_KR2] = {
		SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full,
		ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full,
		SPEED_20000
		},

	[MLX4_40GBASE_CR4] = {
		SUPPORTED_40000baseCR4_Full,
		ADVERTISED_40000baseCR4_Full,
		SPEED_40000
		},
	[MLX4_40GBASE_KR4] = {
		SUPPORTED_40000baseKR4_Full,
		ADVERTISED_40000baseKR4_Full,
		SPEED_40000
		},
	[MLX4_40GBASE_SR4] = {
		SUPPORTED_40000baseSR4_Full,
		ADVERTISED_40000baseSR4_Full,
		SPEED_40000
		},

	[MLX4_56GBASE_KR4] = {
		SUPPORTED_56000baseKR4_Full,
		ADVERTISED_56000baseKR4_Full,
		SPEED_56000
		},
	[MLX4_56GBASE_CR4] = {
		SUPPORTED_56000baseCR4_Full,
		ADVERTISED_56000baseCR4_Full,
		SPEED_56000
		},
	[MLX4_56GBASE_SR4] = {
		SUPPORTED_56000baseSR4_Full,
		ADVERTISED_56000baseSR4_Full,
		SPEED_56000
		},
};
  598. static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report)
  599. {
  600. int i;
  601. u32 link_modes = 0;
  602. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  603. if (eth_proto & MLX4_PROT_MASK(i))
  604. link_modes |= ptys2ethtool_map[i][report];
  605. }
  606. return link_modes;
  607. }
  608. static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report)
  609. {
  610. int i;
  611. u32 ptys_modes = 0;
  612. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  613. if (ptys2ethtool_map[i][report] & link_modes)
  614. ptys_modes |= 1 << i;
  615. }
  616. return ptys_modes;
  617. }
  618. /* Convert actual speed (SPEED_XXX) to ptys link modes */
  619. static u32 speed2ptys_link_modes(u32 speed)
  620. {
  621. int i;
  622. u32 ptys_modes = 0;
  623. for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
  624. if (ptys2ethtool_map[i][SPEED] == speed)
  625. ptys_modes |= 1 << i;
  626. }
  627. return ptys_modes;
  628. }
/* Fill @cmd from a PTYS register query (devices supporting the access-reg
 * interface).  Reads capability/admin/oper/link-partner masks and derives
 * ethtool supported/advertising/port/transceiver/autoneg fields.
 * Returns 0 on success or the mlx4_ACCESS_PTYS_REG error.
 */
static int ethtool_get_ptys_settings(struct net_device *dev,
				     struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_ptys_reg ptys_reg;
	u32 eth_proto;
	int ret;

	memset(&ptys_reg, 0, sizeof(ptys_reg));
	ptys_reg.local_port = priv->port;
	ptys_reg.proto_mask = MLX4_PTYS_EN;
	ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
				   MLX4_ACCESS_REG_QUERY, &ptys_reg);
	if (ret) {
		en_warn(priv, "Failed to run mlx4_ACCESS_PTYS_REG status(%x)",
			ret);
		return ret;
	}
	en_dbg(DRV, priv, "ptys_reg.proto_mask %x\n",
	       ptys_reg.proto_mask);
	en_dbg(DRV, priv, "ptys_reg.eth_proto_cap %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_cap));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_admin %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_admin));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_oper %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_oper));
	en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n",
	       be32_to_cpu(ptys_reg.eth_proto_lp_adv));

	cmd->supported = 0;
	cmd->advertising = 0;

	/* Supported = port type + capability link modes + pause */
	cmd->supported |= ptys_get_supported_port(&ptys_reg);

	eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap);
	cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED);

	/* Advertising comes from the admin mask */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin);
	cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	cmd->advertising |= (priv->prof->tx_pause) ? ADVERTISED_Pause : 0;

	/* Asymmetric pause is advertised when tx and rx pause differ */
	cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ?
		ADVERTISED_Asym_Pause : 0;

	cmd->port = ptys_get_active_port(&ptys_reg);
	cmd->transceiver = (SUPPORTED_TP & cmd->supported) ?
		XCVR_EXTERNAL : XCVR_INTERNAL;

	if (mlx4_en_autoneg_get(dev)) {
		cmd->supported |= SUPPORTED_Autoneg;
		cmd->advertising |= ADVERTISED_Autoneg;
	}

	cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	/* Link-partner advertisement from the lp_adv mask */
	eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv);
	cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED);

	cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ?
			ADVERTISED_Autoneg : 0;

	cmd->phy_address = 0;
	cmd->mdio_support = 0;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
	cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;

	return ret;
}
  688. static void ethtool_get_default_settings(struct net_device *dev,
  689. struct ethtool_cmd *cmd)
  690. {
  691. struct mlx4_en_priv *priv = netdev_priv(dev);
  692. int trans_type;
  693. cmd->autoneg = AUTONEG_DISABLE;
  694. cmd->supported = SUPPORTED_10000baseT_Full;
  695. cmd->advertising = ADVERTISED_10000baseT_Full;
  696. trans_type = priv->port_state.transceiver;
  697. if (trans_type > 0 && trans_type <= 0xC) {
  698. cmd->port = PORT_FIBRE;
  699. cmd->transceiver = XCVR_EXTERNAL;
  700. cmd->supported |= SUPPORTED_FIBRE;
  701. cmd->advertising |= ADVERTISED_FIBRE;
  702. } else if (trans_type == 0x80 || trans_type == 0) {
  703. cmd->port = PORT_TP;
  704. cmd->transceiver = XCVR_INTERNAL;
  705. cmd->supported |= SUPPORTED_TP;
  706. cmd->advertising |= ADVERTISED_TP;
  707. } else {
  708. cmd->port = -1;
  709. cmd->transceiver = -1;
  710. }
  711. }
/* ethtool get_settings entry point.
 *
 * Refreshes the cached port state from firmware, then fills @cmd either
 * from the PTYS register (when the device supports ETH protocol control)
 * or from the fixed 10G defaults.  Speed/duplex are reported only while
 * the carrier is up.
 */
static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int ret = -EINVAL;	/* forces the default path unless PTYS succeeds */

	/* NOTE(review): QUERY_PORT failure is mapped to -ENOMEM regardless
	 * of the underlying cause — confirm this is intentional. */
	if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
		return -ENOMEM;

	en_dbg(DRV, priv, "query port state.flags ANC(%x) ANE(%x)\n",
	       priv->port_state.flags & MLX4_EN_PORT_ANC,
	       priv->port_state.flags & MLX4_EN_PORT_ANE);

	if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL)
		ret = ethtool_get_ptys_settings(dev, cmd);
	if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */
		ethtool_get_default_settings(dev, cmd);

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
		cmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}
	return 0;
}
  734. /* Calculate PTYS admin according ethtool speed (SPEED_XXX) */
  735. static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed,
  736. __be32 proto_cap)
  737. {
  738. __be32 proto_admin = 0;
  739. if (!speed) { /* Speed = 0 ==> Reset Link modes */
  740. proto_admin = proto_cap;
  741. en_info(priv, "Speed was set to 0, Reset advertised Link Modes to default (%x)\n",
  742. be32_to_cpu(proto_cap));
  743. } else {
  744. u32 ptys_link_modes = speed2ptys_link_modes(speed);
  745. proto_admin = cpu_to_be32(ptys_link_modes) & proto_cap;
  746. en_info(priv, "Setting Speed to %d\n", speed);
  747. }
  748. return proto_admin;
  749. }
  750. static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  751. {
  752. struct mlx4_en_priv *priv = netdev_priv(dev);
  753. struct mlx4_ptys_reg ptys_reg;
  754. __be32 proto_admin;
  755. int ret;
  756. u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED);
  757. int speed = ethtool_cmd_speed(cmd);
  758. en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n",
  759. speed, cmd->advertising, cmd->autoneg, cmd->duplex);
  760. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) ||
  761. (cmd->duplex == DUPLEX_HALF))
  762. return -EINVAL;
  763. memset(&ptys_reg, 0, sizeof(ptys_reg));
  764. ptys_reg.local_port = priv->port;
  765. ptys_reg.proto_mask = MLX4_PTYS_EN;
  766. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev,
  767. MLX4_ACCESS_REG_QUERY, &ptys_reg);
  768. if (ret) {
  769. en_warn(priv, "Failed to QUERY mlx4_ACCESS_PTYS_REG status(%x)\n",
  770. ret);
  771. return 0;
  772. }
  773. proto_admin = cmd->autoneg == AUTONEG_ENABLE ?
  774. cpu_to_be32(ptys_adv) :
  775. speed_set_ptys_admin(priv, speed,
  776. ptys_reg.eth_proto_cap);
  777. proto_admin &= ptys_reg.eth_proto_cap;
  778. if (!proto_admin) {
  779. en_warn(priv, "Not supported link mode(s) requested, check supported link modes.\n");
  780. return -EINVAL; /* nothing to change due to bad input */
  781. }
  782. if (proto_admin == ptys_reg.eth_proto_admin)
  783. return 0; /* Nothing to change */
  784. en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
  785. be32_to_cpu(proto_admin));
  786. ptys_reg.eth_proto_admin = proto_admin;
  787. ret = mlx4_ACCESS_PTYS_REG(priv->mdev->dev, MLX4_ACCESS_REG_WRITE,
  788. &ptys_reg);
  789. if (ret) {
  790. en_warn(priv, "Failed to write mlx4_ACCESS_PTYS_REG eth_proto_admin(0x%x) status(0x%x)",
  791. be32_to_cpu(ptys_reg.eth_proto_admin), ret);
  792. return ret;
  793. }
  794. mutex_lock(&priv->mdev->state_lock);
  795. if (priv->port_up) {
  796. en_warn(priv, "Port link mode changed, restarting port...\n");
  797. mlx4_en_stop_port(dev, 1);
  798. if (mlx4_en_start_port(dev))
  799. en_err(priv, "Failed restarting port %d\n", priv->port);
  800. }
  801. mutex_unlock(&priv->mdev->state_lock);
  802. return 0;
  803. }
  804. static int mlx4_en_get_coalesce(struct net_device *dev,
  805. struct ethtool_coalesce *coal)
  806. {
  807. struct mlx4_en_priv *priv = netdev_priv(dev);
  808. coal->tx_coalesce_usecs = priv->tx_usecs;
  809. coal->tx_max_coalesced_frames = priv->tx_frames;
  810. coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
  811. coal->rx_coalesce_usecs = priv->rx_usecs;
  812. coal->rx_max_coalesced_frames = priv->rx_frames;
  813. coal->pkt_rate_low = priv->pkt_rate_low;
  814. coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
  815. coal->pkt_rate_high = priv->pkt_rate_high;
  816. coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
  817. coal->rate_sample_interval = priv->sample_interval;
  818. coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
  819. return 0;
  820. }
/* ethtool set_coalesce entry point.
 *
 * Validates the requested interrupt-coalescing parameters against the
 * driver limits, stores them in the private struct and applies them via
 * mlx4_en_moderation_update().
 */
static int mlx4_en_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coal)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	/* A TX work limit of zero would stall TX completion processing */
	if (!coal->tx_max_coalesced_frames_irq)
		return -EINVAL;

	if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
	    coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
	    coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
	    coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
		netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
			    __func__, MLX4_EN_MAX_COAL_TIME);
		return -ERANGE;
	}

	if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
	    coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
		netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
			    __func__, MLX4_EN_MAX_COAL_PKTS);
		return -ERANGE;
	}

	/* MLX4_EN_AUTO_CONF selects the driver defaults */
	priv->rx_frames = (coal->rx_max_coalesced_frames ==
			   MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TARGET :
				coal->rx_max_coalesced_frames;
	priv->rx_usecs = (coal->rx_coalesce_usecs ==
			  MLX4_EN_AUTO_CONF) ?
				MLX4_EN_RX_COAL_TIME :
				coal->rx_coalesce_usecs;

	/* Setting TX coalescing parameters */
	if (coal->tx_coalesce_usecs != priv->tx_usecs ||
	    coal->tx_max_coalesced_frames != priv->tx_frames) {
		priv->tx_usecs = coal->tx_coalesce_usecs;
		priv->tx_frames = coal->tx_max_coalesced_frames;
	}

	/* Set adaptive coalescing params */
	priv->pkt_rate_low = coal->pkt_rate_low;
	priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
	priv->pkt_rate_high = coal->pkt_rate_high;
	priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
	priv->sample_interval = coal->rate_sample_interval;
	priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
	priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;

	return mlx4_en_moderation_update(priv);
}
/* ethtool set_pauseparam entry point.
 *
 * Pause autonegotiation is not supported.  Stores the new RX/TX pause
 * flags and programs them with SET_PORT; on success the PFC statistics
 * bitmap is updated to match the new configuration.
 */
static int mlx4_en_set_pauseparam(struct net_device *dev,
				  struct ethtool_pauseparam *pause)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (pause->autoneg)
		return -EINVAL;

	priv->prof->tx_pause = pause->tx_pause != 0;
	priv->prof->rx_pause = pause->rx_pause != 0;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err)
		en_err(priv, "Failed setting pause params\n");
	else
		mlx4_en_update_pfc_stats_bitmap(mdev->dev, &priv->stats_bitmap,
						priv->prof->rx_ppp,
						priv->prof->rx_pause,
						priv->prof->tx_ppp,
						priv->prof->tx_pause);
	return err;
}
  891. static void mlx4_en_get_pauseparam(struct net_device *dev,
  892. struct ethtool_pauseparam *pause)
  893. {
  894. struct mlx4_en_priv *priv = netdev_priv(dev);
  895. pause->tx_pause = priv->prof->tx_pause;
  896. pause->rx_pause = priv->prof->rx_pause;
  897. }
/* ethtool set_ringparam entry point.
 *
 * Rounds the requested ring sizes up to a power of two, clamps them to
 * the driver limits and, if they differ from the current sizes, rebuilds
 * the port resources (stopping and restarting the port if it was up).
 * Jumbo and mini RX rings are not supported.
 */
static int mlx4_en_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *param)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	u32 rx_size, tx_size;
	int port_up = 0;
	int err = 0;

	if (param->rx_jumbo_pending || param->rx_mini_pending)
		return -EINVAL;

	/* Ring sizes must be powers of two within the driver limits */
	rx_size = roundup_pow_of_two(param->rx_pending);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(param->tx_pending);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	/* Nothing to do if both sizes already match */
	if (rx_size == (priv->port_up ? priv->rx_ring[0]->actual_size :
					priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	/* NOTE(review): if the re-allocation below fails, the old resources
	 * have already been freed and the port is left without rings —
	 * confirm callers tolerate this. */
	mlx4_en_free_resources(priv);

	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
  941. static void mlx4_en_get_ringparam(struct net_device *dev,
  942. struct ethtool_ringparam *param)
  943. {
  944. struct mlx4_en_priv *priv = netdev_priv(dev);
  945. memset(param, 0, sizeof(*param));
  946. param->rx_max_pending = MLX4_EN_MAX_RX_SIZE;
  947. param->tx_max_pending = MLX4_EN_MAX_TX_SIZE;
  948. param->rx_pending = priv->port_up ?
  949. priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size;
  950. param->tx_pending = priv->tx_ring[0]->size;
  951. }
  952. static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
  953. {
  954. struct mlx4_en_priv *priv = netdev_priv(dev);
  955. return priv->rx_ring_num;
  956. }
/* RSS hash key length in bytes; fixed for all mlx4 devices. */
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
{
	return MLX4_EN_RSS_KEY_SIZE;
}
  961. static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
  962. {
  963. struct mlx4_en_priv *priv = netdev_priv(dev);
  964. /* check if requested function is supported by the device */
  965. if (hfunc == ETH_RSS_HASH_TOP) {
  966. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
  967. return -EINVAL;
  968. if (!(dev->features & NETIF_F_RXHASH))
  969. en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
  970. return 0;
  971. } else if (hfunc == ETH_RSS_HASH_XOR) {
  972. if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
  973. return -EINVAL;
  974. if (dev->features & NETIF_F_RXHASH)
  975. en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
  976. return 0;
  977. }
  978. return -EINVAL;
  979. }
  980. static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
  981. u8 *hfunc)
  982. {
  983. struct mlx4_en_priv *priv = netdev_priv(dev);
  984. struct mlx4_en_rss_map *rss_map = &priv->rss_map;
  985. int rss_rings;
  986. size_t n = priv->rx_ring_num;
  987. int err = 0;
  988. rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
  989. rss_rings = 1 << ilog2(rss_rings);
  990. while (n--) {
  991. if (!ring_index)
  992. break;
  993. ring_index[n] = rss_map->qps[n % rss_rings].qpn -
  994. rss_map->base_qpn;
  995. }
  996. if (key)
  997. memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
  998. if (hfunc)
  999. *hfunc = priv->rss_hash_fn;
  1000. return err;
  1001. }
/* ethtool set_rxfh entry point.
 *
 * The indirection table must map slot i to ring (i % rss_rings) with a
 * power-of-two rss_rings — i.e. flows spread evenly across the first
 * rss_rings rings; any other table is rejected.  Applying a new table,
 * key or hash function restarts the port if it was up.
 */
static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
			    const u8 *key, const u8 hfunc)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;
	int i;
	int rss_rings = 0;

	/* Calculate RSS table size and make sure flows are spread evenly
	 * between rings
	 */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (!ring_index)
			continue;
		/* the first slot that wraps back to ring 0 marks the period
		 * of the table, i.e. the number of RSS rings */
		if (i > 0 && !ring_index[i] && !rss_rings)
			rss_rings = i;

		if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
			return -EINVAL;
	}

	if (!rss_rings)
		rss_rings = priv->rx_ring_num;

	/* RSS table size must be an order of 2 */
	if (!is_power_of_2(rss_rings))
		return -EINVAL;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE) {
		err = mlx4_en_check_rxfh_func(dev, hfunc);
		if (err)
			return err;
	}

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	if (ring_index)
		priv->prof->rss_rings = rss_rings;
	if (key)
		memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
	if (hfunc !=  ETH_RSS_HASH_NO_CHANGE)
		priv->rss_hash_fn = hfunc;

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	mutex_unlock(&mdev->state_lock);
	return err;
}
/* True when every bit of @field is 0 or every bit is 1 — the only mask
 * shapes the HW exact-match steering supports. */
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)
/* Validate an ethtool flow-steering rule before translating it into a
 * device rule.
 *
 * Checks the rule location, the per-flow-type field masks (which must
 * be all-zeros or all-ones because the HW only does exact matching) and
 * the FLOW_EXT VLAN constraints.  Returns 0 when acceptable, -EINVAL
 * otherwise.
 */
static int mlx4_en_validate_flow(struct net_device *dev,
				 struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l3_mask;
	struct ethtool_tcpip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	if (cmd->fs.flow_type & FLOW_MAC_EXT) {
		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(cmd->fs.m_ext.h_dest))
			return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		/* matching on TOS is not supported */
		if (cmd->fs.m_u.tcp_ip4_spec.tos)
			return -EINVAL;
		l4_mask = &cmd->fs.m_u.tcp_ip4_spec;
		/* don't allow mask which isn't all 0 or 1 */
		if (!all_zeros_or_all_ones(l4_mask->ip4src) ||
		    !all_zeros_or_all_ones(l4_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l4_mask->psrc) ||
		    !all_zeros_or_all_ones(l4_mask->pdst))
			return -EINVAL;
		break;
	case IP_USER_FLOW:
		l3_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* only IPv4 src/dst matching is supported and at least one
		 * of the two addresses must be given */
		if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
		    cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 ||
		    (!l3_mask->ip4src && !l3_mask->ip4dst) ||
		    !all_zeros_or_all_ones(l3_mask->ip4src) ||
		    !all_zeros_or_all_ones(l3_mask->ip4dst))
			return -EINVAL;
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* source mac mask must not be set */
		if (!is_zero_ether_addr(eth_mask->h_source))
			return -EINVAL;

		/* dest mac mask must be ff:ff:ff:ff:ff:ff */
		if (!is_broadcast_ether_addr(eth_mask->h_dest))
			return -EINVAL;

		if (!all_zeros_or_all_ones(eth_mask->h_proto))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* the VLAN-ID mask must be empty or an exact match, and a
		 * matched VLAN ID must be valid */
		if (cmd->fs.m_ext.vlan_etype ||
		    !((cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      0 ||
		      (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)) ==
		      cpu_to_be16(VLAN_VID_MASK)))
			return -EINVAL;

		if (cmd->fs.m_ext.vlan_tci) {
			if (be16_to_cpu(cmd->fs.h_ext.vlan_tci) >= VLAN_N_VID)
				return -EINVAL;
		}
	}

	return 0;
}
  1116. static int mlx4_en_ethtool_add_mac_rule(struct ethtool_rxnfc *cmd,
  1117. struct list_head *rule_list_h,
  1118. struct mlx4_spec_list *spec_l2,
  1119. unsigned char *mac)
  1120. {
  1121. int err = 0;
  1122. __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
  1123. spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH;
  1124. memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN);
  1125. memcpy(spec_l2->eth.dst_mac, mac, ETH_ALEN);
  1126. if ((cmd->fs.flow_type & FLOW_EXT) &&
  1127. (cmd->fs.m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
  1128. spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci;
  1129. spec_l2->eth.vlan_id_msk = cpu_to_be16(VLAN_VID_MASK);
  1130. }
  1131. list_add_tail(&spec_l2->list, rule_list_h);
  1132. return err;
  1133. }
/* Append the L2 (MAC) spec for an IPv4 classification rule.
 *
 * Unicast destinations match the user-supplied MAC (FLOW_MAC_EXT) or
 * the netdev's own address; multicast destinations match the MAC derived
 * from the multicast IP mapping.  Returns -EINVAL when the kernel was
 * built without CONFIG_INET.
 */
static int mlx4_en_ethtool_add_mac_rule_by_ipv4(struct mlx4_en_priv *priv,
						struct ethtool_rxnfc *cmd,
						struct list_head *rule_list_h,
						struct mlx4_spec_list *spec_l2,
						__be32 ipv4_dst)
{
#ifdef CONFIG_INET
	unsigned char mac[ETH_ALEN];

	if (!ipv4_is_multicast(ipv4_dst)) {
		if (cmd->fs.flow_type & FLOW_MAC_EXT)
			memcpy(&mac, cmd->fs.h_ext.h_dest, ETH_ALEN);
		else
			memcpy(&mac, priv->dev->dev_addr, ETH_ALEN);
	} else {
		ip_eth_mc_map(ipv4_dst, mac);
	}

	return mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2, &mac[0]);
#else
	return -EINVAL;
#endif
}
/* Build the L2+L3 spec list for an IP_USER_FLOW rule and append it to
 * @list_h.
 *
 * The L2 entry is queued by the MAC-rule helper; the L3 entry matches
 * the IPv4 src/dst addresses whose masks are set.  On failure both
 * spec entries are freed and an errno is returned.
 */
static int add_ip_rule(struct mlx4_en_priv *priv,
		       struct ethtool_rxnfc *cmd,
		       struct list_head *list_h)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec;

	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	if (!spec_l2 || !spec_l3) {
		err = -ENOMEM;
		goto free_spec;
	}

	err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h, spec_l2,
						   cmd->fs.h_u.
						   usr_ip4_spec.ip4dst);
	if (err)
		goto free_spec;

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;
	spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src;
	/* validate_flow() guarantees each mask is all-zeros or all-ones */
	if (l3_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst;
	if (l3_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;
	list_add_tail(&spec_l3->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	return err;
}
/* Build the L2+L3+L4 spec list for a TCP/UDP over IPv4 rule and append
 * it to @list_h.
 *
 * @proto is TCP_V4_FLOW or UDP_V4_FLOW.  The L2 entry is queued by the
 * MAC-rule helper; L3 matches IPv4 src/dst, L4 matches the src/dst
 * ports, each only when its mask is set.  On failure all spec entries
 * are freed and an errno is returned.
 */
static int add_tcp_udp_rule(struct mlx4_en_priv *priv,
			    struct ethtool_rxnfc *cmd,
			    struct list_head *list_h, int proto)
{
	int err;
	struct mlx4_spec_list *spec_l2 = NULL;
	struct mlx4_spec_list *spec_l3 = NULL;
	struct mlx4_spec_list *spec_l4 = NULL;
	struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec;

	spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
	spec_l3 = kzalloc(sizeof(*spec_l3), GFP_KERNEL);
	spec_l4 = kzalloc(sizeof(*spec_l4), GFP_KERNEL);
	if (!spec_l2 || !spec_l3 || !spec_l4) {
		err = -ENOMEM;
		goto free_spec;
	}

	spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4;

	if (proto == TCP_V4_FLOW) {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   tcp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst;
	} else {
		err = mlx4_en_ethtool_add_mac_rule_by_ipv4(priv, cmd, list_h,
							   spec_l2,
							   cmd->fs.h_u.
							   udp_ip4_spec.ip4dst);
		if (err)
			goto free_spec;
		spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP;
		spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src;
		spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst;
		spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc;
		spec_l4->tcp_udp.dst_port = cmd->fs.h_u.udp_ip4_spec.pdst;
	}

	/* validate_flow() guarantees each mask is all-zeros or all-ones */
	if (l4_mask->ip4src)
		spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK;
	if (l4_mask->ip4dst)
		spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK;

	if (l4_mask->psrc)
		spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK;
	if (l4_mask->pdst)
		spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK;

	list_add_tail(&spec_l3->list, list_h);
	list_add_tail(&spec_l4->list, list_h);

	return 0;

free_spec:
	kfree(spec_l2);
	kfree(spec_l3);
	kfree(spec_l4);
	return err;
}
/* Translate a validated ethtool flow spec into a list of mlx4 HW rule
 * specs on @rule_list_h.
 *
 * Dispatches on the basic flow type with the extension flags stripped.
 * mlx4_en_validate_flow() has already rejected unknown types, so the
 * switch needs no default case.
 */
static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev,
					     struct ethtool_rxnfc *cmd,
					     struct list_head *rule_list_h)
{
	int err;
	struct ethhdr *eth_spec;
	struct mlx4_spec_list *spec_l2;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	err = mlx4_en_validate_flow(dev, cmd);
	if (err)
		return err;

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		spec_l2 = kzalloc(sizeof(*spec_l2), GFP_KERNEL);
		if (!spec_l2)
			return -ENOMEM;

		eth_spec = &cmd->fs.h_u.ether_spec;
		/* add_mac_rule always returns 0, so its result is ignored */
		mlx4_en_ethtool_add_mac_rule(cmd, rule_list_h, spec_l2,
					     &eth_spec->h_dest[0]);
		spec_l2->eth.ether_type = eth_spec->h_proto;
		if (eth_spec->h_proto)
			spec_l2->eth.ether_type_enable = 1;
		break;
	case IP_USER_FLOW:
		err = add_ip_rule(priv, cmd, rule_list_h);
		break;
	case TCP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW);
		break;
	case UDP_V4_FLOW:
		err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW);
		break;
	}

	return err;
}
/* Insert or replace the flow-steering rule at cmd->fs.location.
 *
 * Resolves the destination QP (drop QP, direct QP attach, or an
 * RX-ring RSS QP), builds the HW rule spec list, detaches any rule
 * already occupying the slot and attaches the new one.  The temporary
 * spec list is always freed before returning.
 */
static int mlx4_en_flow_replace(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	int err;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct ethtool_flow_id *loc_rule;
	struct mlx4_spec_list *spec, *tmp_spec;
	u32 qpn;
	u64 reg_id;

	struct mlx4_net_trans_rule rule = {
		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
		.exclusive = 0,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
	};

	rule.port = priv->port;
	/* the ethtool location doubles as the rule priority within the
	 * ethtool domain */
	rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location;
	INIT_LIST_HEAD(&rule.list);

	/* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */
	if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC)
		qpn = priv->drop_qp.qpn;
	else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) {
		qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
	} else {
		if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
			en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
		qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
		if (!qpn) {
			en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
				cmd->fs.ring_cookie);
			return -EINVAL;
		}
	}
	rule.qpn = qpn;
	err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list);
	if (err)
		goto out_free_list;

	/* NOTE(review): the old rule in this slot is detached before the new
	 * one is attached — if the attach below fails, the slot ends up
	 * empty rather than keeping the old rule. */
	loc_rule = &priv->ethtool_rules[cmd->fs.location];
	if (loc_rule->id) {
		err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id);
		if (err) {
			en_err(priv, "Fail to detach network rule at location %d. registration id = %llx\n",
			       cmd->fs.location, loc_rule->id);
			goto out_free_list;
		}
		loc_rule->id = 0;
		memset(&loc_rule->flow_spec, 0,
		       sizeof(struct ethtool_rx_flow_spec));
		list_del(&loc_rule->list);
	}
	err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
	if (err) {
		en_err(priv, "Fail to attach network rule at location %d\n",
		       cmd->fs.location);
		goto out_free_list;
	}
	loc_rule->id = reg_id;
	memcpy(&loc_rule->flow_spec, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));
	list_add_tail(&loc_rule->list, &priv->ethtool_list);

out_free_list:
	list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) {
		list_del(&spec->list);
		kfree(spec);
	}
	return err;
}
  1352. static int mlx4_en_flow_detach(struct net_device *dev,
  1353. struct ethtool_rxnfc *cmd)
  1354. {
  1355. int err = 0;
  1356. struct ethtool_flow_id *rule;
  1357. struct mlx4_en_priv *priv = netdev_priv(dev);
  1358. if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
  1359. return -EINVAL;
  1360. rule = &priv->ethtool_rules[cmd->fs.location];
  1361. if (!rule->id) {
  1362. err = -ENOENT;
  1363. goto out;
  1364. }
  1365. err = mlx4_flow_detach(priv->mdev->dev, rule->id);
  1366. if (err) {
  1367. en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n",
  1368. cmd->fs.location, rule->id);
  1369. goto out;
  1370. }
  1371. rule->id = 0;
  1372. memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec));
  1373. list_del(&rule->list);
  1374. out:
  1375. return err;
  1376. }
  1377. static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
  1378. int loc)
  1379. {
  1380. int err = 0;
  1381. struct ethtool_flow_id *rule;
  1382. struct mlx4_en_priv *priv = netdev_priv(dev);
  1383. if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
  1384. return -EINVAL;
  1385. rule = &priv->ethtool_rules[loc];
  1386. if (rule->id)
  1387. memcpy(&cmd->fs, &rule->flow_spec,
  1388. sizeof(struct ethtool_rx_flow_spec));
  1389. else
  1390. err = -ENOENT;
  1391. return err;
  1392. }
  1393. static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv)
  1394. {
  1395. int i, res = 0;
  1396. for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
  1397. if (priv->ethtool_rules[i].id)
  1398. res++;
  1399. }
  1400. return res;
  1401. }
/* ethtool get_rxnfc entry point.
 *
 * GRXRINGS reports the RX ring count.  The classification sub-commands
 * are only valid in device-managed steering mode while the port is up.
 * GRXCLSRLALL walks all rule slots, collecting the locations of active
 * rules until cmd->rule_cnt entries are filled.
 */
static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     u32 *rule_locs)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int i = 0, priority = 0;

	if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT ||
	     cmd->cmd == ETHTOOL_GRXCLSRULE ||
	     cmd->cmd == ETHTOOL_GRXCLSRLALL) &&
	    (mdev->dev->caps.steering_mode !=
	     MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up))
		return -EINVAL;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->rx_ring_num;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = mlx4_en_get_num_flows(priv);
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		/* -ENOENT marks an empty slot and is skipped, not fatal */
		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
			err = mlx4_en_get_flow(dev, cmd, i);
			if (!err)
				rule_locs[priority++] = i;
			i++;
		}
		err = 0;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
  1440. static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
  1441. {
  1442. int err = 0;
  1443. struct mlx4_en_priv *priv = netdev_priv(dev);
  1444. struct mlx4_en_dev *mdev = priv->mdev;
  1445. if (mdev->dev->caps.steering_mode !=
  1446. MLX4_STEERING_MODE_DEVICE_MANAGED || !priv->port_up)
  1447. return -EINVAL;
  1448. switch (cmd->cmd) {
  1449. case ETHTOOL_SRXCLSRLINS:
  1450. err = mlx4_en_flow_replace(dev, cmd);
  1451. break;
  1452. case ETHTOOL_SRXCLSRLDEL:
  1453. err = mlx4_en_flow_detach(dev, cmd);
  1454. break;
  1455. default:
  1456. en_warn(priv, "Unsupported ethtool command. (%d)\n", cmd->cmd);
  1457. return -EINVAL;
  1458. }
  1459. return err;
  1460. }
  1461. static void mlx4_en_get_channels(struct net_device *dev,
  1462. struct ethtool_channels *channel)
  1463. {
  1464. struct mlx4_en_priv *priv = netdev_priv(dev);
  1465. memset(channel, 0, sizeof(*channel));
  1466. channel->max_rx = MAX_RX_RINGS;
  1467. channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
  1468. channel->rx_count = priv->rx_ring_num;
  1469. channel->tx_count = priv->tx_ring_num / MLX4_EN_NUM_UP;
  1470. }
/* ethtool set_channels entry point.
 *
 * Validates the requested ring counts, then rebuilds the port resources
 * with the new TX/RX ring configuration (TX rings are allocated per
 * user priority).  The port is restarted if it was running.
 */
static int mlx4_en_set_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	if (channel->other_count || channel->combined_count ||
	    channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
	    channel->rx_count > MAX_RX_RINGS ||
	    !channel->tx_count || !channel->rx_count)
		return -EINVAL;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev, 1);
	}

	/* NOTE(review): as in set_ringparam, a failed re-allocation below
	 * leaves the port without resources — confirm acceptable. */
	mlx4_en_free_resources(priv);

	priv->num_tx_rings_p_up = channel->tx_count;
	priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
	priv->rx_ring_num = channel->rx_count;

	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}

	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	if (dev->num_tc)
		mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);

	en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);

	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}

	err = mlx4_en_moderation_update(priv);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
/* ethtool get_ts_info entry point.
 *
 * Reports software timestamping via the generic helper, then adds HW
 * timestamping capabilities — and the PTP clock index, when a clock has
 * been registered — if the device supports timestamping.
 */
static int mlx4_en_get_ts_info(struct net_device *dev,
			       struct ethtool_ts_info *info)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;

	ret = ethtool_op_get_ts_info(dev, info);
	if (ret)
		return ret;

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
		info->so_timestamping |=
			SOF_TIMESTAMPING_TX_HARDWARE |
			SOF_TIMESTAMPING_RX_HARDWARE |
			SOF_TIMESTAMPING_RAW_HARDWARE;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
			(1 << HWTSTAMP_TX_ON);

		/* HW filtering is all-or-nothing on this device */
		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
			(1 << HWTSTAMP_FILTER_ALL);

		if (mdev->ptp_clock)
			info->phc_index = ptp_clock_index(mdev->ptp_clock);
	}

	return ret;
}
  1538. static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
  1539. {
  1540. struct mlx4_en_priv *priv = netdev_priv(dev);
  1541. struct mlx4_en_dev *mdev = priv->mdev;
  1542. bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
  1543. bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
  1544. bool phv_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_PHV);
  1545. bool phv_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_PHV);
  1546. int i;
  1547. int ret = 0;
  1548. if (bf_enabled_new != bf_enabled_old) {
  1549. if (bf_enabled_new) {
  1550. bool bf_supported = true;
  1551. for (i = 0; i < priv->tx_ring_num; i++)
  1552. bf_supported &= priv->tx_ring[i]->bf_alloced;
  1553. if (!bf_supported) {
  1554. en_err(priv, "BlueFlame is not supported\n");
  1555. return -EINVAL;
  1556. }
  1557. priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
  1558. } else {
  1559. priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
  1560. }
  1561. for (i = 0; i < priv->tx_ring_num; i++)
  1562. priv->tx_ring[i]->bf_enabled = bf_enabled_new;
  1563. en_info(priv, "BlueFlame %s\n",
  1564. bf_enabled_new ? "Enabled" : "Disabled");
  1565. }
  1566. if (phv_enabled_new != phv_enabled_old) {
  1567. ret = set_phv_bit(mdev->dev, priv->port, (int)phv_enabled_new);
  1568. if (ret)
  1569. return ret;
  1570. else if (phv_enabled_new)
  1571. priv->pflags |= MLX4_EN_PRIV_FLAGS_PHV;
  1572. else
  1573. priv->pflags &= ~MLX4_EN_PRIV_FLAGS_PHV;
  1574. en_info(priv, "PHV bit %s\n",
  1575. phv_enabled_new ? "Enabled" : "Disabled");
  1576. }
  1577. return 0;
  1578. }
  1579. static u32 mlx4_en_get_priv_flags(struct net_device *dev)
  1580. {
  1581. struct mlx4_en_priv *priv = netdev_priv(dev);
  1582. return priv->pflags;
  1583. }
  1584. static int mlx4_en_get_tunable(struct net_device *dev,
  1585. const struct ethtool_tunable *tuna,
  1586. void *data)
  1587. {
  1588. const struct mlx4_en_priv *priv = netdev_priv(dev);
  1589. int ret = 0;
  1590. switch (tuna->id) {
  1591. case ETHTOOL_TX_COPYBREAK:
  1592. *(u32 *)data = priv->prof->inline_thold;
  1593. break;
  1594. default:
  1595. ret = -EINVAL;
  1596. break;
  1597. }
  1598. return ret;
  1599. }
  1600. static int mlx4_en_set_tunable(struct net_device *dev,
  1601. const struct ethtool_tunable *tuna,
  1602. const void *data)
  1603. {
  1604. struct mlx4_en_priv *priv = netdev_priv(dev);
  1605. int val, ret = 0;
  1606. switch (tuna->id) {
  1607. case ETHTOOL_TX_COPYBREAK:
  1608. val = *(u32 *)data;
  1609. if (val < MIN_PKT_LEN || val > MAX_INLINE)
  1610. ret = -EINVAL;
  1611. else
  1612. priv->prof->inline_thold = val;
  1613. break;
  1614. default:
  1615. ret = -EINVAL;
  1616. break;
  1617. }
  1618. return ret;
  1619. }
  1620. static int mlx4_en_get_module_info(struct net_device *dev,
  1621. struct ethtool_modinfo *modinfo)
  1622. {
  1623. struct mlx4_en_priv *priv = netdev_priv(dev);
  1624. struct mlx4_en_dev *mdev = priv->mdev;
  1625. int ret;
  1626. u8 data[4];
  1627. /* Read first 2 bytes to get Module & REV ID */
  1628. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1629. 0/*offset*/, 2/*size*/, data);
  1630. if (ret < 2)
  1631. return -EIO;
  1632. switch (data[0] /* identifier */) {
  1633. case MLX4_MODULE_ID_QSFP:
  1634. modinfo->type = ETH_MODULE_SFF_8436;
  1635. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1636. break;
  1637. case MLX4_MODULE_ID_QSFP_PLUS:
  1638. if (data[1] >= 0x3) { /* revision id */
  1639. modinfo->type = ETH_MODULE_SFF_8636;
  1640. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1641. } else {
  1642. modinfo->type = ETH_MODULE_SFF_8436;
  1643. modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
  1644. }
  1645. break;
  1646. case MLX4_MODULE_ID_QSFP28:
  1647. modinfo->type = ETH_MODULE_SFF_8636;
  1648. modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
  1649. break;
  1650. case MLX4_MODULE_ID_SFP:
  1651. modinfo->type = ETH_MODULE_SFF_8472;
  1652. modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
  1653. break;
  1654. default:
  1655. return -ENOSYS;
  1656. }
  1657. return 0;
  1658. }
  1659. static int mlx4_en_get_module_eeprom(struct net_device *dev,
  1660. struct ethtool_eeprom *ee,
  1661. u8 *data)
  1662. {
  1663. struct mlx4_en_priv *priv = netdev_priv(dev);
  1664. struct mlx4_en_dev *mdev = priv->mdev;
  1665. int offset = ee->offset;
  1666. int i = 0, ret;
  1667. if (ee->len == 0)
  1668. return -EINVAL;
  1669. memset(data, 0, ee->len);
  1670. while (i < ee->len) {
  1671. en_dbg(DRV, priv,
  1672. "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
  1673. i, offset, ee->len - i);
  1674. ret = mlx4_get_module_info(mdev->dev, priv->port,
  1675. offset, ee->len - i, data + i);
  1676. if (!ret) /* Done reading */
  1677. return 0;
  1678. if (ret < 0) {
  1679. en_err(priv,
  1680. "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
  1681. i, offset, ee->len - i, ret);
  1682. return 0;
  1683. }
  1684. i += ret;
  1685. offset += ret;
  1686. }
  1687. return 0;
  1688. }
  1689. static int mlx4_en_set_phys_id(struct net_device *dev,
  1690. enum ethtool_phys_id_state state)
  1691. {
  1692. int err;
  1693. u16 beacon_duration;
  1694. struct mlx4_en_priv *priv = netdev_priv(dev);
  1695. struct mlx4_en_dev *mdev = priv->mdev;
  1696. if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_BEACON))
  1697. return -EOPNOTSUPP;
  1698. switch (state) {
  1699. case ETHTOOL_ID_ACTIVE:
  1700. beacon_duration = PORT_BEACON_MAX_LIMIT;
  1701. break;
  1702. case ETHTOOL_ID_INACTIVE:
  1703. beacon_duration = 0;
  1704. break;
  1705. default:
  1706. return -EOPNOTSUPP;
  1707. }
  1708. err = mlx4_SET_PORT_BEACON(mdev->dev, priv->port, beacon_duration);
  1709. return err;
  1710. }
/* ethtool entry points for mlx4 EN netdevices; attached to the netdev
 * during device setup.
 */
const struct ethtool_ops mlx4_en_ethtool_ops = {
	.get_drvinfo = mlx4_en_get_drvinfo,
	.get_settings = mlx4_en_get_settings,
	.set_settings = mlx4_en_set_settings,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx4_en_get_strings,
	.get_sset_count = mlx4_en_get_sset_count,
	.get_ethtool_stats = mlx4_en_get_ethtool_stats,
	.self_test = mlx4_en_self_test,
	.set_phys_id = mlx4_en_set_phys_id,
	.get_wol = mlx4_en_get_wol,
	.set_wol = mlx4_en_set_wol,
	.get_msglevel = mlx4_en_get_msglevel,
	.set_msglevel = mlx4_en_set_msglevel,
	.get_coalesce = mlx4_en_get_coalesce,
	.set_coalesce = mlx4_en_set_coalesce,
	.get_pauseparam = mlx4_en_get_pauseparam,
	.set_pauseparam = mlx4_en_set_pauseparam,
	.get_ringparam = mlx4_en_get_ringparam,
	.set_ringparam = mlx4_en_set_ringparam,
	.get_rxnfc = mlx4_en_get_rxnfc,
	.set_rxnfc = mlx4_en_set_rxnfc,
	.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
	.get_rxfh_key_size = mlx4_en_get_rxfh_key_size,
	.get_rxfh = mlx4_en_get_rxfh,
	.set_rxfh = mlx4_en_set_rxfh,
	.get_channels = mlx4_en_get_channels,
	.set_channels = mlx4_en_set_channels,
	.get_ts_info = mlx4_en_get_ts_info,
	.set_priv_flags = mlx4_en_set_priv_flags,
	.get_priv_flags = mlx4_en_get_priv_flags,
	.get_tunable = mlx4_en_get_tunable,
	.set_tunable = mlx4_en_set_tunable,
	.get_module_info = mlx4_en_get_module_info,
	.get_module_eeprom = mlx4_en_get_module_eeprom
};