ethtool.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195
  1. /****************************************************************************
  2. * Driver for Solarflare network controllers and boards
  3. * Copyright 2005-2006 Fen Systems Ltd.
  4. * Copyright 2006-2013 Solarflare Communications Inc.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published
  8. * by the Free Software Foundation, incorporated herein by reference.
  9. */
  10. #include <linux/netdevice.h>
  11. #include <linux/ethtool.h>
  12. #include <linux/rtnetlink.h>
  13. #include <linux/in.h>
  14. #include "net_driver.h"
  15. #include "workarounds.h"
  16. #include "selftest.h"
  17. #include "efx.h"
  18. #include "filter.h"
  19. #include "nic.h"
/* Describes one driver-maintained ("software") statistic exposed via
 * ethtool -S.  Each entry names the statistic, identifies which object
 * it lives in (NIC, channel or TX queue), and records where in that
 * object the field sits and how to read it.
 */
struct efx_sw_stat_desc {
	const char *name;		/* ethtool string for this statistic */
	enum {
		EFX_ETHTOOL_STAT_SOURCE_nic,
		EFX_ETHTOOL_STAT_SOURCE_channel,
		EFX_ETHTOOL_STAT_SOURCE_tx_queue
	} source;			/* which structure holds the field */
	unsigned offset;		/* byte offset of the field within it */
	u64(*get_stat) (void *field); /* Reader function */
};
/* Initialiser for a struct efx_sw_stat_desc with type-checking */
/* The ?: expression below is a compile-time type check only: comparing a
 * null (field_type *) against the address of the named member forces a
 * diagnostic if the member is not of field_type.  Both arms expand to the
 * same offsetof(), so the run-time value is unaffected.
 */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
			 get_stat_function) {			\
	.name = #stat_name,					\
	.source = EFX_ETHTOOL_STAT_SOURCE_##source_name,	\
	.offset = ((((field_type *) 0) ==			\
		    &((struct efx_##source_name *)0)->field) ?	\
		    offsetof(struct efx_##source_name, field) :	\
		    offsetof(struct efx_##source_name, field)),	\
	.get_stat = get_stat_function,				\
}
  41. static u64 efx_get_uint_stat(void *field)
  42. {
  43. return *(unsigned int *)field;
  44. }
  45. static u64 efx_get_atomic_stat(void *field)
  46. {
  47. return atomic_read((atomic_t *) field);
  48. }
/* NIC-level error counter kept in an atomic_t member of struct efx_nic */
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)		\
	EFX_ETHTOOL_STAT(field, nic, field,			\
			 atomic_t, efx_get_atomic_stat)
/* Per-channel counter; the struct efx_channel member is named n_<field> */
#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)			\
	EFX_ETHTOOL_STAT(field, channel, n_##field,		\
			 unsigned int, efx_get_uint_stat)
/* Per-TX-queue counter; reported to ethtool under the name tx_<field> */
#define EFX_ETHTOOL_UINT_TXQ_STAT(field)			\
	EFX_ETHTOOL_STAT(tx_##field, tx_queue, field,		\
			 unsigned int, efx_get_uint_stat)
/* Table of all driver-maintained statistics reported by ethtool -S,
 * in the order they appear after the hardware statistics.
 */
static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
	EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
	EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
	EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
	EFX_ETHTOOL_UINT_TXQ_STAT(pio_packets),
	EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
	EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};

#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)

/* Magic number expected in ethtool EEPROM requests */
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
  76. /**************************************************************************
  77. *
  78. * Ethtool operations
  79. *
  80. **************************************************************************
  81. */
  82. /* Identify device by flashing LEDs */
  83. static int efx_ethtool_phys_id(struct net_device *net_dev,
  84. enum ethtool_phys_id_state state)
  85. {
  86. struct efx_nic *efx = netdev_priv(net_dev);
  87. enum efx_led_mode mode = EFX_LED_DEFAULT;
  88. switch (state) {
  89. case ETHTOOL_ID_ON:
  90. mode = EFX_LED_ON;
  91. break;
  92. case ETHTOOL_ID_OFF:
  93. mode = EFX_LED_OFF;
  94. break;
  95. case ETHTOOL_ID_INACTIVE:
  96. mode = EFX_LED_DEFAULT;
  97. break;
  98. case ETHTOOL_ID_ACTIVE:
  99. return 1; /* cycle on/off once per second */
  100. }
  101. efx->type->set_id_led(efx, mode);
  102. return 0;
  103. }
  104. /* This must be called with rtnl_lock held. */
  105. static int efx_ethtool_get_settings(struct net_device *net_dev,
  106. struct ethtool_cmd *ecmd)
  107. {
  108. struct efx_nic *efx = netdev_priv(net_dev);
  109. struct efx_link_state *link_state = &efx->link_state;
  110. mutex_lock(&efx->mac_lock);
  111. efx->phy_op->get_settings(efx, ecmd);
  112. mutex_unlock(&efx->mac_lock);
  113. /* Both MACs support pause frames (bidirectional and respond-only) */
  114. ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
  115. if (LOOPBACK_INTERNAL(efx)) {
  116. ethtool_cmd_speed_set(ecmd, link_state->speed);
  117. ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
  118. }
  119. return 0;
  120. }
  121. /* This must be called with rtnl_lock held. */
  122. static int efx_ethtool_set_settings(struct net_device *net_dev,
  123. struct ethtool_cmd *ecmd)
  124. {
  125. struct efx_nic *efx = netdev_priv(net_dev);
  126. int rc;
  127. /* GMAC does not support 1000Mbps HD */
  128. if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
  129. (ecmd->duplex != DUPLEX_FULL)) {
  130. netif_dbg(efx, drv, efx->net_dev,
  131. "rejecting unsupported 1000Mbps HD setting\n");
  132. return -EINVAL;
  133. }
  134. mutex_lock(&efx->mac_lock);
  135. rc = efx->phy_op->set_settings(efx, ecmd);
  136. mutex_unlock(&efx->mac_lock);
  137. return rc;
  138. }
/* Report driver name/version, firmware version and PCI bus address.
 * The MCDI firmware version is only available on Siena and later NICs.
 */
static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
				    struct ethtool_drvinfo *info)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
		efx_mcdi_print_fwver(efx, info->fw_version,
				     sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
}
  150. static int efx_ethtool_get_regs_len(struct net_device *net_dev)
  151. {
  152. return efx_nic_get_regs_len(netdev_priv(net_dev));
  153. }
/* Dump NIC registers into buf; version identifies the register layout
 * (hardware revision) so user space can decode the dump.
 */
static void efx_ethtool_get_regs(struct net_device *net_dev,
				 struct ethtool_regs *regs, void *buf)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	regs->version = efx->type->revision;
	efx_nic_get_regs(efx, buf);
}
  161. static u32 efx_ethtool_get_msglevel(struct net_device *net_dev)
  162. {
  163. struct efx_nic *efx = netdev_priv(net_dev);
  164. return efx->msg_enable;
  165. }
/* Set the driver's message-level bitmask (controls netif_* logging). */
static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	efx->msg_enable = msg_enable;
}
  171. /**
  172. * efx_fill_test - fill in an individual self-test entry
  173. * @test_index: Index of the test
  174. * @strings: Ethtool strings, or %NULL
  175. * @data: Ethtool test results, or %NULL
  176. * @test: Pointer to test result (used only if data != %NULL)
  177. * @unit_format: Unit name format (e.g. "chan\%d")
  178. * @unit_id: Unit id (e.g. 0 for "chan0")
  179. * @test_format: Test name format (e.g. "loopback.\%s.tx.sent")
  180. * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent")
  181. *
  182. * Fill in an individual self-test entry.
  183. */
  184. static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
  185. int *test, const char *unit_format, int unit_id,
  186. const char *test_format, const char *test_id)
  187. {
  188. char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
  189. /* Fill data value, if applicable */
  190. if (data)
  191. data[test_index] = *test;
  192. /* Fill string, if applicable */
  193. if (strings) {
  194. if (strchr(unit_format, '%'))
  195. snprintf(unit_str, sizeof(unit_str),
  196. unit_format, unit_id);
  197. else
  198. strcpy(unit_str, unit_format);
  199. snprintf(test_str, sizeof(test_str), test_format, test_id);
  200. snprintf(strings + test_index * ETH_GSTRING_LEN,
  201. ETH_GSTRING_LEN,
  202. "%-6s %-24s", unit_str, test_str);
  203. }
  204. }
/* Helper macros expanding to a (format, id) argument pair for the
 * unit_format/unit_id parameters of efx_fill_test().
 */
#define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel
#define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
#define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
/* Expands to a (test_format, test_id) pair naming a loopback-mode test */
#define EFX_LOOPBACK_NAME(_mode, _counter) \
	"loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode)
/**
 * efx_fill_loopback_test - fill in a block of loopback self-test entries
 * @efx: Efx NIC
 * @lb_tests: Efx loopback self-test results structure
 * @mode: Loopback test mode
 * @test_index: Starting index of the test
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Fill in a block of loopback self-test entries. Return new test
 * index.
 */
static int efx_fill_loopback_test(struct efx_nic *efx,
				  struct efx_loopback_self_tests *lb_tests,
				  enum efx_loopback_mode mode,
				  unsigned int test_index,
				  u8 *strings, u64 *data)
{
	/* Loopback tests run only on the first TX-capable channel */
	struct efx_channel *channel =
		efx_get_channel(efx, efx->tx_channel_offset);
	struct efx_tx_queue *tx_queue;

	/* One tx_sent/tx_done pair of entries per TX queue */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_sent[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_sent"));
		efx_fill_test(test_index++, strings, data,
			      &lb_tests->tx_done[tx_queue->queue],
			      EFX_TX_QUEUE_NAME(tx_queue),
			      EFX_LOOPBACK_NAME(mode, "tx_done"));
	}
	/* Aggregate RX good/bad counts for this loopback mode */
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_good,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_good"));
	efx_fill_test(test_index++, strings, data,
		      &lb_tests->rx_bad,
		      "rx", 0,
		      EFX_LOOPBACK_NAME(mode, "rx_bad"));
	return test_index;
}
/**
 * efx_ethtool_fill_self_tests - get self-test details
 * @efx: Efx NIC
 * @tests: Efx self-test results structure, or %NULL
 * @strings: Ethtool strings, or %NULL
 * @data: Ethtool test results, or %NULL
 *
 * Get self-test number of strings, strings, and/or test results.
 * Return number of strings (== number of test results).
 *
 * The reason for merging these three functions is to make sure that
 * they can never be inconsistent.
 */
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
				       struct efx_self_tests *tests,
				       u8 *strings, u64 *data)
{
	struct efx_channel *channel;
	unsigned int n = 0, i;
	enum efx_loopback_mode mode;

	/* NOTE(review): tests is dereferenced unconditionally below, so
	 * callers passing tests == NULL rely on efx_fill_test() only
	 * reading *test when data != NULL.
	 */
	efx_fill_test(n++, strings, data, &tests->phy_alive,
		      "phy", 0, "alive", NULL);
	efx_fill_test(n++, strings, data, &tests->nvram,
		      "core", 0, "nvram", NULL);
	efx_fill_test(n++, strings, data, &tests->interrupt,
		      "core", 0, "interrupt", NULL);

	/* Event queues */
	efx_for_each_channel(channel, efx) {
		efx_fill_test(n++, strings, data,
			      &tests->eventq_dma[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.dma", NULL);
		efx_fill_test(n++, strings, data,
			      &tests->eventq_int[channel->channel],
			      EFX_CHANNEL_NAME(channel),
			      "eventq.int", NULL);
	}

	efx_fill_test(n++, strings, data, &tests->memory,
		      "core", 0, "memory", NULL);
	efx_fill_test(n++, strings, data, &tests->registers,
		      "core", 0, "registers", NULL);

	/* PHY-specific tests, enumerated by name until NULL is returned */
	if (efx->phy_op->run_tests != NULL) {
		EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);

		for (i = 0; true; ++i) {
			const char *name;

			EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
			name = efx->phy_op->test_name(efx, i);
			if (name == NULL)
				break;

			efx_fill_test(n++, strings, data, &tests->phy_ext[i],
				      "phy", 0, name, NULL);
		}
	}

	/* Loopback tests */
	for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
		if (!(efx->loopback_modes & (1 << mode)))
			continue;
		n = efx_fill_loopback_test(efx,
					   &tests->loopback[mode], mode, n,
					   strings, data);
	}

	return n;
}
/* Emit (or just count, when strings is NULL) the per-queue statistic
 * names: one "tx-<n>.tx_packets" per channel with TX queues and one
 * "rx-<n>.rx_packets" per channel with an RX queue.
 * Returns the number of statistics described.
 */
static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
{
	size_t n_stats = 0;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			n_stats++;
			if (strings != NULL) {
				/* Label by core TX queue number: there are
				 * EFX_TXQ_TYPES hardware queues per core
				 * queue, hence the division.
				 */
				snprintf(strings, ETH_GSTRING_LEN,
					 "tx-%u.tx_packets",
					 channel->tx_queue[0].queue /
					 EFX_TXQ_TYPES);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			n_stats++;
			if (strings != NULL) {
				snprintf(strings, ETH_GSTRING_LEN,
					 "rx-%d.rx_packets", channel->channel);
				strings += ETH_GSTRING_LEN;
			}
		}
	}
	return n_stats;
}
  342. static int efx_ethtool_get_sset_count(struct net_device *net_dev,
  343. int string_set)
  344. {
  345. struct efx_nic *efx = netdev_priv(net_dev);
  346. switch (string_set) {
  347. case ETH_SS_STATS:
  348. return efx->type->describe_stats(efx, NULL) +
  349. EFX_ETHTOOL_SW_STAT_COUNT +
  350. efx_describe_per_queue_stats(efx, NULL) +
  351. efx_ptp_describe_stats(efx, NULL);
  352. case ETH_SS_TEST:
  353. return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
  354. default:
  355. return -EINVAL;
  356. }
  357. }
/* Produce the ETH_SS_STATS / ETH_SS_TEST string tables.  The ordering
 * here (hardware stats, software stats, per-queue stats, PTP stats)
 * must match the value ordering in efx_ethtool_get_stats().
 */
static void efx_ethtool_get_strings(struct net_device *net_dev,
				    u32 string_set, u8 *strings)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int i;

	switch (string_set) {
	case ETH_SS_STATS:
		strings += (efx->type->describe_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
			strlcpy(strings + i * ETH_GSTRING_LEN,
				efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
		strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
		strings += (efx_describe_per_queue_stats(efx, strings) *
			    ETH_GSTRING_LEN);
		efx_ptp_describe_stats(efx, strings);
		break;
	case ETH_SS_TEST:
		efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
		break;
	default:
		/* No other string sets */
		break;
	}
}
/* Collect all ethtool -S statistic values, in exactly the order of the
 * strings produced by efx_ethtool_get_strings() for ETH_SS_STATS.
 */
static void efx_ethtool_get_stats(struct net_device *net_dev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	const struct efx_sw_stat_desc *stat;
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int i;

	spin_lock_bh(&efx->stats_lock);

	/* Get NIC statistics */
	data += efx->type->update_stats(efx, data, NULL);

	/* Get software statistics */
	for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
		stat = &efx_sw_stat_desc[i];
		switch (stat->source) {
		case EFX_ETHTOOL_STAT_SOURCE_nic:
			data[i] = stat->get_stat((void *)efx + stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_channel:
			/* Sum the counter over all channels */
			data[i] = 0;
			efx_for_each_channel(channel, efx)
				data[i] += stat->get_stat((void *)channel +
							  stat->offset);
			break;
		case EFX_ETHTOOL_STAT_SOURCE_tx_queue:
			/* Sum the counter over all TX queues */
			data[i] = 0;
			efx_for_each_channel(channel, efx) {
				efx_for_each_channel_tx_queue(tx_queue, channel)
					data[i] +=
						stat->get_stat((void *)tx_queue
							       + stat->offset);
			}
			break;
		}
	}
	data += EFX_ETHTOOL_SW_STAT_COUNT;

	spin_unlock_bh(&efx->stats_lock);

	/* Per-queue TX packet counts: one entry per TX-capable channel */
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_tx_queues(channel)) {
			*data = 0;
			efx_for_each_channel_tx_queue(tx_queue, channel) {
				*data += tx_queue->tx_packets;
			}
			data++;
		}
	}
	/* Per-queue RX packet counts: one entry per RX-capable channel */
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			*data = 0;
			efx_for_each_channel_rx_queue(rx_queue, channel) {
				*data += rx_queue->rx_packets;
			}
			data++;
		}
	}

	efx_ptp_update_stats(efx, data);
}
/* Run the NIC self-tests (ethtool -t).  If the interface is down it is
 * brought up for the duration of the tests and taken down afterwards.
 * Any failure is reported by setting ETH_TEST_FL_FAILED in test->flags.
 */
static void efx_ethtool_self_test(struct net_device *net_dev,
				  struct ethtool_test *test, u64 *data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_self_tests *efx_tests;
	bool already_up;
	int rc = -ENOMEM;	/* reported if the allocation below fails */

	efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
	if (!efx_tests)
		goto fail;

	if (efx->state != STATE_READY) {
		rc = -EBUSY;
		goto out;
	}

	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

	/* We need rx buffers and interrupts. */
	already_up = (efx->net_dev->flags & IFF_UP);
	if (!already_up) {
		rc = dev_open(efx->net_dev);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed opening device.\n");
			goto out;
		}
	}

	rc = efx_selftest(efx, efx_tests, test->flags);

	if (!already_up)
		dev_close(efx->net_dev);

	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
		   rc == 0 ? "passed" : "failed",
		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");

out:
	/* Publish whatever results exist (zeroed if tests did not run) */
	efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
	kfree(efx_tests);
fail:
	if (rc)
		test->flags |= ETH_TEST_FL_FAILED;
}
/* Restart autonegotiation */
static int efx_ethtool_nway_reset(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return mdio45_nway_restart(&efx->mdio);
}
/*
 * Each channel has a single IRQ and moderation timer, started by any
 * completion (or other event).  Unless the module parameter
 * separate_tx_channels is set, IRQs and moderation are therefore
 * shared between RX and TX completions.  In this case, when RX IRQ
 * moderation is explicitly changed then TX IRQ moderation is
 * automatically changed too, but otherwise we fail if the two values
 * are requested to be different.
 *
 * The hardware does not support a limit on the number of completions
 * before an IRQ, so we do not use the max_frames fields.  We should
 * report and require that max_frames == (usecs != 0), but this would
 * invalidate existing user documentation.
 *
 * The hardware does not have distinct settings for interrupt
 * moderation while the previous IRQ is being handled, so we should
 * not use the 'irq' fields.  However, an earlier developer
 * misunderstood the meaning of the 'irq' fields and the driver did
 * not support the standard fields.  To avoid invalidating existing
 * user documentation, we report and accept changes through either the
 * standard or 'irq' fields.  If both are changed at the same time, we
 * prefer the standard field.
 *
 * We implement adaptive IRQ moderation, but use a different algorithm
 * from that assumed in the definition of struct ethtool_coalesce.
 * Therefore we do not use any of the adaptive moderation parameters
 * in it.
 */
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	unsigned int tx_usecs, rx_usecs;
	bool rx_adaptive;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);

	/* Mirror each value into both the standard and 'irq' fields;
	 * see the comment block above for why.
	 */
	coalesce->tx_coalesce_usecs = tx_usecs;
	coalesce->tx_coalesce_usecs_irq = tx_usecs;
	coalesce->rx_coalesce_usecs = rx_usecs;
	coalesce->rx_coalesce_usecs_irq = rx_usecs;
	coalesce->use_adaptive_rx_coalesce = rx_adaptive;

	return 0;
}
/* Apply new IRQ moderation settings; see the comment block above
 * efx_ethtool_get_coalesce() for why both the standard and 'irq'
 * fields are accepted, with the standard field taking precedence.
 */
static int efx_ethtool_set_coalesce(struct net_device *net_dev,
				    struct ethtool_coalesce *coalesce)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_channel *channel;
	unsigned int tx_usecs, rx_usecs;
	bool adaptive, rx_may_override_tx;
	int rc;

	/* Adaptive moderation is only implemented for RX */
	if (coalesce->use_adaptive_tx_coalesce)
		return -EINVAL;

	efx_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);

	/* Take whichever RX field was changed; prefer the standard one */
	if (coalesce->rx_coalesce_usecs != rx_usecs)
		rx_usecs = coalesce->rx_coalesce_usecs;
	else
		rx_usecs = coalesce->rx_coalesce_usecs_irq;

	adaptive = coalesce->use_adaptive_rx_coalesce;

	/* If channels are shared, TX IRQ moderation can be quietly
	 * overridden unless it is changed from its old value.
	 */
	rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
			      coalesce->tx_coalesce_usecs_irq == tx_usecs);
	if (coalesce->tx_coalesce_usecs != tx_usecs)
		tx_usecs = coalesce->tx_coalesce_usecs;
	else
		tx_usecs = coalesce->tx_coalesce_usecs_irq;

	rc = efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
				     rx_may_override_tx);
	if (rc != 0)
		return rc;

	/* Push the new timer values out to every channel */
	efx_for_each_channel(channel, efx)
		efx->type->push_irq_moderation(channel);

	return 0;
}
  562. static void efx_ethtool_get_ringparam(struct net_device *net_dev,
  563. struct ethtool_ringparam *ring)
  564. {
  565. struct efx_nic *efx = netdev_priv(net_dev);
  566. ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
  567. ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
  568. ring->rx_pending = efx->rxq_entries;
  569. ring->tx_pending = efx->txq_entries;
  570. }
  571. static int efx_ethtool_set_ringparam(struct net_device *net_dev,
  572. struct ethtool_ringparam *ring)
  573. {
  574. struct efx_nic *efx = netdev_priv(net_dev);
  575. u32 txq_entries;
  576. if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
  577. ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
  578. ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
  579. return -EINVAL;
  580. if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
  581. netif_err(efx, drv, efx->net_dev,
  582. "RX queues cannot be smaller than %u\n",
  583. EFX_RXQ_MIN_ENT);
  584. return -EINVAL;
  585. }
  586. txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
  587. if (txq_entries != ring->tx_pending)
  588. netif_warn(efx, drv, efx->net_dev,
  589. "increasing TX queue size to minimum of %u\n",
  590. txq_entries);
  591. return efx_realloc_channels(efx, ring->rx_pending, txq_entries);
  592. }
/* Apply new flow-control (pause frame) settings.  Re-runs PHY
 * autonegotiation if the advertised capabilities change, then
 * reconfigures the MAC.  Called with mac_lock held throughout.
 */
static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
				      struct ethtool_pauseparam *pause)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	u8 wanted_fc, old_fc;
	u32 old_adv;
	int rc = 0;

	mutex_lock(&efx->mac_lock);

	wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) |
		     (pause->tx_pause ? EFX_FC_TX : 0) |
		     (pause->autoneg ? EFX_FC_AUTO : 0));

	/* TX-only pause is not a valid hardware configuration */
	if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Flow control unsupported: tx ON rx OFF\n");
		rc = -EINVAL;
		goto out;
	}

	if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) {
		netif_dbg(efx, drv, efx->net_dev,
			  "Autonegotiation is disabled\n");
		rc = -EINVAL;
		goto out;
	}

	/* Hook for Falcon bug 11482 workaround */
	if (efx->type->prepare_enable_fc_tx &&
	    (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
		efx->type->prepare_enable_fc_tx(efx);

	old_adv = efx->link_advertising;
	old_fc = efx->wanted_fc;
	efx_link_set_wanted_fc(efx, wanted_fc);
	/* Re-run PHY autonegotiation if advertising or autoneg changed */
	if (efx->link_advertising != old_adv ||
	    (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
		rc = efx->phy_op->reconfigure(efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "Unable to advertise requested flow "
				  "control setting\n");
			goto out;
		}
	}

	/* Reconfigure the MAC. The PHY *may* generate a link state change event
	 * if the user just changed the advertised capabilities, but there's no
	 * harm doing this twice */
	efx_mac_reconfigure(efx);

out:
	mutex_unlock(&efx->mac_lock);

	return rc;
}
  641. static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
  642. struct ethtool_pauseparam *pause)
  643. {
  644. struct efx_nic *efx = netdev_priv(net_dev);
  645. pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX);
  646. pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX);
  647. pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
  648. }
  649. static void efx_ethtool_get_wol(struct net_device *net_dev,
  650. struct ethtool_wolinfo *wol)
  651. {
  652. struct efx_nic *efx = netdev_priv(net_dev);
  653. return efx->type->get_wol(efx, wol);
  654. }
/* Set Wake-on-LAN options via the NIC-type hook. */
static int efx_ethtool_set_wol(struct net_device *net_dev,
			       struct ethtool_wolinfo *wol)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	return efx->type->set_wol(efx, wol->wolopts);
}
  661. static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
  662. {
  663. struct efx_nic *efx = netdev_priv(net_dev);
  664. int rc;
  665. rc = efx->type->map_reset_flags(flags);
  666. if (rc < 0)
  667. return rc;
  668. return efx_reset(efx, rc);
  669. }
/* MAC address mask including only I/G bit */
static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};

/* All-ones masks for exact-match fields in ethtool RX flow specs */
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
/* Translate the driver's internal filter spec at rule->location back
 * into an ethtool RX flow-classification rule.  Only the filter forms
 * this driver itself inserts (TCP/UDP over IPv4, and Ethernet-header
 * matches) are handled; anything else triggers a WARN and -EINVAL.
 */
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	int rc;

	rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
					rule->location, &spec);
	if (rc)
		return rc;

	/* A drop filter is reported as the special "discard" cookie */
	if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		rule->ring_cookie = RX_CLS_FLOW_DISC;
	else
		rule->ring_cookie = spec.dmaq_id;

	/* TCP/UDP over IPv4: require ETHER_TYPE==IPv4 and IP_PROTO, and
	 * no match flags beyond those an IPv4 4-tuple rule can express.
	 */
	if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
	    spec.ether_type == htons(ETH_P_IP) &&
	    (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
	    (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
	    !(spec.match_flags &
	      ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
		EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
		rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
				   TCP_V4_FLOW : UDP_V4_FLOW);
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
			ip_entry->ip4dst = spec.loc_host[0];
			ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
			ip_entry->ip4src = spec.rem_host[0];
			ip_mask->ip4src = IP4_ADDR_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
			ip_entry->pdst = spec.loc_port;
			ip_mask->pdst = PORT_FULL_MASK;
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
			ip_entry->psrc = spec.rem_port;
			ip_mask->psrc = PORT_FULL_MASK;
		}
	} else if (!(spec.match_flags &
		     ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
		       EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
		       EFX_FILTER_MATCH_OUTER_VID))) {
		/* Ethernet-header match */
		rule->flow_type = ETHER_FLOW;
		if (spec.match_flags &
		    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
			ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
			/* Full destination match vs I/G (multicast) bit only */
			if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
				eth_broadcast_addr(mac_mask->h_dest);
			else
				ether_addr_copy(mac_mask->h_dest,
						mac_addr_ig_mask);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
			ether_addr_copy(mac_entry->h_source, spec.rem_mac);
			eth_broadcast_addr(mac_mask->h_source);
		}
		if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
			mac_entry->h_proto = spec.ether_type;
			mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
		}
	} else {
		/* The above should handle all filters that we insert */
		WARN_ON(1);
		return -EINVAL;
	}

	if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
		rule->flow_type |= FLOW_EXT;
		rule->h_ext.vlan_tci = spec.outer_vid;
		rule->m_ext.vlan_tci = htons(0xfff);
	}

	return rc;
}
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
		      struct ethtool_rxnfc *info, u32 *rule_locs)
{
	/* ethtool .get_rxnfc handler: reports RX ring count (GRXRINGS),
	 * RSS hash fields per flow type (GRXFH), and RX classification
	 * filter rules (GRXCLSRLCNT / GRXCLSRULE / GRXCLSRLALL).
	 * Returns 0 on success or a negative errno.
	 */
	struct efx_nic *efx = netdev_priv(net_dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		/* Number of RX channels usable as filter destinations */
		info->data = efx->n_rx_channels;
		return 0;

	case ETHTOOL_GRXFH: {
		unsigned min_revision = 0;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			/* TCP additionally hashes on the port numbers */
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			/* IPv4 hashing requires Falcon B0 or later */
			min_revision = EFX_REV_FALCON_B0;
			break;
		case TCP_V6_FLOW:
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
			/* fall through */
		case UDP_V6_FLOW:
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			info->data |= RXH_IP_SRC | RXH_IP_DST;
			/* IPv6 hashing requires Siena A0 or later */
			min_revision = EFX_REV_SIENA_A0;
			break;
		default:
			break;
		}
		/* NIC too old for this flow type: report no hashing */
		if (efx_nic_rev(efx) < min_revision)
			info->data = 0;
		return 0;
	}

	case ETHTOOL_GRXCLSRLCNT:
		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		/* We support the special insert locations */
		info->data |= RX_CLS_LOC_SPECIAL;
		info->rule_cnt =
			efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
		return 0;

	case ETHTOOL_GRXCLSRULE:
		if (efx_filter_get_rx_id_limit(efx) == 0)
			return -EOPNOTSUPP;
		return efx_ethtool_get_class_rule(efx, &info->fs);

	case ETHTOOL_GRXCLSRLALL: {
		s32 rc;

		info->data = efx_filter_get_rx_id_limit(efx);
		if (info->data == 0)
			return -EOPNOTSUPP;
		rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
					   rule_locs, info->rule_cnt);
		if (rc < 0)
			return rc;
		info->rule_cnt = rc;
		return 0;
	}

	default:
		return -EOPNOTSUPP;
	}
}
static int efx_ethtool_set_class_rule(struct efx_nic *efx,
				      struct ethtool_rx_flow_spec *rule)
{
	/* Translate an ethtool RX flow classification rule into an
	 * efx_filter_spec and insert it.  On success the chosen filter
	 * ID is written back to rule->location and 0 is returned;
	 * otherwise a negative errno (-EINVAL for any match criterion
	 * the hardware filters cannot express).
	 */
	struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
	struct ethhdr *mac_entry = &rule->h_u.ether_spec;
	struct ethhdr *mac_mask = &rule->m_u.ether_spec;
	struct efx_filter_spec spec;
	int rc;

	/* Check that user wants us to choose the location */
	if (rule->location != RX_CLS_LOC_ANY)
		return -EINVAL;

	/* Range-check ring_cookie */
	if (rule->ring_cookie >= efx->n_rx_channels &&
	    rule->ring_cookie != RX_CLS_FLOW_DISC)
		return -EINVAL;

	/* Check for unsupported extensions */
	if ((rule->flow_type & FLOW_EXT) &&
	    (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
	     rule->m_ext.data[1]))
		return -EINVAL;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
			   EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);

	switch (rule->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
				    EFX_FILTER_MATCH_IP_PROTO);
		spec.ether_type = htons(ETH_P_IP);
		spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
				 IPPROTO_TCP : IPPROTO_UDP);
		/* Each field may only be matched fully or not at all */
		if (ip_mask->ip4dst) {
			if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
			spec.loc_host[0] = ip_entry->ip4dst;
		}
		if (ip_mask->ip4src) {
			if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
			spec.rem_host[0] = ip_entry->ip4src;
		}
		if (ip_mask->pdst) {
			if (ip_mask->pdst != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
			spec.loc_port = ip_entry->pdst;
		}
		if (ip_mask->psrc) {
			if (ip_mask->psrc != PORT_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
			spec.rem_port = ip_entry->psrc;
		}
		/* Matching on TOS is not supported */
		if (ip_mask->tos)
			return -EINVAL;
		break;

	case ETHER_FLOW:
		if (!is_zero_ether_addr(mac_mask->h_dest)) {
			/* Destination MAC: either a full match or the
			 * individual/group bit alone
			 */
			if (ether_addr_equal(mac_mask->h_dest,
					     mac_addr_ig_mask))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
			else if (is_broadcast_ether_addr(mac_mask->h_dest))
				spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
			else
				return -EINVAL;
			ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
		}
		if (!is_zero_ether_addr(mac_mask->h_source)) {
			/* Source MAC must be a full match */
			if (!is_broadcast_ether_addr(mac_mask->h_source))
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
			ether_addr_copy(spec.rem_mac, mac_entry->h_source);
		}
		if (mac_mask->h_proto) {
			if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
				return -EINVAL;
			spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
			spec.ether_type = mac_entry->h_proto;
		}
		break;

	default:
		return -EINVAL;
	}

	/* VLAN ID match, if requested, must be exact (12-bit mask) */
	if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
		if (rule->m_ext.vlan_tci != htons(0xfff))
			return -EINVAL;
		spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
		spec.outer_vid = rule->h_ext.vlan_tci;
	}

	rc = efx_filter_insert_filter(efx, &spec, true);
	if (rc < 0)
		return rc;

	rule->location = rc;
	return 0;
}
  920. static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
  921. struct ethtool_rxnfc *info)
  922. {
  923. struct efx_nic *efx = netdev_priv(net_dev);
  924. if (efx_filter_get_rx_id_limit(efx) == 0)
  925. return -EOPNOTSUPP;
  926. switch (info->cmd) {
  927. case ETHTOOL_SRXCLSRLINS:
  928. return efx_ethtool_set_class_rule(efx, &info->fs);
  929. case ETHTOOL_SRXCLSRLDEL:
  930. return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
  931. info->fs.location);
  932. default:
  933. return -EOPNOTSUPP;
  934. }
  935. }
  936. static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
  937. {
  938. struct efx_nic *efx = netdev_priv(net_dev);
  939. return ((efx_nic_rev(efx) < EFX_REV_FALCON_B0 ||
  940. efx->n_rx_channels == 1) ?
  941. 0 : ARRAY_SIZE(efx->rx_indir_table));
  942. }
  943. static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
  944. u8 *hfunc)
  945. {
  946. struct efx_nic *efx = netdev_priv(net_dev);
  947. if (hfunc)
  948. *hfunc = ETH_RSS_HASH_TOP;
  949. if (indir)
  950. memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
  951. return 0;
  952. }
  953. static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
  954. const u8 *key, const u8 hfunc)
  955. {
  956. struct efx_nic *efx = netdev_priv(net_dev);
  957. /* We do not allow change in unsupported parameters */
  958. if (key ||
  959. (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
  960. return -EOPNOTSUPP;
  961. if (!indir)
  962. return 0;
  963. return efx->type->rx_push_rss_config(efx, true, indir);
  964. }
  965. static int efx_ethtool_get_ts_info(struct net_device *net_dev,
  966. struct ethtool_ts_info *ts_info)
  967. {
  968. struct efx_nic *efx = netdev_priv(net_dev);
  969. /* Software capabilities */
  970. ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
  971. SOF_TIMESTAMPING_SOFTWARE);
  972. ts_info->phc_index = -1;
  973. efx_ptp_get_ts_info(efx, ts_info);
  974. return 0;
  975. }
  976. static int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
  977. struct ethtool_eeprom *ee,
  978. u8 *data)
  979. {
  980. struct efx_nic *efx = netdev_priv(net_dev);
  981. int ret;
  982. if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
  983. return -EOPNOTSUPP;
  984. mutex_lock(&efx->mac_lock);
  985. ret = efx->phy_op->get_module_eeprom(efx, ee, data);
  986. mutex_unlock(&efx->mac_lock);
  987. return ret;
  988. }
  989. static int efx_ethtool_get_module_info(struct net_device *net_dev,
  990. struct ethtool_modinfo *modinfo)
  991. {
  992. struct efx_nic *efx = netdev_priv(net_dev);
  993. int ret;
  994. if (!efx->phy_op || !efx->phy_op->get_module_info)
  995. return -EOPNOTSUPP;
  996. mutex_lock(&efx->mac_lock);
  997. ret = efx->phy_op->get_module_info(efx, modinfo);
  998. mutex_unlock(&efx->mac_lock);
  999. return ret;
  1000. }
/* ethtool operations table for sfc devices; hooked up to the
 * net_device so the ethtool core dispatches into the handlers above.
 */
const struct ethtool_ops efx_ethtool_ops = {
	.get_settings = efx_ethtool_get_settings,
	.set_settings = efx_ethtool_set_settings,
	.get_drvinfo = efx_ethtool_get_drvinfo,
	.get_regs_len = efx_ethtool_get_regs_len,
	.get_regs = efx_ethtool_get_regs,
	.get_msglevel = efx_ethtool_get_msglevel,
	.set_msglevel = efx_ethtool_set_msglevel,
	.nway_reset = efx_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_coalesce = efx_ethtool_get_coalesce,
	.set_coalesce = efx_ethtool_set_coalesce,
	.get_ringparam = efx_ethtool_get_ringparam,
	.set_ringparam = efx_ethtool_set_ringparam,
	.get_pauseparam = efx_ethtool_get_pauseparam,
	.set_pauseparam = efx_ethtool_set_pauseparam,
	.get_sset_count = efx_ethtool_get_sset_count,
	.self_test = efx_ethtool_self_test,
	.get_strings = efx_ethtool_get_strings,
	.set_phys_id = efx_ethtool_phys_id,
	.get_ethtool_stats = efx_ethtool_get_stats,
	.get_wol = efx_ethtool_get_wol,
	.set_wol = efx_ethtool_set_wol,
	.reset = efx_ethtool_reset,
	.get_rxnfc = efx_ethtool_get_rxnfc,
	.set_rxnfc = efx_ethtool_set_rxnfc,
	.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
	.get_rxfh = efx_ethtool_get_rxfh,
	.set_rxfh = efx_ethtool_set_rxfh,
	.get_ts_info = efx_ethtool_get_ts_info,
	.get_module_info = efx_ethtool_get_module_info,
	.get_module_eeprom = efx_ethtool_get_module_eeprom,
};