/* bnx2x_ethtool.c */
/* bnx2x_ethtool.c: QLogic Everest network driver.
 *
 * Copyright (c) 2007-2013 Broadcom Corporation
 * Copyright (c) 2014 QLogic Corporation
 * All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
  19. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  20. #include <linux/ethtool.h>
  21. #include <linux/netdevice.h>
  22. #include <linux/types.h>
  23. #include <linux/sched.h>
  24. #include <linux/crc32.h>
  25. #include "bnx2x.h"
  26. #include "bnx2x_cmn.h"
  27. #include "bnx2x_dump.h"
  28. #include "bnx2x_init.h"
  29. /* Note: in the format strings below %s is replaced by the queue-name which is
  30. * either its index or 'fcoe' for the fcoe queue. Make sure the format string
  31. * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2
  32. */
#define MAX_QUEUE_NAME_LEN 4
/* Per-queue statistics table for ethtool -S.  Each entry gives the byte
 * offset of a counter inside the per-queue stats struct, its width in
 * bytes (8 = 64-bit counter stored as a hi/lo pair, 4 = plain 32-bit
 * counter) and the ethtool string; "%s" is substituted with the queue
 * name (its index, or 'fcoe') when the strings are emitted.
 * The numbered markers (/ * 1 * /, / * 10 * /) track entry indices.
 */
static const struct {
	long offset;			/* Q_STATS_OFFSET32 of the counter */
	int size;			/* counter width: 4 or 8 bytes */
	char string[ETH_GSTRING_LEN];	/* format string, "%s" = queue name */
} bnx2x_q_stats_arr[] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
		8, "[%s]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
		8, "[%s]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
		8, "[%s]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
		4, "[%s]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
		4, "[%s]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
		8, "[%s]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
		8, "[%s]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
		8, "[%s]: tx_bcast_packets" },
	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
		8, "[%s]: tpa_aggregations" },
	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
		8, "[%s]: tpa_aggregated_frames"},
	{ Q_STATS_OFFSET32(total_tpa_bytes_hi), 8, "[%s]: tpa_bytes"},
	{ Q_STATS_OFFSET32(driver_filtered_tx_pkt),
		4, "[%s]: driver_filtered_tx_pkt" }
};

#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
/* Global (per-function / per-port) statistics table for ethtool -S.
 * Layout mirrors bnx2x_q_stats_arr, plus a flags word that records
 * whether the counter is maintained per physical port, per PCI
 * function, or both — callers use the flags to decide which entries
 * apply in multi-function configurations.
 */
static const struct {
	long offset;			/* STATS_OFFSET32 of the counter */
	int size;			/* counter width: 4 or 8 bytes */
	u32 flags;			/* PORT / FUNC / BOTH scope */
#define STATS_FLAGS_PORT	1
#define STATS_FLAGS_FUNC	2
#define STATS_FLAGS_BOTH	(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	char string[ETH_GSTRING_LEN];	/* name shown by ethtool -S */
} bnx2x_stats_arr[] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
		8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
		8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
		8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
		8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
		8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
		8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
		8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
		8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
		8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
		8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
		8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
		8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
		4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(mf_tag_discard),
		4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
	{ STATS_OFFSET32(pfc_frames_received_hi),
		8, STATS_FLAGS_PORT, "pfc_frames_received" },
	{ STATS_OFFSET32(pfc_frames_sent_hi),
		8, STATS_FLAGS_PORT, "pfc_frames_sent" },
	{ STATS_OFFSET32(brb_drop_hi),
		8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
		8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
		8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
		8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
		4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
		4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
		4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
		4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
		8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
		8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
		8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
		8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
		8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
		8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
		8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
		8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
		8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
		8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
		8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
		8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
		8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
		8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
		8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
		8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
		8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
		8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
		8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
		8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
		8, STATS_FLAGS_PORT, "tx_pause_frames" },
	{ STATS_OFFSET32(total_tpa_aggregations_hi),
		8, STATS_FLAGS_FUNC, "tpa_aggregations" },
	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
		8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
	{ STATS_OFFSET32(total_tpa_bytes_hi),
		8, STATS_FLAGS_FUNC, "tpa_bytes"},
	{ STATS_OFFSET32(recoverable_error),
		4, STATS_FLAGS_FUNC, "recoverable_errors" },
	{ STATS_OFFSET32(unrecoverable_error),
		4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
	{ STATS_OFFSET32(driver_filtered_tx_pkt),
		4, STATS_FLAGS_FUNC, "driver_filtered_tx_pkt" },
	{ STATS_OFFSET32(eee_tx_lpi),
		4, STATS_FLAGS_PORT, "Tx LPI entry count"}
};

#define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)
  183. static int bnx2x_get_port_type(struct bnx2x *bp)
  184. {
  185. int port_type;
  186. u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
  187. switch (bp->link_params.phy[phy_idx].media_type) {
  188. case ETH_PHY_SFPP_10G_FIBER:
  189. case ETH_PHY_SFP_1G_FIBER:
  190. case ETH_PHY_XFP_FIBER:
  191. case ETH_PHY_KR:
  192. case ETH_PHY_CX4:
  193. port_type = PORT_FIBRE;
  194. break;
  195. case ETH_PHY_DA_TWINAX:
  196. port_type = PORT_DA;
  197. break;
  198. case ETH_PHY_BASE_T:
  199. port_type = PORT_TP;
  200. break;
  201. case ETH_PHY_NOT_PRESENT:
  202. port_type = PORT_NONE;
  203. break;
  204. case ETH_PHY_UNSPECIFIED:
  205. default:
  206. port_type = PORT_OTHER;
  207. break;
  208. }
  209. return port_type;
  210. }
  211. static int bnx2x_get_vf_settings(struct net_device *dev,
  212. struct ethtool_cmd *cmd)
  213. {
  214. struct bnx2x *bp = netdev_priv(dev);
  215. if (bp->state == BNX2X_STATE_OPEN) {
  216. if (test_bit(BNX2X_LINK_REPORT_FD,
  217. &bp->vf_link_vars.link_report_flags))
  218. cmd->duplex = DUPLEX_FULL;
  219. else
  220. cmd->duplex = DUPLEX_HALF;
  221. ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
  222. } else {
  223. cmd->duplex = DUPLEX_UNKNOWN;
  224. ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
  225. }
  226. cmd->port = PORT_OTHER;
  227. cmd->phy_address = 0;
  228. cmd->transceiver = XCVR_INTERNAL;
  229. cmd->autoneg = AUTONEG_DISABLE;
  230. cmd->maxtxpkt = 0;
  231. cmd->maxrxpkt = 0;
  232. DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
  233. " supported 0x%x advertising 0x%x speed %u\n"
  234. " duplex %d port %d phy_address %d transceiver %d\n"
  235. " autoneg %d maxtxpkt %d maxrxpkt %d\n",
  236. cmd->cmd, cmd->supported, cmd->advertising,
  237. ethtool_cmd_speed(cmd),
  238. cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
  239. cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
  240. return 0;
  241. }
/* ethtool get_settings handler for a PF: report supported/advertised
 * link modes, current speed/duplex, connector type and the link
 * partner's advertised capabilities.
 */
static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	int cfg_idx = bnx2x_get_link_cfg_idx(bp);
	u32 media_type;

	/* Dual Media boards present all available port types */
	cmd->supported = bp->port.supported[cfg_idx] |
		(bp->port.supported[cfg_idx ^ 1] &
		 (SUPPORTED_TP | SUPPORTED_FIBRE));
	cmd->advertising = bp->port.advertising[cfg_idx];
	media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
	/* A 1G SFP module cannot run 10G - mask 10G out of the caps */
	if (media_type == ETH_PHY_SFP_1G_FIBER) {
		cmd->supported &= ~(SUPPORTED_10000baseT_Full);
		cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
	}

	if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
	    !(bp->flags & MF_FUNC_DIS)) {
		cmd->duplex = bp->link_vars.duplex;

		/* In multi-function mode report the per-function share of
		 * the line rate instead of the raw link speed.
		 */
		if (IS_MF(bp) && !BP_NOMCP(bp))
			ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
		else
			ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
	} else {
		/* Link down, device closed, or function disabled */
		cmd->duplex = DUPLEX_UNKNOWN;
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
	}

	cmd->port = bnx2x_get_port_type(bp);

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	/* Publish LP advertised speeds and FC */
	if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
		u32 status = bp->link_vars.link_status;

		cmd->lp_advertising |= ADVERTISED_Autoneg;
		if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Pause;
		if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
			cmd->lp_advertising |= ADVERTISED_Asym_Pause;

		if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_10baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_100baseT_Half;
		if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_100baseT_Full;
		if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
		/* On backplane (KR) media report the KX/KR variants of the
		 * 1G/10G modes rather than the BASE-T ones.
		 */
		if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
			if (media_type == ETH_PHY_KR) {
				cmd->lp_advertising |=
					ADVERTISED_1000baseKX_Full;
			} else {
				cmd->lp_advertising |=
					ADVERTISED_1000baseT_Full;
			}
		}
		if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
		if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
			if (media_type == ETH_PHY_KR) {
				cmd->lp_advertising |=
					ADVERTISED_10000baseKR_Full;
			} else {
				cmd->lp_advertising |=
					ADVERTISED_10000baseT_Full;
			}
		}
		if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
			cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
	}

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
	   "  supported 0x%x  advertising 0x%x  speed %u\n"
	   "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising,
	   ethtool_cmd_speed(cmd),
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
  328. static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
  329. {
  330. struct bnx2x *bp = netdev_priv(dev);
  331. u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
  332. u32 speed, phy_idx;
  333. if (IS_MF_SD(bp))
  334. return 0;
  335. DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
  336. " supported 0x%x advertising 0x%x speed %u\n"
  337. " duplex %d port %d phy_address %d transceiver %d\n"
  338. " autoneg %d maxtxpkt %d maxrxpkt %d\n",
  339. cmd->cmd, cmd->supported, cmd->advertising,
  340. ethtool_cmd_speed(cmd),
  341. cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
  342. cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
  343. speed = ethtool_cmd_speed(cmd);
  344. /* If received a request for an unknown duplex, assume full*/
  345. if (cmd->duplex == DUPLEX_UNKNOWN)
  346. cmd->duplex = DUPLEX_FULL;
  347. if (IS_MF_SI(bp)) {
  348. u32 part;
  349. u32 line_speed = bp->link_vars.line_speed;
  350. /* use 10G if no link detected */
  351. if (!line_speed)
  352. line_speed = 10000;
  353. if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
  354. DP(BNX2X_MSG_ETHTOOL,
  355. "To set speed BC %X or higher is required, please upgrade BC\n",
  356. REQ_BC_VER_4_SET_MF_BW);
  357. return -EINVAL;
  358. }
  359. part = (speed * 100) / line_speed;
  360. if (line_speed < speed || !part) {
  361. DP(BNX2X_MSG_ETHTOOL,
  362. "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
  363. return -EINVAL;
  364. }
  365. if (bp->state != BNX2X_STATE_OPEN)
  366. /* store value for following "load" */
  367. bp->pending_max = part;
  368. else
  369. bnx2x_update_max_mf_config(bp, part);
  370. return 0;
  371. }
  372. cfg_idx = bnx2x_get_link_cfg_idx(bp);
  373. old_multi_phy_config = bp->link_params.multi_phy_config;
  374. if (cmd->port != bnx2x_get_port_type(bp)) {
  375. switch (cmd->port) {
  376. case PORT_TP:
  377. if (!(bp->port.supported[0] & SUPPORTED_TP ||
  378. bp->port.supported[1] & SUPPORTED_TP)) {
  379. DP(BNX2X_MSG_ETHTOOL,
  380. "Unsupported port type\n");
  381. return -EINVAL;
  382. }
  383. bp->link_params.multi_phy_config &=
  384. ~PORT_HW_CFG_PHY_SELECTION_MASK;
  385. if (bp->link_params.multi_phy_config &
  386. PORT_HW_CFG_PHY_SWAPPED_ENABLED)
  387. bp->link_params.multi_phy_config |=
  388. PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
  389. else
  390. bp->link_params.multi_phy_config |=
  391. PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
  392. break;
  393. case PORT_FIBRE:
  394. case PORT_DA:
  395. case PORT_NONE:
  396. if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
  397. bp->port.supported[1] & SUPPORTED_FIBRE)) {
  398. DP(BNX2X_MSG_ETHTOOL,
  399. "Unsupported port type\n");
  400. return -EINVAL;
  401. }
  402. bp->link_params.multi_phy_config &=
  403. ~PORT_HW_CFG_PHY_SELECTION_MASK;
  404. if (bp->link_params.multi_phy_config &
  405. PORT_HW_CFG_PHY_SWAPPED_ENABLED)
  406. bp->link_params.multi_phy_config |=
  407. PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
  408. else
  409. bp->link_params.multi_phy_config |=
  410. PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
  411. break;
  412. default:
  413. DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
  414. return -EINVAL;
  415. }
  416. }
  417. /* Save new config in case command complete successfully */
  418. new_multi_phy_config = bp->link_params.multi_phy_config;
  419. /* Get the new cfg_idx */
  420. cfg_idx = bnx2x_get_link_cfg_idx(bp);
  421. /* Restore old config in case command failed */
  422. bp->link_params.multi_phy_config = old_multi_phy_config;
  423. DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
  424. if (cmd->autoneg == AUTONEG_ENABLE) {
  425. u32 an_supported_speed = bp->port.supported[cfg_idx];
  426. if (bp->link_params.phy[EXT_PHY1].type ==
  427. PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
  428. an_supported_speed |= (SUPPORTED_100baseT_Half |
  429. SUPPORTED_100baseT_Full);
  430. if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
  431. DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
  432. return -EINVAL;
  433. }
  434. /* advertise the requested speed and duplex if supported */
  435. if (cmd->advertising & ~an_supported_speed) {
  436. DP(BNX2X_MSG_ETHTOOL,
  437. "Advertisement parameters are not supported\n");
  438. return -EINVAL;
  439. }
  440. bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
  441. bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
  442. bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
  443. cmd->advertising);
  444. if (cmd->advertising) {
  445. bp->link_params.speed_cap_mask[cfg_idx] = 0;
  446. if (cmd->advertising & ADVERTISED_10baseT_Half) {
  447. bp->link_params.speed_cap_mask[cfg_idx] |=
  448. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
  449. }
  450. if (cmd->advertising & ADVERTISED_10baseT_Full)
  451. bp->link_params.speed_cap_mask[cfg_idx] |=
  452. PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
  453. if (cmd->advertising & ADVERTISED_100baseT_Full)
  454. bp->link_params.speed_cap_mask[cfg_idx] |=
  455. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
  456. if (cmd->advertising & ADVERTISED_100baseT_Half) {
  457. bp->link_params.speed_cap_mask[cfg_idx] |=
  458. PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
  459. }
  460. if (cmd->advertising & ADVERTISED_1000baseT_Half) {
  461. bp->link_params.speed_cap_mask[cfg_idx] |=
  462. PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
  463. }
  464. if (cmd->advertising & (ADVERTISED_1000baseT_Full |
  465. ADVERTISED_1000baseKX_Full))
  466. bp->link_params.speed_cap_mask[cfg_idx] |=
  467. PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
  468. if (cmd->advertising & (ADVERTISED_10000baseT_Full |
  469. ADVERTISED_10000baseKX4_Full |
  470. ADVERTISED_10000baseKR_Full))
  471. bp->link_params.speed_cap_mask[cfg_idx] |=
  472. PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
  473. if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
  474. bp->link_params.speed_cap_mask[cfg_idx] |=
  475. PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
  476. }
  477. } else { /* forced speed */
  478. /* advertise the requested speed and duplex if supported */
  479. switch (speed) {
  480. case SPEED_10:
  481. if (cmd->duplex == DUPLEX_FULL) {
  482. if (!(bp->port.supported[cfg_idx] &
  483. SUPPORTED_10baseT_Full)) {
  484. DP(BNX2X_MSG_ETHTOOL,
  485. "10M full not supported\n");
  486. return -EINVAL;
  487. }
  488. advertising = (ADVERTISED_10baseT_Full |
  489. ADVERTISED_TP);
  490. } else {
  491. if (!(bp->port.supported[cfg_idx] &
  492. SUPPORTED_10baseT_Half)) {
  493. DP(BNX2X_MSG_ETHTOOL,
  494. "10M half not supported\n");
  495. return -EINVAL;
  496. }
  497. advertising = (ADVERTISED_10baseT_Half |
  498. ADVERTISED_TP);
  499. }
  500. break;
  501. case SPEED_100:
  502. if (cmd->duplex == DUPLEX_FULL) {
  503. if (!(bp->port.supported[cfg_idx] &
  504. SUPPORTED_100baseT_Full)) {
  505. DP(BNX2X_MSG_ETHTOOL,
  506. "100M full not supported\n");
  507. return -EINVAL;
  508. }
  509. advertising = (ADVERTISED_100baseT_Full |
  510. ADVERTISED_TP);
  511. } else {
  512. if (!(bp->port.supported[cfg_idx] &
  513. SUPPORTED_100baseT_Half)) {
  514. DP(BNX2X_MSG_ETHTOOL,
  515. "100M half not supported\n");
  516. return -EINVAL;
  517. }
  518. advertising = (ADVERTISED_100baseT_Half |
  519. ADVERTISED_TP);
  520. }
  521. break;
  522. case SPEED_1000:
  523. if (cmd->duplex != DUPLEX_FULL) {
  524. DP(BNX2X_MSG_ETHTOOL,
  525. "1G half not supported\n");
  526. return -EINVAL;
  527. }
  528. if (bp->port.supported[cfg_idx] &
  529. SUPPORTED_1000baseT_Full) {
  530. advertising = (ADVERTISED_1000baseT_Full |
  531. ADVERTISED_TP);
  532. } else if (bp->port.supported[cfg_idx] &
  533. SUPPORTED_1000baseKX_Full) {
  534. advertising = ADVERTISED_1000baseKX_Full;
  535. } else {
  536. DP(BNX2X_MSG_ETHTOOL,
  537. "1G full not supported\n");
  538. return -EINVAL;
  539. }
  540. break;
  541. case SPEED_2500:
  542. if (cmd->duplex != DUPLEX_FULL) {
  543. DP(BNX2X_MSG_ETHTOOL,
  544. "2.5G half not supported\n");
  545. return -EINVAL;
  546. }
  547. if (!(bp->port.supported[cfg_idx]
  548. & SUPPORTED_2500baseX_Full)) {
  549. DP(BNX2X_MSG_ETHTOOL,
  550. "2.5G full not supported\n");
  551. return -EINVAL;
  552. }
  553. advertising = (ADVERTISED_2500baseX_Full |
  554. ADVERTISED_TP);
  555. break;
  556. case SPEED_10000:
  557. if (cmd->duplex != DUPLEX_FULL) {
  558. DP(BNX2X_MSG_ETHTOOL,
  559. "10G half not supported\n");
  560. return -EINVAL;
  561. }
  562. phy_idx = bnx2x_get_cur_phy_idx(bp);
  563. if ((bp->port.supported[cfg_idx] &
  564. SUPPORTED_10000baseT_Full) &&
  565. (bp->link_params.phy[phy_idx].media_type !=
  566. ETH_PHY_SFP_1G_FIBER)) {
  567. advertising = (ADVERTISED_10000baseT_Full |
  568. ADVERTISED_FIBRE);
  569. } else if (bp->port.supported[cfg_idx] &
  570. SUPPORTED_10000baseKR_Full) {
  571. advertising = (ADVERTISED_10000baseKR_Full |
  572. ADVERTISED_FIBRE);
  573. } else {
  574. DP(BNX2X_MSG_ETHTOOL,
  575. "10G full not supported\n");
  576. return -EINVAL;
  577. }
  578. break;
  579. default:
  580. DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
  581. return -EINVAL;
  582. }
  583. bp->link_params.req_line_speed[cfg_idx] = speed;
  584. bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
  585. bp->port.advertising[cfg_idx] = advertising;
  586. }
  587. DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
  588. " req_duplex %d advertising 0x%x\n",
  589. bp->link_params.req_line_speed[cfg_idx],
  590. bp->link_params.req_duplex[cfg_idx],
  591. bp->port.advertising[cfg_idx]);
  592. /* Set new config */
  593. bp->link_params.multi_phy_config = new_multi_phy_config;
  594. if (netif_running(dev)) {
  595. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  596. bnx2x_force_link_reset(bp);
  597. bnx2x_link_set(bp);
  598. }
  599. return 0;
  600. }
  601. #define DUMP_ALL_PRESETS 0x1FFF
  602. #define DUMP_MAX_PRESETS 13
  603. static int __bnx2x_get_preset_regs_len(struct bnx2x *bp, u32 preset)
  604. {
  605. if (CHIP_IS_E1(bp))
  606. return dump_num_registers[0][preset-1];
  607. else if (CHIP_IS_E1H(bp))
  608. return dump_num_registers[1][preset-1];
  609. else if (CHIP_IS_E2(bp))
  610. return dump_num_registers[2][preset-1];
  611. else if (CHIP_IS_E3A0(bp))
  612. return dump_num_registers[3][preset-1];
  613. else if (CHIP_IS_E3B0(bp))
  614. return dump_num_registers[4][preset-1];
  615. else
  616. return 0;
  617. }
  618. static int __bnx2x_get_regs_len(struct bnx2x *bp)
  619. {
  620. u32 preset_idx;
  621. int regdump_len = 0;
  622. /* Calculate the total preset regs length */
  623. for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++)
  624. regdump_len += __bnx2x_get_preset_regs_len(bp, preset_idx);
  625. return regdump_len;
  626. }
  627. static int bnx2x_get_regs_len(struct net_device *dev)
  628. {
  629. struct bnx2x *bp = netdev_priv(dev);
  630. int regdump_len = 0;
  631. if (IS_VF(bp))
  632. return 0;
  633. regdump_len = __bnx2x_get_regs_len(bp);
  634. regdump_len *= 4;
  635. regdump_len += sizeof(struct dump_header);
  636. return regdump_len;
  637. }
  638. #define IS_E1_REG(chips) ((chips & DUMP_CHIP_E1) == DUMP_CHIP_E1)
  639. #define IS_E1H_REG(chips) ((chips & DUMP_CHIP_E1H) == DUMP_CHIP_E1H)
  640. #define IS_E2_REG(chips) ((chips & DUMP_CHIP_E2) == DUMP_CHIP_E2)
  641. #define IS_E3A0_REG(chips) ((chips & DUMP_CHIP_E3A0) == DUMP_CHIP_E3A0)
  642. #define IS_E3B0_REG(chips) ((chips & DUMP_CHIP_E3B0) == DUMP_CHIP_E3B0)
  643. #define IS_REG_IN_PRESET(presets, idx) \
  644. ((presets & (1 << (idx-1))) == (1 << (idx-1)))
  645. /******* Paged registers info selectors ********/
  646. static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
  647. {
  648. if (CHIP_IS_E2(bp))
  649. return page_vals_e2;
  650. else if (CHIP_IS_E3(bp))
  651. return page_vals_e3;
  652. else
  653. return NULL;
  654. }
  655. static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
  656. {
  657. if (CHIP_IS_E2(bp))
  658. return PAGE_MODE_VALUES_E2;
  659. else if (CHIP_IS_E3(bp))
  660. return PAGE_MODE_VALUES_E3;
  661. else
  662. return 0;
  663. }
  664. static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
  665. {
  666. if (CHIP_IS_E2(bp))
  667. return page_write_regs_e2;
  668. else if (CHIP_IS_E3(bp))
  669. return page_write_regs_e3;
  670. else
  671. return NULL;
  672. }
  673. static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
  674. {
  675. if (CHIP_IS_E2(bp))
  676. return PAGE_WRITE_REGS_E2;
  677. else if (CHIP_IS_E3(bp))
  678. return PAGE_WRITE_REGS_E3;
  679. else
  680. return 0;
  681. }
  682. static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
  683. {
  684. if (CHIP_IS_E2(bp))
  685. return page_read_regs_e2;
  686. else if (CHIP_IS_E3(bp))
  687. return page_read_regs_e3;
  688. else
  689. return NULL;
  690. }
  691. static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
  692. {
  693. if (CHIP_IS_E2(bp))
  694. return PAGE_READ_REGS_E2;
  695. else if (CHIP_IS_E3(bp))
  696. return PAGE_READ_REGS_E3;
  697. else
  698. return 0;
  699. }
  700. static bool bnx2x_is_reg_in_chip(struct bnx2x *bp,
  701. const struct reg_addr *reg_info)
  702. {
  703. if (CHIP_IS_E1(bp))
  704. return IS_E1_REG(reg_info->chips);
  705. else if (CHIP_IS_E1H(bp))
  706. return IS_E1H_REG(reg_info->chips);
  707. else if (CHIP_IS_E2(bp))
  708. return IS_E2_REG(reg_info->chips);
  709. else if (CHIP_IS_E3A0(bp))
  710. return IS_E3A0_REG(reg_info->chips);
  711. else if (CHIP_IS_E3B0(bp))
  712. return IS_E3B0_REG(reg_info->chips);
  713. else
  714. return false;
  715. }
  716. static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
  717. const struct wreg_addr *wreg_info)
  718. {
  719. if (CHIP_IS_E1(bp))
  720. return IS_E1_REG(wreg_info->chips);
  721. else if (CHIP_IS_E1H(bp))
  722. return IS_E1H_REG(wreg_info->chips);
  723. else if (CHIP_IS_E2(bp))
  724. return IS_E2_REG(wreg_info->chips);
  725. else if (CHIP_IS_E3A0(bp))
  726. return IS_E3A0_REG(wreg_info->chips);
  727. else if (CHIP_IS_E3B0(bp))
  728. return IS_E3B0_REG(wreg_info->chips);
  729. else
  730. return false;
  731. }
/**
 * bnx2x_read_pages_regs - read "paged" registers
 *
 * @bp		device handle
 * @p		output buffer
 * @preset	dump preset; only read addresses belonging to it are dumped
 *
 * Reads "paged" memories: memories that may only be read by first writing to a
 * specific address ("write address") and then reading from a specific address
 * ("read address"). There may be more than one write address per "page" and
 * more than one read address per write address.
 */
static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p, u32 preset)
{
	u32 i, j, k, n;

	/* addresses of the paged registers */
	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
	/* number of paged registers */
	int num_pages = __bnx2x_get_page_reg_num(bp);
	/* write addresses */
	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
	/* number of write addresses */
	int write_num = __bnx2x_get_page_write_num(bp);
	/* read addresses info */
	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
	/* number of read addresses */
	int read_num = __bnx2x_get_page_read_num(bp);
	u32 addr, size;

	for (i = 0; i < num_pages; i++) {
		for (j = 0; j < write_num; j++) {
			/* select page i by writing its value to this
			 * write address
			 */
			REG_WR(bp, write_addr[j], page_addr[i]);
			for (k = 0; k < read_num; k++) {
				/* skip registers outside the requested
				 * preset
				 */
				if (IS_REG_IN_PRESET(read_addr[k].presets,
						     preset)) {
					size = read_addr[k].size;
					/* dump 'size' consecutive dwords */
					for (n = 0; n < size; n++) {
						addr = read_addr[k].addr + n*4;
						*p++ = REG_RD(bp, addr);
					}
				}
			}
		}
	}
}
  775. static int __bnx2x_get_preset_regs(struct bnx2x *bp, u32 *p, u32 preset)
  776. {
  777. u32 i, j, addr;
  778. const struct wreg_addr *wreg_addr_p = NULL;
  779. if (CHIP_IS_E1(bp))
  780. wreg_addr_p = &wreg_addr_e1;
  781. else if (CHIP_IS_E1H(bp))
  782. wreg_addr_p = &wreg_addr_e1h;
  783. else if (CHIP_IS_E2(bp))
  784. wreg_addr_p = &wreg_addr_e2;
  785. else if (CHIP_IS_E3A0(bp))
  786. wreg_addr_p = &wreg_addr_e3;
  787. else if (CHIP_IS_E3B0(bp))
  788. wreg_addr_p = &wreg_addr_e3b0;
  789. /* Read the idle_chk registers */
  790. for (i = 0; i < IDLE_REGS_COUNT; i++) {
  791. if (bnx2x_is_reg_in_chip(bp, &idle_reg_addrs[i]) &&
  792. IS_REG_IN_PRESET(idle_reg_addrs[i].presets, preset)) {
  793. for (j = 0; j < idle_reg_addrs[i].size; j++)
  794. *p++ = REG_RD(bp, idle_reg_addrs[i].addr + j*4);
  795. }
  796. }
  797. /* Read the regular registers */
  798. for (i = 0; i < REGS_COUNT; i++) {
  799. if (bnx2x_is_reg_in_chip(bp, &reg_addrs[i]) &&
  800. IS_REG_IN_PRESET(reg_addrs[i].presets, preset)) {
  801. for (j = 0; j < reg_addrs[i].size; j++)
  802. *p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
  803. }
  804. }
  805. /* Read the CAM registers */
  806. if (bnx2x_is_wreg_in_chip(bp, wreg_addr_p) &&
  807. IS_REG_IN_PRESET(wreg_addr_p->presets, preset)) {
  808. for (i = 0; i < wreg_addr_p->size; i++) {
  809. *p++ = REG_RD(bp, wreg_addr_p->addr + i*4);
  810. /* In case of wreg_addr register, read additional
  811. registers from read_regs array
  812. */
  813. for (j = 0; j < wreg_addr_p->read_regs_count; j++) {
  814. addr = *(wreg_addr_p->read_regs);
  815. *p++ = REG_RD(bp, addr + j*4);
  816. }
  817. }
  818. }
  819. /* Paged registers are supported in E2 & E3 only */
  820. if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp)) {
  821. /* Read "paged" registers */
  822. bnx2x_read_pages_regs(bp, p, preset);
  823. }
  824. return 0;
  825. }
  826. static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
  827. {
  828. u32 preset_idx;
  829. /* Read all registers, by reading all preset registers */
  830. for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) {
  831. /* Skip presets with IOR */
  832. if ((preset_idx == 2) ||
  833. (preset_idx == 5) ||
  834. (preset_idx == 8) ||
  835. (preset_idx == 11))
  836. continue;
  837. __bnx2x_get_preset_regs(bp, p, preset_idx);
  838. p += __bnx2x_get_preset_regs_len(bp, preset_idx);
  839. }
  840. }
/* ethtool get_regs handler: write a dump_header followed by all preset
 * registers into the user-provided buffer @_p.
 */
static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_header dump_hdr = {0};

	regs->version = 2;
	memset(p, 0, regs->len);

	/* Registers can only be dumped while the interface is up */
	if (!netif_running(bp->dev))
		return;

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */
	bnx2x_disable_blocks_parity(bp);

	/* header_size is in dwords, not counting the first dword */
	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
	dump_hdr.preset = DUMP_ALL_PRESETS;
	dump_hdr.version = BNX2X_DUMP_VERSION;

	/* dump_meta_data presents OR of CHIP and PATH. */
	if (CHIP_IS_E1(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	memcpy(p, &dump_hdr, sizeof(struct dump_header));
	/* advance past the header; header_size excludes one dword */
	p += dump_hdr.header_size + 1;

	/* Actually read the registers */
	__bnx2x_get_regs(bp, p);

	/* Re-enable parity attentions */
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);
}
  882. static int bnx2x_get_preset_regs_len(struct net_device *dev, u32 preset)
  883. {
  884. struct bnx2x *bp = netdev_priv(dev);
  885. int regdump_len = 0;
  886. regdump_len = __bnx2x_get_preset_regs_len(bp, preset);
  887. regdump_len *= 4;
  888. regdump_len += sizeof(struct dump_header);
  889. return regdump_len;
  890. }
  891. static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
  892. {
  893. struct bnx2x *bp = netdev_priv(dev);
  894. /* Use the ethtool_dump "flag" field as the dump preset index */
  895. if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
  896. return -EINVAL;
  897. bp->dump_preset_idx = val->flag;
  898. return 0;
  899. }
  900. static int bnx2x_get_dump_flag(struct net_device *dev,
  901. struct ethtool_dump *dump)
  902. {
  903. struct bnx2x *bp = netdev_priv(dev);
  904. dump->version = BNX2X_DUMP_VERSION;
  905. dump->flag = bp->dump_preset_idx;
  906. /* Calculate the requested preset idx length */
  907. dump->len = bnx2x_get_preset_regs_len(dev, bp->dump_preset_idx);
  908. DP(BNX2X_MSG_ETHTOOL, "Get dump preset %d length=%d\n",
  909. bp->dump_preset_idx, dump->len);
  910. return 0;
  911. }
/* ethtool get_dump_data handler: dump only the preset selected via
 * bnx2x_set_dump(), preceded by a dump_header.
 */
static int bnx2x_get_dump_data(struct net_device *dev,
			       struct ethtool_dump *dump,
			       void *buffer)
{
	u32 *p = buffer;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_header dump_hdr = {0};

	/* Disable parity attentions as long as following dump may
	 * cause false alarms by reading never written registers. We
	 * will re-enable parity attentions right after the dump.
	 */
	bnx2x_disable_blocks_parity(bp);

	/* header_size is in dwords, not counting the first dword */
	dump_hdr.header_size = (sizeof(struct dump_header) / 4) - 1;
	dump_hdr.preset = bp->dump_preset_idx;
	dump_hdr.version = BNX2X_DUMP_VERSION;

	DP(BNX2X_MSG_ETHTOOL, "Get dump data of preset %d\n", dump_hdr.preset);

	/* dump_meta_data presents OR of CHIP and PATH. */
	if (CHIP_IS_E1(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1;
	} else if (CHIP_IS_E1H(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E1H;
	} else if (CHIP_IS_E2(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E2 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3A0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3A0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	} else if (CHIP_IS_E3B0(bp)) {
		dump_hdr.dump_meta_data = DUMP_CHIP_E3B0 |
		(BP_PATH(bp) ? DUMP_PATH_1 : DUMP_PATH_0);
	}

	memcpy(p, &dump_hdr, sizeof(struct dump_header));
	/* advance past the header; header_size excludes one dword */
	p += dump_hdr.header_size + 1;

	/* Actually read the registers */
	__bnx2x_get_preset_regs(bp, p, dump_hdr.preset);

	/* Re-enable parity attentions */
	bnx2x_clear_blocks_parity(bp);
	bnx2x_enable_blocks_parity(bp);

	return 0;
}
/* ethtool get_drvinfo handler: driver name/version, firmware version
 * string (formatted by bnx2x_fill_fw_str()) and PCI bus address.
 */
static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));

	bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version));

	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
  961. static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  962. {
  963. struct bnx2x *bp = netdev_priv(dev);
  964. if (bp->flags & NO_WOL_FLAG) {
  965. wol->supported = 0;
  966. wol->wolopts = 0;
  967. } else {
  968. wol->supported = WAKE_MAGIC;
  969. if (bp->wol)
  970. wol->wolopts = WAKE_MAGIC;
  971. else
  972. wol->wolopts = 0;
  973. }
  974. memset(&wol->sopass, 0, sizeof(wol->sopass));
  975. }
  976. static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  977. {
  978. struct bnx2x *bp = netdev_priv(dev);
  979. if (wol->wolopts & ~WAKE_MAGIC) {
  980. DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
  981. return -EINVAL;
  982. }
  983. if (wol->wolopts & WAKE_MAGIC) {
  984. if (bp->flags & NO_WOL_FLAG) {
  985. DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
  986. return -EINVAL;
  987. }
  988. bp->wol = 1;
  989. } else
  990. bp->wol = 0;
  991. if (SHMEM2_HAS(bp, curr_cfg))
  992. SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
  993. return 0;
  994. }
  995. static u32 bnx2x_get_msglevel(struct net_device *dev)
  996. {
  997. struct bnx2x *bp = netdev_priv(dev);
  998. return bp->msg_enable;
  999. }
  1000. static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
  1001. {
  1002. struct bnx2x *bp = netdev_priv(dev);
  1003. if (capable(CAP_NET_ADMIN)) {
  1004. /* dump MCP trace */
  1005. if (IS_PF(bp) && (level & BNX2X_MSG_MCP))
  1006. bnx2x_fw_dump_lvl(bp, KERN_INFO);
  1007. bp->msg_enable = level;
  1008. }
  1009. }
  1010. static int bnx2x_nway_reset(struct net_device *dev)
  1011. {
  1012. struct bnx2x *bp = netdev_priv(dev);
  1013. if (!bp->port.pmf)
  1014. return 0;
  1015. if (netif_running(dev)) {
  1016. bnx2x_stats_handle(bp, STATS_EVENT_STOP);
  1017. bnx2x_force_link_reset(bp);
  1018. bnx2x_link_set(bp);
  1019. }
  1020. return 0;
  1021. }
  1022. static u32 bnx2x_get_link(struct net_device *dev)
  1023. {
  1024. struct bnx2x *bp = netdev_priv(dev);
  1025. if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
  1026. return 0;
  1027. if (IS_VF(bp))
  1028. return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
  1029. &bp->vf_link_vars.link_report_flags);
  1030. return bp->link_vars.link_up;
  1031. }
  1032. static int bnx2x_get_eeprom_len(struct net_device *dev)
  1033. {
  1034. struct bnx2x *bp = netdev_priv(dev);
  1035. return bp->common.flash_size;
  1036. }
  1037. /* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
  1038. * had we done things the other way around, if two pfs from the same port would
  1039. * attempt to access nvram at the same time, we could run into a scenario such
  1040. * as:
  1041. * pf A takes the port lock.
  1042. * pf B succeeds in taking the same lock since they are from the same port.
  1043. * pf A takes the per pf misc lock. Performs eeprom access.
  1044. * pf A finishes. Unlocks the per pf misc lock.
 * Pf B takes the lock and proceeds to perform its own access.
  1046. * pf A unlocks the per port lock, while pf B is still working (!).
 * mcp takes the per port lock and corrupts pf B's access (and/or has its own
 * access corrupted by pf B)
  1049. */
/* Take the per-PF HW lock, then request the per-port NVRAM software
 * arbiter and poll until this port's grant bit appears.  Returns 0 on
 * success or -EBUSY on timeout (the HW lock is released on failure).
 */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val;

	/* acquire HW lock: protect against other PFs in PF Direct Assignment */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	/* poll for the arbiter to grant this port's request bit */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot get access to nvram interface\n");
		/* drop the HW lock taken above before bailing out */
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
		return -EBUSY;
	}

	return 0;
}
/* Release the per-port NVRAM software arbiter (polling until the grant
 * bit clears), then drop the per-PF HW lock.  Returns 0 on success or
 * -EBUSY if the arbiter did not release in time (HW lock is then NOT
 * released).
 */
static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val;

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	/* poll for this port's grant bit to clear */
	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	/* release HW lock: protect against other PFs in PF Direct Assignment */
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
	return 0;
}
  1105. static void bnx2x_enable_nvram_access(struct bnx2x *bp)
  1106. {
  1107. u32 val;
  1108. val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
  1109. /* enable both bits, even on read */
  1110. REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
  1111. (val | MCPR_NVM_ACCESS_ENABLE_EN |
  1112. MCPR_NVM_ACCESS_ENABLE_WR_EN));
  1113. }
  1114. static void bnx2x_disable_nvram_access(struct bnx2x *bp)
  1115. {
  1116. u32 val;
  1117. val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
  1118. /* disable both bits, even after read */
  1119. REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
  1120. (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
  1121. MCPR_NVM_ACCESS_ENABLE_WR_EN)));
  1122. }
/* Issue a single-dword NVRAM read at @offset and busy-wait for
 * completion.  @cmd_flags may carry FIRST/LAST sequence markers; the
 * DOIT bit is added here.  On success *@ret_val holds the data in
 * big-endian order (ethtool presents NVRAM as a byte array).  Returns
 * 0 on success, -EBUSY on timeout.
 */
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work
			 */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}
	if (rc == -EBUSY)
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "nvram read timeout expired\n");
	return rc;
}
/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 * Both @offset and @buf_size must be dword-aligned and non-zero, and
 * the range must lie within the flash.  Holds the NVRAM lock for the
 * duration of the transfer.  Returns 0 on success or a negative errno.
 */
int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
		     int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s); FIRST marks the start of the sequence */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		/* final dword carries LAST to close the sequence */
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
  1208. static int bnx2x_nvram_read32(struct bnx2x *bp, u32 offset, u32 *buf,
  1209. int buf_size)
  1210. {
  1211. int rc;
  1212. rc = bnx2x_nvram_read(bp, offset, (u8 *)buf, buf_size);
  1213. if (!rc) {
  1214. __be32 *be = (__be32 *)buf;
  1215. while ((buf_size -= 4) >= 0)
  1216. *buf++ = be32_to_cpu(*be++);
  1217. }
  1218. return rc;
  1219. }
/* NVRAM is accessible only while the device is powered (D0).
 * rc holds the pci_read_config_word() status: it stays nonzero when
 * there is no PM capability (or the read fails), in which case
 * netif_running() is used as the fallback indicator.
 */
static bool bnx2x_is_nvm_accessible(struct bnx2x *bp)
{
	int rc = 1;
	u16 pm = 0;
	struct net_device *dev = pci_get_drvdata(bp->pdev);

	if (bp->pdev->pm_cap)
		rc = pci_read_config_word(bp->pdev,
					  bp->pdev->pm_cap + PCI_PM_CTRL, &pm);

	/* no PM info and interface down, or PM says we are not in D0 */
	if ((rc && !netif_running(dev)) ||
	    (!rc && ((pm & PCI_PM_CTRL_STATE_MASK) != (__force u16)PCI_D0)))
		return false;

	return true;
}
/* ethtool get_eeprom handler: plain NVRAM read; offset/length are
 * already validated by the ethtool core (ethtool_get_eeprom).
 */
static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */
	return bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
}
/* ethtool get_module_eeprom handler: read SFP module EEPROM over I2C.
 * Offsets below ETH_MODULE_SFF_8079_LEN come from device address A0;
 * offsets from there up to ETH_MODULE_SFF_8472_LEN come from A2
 * (rebased to 0 within the A2 page).  A request may straddle both.
 */
static int bnx2x_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = -EINVAL, phy_idx;
	u8 *user_data = data;
	unsigned int start_addr = ee->offset, xfer_size = 0;

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* Read A0 section */
	if (start_addr < ETH_MODULE_SFF_8079_LEN) {
		/* Limit transfer size to the A0 section boundary */
		if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
			xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
		else
			xfer_size = ee->len;
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
						  &bp->link_params,
						  I2C_DEV_ADDR_A0,
						  start_addr,
						  xfer_size,
						  user_data);
		bnx2x_release_phy_lock(bp);
		if (rc) {
			DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
			return -EINVAL;
		}
		/* continue after the bytes delivered from A0 */
		user_data += xfer_size;
		start_addr += xfer_size;
	}

	/* Read A2 section */
	if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
	    (start_addr < ETH_MODULE_SFF_8472_LEN)) {
		/* remaining bytes after the A0 portion (if any) */
		xfer_size = ee->len - xfer_size;
		/* Limit transfer size to the A2 section boundary */
		if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
			xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
		/* A2 offsets are 0-based within the A2 page */
		start_addr -= ETH_MODULE_SFF_8079_LEN;
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
						  &bp->link_params,
						  I2C_DEV_ADDR_A2,
						  start_addr,
						  xfer_size,
						  user_data);
		bnx2x_release_phy_lock(bp);
		if (rc) {
			DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
			return -EINVAL;
		}
	}
	return rc;
}
/* ethtool get_module_info handler: probe the SFP module's SFF-8472
 * compliance byte and diagnostic-type byte (both in the A0 page) to
 * decide whether the full 8472 EEPROM layout is readable or only the
 * 8079 (A0-only) layout.
 */
static int bnx2x_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct bnx2x *bp = netdev_priv(dev);
	int phy_idx, rc;
	u8 sff8472_comp, diag_type;

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}
	phy_idx = bnx2x_get_cur_phy_idx(bp);

	/* SFF-8472 compliance indicator */
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
					  &bp->link_params,
					  I2C_DEV_ADDR_A0,
					  SFP_EEPROM_SFF_8472_COMP_ADDR,
					  SFP_EEPROM_SFF_8472_COMP_SIZE,
					  &sff8472_comp);
	bnx2x_release_phy_lock(bp);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
		return -EINVAL;
	}

	/* diagnostic monitoring type */
	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
					  &bp->link_params,
					  I2C_DEV_ADDR_A0,
					  SFP_EEPROM_DIAG_TYPE_ADDR,
					  SFP_EEPROM_DIAG_TYPE_SIZE,
					  &diag_type);
	bnx2x_release_phy_lock(bp);
	if (rc) {
		DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
		return -EINVAL;
	}

	/* modules needing an address-change sequence are treated as A0-only */
	if (!sff8472_comp ||
	    (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}
	return 0;
}
/* Issue a single-dword NVRAM write through the MCP register interface
 * and poll for completion.
 *
 * Caller must already hold the NVRAM lock and have enabled NVRAM access.
 * @cmd_flags may carry FIRST/LAST sequencing bits; DOIT and WR are added
 * here. Returns 0 on success, -EBUSY if the DONE bit never appears
 * within the timeout.
 */
static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = BNX2X_NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion: poll the command register for DONE */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	if (rc == -EBUSY)
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "nvram write timeout expired\n");
	return rc;
}
/* Bit position of byte @offset within its containing aligned dword */
#define BYTE_OFFSET(offset) (8 * (offset & 0x03))

/* Write a single byte to NVRAM via a read-modify-write of the aligned
 * dword that contains it (the HW interface is dword-granular).
 *
 * Used for the ethtool single-byte eeprom write path; @buf_size is
 * expected to be 1 (only *data_buf is consumed). Returns 0 on success
 * or a negative errno.
 */
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags, align_offset, val;
	__be32 val_be;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* single dword transaction: FIRST and LAST set together */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val_be, cmd_flags);

	if (rc == 0) {
		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order
		 */
		val = be32_to_cpu(val_be);

		/* clear the target byte, then splice in the new value;
		 * the __force __le32 casts only pacify sparse here
		 */
		val &= ~le32_to_cpu((__force __le32)
				    (0xff << BYTE_OFFSET(offset)));
		val |= le32_to_cpu((__force __le32)
				   (*data_buf << BYTE_OFFSET(offset)));

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
/* Write @buf_size bytes from @data_buf to NVRAM at @offset, one dword at
 * a time, handling FIRST/LAST sequencing at page boundaries.
 *
 * A buf_size of 1 (the ethtool byte-write path) is delegated to
 * bnx2x_nvram_write1(); otherwise both offset and size must be
 * dword-aligned and non-zero. Returns 0 on success or a negative errno.
 */
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		/* mark the final dword of the buffer or of a flash page
		 * with LAST; mark a fresh page start with FIRST
		 */
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		/* Notice unlike bnx2x_nvram_read_dword() this will not
		 * change val using be32_to_cpu(), which causes data to flip
		 * if the eeprom is read and then written back. This is due
		 * to tools utilizing this functionality that would break
		 * if this would be resolved.
		 */
		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);

		/* At end of each 4Kb page, release nvram lock to allow MFW
		 * chance to take it for its own use.
		 */
		if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
		    (written_so_far < buf_size)) {
			DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
			   "Releasing NVM lock after offset 0x%x\n",
			   (u32)(offset - sizeof(u32)));
			bnx2x_release_nvram_lock(bp);
			usleep_range(1000, 2000);
			rc = bnx2x_acquire_nvram_lock(bp);
			/* NOTE(review): this early return skips
			 * bnx2x_disable_nvram_access(); confirm whether the
			 * MFW handles that, or whether a cleanup path is
			 * needed here.
			 */
			if (rc)
				return rc;
		}

		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
/* ethtool -E: write to the board NVRAM, or drive the external-PHY
 * firmware-upgrade state machine when a 'PHY*' magic is supplied.
 *
 * Magics in [0x50485900, 0x504859FF] address the PHY and are restricted
 * to the PMF. Any unrecognized magic falls through to a plain NVRAM
 * write of @eebuf. Returns 0 on success or a negative errno; for the
 * PHY paths the OR-accumulated link-layer return codes are propagated.
 */
static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;
	u32 ext_phy_config;

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "wrong magic or interface is not pmf\n");
		return -EINVAL;
	}

	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed
		 * NOTE(review): ASCII 'PHYC' is 0x50485943, not the
		 * 0x53985943 the code matches — looks intentional/historic;
		 * confirm against the PHY FW upgrade tooling before changing.
		 */
		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
		    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp,
						  &bp->link_params.phy[EXT_PHY1]);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
  1566. static int bnx2x_get_coalesce(struct net_device *dev,
  1567. struct ethtool_coalesce *coal)
  1568. {
  1569. struct bnx2x *bp = netdev_priv(dev);
  1570. memset(coal, 0, sizeof(struct ethtool_coalesce));
  1571. coal->rx_coalesce_usecs = bp->rx_ticks;
  1572. coal->tx_coalesce_usecs = bp->tx_ticks;
  1573. return 0;
  1574. }
  1575. static int bnx2x_set_coalesce(struct net_device *dev,
  1576. struct ethtool_coalesce *coal)
  1577. {
  1578. struct bnx2x *bp = netdev_priv(dev);
  1579. bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
  1580. if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
  1581. bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
  1582. bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
  1583. if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
  1584. bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
  1585. if (netif_running(dev))
  1586. bnx2x_update_coalesce(bp);
  1587. return 0;
  1588. }
  1589. static void bnx2x_get_ringparam(struct net_device *dev,
  1590. struct ethtool_ringparam *ering)
  1591. {
  1592. struct bnx2x *bp = netdev_priv(dev);
  1593. ering->rx_max_pending = MAX_RX_AVAIL;
  1594. if (bp->rx_ring_size)
  1595. ering->rx_pending = bp->rx_ring_size;
  1596. else
  1597. ering->rx_pending = MAX_RX_AVAIL;
  1598. ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
  1599. ering->tx_pending = bp->tx_ring_size;
  1600. }
  1601. static int bnx2x_set_ringparam(struct net_device *dev,
  1602. struct ethtool_ringparam *ering)
  1603. {
  1604. struct bnx2x *bp = netdev_priv(dev);
  1605. DP(BNX2X_MSG_ETHTOOL,
  1606. "set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
  1607. ering->rx_pending, ering->tx_pending);
  1608. if (pci_num_vf(bp->pdev)) {
  1609. DP(BNX2X_MSG_IOV,
  1610. "VFs are enabled, can not change ring parameters\n");
  1611. return -EPERM;
  1612. }
  1613. if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
  1614. DP(BNX2X_MSG_ETHTOOL,
  1615. "Handling parity error recovery. Try again later\n");
  1616. return -EAGAIN;
  1617. }
  1618. if ((ering->rx_pending > MAX_RX_AVAIL) ||
  1619. (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
  1620. MIN_RX_SIZE_TPA)) ||
  1621. (ering->tx_pending > (IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL)) ||
  1622. (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
  1623. DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
  1624. return -EINVAL;
  1625. }
  1626. bp->rx_ring_size = ering->rx_pending;
  1627. bp->tx_ring_size = ering->tx_pending;
  1628. return bnx2x_reload_if_running(dev);
  1629. }
  1630. static void bnx2x_get_pauseparam(struct net_device *dev,
  1631. struct ethtool_pauseparam *epause)
  1632. {
  1633. struct bnx2x *bp = netdev_priv(dev);
  1634. int cfg_idx = bnx2x_get_link_cfg_idx(bp);
  1635. int cfg_reg;
  1636. epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
  1637. BNX2X_FLOW_CTRL_AUTO);
  1638. if (!epause->autoneg)
  1639. cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
  1640. else
  1641. cfg_reg = bp->link_params.req_fc_auto_adv;
  1642. epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
  1643. BNX2X_FLOW_CTRL_RX);
  1644. epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
  1645. BNX2X_FLOW_CTRL_TX);
  1646. DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
  1647. " autoneg %d rx_pause %d tx_pause %d\n",
  1648. epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
  1649. }
/* ethtool -A: program requested flow control and (optionally) the
 * flow-control advertisement for autoneg, then restart the link.
 *
 * No-op in multi-function mode (flow control is not per-function there).
 * Returns 0 on success, -EINVAL if autoneg is requested but unsupported
 * on the current link config.
 */
static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);

	if (IS_MF(bp))
		return 0;

	DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
	   " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	/* Start from AUTO, OR in the requested directions, and normalize
	 * "nothing requested" to NONE so it is not mistaken for AUTO.
	 */
	bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
			DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
			return -EINVAL;
		}

		/* with speed autoneg, leave flow control in AUTO and let
		 * the advertisement below decide
		 */
		if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
			bp->link_params.req_flow_ctrl[cfg_idx] =
				BNX2X_FLOW_CTRL_AUTO;
		}
		bp->link_params.req_fc_auto_adv = 0;
		if (epause->rx_pause)
			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX;

		if (epause->tx_pause)
			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX;

		if (!bp->link_params.req_fc_auto_adv)
			bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_NONE;
	}

	DP(BNX2X_MSG_ETHTOOL,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);

	/* force a link reset so the new settings take effect */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_force_link_reset(bp);
		bnx2x_link_set(bp);
	}

	return 0;
}
/* ethtool self-test names; order must match the order in which the
 * self-test handler runs the tests (BNX2X_NUM_TESTS_SF entries).
 */
static const char bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF][ETH_GSTRING_LEN] = {
	"register_test (offline) ",
	"memory_test (offline) ",
	"int_loopback_test (offline)",
	"ext_loopback_test (offline)",
	"nvram_test (online) ",
	"interrupt_test (online) ",
	"link_test (online) "
};

/* Indices of the ethtool private flags exposed by this driver */
enum {
	BNX2X_PRI_FLAG_ISCSI,
	BNX2X_PRI_FLAG_FCOE,
	BNX2X_PRI_FLAG_STORAGE,
	BNX2X_PRI_FLAG_LEN,
};

/* Private-flag names, indexed by the enum above */
static const char bnx2x_private_arr[BNX2X_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
	"iSCSI offload support",
	"FCoE offload support",
	"Storage only interface"
};
  1713. static u32 bnx2x_eee_to_adv(u32 eee_adv)
  1714. {
  1715. u32 modes = 0;
  1716. if (eee_adv & SHMEM_EEE_100M_ADV)
  1717. modes |= ADVERTISED_100baseT_Full;
  1718. if (eee_adv & SHMEM_EEE_1G_ADV)
  1719. modes |= ADVERTISED_1000baseT_Full;
  1720. if (eee_adv & SHMEM_EEE_10G_ADV)
  1721. modes |= ADVERTISED_10000baseT_Full;
  1722. return modes;
  1723. }
  1724. static u32 bnx2x_adv_to_eee(u32 modes, u32 shift)
  1725. {
  1726. u32 eee_adv = 0;
  1727. if (modes & ADVERTISED_100baseT_Full)
  1728. eee_adv |= SHMEM_EEE_100M_ADV;
  1729. if (modes & ADVERTISED_1000baseT_Full)
  1730. eee_adv |= SHMEM_EEE_1G_ADV;
  1731. if (modes & ADVERTISED_10000baseT_Full)
  1732. eee_adv |= SHMEM_EEE_10G_ADV;
  1733. return eee_adv << shift;
  1734. }
  1735. static int bnx2x_get_eee(struct net_device *dev, struct ethtool_eee *edata)
  1736. {
  1737. struct bnx2x *bp = netdev_priv(dev);
  1738. u32 eee_cfg;
  1739. if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
  1740. DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
  1741. return -EOPNOTSUPP;
  1742. }
  1743. eee_cfg = bp->link_vars.eee_status;
  1744. edata->supported =
  1745. bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_SUPPORTED_MASK) >>
  1746. SHMEM_EEE_SUPPORTED_SHIFT);
  1747. edata->advertised =
  1748. bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_ADV_STATUS_MASK) >>
  1749. SHMEM_EEE_ADV_STATUS_SHIFT);
  1750. edata->lp_advertised =
  1751. bnx2x_eee_to_adv((eee_cfg & SHMEM_EEE_LP_ADV_STATUS_MASK) >>
  1752. SHMEM_EEE_LP_ADV_STATUS_SHIFT);
  1753. /* SHMEM value is in 16u units --> Convert to 1u units. */
  1754. edata->tx_lpi_timer = (eee_cfg & SHMEM_EEE_TIMER_MASK) << 4;
  1755. edata->eee_enabled = (eee_cfg & SHMEM_EEE_REQUESTED_BIT) ? 1 : 0;
  1756. edata->eee_active = (eee_cfg & SHMEM_EEE_ACTIVE_BIT) ? 1 : 0;
  1757. edata->tx_lpi_enabled = (eee_cfg & SHMEM_EEE_LPI_REQUESTED_BIT) ? 1 : 0;
  1758. return 0;
  1759. }
/* ethtool --set-eee: validate the request against the shmem-reported
 * capabilities, fold the settings into link_params.eee_mode, and restart
 * the link to apply them.
 *
 * No-op in multi-function mode. The advertisement itself cannot be
 * changed here — it must equal what is already advertised. Returns 0 on
 * success, -EOPNOTSUPP/-EINVAL on unsupported or out-of-range requests.
 */
static int bnx2x_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 eee_cfg;
	u32 advertised;

	if (IS_MF(bp))
		return 0;

	if (!SHMEM2_HAS(bp, eee_status[BP_PORT(bp)])) {
		DP(BNX2X_MSG_ETHTOOL, "BC Version does not support EEE\n");
		return -EOPNOTSUPP;
	}

	eee_cfg = bp->link_vars.eee_status;

	if (!(eee_cfg & SHMEM_EEE_SUPPORTED_MASK)) {
		DP(BNX2X_MSG_ETHTOOL, "Board does not support EEE!\n");
		return -EOPNOTSUPP;
	}

	/* the advertisement is read-only: reject any attempt to change it */
	advertised = bnx2x_adv_to_eee(edata->advertised,
				      SHMEM_EEE_ADV_STATUS_SHIFT);
	if ((advertised != (eee_cfg & SHMEM_EEE_ADV_STATUS_MASK))) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Direct manipulation of EEE advertisement is not supported\n");
		return -EINVAL;
	}

	if (edata->tx_lpi_timer > EEE_MODE_TIMER_MASK) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Maximal Tx Lpi timer supported is %x(u)\n",
		   EEE_MODE_TIMER_MASK);
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled &&
	    (edata->tx_lpi_timer < EEE_MODE_NVRAM_AGGRESSIVE_TIME)) {
		DP(BNX2X_MSG_ETHTOOL,
		   "Minimal Tx Lpi timer supported is %d(u)\n",
		   EEE_MODE_NVRAM_AGGRESSIVE_TIME);
		return -EINVAL;
	}

	/* All is well; Apply changes*/
	if (edata->eee_enabled)
		bp->link_params.eee_mode |= EEE_MODE_ADV_LPI;
	else
		bp->link_params.eee_mode &= ~EEE_MODE_ADV_LPI;

	if (edata->tx_lpi_enabled)
		bp->link_params.eee_mode |= EEE_MODE_ENABLE_LPI;
	else
		bp->link_params.eee_mode &= ~EEE_MODE_ENABLE_LPI;

	/* replace the timer field, forcing NVRAM override and 1us units */
	bp->link_params.eee_mode &= ~EEE_MODE_TIMER_MASK;
	bp->link_params.eee_mode |= (edata->tx_lpi_timer &
				    EEE_MODE_TIMER_MASK) |
				    EEE_MODE_OVERRIDE_NVRAM |
				    EEE_MODE_OUTPUT_TIME;

	/* Restart link to propagate changes */
	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_force_link_reset(bp);
		bnx2x_link_set(bp);
	}

	return 0;
}
/* Chip-family indices used to select per-family entries in the register
 * and memory self-test tables below.
 */
enum {
	BNX2X_CHIP_E1_OFST = 0,
	BNX2X_CHIP_E1H_OFST,
	BNX2X_CHIP_E2_OFST,
	BNX2X_CHIP_E3_OFST,
	BNX2X_CHIP_E3B0_OFST,
	BNX2X_CHIP_MAX_OFST
};

/* One selector bit per chip family, built from the indices above */
#define BNX2X_CHIP_MASK_E1	(1 << BNX2X_CHIP_E1_OFST)
#define BNX2X_CHIP_MASK_E1H	(1 << BNX2X_CHIP_E1H_OFST)
#define BNX2X_CHIP_MASK_E2	(1 << BNX2X_CHIP_E2_OFST)
#define BNX2X_CHIP_MASK_E3	(1 << BNX2X_CHIP_E3_OFST)
#define BNX2X_CHIP_MASK_E3B0	(1 << BNX2X_CHIP_E3B0_OFST)

#define BNX2X_CHIP_MASK_ALL	((1 << BNX2X_CHIP_MAX_OFST) - 1)
#define BNX2X_CHIP_MASK_E1X	(BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
/* Self-test: verify that a table of chip registers are writable.
 *
 * For each register applicable to the detected chip family, saves the
 * current value, writes a test pattern (all-zeros then all-ones, masked
 * to the register's writable bits), reads it back, restores the original
 * value, and fails if the readback does not match. Returns 0 on success,
 * -ENODEV if the NVM is inaccessible or any register misbehaves.
 */
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0, hw;
	int port = BP_PORT(bp);
	/* Each entry: chip-family selector mask, base offset, per-port
	 * stride (offset1), and the writable-bit mask. The table is
	 * terminated by an offset0 of 0xffffffff.
	 */
	static const struct {
		u32 hw;
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BNX2X_CHIP_MASK_ALL,
			BRB1_REG_PAUSE_LOW_THRESHOLD_0,	4, 0x000003ff },
		{ BNX2X_CHIP_MASK_ALL,
			DORQ_REG_DB_ADDR0,		4, 0xffffffff },
		{ BNX2X_CHIP_MASK_E1X,
			HC_REG_AGG_INT_0,		4, 0x000003ff },
		{ BNX2X_CHIP_MASK_ALL,
			PBF_REG_MAC_IF0_ENABLE,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
			PBF_REG_P0_INIT_CRD,		4, 0x000007ff },
		{ BNX2X_CHIP_MASK_E3B0,
			PBF_REG_INIT_CRD_Q0,		4, 0x000007ff },
		{ BNX2X_CHIP_MASK_ALL,
			PRS_REG_CID_PORT_0,		4, 0x00ffffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_CDU0_L2P,	4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_TM0_L2P,		4, 0x000fffff },
/* 10 */	{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			PXP2_REG_PSWRQ_TSDM0_L2P,	4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			QM_REG_CONNNUM_0,		4, 0x000fffff },
		{ BNX2X_CHIP_MASK_ALL,
			TM_REG_LIN0_MAX_ACTIVE_CID,	4, 0x0003ffff },
		{ BNX2X_CHIP_MASK_ALL,
			SRC_REG_KEYRSS0_0,		40, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			SRC_REG_KEYRSS0_7,		40, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_WU_DA_CNT_CMD00,	4, 0x00000003 },
		{ BNX2X_CHIP_MASK_ALL,
			XCM_REG_GLB_DEL_ACK_MAX_CNT_0,	4, 0x000000ff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_T_BIT,		4, 0x00000001 },
/* 20 */	{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_EMAC0_IN_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_BMAC0_IN_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_XCM0_OUT_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_BRB0_OUT_EN,		4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_XCM_MASK,		4, 0x00000007 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_ACPI_PAT_6_LEN,	68, 0x000000ff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_ACPI_PAT_0_CRC,	68, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_MAC_0_0,	160, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_IP_0_1,	160, 0xffffffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_IPV4_IPV6_0,	160, 0x00000001 },
/* 30 */	{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_UDP_0,	160, 0x0000ffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_DEST_TCP_0,	160, 0x0000ffff },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LLH0_VLAN_ID_0,		160, 0x00000fff },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_XGXS_SERDES0_MODE_SEL,	4, 0x00000001 },
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
		{ BNX2X_CHIP_MASK_ALL,
			NIG_REG_STATUS_INTERRUPT_PORT0,	4, 0x07ffffff },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
			NIG_REG_SERDES0_CTRL_PHY_ADDR,	16, 0x0000001f },

		{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
	};

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return rc;
	}

	/* select the family bit used against each entry's hw mask */
	if (CHIP_IS_E1(bp))
		hw = BNX2X_CHIP_MASK_E1;
	else if (CHIP_IS_E1H(bp))
		hw = BNX2X_CHIP_MASK_E1H;
	else if (CHIP_IS_E2(bp))
		hw = BNX2X_CHIP_MASK_E2;
	else if (CHIP_IS_E3B0(bp))
		hw = BNX2X_CHIP_MASK_E3B0;
	else /* e3 A0 */
		hw = BNX2X_CHIP_MASK_E3;

	/* Repeat the test twice:
	 * First by writing 0x00000000, second by writing 0xffffffff
	 */
	for (idx = 0; idx < 2; idx++) {
		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			if (!(hw & reg_tbl[i].hw))
				continue;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val & mask);

			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(BNX2X_MSG_ETHTOOL,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
/* Self-test: sweep-read internal memories and verify no parity errors.
 *
 * Checks the parity-status registers first (some bits are expected noise
 * per chip family and are masked out), reads every word of each memory
 * in mem_tbl to provoke latent parity faults, then re-checks parity.
 * Returns 0 on success, -ENODEV on inaccessible NVM or a parity hit.
 */
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val, index;
	/* memories to sweep; terminated by offset 0xffffffff */
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};

	/* parity status registers with per-chip-family masks of bits to
	 * ignore (indexed by BNX2X_CHIP_*_OFST); NULL name terminates
	 */
	static const struct {
		char *name;
		u32 offset;
		u32 hw_mask[BNX2X_CHIP_MAX_OFST];
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,
			{0x3ffc0, 0,   0, 0} },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,
			{0x2,     0x2, 0, 0} },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
			{0,       0,   0, 0} },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,
			{0x3ffc0, 0,   0, 0} },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,
			{0x3ffc0, 0,   0, 0} },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,
			{0x3ffc1, 0,   0, 0} },

		{ NULL, 0xffffffff, {0, 0, 0, 0} }
	};

	if (!bnx2x_is_nvm_accessible(bp)) {
		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
		   "cannot access eeprom when the interface is down\n");
		return rc;
	}

	if (CHIP_IS_E1(bp))
		index = BNX2X_CHIP_E1_OFST;
	else if (CHIP_IS_E1H(bp))
		index = BNX2X_CHIP_E1H_OFST;
	else if (CHIP_IS_E2(bp))
		index = BNX2X_CHIP_E2_OFST;
	else /* e3 */
		index = BNX2X_CHIP_E3_OFST;

	/* pre-Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if (val & ~(prty_tbl[i].hw_mask[index])) {
			DP(BNX2X_MSG_ETHTOOL,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if (val & ~(prty_tbl[i].hw_mask[index])) {
			DP(BNX2X_MSG_ETHTOOL,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
/* Poll until the link comes up (20ms granularity, up to ~28s per phase):
 * first until the link self-test passes, then until link_vars reports
 * the link as initialized. Logs (but does not return) on timeout; does
 * nothing when @link_up is zero.
 */
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
{
	int cnt = 1400;

	if (link_up) {
		/* cnt-- post-decrements, so cnt ends at -1 on timeout */
		while (bnx2x_link_test(bp, is_serdes) && cnt--)
			msleep(20);

		if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
			DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");

		cnt = 1400;
		while (!bp->link_vars.link_up && cnt--)
			msleep(20);

		if (cnt <= 0 && !bp->link_vars.link_up)
			DP(BNX2X_MSG_ETHTOOL,
			   "Timeout waiting for link init\n");
	}
}
  2064. static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
  2065. {
  2066. unsigned int pkt_size, num_pkts, i;
  2067. struct sk_buff *skb;
  2068. unsigned char *packet;
  2069. struct bnx2x_fastpath *fp_rx = &bp->fp[0];
  2070. struct bnx2x_fastpath *fp_tx = &bp->fp[0];
  2071. struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0];
  2072. u16 tx_start_idx, tx_idx;
  2073. u16 rx_start_idx, rx_idx;
  2074. u16 pkt_prod, bd_prod;
  2075. struct sw_tx_bd *tx_buf;
  2076. struct eth_tx_start_bd *tx_start_bd;
  2077. dma_addr_t mapping;
  2078. union eth_rx_cqe *cqe;
  2079. u8 cqe_fp_flags, cqe_fp_type;
  2080. struct sw_rx_bd *rx_buf;
  2081. u16 len;
  2082. int rc = -ENODEV;
  2083. u8 *data;
  2084. struct netdev_queue *txq = netdev_get_tx_queue(bp->dev,
  2085. txdata->txq_index);
  2086. /* check the loopback mode */
  2087. switch (loopback_mode) {
  2088. case BNX2X_PHY_LOOPBACK:
  2089. if (bp->link_params.loopback_mode != LOOPBACK_XGXS) {
  2090. DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n");
  2091. return -EINVAL;
  2092. }
  2093. break;
  2094. case BNX2X_MAC_LOOPBACK:
  2095. if (CHIP_IS_E3(bp)) {
  2096. int cfg_idx = bnx2x_get_link_cfg_idx(bp);
  2097. if (bp->port.supported[cfg_idx] &
  2098. (SUPPORTED_10000baseT_Full |
  2099. SUPPORTED_20000baseMLD2_Full |
  2100. SUPPORTED_20000baseKR2_Full))
  2101. bp->link_params.loopback_mode = LOOPBACK_XMAC;
  2102. else
  2103. bp->link_params.loopback_mode = LOOPBACK_UMAC;
  2104. } else
  2105. bp->link_params.loopback_mode = LOOPBACK_BMAC;
  2106. bnx2x_phy_init(&bp->link_params, &bp->link_vars);
  2107. break;
  2108. case BNX2X_EXT_LOOPBACK:
  2109. if (bp->link_params.loopback_mode != LOOPBACK_EXT) {
  2110. DP(BNX2X_MSG_ETHTOOL,
  2111. "Can't configure external loopback\n");
  2112. return -EINVAL;
  2113. }
  2114. break;
  2115. default:
  2116. DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
  2117. return -EINVAL;
  2118. }
  2119. /* prepare the loopback packet */
  2120. pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
  2121. bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
  2122. skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
  2123. if (!skb) {
  2124. DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
  2125. rc = -ENOMEM;
  2126. goto test_loopback_exit;
  2127. }
  2128. packet = skb_put(skb, pkt_size);
  2129. memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
  2130. eth_zero_addr(packet + ETH_ALEN);
  2131. memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
  2132. for (i = ETH_HLEN; i < pkt_size; i++)
  2133. packet[i] = (unsigned char) (i & 0xff);
  2134. mapping = dma_map_single(&bp->pdev->dev, skb->data,
  2135. skb_headlen(skb), DMA_TO_DEVICE);
  2136. if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
  2137. rc = -ENOMEM;
  2138. dev_kfree_skb(skb);
  2139. DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
  2140. goto test_loopback_exit;
  2141. }
  2142. /* send the loopback packet */
  2143. num_pkts = 0;
  2144. tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
  2145. rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
  2146. netdev_tx_sent_queue(txq, skb->len);
  2147. pkt_prod = txdata->tx_pkt_prod++;
  2148. tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
  2149. tx_buf->first_bd = txdata->tx_bd_prod;
  2150. tx_buf->skb = skb;
  2151. tx_buf->flags = 0;
  2152. bd_prod = TX_BD(txdata->tx_bd_prod);
  2153. tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
  2154. tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
  2155. tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
  2156. tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
  2157. tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
  2158. tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
  2159. tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
  2160. SET_FLAG(tx_start_bd->general_data,
  2161. ETH_TX_START_BD_HDR_NBDS,
  2162. 1);
  2163. SET_FLAG(tx_start_bd->general_data,
  2164. ETH_TX_START_BD_PARSE_NBDS,
  2165. 0);
  2166. /* turn on parsing and get a BD */
  2167. bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
  2168. if (CHIP_IS_E1x(bp)) {
  2169. u16 global_data = 0;
  2170. struct eth_tx_parse_bd_e1x *pbd_e1x =
  2171. &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
  2172. memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
  2173. SET_FLAG(global_data,
  2174. ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, UNICAST_ADDRESS);
  2175. pbd_e1x->global_data = cpu_to_le16(global_data);
  2176. } else {
  2177. u32 parsing_data = 0;
  2178. struct eth_tx_parse_bd_e2 *pbd_e2 =
  2179. &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
  2180. memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
  2181. SET_FLAG(parsing_data,
  2182. ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, UNICAST_ADDRESS);
  2183. pbd_e2->parsing_data = cpu_to_le32(parsing_data);
  2184. }
  2185. wmb();
  2186. txdata->tx_db.data.prod += 2;
  2187. barrier();
  2188. DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
  2189. mmiowb();
  2190. barrier();
  2191. num_pkts++;
  2192. txdata->tx_bd_prod += 2; /* start + pbd */
  2193. udelay(100);
  2194. tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
  2195. if (tx_idx != tx_start_idx + num_pkts)
  2196. goto test_loopback_exit;
  2197. /* Unlike HC IGU won't generate an interrupt for status block
  2198. * updates that have been performed while interrupts were
  2199. * disabled.
  2200. */
  2201. if (bp->common.int_block == INT_BLOCK_IGU) {
  2202. /* Disable local BHes to prevent a dead-lock situation between
  2203. * sch_direct_xmit() and bnx2x_run_loopback() (calling
  2204. * bnx2x_tx_int()), as both are taking netif_tx_lock().
  2205. */
  2206. local_bh_disable();
  2207. bnx2x_tx_int(bp, txdata);
  2208. local_bh_enable();
  2209. }
  2210. rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
  2211. if (rx_idx != rx_start_idx + num_pkts)
  2212. goto test_loopback_exit;
  2213. cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
  2214. cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
  2215. cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
  2216. if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
  2217. goto test_loopback_rx_exit;
  2218. len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
  2219. if (len != pkt_size)
  2220. goto test_loopback_rx_exit;
  2221. rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
  2222. dma_sync_single_for_cpu(&bp->pdev->dev,
  2223. dma_unmap_addr(rx_buf, mapping),
  2224. fp_rx->rx_buf_size, DMA_FROM_DEVICE);
  2225. data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
  2226. for (i = ETH_HLEN; i < pkt_size; i++)
  2227. if (*(data + i) != (unsigned char) (i & 0xff))
  2228. goto test_loopback_rx_exit;
  2229. rc = 0;
  2230. test_loopback_rx_exit:
  2231. fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
  2232. fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
  2233. fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
  2234. fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
  2235. /* Update producers */
  2236. bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
  2237. fp_rx->rx_sge_prod);
  2238. test_loopback_exit:
  2239. bp->link_params.loopback_mode = LOOPBACK_NONE;
  2240. return rc;
  2241. }
  2242. static int bnx2x_test_loopback(struct bnx2x *bp)
  2243. {
  2244. int rc = 0, res;
  2245. if (BP_NOMCP(bp))
  2246. return rc;
  2247. if (!netif_running(bp->dev))
  2248. return BNX2X_LOOPBACK_FAILED;
  2249. bnx2x_netif_stop(bp, 1);
  2250. bnx2x_acquire_phy_lock(bp);
  2251. res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
  2252. if (res) {
  2253. DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
  2254. rc |= BNX2X_PHY_LOOPBACK_FAILED;
  2255. }
  2256. res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
  2257. if (res) {
  2258. DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
  2259. rc |= BNX2X_MAC_LOOPBACK_FAILED;
  2260. }
  2261. bnx2x_release_phy_lock(bp);
  2262. bnx2x_netif_start(bp);
  2263. return rc;
  2264. }
  2265. static int bnx2x_test_ext_loopback(struct bnx2x *bp)
  2266. {
  2267. int rc;
  2268. u8 is_serdes =
  2269. (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
  2270. if (BP_NOMCP(bp))
  2271. return -ENODEV;
  2272. if (!netif_running(bp->dev))
  2273. return BNX2X_EXT_LOOPBACK_FAILED;
  2274. bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
  2275. rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT);
  2276. if (rc) {
  2277. DP(BNX2X_MSG_ETHTOOL,
  2278. "Can't perform self-test, nic_load (for external lb) failed\n");
  2279. return -ENODEV;
  2280. }
  2281. bnx2x_wait_for_link(bp, 1, is_serdes);
  2282. bnx2x_netif_stop(bp, 1);
  2283. rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK);
  2284. if (rc)
  2285. DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc);
  2286. bnx2x_netif_start(bp);
  2287. return rc;
  2288. }
/* One entry of the NVRAM image directory.  The directory describes the
 * firmware/data images stored in NVRAM; each image is CRC-checked by the
 * nvram self-test below.
 */
struct code_entry {
	u32 sram_start_addr;
	/* code_attribute packs the image type and length bit-fields;
	 * decode with the masks below.
	 */
	u32 code_attribute;
#define CODE_IMAGE_TYPE_MASK			0xf0800003
#define CODE_IMAGE_VNTAG_PROFILES_DATA		0xd0000003
#define CODE_IMAGE_LENGTH_MASK			0x007ffffc
#define CODE_IMAGE_TYPE_EXTENDED_DIR		0xe0000000
	u32 nvm_start_addr;
};

#define CODE_ENTRY_MAX			16
/* The last main-directory slot may point at an extended directory */
#define CODE_ENTRY_EXTENDED_DIR_IDX	15
#define MAX_IMAGES_IN_EXTENDED_DIR	64
/* NVRAM offset of the main image directory */
#define NVRAM_DIR_OFFSET		0x14

/* True when the entry is an extended directory with a non-zero length */
#define EXTENDED_DIR_EXISTS(code)					  \
	((code & CODE_IMAGE_TYPE_MASK) == CODE_IMAGE_TYPE_EXTENDED_DIR && \
	 (code & CODE_IMAGE_LENGTH_MASK) != 0)

/* Expected CRC-32 residual when data + appended CRC are checksummed */
#define CRC32_RESIDUAL			0xdebb20e3
/* Scratch-buffer chunk size used when streaming NVRAM through crc32_le() */
#define CRC_BUFF_SIZE			256
  2307. static int bnx2x_nvram_crc(struct bnx2x *bp,
  2308. int offset,
  2309. int size,
  2310. u8 *buff)
  2311. {
  2312. u32 crc = ~0;
  2313. int rc = 0, done = 0;
  2314. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2315. "NVRAM CRC from 0x%08x to 0x%08x\n", offset, offset + size);
  2316. while (done < size) {
  2317. int count = min_t(int, size - done, CRC_BUFF_SIZE);
  2318. rc = bnx2x_nvram_read(bp, offset + done, buff, count);
  2319. if (rc)
  2320. return rc;
  2321. crc = crc32_le(crc, buff, count);
  2322. done += count;
  2323. }
  2324. if (crc != CRC32_RESIDUAL)
  2325. rc = -EINVAL;
  2326. return rc;
  2327. }
  2328. static int bnx2x_test_nvram_dir(struct bnx2x *bp,
  2329. struct code_entry *entry,
  2330. u8 *buff)
  2331. {
  2332. size_t size = entry->code_attribute & CODE_IMAGE_LENGTH_MASK;
  2333. u32 type = entry->code_attribute & CODE_IMAGE_TYPE_MASK;
  2334. int rc;
  2335. /* Zero-length images and AFEX profiles do not have CRC */
  2336. if (size == 0 || type == CODE_IMAGE_VNTAG_PROFILES_DATA)
  2337. return 0;
  2338. rc = bnx2x_nvram_crc(bp, entry->nvm_start_addr, size, buff);
  2339. if (rc)
  2340. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2341. "image %x has failed crc test (rc %d)\n", type, rc);
  2342. return rc;
  2343. }
  2344. static int bnx2x_test_dir_entry(struct bnx2x *bp, u32 addr, u8 *buff)
  2345. {
  2346. int rc;
  2347. struct code_entry entry;
  2348. rc = bnx2x_nvram_read32(bp, addr, (u32 *)&entry, sizeof(entry));
  2349. if (rc)
  2350. return rc;
  2351. return bnx2x_test_nvram_dir(bp, &entry, buff);
  2352. }
  2353. static int bnx2x_test_nvram_ext_dirs(struct bnx2x *bp, u8 *buff)
  2354. {
  2355. u32 rc, cnt, dir_offset = NVRAM_DIR_OFFSET;
  2356. struct code_entry entry;
  2357. int i;
  2358. rc = bnx2x_nvram_read32(bp,
  2359. dir_offset +
  2360. sizeof(entry) * CODE_ENTRY_EXTENDED_DIR_IDX,
  2361. (u32 *)&entry, sizeof(entry));
  2362. if (rc)
  2363. return rc;
  2364. if (!EXTENDED_DIR_EXISTS(entry.code_attribute))
  2365. return 0;
  2366. rc = bnx2x_nvram_read32(bp, entry.nvm_start_addr,
  2367. &cnt, sizeof(u32));
  2368. if (rc)
  2369. return rc;
  2370. dir_offset = entry.nvm_start_addr + 8;
  2371. for (i = 0; i < cnt && i < MAX_IMAGES_IN_EXTENDED_DIR; i++) {
  2372. rc = bnx2x_test_dir_entry(bp, dir_offset +
  2373. sizeof(struct code_entry) * i,
  2374. buff);
  2375. if (rc)
  2376. return rc;
  2377. }
  2378. return 0;
  2379. }
  2380. static int bnx2x_test_nvram_dirs(struct bnx2x *bp, u8 *buff)
  2381. {
  2382. u32 rc, dir_offset = NVRAM_DIR_OFFSET;
  2383. int i;
  2384. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "NVRAM DIRS CRC test-set\n");
  2385. for (i = 0; i < CODE_ENTRY_EXTENDED_DIR_IDX; i++) {
  2386. rc = bnx2x_test_dir_entry(bp, dir_offset +
  2387. sizeof(struct code_entry) * i,
  2388. buff);
  2389. if (rc)
  2390. return rc;
  2391. }
  2392. return bnx2x_test_nvram_ext_dirs(bp, buff);
  2393. }
/* An (offset, size) pair describing a fixed CRC-protected NVRAM region;
 * tables of these are terminated by a {0, 0} entry.
 */
struct crc_pair {
	int offset;
	int size;
};
  2398. static int bnx2x_test_nvram_tbl(struct bnx2x *bp,
  2399. const struct crc_pair *nvram_tbl, u8 *buf)
  2400. {
  2401. int i;
  2402. for (i = 0; nvram_tbl[i].size; i++) {
  2403. int rc = bnx2x_nvram_crc(bp, nvram_tbl[i].offset,
  2404. nvram_tbl[i].size, buf);
  2405. if (rc) {
  2406. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2407. "nvram_tbl[%d] has failed crc test (rc %d)\n",
  2408. i, rc);
  2409. return rc;
  2410. }
  2411. }
  2412. return 0;
  2413. }
  2414. static int bnx2x_test_nvram(struct bnx2x *bp)
  2415. {
  2416. const struct crc_pair nvram_tbl[] = {
  2417. { 0, 0x14 }, /* bootstrap */
  2418. { 0x14, 0xec }, /* dir */
  2419. { 0x100, 0x350 }, /* manuf_info */
  2420. { 0x450, 0xf0 }, /* feature_info */
  2421. { 0x640, 0x64 }, /* upgrade_key_info */
  2422. { 0x708, 0x70 }, /* manuf_key_info */
  2423. { 0, 0 }
  2424. };
  2425. const struct crc_pair nvram_tbl2[] = {
  2426. { 0x7e8, 0x350 }, /* manuf_info2 */
  2427. { 0xb38, 0xf0 }, /* feature_info */
  2428. { 0, 0 }
  2429. };
  2430. u8 *buf;
  2431. int rc;
  2432. u32 magic;
  2433. if (BP_NOMCP(bp))
  2434. return 0;
  2435. buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
  2436. if (!buf) {
  2437. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
  2438. rc = -ENOMEM;
  2439. goto test_nvram_exit;
  2440. }
  2441. rc = bnx2x_nvram_read32(bp, 0, &magic, sizeof(magic));
  2442. if (rc) {
  2443. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2444. "magic value read (rc %d)\n", rc);
  2445. goto test_nvram_exit;
  2446. }
  2447. if (magic != 0x669955aa) {
  2448. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2449. "wrong magic value (0x%08x)\n", magic);
  2450. rc = -ENODEV;
  2451. goto test_nvram_exit;
  2452. }
  2453. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "Port 0 CRC test-set\n");
  2454. rc = bnx2x_test_nvram_tbl(bp, nvram_tbl, buf);
  2455. if (rc)
  2456. goto test_nvram_exit;
  2457. if (!CHIP_IS_E1x(bp) && !CHIP_IS_57811xx(bp)) {
  2458. u32 hide = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
  2459. SHARED_HW_CFG_HIDE_PORT1;
  2460. if (!hide) {
  2461. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2462. "Port 1 CRC test-set\n");
  2463. rc = bnx2x_test_nvram_tbl(bp, nvram_tbl2, buf);
  2464. if (rc)
  2465. goto test_nvram_exit;
  2466. }
  2467. }
  2468. rc = bnx2x_test_nvram_dirs(bp, buf);
  2469. test_nvram_exit:
  2470. kfree(buf);
  2471. return rc;
  2472. }
  2473. /* Send an EMPTY ramrod on the first queue */
  2474. static int bnx2x_test_intr(struct bnx2x *bp)
  2475. {
  2476. struct bnx2x_queue_state_params params = {NULL};
  2477. if (!netif_running(bp->dev)) {
  2478. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2479. "cannot access eeprom when the interface is down\n");
  2480. return -ENODEV;
  2481. }
  2482. params.q_obj = &bp->sp_objs->q_obj;
  2483. params.cmd = BNX2X_Q_CMD_EMPTY;
  2484. __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
  2485. return bnx2x_queue_state_change(bp, &params);
  2486. }
/* ethtool .self_test handler.
 *
 * Fills @buf (one u64 per test, 0 = pass) and sets ETH_TEST_FL_FAILED in
 * @etest->flags on any failure.  The buf[] slot layout differs between MF
 * and non-MF mode: in MF mode the offline-only tests are absent, so the
 * online tests shift down to slots 0..2.
 *
 * Offline tests reload the NIC in LOAD_DIAG mode and therefore must save
 * and restore the NIG egress input-enable register around the reload.
 */
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 is_serdes, link_up;
	int rc, cnt = 0;

	/* Self-test is not supported while VFs are active */
	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV,
		   "VFs are enabled, can not perform self test\n");
		return;
	}

	/* Don't interfere with an in-progress parity recovery */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev,
			   "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	DP(BNX2X_MSG_ETHTOOL,
	   "Self-test command parameters: offline = %d, external_lb = %d\n",
	   (etest->flags & ETH_TEST_FL_OFFLINE),
	   (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp));

	/* NVRAM test runs even when the interface is down */
	if (bnx2x_test_nvram(bp) != 0) {
		if (!IS_MF(bp))
			buf[4] = 1;
		else
			buf[0] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (!netif_running(dev)) {
		DP(BNX2X_MSG_ETHTOOL, "Interface is down\n");
		return;
	}

	is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
	link_up = bp->link_vars.link_up;

	/* offline tests are not supported in MF mode */
	if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) {
		int port = BP_PORT(bp);
		u32 val;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		/* Reload the NIC in diagnostic mode for the offline tests */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);
		rc = bnx2x_nic_load(bp, LOAD_DIAG);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't perform self-test, nic_load (for offline) failed\n");
			return;
		}

		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, 1, is_serdes);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

		buf[2] = bnx2x_test_loopback(bp); /* internal LB */
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) {
			buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */
			if (buf[3] != 0)
				etest->flags |= ETH_TEST_FL_FAILED;
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
		}

		/* Return to normal operating mode */
		bnx2x_nic_unload(bp, UNLOAD_NORMAL, false);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
		if (rc) {
			etest->flags |= ETH_TEST_FL_FAILED;
			DP(BNX2X_MSG_ETHTOOL,
			   "Can't perform self-test, nic_load (for online) failed\n");
			return;
		}
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up, is_serdes);
	}

	/* Online tests: interrupt test and link test */
	if (bnx2x_test_intr(bp) != 0) {
		if (!IS_MF(bp))
			buf[5] = 1;
		else
			buf[1] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	/* Poll for link up to 2 seconds (100 x 20ms) */
	if (link_up) {
		cnt = 100;
		while (bnx2x_link_test(bp, is_serdes) && --cnt)
			msleep(20);
	}

	/* cnt == 0 means the link never came back (or was never up) */
	if (!cnt) {
		if (!IS_MF(bp))
			buf[6] = 1;
		else
			buf[2] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
/* Classification helpers for entries of bnx2x_stats_arr[] */
#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
/* Port stats are hidden for VFs, and in MF mode unless stats debugging
 * is enabled via msg_enable.
 */
#define HIDE_PORT_STAT(bp) \
		((IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS)) || \
		 IS_VF(bp))

/* ethtool statistics are displayed for all regular ethernet queues and the
 * fcoe L2 queue if not disabled
 */
static int bnx2x_num_stat_queues(struct bnx2x *bp)
{
	return BNX2X_NUM_ETH_QUEUES(bp);
}
  2603. static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
  2604. {
  2605. struct bnx2x *bp = netdev_priv(dev);
  2606. int i, num_strings = 0;
  2607. switch (stringset) {
  2608. case ETH_SS_STATS:
  2609. if (is_multi(bp)) {
  2610. num_strings = bnx2x_num_stat_queues(bp) *
  2611. BNX2X_NUM_Q_STATS;
  2612. } else
  2613. num_strings = 0;
  2614. if (HIDE_PORT_STAT(bp)) {
  2615. for (i = 0; i < BNX2X_NUM_STATS; i++)
  2616. if (IS_FUNC_STAT(i))
  2617. num_strings++;
  2618. } else
  2619. num_strings += BNX2X_NUM_STATS;
  2620. return num_strings;
  2621. case ETH_SS_TEST:
  2622. return BNX2X_NUM_TESTS(bp);
  2623. case ETH_SS_PRIV_FLAGS:
  2624. return BNX2X_PRI_FLAG_LEN;
  2625. default:
  2626. return -EINVAL;
  2627. }
  2628. }
  2629. static u32 bnx2x_get_private_flags(struct net_device *dev)
  2630. {
  2631. struct bnx2x *bp = netdev_priv(dev);
  2632. u32 flags = 0;
  2633. flags |= (!(bp->flags & NO_ISCSI_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_ISCSI;
  2634. flags |= (!(bp->flags & NO_FCOE_FLAG) ? 1 : 0) << BNX2X_PRI_FLAG_FCOE;
  2635. flags |= (!!IS_MF_STORAGE_ONLY(bp)) << BNX2X_PRI_FLAG_STORAGE;
  2636. return flags;
  2637. }
/* ethtool .get_strings: fill @buf with the name tables.
 *
 * For ETH_SS_STATS the layout must match bnx2x_get_ethtool_stats()
 * exactly: per-queue stat names first (multi-queue only), then the global
 * stat names with hidden port stats skipped.
 */
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k, start;
	char queue_name[MAX_QUEUE_NAME_LEN+1];

	switch (stringset) {
	case ETH_SS_STATS:
		k = 0;
		if (is_multi(bp)) {
			/* bnx2x_q_stats_arr[].string is a printf format with
			 * a %s placeholder for the queue name.
			 */
			for_each_eth_queue(bp, i) {
				memset(queue_name, 0, sizeof(queue_name));
				sprintf(queue_name, "%d", i);
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
						ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string,
						queue_name);
				k += BNX2X_NUM_Q_STATS;
			}
		}

		/* Global stats follow; j indexes output slots, i the table */
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
				continue;
			strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				   bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		/* First 4 tests cannot be done in MF mode */
		if (!IS_MF(bp))
			start = 0;
		else
			start = 4;
		memcpy(buf, bnx2x_tests_str_arr + start,
		       ETH_GSTRING_LEN * BNX2X_NUM_TESTS(bp));
		break;

	case ETH_SS_PRIV_FLAGS:
		memcpy(buf, bnx2x_private_arr,
		       ETH_GSTRING_LEN * BNX2X_PRI_FLAG_LEN);
		break;
	}
}
/* ethtool .get_ethtool_stats: copy counters into @buf.
 *
 * Slot layout mirrors bnx2x_get_strings(): per-queue stats first
 * (multi-queue only), then the global stats with hidden port stats
 * skipped.  Each table entry records its size (0 = placeholder, 4 = one
 * u32 word, otherwise two words combined via HILO_U64) and its u32-word
 * offset into the raw stats structure.
 */
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k = 0;

	if (is_multi(bp)) {
		for_each_eth_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
	}

	/* Global stats; j tracks the output slot since hidden port stats
	 * are skipped without consuming a slot.
	 */
	hw_stats = (u32 *)&bp->eth_stats;
	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (HIDE_PORT_STAT(bp) && IS_PORT_STAT(i))
			continue;
		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[k + j] = 0;
			j++;
			continue;
		}
		offset = (hw_stats + bnx2x_stats_arr[i].offset);
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[k + j] = (u64) *offset;
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[k + j] = HILO_U64(*offset, *(offset + 1));
		j++;
	}
}
  2731. static int bnx2x_set_phys_id(struct net_device *dev,
  2732. enum ethtool_phys_id_state state)
  2733. {
  2734. struct bnx2x *bp = netdev_priv(dev);
  2735. if (!bnx2x_is_nvm_accessible(bp)) {
  2736. DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
  2737. "cannot access eeprom when the interface is down\n");
  2738. return -EAGAIN;
  2739. }
  2740. switch (state) {
  2741. case ETHTOOL_ID_ACTIVE:
  2742. return 1; /* cycle on/off once per second */
  2743. case ETHTOOL_ID_ON:
  2744. bnx2x_acquire_phy_lock(bp);
  2745. bnx2x_set_led(&bp->link_params, &bp->link_vars,
  2746. LED_MODE_ON, SPEED_1000);
  2747. bnx2x_release_phy_lock(bp);
  2748. break;
  2749. case ETHTOOL_ID_OFF:
  2750. bnx2x_acquire_phy_lock(bp);
  2751. bnx2x_set_led(&bp->link_params, &bp->link_vars,
  2752. LED_MODE_FRONT_PANEL_OFF, 0);
  2753. bnx2x_release_phy_lock(bp);
  2754. break;
  2755. case ETHTOOL_ID_INACTIVE:
  2756. bnx2x_acquire_phy_lock(bp);
  2757. bnx2x_set_led(&bp->link_params, &bp->link_vars,
  2758. LED_MODE_OPER,
  2759. bp->link_vars.line_speed);
  2760. bnx2x_release_phy_lock(bp);
  2761. }
  2762. return 0;
  2763. }
  2764. static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
  2765. {
  2766. switch (info->flow_type) {
  2767. case TCP_V4_FLOW:
  2768. case TCP_V6_FLOW:
  2769. info->data = RXH_IP_SRC | RXH_IP_DST |
  2770. RXH_L4_B_0_1 | RXH_L4_B_2_3;
  2771. break;
  2772. case UDP_V4_FLOW:
  2773. if (bp->rss_conf_obj.udp_rss_v4)
  2774. info->data = RXH_IP_SRC | RXH_IP_DST |
  2775. RXH_L4_B_0_1 | RXH_L4_B_2_3;
  2776. else
  2777. info->data = RXH_IP_SRC | RXH_IP_DST;
  2778. break;
  2779. case UDP_V6_FLOW:
  2780. if (bp->rss_conf_obj.udp_rss_v6)
  2781. info->data = RXH_IP_SRC | RXH_IP_DST |
  2782. RXH_L4_B_0_1 | RXH_L4_B_2_3;
  2783. else
  2784. info->data = RXH_IP_SRC | RXH_IP_DST;
  2785. break;
  2786. case IPV4_FLOW:
  2787. case IPV6_FLOW:
  2788. info->data = RXH_IP_SRC | RXH_IP_DST;
  2789. break;
  2790. default:
  2791. info->data = 0;
  2792. break;
  2793. }
  2794. return 0;
  2795. }
  2796. static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
  2797. u32 *rules __always_unused)
  2798. {
  2799. struct bnx2x *bp = netdev_priv(dev);
  2800. switch (info->cmd) {
  2801. case ETHTOOL_GRXRINGS:
  2802. info->data = BNX2X_NUM_ETH_QUEUES(bp);
  2803. return 0;
  2804. case ETHTOOL_GRXFH:
  2805. return bnx2x_get_rss_flags(bp, info);
  2806. default:
  2807. DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
  2808. return -EOPNOTSUPP;
  2809. }
  2810. }
/* Configure the RSS hash fields for a flow type (ETHTOOL_SRXFH).
 *
 * Only the hardware-supported combinations are accepted: TCP is fixed to
 * the 4-tuple, UDP may toggle between 2-tuple and 4-tuple (not on E1x),
 * plain IP is fixed to the 2-tuple, and the remaining flow types accept
 * no hashing at all.  Toggling UDP RSS re-programs the device when it is
 * up.  Returns 0 on success or -EINVAL for unsupported combinations.
 */
static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
{
	int udp_rss_requested;

	DP(BNX2X_MSG_ETHTOOL,
	   "Set rss flags command parameters: flow type = %d, data = %llu\n",
	   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;

	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3))
			udp_rss_requested = 1;
		else if (info->data == (RXH_IP_SRC | RXH_IP_DST))
			udp_rss_requested = 0;
		else
			return -EINVAL;

		/* E1x hardware cannot hash UDP on the 4-tuple */
		if (CHIP_IS_E1x(bp) && udp_rss_requested) {
			DP(BNX2X_MSG_ETHTOOL,
			   "57710, 57711 boards don't support RSS according to UDP 4-tuple\n");
			return -EINVAL;
		}

		/* Re-program the device only when the setting changes and
		 * the interface is up.
		 */
		if ((info->flow_type == UDP_V4_FLOW) &&
		    (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) {
			bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested;
			DP(BNX2X_MSG_ETHTOOL,
			   "rss re-configured, UDP 4-tupple %s\n",
			   udp_rss_requested ? "enabled" : "disabled");
			if (bp->state == BNX2X_STATE_OPEN)
				return bnx2x_rss(bp, &bp->rss_conf_obj, false,
						 true);
		} else if ((info->flow_type == UDP_V6_FLOW) &&
			   (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
			bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
			DP(BNX2X_MSG_ETHTOOL,
			   "rss re-configured, UDP 4-tupple %s\n",
			   udp_rss_requested ? "enabled" : "disabled");
			if (bp->state == BNX2X_STATE_OPEN)
				return bnx2x_rss(bp, &bp->rss_conf_obj, false,
						 true);
		}
		return 0;

	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;

	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IP_USER_FLOW:
	case ETHER_FLOW:
		/* RSS is not supported for these protocols */
		if (info->data) {
			DP(BNX2X_MSG_ETHTOOL,
			   "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}
  2893. static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
  2894. {
  2895. struct bnx2x *bp = netdev_priv(dev);
  2896. switch (info->cmd) {
  2897. case ETHTOOL_SRXFH:
  2898. return bnx2x_set_rss_flags(bp, info);
  2899. default:
  2900. DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
  2901. return -EOPNOTSUPP;
  2902. }
  2903. }
/* ethtool .get_rxfh_indir_size: the RSS indirection table size is fixed */
static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
{
	return T_ETH_INDIRECTION_TABLE_SIZE;
}
  2908. static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
  2909. u8 *hfunc)
  2910. {
  2911. struct bnx2x *bp = netdev_priv(dev);
  2912. u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
  2913. size_t i;
  2914. if (hfunc)
  2915. *hfunc = ETH_RSS_HASH_TOP;
  2916. if (!indir)
  2917. return 0;
  2918. /* Get the current configuration of the RSS indirection table */
  2919. bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
  2920. /*
  2921. * We can't use a memcpy() as an internal storage of an
  2922. * indirection table is a u8 array while indir->ring_index
  2923. * points to an array of u32.
  2924. *
  2925. * Indirection table contains the FW Client IDs, so we need to
  2926. * align the returned table to the Client ID of the leading RSS
  2927. * queue.
  2928. */
  2929. for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
  2930. indir[i] = ind_table[i] - bp->fp->cl_id;
  2931. return 0;
  2932. }
  2933. static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
  2934. const u8 *key, const u8 hfunc)
  2935. {
  2936. struct bnx2x *bp = netdev_priv(dev);
  2937. size_t i;
  2938. /* We require at least one supported parameter to be changed and no
  2939. * change in any of the unsupported parameters
  2940. */
  2941. if (key ||
  2942. (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
  2943. return -EOPNOTSUPP;
  2944. if (!indir)
  2945. return 0;
  2946. for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
  2947. /*
  2948. * The same as in bnx2x_get_rxfh: we can't use a memcpy()
  2949. * as an internal storage of an indirection table is a u8 array
  2950. * while indir->ring_index points to an array of u32.
  2951. *
  2952. * Indirection table contains the FW Client IDs, so we need to
  2953. * align the received table to the Client ID of the leading RSS
  2954. * queue
  2955. */
  2956. bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
  2957. }
  2958. if (bp->state == BNX2X_STATE_OPEN)
  2959. return bnx2x_config_rss_eth(bp, false);
  2960. return 0;
  2961. }
  2962. /**
  2963. * bnx2x_get_channels - gets the number of RSS queues.
  2964. *
  2965. * @dev: net device
  2966. * @channels: returns the number of max / current queues
  2967. */
  2968. static void bnx2x_get_channels(struct net_device *dev,
  2969. struct ethtool_channels *channels)
  2970. {
  2971. struct bnx2x *bp = netdev_priv(dev);
  2972. channels->max_combined = BNX2X_MAX_RSS_COUNT(bp);
  2973. channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp);
  2974. }
/**
 * bnx2x_change_num_queues - change the number of RSS queues.
 *
 * @bp:			bnx2x private structure
 * @num_rss:		requested number of ethernet (RSS) queues
 *
 * Re-configure interrupt mode to get the new number of MSI-X
 * vectors and re-add NAPI objects.
 */
static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
{
	/* MSI/MSI-X must be released before the vector count can change */
	bnx2x_disable_msi(bp);
	bp->num_ethernet_queues = num_rss;
	/* Total queue count also includes the CNIC (storage offload) queues */
	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
	bnx2x_set_int_mode(bp);
}
/**
 * bnx2x_set_channels - sets the number of RSS queues.
 *
 * @dev:		net device
 * @channels:		includes the number of queues requested
 *
 * Only "combined" channels are supported; changing the count while the
 * interface is up requires a full unload/reload of the NIC.
 */
static int bnx2x_set_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct bnx2x *bp = netdev_priv(dev);

	DP(BNX2X_MSG_ETHTOOL,
	   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
	   channels->rx_count, channels->tx_count, channels->other_count,
	   channels->combined_count);

	/* Queue layout is fixed while VFs are active */
	if (pci_num_vf(bp->pdev)) {
		DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
		return -EPERM;
	}

	/* We don't support separate rx / tx channels.
	 * We don't allow setting 'other' channels.
	 */
	if (channels->rx_count || channels->tx_count || channels->other_count
	    || (channels->combined_count == 0) ||
	    (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) {
		DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n");
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) {
		DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n");
		return 0;
	}

	/* Set the requested number of queues in bp context.
	 * Note that the actual number of queues created during load may be
	 * less than requested if memory is low.
	 */
	if (unlikely(!netif_running(dev))) {
		/* Device is down: just record the new count for next load */
		bnx2x_change_num_queues(bp, channels->combined_count);
		return 0;
	}

	/* Device is up: reload it with the new queue configuration */
	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	bnx2x_change_num_queues(bp, channels->combined_count);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
  3035. static int bnx2x_get_ts_info(struct net_device *dev,
  3036. struct ethtool_ts_info *info)
  3037. {
  3038. struct bnx2x *bp = netdev_priv(dev);
  3039. if (bp->flags & PTP_SUPPORTED) {
  3040. info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
  3041. SOF_TIMESTAMPING_RX_SOFTWARE |
  3042. SOF_TIMESTAMPING_SOFTWARE |
  3043. SOF_TIMESTAMPING_TX_HARDWARE |
  3044. SOF_TIMESTAMPING_RX_HARDWARE |
  3045. SOF_TIMESTAMPING_RAW_HARDWARE;
  3046. if (bp->ptp_clock)
  3047. info->phc_index = ptp_clock_index(bp->ptp_clock);
  3048. else
  3049. info->phc_index = -1;
  3050. info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
  3051. (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
  3052. (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
  3053. (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
  3054. info->tx_types = (1 << HWTSTAMP_TX_OFF)|(1 << HWTSTAMP_TX_ON);
  3055. return 0;
  3056. }
  3057. return ethtool_op_get_ts_info(dev, info);
  3058. }
/* ethtool operations exposed when the device is a physical function (PF);
 * see bnx2x_set_ethtool_ops() for how this table is selected over the
 * reduced VF table.
 */
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_dump_flag		= bnx2x_get_dump_flag,
	.get_dump_data		= bnx2x_get_dump_data,
	.set_dump		= bnx2x_set_dump,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_priv_flags		= bnx2x_get_private_flags,
	.get_strings		= bnx2x_get_strings,
	.set_phys_id		= bnx2x_set_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
	.get_rxnfc		= bnx2x_get_rxnfc,
	.set_rxnfc		= bnx2x_set_rxnfc,
	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
	.get_rxfh		= bnx2x_get_rxfh,
	.set_rxfh		= bnx2x_set_rxfh,
	.get_channels		= bnx2x_get_channels,
	.set_channels		= bnx2x_set_channels,
	.get_module_info	= bnx2x_get_module_info,
	.get_module_eeprom	= bnx2x_get_module_eeprom,
	.get_eee		= bnx2x_get_eee,
	.set_eee		= bnx2x_set_eee,
	.get_ts_info		= bnx2x_get_ts_info,
};
/* Reduced ethtool operations exposed when the device is a virtual
 * function (VF): link/phy, EEPROM, WoL, dump and PTP ops are omitted,
 * and get_settings is routed to the VF-specific handler.
 */
static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
	.get_settings		= bnx2x_get_vf_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.get_link		= bnx2x_get_link,
	.get_coalesce		= bnx2x_get_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
	.get_rxnfc		= bnx2x_get_rxnfc,
	.set_rxnfc		= bnx2x_set_rxnfc,
	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
	.get_rxfh		= bnx2x_get_rxfh,
	.set_rxfh		= bnx2x_set_rxfh,
	.get_channels		= bnx2x_get_channels,
	.set_channels		= bnx2x_set_channels,
};
  3122. void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
  3123. {
  3124. netdev->ethtool_ops = (IS_PF(bp)) ?
  3125. &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
  3126. }