cxgb3_main.c 87 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
2944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457
  1. /*
  2. * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33. #include <linux/module.h>
  34. #include <linux/moduleparam.h>
  35. #include <linux/init.h>
  36. #include <linux/pci.h>
  37. #include <linux/dma-mapping.h>
  38. #include <linux/netdevice.h>
  39. #include <linux/etherdevice.h>
  40. #include <linux/if_vlan.h>
  41. #include <linux/mdio.h>
  42. #include <linux/sockios.h>
  43. #include <linux/workqueue.h>
  44. #include <linux/proc_fs.h>
  45. #include <linux/rtnetlink.h>
  46. #include <linux/firmware.h>
  47. #include <linux/log2.h>
  48. #include <linux/stringify.h>
  49. #include <linux/sched.h>
  50. #include <linux/slab.h>
  51. #include <linux/nospec.h>
  52. #include <asm/uaccess.h>
  53. #include "common.h"
  54. #include "cxgb3_ioctl.h"
  55. #include "regs.h"
  56. #include "cxgb3_offload.h"
  57. #include "version.h"
  58. #include "cxgb3_ctl_defs.h"
  59. #include "t3_cpl.h"
  60. #include "firmware_exports.h"
  61. enum {
  62. MAX_TXQ_ENTRIES = 16384,
  63. MAX_CTRL_TXQ_ENTRIES = 1024,
  64. MAX_RSPQ_ENTRIES = 16384,
  65. MAX_RX_BUFFERS = 16384,
  66. MAX_RX_JUMBO_BUFFERS = 16384,
  67. MIN_TXQ_ENTRIES = 4,
  68. MIN_CTRL_TXQ_ENTRIES = 4,
  69. MIN_RSPQ_ENTRIES = 32,
  70. MIN_FL_ENTRIES = 32
  71. };
  72. #define PORT_MASK ((1 << MAX_NPORTS) - 1)
  73. #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
  74. NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
  75. NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
  76. #define EEPROM_MAGIC 0x38E2F10C
  77. #define CH_DEVICE(devid, idx) \
  78. { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
  79. static const struct pci_device_id cxgb3_pci_tbl[] = {
  80. CH_DEVICE(0x20, 0), /* PE9000 */
  81. CH_DEVICE(0x21, 1), /* T302E */
  82. CH_DEVICE(0x22, 2), /* T310E */
  83. CH_DEVICE(0x23, 3), /* T320X */
  84. CH_DEVICE(0x24, 1), /* T302X */
  85. CH_DEVICE(0x25, 3), /* T320E */
  86. CH_DEVICE(0x26, 2), /* T310X */
  87. CH_DEVICE(0x30, 2), /* T3B10 */
  88. CH_DEVICE(0x31, 3), /* T3B20 */
  89. CH_DEVICE(0x32, 1), /* T3B02 */
  90. CH_DEVICE(0x35, 6), /* T3C20-derived T3C10 */
  91. CH_DEVICE(0x36, 3), /* S320E-CR */
  92. CH_DEVICE(0x37, 7), /* N320E-G2 */
  93. {0,}
  94. };
  95. MODULE_DESCRIPTION(DRV_DESC);
  96. MODULE_AUTHOR("Chelsio Communications");
  97. MODULE_LICENSE("Dual BSD/GPL");
  98. MODULE_VERSION(DRV_VERSION);
  99. MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
  100. static int dflt_msg_enable = DFLT_MSG_ENABLE;
  101. module_param(dflt_msg_enable, int, 0644);
  102. MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
  103. /*
  104. * The driver uses the best interrupt scheme available on a platform in the
  105. * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
  106. * of these schemes the driver may consider as follows:
  107. *
  108. * msi = 2: choose from among all three options
  109. * msi = 1: only consider MSI and pin interrupts
  110. * msi = 0: force pin interrupts
  111. */
  112. static int msi = 2;
  113. module_param(msi, int, 0644);
  114. MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
  115. /*
  116. * The driver enables offload as a default.
  117. * To disable it, use ofld_disable = 1.
  118. */
  119. static int ofld_disable = 0;
  120. module_param(ofld_disable, int, 0644);
  121. MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
  122. /*
  123. * We have work elements that we need to cancel when an interface is taken
  124. * down. Normally the work elements would be executed by keventd but that
  125. * can deadlock because of linkwatch. If our close method takes the rtnl
  126. * lock and linkwatch is ahead of our work elements in keventd, linkwatch
  127. * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
  128. * for our work to complete. Get our own work queue to solve this.
  129. */
  130. struct workqueue_struct *cxgb3_wq;
  131. /**
  132. * link_report - show link status and link speed/duplex
  133. * @p: the port whose settings are to be reported
  134. *
  135. * Shows the link status, speed, and duplex of a port.
  136. */
  137. static void link_report(struct net_device *dev)
  138. {
  139. if (!netif_carrier_ok(dev))
  140. netdev_info(dev, "link down\n");
  141. else {
  142. const char *s = "10Mbps";
  143. const struct port_info *p = netdev_priv(dev);
  144. switch (p->link_config.speed) {
  145. case SPEED_10000:
  146. s = "10Gbps";
  147. break;
  148. case SPEED_1000:
  149. s = "1000Mbps";
  150. break;
  151. case SPEED_100:
  152. s = "100Mbps";
  153. break;
  154. }
  155. netdev_info(dev, "link up, %s, %s-duplex\n",
  156. s, p->link_config.duplex == DUPLEX_FULL
  157. ? "full" : "half");
  158. }
  159. }
  160. static void enable_tx_fifo_drain(struct adapter *adapter,
  161. struct port_info *pi)
  162. {
  163. t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
  164. F_ENDROPPKT);
  165. t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
  166. t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
  167. t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
  168. }
  169. static void disable_tx_fifo_drain(struct adapter *adapter,
  170. struct port_info *pi)
  171. {
  172. t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
  173. F_ENDROPPKT, 0);
  174. }
  175. void t3_os_link_fault(struct adapter *adap, int port_id, int state)
  176. {
  177. struct net_device *dev = adap->port[port_id];
  178. struct port_info *pi = netdev_priv(dev);
  179. if (state == netif_carrier_ok(dev))
  180. return;
  181. if (state) {
  182. struct cmac *mac = &pi->mac;
  183. netif_carrier_on(dev);
  184. disable_tx_fifo_drain(adap, pi);
  185. /* Clear local faults */
  186. t3_xgm_intr_disable(adap, pi->port_id);
  187. t3_read_reg(adap, A_XGM_INT_STATUS +
  188. pi->mac.offset);
  189. t3_write_reg(adap,
  190. A_XGM_INT_CAUSE + pi->mac.offset,
  191. F_XGM_INT);
  192. t3_set_reg_field(adap,
  193. A_XGM_INT_ENABLE +
  194. pi->mac.offset,
  195. F_XGM_INT, F_XGM_INT);
  196. t3_xgm_intr_enable(adap, pi->port_id);
  197. t3_mac_enable(mac, MAC_DIRECTION_TX);
  198. } else {
  199. netif_carrier_off(dev);
  200. /* Flush TX FIFO */
  201. enable_tx_fifo_drain(adap, pi);
  202. }
  203. link_report(dev);
  204. }
  205. /**
  206. * t3_os_link_changed - handle link status changes
  207. * @adapter: the adapter associated with the link change
  208. * @port_id: the port index whose limk status has changed
  209. * @link_stat: the new status of the link
  210. * @speed: the new speed setting
  211. * @duplex: the new duplex setting
  212. * @pause: the new flow-control setting
  213. *
  214. * This is the OS-dependent handler for link status changes. The OS
  215. * neutral handler takes care of most of the processing for these events,
  216. * then calls this handler for any OS-specific processing.
  217. */
  218. void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
  219. int speed, int duplex, int pause)
  220. {
  221. struct net_device *dev = adapter->port[port_id];
  222. struct port_info *pi = netdev_priv(dev);
  223. struct cmac *mac = &pi->mac;
  224. /* Skip changes from disabled ports. */
  225. if (!netif_running(dev))
  226. return;
  227. if (link_stat != netif_carrier_ok(dev)) {
  228. if (link_stat) {
  229. disable_tx_fifo_drain(adapter, pi);
  230. t3_mac_enable(mac, MAC_DIRECTION_RX);
  231. /* Clear local faults */
  232. t3_xgm_intr_disable(adapter, pi->port_id);
  233. t3_read_reg(adapter, A_XGM_INT_STATUS +
  234. pi->mac.offset);
  235. t3_write_reg(adapter,
  236. A_XGM_INT_CAUSE + pi->mac.offset,
  237. F_XGM_INT);
  238. t3_set_reg_field(adapter,
  239. A_XGM_INT_ENABLE + pi->mac.offset,
  240. F_XGM_INT, F_XGM_INT);
  241. t3_xgm_intr_enable(adapter, pi->port_id);
  242. netif_carrier_on(dev);
  243. } else {
  244. netif_carrier_off(dev);
  245. t3_xgm_intr_disable(adapter, pi->port_id);
  246. t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
  247. t3_set_reg_field(adapter,
  248. A_XGM_INT_ENABLE + pi->mac.offset,
  249. F_XGM_INT, 0);
  250. if (is_10G(adapter))
  251. pi->phy.ops->power_down(&pi->phy, 1);
  252. t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
  253. t3_mac_disable(mac, MAC_DIRECTION_RX);
  254. t3_link_start(&pi->phy, mac, &pi->link_config);
  255. /* Flush TX FIFO */
  256. enable_tx_fifo_drain(adapter, pi);
  257. }
  258. link_report(dev);
  259. }
  260. }
  261. /**
  262. * t3_os_phymod_changed - handle PHY module changes
  263. * @phy: the PHY reporting the module change
  264. * @mod_type: new module type
  265. *
  266. * This is the OS-dependent handler for PHY module changes. It is
  267. * invoked when a PHY module is removed or inserted for any OS-specific
  268. * processing.
  269. */
  270. void t3_os_phymod_changed(struct adapter *adap, int port_id)
  271. {
  272. static const char *mod_str[] = {
  273. NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
  274. };
  275. const struct net_device *dev = adap->port[port_id];
  276. const struct port_info *pi = netdev_priv(dev);
  277. if (pi->phy.modtype == phy_modtype_none)
  278. netdev_info(dev, "PHY module unplugged\n");
  279. else
  280. netdev_info(dev, "%s PHY module inserted\n",
  281. mod_str[pi->phy.modtype]);
  282. }
  283. static void cxgb_set_rxmode(struct net_device *dev)
  284. {
  285. struct port_info *pi = netdev_priv(dev);
  286. t3_mac_set_rx_mode(&pi->mac, dev);
  287. }
  288. /**
  289. * link_start - enable a port
  290. * @dev: the device to enable
  291. *
  292. * Performs the MAC and PHY actions needed to enable a port.
  293. */
  294. static void link_start(struct net_device *dev)
  295. {
  296. struct port_info *pi = netdev_priv(dev);
  297. struct cmac *mac = &pi->mac;
  298. t3_mac_reset(mac);
  299. t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
  300. t3_mac_set_mtu(mac, dev->mtu);
  301. t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
  302. t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
  303. t3_mac_set_rx_mode(mac, dev);
  304. t3_link_start(&pi->phy, mac, &pi->link_config);
  305. t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
  306. }
  307. static inline void cxgb_disable_msi(struct adapter *adapter)
  308. {
  309. if (adapter->flags & USING_MSIX) {
  310. pci_disable_msix(adapter->pdev);
  311. adapter->flags &= ~USING_MSIX;
  312. } else if (adapter->flags & USING_MSI) {
  313. pci_disable_msi(adapter->pdev);
  314. adapter->flags &= ~USING_MSI;
  315. }
  316. }
  317. /*
  318. * Interrupt handler for asynchronous events used with MSI-X.
  319. */
  320. static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
  321. {
  322. t3_slow_intr_handler(cookie);
  323. return IRQ_HANDLED;
  324. }
  325. /*
  326. * Name the MSI-X interrupts.
  327. */
  328. static void name_msix_vecs(struct adapter *adap)
  329. {
  330. int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
  331. snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
  332. adap->msix_info[0].desc[n] = 0;
  333. for_each_port(adap, j) {
  334. struct net_device *d = adap->port[j];
  335. const struct port_info *pi = netdev_priv(d);
  336. for (i = 0; i < pi->nqsets; i++, msi_idx++) {
  337. snprintf(adap->msix_info[msi_idx].desc, n,
  338. "%s-%d", d->name, pi->first_qset + i);
  339. adap->msix_info[msi_idx].desc[n] = 0;
  340. }
  341. }
  342. }
  343. static int request_msix_data_irqs(struct adapter *adap)
  344. {
  345. int i, j, err, qidx = 0;
  346. for_each_port(adap, i) {
  347. int nqsets = adap2pinfo(adap, i)->nqsets;
  348. for (j = 0; j < nqsets; ++j) {
  349. err = request_irq(adap->msix_info[qidx + 1].vec,
  350. t3_intr_handler(adap,
  351. adap->sge.qs[qidx].
  352. rspq.polling), 0,
  353. adap->msix_info[qidx + 1].desc,
  354. &adap->sge.qs[qidx]);
  355. if (err) {
  356. while (--qidx >= 0)
  357. free_irq(adap->msix_info[qidx + 1].vec,
  358. &adap->sge.qs[qidx]);
  359. return err;
  360. }
  361. qidx++;
  362. }
  363. }
  364. return 0;
  365. }
  366. static void free_irq_resources(struct adapter *adapter)
  367. {
  368. if (adapter->flags & USING_MSIX) {
  369. int i, n = 0;
  370. free_irq(adapter->msix_info[0].vec, adapter);
  371. for_each_port(adapter, i)
  372. n += adap2pinfo(adapter, i)->nqsets;
  373. for (i = 0; i < n; ++i)
  374. free_irq(adapter->msix_info[i + 1].vec,
  375. &adapter->sge.qs[i]);
  376. } else
  377. free_irq(adapter->pdev->irq, adapter);
  378. }
  379. static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
  380. unsigned long n)
  381. {
  382. int attempts = 10;
  383. while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
  384. if (!--attempts)
  385. return -ETIMEDOUT;
  386. msleep(10);
  387. }
  388. return 0;
  389. }
  390. static int init_tp_parity(struct adapter *adap)
  391. {
  392. int i;
  393. struct sk_buff *skb;
  394. struct cpl_set_tcb_field *greq;
  395. unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;
  396. t3_tp_set_offload_mode(adap, 1);
  397. for (i = 0; i < 16; i++) {
  398. struct cpl_smt_write_req *req;
  399. skb = alloc_skb(sizeof(*req), GFP_KERNEL);
  400. if (!skb)
  401. skb = adap->nofail_skb;
  402. if (!skb)
  403. goto alloc_skb_fail;
  404. req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
  405. memset(req, 0, sizeof(*req));
  406. req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
  407. OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
  408. req->mtu_idx = NMTUS - 1;
  409. req->iff = i;
  410. t3_mgmt_tx(adap, skb);
  411. if (skb == adap->nofail_skb) {
  412. await_mgmt_replies(adap, cnt, i + 1);
  413. adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
  414. if (!adap->nofail_skb)
  415. goto alloc_skb_fail;
  416. }
  417. }
  418. for (i = 0; i < 2048; i++) {
  419. struct cpl_l2t_write_req *req;
  420. skb = alloc_skb(sizeof(*req), GFP_KERNEL);
  421. if (!skb)
  422. skb = adap->nofail_skb;
  423. if (!skb)
  424. goto alloc_skb_fail;
  425. req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
  426. memset(req, 0, sizeof(*req));
  427. req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
  428. OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
  429. req->params = htonl(V_L2T_W_IDX(i));
  430. t3_mgmt_tx(adap, skb);
  431. if (skb == adap->nofail_skb) {
  432. await_mgmt_replies(adap, cnt, 16 + i + 1);
  433. adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
  434. if (!adap->nofail_skb)
  435. goto alloc_skb_fail;
  436. }
  437. }
  438. for (i = 0; i < 2048; i++) {
  439. struct cpl_rte_write_req *req;
  440. skb = alloc_skb(sizeof(*req), GFP_KERNEL);
  441. if (!skb)
  442. skb = adap->nofail_skb;
  443. if (!skb)
  444. goto alloc_skb_fail;
  445. req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
  446. memset(req, 0, sizeof(*req));
  447. req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
  448. OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
  449. req->l2t_idx = htonl(V_L2T_W_IDX(i));
  450. t3_mgmt_tx(adap, skb);
  451. if (skb == adap->nofail_skb) {
  452. await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
  453. adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
  454. if (!adap->nofail_skb)
  455. goto alloc_skb_fail;
  456. }
  457. }
  458. skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
  459. if (!skb)
  460. skb = adap->nofail_skb;
  461. if (!skb)
  462. goto alloc_skb_fail;
  463. greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
  464. memset(greq, 0, sizeof(*greq));
  465. greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
  466. OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
  467. greq->mask = cpu_to_be64(1);
  468. t3_mgmt_tx(adap, skb);
  469. i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
  470. if (skb == adap->nofail_skb) {
  471. i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
  472. adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
  473. }
  474. t3_tp_set_offload_mode(adap, 0);
  475. return i;
  476. alloc_skb_fail:
  477. t3_tp_set_offload_mode(adap, 0);
  478. return -ENOMEM;
  479. }
  480. /**
  481. * setup_rss - configure RSS
  482. * @adap: the adapter
  483. *
  484. * Sets up RSS to distribute packets to multiple receive queues. We
  485. * configure the RSS CPU lookup table to distribute to the number of HW
  486. * receive queues, and the response queue lookup table to narrow that
  487. * down to the response queues actually configured for each port.
  488. * We always configure the RSS mapping for two ports since the mapping
  489. * table has plenty of entries.
  490. */
  491. static void setup_rss(struct adapter *adap)
  492. {
  493. int i;
  494. unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
  495. unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
  496. u8 cpus[SGE_QSETS + 1];
  497. u16 rspq_map[RSS_TABLE_SIZE];
  498. for (i = 0; i < SGE_QSETS; ++i)
  499. cpus[i] = i;
  500. cpus[SGE_QSETS] = 0xff; /* terminator */
  501. for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
  502. rspq_map[i] = i % nq0;
  503. rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
  504. }
  505. t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
  506. F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
  507. V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
  508. }
  509. static void ring_dbs(struct adapter *adap)
  510. {
  511. int i, j;
  512. for (i = 0; i < SGE_QSETS; i++) {
  513. struct sge_qset *qs = &adap->sge.qs[i];
  514. if (qs->adap)
  515. for (j = 0; j < SGE_TXQ_PER_SET; j++)
  516. t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
  517. }
  518. }
  519. static void init_napi(struct adapter *adap)
  520. {
  521. int i;
  522. for (i = 0; i < SGE_QSETS; i++) {
  523. struct sge_qset *qs = &adap->sge.qs[i];
  524. if (qs->adap)
  525. netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
  526. 64);
  527. }
  528. /*
  529. * netif_napi_add() can be called only once per napi_struct because it
  530. * adds each new napi_struct to a list. Be careful not to call it a
  531. * second time, e.g., during EEH recovery, by making a note of it.
  532. */
  533. adap->flags |= NAPI_INIT;
  534. }
  535. /*
  536. * Wait until all NAPI handlers are descheduled. This includes the handlers of
  537. * both netdevices representing interfaces and the dummy ones for the extra
  538. * queues.
  539. */
  540. static void quiesce_rx(struct adapter *adap)
  541. {
  542. int i;
  543. for (i = 0; i < SGE_QSETS; i++)
  544. if (adap->sge.qs[i].adap)
  545. napi_disable(&adap->sge.qs[i].napi);
  546. }
  547. static void enable_all_napi(struct adapter *adap)
  548. {
  549. int i;
  550. for (i = 0; i < SGE_QSETS; i++)
  551. if (adap->sge.qs[i].adap)
  552. napi_enable(&adap->sge.qs[i].napi);
  553. }
  554. /**
  555. * setup_sge_qsets - configure SGE Tx/Rx/response queues
  556. * @adap: the adapter
  557. *
  558. * Determines how many sets of SGE queues to use and initializes them.
  559. * We support multiple queue sets per port if we have MSI-X, otherwise
  560. * just one queue set per port.
  561. */
  562. static int setup_sge_qsets(struct adapter *adap)
  563. {
  564. int i, j, err, irq_idx = 0, qset_idx = 0;
  565. unsigned int ntxq = SGE_TXQ_PER_SET;
  566. if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
  567. irq_idx = -1;
  568. for_each_port(adap, i) {
  569. struct net_device *dev = adap->port[i];
  570. struct port_info *pi = netdev_priv(dev);
  571. pi->qs = &adap->sge.qs[pi->first_qset];
  572. for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
  573. err = t3_sge_alloc_qset(adap, qset_idx, 1,
  574. (adap->flags & USING_MSIX) ? qset_idx + 1 :
  575. irq_idx,
  576. &adap->params.sge.qset[qset_idx], ntxq, dev,
  577. netdev_get_tx_queue(dev, j));
  578. if (err) {
  579. t3_free_sge_resources(adap);
  580. return err;
  581. }
  582. }
  583. }
  584. return 0;
  585. }
  586. static ssize_t attr_show(struct device *d, char *buf,
  587. ssize_t(*format) (struct net_device *, char *))
  588. {
  589. ssize_t len;
  590. /* Synchronize with ioctls that may shut down the device */
  591. rtnl_lock();
  592. len = (*format) (to_net_dev(d), buf);
  593. rtnl_unlock();
  594. return len;
  595. }
  596. static ssize_t attr_store(struct device *d,
  597. const char *buf, size_t len,
  598. ssize_t(*set) (struct net_device *, unsigned int),
  599. unsigned int min_val, unsigned int max_val)
  600. {
  601. char *endp;
  602. ssize_t ret;
  603. unsigned int val;
  604. if (!capable(CAP_NET_ADMIN))
  605. return -EPERM;
  606. val = simple_strtoul(buf, &endp, 0);
  607. if (endp == buf || val < min_val || val > max_val)
  608. return -EINVAL;
  609. rtnl_lock();
  610. ret = (*set) (to_net_dev(d), val);
  611. if (!ret)
  612. ret = len;
  613. rtnl_unlock();
  614. return ret;
  615. }
  616. #define CXGB3_SHOW(name, val_expr) \
  617. static ssize_t format_##name(struct net_device *dev, char *buf) \
  618. { \
  619. struct port_info *pi = netdev_priv(dev); \
  620. struct adapter *adap = pi->adapter; \
  621. return sprintf(buf, "%u\n", val_expr); \
  622. } \
  623. static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
  624. char *buf) \
  625. { \
  626. return attr_show(d, buf, format_##name); \
  627. }
  628. static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
  629. {
  630. struct port_info *pi = netdev_priv(dev);
  631. struct adapter *adap = pi->adapter;
  632. int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
  633. if (adap->flags & FULL_INIT_DONE)
  634. return -EBUSY;
  635. if (val && adap->params.rev == 0)
  636. return -EINVAL;
  637. if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
  638. min_tids)
  639. return -EINVAL;
  640. adap->params.mc5.nfilters = val;
  641. return 0;
  642. }
  643. static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
  644. const char *buf, size_t len)
  645. {
  646. return attr_store(d, buf, len, set_nfilters, 0, ~0);
  647. }
  648. static ssize_t set_nservers(struct net_device *dev, unsigned int val)
  649. {
  650. struct port_info *pi = netdev_priv(dev);
  651. struct adapter *adap = pi->adapter;
  652. if (adap->flags & FULL_INIT_DONE)
  653. return -EBUSY;
  654. if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
  655. MC5_MIN_TIDS)
  656. return -EINVAL;
  657. adap->params.mc5.nservers = val;
  658. return 0;
  659. }
  660. static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
  661. const char *buf, size_t len)
  662. {
  663. return attr_store(d, buf, len, set_nservers, 0, ~0);
  664. }
  665. #define CXGB3_ATTR_R(name, val_expr) \
  666. CXGB3_SHOW(name, val_expr) \
  667. static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
  668. #define CXGB3_ATTR_RW(name, val_expr, store_method) \
  669. CXGB3_SHOW(name, val_expr) \
  670. static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
  671. CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
  672. CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
  673. CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
  674. static struct attribute *cxgb3_attrs[] = {
  675. &dev_attr_cam_size.attr,
  676. &dev_attr_nfilters.attr,
  677. &dev_attr_nservers.attr,
  678. NULL
  679. };
  680. static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
  681. static ssize_t tm_attr_show(struct device *d,
  682. char *buf, int sched)
  683. {
  684. struct port_info *pi = netdev_priv(to_net_dev(d));
  685. struct adapter *adap = pi->adapter;
  686. unsigned int v, addr, bpt, cpt;
  687. ssize_t len;
  688. addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
  689. rtnl_lock();
  690. t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
  691. v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
  692. if (sched & 1)
  693. v >>= 16;
  694. bpt = (v >> 8) & 0xff;
  695. cpt = v & 0xff;
  696. if (!cpt)
  697. len = sprintf(buf, "disabled\n");
  698. else {
  699. v = (adap->params.vpd.cclk * 1000) / cpt;
  700. len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
  701. }
  702. rtnl_unlock();
  703. return len;
  704. }
  705. static ssize_t tm_attr_store(struct device *d,
  706. const char *buf, size_t len, int sched)
  707. {
  708. struct port_info *pi = netdev_priv(to_net_dev(d));
  709. struct adapter *adap = pi->adapter;
  710. unsigned int val;
  711. char *endp;
  712. ssize_t ret;
  713. if (!capable(CAP_NET_ADMIN))
  714. return -EPERM;
  715. val = simple_strtoul(buf, &endp, 0);
  716. if (endp == buf || val > 10000000)
  717. return -EINVAL;
  718. rtnl_lock();
  719. ret = t3_config_sched(adap, val, sched);
  720. if (!ret)
  721. ret = len;
  722. rtnl_unlock();
  723. return ret;
  724. }
  725. #define TM_ATTR(name, sched) \
  726. static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
  727. char *buf) \
  728. { \
  729. return tm_attr_show(d, buf, sched); \
  730. } \
  731. static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
  732. const char *buf, size_t len) \
  733. { \
  734. return tm_attr_store(d, buf, len, sched); \
  735. } \
  736. static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
  737. TM_ATTR(sched0, 0);
  738. TM_ATTR(sched1, 1);
  739. TM_ATTR(sched2, 2);
  740. TM_ATTR(sched3, 3);
  741. TM_ATTR(sched4, 4);
  742. TM_ATTR(sched5, 5);
  743. TM_ATTR(sched6, 6);
  744. TM_ATTR(sched7, 7);
  745. static struct attribute *offload_attrs[] = {
  746. &dev_attr_sched0.attr,
  747. &dev_attr_sched1.attr,
  748. &dev_attr_sched2.attr,
  749. &dev_attr_sched3.attr,
  750. &dev_attr_sched4.attr,
  751. &dev_attr_sched5.attr,
  752. &dev_attr_sched6.attr,
  753. &dev_attr_sched7.attr,
  754. NULL
  755. };
  756. static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
  757. /*
  758. * Sends an sk_buff to an offload queue driver
  759. * after dealing with any active network taps.
  760. */
  761. static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
  762. {
  763. int ret;
  764. local_bh_disable();
  765. ret = t3_offload_tx(tdev, skb);
  766. local_bh_enable();
  767. return ret;
  768. }
  769. static int write_smt_entry(struct adapter *adapter, int idx)
  770. {
  771. struct cpl_smt_write_req *req;
  772. struct port_info *pi = netdev_priv(adapter->port[idx]);
  773. struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
  774. if (!skb)
  775. return -ENOMEM;
  776. req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
  777. req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
  778. OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
  779. req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
  780. req->iff = idx;
  781. memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
  782. memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
  783. skb->priority = 1;
  784. offload_tx(&adapter->tdev, skb);
  785. return 0;
  786. }
  787. static int init_smt(struct adapter *adapter)
  788. {
  789. int i;
  790. for_each_port(adapter, i)
  791. write_smt_entry(adapter, i);
  792. return 0;
  793. }
  794. static void init_port_mtus(struct adapter *adapter)
  795. {
  796. unsigned int mtus = adapter->port[0]->mtu;
  797. if (adapter->port[1])
  798. mtus |= adapter->port[1]->mtu << 16;
  799. t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
  800. }
  801. static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
  802. int hi, int port)
  803. {
  804. struct sk_buff *skb;
  805. struct mngt_pktsched_wr *req;
  806. int ret;
  807. skb = alloc_skb(sizeof(*req), GFP_KERNEL);
  808. if (!skb)
  809. skb = adap->nofail_skb;
  810. if (!skb)
  811. return -ENOMEM;
  812. req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
  813. req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
  814. req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
  815. req->sched = sched;
  816. req->idx = qidx;
  817. req->min = lo;
  818. req->max = hi;
  819. req->binding = port;
  820. ret = t3_mgmt_tx(adap, skb);
  821. if (skb == adap->nofail_skb) {
  822. adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
  823. GFP_KERNEL);
  824. if (!adap->nofail_skb)
  825. ret = -ENOMEM;
  826. }
  827. return ret;
  828. }
  829. static int bind_qsets(struct adapter *adap)
  830. {
  831. int i, j, err = 0;
  832. for_each_port(adap, i) {
  833. const struct port_info *pi = adap2pinfo(adap, i);
  834. for (j = 0; j < pi->nqsets; ++j) {
  835. int ret = send_pktsched_cmd(adap, 1,
  836. pi->first_qset + j, -1,
  837. -1, i);
  838. if (ret)
  839. err = ret;
  840. }
  841. }
  842. return err;
  843. }
  844. #define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
  845. __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
  846. #define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
  847. #define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
  848. __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
  849. #define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
  850. #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
  851. #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
  852. #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
  853. MODULE_FIRMWARE(FW_FNAME);
  854. MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
  855. MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
  856. MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
  857. MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
  858. MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
  859. static inline const char *get_edc_fw_name(int edc_idx)
  860. {
  861. const char *fw_name = NULL;
  862. switch (edc_idx) {
  863. case EDC_OPT_AEL2005:
  864. fw_name = AEL2005_OPT_EDC_NAME;
  865. break;
  866. case EDC_TWX_AEL2005:
  867. fw_name = AEL2005_TWX_EDC_NAME;
  868. break;
  869. case EDC_TWX_AEL2020:
  870. fw_name = AEL2020_TWX_EDC_NAME;
  871. break;
  872. }
  873. return fw_name;
  874. }
  875. int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
  876. {
  877. struct adapter *adapter = phy->adapter;
  878. const struct firmware *fw;
  879. const char *fw_name;
  880. u32 csum;
  881. const __be32 *p;
  882. u16 *cache = phy->phy_cache;
  883. int i, ret = -EINVAL;
  884. fw_name = get_edc_fw_name(edc_idx);
  885. if (fw_name)
  886. ret = request_firmware(&fw, fw_name, &adapter->pdev->dev);
  887. if (ret < 0) {
  888. dev_err(&adapter->pdev->dev,
  889. "could not upgrade firmware: unable to load %s\n",
  890. fw_name);
  891. return ret;
  892. }
  893. /* check size, take checksum in account */
  894. if (fw->size > size + 4) {
  895. CH_ERR(adapter, "firmware image too large %u, expected %d\n",
  896. (unsigned int)fw->size, size + 4);
  897. ret = -EINVAL;
  898. }
  899. /* compute checksum */
  900. p = (const __be32 *)fw->data;
  901. for (csum = 0, i = 0; i < fw->size / sizeof(csum); i++)
  902. csum += ntohl(p[i]);
  903. if (csum != 0xffffffff) {
  904. CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
  905. csum);
  906. ret = -EINVAL;
  907. }
  908. for (i = 0; i < size / 4 ; i++) {
  909. *cache++ = (be32_to_cpu(p[i]) & 0xffff0000) >> 16;
  910. *cache++ = be32_to_cpu(p[i]) & 0xffff;
  911. }
  912. release_firmware(fw);
  913. return ret;
  914. }
  915. static int upgrade_fw(struct adapter *adap)
  916. {
  917. int ret;
  918. const struct firmware *fw;
  919. struct device *dev = &adap->pdev->dev;
  920. ret = request_firmware(&fw, FW_FNAME, dev);
  921. if (ret < 0) {
  922. dev_err(dev, "could not upgrade firmware: unable to load %s\n",
  923. FW_FNAME);
  924. return ret;
  925. }
  926. ret = t3_load_fw(adap, fw->data, fw->size);
  927. release_firmware(fw);
  928. if (ret == 0)
  929. dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
  930. FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
  931. else
  932. dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
  933. FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
  934. return ret;
  935. }
  936. static inline char t3rev2char(struct adapter *adapter)
  937. {
  938. char rev = 0;
  939. switch(adapter->params.rev) {
  940. case T3_REV_B:
  941. case T3_REV_B2:
  942. rev = 'b';
  943. break;
  944. case T3_REV_C:
  945. rev = 'c';
  946. break;
  947. }
  948. return rev;
  949. }
  950. static int update_tpsram(struct adapter *adap)
  951. {
  952. const struct firmware *tpsram;
  953. char buf[64];
  954. struct device *dev = &adap->pdev->dev;
  955. int ret;
  956. char rev;
  957. rev = t3rev2char(adap);
  958. if (!rev)
  959. return 0;
  960. snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
  961. ret = request_firmware(&tpsram, buf, dev);
  962. if (ret < 0) {
  963. dev_err(dev, "could not load TP SRAM: unable to load %s\n",
  964. buf);
  965. return ret;
  966. }
  967. ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
  968. if (ret)
  969. goto release_tpsram;
  970. ret = t3_set_proto_sram(adap, tpsram->data);
  971. if (ret == 0)
  972. dev_info(dev,
  973. "successful update of protocol engine "
  974. "to %d.%d.%d\n",
  975. TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
  976. else
  977. dev_err(dev, "failed to update of protocol engine %d.%d.%d\n",
  978. TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
  979. if (ret)
  980. dev_err(dev, "loading protocol SRAM failed\n");
  981. release_tpsram:
  982. release_firmware(tpsram);
  983. return ret;
  984. }
  985. /**
  986. * t3_synchronize_rx - wait for current Rx processing on a port to complete
  987. * @adap: the adapter
  988. * @p: the port
  989. *
  990. * Ensures that current Rx processing on any of the queues associated with
  991. * the given port completes before returning. We do this by acquiring and
  992. * releasing the locks of the response queues associated with the port.
  993. */
  994. static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
  995. {
  996. int i;
  997. for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
  998. struct sge_rspq *q = &adap->sge.qs[i].rspq;
  999. spin_lock_irq(&q->lock);
  1000. spin_unlock_irq(&q->lock);
  1001. }
  1002. }
  1003. static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
  1004. {
  1005. struct port_info *pi = netdev_priv(dev);
  1006. struct adapter *adapter = pi->adapter;
  1007. if (adapter->params.rev > 0) {
  1008. t3_set_vlan_accel(adapter, 1 << pi->port_id,
  1009. features & NETIF_F_HW_VLAN_CTAG_RX);
  1010. } else {
  1011. /* single control for all ports */
  1012. unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_CTAG_RX;
  1013. for_each_port(adapter, i)
  1014. have_vlans |=
  1015. adapter->port[i]->features &
  1016. NETIF_F_HW_VLAN_CTAG_RX;
  1017. t3_set_vlan_accel(adapter, 1, have_vlans);
  1018. }
  1019. t3_synchronize_rx(adapter, pi);
  1020. }
  1021. /**
  1022. * cxgb_up - enable the adapter
  1023. * @adapter: adapter being enabled
  1024. *
  1025. * Called when the first port is enabled, this function performs the
  1026. * actions necessary to make an adapter operational, such as completing
  1027. * the initialization of HW modules, and enabling interrupts.
  1028. *
  1029. * Must be called with the rtnl lock held.
  1030. */
  1031. static int cxgb_up(struct adapter *adap)
  1032. {
  1033. int i, err;
  1034. if (!(adap->flags & FULL_INIT_DONE)) {
  1035. err = t3_check_fw_version(adap);
  1036. if (err == -EINVAL) {
  1037. err = upgrade_fw(adap);
  1038. CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n",
  1039. FW_VERSION_MAJOR, FW_VERSION_MINOR,
  1040. FW_VERSION_MICRO, err ? "failed" : "succeeded");
  1041. }
  1042. err = t3_check_tpsram_version(adap);
  1043. if (err == -EINVAL) {
  1044. err = update_tpsram(adap);
  1045. CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n",
  1046. TP_VERSION_MAJOR, TP_VERSION_MINOR,
  1047. TP_VERSION_MICRO, err ? "failed" : "succeeded");
  1048. }
  1049. /*
  1050. * Clear interrupts now to catch errors if t3_init_hw fails.
  1051. * We clear them again later as initialization may trigger
  1052. * conditions that can interrupt.
  1053. */
  1054. t3_intr_clear(adap);
  1055. err = t3_init_hw(adap, 0);
  1056. if (err)
  1057. goto out;
  1058. t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
  1059. t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
  1060. err = setup_sge_qsets(adap);
  1061. if (err)
  1062. goto out;
  1063. for_each_port(adap, i)
  1064. cxgb_vlan_mode(adap->port[i], adap->port[i]->features);
  1065. setup_rss(adap);
  1066. if (!(adap->flags & NAPI_INIT))
  1067. init_napi(adap);
  1068. t3_start_sge_timers(adap);
  1069. adap->flags |= FULL_INIT_DONE;
  1070. }
  1071. t3_intr_clear(adap);
  1072. if (adap->flags & USING_MSIX) {
  1073. name_msix_vecs(adap);
  1074. err = request_irq(adap->msix_info[0].vec,
  1075. t3_async_intr_handler, 0,
  1076. adap->msix_info[0].desc, adap);
  1077. if (err)
  1078. goto irq_err;
  1079. err = request_msix_data_irqs(adap);
  1080. if (err) {
  1081. free_irq(adap->msix_info[0].vec, adap);
  1082. goto irq_err;
  1083. }
  1084. } else if ((err = request_irq(adap->pdev->irq,
  1085. t3_intr_handler(adap,
  1086. adap->sge.qs[0].rspq.
  1087. polling),
  1088. (adap->flags & USING_MSI) ?
  1089. 0 : IRQF_SHARED,
  1090. adap->name, adap)))
  1091. goto irq_err;
  1092. enable_all_napi(adap);
  1093. t3_sge_start(adap);
  1094. t3_intr_enable(adap);
  1095. if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
  1096. is_offload(adap) && init_tp_parity(adap) == 0)
  1097. adap->flags |= TP_PARITY_INIT;
  1098. if (adap->flags & TP_PARITY_INIT) {
  1099. t3_write_reg(adap, A_TP_INT_CAUSE,
  1100. F_CMCACHEPERR | F_ARPLUTPERR);
  1101. t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
  1102. }
  1103. if (!(adap->flags & QUEUES_BOUND)) {
  1104. int ret = bind_qsets(adap);
  1105. if (ret < 0) {
  1106. CH_ERR(adap, "failed to bind qsets, err %d\n", ret);
  1107. t3_intr_disable(adap);
  1108. free_irq_resources(adap);
  1109. err = ret;
  1110. goto out;
  1111. }
  1112. adap->flags |= QUEUES_BOUND;
  1113. }
  1114. out:
  1115. return err;
  1116. irq_err:
  1117. CH_ERR(adap, "request_irq failed, err %d\n", err);
  1118. goto out;
  1119. }
  1120. /*
  1121. * Release resources when all the ports and offloading have been stopped.
  1122. */
  1123. static void cxgb_down(struct adapter *adapter, int on_wq)
  1124. {
  1125. t3_sge_stop(adapter);
  1126. spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
  1127. t3_intr_disable(adapter);
  1128. spin_unlock_irq(&adapter->work_lock);
  1129. free_irq_resources(adapter);
  1130. quiesce_rx(adapter);
  1131. t3_sge_stop(adapter);
  1132. if (!on_wq)
  1133. flush_workqueue(cxgb3_wq);/* wait for external IRQ handler */
  1134. }
  1135. static void schedule_chk_task(struct adapter *adap)
  1136. {
  1137. unsigned int timeo;
  1138. timeo = adap->params.linkpoll_period ?
  1139. (HZ * adap->params.linkpoll_period) / 10 :
  1140. adap->params.stats_update_period * HZ;
  1141. if (timeo)
  1142. queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
  1143. }
  1144. static int offload_open(struct net_device *dev)
  1145. {
  1146. struct port_info *pi = netdev_priv(dev);
  1147. struct adapter *adapter = pi->adapter;
  1148. struct t3cdev *tdev = dev2t3cdev(dev);
  1149. int adap_up = adapter->open_device_map & PORT_MASK;
  1150. int err;
  1151. if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  1152. return 0;
  1153. if (!adap_up && (err = cxgb_up(adapter)) < 0)
  1154. goto out;
  1155. t3_tp_set_offload_mode(adapter, 1);
  1156. tdev->lldev = adapter->port[0];
  1157. err = cxgb3_offload_activate(adapter);
  1158. if (err)
  1159. goto out;
  1160. init_port_mtus(adapter);
  1161. t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
  1162. adapter->params.b_wnd,
  1163. adapter->params.rev == 0 ?
  1164. adapter->port[0]->mtu : 0xffff);
  1165. init_smt(adapter);
  1166. if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
  1167. dev_dbg(&dev->dev, "cannot create sysfs group\n");
  1168. /* Call back all registered clients */
  1169. cxgb3_add_clients(tdev);
  1170. out:
  1171. /* restore them in case the offload module has changed them */
  1172. if (err) {
  1173. t3_tp_set_offload_mode(adapter, 0);
  1174. clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
  1175. cxgb3_set_dummy_ops(tdev);
  1176. }
  1177. return err;
  1178. }
  1179. static int offload_close(struct t3cdev *tdev)
  1180. {
  1181. struct adapter *adapter = tdev2adap(tdev);
  1182. struct t3c_data *td = T3C_DATA(tdev);
  1183. if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
  1184. return 0;
  1185. /* Call back all registered clients */
  1186. cxgb3_remove_clients(tdev);
  1187. sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
  1188. /* Flush work scheduled while releasing TIDs */
  1189. flush_work(&td->tid_release_task);
  1190. tdev->lldev = NULL;
  1191. cxgb3_set_dummy_ops(tdev);
  1192. t3_tp_set_offload_mode(adapter, 0);
  1193. clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
  1194. if (!adapter->open_device_map)
  1195. cxgb_down(adapter, 0);
  1196. cxgb3_offload_deactivate(adapter);
  1197. return 0;
  1198. }
  1199. static int cxgb_open(struct net_device *dev)
  1200. {
  1201. struct port_info *pi = netdev_priv(dev);
  1202. struct adapter *adapter = pi->adapter;
  1203. int other_ports = adapter->open_device_map & PORT_MASK;
  1204. int err;
  1205. if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
  1206. return err;
  1207. set_bit(pi->port_id, &adapter->open_device_map);
  1208. if (is_offload(adapter) && !ofld_disable) {
  1209. err = offload_open(dev);
  1210. if (err)
  1211. pr_warn("Could not initialize offload capabilities\n");
  1212. }
  1213. netif_set_real_num_tx_queues(dev, pi->nqsets);
  1214. err = netif_set_real_num_rx_queues(dev, pi->nqsets);
  1215. if (err)
  1216. return err;
  1217. link_start(dev);
  1218. t3_port_intr_enable(adapter, pi->port_id);
  1219. netif_tx_start_all_queues(dev);
  1220. if (!other_ports)
  1221. schedule_chk_task(adapter);
  1222. cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
  1223. return 0;
  1224. }
static int __cxgb_close(struct net_device *dev, int on_wq)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (!adapter->open_device_map)
		return 0;

	/* Stop link fault interrupts */
	t3_xgm_intr_disable(adapter, pi->port_id);
	t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);

	t3_port_intr_disable(adapter, pi->port_id);
	netif_tx_stop_all_queues(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock_irq(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock_irq(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_delayed_work_sync(&adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter, on_wq);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	return __cxgb_close(dev, 0);
}
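
/*
 * ndo_get_stats handler: fold the hardware MAC counters into the netdev
 * statistics structure.  stats_lock serializes access to the clear-on-read
 * MAC counters with the periodic update task.
 */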
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
		pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
		pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}

static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}

static const char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK ",
	"TxFramesOK ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames ",
	"TxUnderrun ",
	"TxExtUnderrun ",
	"TxFrames64 ",
	"TxFrames65To127 ",
	"TxFrames128To255 ",
	"TxFrames256To511 ",
	"TxFrames512To1023 ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax ",
	"RxOctetsOK ",
	"RxFramesOK ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames ",
	"RxFCSErrors ",
	"RxSymbolErrors ",
	"RxShortErrors ",
	"RxJabberErrors ",
	"RxLengthErrors ",
	"RxFIFOoverflow ",
	"RxFrames64 ",
	"RxFrames65To127 ",
	"RxFrames128To255 ",
	"RxFrames256To511 ",
	"RxFrames512To1023 ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax ",
	"PhyFIFOErrors ",
	"TSO ",
	"VLANextractions ",
	"VLANinsertions ",
	"TxCsumOffload ",
	"RxCsumGood ",
	"LroAggregated ",
	"LroFlushed ",
	"LroNoDesc ",
	"RxDrops ",
	"CheckTXEnToggled ",
	"CheckResets ",
	"LinkFaults ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	spin_lock(&adapter->stats_lock);
	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);
	spin_unlock(&adapter->stats_lock);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
	if (fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; ++i)
		tot += adapter->sge.qs[i].port_stats[idx];
	return tot;
}
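
/*
 * ethtool get_ethtool_stats handler.  Values are written in exactly the
 * order of stats_strings[] above; MAC counters come from
 * t3_mac_update_stats() and per-queue SGE counters are summed over the
 * port's queue sets.
 */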
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = 0;
	*data++ = 0;
	*data++ = 0;
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;

	*data++ = s->link_faults;
}
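
/*
 * Copy a contiguous block of adapter registers [start, end] into the
 * register dump buffer at the matching offset.
 */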
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int set_phys_id(struct net_device *dev,
		       enum ethtool_phys_id_state state)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_OFF:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
		break;

	case ETHTOOL_ID_ON:
	case ETHTOOL_ID_INACTIVE:
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 F_GPIO0_OUT_VAL);
	}

	return 0;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		ethtool_cmd_speed_set(cmd, p->link_config.speed);
		cmd->duplex = p->link_config.duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
		cmd->duplex = DUPLEX_UNKNOWN;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.mdio.prtad;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
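
/*
 * Translate an ethtool speed/duplex pair into the corresponding
 * SUPPORTED_* capability bit, or 0 if the combination is not valid.
 */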
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}

#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg)) {
		/*
		 * PHY offers a single speed/duplex.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE) {
			u32 speed = ethtool_cmd_speed(cmd);
			int cap = speed_duplex_to_caps(speed, cmd->duplex);
			if (lc->supported & cap)
				return 0;
		}
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		u32 speed = ethtool_cmd_speed(cmd);
		int cap = speed_duplex_to_caps(speed, cmd->duplex);

		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp;
	struct sge_qset *qs;
	int i;

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	for (i = 0; i < pi->nqsets; i++) {
		qsp = &adapter->params.sge.qset[i];
		qs = &adapter->sge.qs[i];
		qsp->coalesce_usecs = c->rx_coalesce_usecs;
		t3_update_qset_coalesce(qs, qsp);
	}
	return 0;
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;
	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.set_phys_id = set_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
};
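
/*
 * Range check used by the private ioctl below: a negative value means
 * "leave unchanged" and always passes, otherwise the value must fall
 * within [lo, hi].
 */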
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
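
/*
 * Handler for the driver-private SIOCCHIOCTL extension commands: queue
 * set tuning, firmware load, MTU table and payload-memory configuration,
 * MC7 memory reads and trace filters.
 */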
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 cmd;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
			return -EINVAL;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS) ||
		    !in_range(t.fl_size[1], MIN_FL_ENTRIES,
			      MAX_RX_JUMBO_BUFFERS) ||
		    !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
			      MAX_RSPQ_ENTRIES))
			return -EINVAL;

		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		/* Allow setting of any available qset when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets += pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx < q1)
			return -EINVAL;
		if (t.qset_idx > q1 + nqsets - 1)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs = &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.qset[i];
					q->polling = t.polling;
				}
			}
		}

		if (t.lro >= 0) {
			if (t.lro)
				dev->wanted_features |= NETIF_F_GRO;
			else
				dev->wanted_features &= ~NETIF_F_GRO;
			netdev_update_features(dev);
		}

		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;
		int q1 = pi->first_qset;
		int nqsets = pi->nqsets;
		int i;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
			return -EINVAL;

		/* Display qsets for all ports when offload enabled */
		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
			q1 = 0;
			for_each_port(adapter, i) {
				pi = adap2pinfo(adapter, i);
				nqsets = pi->first_qset + pi->nqsets;
			}
		}

		if (t.qset_idx >= nqsets)
			return -EINVAL;
		t.qset_idx = array_index_nospec(t.qset_idx, nqsets);

		q = &adapter->params.sge.qset[q1 + t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.lro = !!(dev->features & NETIF_F_GRO);
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;
		t.qnum = q1;

		if (adapter->flags & USING_MSIX)
			t.vector = adapter->msix_info[q1 + t.qset_idx + 1].vec;
		else
			t.vector = adapter->pdev->irq;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.cmd != CHELSIO_SET_QSET_NUM)
			return -EINVAL;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		memset(&edata, 0, sizeof(struct ch_reg));

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_LOAD_FW)
			return -EINVAL;
		/* Check t.len sanity ? */
		fw_data = memdup_user(useraddr + sizeof(t), t.len);
		if (IS_ERR(fw_data))
			return PTR_ERR(fw_data);

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SETMTUTAB)
			return -EINVAL;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.cmd != CHELSIO_SET_PM)
			return -EINVAL;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_GET_MEM)
			return -EINVAL;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
			return -EINVAL;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/*
	 * Since there is no support for separate rx/tx vlan accel
	 * enable/disable make sure tx flag is always in same state as rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		cxgb_vlan_mode(dev, features);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif

/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
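
/*
 * Periodic watchdog for T3B2 MACs: if the MAC watchdog reports a problem
 * the MAC is either toggled (status 1) or fully re-initialized (status 2).
 * rtnl is taken with trylock so an in-progress ifdown is never blocked.
 */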
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
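
/*
 * Periodic adapter check work: polls link state, accumulates MAC
 * statistics, runs the T3B2 MAC watchdog and counts conditions (XGMAC RX
 * FIFO overflows, empty free lists) that are polled rather than interrupt
 * driven.  Reschedules itself while any port is open.
 */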
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMACs to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}

static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}

static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}

static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
	get_random_bytes(&r, 2);
	delay += r & 1023;
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}

/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}

/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}

void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
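
/*
 * Common error path: shut down offload and all open ports, stop the SGE
 * timers and, if requested, reset the chip before disabling the PCI
 * device.  Used by both the fatal error handler and PCI error recovery.
 */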
static int t3_adapter_error(struct adapter *adapter, int reset, int on_wq)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			__cxgb_close(netdev, on_wq);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}

static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}

static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}

/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}

void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}

/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	t3_adapter_error(adapter, 0, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	rtnl_lock();
	t3_resume_ports(adapter);
	rtnl_unlock();
}

static const struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};

/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in HW.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = netif_get_num_default_rss_queues();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
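
/*
 * Try to allocate one MSI-X vector per queue set plus one extra (up to
 * SGE_QSETS + 1), accepting anything down to nports + 1 vectors.
 * Returns 0 on success with the vector numbers stored in msix_info[].
 */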
static int cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	vectors = pci_enable_msix_range(adap->pdev, entries,
					adap->params.nports + 1, vectors);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; ++i)
		adap->msix_info[i].vec = entries[i].vector;
	adap->msix_nvectors = vectors;

	return 0;
}

static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		netdev_info(dev, "%s %s %sNIC (rev %d) %s%s\n",
			    ai->desc, pi->phy.desc,
			    is_offload(adap) ? "R" : "", adap->params.rev, buf,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			pr_info("%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
				adap->name, t3_mc7_size(&adap->cm) >> 20,
				t3_mc7_size(&adap->pmtx) >> 20,
				t3_mc7_size(&adap->pmrx) >> 20,
				adap->params.vpd.sn);
	}
}

static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t3_eth_xmit,
	.ndo_get_stats = cxgb_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = cxgb_set_rxmode,
	.ndo_do_ioctl = cxgb_ioctl,
	.ndo_change_mtu = cxgb_change_mtu,
	.ndo_set_mac_address = cxgb_set_mac_addr,
	.ndo_fix_features = cxgb_fix_features,
	.ndo_set_features = cxgb_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cxgb_netpoll,
#endif
};

static void cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
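
/*
 * PCI probe routine: map the device, allocate the adapter and its net
 * devices, prepare the hardware, register the ports, and set up MSI/MSI-X
 * and the offload infrastructure.
 */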
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			pr_err("cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		goto out_disable_device;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_release_regions;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_release_regions;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_release_regions;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features |= netdev->hw_features |
				    NETIF_F_HW_VLAN_CTAG_TX;
		netdev->vlan_features |= netdev->features & VLAN_FEAT;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->netdev_ops = &cxgb_netdev_ops;
		netdev->ethtool_ops = &cxgb_ethtool_ops;
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
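
	/*
	 * Derive each port's iSCSI MAC address now that the ports have their
	 * final hardware addresses (the iSCSI MAC is the port MAC with a bit
	 * set in byte 3, see cxgb3_init_iscsi_mac()).
	 */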
	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter_nofail:
	kfree_skb(adapter->nofail_skb);

out_free_adapter:
	kfree(adapter);

out_release_regions:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return err;
}
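
/*
 * remove_one() undoes init_one(): it stops the SGE, tears down sysfs and
 * the offload device, unregisters the registered ports, releases interrupt
 * and SGE resources, and finally frees the adapter and the PCI resources.
 */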
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
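
/*
 * PCI driver glue: probe/remove plus the PCI error-recovery callbacks in
 * t3_err_handler tie the adapter life cycle to the PCI core.
 */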
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = remove_one,
	.err_handler = &t3_err_handler,
};
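
/*
 * Module load/unload.  Note that cxgb3_wq is created lazily on the first
 * probe in init_one() but destroyed here at module unload, after the PCI
 * driver (and thus every adapter) has been unregistered.
 */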
static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);