  1. /**************************************************************************
  2. *
  3. * Copyright 2000-2006 Alacritech, Inc. All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions
  7. * are met:
  8. *
  9. * 1. Redistributions of source code must retain the above copyright
  10. * notice, this list of conditions and the following disclaimer.
  11. * 2. Redistributions in binary form must reproduce the above
  12. * copyright notice, this list of conditions and the following
  13. * disclaimer in the documentation and/or other materials provided
  14. * with the distribution.
  15. *
  16. * Alternatively, this software may be distributed under the terms of the
  17. * GNU General Public License ("GPL") version 2 as published by the Free
  18. * Software Foundation.
  19. *
  20. * THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
  21. * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  22. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  23. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
  24. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  25. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  26. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
  27. * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
  28. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  29. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  30. * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  31. * SUCH DAMAGE.
  32. *
  33. * The views and conclusions contained in the software and documentation
  34. * are those of the authors and should not be interpreted as representing
  35. * official policies, either expressed or implied, of Alacritech, Inc.
  36. *
  37. **************************************************************************/
  38. /*
  39. * FILENAME: slicoss.c
  40. *
  41. * The SLICOSS driver for Alacritech's IS-NIC products.
  42. *
  43. * This driver is supposed to support:
  44. *
  45. * Mojave cards (single port PCI Gigabit) both copper and fiber
  46. * Oasis cards (single and dual port PCI-x Gigabit) copper and fiber
  47. * Kalahari cards (dual and quad port PCI-e Gigabit) copper and fiber
  48. *
  49. * The driver was actually tested on Oasis and Kalahari cards.
  50. *
  51. *
  52. * NOTE: This is the standard, non-accelerated version of Alacritech's
  53. * IS-NIC driver.
  54. */
  55. #define KLUDGE_FOR_4GB_BOUNDARY 1
  56. #define DEBUG_MICROCODE 1
  57. #define DBG 1
  58. #define SLIC_INTERRUPT_PROCESS_LIMIT 1
  59. #define SLIC_OFFLOAD_IP_CHECKSUM 1
  60. #define STATS_TIMER_INTERVAL 2
  61. #define PING_TIMER_INTERVAL 1
  62. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  63. #include <linux/kernel.h>
  64. #include <linux/string.h>
  65. #include <linux/errno.h>
  66. #include <linux/ioport.h>
  67. #include <linux/slab.h>
  68. #include <linux/interrupt.h>
  69. #include <linux/timer.h>
  70. #include <linux/pci.h>
  71. #include <linux/spinlock.h>
  72. #include <linux/init.h>
  73. #include <linux/bitops.h>
  74. #include <linux/io.h>
  75. #include <linux/netdevice.h>
  76. #include <linux/crc32.h>
  77. #include <linux/etherdevice.h>
  78. #include <linux/skbuff.h>
  79. #include <linux/delay.h>
  80. #include <linux/seq_file.h>
  81. #include <linux/kthread.h>
  82. #include <linux/module.h>
  83. #include <linux/firmware.h>
  84. #include <linux/types.h>
  85. #include <linux/dma-mapping.h>
  86. #include <linux/mii.h>
  87. #include <linux/if_vlan.h>
  88. #include <asm/unaligned.h>
  89. #include <linux/ethtool.h>
  90. #include <linux/uaccess.h>
  91. #include "slichw.h"
  92. #include "slic.h"
  93. static uint slic_first_init = 1;
  94. static char *slic_banner = "Alacritech SLIC Technology(tm) Server and Storage Accelerator (Non-Accelerated)";
  95. static char *slic_proc_version = "2.0.351 2006/07/14 12:26:00";
  96. static struct base_driver slic_global = { {}, 0, 0, 0, 1, NULL, NULL };
  97. static int intagg_delay = 100;
  98. static int dynamic_intagg;
  99. static unsigned int rcv_count;
  100. #define DRV_NAME "slicoss"
  101. #define DRV_VERSION "2.0.1"
  102. #define DRV_AUTHOR "Alacritech, Inc. Engineering"
  103. #define DRV_DESCRIPTION "Alacritech SLIC Technology(tm) "\
  104. "Non-Accelerated Driver"
  105. #define DRV_COPYRIGHT "Copyright 2000-2006 Alacritech, Inc. "\
  106. "All rights reserved."
  107. #define PFX DRV_NAME " "
  108. MODULE_AUTHOR(DRV_AUTHOR);
  109. MODULE_DESCRIPTION(DRV_DESCRIPTION);
  110. MODULE_LICENSE("Dual BSD/GPL");
  111. module_param(dynamic_intagg, int, 0);
  112. MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
  113. module_param(intagg_delay, int, 0);
  114. MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
  115. static const struct pci_device_id slic_pci_tbl[] = {
  116. { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_1GB_DEVICE_ID) },
  117. { PCI_DEVICE(PCI_VENDOR_ID_ALACRITECH, SLIC_2GB_DEVICE_ID) },
  118. { 0 }
  119. };
  120. MODULE_DEVICE_TABLE(pci, slic_pci_tbl);
  121. static inline void slic_reg32_write(void __iomem *reg, u32 value, bool flush)
  122. {
  123. writel(value, reg);
  124. if (flush)
  125. mb();
  126. }
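/*
 * A note on slic_reg64_write() below: a 64-bit register update is done as
 * two 32-bit writes -- the upper address bits go to @regh, then the value
 * goes to @reg.  The bit64reglock spinlock presumably exists so that one
 * caller's upper/lower pair cannot be interleaved with another caller's,
 * which would leave the card with a mismatched high half.
 */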
  127. static inline void slic_reg64_write(struct adapter *adapter, void __iomem *reg,
  128. u32 value, void __iomem *regh, u32 paddrh,
  129. bool flush)
  130. {
  131. unsigned long flags;
  132. spin_lock_irqsave(&adapter->bit64reglock, flags);
  133. writel(paddrh, regh);
  134. writel(value, reg);
  135. if (flush)
  136. mb();
  137. spin_unlock_irqrestore(&adapter->bit64reglock, flags);
  138. }
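/*
 * slic_mcast_set_bit() hashes a multicast MAC address into one of the 64
 * bits of adapter->mcastmask: ether_crc() yields a 32-bit CRC, ">> 23"
 * plus the assignment to an unsigned char keeps the high-order bits, and
 * "& 0x3F" reduces that to a 6-bit bucket (0-63).  Illustrative sketch
 * only (the address is just an example):
 *
 *	crc = ether_crc(ETH_ALEN, "\x01\x00\x5e\x00\x00\x01");
 *	bucket = (unsigned char)(crc >> 23) & 0x3F;	// 0..63
 *	adapter->mcastmask |= (u64)1 << bucket;
 */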
  139. static void slic_mcast_set_bit(struct adapter *adapter, char *address)
  140. {
  141. unsigned char crcpoly;
  142. /* Get the CRC of the mac address */
  143. /*
  144. * we use bits 1-8 (lsb), bitwise reversed,
  145. * msb (= lsb bit 0 before bitrev) is automatically discarded
  146. */
  147. crcpoly = ether_crc(ETH_ALEN, address) >> 23;
  148. /*
  149. * We only have space on the SLIC for 64 entries. Lop
  150. * off the top two bits. (2^6 = 64)
  151. */
  152. crcpoly &= 0x3F;
  153. /* OR in the new bit into our 64 bit mask. */
  154. adapter->mcastmask |= (u64)1 << crcpoly;
  155. }
  156. static void slic_mcast_set_mask(struct adapter *adapter)
  157. {
  158. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  159. if (adapter->macopts & (MAC_ALLMCAST | MAC_PROMISC)) {
  160. /*
  161. * Turn on all multicast addresses. We have to do this for
  162. * promiscuous mode as well as ALLMCAST mode. It saves the
  163. * Microcode from having to keep state about the MAC
  164. * configuration.
  165. */
  166. slic_reg32_write(&slic_regs->slic_mcastlow, 0xFFFFFFFF, FLUSH);
  167. slic_reg32_write(&slic_regs->slic_mcasthigh, 0xFFFFFFFF,
  168. FLUSH);
  169. } else {
  170. /*
  171. * Commit our multicast mask to the SLIC by writing to the
  172. * multicast address mask registers
  173. */
  174. slic_reg32_write(&slic_regs->slic_mcastlow,
  175. (u32)(adapter->mcastmask & 0xFFFFFFFF), FLUSH);
  176. slic_reg32_write(&slic_regs->slic_mcasthigh,
  177. (u32)((adapter->mcastmask >> 32) & 0xFFFFFFFF), FLUSH);
  178. }
  179. }
  180. static void slic_timer_ping(ulong dev)
  181. {
  182. struct adapter *adapter;
  183. struct sliccard *card;
  184. adapter = netdev_priv((struct net_device *)dev);
  185. card = adapter->card;
  186. adapter->pingtimer.expires = jiffies + (PING_TIMER_INTERVAL * HZ);
  187. add_timer(&adapter->pingtimer);
  188. }
  189. static void slic_unmap_mmio_space(struct adapter *adapter)
  190. {
  191. if (adapter->slic_regs)
  192. iounmap(adapter->slic_regs);
  193. adapter->slic_regs = NULL;
  194. }
  195. /*
  196. * slic_link_config
  197. *
  198. * Write phy control to configure link duplex/speed
  199. *
  200. */
  201. static void slic_link_config(struct adapter *adapter,
  202. u32 linkspeed, u32 linkduplex)
  203. {
  204. u32 __iomem *wphy;
  205. u32 speed;
  206. u32 duplex;
  207. u32 phy_config;
  208. u32 phy_advreg;
  209. u32 phy_gctlreg;
  210. if (adapter->state != ADAPT_UP)
  211. return;
  212. if (linkspeed > LINK_1000MB)
  213. linkspeed = LINK_AUTOSPEED;
  214. if (linkduplex > LINK_AUTOD)
  215. linkduplex = LINK_AUTOD;
  216. wphy = &adapter->slic_regs->slic_wphy;
  217. if ((linkspeed == LINK_AUTOSPEED) || (linkspeed == LINK_1000MB)) {
  218. if (adapter->flags & ADAPT_FLAGS_FIBERMEDIA) {
  219. /*
  220. * We've got a fiber gigabit interface, and register
  221. * 4 is different in fiber mode than in copper mode
  222. */
  223. /* advertise FD only @1000 Mb */
  224. phy_advreg = (MIICR_REG_4 | (PAR_ADV1000XFD));
  225. /* enable PAUSE frames */
  226. phy_advreg |= PAR_ASYMPAUSE_FIBER;
  227. slic_reg32_write(wphy, phy_advreg, FLUSH);
  228. if (linkspeed == LINK_AUTOSPEED) {
  229. /* reset phy, enable auto-neg */
  230. phy_config =
  231. (MIICR_REG_PCR |
  232. (PCR_RESET | PCR_AUTONEG |
  233. PCR_AUTONEG_RST));
  234. slic_reg32_write(wphy, phy_config, FLUSH);
  235. } else { /* forced 1000 Mb FD*/
  236. /*
  237. * power down phy to break link
  238. * (this may not work)
  239. */
  240. phy_config = (MIICR_REG_PCR | PCR_POWERDOWN);
  241. slic_reg32_write(wphy, phy_config, FLUSH);
  242. /*
  243. * wait, Marvell says 1 sec,
  244. * try to get away with 10 ms
  245. */
  246. mdelay(10);
  247. /*
  248. * disable auto-neg, set speed/duplex,
  249. * soft reset phy, powerup
  250. */
  251. phy_config =
  252. (MIICR_REG_PCR |
  253. (PCR_RESET | PCR_SPEED_1000 |
  254. PCR_DUPLEX_FULL));
  255. slic_reg32_write(wphy, phy_config, FLUSH);
  256. }
  257. } else { /* copper gigabit */
  258. /*
  259. * Auto-negotiate, or 1000 Mb (which must be auto-negotiated).
  260. * We've got a copper gigabit interface, and
  261. * register 4 is different in copper mode than
  262. * in fiber mode
  263. */
  264. if (linkspeed == LINK_AUTOSPEED) {
  265. /* advertise 10/100 Mb modes */
  266. phy_advreg =
  267. (MIICR_REG_4 |
  268. (PAR_ADV100FD | PAR_ADV100HD | PAR_ADV10FD
  269. | PAR_ADV10HD));
  270. } else {
  271. /*
  272. * linkspeed == LINK_1000MB -
  273. * don't advertise 10/100 Mb modes
  274. */
  275. phy_advreg = MIICR_REG_4;
  276. }
  277. /* enable PAUSE frames */
  278. phy_advreg |= PAR_ASYMPAUSE;
  279. /* required by the Cicada PHY */
  280. phy_advreg |= PAR_802_3;
  281. slic_reg32_write(wphy, phy_advreg, FLUSH);
  282. /* advertise FD only @1000 Mb */
  283. phy_gctlreg = (MIICR_REG_9 | (PGC_ADV1000FD));
  284. slic_reg32_write(wphy, phy_gctlreg, FLUSH);
  285. if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
  286. /*
  287. * if a Marvell PHY
  288. * enable auto crossover
  289. */
  290. phy_config =
  291. (MIICR_REG_16 | (MRV_REG16_XOVERON));
  292. slic_reg32_write(wphy, phy_config, FLUSH);
  293. /* reset phy, enable auto-neg */
  294. phy_config =
  295. (MIICR_REG_PCR |
  296. (PCR_RESET | PCR_AUTONEG |
  297. PCR_AUTONEG_RST));
  298. slic_reg32_write(wphy, phy_config, FLUSH);
  299. } else { /* it's a Cicada PHY */
  300. /* enable and restart auto-neg (don't reset) */
  301. phy_config =
  302. (MIICR_REG_PCR |
  303. (PCR_AUTONEG | PCR_AUTONEG_RST));
  304. slic_reg32_write(wphy, phy_config, FLUSH);
  305. }
  306. }
  307. } else {
  308. /* Forced 10/100 */
  309. if (linkspeed == LINK_10MB)
  310. speed = 0;
  311. else
  312. speed = PCR_SPEED_100;
  313. if (linkduplex == LINK_HALFD)
  314. duplex = 0;
  315. else
  316. duplex = PCR_DUPLEX_FULL;
  317. if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
  318. /*
  319. * if a Marvell PHY
  320. * disable auto crossover
  321. */
  322. phy_config = (MIICR_REG_16 | (MRV_REG16_XOVEROFF));
  323. slic_reg32_write(wphy, phy_config, FLUSH);
  324. }
  325. /* power down phy to break link (this may not work) */
  326. phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN | speed | duplex));
  327. slic_reg32_write(wphy, phy_config, FLUSH);
  328. /* wait, Marvell says 1 sec, try to get away with 10 ms */
  329. mdelay(10);
  330. if (adapter->subsysid != SLIC_1GB_CICADA_SUBSYS_ID) {
  331. /*
  332. * if a Marvell PHY
  333. * disable auto-neg, set speed,
  334. * soft reset phy, powerup
  335. */
  336. phy_config =
  337. (MIICR_REG_PCR | (PCR_RESET | speed | duplex));
  338. slic_reg32_write(wphy, phy_config, FLUSH);
  339. } else { /* it's a Cicada PHY */
  340. /* disable auto-neg, set speed, powerup */
  341. phy_config = (MIICR_REG_PCR | (speed | duplex));
  342. slic_reg32_write(wphy, phy_config, FLUSH);
  343. }
  344. }
  345. }
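/*
 * Receive-sequencer microcode download.  Judging from the parsing code
 * below, the firmware image begins with a 32-bit instruction count and
 * then one record per instruction: a 32-bit low word followed by a single
 * high byte.  Each record is pushed through the slic_rcv_wcs register as
 * address, low data, high data, bracketed by the SLIC_RCVWCS_BEGIN and
 * SLIC_RCVWCS_FINISH markers.
 */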
  346. static int slic_card_download_gbrcv(struct adapter *adapter)
  347. {
  348. const struct firmware *fw;
  349. const char *file = "";
  350. int ret;
  351. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  352. u32 codeaddr;
  353. u32 instruction;
  354. int index = 0;
  355. u32 rcvucodelen = 0;
  356. switch (adapter->devid) {
  357. case SLIC_2GB_DEVICE_ID:
  358. file = "slicoss/oasisrcvucode.sys";
  359. break;
  360. case SLIC_1GB_DEVICE_ID:
  361. file = "slicoss/gbrcvucode.sys";
  362. break;
  363. default:
  364. return -ENOENT;
  365. }
  366. ret = request_firmware(&fw, file, &adapter->pcidev->dev);
  367. if (ret) {
  368. dev_err(&adapter->pcidev->dev,
  369. "Failed to load firmware %s\n", file);
  370. return ret;
  371. }
  372. rcvucodelen = *(u32 *)(fw->data + index);
  373. index += 4;
  374. switch (adapter->devid) {
  375. case SLIC_2GB_DEVICE_ID:
  376. if (rcvucodelen != OasisRcvUCodeLen) {
  377. release_firmware(fw);
  378. return -EINVAL;
  379. }
  380. break;
  381. case SLIC_1GB_DEVICE_ID:
  382. if (rcvucodelen != GBRcvUCodeLen) {
  383. release_firmware(fw);
  384. return -EINVAL;
  385. }
  386. break;
  387. }
  388. /* start download */
  389. slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_BEGIN, FLUSH);
  390. /* download the rcv sequencer ucode */
  391. for (codeaddr = 0; codeaddr < rcvucodelen; codeaddr++) {
  392. /* write out instruction address */
  393. slic_reg32_write(&slic_regs->slic_rcv_wcs, codeaddr, FLUSH);
  394. instruction = *(u32 *)(fw->data + index);
  395. index += 4;
  396. /* write out the instruction data low addr */
  397. slic_reg32_write(&slic_regs->slic_rcv_wcs, instruction, FLUSH);
  398. instruction = *(u8 *)(fw->data + index);
  399. index++;
  400. /* write out the instruction data high addr */
  401. slic_reg32_write(&slic_regs->slic_rcv_wcs, (u8)instruction,
  402. FLUSH);
  403. }
  404. /* download finished */
  405. release_firmware(fw);
  406. slic_reg32_write(&slic_regs->slic_rcv_wcs, SLIC_RCVWCS_FINISH, FLUSH);
  407. return 0;
  408. }
  409. MODULE_FIRMWARE("slicoss/oasisrcvucode.sys");
  410. MODULE_FIRMWARE("slicoss/gbrcvucode.sys");
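/*
 * Main microcode download.  The on-disk layout implied by the code: a
 * 32-bit section count, an array of section sizes, an array of section
 * start addresses, then the instruction stream itself.  Each instruction
 * occupies 8 bytes (two 32-bit words), which is why the inner loops run
 * sectsize >> 3 times.  A second pass rewrites sections at or above
 * 0x8000 with SLIC_WCS_COMPARE set, apparently intended as a verification
 * pass (the failure check itself is commented out below).
 */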
  411. static int slic_card_download(struct adapter *adapter)
  412. {
  413. const struct firmware *fw;
  414. const char *file = "";
  415. int ret;
  416. u32 section;
  417. int thissectionsize;
  418. int codeaddr;
  419. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  420. u32 instruction;
  421. u32 baseaddress;
  422. u32 i;
  423. u32 numsects = 0;
  424. u32 sectsize[3];
  425. u32 sectstart[3];
  426. int ucode_start, index = 0;
  427. switch (adapter->devid) {
  428. case SLIC_2GB_DEVICE_ID:
  429. file = "slicoss/oasisdownload.sys";
  430. break;
  431. case SLIC_1GB_DEVICE_ID:
  432. file = "slicoss/gbdownload.sys";
  433. break;
  434. default:
  435. return -ENOENT;
  436. }
  437. ret = request_firmware(&fw, file, &adapter->pcidev->dev);
  438. if (ret) {
  439. dev_err(&adapter->pcidev->dev,
  440. "Failed to load firmware %s\n", file);
  441. return ret;
  442. }
  443. numsects = *(u32 *)(fw->data + index);
  444. index += 4;
  445. for (i = 0; i < numsects; i++) {
  446. sectsize[i] = *(u32 *)(fw->data + index);
  447. index += 4;
  448. }
  449. for (i = 0; i < numsects; i++) {
  450. sectstart[i] = *(u32 *)(fw->data + index);
  451. index += 4;
  452. }
  453. ucode_start = index;
  454. instruction = *(u32 *)(fw->data + index);
  455. index += 4;
  456. for (section = 0; section < numsects; section++) {
  457. baseaddress = sectstart[section];
  458. thissectionsize = sectsize[section] >> 3;
  459. for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
  460. /* Write out instruction address */
  461. slic_reg32_write(&slic_regs->slic_wcs,
  462. baseaddress + codeaddr, FLUSH);
  463. /* Write out instruction to low addr */
  464. slic_reg32_write(&slic_regs->slic_wcs,
  465. instruction, FLUSH);
  466. instruction = *(u32 *)(fw->data + index);
  467. index += 4;
  468. /* Write out instruction to high addr */
  469. slic_reg32_write(&slic_regs->slic_wcs,
  470. instruction, FLUSH);
  471. instruction = *(u32 *)(fw->data + index);
  472. index += 4;
  473. }
  474. }
  475. index = ucode_start;
  476. for (section = 0; section < numsects; section++) {
  477. instruction = *(u32 *)(fw->data + index);
  478. baseaddress = sectstart[section];
  479. if (baseaddress < 0x8000)
  480. continue;
  481. thissectionsize = sectsize[section] >> 3;
  482. for (codeaddr = 0; codeaddr < thissectionsize; codeaddr++) {
  483. /* Write out instruction address */
  484. slic_reg32_write(&slic_regs->slic_wcs,
  485. SLIC_WCS_COMPARE | (baseaddress + codeaddr),
  486. FLUSH);
  487. /* Write out instruction to low addr */
  488. slic_reg32_write(&slic_regs->slic_wcs, instruction,
  489. FLUSH);
  490. instruction = *(u32 *)(fw->data + index);
  491. index += 4;
  492. /* Write out instruction to high addr */
  493. slic_reg32_write(&slic_regs->slic_wcs, instruction,
  494. FLUSH);
  495. instruction = *(u32 *)(fw->data + index);
  496. index += 4;
  497. /* Check SRAM location zero. If it is non-zero, abort. */
  498. /*
  499. * failure = readl((u32 __iomem *)&slic_regs->slic_reset);
  500. * if (failure) {
  501. * release_firmware(fw);
  502. * return -EIO;
  503. * }
  504. */
  505. }
  506. }
  507. release_firmware(fw);
  508. /* Everything OK, kick off the card */
  509. mdelay(10);
  510. slic_reg32_write(&slic_regs->slic_wcs, SLIC_WCS_START, FLUSH);
  511. /*
  512. * stall for 20 ms, long enough for ucode to init card
  513. * and reach mainloop
  514. */
  515. mdelay(20);
  516. return 0;
  517. }
  518. MODULE_FIRMWARE("slicoss/oasisdownload.sys");
  519. MODULE_FIRMWARE("slicoss/gbdownload.sys");
  520. static void slic_adapter_set_hwaddr(struct adapter *adapter)
  521. {
  522. struct sliccard *card = adapter->card;
  523. if ((adapter->card) && (card->config_set)) {
  524. memcpy(adapter->macaddr,
  525. card->config.MacInfo[adapter->functionnumber].macaddrA,
  526. sizeof(struct slic_config_mac));
  527. if (is_zero_ether_addr(adapter->currmacaddr))
  528. memcpy(adapter->currmacaddr, adapter->macaddr,
  529. ETH_ALEN);
  530. if (adapter->netdev)
  531. memcpy(adapter->netdev->dev_addr, adapter->currmacaddr,
  532. ETH_ALEN);
  533. }
  534. }
  535. static void slic_intagg_set(struct adapter *adapter, u32 value)
  536. {
  537. slic_reg32_write(&adapter->slic_regs->slic_intagg, value, FLUSH);
  538. adapter->card->loadlevel_current = value;
  539. }
  540. static void slic_soft_reset(struct adapter *adapter)
  541. {
  542. if (adapter->card->state == CARD_UP) {
  543. slic_reg32_write(&adapter->slic_regs->slic_quiesce, 0, FLUSH);
  544. mdelay(1);
  545. }
  546. slic_reg32_write(&adapter->slic_regs->slic_reset, SLIC_RESET_MAGIC,
  547. FLUSH);
  548. mdelay(1);
  549. }
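/*
 * The station address is programmed as two halves: the low four bytes of
 * currmacaddr go to wraddral/wraddrbl and the top two bytes go to
 * wraddrah/wraddrbh.  For an illustrative, made-up address
 * 00:e0:ed:01:02:03 the writes would be value = 0xed010203 and
 * value2 = 0x00e0.
 */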
  550. static void slic_mac_address_config(struct adapter *adapter)
  551. {
  552. u32 value;
  553. u32 value2;
  554. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  555. value = ntohl(*(__be32 *)&adapter->currmacaddr[2]);
  556. slic_reg32_write(&slic_regs->slic_wraddral, value, FLUSH);
  557. slic_reg32_write(&slic_regs->slic_wraddrbl, value, FLUSH);
  558. value2 = (u32)((adapter->currmacaddr[0] << 8 |
  559. adapter->currmacaddr[1]) & 0xFFFF);
  560. slic_reg32_write(&slic_regs->slic_wraddrah, value2, FLUSH);
  561. slic_reg32_write(&slic_regs->slic_wraddrbh, value2, FLUSH);
  562. /*
  563. * Write our multicast mask out to the card. This is done
  564. * here in addition to the slic_mcast_addr_set routine
  565. * because ALL_MCAST may have been enabled or disabled
  566. */
  567. slic_mcast_set_mask(adapter);
  568. }
  569. static void slic_mac_config(struct adapter *adapter)
  570. {
  571. u32 value;
  572. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  573. /* Setup GMAC gaps */
  574. if (adapter->linkspeed == LINK_1000MB) {
  575. value = ((GMCR_GAPBB_1000 << GMCR_GAPBB_SHIFT) |
  576. (GMCR_GAPR1_1000 << GMCR_GAPR1_SHIFT) |
  577. (GMCR_GAPR2_1000 << GMCR_GAPR2_SHIFT));
  578. } else {
  579. value = ((GMCR_GAPBB_100 << GMCR_GAPBB_SHIFT) |
  580. (GMCR_GAPR1_100 << GMCR_GAPR1_SHIFT) |
  581. (GMCR_GAPR2_100 << GMCR_GAPR2_SHIFT));
  582. }
  583. /* enable GMII */
  584. if (adapter->linkspeed == LINK_1000MB)
  585. value |= GMCR_GBIT;
  586. /* enable fullduplex */
  587. if ((adapter->linkduplex == LINK_FULLD)
  588. || (adapter->macopts & MAC_LOOPBACK)) {
  589. value |= GMCR_FULLD;
  590. }
  591. /* write mac config */
  592. slic_reg32_write(&slic_regs->slic_wmcfg, value, FLUSH);
  593. /* setup mac addresses */
  594. slic_mac_address_config(adapter);
  595. }
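/*
 * slic_config_set() programs the transmit and receive GMAC configuration.
 * On a link change the gap timings are reprogrammed first and the receiver
 * is reset (GRCR_RESET); otherwise only the station address is rewritten.
 * Pause and CTL-frame handling are enabled only at full duplex, the
 * receiver is enabled only when the adapter is not ADAPT_DOWN, and
 * MAC_PROMISC additionally sets GRCR_RCVALL.
 */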
  596. static void slic_config_set(struct adapter *adapter, bool linkchange)
  597. {
  598. u32 value;
  599. u32 RcrReset;
  600. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  601. if (linkchange) {
  602. /* Setup MAC */
  603. slic_mac_config(adapter);
  604. RcrReset = GRCR_RESET;
  605. } else {
  606. slic_mac_address_config(adapter);
  607. RcrReset = 0;
  608. }
  609. if (adapter->linkduplex == LINK_FULLD) {
  610. /* setup xmtcfg */
  611. value = (GXCR_RESET | /* Always reset */
  612. GXCR_XMTEN | /* Enable transmit */
  613. GXCR_PAUSEEN); /* Enable pause */
  614. slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
  615. /* Setup rcvcfg last */
  616. value = (RcrReset | /* Reset, if linkchange */
  617. GRCR_CTLEN | /* Enable CTL frames */
  618. GRCR_ADDRAEN | /* Address A enable */
  619. GRCR_RCVBAD | /* Rcv bad frames */
  620. (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
  621. } else {
  622. /* setup xmtcfg */
  623. value = (GXCR_RESET | /* Always reset */
  624. GXCR_XMTEN); /* Enable transmit */
  625. slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
  626. /* Setup rcvcfg last */
  627. value = (RcrReset | /* Reset, if linkchange */
  628. GRCR_ADDRAEN | /* Address A enable */
  629. GRCR_RCVBAD | /* Rcv bad frames */
  630. (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
  631. }
  632. if (adapter->state != ADAPT_DOWN) {
  633. /* Only enable receive if we are restarting or running */
  634. value |= GRCR_RCVEN;
  635. }
  636. if (adapter->macopts & MAC_PROMISC)
  637. value |= GRCR_RCVALL;
  638. slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH);
  639. }
  640. /*
  641. * Turn off RCV and XMT, power down PHY
  642. */
  643. static void slic_config_clear(struct adapter *adapter)
  644. {
  645. u32 value;
  646. u32 phy_config;
  647. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  648. /* Setup xmtcfg */
  649. value = (GXCR_RESET | /* Always reset */
  650. GXCR_PAUSEEN); /* Enable pause */
  651. slic_reg32_write(&slic_regs->slic_wxcfg, value, FLUSH);
  652. value = (GRCR_RESET | /* Always reset */
  653. GRCR_CTLEN | /* Enable CTL frames */
  654. GRCR_ADDRAEN | /* Address A enable */
  655. (GRCR_HASHSIZE << GRCR_HASHSIZE_SHIFT));
  656. slic_reg32_write(&slic_regs->slic_wrcfg, value, FLUSH);
  657. /* power down phy */
  658. phy_config = (MIICR_REG_PCR | (PCR_POWERDOWN));
  659. slic_reg32_write(&slic_regs->slic_wphy, phy_config, FLUSH);
  660. }
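/*
 * Software receive filter, checked per frame in order: promiscuous mode
 * accepts everything; broadcast and multicast frames are accepted only if
 * the corresponding MAC_BCAST/MAC_ALLMCAST/MAC_MCAST options are set
 * (plain MAC_MCAST additionally walks the adapter's mcastaddrs list);
 * anything left is unicast and is accepted when MAC_DIRECTED is set.
 */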
  661. static bool slic_mac_filter(struct adapter *adapter,
  662. struct ether_header *ether_frame)
  663. {
  664. struct net_device *netdev = adapter->netdev;
  665. u32 opts = adapter->macopts;
  666. if (opts & MAC_PROMISC)
  667. return true;
  668. if (is_broadcast_ether_addr(ether_frame->ether_dhost)) {
  669. if (opts & MAC_BCAST) {
  670. adapter->rcv_broadcasts++;
  671. return true;
  672. }
  673. return false;
  674. }
  675. if (is_multicast_ether_addr(ether_frame->ether_dhost)) {
  676. if (opts & MAC_ALLMCAST) {
  677. adapter->rcv_multicasts++;
  678. netdev->stats.multicast++;
  679. return true;
  680. }
  681. if (opts & MAC_MCAST) {
  682. struct mcast_address *mcaddr = adapter->mcastaddrs;
  683. while (mcaddr) {
  684. if (ether_addr_equal(mcaddr->address,
  685. ether_frame->ether_dhost)) {
  686. adapter->rcv_multicasts++;
  687. netdev->stats.multicast++;
  688. return true;
  689. }
  690. mcaddr = mcaddr->next;
  691. }
  692. return false;
  693. }
  694. return false;
  695. }
  696. if (opts & MAC_DIRECTED) {
  697. adapter->rcv_unicasts++;
  698. return true;
  699. }
  700. return false;
  701. }
  702. static int slic_mac_set_address(struct net_device *dev, void *ptr)
  703. {
  704. struct adapter *adapter = netdev_priv(dev);
  705. struct sockaddr *addr = ptr;
  706. if (netif_running(dev))
  707. return -EBUSY;
  708. if (!adapter)
  709. return -EBUSY;
  710. if (!is_valid_ether_addr(addr->sa_data))
  711. return -EINVAL;
  712. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  713. memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
  714. slic_config_set(adapter, true);
  715. return 0;
  716. }
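/*
 * Periodic load check: card->events accumulates activity and is mapped to
 * an interrupt-aggregation level through the SLIC_LOAD_x thresholds; the
 * intagg register is rewritten only when the level actually changes.  On
 * the 1 Gb part running at gigabit speed the level is simply pinned at
 * 100 instead of using the threshold table.
 */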
  717. static void slic_timer_load_check(ulong cardaddr)
  718. {
  719. struct sliccard *card = (struct sliccard *)cardaddr;
  720. struct adapter *adapter = card->master;
  721. u32 __iomem *intagg;
  722. u32 load = card->events;
  723. u32 level = 0;
  724. if ((adapter) && (adapter->state == ADAPT_UP) &&
  725. (card->state == CARD_UP) && (slic_global.dynamic_intagg)) {
  726. intagg = &adapter->slic_regs->slic_intagg;
  727. if (adapter->devid == SLIC_1GB_DEVICE_ID) {
  728. if (adapter->linkspeed == LINK_1000MB)
  729. level = 100;
  730. else {
  731. if (load > SLIC_LOAD_5)
  732. level = SLIC_INTAGG_5;
  733. else if (load > SLIC_LOAD_4)
  734. level = SLIC_INTAGG_4;
  735. else if (load > SLIC_LOAD_3)
  736. level = SLIC_INTAGG_3;
  737. else if (load > SLIC_LOAD_2)
  738. level = SLIC_INTAGG_2;
  739. else if (load > SLIC_LOAD_1)
  740. level = SLIC_INTAGG_1;
  741. else
  742. level = SLIC_INTAGG_0;
  743. }
  744. if (card->loadlevel_current != level) {
  745. card->loadlevel_current = level;
  746. slic_reg32_write(intagg, level, FLUSH);
  747. }
  748. } else {
  749. if (load > SLIC_LOAD_5)
  750. level = SLIC_INTAGG_5;
  751. else if (load > SLIC_LOAD_4)
  752. level = SLIC_INTAGG_4;
  753. else if (load > SLIC_LOAD_3)
  754. level = SLIC_INTAGG_3;
  755. else if (load > SLIC_LOAD_2)
  756. level = SLIC_INTAGG_2;
  757. else if (load > SLIC_LOAD_1)
  758. level = SLIC_INTAGG_1;
  759. else
  760. level = SLIC_INTAGG_0;
  761. if (card->loadlevel_current != level) {
  762. card->loadlevel_current = level;
  763. slic_reg32_write(intagg, level, FLUSH);
  764. }
  765. }
  766. }
  767. card->events = 0;
  768. card->loadtimer.expires = jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
  769. add_timer(&card->loadtimer);
  770. }
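/*
 * UPR request handling.  Requests to the card (statistics, link status,
 * config reads, pings) are queued on adapter->upr_list and issued one at
 * a time: slic_upr_start() only kicks the head of the list when upr_busy
 * is clear, and completion (see slic_upr_request_complete below) pops the
 * head, clears upr_busy and starts the next request.  The queue is
 * protected by upr_lock.
 */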
  771. static int slic_upr_queue_request(struct adapter *adapter,
  772. u32 upr_request,
  773. u32 upr_data,
  774. u32 upr_data_h,
  775. u32 upr_buffer, u32 upr_buffer_h)
  776. {
  777. struct slic_upr *upr;
  778. struct slic_upr *uprqueue;
  779. upr = kmalloc(sizeof(struct slic_upr), GFP_ATOMIC);
  780. if (!upr)
  781. return -ENOMEM;
  782. upr->adapter = adapter->port;
  783. upr->upr_request = upr_request;
  784. upr->upr_data = upr_data;
  785. upr->upr_buffer = upr_buffer;
  786. upr->upr_data_h = upr_data_h;
  787. upr->upr_buffer_h = upr_buffer_h;
  788. upr->next = NULL;
  789. if (adapter->upr_list) {
  790. uprqueue = adapter->upr_list;
  791. while (uprqueue->next)
  792. uprqueue = uprqueue->next;
  793. uprqueue->next = upr;
  794. } else {
  795. adapter->upr_list = upr;
  796. }
  797. return 0;
  798. }
  799. static void slic_upr_start(struct adapter *adapter)
  800. {
  801. struct slic_upr *upr;
  802. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  803. /*
  804. * char * ptr1;
  805. * char * ptr2;
  806. * uint cmdoffset;
  807. */
  808. upr = adapter->upr_list;
  809. if (!upr)
  810. return;
  811. if (adapter->upr_busy)
  812. return;
  813. adapter->upr_busy = 1;
  814. switch (upr->upr_request) {
  815. case SLIC_UPR_STATS:
  816. if (upr->upr_data_h == 0) {
  817. slic_reg32_write(&slic_regs->slic_stats, upr->upr_data,
  818. FLUSH);
  819. } else {
  820. slic_reg64_write(adapter, &slic_regs->slic_stats64,
  821. upr->upr_data,
  822. &slic_regs->slic_addr_upper,
  823. upr->upr_data_h, FLUSH);
  824. }
  825. break;
  826. case SLIC_UPR_RLSR:
  827. slic_reg64_write(adapter, &slic_regs->slic_rlsr, upr->upr_data,
  828. &slic_regs->slic_addr_upper, upr->upr_data_h,
  829. FLUSH);
  830. break;
  831. case SLIC_UPR_RCONFIG:
  832. slic_reg64_write(adapter, &slic_regs->slic_rconfig,
  833. upr->upr_data, &slic_regs->slic_addr_upper,
  834. upr->upr_data_h, FLUSH);
  835. break;
  836. case SLIC_UPR_PING:
  837. slic_reg32_write(&slic_regs->slic_ping, 1, FLUSH);
  838. break;
  839. }
  840. }
  841. static int slic_upr_request(struct adapter *adapter,
  842. u32 upr_request,
  843. u32 upr_data,
  844. u32 upr_data_h,
  845. u32 upr_buffer, u32 upr_buffer_h)
  846. {
  847. unsigned long flags;
  848. int rc;
  849. spin_lock_irqsave(&adapter->upr_lock, flags);
  850. rc = slic_upr_queue_request(adapter,
  851. upr_request,
  852. upr_data,
  853. upr_data_h, upr_buffer, upr_buffer_h);
  854. if (rc)
  855. goto err_unlock_irq;
  856. slic_upr_start(adapter);
  857. err_unlock_irq:
  858. spin_unlock_irqrestore(&adapter->upr_lock, flags);
  859. return rc;
  860. }
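/*
 * Completion handler for the link-status (RLSR) request.  On an error or
 * busy indication the request is simply requeued; otherwise the shared
 * memory linkstatus word is decoded into up/down, 10/100/1000 Mb and
 * half/full duplex, and the MAC is reconfigured and the transmit queue
 * restarted only when the link transitions from down to up.
 */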
  861. static void slic_link_upr_complete(struct adapter *adapter, u32 isr)
  862. {
  863. u32 linkstatus = adapter->pshmem->linkstatus;
  864. uint linkup;
  865. unsigned char linkspeed;
  866. unsigned char linkduplex;
  867. if ((isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
  868. struct slic_shmem *pshmem;
  869. pshmem = (struct slic_shmem *)(unsigned long)
  870. adapter->phys_shmem;
  871. #if BITS_PER_LONG == 64
  872. slic_upr_queue_request(adapter,
  873. SLIC_UPR_RLSR,
  874. SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
  875. SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
  876. 0, 0);
  877. #else
  878. slic_upr_queue_request(adapter,
  879. SLIC_UPR_RLSR,
  880. (u32)&pshmem->linkstatus,
  881. SLIC_GET_ADDR_HIGH(pshmem), 0, 0);
  882. #endif
  883. return;
  884. }
  885. if (adapter->state != ADAPT_UP)
  886. return;
  887. linkup = linkstatus & GIG_LINKUP ? LINK_UP : LINK_DOWN;
  888. if (linkstatus & GIG_SPEED_1000)
  889. linkspeed = LINK_1000MB;
  890. else if (linkstatus & GIG_SPEED_100)
  891. linkspeed = LINK_100MB;
  892. else
  893. linkspeed = LINK_10MB;
  894. if (linkstatus & GIG_FULLDUPLEX)
  895. linkduplex = LINK_FULLD;
  896. else
  897. linkduplex = LINK_HALFD;
  898. if ((adapter->linkstate == LINK_DOWN) && (linkup == LINK_DOWN))
  899. return;
  900. /* link up event, but nothing has changed */
  901. if ((adapter->linkstate == LINK_UP) &&
  902. (linkup == LINK_UP) &&
  903. (adapter->linkspeed == linkspeed) &&
  904. (adapter->linkduplex == linkduplex))
  905. return;
  906. /* link has changed at this point */
  907. /* link has gone from up to down */
  908. if (linkup == LINK_DOWN) {
  909. adapter->linkstate = LINK_DOWN;
  910. return;
  911. }
  912. /* link has gone from down to up */
  913. adapter->linkspeed = linkspeed;
  914. adapter->linkduplex = linkduplex;
  915. if (adapter->linkstate != LINK_UP) {
  916. /* setup the mac */
  917. slic_config_set(adapter, true);
  918. adapter->linkstate = LINK_UP;
  919. netif_start_queue(adapter->netdev);
  920. }
  921. }
  922. static void slic_upr_request_complete(struct adapter *adapter, u32 isr)
  923. {
  924. struct sliccard *card = adapter->card;
  925. struct slic_upr *upr;
  926. unsigned long flags;
  927. spin_lock_irqsave(&adapter->upr_lock, flags);
  928. upr = adapter->upr_list;
  929. if (!upr) {
  930. spin_unlock_irqrestore(&adapter->upr_lock, flags);
  931. return;
  932. }
  933. adapter->upr_list = upr->next;
  934. upr->next = NULL;
  935. adapter->upr_busy = 0;
  936. switch (upr->upr_request) {
  937. case SLIC_UPR_STATS:
  938. {
  939. struct slic_stats *slicstats =
  940. (struct slic_stats *)&adapter->pshmem->inicstats;
  941. struct slic_stats *newstats = slicstats;
  942. struct slic_stats *old = &adapter->inicstats_prev;
  943. struct slicnet_stats *stst = &adapter->slic_stats;
  944. if (isr & ISR_UPCERR) {
  945. dev_err(&adapter->netdev->dev,
  946. "SLIC_UPR_STATS command failed isr[%x]\n",
  947. isr);
  948. break;
  949. }
  950. UPDATE_STATS_GB(stst->tcp.xmit_tcp_segs,
  951. newstats->xmit_tcp_segs_gb,
  952. old->xmit_tcp_segs_gb);
  953. UPDATE_STATS_GB(stst->tcp.xmit_tcp_bytes,
  954. newstats->xmit_tcp_bytes_gb,
  955. old->xmit_tcp_bytes_gb);
  956. UPDATE_STATS_GB(stst->tcp.rcv_tcp_segs,
  957. newstats->rcv_tcp_segs_gb,
  958. old->rcv_tcp_segs_gb);
  959. UPDATE_STATS_GB(stst->tcp.rcv_tcp_bytes,
  960. newstats->rcv_tcp_bytes_gb,
  961. old->rcv_tcp_bytes_gb);
  962. UPDATE_STATS_GB(stst->iface.xmt_bytes,
  963. newstats->xmit_bytes_gb,
  964. old->xmit_bytes_gb);
  965. UPDATE_STATS_GB(stst->iface.xmt_ucast,
  966. newstats->xmit_unicasts_gb,
  967. old->xmit_unicasts_gb);
  968. UPDATE_STATS_GB(stst->iface.rcv_bytes,
  969. newstats->rcv_bytes_gb,
  970. old->rcv_bytes_gb);
  971. UPDATE_STATS_GB(stst->iface.rcv_ucast,
  972. newstats->rcv_unicasts_gb,
  973. old->rcv_unicasts_gb);
  974. UPDATE_STATS_GB(stst->iface.xmt_errors,
  975. newstats->xmit_collisions_gb,
  976. old->xmit_collisions_gb);
  977. UPDATE_STATS_GB(stst->iface.xmt_errors,
  978. newstats->xmit_excess_collisions_gb,
  979. old->xmit_excess_collisions_gb);
  980. UPDATE_STATS_GB(stst->iface.xmt_errors,
  981. newstats->xmit_other_error_gb,
  982. old->xmit_other_error_gb);
  983. UPDATE_STATS_GB(stst->iface.rcv_errors,
  984. newstats->rcv_other_error_gb,
  985. old->rcv_other_error_gb);
  986. UPDATE_STATS_GB(stst->iface.rcv_discards,
  987. newstats->rcv_drops_gb,
  988. old->rcv_drops_gb);
  989. if (newstats->rcv_drops_gb > old->rcv_drops_gb) {
  990. adapter->rcv_drops +=
  991. (newstats->rcv_drops_gb -
  992. old->rcv_drops_gb);
  993. }
  994. memcpy(old, newstats, sizeof(struct slic_stats));
  995. break;
  996. }
  997. case SLIC_UPR_RLSR:
  998. slic_link_upr_complete(adapter, isr);
  999. break;
  1000. case SLIC_UPR_RCONFIG:
  1001. break;
  1002. case SLIC_UPR_PING:
  1003. card->pingstatus |= (isr & ISR_PINGDSMASK);
  1004. break;
  1005. }
  1006. kfree(upr);
  1007. slic_upr_start(adapter);
  1008. spin_unlock_irqrestore(&adapter->upr_lock, flags);
  1009. }
  1010. static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
  1011. {
  1012. return slic_upr_request(adapter, SLIC_UPR_RCONFIG, config, config_h,
  1013. 0, 0);
  1014. }
  1015. /*
  1016. * Compute a checksum of the EEPROM according to RFC 1071.
  1017. */
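/*
 * A worked example of the folding step below, with made-up 16-bit words:
 * summing 0xFFFF + 0x0003 gives 0x10002; folding the carry back in,
 * (0x10002 & 0xFFFF) + (0x10002 >> 16) = 0x0003, and the function returns
 * the one's complement, ~0x0003 = 0xFFFC.
 */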
  1018. static u16 slic_eeprom_cksum(void *eeprom, unsigned len)
  1019. {
  1020. u16 *wp = eeprom;
  1021. u32 checksum = 0;
  1022. while (len > 1) {
  1023. checksum += *(wp++);
  1024. len -= 2;
  1025. }
  1026. if (len > 0)
  1027. checksum += *(u8 *)wp;
  1028. while (checksum >> 16)
  1029. checksum = (checksum & 0xFFFF) + ((checksum >> 16) & 0xFFFF);
  1030. return ~checksum;
  1031. }
  1032. static void slic_rspqueue_free(struct adapter *adapter)
  1033. {
  1034. int i;
  1035. struct slic_rspqueue *rspq = &adapter->rspqueue;
  1036. for (i = 0; i < rspq->num_pages; i++) {
  1037. if (rspq->vaddr[i]) {
  1038. pci_free_consistent(adapter->pcidev, PAGE_SIZE,
  1039. rspq->vaddr[i], rspq->paddr[i]);
  1040. }
  1041. rspq->vaddr[i] = NULL;
  1042. rspq->paddr[i] = 0;
  1043. }
  1044. rspq->offset = 0;
  1045. rspq->pageindex = 0;
  1046. rspq->rspbuf = NULL;
  1047. }
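/*
 * The response queue is SLIC_RSPQ_PAGES_GB DMA-coherent pages, each handed
 * to the card through the rbar register with the buffer count encoded in
 * the low bits.  Note that paddrh is initialised to zero and never updated
 * in this routine, so only the 32-bit slic_rbar path is ever taken as
 * written; the slic_rbar64 branch looks like dead code here.
 */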
  1048. static int slic_rspqueue_init(struct adapter *adapter)
  1049. {
  1050. int i;
  1051. struct slic_rspqueue *rspq = &adapter->rspqueue;
  1052. struct slic_regs __iomem *slic_regs = adapter->slic_regs;
  1053. u32 paddrh = 0;
  1054. memset(rspq, 0, sizeof(struct slic_rspqueue));
  1055. rspq->num_pages = SLIC_RSPQ_PAGES_GB;
  1056. for (i = 0; i < rspq->num_pages; i++) {
  1057. rspq->vaddr[i] = pci_zalloc_consistent(adapter->pcidev,
  1058. PAGE_SIZE,
  1059. &rspq->paddr[i]);
  1060. if (!rspq->vaddr[i]) {
  1061. dev_err(&adapter->pcidev->dev,
  1062. "pci_alloc_consistent failed\n");
  1063. slic_rspqueue_free(adapter);
  1064. return -ENOMEM;
  1065. }
  1066. if (paddrh == 0) {
  1067. slic_reg32_write(&slic_regs->slic_rbar,
  1068. (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE),
  1069. DONT_FLUSH);
  1070. } else {
  1071. slic_reg64_write(adapter, &slic_regs->slic_rbar64,
  1072. (rspq->paddr[i] | SLIC_RSPQ_BUFSINPAGE),
  1073. &slic_regs->slic_addr_upper,
  1074. paddrh, DONT_FLUSH);
  1075. }
  1076. }
  1077. rspq->offset = 0;
  1078. rspq->pageindex = 0;
  1079. rspq->rspbuf = (struct slic_rspbuf *)rspq->vaddr[0];
  1080. return 0;
  1081. }
  1082. static struct slic_rspbuf *slic_rspqueue_getnext(struct adapter *adapter)
  1083. {
  1084. struct slic_rspqueue *rspq = &adapter->rspqueue;
  1085. struct slic_rspbuf *buf;
  1086. if (!(rspq->rspbuf->status))
  1087. return NULL;
  1088. buf = rspq->rspbuf;
  1089. if (++rspq->offset < SLIC_RSPQ_BUFSINPAGE) {
  1090. rspq->rspbuf++;
  1091. } else {
  1092. slic_reg64_write(adapter, &adapter->slic_regs->slic_rbar64,
  1093. (rspq->paddr[rspq->pageindex] | SLIC_RSPQ_BUFSINPAGE),
  1094. &adapter->slic_regs->slic_addr_upper, 0, DONT_FLUSH);
  1095. rspq->pageindex = (rspq->pageindex + 1) % rspq->num_pages;
  1096. rspq->offset = 0;
  1097. rspq->rspbuf = (struct slic_rspbuf *)
  1098. rspq->vaddr[rspq->pageindex];
  1099. }
  1100. return buf;
  1101. }
  1102. static void slic_cmdqmem_free(struct adapter *adapter)
  1103. {
  1104. struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
  1105. int i;
  1106. for (i = 0; i < SLIC_CMDQ_MAXPAGES; i++) {
  1107. if (cmdqmem->pages[i]) {
  1108. pci_free_consistent(adapter->pcidev,
  1109. PAGE_SIZE,
  1110. (void *)cmdqmem->pages[i],
  1111. cmdqmem->dma_pages[i]);
  1112. }
  1113. }
  1114. memset(cmdqmem, 0, sizeof(struct slic_cmdqmem));
  1115. }
  1116. static u32 *slic_cmdqmem_addpage(struct adapter *adapter)
  1117. {
  1118. struct slic_cmdqmem *cmdqmem = &adapter->cmdqmem;
  1119. u32 *pageaddr;
  1120. if (cmdqmem->pagecnt >= SLIC_CMDQ_MAXPAGES)
  1121. return NULL;
  1122. pageaddr = pci_alloc_consistent(adapter->pcidev,
  1123. PAGE_SIZE,
  1124. &cmdqmem->dma_pages[cmdqmem->pagecnt]);
  1125. if (!pageaddr)
  1126. return NULL;
  1127. cmdqmem->pages[cmdqmem->pagecnt] = pageaddr;
  1128. cmdqmem->pagecnt++;
  1129. return pageaddr;
  1130. }
  1131. static void slic_cmdq_free(struct adapter *adapter)
  1132. {
  1133. struct slic_hostcmd *cmd;
  1134. cmd = adapter->cmdq_all.head;
  1135. while (cmd) {
  1136. if (cmd->busy) {
  1137. struct sk_buff *tempskb;
  1138. tempskb = cmd->skb;
  1139. if (tempskb) {
  1140. cmd->skb = NULL;
  1141. dev_kfree_skb_irq(tempskb);
  1142. }
  1143. }
  1144. cmd = cmd->next_all;
  1145. }
  1146. memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
  1147. memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
  1148. memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
  1149. slic_cmdqmem_free(adapter);
  1150. }
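/*
 * slic_cmdq_addcmdpage() carves one DMA page into SLIC_HOSTCMD_SIZE
 * chunks, pairs each chunk with a slic_handle (bounded at 256 handles in
 * total), records the bus address of each command, and pushes the
 * resulting chain onto both the cmdq_all and cmdq_free lists.  The bus
 * address comes from virt_to_bus(), i.e. it assumes a direct virt-to-bus
 * mapping rather than going through the DMA mapping API.
 */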
  1151. static void slic_cmdq_addcmdpage(struct adapter *adapter, u32 *page)
  1152. {
  1153. struct slic_hostcmd *cmd;
  1154. struct slic_hostcmd *prev;
  1155. struct slic_hostcmd *tail;
  1156. struct slic_cmdqueue *cmdq;
  1157. int cmdcnt;
  1158. void *cmdaddr;
  1159. ulong phys_addr;
  1160. u32 phys_addrl;
  1161. u32 phys_addrh;
  1162. struct slic_handle *pslic_handle;
  1163. unsigned long flags;
  1164. cmdaddr = page;
  1165. cmd = cmdaddr;
  1166. cmdcnt = 0;
  1167. phys_addr = virt_to_bus((void *)page);
  1168. phys_addrl = SLIC_GET_ADDR_LOW(phys_addr);
  1169. phys_addrh = SLIC_GET_ADDR_HIGH(phys_addr);
  1170. prev = NULL;
  1171. tail = cmd;
  1172. while ((cmdcnt < SLIC_CMDQ_CMDSINPAGE) &&
  1173. (adapter->slic_handle_ix < 256)) {
  1174. /* Allocate and initialize a SLIC_HANDLE for this command */
  1175. spin_lock_irqsave(&adapter->handle_lock, flags);
  1176. pslic_handle = adapter->pfree_slic_handles;
  1177. adapter->pfree_slic_handles = pslic_handle->next;
  1178. spin_unlock_irqrestore(&adapter->handle_lock, flags);
  1179. pslic_handle->type = SLIC_HANDLE_CMD;
  1180. pslic_handle->address = (void *)cmd;
  1181. pslic_handle->offset = (ushort)adapter->slic_handle_ix++;
  1182. pslic_handle->other_handle = NULL;
  1183. pslic_handle->next = NULL;
  1184. cmd->pslic_handle = pslic_handle;
  1185. cmd->cmd64.hosthandle = pslic_handle->token.handle_token;
  1186. cmd->busy = false;
  1187. cmd->paddrl = phys_addrl;
  1188. cmd->paddrh = phys_addrh;
  1189. cmd->next_all = prev;
  1190. cmd->next = prev;
  1191. prev = cmd;
  1192. phys_addrl += SLIC_HOSTCMD_SIZE;
  1193. cmdaddr += SLIC_HOSTCMD_SIZE;
  1194. cmd = cmdaddr;
  1195. cmdcnt++;
  1196. }
  1197. cmdq = &adapter->cmdq_all;
  1198. cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
  1199. tail->next_all = cmdq->head;
  1200. cmdq->head = prev;
  1201. cmdq = &adapter->cmdq_free;
  1202. spin_lock_irqsave(&cmdq->lock, flags);
  1203. cmdq->count += cmdcnt; /* SLIC_CMDQ_CMDSINPAGE; mooktodo */
  1204. tail->next = cmdq->head;
  1205. cmdq->head = prev;
  1206. spin_unlock_irqrestore(&cmdq->lock, flags);
  1207. }
  1208. static int slic_cmdq_init(struct adapter *adapter)
  1209. {
  1210. int i;
  1211. u32 *pageaddr;
  1212. memset(&adapter->cmdq_all, 0, sizeof(struct slic_cmdqueue));
  1213. memset(&adapter->cmdq_free, 0, sizeof(struct slic_cmdqueue));
  1214. memset(&adapter->cmdq_done, 0, sizeof(struct slic_cmdqueue));
  1215. spin_lock_init(&adapter->cmdq_all.lock);
  1216. spin_lock_init(&adapter->cmdq_free.lock);
  1217. spin_lock_init(&adapter->cmdq_done.lock);
  1218. memset(&adapter->cmdqmem, 0, sizeof(struct slic_cmdqmem));
  1219. adapter->slic_handle_ix = 1;
  1220. for (i = 0; i < SLIC_CMDQ_INITPAGES; i++) {
  1221. pageaddr = slic_cmdqmem_addpage(adapter);
  1222. if (!pageaddr) {
  1223. slic_cmdq_free(adapter);
  1224. return -ENOMEM;
  1225. }
  1226. slic_cmdq_addcmdpage(adapter, pageaddr);
  1227. }
  1228. adapter->slic_handle_ix = 1;
  1229. return 0;
  1230. }
  1231. static void slic_cmdq_reset(struct adapter *adapter)
  1232. {
  1233. struct slic_hostcmd *hcmd;
  1234. struct sk_buff *skb;
  1235. u32 outstanding;
  1236. unsigned long flags;
  1237. spin_lock_irqsave(&adapter->cmdq_free.lock, flags);
  1238. spin_lock(&adapter->cmdq_done.lock);
  1239. outstanding = adapter->cmdq_all.count - adapter->cmdq_done.count;
  1240. outstanding -= adapter->cmdq_free.count;
  1241. hcmd = adapter->cmdq_all.head;
  1242. while (hcmd) {
  1243. if (hcmd->busy) {
  1244. skb = hcmd->skb;
  1245. hcmd->busy = 0;
  1246. hcmd->skb = NULL;
  1247. dev_kfree_skb_irq(skb);
  1248. }
  1249. hcmd = hcmd->next_all;
  1250. }
  1251. adapter->cmdq_free.count = 0;
  1252. adapter->cmdq_free.head = NULL;
  1253. adapter->cmdq_free.tail = NULL;
  1254. adapter->cmdq_done.count = 0;
  1255. adapter->cmdq_done.head = NULL;
  1256. adapter->cmdq_done.tail = NULL;
  1257. adapter->cmdq_free.head = adapter->cmdq_all.head;
  1258. hcmd = adapter->cmdq_all.head;
  1259. while (hcmd) {
  1260. adapter->cmdq_free.count++;
  1261. hcmd->next = hcmd->next_all;
  1262. hcmd = hcmd->next_all;
  1263. }
  1264. if (adapter->cmdq_free.count != adapter->cmdq_all.count) {
  1265. dev_err(&adapter->netdev->dev,
  1266. "free_count %d != all count %d\n",
  1267. adapter->cmdq_free.count, adapter->cmdq_all.count);
  1268. }
  1269. spin_unlock(&adapter->cmdq_done.lock);
  1270. spin_unlock_irqrestore(&adapter->cmdq_free.lock, flags);
  1271. }
  1272. static void slic_cmdq_getdone(struct adapter *adapter)
  1273. {
  1274. struct slic_cmdqueue *done_cmdq = &adapter->cmdq_done;
  1275. struct slic_cmdqueue *free_cmdq = &adapter->cmdq_free;
  1276. unsigned long flags;
  1277. spin_lock_irqsave(&done_cmdq->lock, flags);
  1278. free_cmdq->head = done_cmdq->head;
  1279. free_cmdq->count = done_cmdq->count;
  1280. done_cmdq->head = NULL;
  1281. done_cmdq->tail = NULL;
  1282. done_cmdq->count = 0;
  1283. spin_unlock_irqrestore(&done_cmdq->lock, flags);
  1284. }
  1285. static struct slic_hostcmd *slic_cmdq_getfree(struct adapter *adapter)
  1286. {
  1287. struct slic_cmdqueue *cmdq = &adapter->cmdq_free;
  1288. struct slic_hostcmd *cmd = NULL;
  1289. unsigned long flags;
  1290. lock_and_retry:
  1291. spin_lock_irqsave(&cmdq->lock, flags);
  1292. retry:
  1293. cmd = cmdq->head;
  1294. if (cmd) {
  1295. cmdq->head = cmd->next;
  1296. cmdq->count--;
  1297. spin_unlock_irqrestore(&cmdq->lock, flags);
  1298. } else {
  1299. slic_cmdq_getdone(adapter);
  1300. cmd = cmdq->head;
  1301. if (cmd) {
  1302. goto retry;
  1303. } else {
  1304. u32 *pageaddr;
  1305. spin_unlock_irqrestore(&cmdq->lock, flags);
  1306. pageaddr = slic_cmdqmem_addpage(adapter);
  1307. if (pageaddr) {
  1308. slic_cmdq_addcmdpage(adapter, pageaddr);
  1309. goto lock_and_retry;
  1310. }
  1311. }
  1312. }
  1313. return cmd;
  1314. }
  1315. static void slic_cmdq_putdone_irq(struct adapter *adapter,
  1316. struct slic_hostcmd *cmd)
  1317. {
  1318. struct slic_cmdqueue *cmdq = &adapter->cmdq_done;
  1319. spin_lock(&cmdq->lock);
  1320. cmd->busy = 0;
  1321. cmd->next = cmdq->head;
  1322. cmdq->head = cmd;
  1323. cmdq->count++;
  1324. if ((adapter->xmitq_full) && (cmdq->count > 10))
  1325. netif_wake_queue(adapter->netdev);
  1326. spin_unlock(&cmdq->lock);
  1327. }
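/*
* slic_rcvqueue_fill - post up to SLIC_RCVQ_FILLENTRIES receive buffers.
*
* Allocates an skb per entry, maps it for DMA and hands the physical
* address to the card through slic_hbar (or slic_hbar64 plus
* slic_addr_upper when the upper 32 bits are non-zero), then links the
* skb onto the software receive queue.  A zero low DMA address is
* logged (and, with KLUDGE_FOR_4GB_BOUNDARY, the buffer is skipped and
* reallocated).  Returns the number of buffers actually posted.
*/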
  1328. static int slic_rcvqueue_fill(struct adapter *adapter)
  1329. {
  1330. void *paddr;
  1331. u32 paddrl;
  1332. u32 paddrh;
  1333. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1334. int i = 0;
  1335. struct device *dev = &adapter->netdev->dev;
  1336. while (i < SLIC_RCVQ_FILLENTRIES) {
  1337. struct slic_rcvbuf *rcvbuf;
  1338. struct sk_buff *skb;
  1339. #ifdef KLUDGE_FOR_4GB_BOUNDARY
  1340. retry_rcvqfill:
  1341. #endif
  1342. skb = alloc_skb(SLIC_RCVQ_RCVBUFSIZE, GFP_ATOMIC);
  1343. if (skb) {
  1344. paddr = (void *)(unsigned long)
  1345. pci_map_single(adapter->pcidev,
  1346. skb->data,
  1347. SLIC_RCVQ_RCVBUFSIZE,
  1348. PCI_DMA_FROMDEVICE);
  1349. paddrl = SLIC_GET_ADDR_LOW(paddr);
  1350. paddrh = SLIC_GET_ADDR_HIGH(paddr);
  1351. skb->len = SLIC_RCVBUF_HEADSIZE;
  1352. rcvbuf = (struct slic_rcvbuf *)skb->head;
  1353. rcvbuf->status = 0;
  1354. skb->next = NULL;
  1355. #ifdef KLUDGE_FOR_4GB_BOUNDARY
  1356. if (paddrl == 0) {
  1357. dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
  1358. __func__);
  1359. dev_err(dev, "skb[%p] PROBLEM\n", skb);
  1360. dev_err(dev, " skbdata[%p]\n",
  1361. skb->data);
  1362. dev_err(dev, " skblen[%x]\n", skb->len);
  1363. dev_err(dev, " paddr[%p]\n", paddr);
  1364. dev_err(dev, " paddrl[%x]\n", paddrl);
  1365. dev_err(dev, " paddrh[%x]\n", paddrh);
  1366. dev_err(dev, " rcvq->head[%p]\n",
  1367. rcvq->head);
  1368. dev_err(dev, " rcvq->tail[%p]\n",
  1369. rcvq->tail);
  1370. dev_err(dev, " rcvq->count[%x]\n",
  1371. rcvq->count);
  1372. dev_err(dev, "SKIP THIS SKB!!!!!!!!\n");
  1373. goto retry_rcvqfill;
  1374. }
  1375. #else
  1376. if (paddrl == 0) {
  1377. dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
  1378. __func__);
  1379. dev_err(dev, "skb[%p] PROBLEM\n", skb);
  1380. dev_err(dev, " skbdata[%p]\n",
  1381. skb->data);
  1382. dev_err(dev, " skblen[%x]\n", skb->len);
  1383. dev_err(dev, " paddr[%p]\n", paddr);
  1384. dev_err(dev, " paddrl[%x]\n", paddrl);
  1385. dev_err(dev, " paddrh[%x]\n", paddrh);
  1386. dev_err(dev, " rcvq->head[%p]\n",
  1387. rcvq->head);
  1388. dev_err(dev, " rcvq->tail[%p]\n",
  1389. rcvq->tail);
  1390. dev_err(dev, " rcvq->count[%x]\n",
  1391. rcvq->count);
  1392. dev_err(dev, "GIVE TO CARD ANYWAY\n");
  1393. }
  1394. #endif
  1395. if (paddrh == 0) {
  1396. slic_reg32_write(&adapter->slic_regs->slic_hbar,
  1397. (u32)paddrl, DONT_FLUSH);
  1398. } else {
  1399. slic_reg64_write(adapter,
  1400. &adapter->slic_regs->slic_hbar64,
  1401. paddrl,
  1402. &adapter->slic_regs->slic_addr_upper,
  1403. paddrh, DONT_FLUSH);
  1404. }
  1405. if (rcvq->head)
  1406. rcvq->tail->next = skb;
  1407. else
  1408. rcvq->head = skb;
  1409. rcvq->tail = skb;
  1410. rcvq->count++;
  1411. i++;
  1412. } else {
  1413. dev_err(&adapter->netdev->dev,
  1414. "slic_rcvqueue_fill could only get [%d] skbuffs\n",
  1415. i);
  1416. break;
  1417. }
  1418. }
  1419. return i;
  1420. }
  1421. static void slic_rcvqueue_free(struct adapter *adapter)
  1422. {
  1423. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1424. struct sk_buff *skb;
  1425. while (rcvq->head) {
  1426. skb = rcvq->head;
  1427. rcvq->head = rcvq->head->next;
  1428. dev_kfree_skb(skb);
  1429. }
  1430. rcvq->tail = NULL;
  1431. rcvq->head = NULL;
  1432. rcvq->count = 0;
  1433. }
  1434. static int slic_rcvqueue_init(struct adapter *adapter)
  1435. {
  1436. int i, count;
  1437. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1438. rcvq->tail = NULL;
  1439. rcvq->head = NULL;
  1440. rcvq->size = SLIC_RCVQ_ENTRIES;
  1441. rcvq->errors = 0;
  1442. rcvq->count = 0;
  1443. i = SLIC_RCVQ_ENTRIES / SLIC_RCVQ_FILLENTRIES;
  1444. count = 0;
  1445. while (i) {
  1446. count += slic_rcvqueue_fill(adapter);
  1447. i--;
  1448. }
  1449. if (rcvq->count < SLIC_RCVQ_MINENTRIES) {
  1450. slic_rcvqueue_free(adapter);
  1451. return -ENOMEM;
  1452. }
  1453. return 0;
  1454. }
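/*
* slic_rcvqueue_getnext - take the next completed receive buffer.
*
* Returns the head skb only when the card has set IRHDDR_SVALID in its
* slic_rcvbuf header; otherwise returns NULL.  In either case the queue
* is topped back up to SLIC_RCVQ_FILLTHRESH with slic_rcvqueue_fill().
*/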
  1455. static struct sk_buff *slic_rcvqueue_getnext(struct adapter *adapter)
  1456. {
  1457. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1458. struct sk_buff *skb;
  1459. struct slic_rcvbuf *rcvbuf;
  1460. int count;
  1461. if (rcvq->count) {
  1462. skb = rcvq->head;
  1463. rcvbuf = (struct slic_rcvbuf *)skb->head;
  1464. if (rcvbuf->status & IRHDDR_SVALID) {
  1465. rcvq->head = rcvq->head->next;
  1466. skb->next = NULL;
  1467. rcvq->count--;
  1468. } else {
  1469. skb = NULL;
  1470. }
  1471. } else {
  1472. dev_err(&adapter->netdev->dev,
  1473. "RcvQ Empty!! rcvq[%p] count[%x]\n", rcvq, rcvq->count);
  1474. skb = NULL;
  1475. }
  1476. while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
  1477. count = slic_rcvqueue_fill(adapter);
  1478. if (!count)
  1479. break;
  1480. }
  1481. if (skb)
  1482. rcvq->errors = 0;
  1483. return skb;
  1484. }
  1485. static u32 slic_rcvqueue_reinsert(struct adapter *adapter, struct sk_buff *skb)
  1486. {
  1487. struct slic_rcvqueue *rcvq = &adapter->rcvqueue;
  1488. void *paddr;
  1489. u32 paddrl;
  1490. u32 paddrh;
  1491. struct slic_rcvbuf *rcvbuf = (struct slic_rcvbuf *)skb->head;
  1492. struct device *dev;
  1493. paddr = (void *)(unsigned long)
  1494. pci_map_single(adapter->pcidev, skb->head,
  1495. SLIC_RCVQ_RCVBUFSIZE, PCI_DMA_FROMDEVICE);
  1496. rcvbuf->status = 0;
  1497. skb->next = NULL;
  1498. paddrl = SLIC_GET_ADDR_LOW(paddr);
  1499. paddrh = SLIC_GET_ADDR_HIGH(paddr);
  1500. if (paddrl == 0) {
  1501. dev = &adapter->netdev->dev;
  1502. dev_err(dev, "%s: LOW 32bits PHYSICAL ADDRESS == 0\n",
  1503. __func__);
  1504. dev_err(dev, "skb[%p] PROBLEM\n", skb);
  1505. dev_err(dev, " skbdata[%p]\n", skb->data);
  1506. dev_err(dev, " skblen[%x]\n", skb->len);
  1507. dev_err(dev, " paddr[%p]\n", paddr);
  1508. dev_err(dev, " paddrl[%x]\n", paddrl);
  1509. dev_err(dev, " paddrh[%x]\n", paddrh);
  1510. dev_err(dev, " rcvq->head[%p]\n", rcvq->head);
  1511. dev_err(dev, " rcvq->tail[%p]\n", rcvq->tail);
  1512. dev_err(dev, " rcvq->count[%x]\n", rcvq->count);
  1513. }
  1514. if (paddrh == 0) {
  1515. slic_reg32_write(&adapter->slic_regs->slic_hbar, (u32)paddrl,
  1516. DONT_FLUSH);
  1517. } else {
  1518. slic_reg64_write(adapter, &adapter->slic_regs->slic_hbar64,
  1519. paddrl, &adapter->slic_regs->slic_addr_upper,
  1520. paddrh, DONT_FLUSH);
  1521. }
  1522. if (rcvq->head)
  1523. rcvq->tail->next = skb;
  1524. else
  1525. rcvq->head = skb;
  1526. rcvq->tail = skb;
  1527. rcvq->count++;
  1528. return rcvq->count;
  1529. }
  1530. /*
  1531. * slic_link_event_handler -
  1532. *
  1533. * Initiate a link configuration sequence. The link configuration begins
  1534. * by issuing a READ_LINK_STATUS command to the Utility Processor on the
1535. * SLIC. Since the command finishes asynchronously, the slic_upr_request_complete
1536. * routine will follow it up with a UP configuration write command, which
  1537. * will also complete asynchronously.
  1538. *
  1539. */
  1540. static int slic_link_event_handler(struct adapter *adapter)
  1541. {
  1542. int status;
  1543. struct slic_shmem *pshmem;
  1544. if (adapter->state != ADAPT_UP) {
  1545. /* Adapter is not operational. Ignore. */
  1546. return -ENODEV;
  1547. }
  1548. pshmem = (struct slic_shmem *)(unsigned long)adapter->phys_shmem;
  1549. #if BITS_PER_LONG == 64
  1550. status = slic_upr_request(adapter,
  1551. SLIC_UPR_RLSR,
  1552. SLIC_GET_ADDR_LOW(&pshmem->linkstatus),
  1553. SLIC_GET_ADDR_HIGH(&pshmem->linkstatus),
  1554. 0, 0);
  1555. #else
  1556. status = slic_upr_request(adapter, SLIC_UPR_RLSR,
  1557. (u32)&pshmem->linkstatus, /* no 4GB wrap guaranteed */
  1558. 0, 0, 0);
  1559. #endif
  1560. return status;
  1561. }
  1562. static void slic_init_cleanup(struct adapter *adapter)
  1563. {
  1564. if (adapter->intrregistered) {
  1565. adapter->intrregistered = 0;
  1566. free_irq(adapter->netdev->irq, adapter->netdev);
  1567. }
  1568. if (adapter->pshmem) {
  1569. pci_free_consistent(adapter->pcidev,
  1570. sizeof(struct slic_shmem),
  1571. adapter->pshmem, adapter->phys_shmem);
  1572. adapter->pshmem = NULL;
  1573. adapter->phys_shmem = (dma_addr_t)(unsigned long)NULL;
  1574. }
  1575. if (adapter->pingtimerset) {
  1576. adapter->pingtimerset = 0;
  1577. del_timer(&adapter->pingtimer);
  1578. }
  1579. slic_rspqueue_free(adapter);
  1580. slic_cmdq_free(adapter);
  1581. slic_rcvqueue_free(adapter);
  1582. }
  1583. /*
  1584. * Allocate a mcast_address structure to hold the multicast address.
  1585. * Link it in.
  1586. */
  1587. static int slic_mcast_add_list(struct adapter *adapter, char *address)
  1588. {
  1589. struct mcast_address *mcaddr, *mlist;
  1590. /* Check to see if it already exists */
  1591. mlist = adapter->mcastaddrs;
  1592. while (mlist) {
  1593. if (ether_addr_equal(mlist->address, address))
  1594. return 0;
  1595. mlist = mlist->next;
  1596. }
  1597. /* Doesn't already exist. Allocate a structure to hold it */
  1598. mcaddr = kmalloc(sizeof(struct mcast_address), GFP_ATOMIC);
  1599. if (mcaddr == NULL)
  1600. return 1;
  1601. ether_addr_copy(mcaddr->address, address);
  1602. mcaddr->next = adapter->mcastaddrs;
  1603. adapter->mcastaddrs = mcaddr;
  1604. return 0;
  1605. }
  1606. static void slic_mcast_set_list(struct net_device *dev)
  1607. {
  1608. struct adapter *adapter = netdev_priv(dev);
  1609. int status = 0;
  1610. char *addresses;
  1611. struct netdev_hw_addr *ha;
  1612. netdev_for_each_mc_addr(ha, dev) {
  1613. addresses = (char *)&ha->addr;
  1614. status = slic_mcast_add_list(adapter, addresses);
  1615. if (status != 0)
  1616. break;
  1617. slic_mcast_set_bit(adapter, addresses);
  1618. }
  1619. if (adapter->devflags_prev != dev->flags) {
  1620. adapter->macopts = MAC_DIRECTED;
  1621. if (dev->flags) {
  1622. if (dev->flags & IFF_BROADCAST)
  1623. adapter->macopts |= MAC_BCAST;
  1624. if (dev->flags & IFF_PROMISC)
  1625. adapter->macopts |= MAC_PROMISC;
  1626. if (dev->flags & IFF_ALLMULTI)
  1627. adapter->macopts |= MAC_ALLMCAST;
  1628. if (dev->flags & IFF_MULTICAST)
  1629. adapter->macopts |= MAC_MCAST;
  1630. }
  1631. adapter->devflags_prev = dev->flags;
  1632. slic_config_set(adapter, true);
  1633. } else {
  1634. if (status == 0)
  1635. slic_mcast_set_mask(adapter);
  1636. }
  1637. }
  1638. #define XMIT_FAIL_LINK_STATE 1
  1639. #define XMIT_FAIL_ZERO_LENGTH 2
  1640. #define XMIT_FAIL_HOSTCMD_FAIL 3
  1641. static void slic_xmit_build_request(struct adapter *adapter,
  1642. struct slic_hostcmd *hcmd, struct sk_buff *skb)
  1643. {
  1644. struct slic_host64_cmd *ihcmd;
  1645. ulong phys_addr;
  1646. ihcmd = &hcmd->cmd64;
  1647. ihcmd->flags = adapter->port << IHFLG_IFSHFT;
  1648. ihcmd->command = IHCMD_XMT_REQ;
  1649. ihcmd->u.slic_buffers.totlen = skb->len;
  1650. phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
  1651. PCI_DMA_TODEVICE);
  1652. ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
  1653. ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
  1654. ihcmd->u.slic_buffers.bufs[0].length = skb->len;
  1655. #if BITS_PER_LONG == 64
  1656. hcmd->cmdsize = (u32)((((u64)&ihcmd->u.slic_buffers.bufs[1] -
  1657. (u64)hcmd) + 31) >> 5);
  1658. #else
  1659. hcmd->cmdsize = (((u32)&ihcmd->u.slic_buffers.bufs[1] -
  1660. (u32)hcmd) + 31) >> 5;
  1661. #endif
  1662. }
  1663. static void slic_xmit_fail(struct adapter *adapter,
  1664. struct sk_buff *skb,
  1665. void *cmd, u32 skbtype, u32 status)
  1666. {
  1667. if (adapter->xmitq_full)
  1668. netif_stop_queue(adapter->netdev);
  1669. if ((cmd == NULL) && (status <= XMIT_FAIL_HOSTCMD_FAIL)) {
  1670. switch (status) {
  1671. case XMIT_FAIL_LINK_STATE:
  1672. dev_err(&adapter->netdev->dev,
  1673. "reject xmit skb[%p: %x] linkstate[%s] adapter[%s:%d] card[%s:%d]\n",
  1674. skb, skb->pkt_type,
  1675. SLIC_LINKSTATE(adapter->linkstate),
  1676. SLIC_ADAPTER_STATE(adapter->state),
  1677. adapter->state,
  1678. SLIC_CARD_STATE(adapter->card->state),
  1679. adapter->card->state);
  1680. break;
  1681. case XMIT_FAIL_ZERO_LENGTH:
  1682. dev_err(&adapter->netdev->dev,
  1683. "xmit_start skb->len == 0 skb[%p] type[%x]\n",
  1684. skb, skb->pkt_type);
  1685. break;
  1686. case XMIT_FAIL_HOSTCMD_FAIL:
  1687. dev_err(&adapter->netdev->dev,
  1688. "xmit_start skb[%p] type[%x] No host commands available\n",
  1689. skb, skb->pkt_type);
  1690. break;
  1691. }
  1692. }
  1693. dev_kfree_skb(skb);
  1694. adapter->netdev->stats.tx_dropped++;
  1695. }
  1696. static void slic_rcv_handle_error(struct adapter *adapter,
  1697. struct slic_rcvbuf *rcvbuf)
  1698. {
  1699. struct slic_hddr_wds *hdr = (struct slic_hddr_wds *)rcvbuf->data;
  1700. struct net_device *netdev = adapter->netdev;
  1701. if (adapter->devid != SLIC_1GB_DEVICE_ID) {
  1702. if (hdr->frame_status14 & VRHSTAT_802OE)
  1703. adapter->if_events.oflow802++;
  1704. if (hdr->frame_status14 & VRHSTAT_TPOFLO)
  1705. adapter->if_events.Tprtoflow++;
  1706. if (hdr->frame_status_b14 & VRHSTATB_802UE)
  1707. adapter->if_events.uflow802++;
  1708. if (hdr->frame_status_b14 & VRHSTATB_RCVE) {
  1709. adapter->if_events.rcvearly++;
  1710. netdev->stats.rx_fifo_errors++;
  1711. }
  1712. if (hdr->frame_status_b14 & VRHSTATB_BUFF) {
  1713. adapter->if_events.Bufov++;
  1714. netdev->stats.rx_over_errors++;
  1715. }
  1716. if (hdr->frame_status_b14 & VRHSTATB_CARRE) {
  1717. adapter->if_events.Carre++;
  1718. netdev->stats.tx_carrier_errors++;
  1719. }
  1720. if (hdr->frame_status_b14 & VRHSTATB_LONGE)
  1721. adapter->if_events.Longe++;
  1722. if (hdr->frame_status_b14 & VRHSTATB_PREA)
  1723. adapter->if_events.Invp++;
  1724. if (hdr->frame_status_b14 & VRHSTATB_CRC) {
  1725. adapter->if_events.Crc++;
  1726. netdev->stats.rx_crc_errors++;
  1727. }
  1728. if (hdr->frame_status_b14 & VRHSTATB_DRBL)
  1729. adapter->if_events.Drbl++;
  1730. if (hdr->frame_status_b14 & VRHSTATB_CODE)
  1731. adapter->if_events.Code++;
  1732. if (hdr->frame_status_b14 & VRHSTATB_TPCSUM)
  1733. adapter->if_events.TpCsum++;
  1734. if (hdr->frame_status_b14 & VRHSTATB_TPHLEN)
  1735. adapter->if_events.TpHlen++;
  1736. if (hdr->frame_status_b14 & VRHSTATB_IPCSUM)
  1737. adapter->if_events.IpCsum++;
  1738. if (hdr->frame_status_b14 & VRHSTATB_IPLERR)
  1739. adapter->if_events.IpLen++;
  1740. if (hdr->frame_status_b14 & VRHSTATB_IPHERR)
  1741. adapter->if_events.IpHlen++;
  1742. } else {
  1743. if (hdr->frame_statusGB & VGBSTAT_XPERR) {
  1744. u32 xerr = hdr->frame_statusGB >> VGBSTAT_XERRSHFT;
  1745. if (xerr == VGBSTAT_XCSERR)
  1746. adapter->if_events.TpCsum++;
  1747. if (xerr == VGBSTAT_XUFLOW)
  1748. adapter->if_events.Tprtoflow++;
  1749. if (xerr == VGBSTAT_XHLEN)
  1750. adapter->if_events.TpHlen++;
  1751. }
  1752. if (hdr->frame_statusGB & VGBSTAT_NETERR) {
1753. u32 nerr = (hdr->frame_statusGB >> VGBSTAT_NERRSHFT) &
1754. VGBSTAT_NERRMSK;
  1757. if (nerr == VGBSTAT_NCSERR)
  1758. adapter->if_events.IpCsum++;
  1759. if (nerr == VGBSTAT_NUFLOW)
  1760. adapter->if_events.IpLen++;
  1761. if (nerr == VGBSTAT_NHLEN)
  1762. adapter->if_events.IpHlen++;
  1763. }
  1764. if (hdr->frame_statusGB & VGBSTAT_LNKERR) {
  1765. u32 lerr = hdr->frame_statusGB & VGBSTAT_LERRMSK;
  1766. if (lerr == VGBSTAT_LDEARLY)
  1767. adapter->if_events.rcvearly++;
  1768. if (lerr == VGBSTAT_LBOFLO)
  1769. adapter->if_events.Bufov++;
  1770. if (lerr == VGBSTAT_LCODERR)
  1771. adapter->if_events.Code++;
  1772. if (lerr == VGBSTAT_LDBLNBL)
  1773. adapter->if_events.Drbl++;
  1774. if (lerr == VGBSTAT_LCRCERR)
  1775. adapter->if_events.Crc++;
  1776. if (lerr == VGBSTAT_LOFLO)
  1777. adapter->if_events.oflow802++;
  1778. if (lerr == VGBSTAT_LUFLO)
  1779. adapter->if_events.uflow802++;
  1780. }
  1781. }
  1782. }
  1783. #define TCP_OFFLOAD_FRAME_PUSHFLAG 0x10000000
  1784. #define M_FAST_PATH 0x0040
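/*
* slic_rcv_handler - receive-side interrupt work.
*
* Drains completed buffers from the receive queue: frames flagged with
* IRHDDR_ERR are counted and the buffer is reposted, frames rejected by
* slic_mac_filter() are reposted as well, and good frames are trimmed
* to their reported length and passed up with netif_rx().  When
* SLIC_INTERRUPT_PROCESS_LIMIT is set, processing yields after
* SLIC_RCVQ_MAX_PROCESS_ISR frames per interrupt.
*/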
  1785. static void slic_rcv_handler(struct adapter *adapter)
  1786. {
  1787. struct net_device *netdev = adapter->netdev;
  1788. struct sk_buff *skb;
  1789. struct slic_rcvbuf *rcvbuf;
  1790. u32 frames = 0;
  1791. while ((skb = slic_rcvqueue_getnext(adapter))) {
  1792. u32 rx_bytes;
  1793. rcvbuf = (struct slic_rcvbuf *)skb->head;
  1794. adapter->card->events++;
  1795. if (rcvbuf->status & IRHDDR_ERR) {
  1796. adapter->rx_errors++;
  1797. slic_rcv_handle_error(adapter, rcvbuf);
  1798. slic_rcvqueue_reinsert(adapter, skb);
  1799. continue;
  1800. }
  1801. if (!slic_mac_filter(adapter, (struct ether_header *)
  1802. rcvbuf->data)) {
  1803. slic_rcvqueue_reinsert(adapter, skb);
  1804. continue;
  1805. }
  1806. skb_pull(skb, SLIC_RCVBUF_HEADSIZE);
  1807. rx_bytes = (rcvbuf->length & IRHDDR_FLEN_MSK);
  1808. skb_put(skb, rx_bytes);
  1809. netdev->stats.rx_packets++;
  1810. netdev->stats.rx_bytes += rx_bytes;
  1811. #if SLIC_OFFLOAD_IP_CHECKSUM
  1812. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1813. #endif
  1814. skb->dev = adapter->netdev;
  1815. skb->protocol = eth_type_trans(skb, skb->dev);
  1816. netif_rx(skb);
  1817. ++frames;
  1818. #if SLIC_INTERRUPT_PROCESS_LIMIT
  1819. if (frames >= SLIC_RCVQ_MAX_PROCESS_ISR) {
  1820. adapter->rcv_interrupt_yields++;
  1821. break;
  1822. }
  1823. #endif
  1824. }
  1825. adapter->max_isr_rcvs = max(adapter->max_isr_rcvs, frames);
  1826. }
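/*
* slic_xmit_complete - retire finished transmit commands.
*
* Walks the response queue; each response carries the 32-bit host
* handle token whose handle_index field is used to look the originating
* slic_hostcmd back up in adapter->slic_handles[].  For SLIC_CMD_DUMB
* commands the skb is freed and the command is returned through
* slic_cmdq_putdone_irq(), which may also wake a stalled transmit queue.
*/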
  1827. static void slic_xmit_complete(struct adapter *adapter)
  1828. {
  1829. struct slic_hostcmd *hcmd;
  1830. struct slic_rspbuf *rspbuf;
  1831. u32 frames = 0;
  1832. struct slic_handle_word slic_handle_word;
  1833. do {
  1834. rspbuf = slic_rspqueue_getnext(adapter);
  1835. if (!rspbuf)
  1836. break;
  1837. adapter->xmit_completes++;
  1838. adapter->card->events++;
  1839. /*
  1840. * Get the complete host command buffer
  1841. */
  1842. slic_handle_word.handle_token = rspbuf->hosthandle;
1843. hcmd = adapter->slic_handles[slic_handle_word.handle_index].address;
  1846. /* hcmd = (struct slic_hostcmd *) rspbuf->hosthandle; */
  1847. if (hcmd->type == SLIC_CMD_DUMB) {
  1848. if (hcmd->skb)
  1849. dev_kfree_skb_irq(hcmd->skb);
  1850. slic_cmdq_putdone_irq(adapter, hcmd);
  1851. }
  1852. rspbuf->status = 0;
  1853. rspbuf->hosthandle = 0;
  1854. frames++;
  1855. } while (1);
  1856. adapter->max_isr_xmits = max(adapter->max_isr_xmits, frames);
  1857. }
  1858. static void slic_interrupt_card_up(u32 isr, struct adapter *adapter,
  1859. struct net_device *dev)
  1860. {
  1861. if (isr & ~ISR_IO) {
  1862. if (isr & ISR_ERR) {
  1863. adapter->error_interrupts++;
  1864. if (isr & ISR_RMISS) {
  1865. int count;
  1866. int pre_count;
  1867. int errors;
  1868. struct slic_rcvqueue *rcvq =
  1869. &adapter->rcvqueue;
  1870. adapter->error_rmiss_interrupts++;
  1871. if (!rcvq->errors)
  1872. rcv_count = rcvq->count;
  1873. pre_count = rcvq->count;
  1874. errors = rcvq->errors;
  1875. while (rcvq->count < SLIC_RCVQ_FILLTHRESH) {
  1876. count = slic_rcvqueue_fill(adapter);
  1877. if (!count)
  1878. break;
  1879. }
  1880. } else if (isr & ISR_XDROP) {
  1881. dev_err(&dev->dev,
  1882. "isr & ISR_ERR [%x] ISR_XDROP\n",
  1883. isr);
  1884. } else {
  1885. dev_err(&dev->dev,
  1886. "isr & ISR_ERR [%x]\n",
  1887. isr);
  1888. }
  1889. }
  1890. if (isr & ISR_LEVENT) {
  1891. adapter->linkevent_interrupts++;
  1892. if (slic_link_event_handler(adapter))
  1893. adapter->linkevent_interrupts--;
  1894. }
  1895. if ((isr & ISR_UPC) || (isr & ISR_UPCERR) ||
  1896. (isr & ISR_UPCBSY)) {
  1897. adapter->upr_interrupts++;
  1898. slic_upr_request_complete(adapter, isr);
  1899. }
  1900. }
  1901. if (isr & ISR_RCV) {
  1902. adapter->rcv_interrupts++;
  1903. slic_rcv_handler(adapter);
  1904. }
  1905. if (isr & ISR_CMD) {
  1906. adapter->xmit_interrupts++;
  1907. slic_xmit_complete(adapter);
  1908. }
  1909. }
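/*
* slic_interrupt - shared PCI interrupt handler.
*
* The card reports events through the shared-memory ISR word: if it is
* non-zero the handler masks interrupts with ICR_INT_MASK, takes a
* copy, clears the shared word, dispatches on card state (full
* processing when CARD_UP, utility-processor completions only when
* CARD_DOWN) and then writes 0 to the ISR register to re-arm.
* Otherwise the interrupt is counted as false and ignored.
*/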
  1910. static irqreturn_t slic_interrupt(int irq, void *dev_id)
  1911. {
  1912. struct net_device *dev = dev_id;
  1913. struct adapter *adapter = netdev_priv(dev);
  1914. u32 isr;
  1915. if ((adapter->pshmem) && (adapter->pshmem->isr)) {
  1916. slic_reg32_write(&adapter->slic_regs->slic_icr,
  1917. ICR_INT_MASK, FLUSH);
  1918. isr = adapter->isrcopy = adapter->pshmem->isr;
  1919. adapter->pshmem->isr = 0;
  1920. adapter->num_isrs++;
  1921. switch (adapter->card->state) {
  1922. case CARD_UP:
  1923. slic_interrupt_card_up(isr, adapter, dev);
  1924. break;
  1925. case CARD_DOWN:
  1926. if ((isr & ISR_UPC) ||
  1927. (isr & ISR_UPCERR) || (isr & ISR_UPCBSY)) {
  1928. adapter->upr_interrupts++;
  1929. slic_upr_request_complete(adapter, isr);
  1930. }
  1931. break;
  1932. }
  1933. adapter->isrcopy = 0;
  1934. adapter->all_reg_writes += 2;
  1935. adapter->isr_reg_writes++;
  1936. slic_reg32_write(&adapter->slic_regs->slic_isr, 0, FLUSH);
  1937. } else {
  1938. adapter->false_interrupts++;
  1939. }
  1940. return IRQ_HANDLED;
  1941. }
  1942. #define NORMAL_ETHFRAME 0
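/*
* slic_xmit_start - ndo_start_xmit entry point.
*
* Rejects frames while the link, adapter or card is not up, and frames
* of zero length, then takes a free host command, builds a single
* buffer XMT request around the DMA-mapped skb and kicks the card
* through slic_cbar (or slic_cbar64 for addresses above 4 GB).  All
* failure paths go through slic_xmit_fail(), which drops the skb and
* bumps tx_dropped.
*/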
  1943. static netdev_tx_t slic_xmit_start(struct sk_buff *skb, struct net_device *dev)
  1944. {
  1945. struct sliccard *card;
  1946. struct adapter *adapter = netdev_priv(dev);
  1947. struct slic_hostcmd *hcmd = NULL;
  1948. u32 status = 0;
  1949. void *offloadcmd = NULL;
  1950. card = adapter->card;
  1951. if ((adapter->linkstate != LINK_UP) ||
  1952. (adapter->state != ADAPT_UP) || (card->state != CARD_UP)) {
  1953. status = XMIT_FAIL_LINK_STATE;
  1954. goto xmit_fail;
  1955. } else if (skb->len == 0) {
  1956. status = XMIT_FAIL_ZERO_LENGTH;
  1957. goto xmit_fail;
  1958. }
  1959. hcmd = slic_cmdq_getfree(adapter);
  1960. if (!hcmd) {
  1961. adapter->xmitq_full = 1;
  1962. status = XMIT_FAIL_HOSTCMD_FAIL;
  1963. goto xmit_fail;
  1964. }
  1965. hcmd->skb = skb;
  1966. hcmd->busy = 1;
  1967. hcmd->type = SLIC_CMD_DUMB;
  1968. slic_xmit_build_request(adapter, hcmd, skb);
  1969. dev->stats.tx_packets++;
  1970. dev->stats.tx_bytes += skb->len;
  1971. #ifdef DEBUG_DUMP
  1972. if (adapter->kill_card) {
1973. struct slic_host64_cmd *ihcmd;
  1974. ihcmd = &hcmd->cmd64;
  1975. ihcmd->flags |= 0x40;
  1976. adapter->kill_card = 0; /* only do this once */
  1977. }
  1978. #endif
  1979. if (hcmd->paddrh == 0) {
  1980. slic_reg32_write(&adapter->slic_regs->slic_cbar,
  1981. (hcmd->paddrl | hcmd->cmdsize), DONT_FLUSH);
  1982. } else {
  1983. slic_reg64_write(adapter, &adapter->slic_regs->slic_cbar64,
  1984. (hcmd->paddrl | hcmd->cmdsize),
  1985. &adapter->slic_regs->slic_addr_upper,
  1986. hcmd->paddrh, DONT_FLUSH);
  1987. }
  1988. xmit_done:
  1989. return NETDEV_TX_OK;
  1990. xmit_fail:
  1991. slic_xmit_fail(adapter, skb, offloadcmd, NORMAL_ETHFRAME, status);
  1992. goto xmit_done;
  1993. }
  1994. static void slic_adapter_freeresources(struct adapter *adapter)
  1995. {
  1996. slic_init_cleanup(adapter);
  1997. adapter->error_interrupts = 0;
  1998. adapter->rcv_interrupts = 0;
  1999. adapter->xmit_interrupts = 0;
  2000. adapter->linkevent_interrupts = 0;
  2001. adapter->upr_interrupts = 0;
  2002. adapter->num_isrs = 0;
  2003. adapter->xmit_completes = 0;
  2004. adapter->rcv_broadcasts = 0;
  2005. adapter->rcv_multicasts = 0;
  2006. adapter->rcv_unicasts = 0;
  2007. }
  2008. static int slic_adapter_allocresources(struct adapter *adapter,
  2009. unsigned long *flags)
  2010. {
  2011. if (!adapter->intrregistered) {
  2012. int retval;
  2013. spin_unlock_irqrestore(&slic_global.driver_lock, *flags);
  2014. retval = request_irq(adapter->netdev->irq,
  2015. &slic_interrupt,
  2016. IRQF_SHARED,
  2017. adapter->netdev->name, adapter->netdev);
  2018. spin_lock_irqsave(&slic_global.driver_lock, *flags);
  2019. if (retval) {
  2020. dev_err(&adapter->netdev->dev,
  2021. "request_irq (%s) FAILED [%x]\n",
  2022. adapter->netdev->name, retval);
  2023. return retval;
  2024. }
  2025. adapter->intrregistered = 1;
  2026. }
  2027. return 0;
  2028. }
  2029. /*
  2030. * slic_if_init
  2031. *
  2032. * Perform initialization of our slic interface.
  2033. *
  2034. */
  2035. static int slic_if_init(struct adapter *adapter, unsigned long *flags)
  2036. {
  2037. struct sliccard *card = adapter->card;
  2038. struct net_device *dev = adapter->netdev;
  2039. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  2040. struct slic_shmem *pshmem;
  2041. int rc;
  2042. /* adapter should be down at this point */
  2043. if (adapter->state != ADAPT_DOWN) {
  2044. dev_err(&dev->dev, "%s: adapter->state != ADAPT_DOWN\n",
  2045. __func__);
  2046. rc = -EIO;
  2047. goto err;
  2048. }
  2049. adapter->devflags_prev = dev->flags;
  2050. adapter->macopts = MAC_DIRECTED;
  2051. if (dev->flags) {
  2052. if (dev->flags & IFF_BROADCAST)
  2053. adapter->macopts |= MAC_BCAST;
  2054. if (dev->flags & IFF_PROMISC)
  2055. adapter->macopts |= MAC_PROMISC;
  2056. if (dev->flags & IFF_ALLMULTI)
  2057. adapter->macopts |= MAC_ALLMCAST;
  2058. if (dev->flags & IFF_MULTICAST)
  2059. adapter->macopts |= MAC_MCAST;
  2060. }
  2061. rc = slic_adapter_allocresources(adapter, flags);
  2062. if (rc) {
  2063. dev_err(&dev->dev, "slic_adapter_allocresources FAILED %x\n",
  2064. rc);
  2065. slic_adapter_freeresources(adapter);
  2066. goto err;
  2067. }
  2068. if (!adapter->queues_initialized) {
  2069. rc = slic_rspqueue_init(adapter);
  2070. if (rc)
  2071. goto err;
  2072. rc = slic_cmdq_init(adapter);
  2073. if (rc)
  2074. goto err;
  2075. rc = slic_rcvqueue_init(adapter);
  2076. if (rc)
  2077. goto err;
  2078. adapter->queues_initialized = 1;
  2079. }
  2080. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
  2081. mdelay(1);
  2082. if (!adapter->isp_initialized) {
  2083. unsigned long flags;
  2084. pshmem = (struct slic_shmem *)(unsigned long)
  2085. adapter->phys_shmem;
  2086. spin_lock_irqsave(&adapter->bit64reglock, flags);
  2087. #if BITS_PER_LONG == 64
  2088. slic_reg32_write(&slic_regs->slic_addr_upper,
  2089. SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH);
  2090. slic_reg32_write(&slic_regs->slic_isp,
  2091. SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
  2092. #else
  2093. slic_reg32_write(&slic_regs->slic_addr_upper, 0, DONT_FLUSH);
  2094. slic_reg32_write(&slic_regs->slic_isp, (u32)&pshmem->isr,
  2095. FLUSH);
  2096. #endif
  2097. spin_unlock_irqrestore(&adapter->bit64reglock, flags);
  2098. adapter->isp_initialized = 1;
  2099. }
  2100. adapter->state = ADAPT_UP;
  2101. if (!card->loadtimerset) {
  2102. setup_timer(&card->loadtimer, &slic_timer_load_check,
  2103. (ulong)card);
  2104. card->loadtimer.expires =
  2105. jiffies + (SLIC_LOADTIMER_PERIOD * HZ);
  2106. add_timer(&card->loadtimer);
  2107. card->loadtimerset = 1;
  2108. }
  2109. if (!adapter->pingtimerset) {
  2110. setup_timer(&adapter->pingtimer, &slic_timer_ping, (ulong)dev);
  2111. adapter->pingtimer.expires =
  2112. jiffies + (PING_TIMER_INTERVAL * HZ);
  2113. add_timer(&adapter->pingtimer);
  2114. adapter->pingtimerset = 1;
  2115. adapter->card->pingstatus = ISR_PINGMASK;
  2116. }
  2117. /*
  2118. * clear any pending events, then enable interrupts
  2119. */
  2120. adapter->isrcopy = 0;
  2121. adapter->pshmem->isr = 0;
  2122. slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH);
  2123. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_ON, FLUSH);
  2124. slic_link_config(adapter, LINK_AUTOSPEED, LINK_AUTOD);
  2125. rc = slic_link_event_handler(adapter);
  2126. if (rc) {
  2127. /* disable interrupts then clear pending events */
  2128. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
  2129. slic_reg32_write(&slic_regs->slic_isr, 0, FLUSH);
  2130. if (adapter->pingtimerset) {
  2131. del_timer(&adapter->pingtimer);
  2132. adapter->pingtimerset = 0;
  2133. }
  2134. if (card->loadtimerset) {
  2135. del_timer(&card->loadtimer);
  2136. card->loadtimerset = 0;
  2137. }
  2138. adapter->state = ADAPT_DOWN;
  2139. slic_adapter_freeresources(adapter);
  2140. }
  2141. err:
  2142. return rc;
  2143. }
  2144. static int slic_entry_open(struct net_device *dev)
  2145. {
  2146. struct adapter *adapter = netdev_priv(dev);
  2147. struct sliccard *card = adapter->card;
  2148. unsigned long flags;
  2149. int status;
  2150. netif_stop_queue(adapter->netdev);
  2151. spin_lock_irqsave(&slic_global.driver_lock, flags);
  2152. if (!adapter->activated) {
  2153. card->adapters_activated++;
  2154. slic_global.num_slic_ports_active++;
  2155. adapter->activated = 1;
  2156. }
  2157. status = slic_if_init(adapter, &flags);
  2158. if (status != 0) {
  2159. if (adapter->activated) {
  2160. card->adapters_activated--;
  2161. slic_global.num_slic_ports_active--;
  2162. adapter->activated = 0;
  2163. }
  2164. goto spin_unlock;
  2165. }
  2166. if (!card->master)
  2167. card->master = adapter;
  2168. spin_unlock:
  2169. spin_unlock_irqrestore(&slic_global.driver_lock, flags);
  2170. return status;
  2171. }
  2172. static void slic_card_cleanup(struct sliccard *card)
  2173. {
  2174. if (card->loadtimerset) {
  2175. card->loadtimerset = 0;
  2176. del_timer_sync(&card->loadtimer);
  2177. }
  2178. kfree(card);
  2179. }
  2180. static void slic_entry_remove(struct pci_dev *pcidev)
  2181. {
  2182. struct net_device *dev = pci_get_drvdata(pcidev);
  2183. struct adapter *adapter = netdev_priv(dev);
  2184. struct sliccard *card;
  2185. struct mcast_address *mcaddr, *mlist;
  2186. unregister_netdev(dev);
  2187. slic_adapter_freeresources(adapter);
  2188. slic_unmap_mmio_space(adapter);
  2189. /* free multicast addresses */
  2190. mlist = adapter->mcastaddrs;
  2191. while (mlist) {
  2192. mcaddr = mlist;
  2193. mlist = mlist->next;
  2194. kfree(mcaddr);
  2195. }
  2196. card = adapter->card;
  2197. card->adapters_allocated--;
  2198. adapter->allocated = 0;
  2199. if (!card->adapters_allocated) {
  2200. struct sliccard *curr_card = slic_global.slic_card;
  2201. if (curr_card == card) {
  2202. slic_global.slic_card = card->next;
  2203. } else {
  2204. while (curr_card->next != card)
  2205. curr_card = curr_card->next;
  2206. curr_card->next = card->next;
  2207. }
  2208. slic_global.num_slic_cards--;
  2209. slic_card_cleanup(card);
  2210. }
  2211. free_netdev(dev);
  2212. pci_release_regions(pcidev);
  2213. pci_disable_device(pcidev);
  2214. }
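/*
* slic_entry_halt - ndo_stop entry point.
*
* Stops the transmit queue, marks the adapter and link down, masks
* interrupts via the ICR, clears the hardware config and resets the
* command queues.  With AUTOMATIC_RESET defined the interface is also
* reset and, once the last active port is closed, re-initialised
* through slic_card_init().
*/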
  2215. static int slic_entry_halt(struct net_device *dev)
  2216. {
  2217. struct adapter *adapter = netdev_priv(dev);
  2218. struct sliccard *card = adapter->card;
  2219. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  2220. unsigned long flags;
  2221. spin_lock_irqsave(&slic_global.driver_lock, flags);
  2222. netif_stop_queue(adapter->netdev);
  2223. adapter->state = ADAPT_DOWN;
  2224. adapter->linkstate = LINK_DOWN;
  2225. adapter->upr_list = NULL;
  2226. adapter->upr_busy = 0;
  2227. adapter->devflags_prev = 0;
  2228. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
  2229. adapter->all_reg_writes++;
  2230. adapter->icr_reg_writes++;
  2231. slic_config_clear(adapter);
  2232. if (adapter->activated) {
  2233. card->adapters_activated--;
  2234. slic_global.num_slic_ports_active--;
  2235. adapter->activated = 0;
  2236. }
  2237. #ifdef AUTOMATIC_RESET
  2238. slic_reg32_write(&slic_regs->slic_reset_iface, 0, FLUSH);
  2239. #endif
  2240. /*
  2241. * Reset the adapter's cmd queues
  2242. */
  2243. slic_cmdq_reset(adapter);
  2244. #ifdef AUTOMATIC_RESET
  2245. if (!card->adapters_activated)
  2246. slic_card_init(card, adapter);
  2247. #endif
  2248. spin_unlock_irqrestore(&slic_global.driver_lock, flags);
  2249. return 0;
  2250. }
  2251. static struct net_device_stats *slic_get_stats(struct net_device *dev)
  2252. {
  2253. struct adapter *adapter = netdev_priv(dev);
  2254. dev->stats.collisions = adapter->slic_stats.iface.xmit_collisions;
  2255. dev->stats.rx_errors = adapter->slic_stats.iface.rcv_errors;
  2256. dev->stats.tx_errors = adapter->slic_stats.iface.xmt_errors;
  2257. dev->stats.rx_missed_errors = adapter->slic_stats.iface.rcv_discards;
  2258. dev->stats.tx_heartbeat_errors = 0;
  2259. dev->stats.tx_aborted_errors = 0;
  2260. dev->stats.tx_window_errors = 0;
  2261. dev->stats.tx_fifo_errors = 0;
  2262. dev->stats.rx_frame_errors = 0;
  2263. dev->stats.rx_length_errors = 0;
  2264. return &dev->stats;
  2265. }
  2266. static int slic_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  2267. {
  2268. struct adapter *adapter = netdev_priv(dev);
  2269. struct ethtool_cmd edata;
  2270. struct ethtool_cmd ecmd;
  2271. u32 data[7];
  2272. u32 intagg;
  2273. switch (cmd) {
  2274. case SIOCSLICSETINTAGG:
  2275. if (copy_from_user(data, rq->ifr_data, 28))
  2276. return -EFAULT;
  2277. intagg = data[0];
  2278. dev_err(&dev->dev, "set interrupt aggregation to %d\n",
  2279. intagg);
  2280. slic_intagg_set(adapter, intagg);
  2281. return 0;
  2282. case SIOCETHTOOL:
  2283. if (copy_from_user(&ecmd, rq->ifr_data, sizeof(ecmd)))
  2284. return -EFAULT;
  2285. if (ecmd.cmd == ETHTOOL_GSET) {
  2286. memset(&edata, 0, sizeof(edata));
  2287. edata.supported = (SUPPORTED_10baseT_Half |
  2288. SUPPORTED_10baseT_Full |
  2289. SUPPORTED_100baseT_Half |
  2290. SUPPORTED_100baseT_Full |
  2291. SUPPORTED_Autoneg | SUPPORTED_MII);
  2292. edata.port = PORT_MII;
  2293. edata.transceiver = XCVR_INTERNAL;
  2294. edata.phy_address = 0;
  2295. if (adapter->linkspeed == LINK_100MB)
  2296. edata.speed = SPEED_100;
  2297. else if (adapter->linkspeed == LINK_10MB)
  2298. edata.speed = SPEED_10;
  2299. else
  2300. edata.speed = 0;
  2301. if (adapter->linkduplex == LINK_FULLD)
  2302. edata.duplex = DUPLEX_FULL;
  2303. else
  2304. edata.duplex = DUPLEX_HALF;
  2305. edata.autoneg = AUTONEG_ENABLE;
  2306. edata.maxtxpkt = 1;
  2307. edata.maxrxpkt = 1;
  2308. if (copy_to_user(rq->ifr_data, &edata, sizeof(edata)))
  2309. return -EFAULT;
  2310. } else if (ecmd.cmd == ETHTOOL_SSET) {
  2311. if (!capable(CAP_NET_ADMIN))
  2312. return -EPERM;
  2313. if (adapter->linkspeed == LINK_100MB)
  2314. edata.speed = SPEED_100;
  2315. else if (adapter->linkspeed == LINK_10MB)
  2316. edata.speed = SPEED_10;
  2317. else
  2318. edata.speed = 0;
  2319. if (adapter->linkduplex == LINK_FULLD)
  2320. edata.duplex = DUPLEX_FULL;
  2321. else
  2322. edata.duplex = DUPLEX_HALF;
  2323. edata.autoneg = AUTONEG_ENABLE;
  2324. edata.maxtxpkt = 1;
  2325. edata.maxrxpkt = 1;
  2326. if ((ecmd.speed != edata.speed) ||
  2327. (ecmd.duplex != edata.duplex)) {
  2328. u32 speed;
  2329. u32 duplex;
  2330. if (ecmd.speed == SPEED_10)
  2331. speed = 0;
  2332. else
  2333. speed = PCR_SPEED_100;
  2334. if (ecmd.duplex == DUPLEX_FULL)
  2335. duplex = PCR_DUPLEX_FULL;
  2336. else
  2337. duplex = 0;
  2338. slic_link_config(adapter, speed, duplex);
  2339. if (slic_link_event_handler(adapter))
  2340. return -EFAULT;
  2341. }
  2342. }
  2343. return 0;
  2344. default:
  2345. return -EOPNOTSUPP;
  2346. }
  2347. }
  2348. static void slic_config_pci(struct pci_dev *pcidev)
  2349. {
  2350. u16 pci_command;
  2351. u16 new_command;
  2352. pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
  2353. new_command = pci_command | PCI_COMMAND_MASTER
  2354. | PCI_COMMAND_MEMORY
  2355. | PCI_COMMAND_INVALIDATE
  2356. | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
  2357. if (pci_command != new_command)
  2358. pci_write_config_word(pcidev, PCI_COMMAND, new_command);
  2359. }
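/*
* slic_card_init - one-time bring-up of a logical card.
*
* Soft-resets the device, downloads the microcode, and (when the card
* config has not been set yet) fetches the EEPROM image into a DMA
* buffer, validates it by checksum and copies out the DRAM size, MAC
* addresses and FRU data.  It then runs slic_card_download_gbrcv(),
* programs interrupt aggregation and marks the card CARD_UP.
*/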
  2360. static int slic_card_init(struct sliccard *card, struct adapter *adapter)
  2361. {
  2362. __iomem struct slic_regs *slic_regs = adapter->slic_regs;
  2363. struct slic_eeprom *peeprom;
  2364. struct oslic_eeprom *pOeeprom;
  2365. dma_addr_t phys_config;
  2366. u32 phys_configh;
  2367. u32 phys_configl;
  2368. u32 i = 0;
  2369. struct slic_shmem *pshmem;
  2370. int status;
  2371. uint macaddrs = card->card_size;
  2372. ushort eecodesize;
  2373. ushort dramsize;
  2374. ushort ee_chksum;
  2375. ushort calc_chksum;
  2376. struct slic_config_mac *pmac;
  2377. unsigned char fruformat;
  2378. unsigned char oemfruformat;
  2379. struct atk_fru *patkfru;
  2380. union oemfru *poemfru;
  2381. unsigned long flags;
  2382. /* Reset everything except PCI configuration space */
  2383. slic_soft_reset(adapter);
  2384. /* Download the microcode */
  2385. status = slic_card_download(adapter);
  2386. if (status)
  2387. return status;
  2388. if (!card->config_set) {
  2389. peeprom = pci_alloc_consistent(adapter->pcidev,
  2390. sizeof(struct slic_eeprom),
  2391. &phys_config);
  2392. phys_configl = SLIC_GET_ADDR_LOW(phys_config);
  2393. phys_configh = SLIC_GET_ADDR_HIGH(phys_config);
  2394. if (!peeprom) {
  2395. dev_err(&adapter->pcidev->dev,
  2396. "Failed to allocate DMA memory for EEPROM.\n");
  2397. return -ENOMEM;
  2398. }
  2399. memset(peeprom, 0, sizeof(struct slic_eeprom));
  2400. slic_reg32_write(&slic_regs->slic_icr, ICR_INT_OFF, FLUSH);
  2401. mdelay(1);
  2402. pshmem = (struct slic_shmem *)(unsigned long)
  2403. adapter->phys_shmem;
  2404. spin_lock_irqsave(&adapter->bit64reglock, flags);
  2405. slic_reg32_write(&slic_regs->slic_addr_upper,
  2406. SLIC_GET_ADDR_HIGH(&pshmem->isr), DONT_FLUSH);
  2407. slic_reg32_write(&slic_regs->slic_isp,
  2408. SLIC_GET_ADDR_LOW(&pshmem->isr), FLUSH);
  2409. spin_unlock_irqrestore(&adapter->bit64reglock, flags);
  2410. status = slic_config_get(adapter, phys_configl, phys_configh);
  2411. if (status) {
  2412. dev_err(&adapter->pcidev->dev,
  2413. "Failed to fetch config data from device.\n");
  2414. goto card_init_err;
  2415. }
  2416. for (;;) {
  2417. if (adapter->pshmem->isr) {
  2418. if (adapter->pshmem->isr & ISR_UPC) {
  2419. adapter->pshmem->isr = 0;
  2420. slic_reg64_write(adapter,
  2421. &slic_regs->slic_isp, 0,
  2422. &slic_regs->slic_addr_upper,
  2423. 0, FLUSH);
  2424. slic_reg32_write(&slic_regs->slic_isr,
  2425. 0, FLUSH);
  2426. slic_upr_request_complete(adapter, 0);
  2427. break;
  2428. }
  2429. adapter->pshmem->isr = 0;
  2430. slic_reg32_write(&slic_regs->slic_isr,
  2431. 0, FLUSH);
  2432. } else {
  2433. mdelay(1);
  2434. i++;
  2435. if (i > 5000) {
  2436. dev_err(&adapter->pcidev->dev,
  2437. "Fetch of config data timed out.\n");
  2438. slic_reg64_write(adapter,
  2439. &slic_regs->slic_isp, 0,
  2440. &slic_regs->slic_addr_upper,
  2441. 0, FLUSH);
  2442. status = -EINVAL;
  2443. goto card_init_err;
  2444. }
  2445. }
  2446. }
  2447. switch (adapter->devid) {
  2448. /* Oasis card */
  2449. case SLIC_2GB_DEVICE_ID:
  2450. /* extract EEPROM data and pointers to EEPROM data */
  2451. pOeeprom = (struct oslic_eeprom *)peeprom;
  2452. eecodesize = pOeeprom->EecodeSize;
  2453. dramsize = pOeeprom->DramSize;
  2454. pmac = pOeeprom->MacInfo;
  2455. fruformat = pOeeprom->FruFormat;
  2456. patkfru = &pOeeprom->AtkFru;
  2457. oemfruformat = pOeeprom->OemFruFormat;
  2458. poemfru = &pOeeprom->OemFru;
  2459. macaddrs = 2;
  2460. /*
  2461. * Minor kludge for Oasis card
  2462. * get 2 MAC addresses from the
  2463. * EEPROM to ensure that function 1
  2464. * gets the Port 1 MAC address
  2465. */
  2466. break;
  2467. default:
  2468. /* extract EEPROM data and pointers to EEPROM data */
  2469. eecodesize = peeprom->EecodeSize;
  2470. dramsize = peeprom->DramSize;
  2471. pmac = peeprom->u2.mac.MacInfo;
  2472. fruformat = peeprom->FruFormat;
  2473. patkfru = &peeprom->AtkFru;
  2474. oemfruformat = peeprom->OemFruFormat;
  2475. poemfru = &peeprom->OemFru;
  2476. break;
  2477. }
  2478. card->config.EepromValid = false;
2479. /* see if the EEPROM is valid by checking its checksum */
  2480. if ((eecodesize <= MAX_EECODE_SIZE) &&
  2481. (eecodesize >= MIN_EECODE_SIZE)) {
  2482. ee_chksum =
  2483. *(u16 *)((char *)peeprom + (eecodesize - 2));
  2484. /*
  2485. * calculate the EEPROM checksum
  2486. */
  2487. calc_chksum = slic_eeprom_cksum(peeprom,
  2488. eecodesize - 2);
  2489. /*
2490. * if the ucode chksum flag bit worked,
  2491. * we wouldn't need this
  2492. */
  2493. if (ee_chksum == calc_chksum)
  2494. card->config.EepromValid = true;
  2495. }
  2496. /* copy in the DRAM size */
  2497. card->config.DramSize = dramsize;
  2498. /* copy in the MAC address(es) */
  2499. for (i = 0; i < macaddrs; i++) {
  2500. memcpy(&card->config.MacInfo[i],
  2501. &pmac[i], sizeof(struct slic_config_mac));
  2502. }
  2503. /* copy the Alacritech FRU information */
  2504. card->config.FruFormat = fruformat;
  2505. memcpy(&card->config.AtkFru, patkfru,
  2506. sizeof(struct atk_fru));
  2507. pci_free_consistent(adapter->pcidev,
  2508. sizeof(struct slic_eeprom),
  2509. peeprom, phys_config);
  2510. if (!card->config.EepromValid) {
  2511. slic_reg64_write(adapter, &slic_regs->slic_isp, 0,
  2512. &slic_regs->slic_addr_upper,
  2513. 0, FLUSH);
  2514. dev_err(&adapter->pcidev->dev, "EEPROM invalid.\n");
  2515. return -EINVAL;
  2516. }
  2517. card->config_set = 1;
  2518. }
  2519. status = slic_card_download_gbrcv(adapter);
  2520. if (status)
  2521. return status;
  2522. if (slic_global.dynamic_intagg)
  2523. slic_intagg_set(adapter, 0);
  2524. else
  2525. slic_intagg_set(adapter, intagg_delay);
  2526. /*
  2527. * Initialize ping status to "ok"
  2528. */
  2529. card->pingstatus = ISR_PINGMASK;
  2530. /*
  2531. * Lastly, mark our card state as up and return success
  2532. */
  2533. card->state = CARD_UP;
  2534. card->reset_in_progress = 0;
  2535. return 0;
  2536. card_init_err:
  2537. pci_free_consistent(adapter->pcidev, sizeof(struct slic_eeprom),
  2538. peeprom, phys_config);
  2539. return status;
  2540. }
  2541. static void slic_init_driver(void)
  2542. {
  2543. if (slic_first_init) {
  2544. slic_first_init = 0;
  2545. spin_lock_init(&slic_global.driver_lock);
  2546. }
  2547. }
  2548. static void slic_init_adapter(struct net_device *netdev,
  2549. struct pci_dev *pcidev,
  2550. const struct pci_device_id *pci_tbl_entry,
  2551. void __iomem *memaddr, int chip_idx)
  2552. {
  2553. ushort index;
  2554. struct slic_handle *pslic_handle;
  2555. struct adapter *adapter = netdev_priv(netdev);
  2556. /* adapter->pcidev = pcidev;*/
  2557. adapter->vendid = pci_tbl_entry->vendor;
  2558. adapter->devid = pci_tbl_entry->device;
  2559. adapter->subsysid = pci_tbl_entry->subdevice;
  2560. adapter->busnumber = pcidev->bus->number;
  2561. adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
  2562. adapter->functionnumber = (pcidev->devfn & 0x7);
  2563. adapter->slic_regs = memaddr;
  2564. adapter->irq = pcidev->irq;
  2565. /* adapter->netdev = netdev;*/
  2566. adapter->chipid = chip_idx;
  2567. adapter->port = 0; /*adapter->functionnumber;*/
  2568. adapter->cardindex = adapter->port;
  2569. spin_lock_init(&adapter->upr_lock);
  2570. spin_lock_init(&adapter->bit64reglock);
  2571. spin_lock_init(&adapter->adapter_lock);
  2572. spin_lock_init(&adapter->reset_lock);
  2573. spin_lock_init(&adapter->handle_lock);
  2574. adapter->card_size = 1;
  2575. /*
  2576. * Initialize slic_handle array
  2577. */
  2578. /*
  2579. * Start with 1. 0 is an invalid host handle.
  2580. */
  2581. for (index = 1, pslic_handle = &adapter->slic_handles[1];
  2582. index < SLIC_CMDQ_MAXCMDS; index++, pslic_handle++) {
  2583. pslic_handle->token.handle_index = index;
  2584. pslic_handle->type = SLIC_HANDLE_FREE;
  2585. pslic_handle->next = adapter->pfree_slic_handles;
  2586. adapter->pfree_slic_handles = pslic_handle;
  2587. }
2588. adapter->pshmem = (struct slic_shmem *)
2589. pci_alloc_consistent(adapter->pcidev,
2590. sizeof(struct slic_shmem),
2591. &adapter->phys_shmem);
  2593. if (adapter->pshmem)
  2594. memset(adapter->pshmem, 0, sizeof(struct slic_shmem));
  2595. }
  2596. static const struct net_device_ops slic_netdev_ops = {
  2597. .ndo_open = slic_entry_open,
  2598. .ndo_stop = slic_entry_halt,
  2599. .ndo_start_xmit = slic_xmit_start,
  2600. .ndo_do_ioctl = slic_ioctl,
  2601. .ndo_set_mac_address = slic_mac_set_address,
  2602. .ndo_get_stats = slic_get_stats,
  2603. .ndo_set_rx_mode = slic_mcast_set_list,
  2604. .ndo_validate_addr = eth_validate_addr,
  2605. .ndo_change_mtu = eth_change_mtu,
  2606. };
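/*
* slic_card_locate - attach this adapter to its logical and physical card.
*
* Reads the 16-bit host id from card SRAM (the offset depends on the
* device id); SLIC_HOSTID_DEFAULT means no card structure exists yet
* and a new sliccard is allocated, otherwise the adapter joins the
* existing card with the matching cardnum.  A physcard structure is
* found or created the same way to group ports that share a slot.
*/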
  2607. static u32 slic_card_locate(struct adapter *adapter)
  2608. {
  2609. struct sliccard *card = slic_global.slic_card;
  2610. struct physcard *physcard = slic_global.phys_card;
  2611. ushort card_hostid;
  2612. u16 __iomem *hostid_reg;
  2613. uint i;
  2614. uint rdhostid_offset = 0;
  2615. switch (adapter->devid) {
  2616. case SLIC_2GB_DEVICE_ID:
  2617. rdhostid_offset = SLIC_RDHOSTID_2GB;
  2618. break;
  2619. case SLIC_1GB_DEVICE_ID:
  2620. rdhostid_offset = SLIC_RDHOSTID_1GB;
  2621. break;
  2622. default:
  2623. return -ENODEV;
  2624. }
  2625. hostid_reg =
  2626. (u16 __iomem *)(((u8 __iomem *)(adapter->slic_regs)) +
  2627. rdhostid_offset);
  2628. /* read the 16 bit hostid from SRAM */
  2629. card_hostid = (ushort)readw(hostid_reg);
  2630. /* Initialize a new card structure if need be */
  2631. if (card_hostid == SLIC_HOSTID_DEFAULT) {
  2632. card = kzalloc(sizeof(struct sliccard), GFP_KERNEL);
  2633. if (card == NULL)
  2634. return -ENOMEM;
  2635. card->next = slic_global.slic_card;
  2636. slic_global.slic_card = card;
  2637. card->busnumber = adapter->busnumber;
  2638. card->slotnumber = adapter->slotnumber;
  2639. /* Find an available cardnum */
  2640. for (i = 0; i < SLIC_MAX_CARDS; i++) {
  2641. if (slic_global.cardnuminuse[i] == 0) {
  2642. slic_global.cardnuminuse[i] = 1;
  2643. card->cardnum = i;
  2644. break;
  2645. }
  2646. }
  2647. slic_global.num_slic_cards++;
  2648. } else {
  2649. /* Card exists, find the card this adapter belongs to */
  2650. while (card) {
  2651. if (card->cardnum == card_hostid)
  2652. break;
  2653. card = card->next;
  2654. }
  2655. }
  2656. if (!card)
  2657. return -ENXIO;
  2658. /* Put the adapter in the card's adapter list */
  2659. if (!card->adapter[adapter->port]) {
  2660. card->adapter[adapter->port] = adapter;
  2661. adapter->card = card;
  2662. }
  2663. card->card_size = 1; /* one port per *logical* card */
  2664. while (physcard) {
  2665. for (i = 0; i < SLIC_MAX_PORTS; i++) {
  2666. if (physcard->adapter[i])
  2667. break;
  2668. }
  2669. if (i == SLIC_MAX_PORTS)
  2670. break;
  2671. if (physcard->adapter[i]->slotnumber == adapter->slotnumber)
  2672. break;
  2673. physcard = physcard->next;
  2674. }
  2675. if (!physcard) {
  2676. /* no structure allocated for this physical card yet */
  2677. physcard = kzalloc(sizeof(struct physcard), GFP_ATOMIC);
  2678. if (!physcard) {
  2679. if (card_hostid == SLIC_HOSTID_DEFAULT)
  2680. kfree(card);
  2681. return -ENOMEM;
  2682. }
  2683. physcard->next = slic_global.phys_card;
  2684. slic_global.phys_card = physcard;
  2685. physcard->adapters_allocd = 1;
  2686. } else {
  2687. physcard->adapters_allocd++;
  2688. }
  2689. /* Note - this is ZERO relative */
  2690. adapter->physport = physcard->adapters_allocd - 1;
  2691. physcard->adapter[adapter->physport] = adapter;
  2692. adapter->physcard = physcard;
  2693. return 0;
  2694. }
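/*
* slic_entry_probe - PCI probe entry point.
*
* Enables the device, prefers a 64-bit DMA mask and falls back to
* 32-bit, claims the PCI regions, maps BAR 0, allocates the net_device
* and shared memory, locates and initialises the card, and finally
* registers the interface as eth%d.
*/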
  2695. static int slic_entry_probe(struct pci_dev *pcidev,
  2696. const struct pci_device_id *pci_tbl_entry)
  2697. {
  2698. static int cards_found;
  2699. static int did_version;
  2700. int err = -ENODEV;
  2701. struct net_device *netdev;
  2702. struct adapter *adapter;
  2703. void __iomem *memmapped_ioaddr = NULL;
  2704. ulong mmio_start = 0;
  2705. ulong mmio_len = 0;
  2706. struct sliccard *card = NULL;
  2707. int pci_using_dac = 0;
  2708. slic_global.dynamic_intagg = dynamic_intagg;
  2709. err = pci_enable_device(pcidev);
  2710. if (err)
  2711. return err;
  2712. if (did_version++ == 0) {
  2713. dev_info(&pcidev->dev, "%s\n", slic_banner);
  2714. dev_info(&pcidev->dev, "%s\n", slic_proc_version);
  2715. }
  2716. if (!pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
  2717. pci_using_dac = 1;
  2718. err = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
  2719. if (err) {
  2720. dev_err(&pcidev->dev, "unable to obtain 64-bit DMA for consistent allocations\n");
  2721. goto err_out_disable_pci;
  2722. }
  2723. } else {
  2724. err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
  2725. if (err) {
  2726. dev_err(&pcidev->dev, "no usable DMA configuration\n");
  2727. goto err_out_disable_pci;
  2728. }
  2729. pci_using_dac = 0;
  2730. pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
  2731. }
  2732. err = pci_request_regions(pcidev, DRV_NAME);
  2733. if (err) {
  2734. dev_err(&pcidev->dev, "can't obtain PCI resources\n");
  2735. goto err_out_disable_pci;
  2736. }
  2737. pci_set_master(pcidev);
  2738. netdev = alloc_etherdev(sizeof(struct adapter));
  2739. if (!netdev) {
  2740. err = -ENOMEM;
  2741. goto err_out_exit_slic_probe;
  2742. }
  2743. SET_NETDEV_DEV(netdev, &pcidev->dev);
  2744. pci_set_drvdata(pcidev, netdev);
  2745. adapter = netdev_priv(netdev);
  2746. adapter->netdev = netdev;
  2747. adapter->pcidev = pcidev;
  2748. if (pci_using_dac)
  2749. netdev->features |= NETIF_F_HIGHDMA;
  2750. mmio_start = pci_resource_start(pcidev, 0);
  2751. mmio_len = pci_resource_len(pcidev, 0);
  2752. /* memmapped_ioaddr = (u32)ioremap_nocache(mmio_start, mmio_len);*/
  2753. memmapped_ioaddr = ioremap(mmio_start, mmio_len);
  2754. if (!memmapped_ioaddr) {
  2755. dev_err(&pcidev->dev, "cannot remap MMIO region %lx @ %lx\n",
  2756. mmio_len, mmio_start);
  2757. err = -ENOMEM;
  2758. goto err_out_free_netdev;
  2759. }
  2760. slic_config_pci(pcidev);
  2761. slic_init_driver();
  2762. slic_init_adapter(netdev,
  2763. pcidev, pci_tbl_entry, memmapped_ioaddr, cards_found);
  2764. err = slic_card_locate(adapter);
  2765. if (err) {
  2766. dev_err(&pcidev->dev, "cannot locate card\n");
  2767. goto err_out_unmap;
  2768. }
  2769. card = adapter->card;
  2770. if (!adapter->allocated) {
  2771. card->adapters_allocated++;
  2772. adapter->allocated = 1;
  2773. }
  2774. err = slic_card_init(card, adapter);
  2775. if (err)
  2776. goto err_out_unmap;
  2777. slic_adapter_set_hwaddr(adapter);
  2778. netdev->base_addr = (unsigned long)memmapped_ioaddr;
  2779. netdev->irq = adapter->irq;
  2780. netdev->netdev_ops = &slic_netdev_ops;
  2781. strcpy(netdev->name, "eth%d");
  2782. err = register_netdev(netdev);
  2783. if (err) {
  2784. dev_err(&pcidev->dev, "Cannot register net device, aborting.\n");
  2785. goto err_out_unmap;
  2786. }
  2787. cards_found++;
  2788. return 0;
  2789. err_out_unmap:
  2790. iounmap(memmapped_ioaddr);
  2791. err_out_free_netdev:
  2792. free_netdev(netdev);
  2793. err_out_exit_slic_probe:
  2794. pci_release_regions(pcidev);
  2795. err_out_disable_pci:
  2796. pci_disable_device(pcidev);
  2797. return err;
  2798. }
  2799. static struct pci_driver slic_driver = {
  2800. .name = DRV_NAME,
  2801. .id_table = slic_pci_tbl,
  2802. .probe = slic_entry_probe,
  2803. .remove = slic_entry_remove,
  2804. };
  2805. static int __init slic_module_init(void)
  2806. {
  2807. slic_init_driver();
  2808. return pci_register_driver(&slic_driver);
  2809. }
  2810. static void __exit slic_module_cleanup(void)
  2811. {
  2812. pci_unregister_driver(&slic_driver);
  2813. }
  2814. module_init(slic_module_init);
  2815. module_exit(slic_module_cleanup);