hw.c

/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 * This file is free software: you may copy, redistribute and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation, either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/mdio.h>
#include "reg.h"
#include "hw.h"

static inline bool alx_is_rev_a(u8 rev)
{
	return rev == ALX_REV_A0 || rev == ALX_REV_A1;
}

static int alx_wait_mdio_idle(struct alx_hw *hw)
{
	u32 val;
	int i;

	for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
		val = alx_read_mem32(hw, ALX_MDIO);
		if (!(val & ALX_MDIO_BUSY))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
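
/*
 * Low-level MDIO access helpers. When "ext" is set, the target register is
 * addressed through ALX_MDIO_EXTN (device address + register number);
 * otherwise a plain PHY register access is issued. A slower MDIO clock is
 * selected while the link is down (PHY hibernation).
 */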
static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			     u16 reg, u16 *phy_data)
{
	u32 val, clk_sel;
	int err;

	*phy_data = 0;

	/* use slow clock when it's in hibernation status */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
		      ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_OP_READ;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	err = alx_wait_mdio_idle(hw);
	if (err)
		return err;
	val = alx_read_mem32(hw, ALX_MDIO);
	*phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);

	return 0;
}

static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
			      u16 reg, u16 phy_data)
{
	u32 val, clk_sel;

	/* use slow clock when it's in hibernation status */
	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
			ALX_MDIO_CLK_SEL_25MD4 :
			ALX_MDIO_CLK_SEL_25MD128;

	if (ext) {
		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
		      reg << ALX_MDIO_EXTN_REG_SHIFT;
		alx_write_mem32(hw, ALX_MDIO_EXTN, val);

		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START | ALX_MDIO_MODE_EXT;
	} else {
		val = ALX_MDIO_SPRES_PRMBL |
		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
		      reg << ALX_MDIO_REG_SHIFT |
		      phy_data << ALX_MDIO_DATA_SHIFT |
		      ALX_MDIO_START;
	}
	alx_write_mem32(hw, ALX_MDIO, val);

	return alx_wait_mdio_idle(hw);
}

static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	return alx_read_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	return alx_write_phy_core(hw, false, 0, reg, phy_data);
}

static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	return alx_read_phy_core(hw, true, dev, reg, pdata);
}

static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	return alx_write_phy_core(hw, true, dev, reg, data);
}

static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
}

static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
	if (err)
		return err;

	return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
}
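
/*
 * The wrappers below take hw->mdio_lock so that plain, extended and
 * debug-port PHY register accesses are serialized against each other.
 */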
int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_reg(hw, reg, phy_data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_ext(hw, dev, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_ext(hw, dev, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_read_phy_dbg(hw, reg, pdata);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
{
	int err;

	spin_lock(&hw->mdio_lock);
	err = __alx_write_phy_dbg(hw, reg, data);
	spin_unlock(&hw->mdio_lock);

	return err;
}

static u16 alx_get_phy_config(struct alx_hw *hw)
{
	u32 val;
	u16 phy_val;

	val = alx_read_mem32(hw, ALX_PHY_CTRL);

	/* phy in reset */
	if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
		return ALX_DRV_PHY_UNKNOWN;

	val = alx_read_mem32(hw, ALX_DRV);
	val = ALX_GET_FIELD(val, ALX_DRV_PHY);
	if (ALX_DRV_PHY_UNKNOWN == val)
		return ALX_DRV_PHY_UNKNOWN;

	alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
	if (ALX_PHY_INITED == phy_val)
		return val;

	return ALX_DRV_PHY_UNKNOWN;
}
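
/*
 * Poll @reg until all bits in @wait have cleared, for up to ALX_SLD_MAX_TO
 * milliseconds; optionally return the last value read through @val.
 */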
static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
{
	u32 read;
	int i;

	for (i = 0; i < ALX_SLD_MAX_TO; i++) {
		read = alx_read_mem32(hw, reg);
		if ((read & wait) == 0) {
			if (val)
				*val = read;
			return true;
		}
		mdelay(1);
	}

	return false;
}

static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 mac0, mac1;

	mac0 = alx_read_mem32(hw, ALX_STAD0);
	mac1 = alx_read_mem32(hw, ALX_STAD1);

	/* addr should be big-endian */
	put_unaligned(cpu_to_be32(mac0), (__be32 *)(addr + 2));
	put_unaligned(cpu_to_be16(mac1), (__be16 *)addr);

	return is_valid_ether_addr(addr);
}

int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
{
	u32 val;

	/* try to get it from register first */
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from efuse */
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
		return -EIO;
	alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
		return -EIO;
	if (alx_read_macaddr(hw, addr))
		return 0;

	/* try to load from flash/eeprom (if present) */
	val = alx_read_mem32(hw, ALX_EFLD);
	if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
		if (!alx_wait_reg(hw, ALX_EFLD,
				  ALX_EFLD_STAT | ALX_EFLD_START, &val))
			return -EIO;
		alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
		if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
			return -EIO;
		if (alx_read_macaddr(hw, addr))
			return 0;
	}

	return -EIO;
}

void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
{
	u32 val;

	/* for example: 00-0B-6A-F6-00-DC => STAD0=6AF600DC, STAD1=000B */
	val = be32_to_cpu(get_unaligned((__be32 *)(addr + 2)));
	alx_write_mem32(hw, ALX_STAD0, val);
	val = be16_to_cpu(get_unaligned((__be16 *)addr));
	alx_write_mem32(hw, ALX_STAD1, val);
}

static void alx_reset_osc(struct alx_hw *hw, u8 rev)
{
	u32 val, val2;

	/* clear Internal OSC settings, switching OSC by hw itself */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);

	/* The 25M clock from the chipset may be unstable for 1s after
	 * de-assertion of PERST; the driver needs to re-calibrate it before
	 * entering sleep for WoL.
	 */
	val = alx_read_mem32(hw, ALX_MISC);
	if (rev >= ALX_REV_B0) {
		/* restore the over-current protection default value,
		 * this value could be reset by MAC-RST
		 */
		ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
		/* a 0->1 change will update the internal value of osc */
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		alx_write_mem32(hw, ALX_MISC, val);
		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		/* hw will automatically disable the OSC after calibration */
		val2 = alx_read_mem32(hw, ALX_MSIC2);
		val2 &= ~ALX_MSIC2_CALB_START;
		alx_write_mem32(hw, ALX_MSIC2, val2);
		alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
	} else {
		val &= ~ALX_MISC_INTNLOSC_OPEN;
		/* disable isolate for rev A devices */
		if (alx_is_rev_a(rev))
			val &= ~ALX_MISC_ISO_EN;
		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
		alx_write_mem32(hw, ALX_MISC, val);
	}

	udelay(20);
}
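
/*
 * Disable the RX/TX queues and the MAC receiver/transmitter, then poll
 * ALX_MAC_STS until ALX_MAC_STS_IDLE clears; returns -ETIMEDOUT otherwise.
 */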
static int alx_stop_mac(struct alx_hw *hw)
{
	u32 rxq, txq, val;
	u16 i;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);

	udelay(40);

	hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MAC_STS);
		if (!(val & ALX_MAC_STS_IDLE))
			return 0;
		udelay(10);
	}

	return -ETIMEDOUT;
}
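
/*
 * Full MAC reset: mask all interrupts, stop the MAC, assert
 * ALX_MASTER_DMA_MAC_RST and wait for it to self-clear, then re-run the
 * internal OSC calibration and restore MAC_CTRL and the SERDES clock
 * slow-down bits. On rev A parts with CR, L0s/L1 are disabled around the
 * reset and restored afterwards.
 */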
int alx_reset_mac(struct alx_hw *hw)
{
	u32 val, pmctrl;
	int i, ret;
	u8 rev;
	bool a_cr;

	pmctrl = 0;
	rev = alx_hw_revision(hw);
	a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);

	/* disable all interrupts, RXQ/TXQ */
	alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
	alx_write_mem32(hw, ALX_IMR, 0);
	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);

	ret = alx_stop_mac(hw);
	if (ret)
		return ret;

	/* mac reset workaround */
	alx_write_mem32(hw, ALX_RFD_PIDX, 1);

	/* disable l0s/l1 before mac reset */
	if (a_cr) {
		pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL,
					pmctrl & ~(ALX_PMCTRL_L1_EN |
						   ALX_PMCTRL_L0S_EN));
	}

	/* reset whole mac safely */
	val = alx_read_mem32(hw, ALX_MASTER);
	alx_write_mem32(hw, ALX_MASTER,
			val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);

	/* make sure it's really idle */
	udelay(10);
	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_RFD_PIDX);
		if (val == 0)
			break;
		udelay(10);
	}
	for (; i < ALX_DMA_MAC_RST_TO; i++) {
		val = alx_read_mem32(hw, ALX_MASTER);
		if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
			break;
		udelay(10);
	}
	if (i == ALX_DMA_MAC_RST_TO)
		return -EIO;
	udelay(10);

	if (a_cr) {
		alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
		/* restore l0s / l1 */
		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
			alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
	}

	alx_reset_osc(hw, rev);

	/* clear Internal OSC settings, switching OSC by hw itself,
	 * disable isolate for rev A devices
	 */
	val = alx_read_mem32(hw, ALX_MISC3);
	alx_write_mem32(hw, ALX_MISC3,
			(val & ~ALX_MISC3_25M_BY_SW) |
			ALX_MISC3_25M_NOTO_INTNL);
	val = alx_read_mem32(hw, ALX_MISC);
	val &= ~ALX_MISC_INTNLOSC_OPEN;
	if (alx_is_rev_a(rev))
		val &= ~ALX_MISC_ISO_EN;
	alx_write_mem32(hw, ALX_MISC, val);
	udelay(20);

	/* driver control speed/duplex, hash-alg */
	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);

	val = alx_read_mem32(hw, ALX_SERDES);
	alx_write_mem32(hw, ALX_SERDES,
			val | ALX_SERDES_MACCLK_SLWDWN |
			ALX_SERDES_PHYCLK_SLWDWN);

	return 0;
}
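
/*
 * Reset and re-initialize the PHY: pulse the DSP reset, re-apply the
 * power-saving and hibernation defaults through the debug registers,
 * disable EEE advertisement, and (when hw->lnk_patch is set) turn off the
 * half-amplitude, green and half-bias features. Finally, the link-up and
 * link-down PHY interrupts are enabled.
 */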
void alx_reset_phy(struct alx_hw *hw)
{
	int i;
	u32 val;
	u16 phy_val;

	/* (DSP)reset PHY core */
	val = alx_read_mem32(hw, ALX_PHY_CTRL);
	val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
		 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
		 ALX_PHY_CTRL_CLS);
	val |= ALX_PHY_CTRL_RST_ANALOG;

	val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
	alx_write_mem32(hw, ALX_PHY_CTRL, val);
	udelay(10);
	alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);

	for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
		udelay(10);

	/* phy power saving & hib */
	alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
			  ALX_SYSMODCTRL_IECHOADJ_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
			  ALX_VDRVBIAS_DEF);

	/* EEE advertisement */
	val = alx_read_mem32(hw, ALX_LPI_CTRL);
	alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);

	/* phy power saving */
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
	alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
	alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
	alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
			  phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
	/* rtl8139c, 120m issue */
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
			  ALX_MIIEXT_NLP78_120M_DEF);
	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
			  ALX_MIIEXT_S3DIG10_DEF);

	if (hw->lnk_patch) {
		/* Turn off half amplitude */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
				  phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
		/* Turn off Green feature */
		alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
		alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
				  phy_val | ALX_GREENCFG2_BP_GREEN);
		/* Turn off half Bias */
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
				  phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
	}

	/* set phy interrupt mask */
	alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
}

#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
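
/*
 * Bring the PCIe side into a sane state: re-enable bus mastering and
 * memory/I-O decoding if the BIOS left them off, clear stale WoL settings,
 * clear the DLPROTERR/FCPROTERR bits in the uncorrectable-error severity
 * register, fix up the 25M wake clock / serdes clock selection for the chip
 * revision, and re-enable ASPM.
 */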
void alx_reset_pcie(struct alx_hw *hw)
{
	u8 rev = alx_hw_revision(hw);
	u32 val;
	u16 val16;

	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
	pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
	if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
		val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
	}

	/* clear WoL setting/status */
	val = alx_read_mem32(hw, ALX_WOL0);
	alx_write_mem32(hw, ALX_WOL0, 0);

	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
	alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);

	/* mask some pcie error bits */
	val = alx_read_mem32(hw, ALX_UE_SVRT);
	val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
	alx_write_mem32(hw, ALX_UE_SVRT, val);

	/* wol 25M & pclk */
	val = alx_read_mem32(hw, ALX_MASTER);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
			alx_write_mem32(hw, ALX_MASTER,
					val | ALX_MASTER_PCLKSEL_SRDS |
					ALX_MASTER_WAKEN_25M);
	} else {
		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
		    (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
			alx_write_mem32(hw, ALX_MASTER,
					(val & ~ALX_MASTER_PCLKSEL_SRDS) |
					ALX_MASTER_WAKEN_25M);
	}

	/* ASPM setting */
	alx_enable_aspm(hw, true, true);

	udelay(10);
}
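
/*
 * Re-enable the RX/TX queues and the MAC receiver/transmitter, programming
 * the duplex and speed bits in MAC_CTRL from the resolved hw->duplex and
 * hw->link_speed. The resulting MAC_CTRL value is cached in hw->rx_ctrl.
 */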
void alx_start_mac(struct alx_hw *hw)
{
	u32 mac, txq, rxq;

	rxq = alx_read_mem32(hw, ALX_RXQ0);
	alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
	txq = alx_read_mem32(hw, ALX_TXQ0);
	alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);

	mac = hw->rx_ctrl;
	if (hw->duplex == DUPLEX_FULL)
		mac |= ALX_MAC_CTRL_FULLD;
	else
		mac &= ~ALX_MAC_CTRL_FULLD;
	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
		      hw->link_speed == SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
						     ALX_MAC_CTRL_SPEED_10_100);
	mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
	hw->rx_ctrl = mac;
	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
}

void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
{
	if (fc & ALX_FC_RX)
		hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;

	if (fc & ALX_FC_TX)
		hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
	else
		hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;

	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
}
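
/*
 * Program ALX_PMCTRL for ASPM: set the lock-detect and L1 timers to their
 * defaults, clear all L0s/L1 related enable bits, then set L0s and/or L1
 * (together with ALX_PMCTRL_ASPM_FCEN) as requested. Rev A parts with CR
 * additionally keep ALX_PMCTRL_L1_SRDS_EN and ALX_PMCTRL_L1_SRDSPLL_EN set.
 */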
void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
{
	u32 pmctrl;
	u8 rev = alx_hw_revision(hw);

	pmctrl = alx_read_mem32(hw, ALX_PMCTRL);

	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
		      ALX_PMCTRL_LCKDET_TIMER_DEF);
	pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
		  ALX_PMCTRL_L1_CLKSW_EN |
		  ALX_PMCTRL_L1_SRDSRX_PWD;
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
	pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
		    ALX_PMCTRL_L1_SRDSPLL_EN |
		    ALX_PMCTRL_L1_BUFSRX_EN |
		    ALX_PMCTRL_SADLY_EN |
		    ALX_PMCTRL_HOTRST_WTEN |
		    ALX_PMCTRL_L0S_EN |
		    ALX_PMCTRL_L1_EN |
		    ALX_PMCTRL_ASPM_FCEN |
		    ALX_PMCTRL_TXL1_AFTER_L0S |
		    ALX_PMCTRL_RXL1_AFTER_L0S);
	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
		pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;

	if (l0s_en)
		pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
	if (l1_en)
		pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);

	alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
}
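
/*
 * Translate an ethtool ADVERTISED_* bitmask into the ALX_DRV_PHY_* encoding
 * kept in the ALX_DRV_PHY field of the ALX_DRV register. With autoneg, each
 * advertised speed/duplex and pause capability is mapped individually;
 * without autoneg, a single forced 10/100 speed/duplex is encoded.
 */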
static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
{
	u32 cfg = 0;

	if (ethadv_cfg & ADVERTISED_Autoneg) {
		cfg |= ALX_DRV_PHY_AUTO;
		if (ethadv_cfg & ADVERTISED_10baseT_Half)
			cfg |= ALX_DRV_PHY_10;
		if (ethadv_cfg & ADVERTISED_10baseT_Full)
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_100baseT_Half)
			cfg |= ALX_DRV_PHY_100;
		if (ethadv_cfg & ADVERTISED_100baseT_Full)
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_1000baseT_Half)
			cfg |= ALX_DRV_PHY_1000;
		if (ethadv_cfg & ADVERTISED_1000baseT_Full)
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
		if (ethadv_cfg & ADVERTISED_Pause)
			cfg |= ADVERTISE_PAUSE_CAP;
		if (ethadv_cfg & ADVERTISED_Asym_Pause)
			cfg |= ADVERTISE_PAUSE_ASYM;
	} else {
		switch (ethadv_cfg) {
		case ADVERTISED_10baseT_Half:
			cfg |= ALX_DRV_PHY_10;
			break;
		case ADVERTISED_100baseT_Half:
			cfg |= ALX_DRV_PHY_100;
			break;
		case ADVERTISED_10baseT_Full:
			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
			break;
		case ADVERTISED_100baseT_Full:
			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
			break;
		}
	}

	return cfg;
}
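
/*
 * Configure the PHY for the requested advertisement and flow control. With
 * autoneg, the MII advertisement and 1000BASE-T control registers are
 * written and autonegotiation is restarted; otherwise BMCR is programmed
 * with the forced speed/duplex. On success the resulting configuration is
 * recorded in the ALX_DRV register and ALX_PHY_INITED is written to the
 * debug address register so alx_phy_configured() can recognize it later.
 */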
int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
{
	u16 adv, giga, cr;
	u32 val;
	int err = 0;

	alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
	val = alx_read_mem32(hw, ALX_DRV);
	ALX_SET_FIELD(val, ALX_DRV_PHY, 0);

	if (ethadv & ADVERTISED_Autoneg) {
		adv = ADVERTISE_CSMA;
		adv |= ethtool_adv_to_mii_adv_t(ethadv);

		if (flowctrl & ALX_FC_ANEG) {
			if (flowctrl & ALX_FC_RX) {
				adv |= ADVERTISED_Pause;
				if (!(flowctrl & ALX_FC_TX))
					adv |= ADVERTISED_Asym_Pause;
			} else if (flowctrl & ALX_FC_TX) {
				adv |= ADVERTISED_Asym_Pause;
			}
		}
		giga = 0;
		if (alx_hw_giga(hw))
			giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);

		cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;

		if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
		    alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
		    alx_write_phy_reg(hw, MII_BMCR, cr))
			err = -EBUSY;
	} else {
		cr = BMCR_RESET;
		if (ethadv == ADVERTISED_100baseT_Half ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_SPEED100;
		if (ethadv == ADVERTISED_10baseT_Full ||
		    ethadv == ADVERTISED_100baseT_Full)
			cr |= BMCR_FULLDPLX;

		err = alx_write_phy_reg(hw, MII_BMCR, cr);
	}

	if (!err) {
		alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
		val |= ethadv_to_hw_cfg(hw, ethadv);
	}

	alx_write_mem32(hw, ALX_DRV, val);

	return err;
}
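
/*
 * Post-link tuning for rev A and B0 parts. When the link is up, the reported
 * cable length (CLDCTRL6) and AGC value are used to choose the 1000BT/AZ
 * anti-detect and AFE thresholds; on B0 with hw->lnk_patch the MSE16DB and
 * MSE20DB noise thresholds are also adjusted. When the link is down, the
 * default settings are restored.
 */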
void alx_post_phy_link(struct alx_hw *hw)
{
	u16 phy_val, len, agc;
	u8 revid = alx_hw_revision(hw);
	bool adj_th = revid == ALX_REV_B0;

	if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
		return;

	/* 1000BT/AZ, wrong cable length */
	if (hw->link_speed != SPEED_UNKNOWN) {
		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
				 &phy_val);
		len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
		alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
		agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);

		if ((hw->link_speed == SPEED_1000 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
		      (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
		    (hw->link_speed == SPEED_100 &&
		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
		      (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_LONG);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					 &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val | ALX_AFE_10BT_100M_TH);
		} else {
			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
					  ALX_AZ_ANADECT_DEF);
			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
					 ALX_MIIEXT_AFE, &phy_val);
			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
					  phy_val & ~ALX_AFE_10BT_100M_TH);
		}

		/* threshold adjust */
		if (adj_th && hw->lnk_patch) {
			if (hw->link_speed == SPEED_100) {
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
						  ALX_MSE16DB_UP);
			} else if (hw->link_speed == SPEED_1000) {
				/*
				 * Giga link threshold, raise the tolerance of
				 * noise 50%
				 */
				alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						 &phy_val);
				ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
					      ALX_MSE20DB_TH_HI);
				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
						  phy_val);
			}
		}
	} else {
		alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				 &phy_val);
		alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
				  phy_val & ~ALX_AFE_10BT_100M_TH);

		if (adj_th && hw->lnk_patch) {
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
					  ALX_MSE16DB_DOWN);
			alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
			ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
				      ALX_MSE20DB_TH_DEF);
			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
		}
	}
}

bool alx_phy_configured(struct alx_hw *hw)
{
	u32 cfg, hw_cfg;

	cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
	cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
	hw_cfg = alx_get_phy_config(hw);
	if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
		return false;

	return cfg == hw_cfg;
}
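
/*
 * Read the current link state into hw->link_speed and hw->duplex. BMSR is
 * read twice because the link-status bit is latched; the resolved speed and
 * duplex then come from the vendor-specific Giga PSSR register.
 */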
int alx_read_phy_link(struct alx_hw *hw)
{
	struct pci_dev *pdev = hw->pdev;
	u16 bmsr, giga;
	int err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
	if (err)
		return err;

	if (!(bmsr & BMSR_LSTATUS)) {
		hw->link_speed = SPEED_UNKNOWN;
		hw->duplex = DUPLEX_UNKNOWN;
		return 0;
	}

	/* speed/duplex result is saved in PHY Specific Status Register */
	err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
	if (err)
		return err;

	if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
		goto wrong_speed;

	switch (giga & ALX_GIGA_PSSR_SPEED) {
	case ALX_GIGA_PSSR_1000MBS:
		hw->link_speed = SPEED_1000;
		break;
	case ALX_GIGA_PSSR_100MBS:
		hw->link_speed = SPEED_100;
		break;
	case ALX_GIGA_PSSR_10MBS:
		hw->link_speed = SPEED_10;
		break;
	default:
		goto wrong_speed;
	}

	hw->duplex = (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;

	return 0;

wrong_speed:
	dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
	return -EINVAL;
}

int alx_clear_phy_intr(struct alx_hw *hw)
{
	u16 isr;

	/* clear interrupt status by reading it */
	return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
}

void alx_disable_rss(struct alx_hw *hw)
{
	u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);

	ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
	alx_write_mem32(hw, ALX_RXQ0, ctrl);
}
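
/*
 * Basic MAC/DMA bring-up: program the station address, clock gating, SMB and
 * interrupt-moderation timers, MTU and jumbo thresholds, the TXQ/RXQ burst
 * and prefetch parameters, RX FIFO flow-control thresholds, DMA request
 * sizing, and the default multi-TX-queue weights.
 */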
void alx_configure_basic(struct alx_hw *hw)
{
	u32 val, raw_mtu, max_payload;
	u16 val16;
	u8 chip_rev = alx_hw_revision(hw);

	alx_set_macaddr(hw, hw->mac_addr);

	alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);

	/* idle timeout to switch clk_125M */
	if (chip_rev >= ALX_REV_B0)
		alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
				ALX_IDLE_DECISN_TIMER_DEF);

	alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);

	val = alx_read_mem32(hw, ALX_MASTER);
	val |= ALX_MASTER_IRQMOD2_EN |
	       ALX_MASTER_IRQMOD1_EN |
	       ALX_MASTER_SYSALVTIMER_EN;
	alx_write_mem32(hw, ALX_MASTER, val);
	alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
			(hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
	/* intr re-trig timeout */
	alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
	/* tpd threshold to trig int */
	alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
	alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);

	raw_mtu = hw->mtu + ETH_HLEN;
	alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
	if (raw_mtu > ALX_MTU_JUMBO_TH)
		hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;

	if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
		val = (raw_mtu + 8 + 7) >> 3;
	else
		val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
	alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);

	max_payload = pcie_get_readrq(hw->pdev) >> 8;
	/*
	 * if the BIOS has changed the default DMA read max length,
	 * restore it to the default value
	 */
	if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
		pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);

	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
	      ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
	      ALX_TXQ0_SUPT_IPOPT |
	      ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
	alx_write_mem32(hw, ALX_TXQ0, val);
	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
	      ALX_HQTPD_BURST_EN;
	alx_write_mem32(hw, ALX_HQTPD, val);

	/* rxq, flow control */
	val = alx_read_mem32(hw, ALX_SRAM5);
	val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
	if (val > ALX_SRAM_RXF_LEN_8K) {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
	} else {
		val16 = ALX_MTU_STD_ALGN >> 3;
		val = (val - ALX_MTU_STD_ALGN) >> 3;
	}
	alx_write_mem32(hw, ALX_RXQ2,
			val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
			val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
	val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
	      ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
	      ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
	      ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
	      ALX_RXQ0_IPV6_PARSE_EN;

	if (alx_hw_giga(hw))
		ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
			      ALX_RXQ0_ASPM_THRESH_100M);

	alx_write_mem32(hw, ALX_RXQ0, val);

	val = alx_read_mem32(hw, ALX_DMA);
	val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
	      ALX_DMA_RREQ_PRI_DATA |
	      max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
	      ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
	      ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
	      (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
	alx_write_mem32(hw, ALX_DMA, val);

	/* default multi-tx-q weights */
	val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
	      4 << ALX_WRR_PRI0_SHIFT |
	      4 << ALX_WRR_PRI1_SHIFT |
	      4 << ALX_WRR_PRI2_SHIFT |
	      4 << ALX_WRR_PRI3_SHIFT;
	alx_write_mem32(hw, ALX_WRR, val);
}

bool alx_get_phy_info(struct alx_hw *hw)
{
	u16 devs1, devs2;

	if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
	    alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
		return false;

	/* since we don't have a PMA/PMD status2 register, we can't use
	 * mdio45_probe() for prtad and mmds; use the fixed MMD3 to get the
	 * mmds instead.
	 */
	if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
	    alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
		return false;

	hw->mdio.mmds = devs1 | devs2 << 16;

	return true;
}
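
/*
 * Accumulate the hardware MIB counters into the driver's software stats.
 * Counters are added rather than assigned, so hw->stats holds running
 * totals across successive calls.
 */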
void alx_update_hw_stats(struct alx_hw *hw)
{
	/* RX stats */
	hw->stats.rx_ok += alx_read_mem32(hw, ALX_MIB_RX_OK);
	hw->stats.rx_bcast += alx_read_mem32(hw, ALX_MIB_RX_BCAST);
	hw->stats.rx_mcast += alx_read_mem32(hw, ALX_MIB_RX_MCAST);
	hw->stats.rx_pause += alx_read_mem32(hw, ALX_MIB_RX_PAUSE);
	hw->stats.rx_ctrl += alx_read_mem32(hw, ALX_MIB_RX_CTRL);
	hw->stats.rx_fcs_err += alx_read_mem32(hw, ALX_MIB_RX_FCS_ERR);
	hw->stats.rx_len_err += alx_read_mem32(hw, ALX_MIB_RX_LEN_ERR);
	hw->stats.rx_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BYTE_CNT);
	hw->stats.rx_runt += alx_read_mem32(hw, ALX_MIB_RX_RUNT);
	hw->stats.rx_frag += alx_read_mem32(hw, ALX_MIB_RX_FRAG);
	hw->stats.rx_sz_64B += alx_read_mem32(hw, ALX_MIB_RX_SZ_64B);
	hw->stats.rx_sz_127B += alx_read_mem32(hw, ALX_MIB_RX_SZ_127B);
	hw->stats.rx_sz_255B += alx_read_mem32(hw, ALX_MIB_RX_SZ_255B);
	hw->stats.rx_sz_511B += alx_read_mem32(hw, ALX_MIB_RX_SZ_511B);
	hw->stats.rx_sz_1023B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1023B);
	hw->stats.rx_sz_1518B += alx_read_mem32(hw, ALX_MIB_RX_SZ_1518B);
	hw->stats.rx_sz_max += alx_read_mem32(hw, ALX_MIB_RX_SZ_MAX);
	hw->stats.rx_ov_sz += alx_read_mem32(hw, ALX_MIB_RX_OV_SZ);
	hw->stats.rx_ov_rxf += alx_read_mem32(hw, ALX_MIB_RX_OV_RXF);
	hw->stats.rx_ov_rrd += alx_read_mem32(hw, ALX_MIB_RX_OV_RRD);
	hw->stats.rx_align_err += alx_read_mem32(hw, ALX_MIB_RX_ALIGN_ERR);
	hw->stats.rx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_BCCNT);
	hw->stats.rx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_RX_MCCNT);
	hw->stats.rx_err_addr += alx_read_mem32(hw, ALX_MIB_RX_ERRADDR);

	/* TX stats */
	hw->stats.tx_ok += alx_read_mem32(hw, ALX_MIB_TX_OK);
	hw->stats.tx_bcast += alx_read_mem32(hw, ALX_MIB_TX_BCAST);
	hw->stats.tx_mcast += alx_read_mem32(hw, ALX_MIB_TX_MCAST);
	hw->stats.tx_pause += alx_read_mem32(hw, ALX_MIB_TX_PAUSE);
	hw->stats.tx_exc_defer += alx_read_mem32(hw, ALX_MIB_TX_EXC_DEFER);
	hw->stats.tx_ctrl += alx_read_mem32(hw, ALX_MIB_TX_CTRL);
	hw->stats.tx_defer += alx_read_mem32(hw, ALX_MIB_TX_DEFER);
	hw->stats.tx_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BYTE_CNT);
	hw->stats.tx_sz_64B += alx_read_mem32(hw, ALX_MIB_TX_SZ_64B);
	hw->stats.tx_sz_127B += alx_read_mem32(hw, ALX_MIB_TX_SZ_127B);
	hw->stats.tx_sz_255B += alx_read_mem32(hw, ALX_MIB_TX_SZ_255B);
	hw->stats.tx_sz_511B += alx_read_mem32(hw, ALX_MIB_TX_SZ_511B);
	hw->stats.tx_sz_1023B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1023B);
	hw->stats.tx_sz_1518B += alx_read_mem32(hw, ALX_MIB_TX_SZ_1518B);
	hw->stats.tx_sz_max += alx_read_mem32(hw, ALX_MIB_TX_SZ_MAX);
	hw->stats.tx_single_col += alx_read_mem32(hw, ALX_MIB_TX_SINGLE_COL);
	hw->stats.tx_multi_col += alx_read_mem32(hw, ALX_MIB_TX_MULTI_COL);
	hw->stats.tx_late_col += alx_read_mem32(hw, ALX_MIB_TX_LATE_COL);
	hw->stats.tx_abort_col += alx_read_mem32(hw, ALX_MIB_TX_ABORT_COL);
	hw->stats.tx_underrun += alx_read_mem32(hw, ALX_MIB_TX_UNDERRUN);
	hw->stats.tx_trd_eop += alx_read_mem32(hw, ALX_MIB_TX_TRD_EOP);
	hw->stats.tx_len_err += alx_read_mem32(hw, ALX_MIB_TX_LEN_ERR);
	hw->stats.tx_trunc += alx_read_mem32(hw, ALX_MIB_TX_TRUNC);
	hw->stats.tx_bc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_BCCNT);
	hw->stats.tx_mc_byte_cnt += alx_read_mem32(hw, ALX_MIB_TX_MCCNT);

	hw->stats.update += alx_read_mem32(hw, ALX_MIB_UPDATE);
}