/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}

#define PHY_STATE_STR(_state)			\
	case PHY_##_state:			\
		return __stringify(_state);	\

static const char *phy_state_to_str(enum phy_state st)
{
	switch (st) {
	PHY_STATE_STR(DOWN)
	PHY_STATE_STR(STARTING)
	PHY_STATE_STR(READY)
	PHY_STATE_STR(PENDING)
	PHY_STATE_STR(UP)
	PHY_STATE_STR(AN)
	PHY_STATE_STR(RUNNING)
	PHY_STATE_STR(NOLINK)
	PHY_STATE_STR(FORCING)
	PHY_STATE_STR(CHANGELINK)
	PHY_STATE_STR(HALTED)
	PHY_STATE_STR(RESUMING)
	}

	return NULL;
}

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			    phydev->pause ? "rx/tx" : "off");
	} else {
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);

/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	/* Avoid genphy_aneg_done() if the Clause 45 PHY does not
	 * implement Clause 22 registers
	 */
	if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
		return -EINVAL;

	return genphy_aneg_done(phydev);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline unsigned int phy_find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned int idx;

	idx = phy_find_valid(phy_find_setting(speed, duplex), features);

	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
		(settings[idx].setting & features);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order: 1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	unsigned int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			     features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	phydev->mdix = cmd->eth_tp_mdix_ctrl;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

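/**
 * phy_ethtool_gset - generic ethtool gset function, reports the cached settings
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd to fill in
 *
 * Description: Fills @cmd with the link parameters currently cached in
 * @phydev: supported/advertised/link-partner masks, speed, duplex, port,
 * PHY address, transceiver type, autoneg setting and MDI-X control.
 */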
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->port = PORT_BNC;
	else
		cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;
	cmd->eth_tp_mdix_ctrl = phydev->mdix;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->phy_id == phydev->addr &&
		    mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);

/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);

	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to maintain your own state machine,
 *   do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line.
	 */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	queue_work(system_power_efficient_wq, &phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt,
			IRQF_SHARED,
			"phy_interrupt",
			phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);

	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	if (phy_disable_interrupts(phydev))
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state &&
	    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	cancel_delayed_work_sync(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		err = phy_enable_interrupts(phydev);
		if (err < 0)
			break;

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);
}
EXPORT_SYMBOL(phy_start);

/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	enum phy_state old_state;
	int err = 0;
	int old_link;

	mutex_lock(&phydev->lock);

	old_state = phydev->state;

	if (phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		if (phy_interrupt_is_valid(phydev))
			break;

		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are polling or ignoring
		 * interrupts and link changed since latest checking.
		 */
		if (!phy_interrupt_is_valid(phydev)) {
			old_link = phydev->link;
			err = phy_read_status(phydev);
			if (err)
				break;

			if (old_link != phydev->link)
				phydev->state = PHY_CHANGELINK;
		}
		/* Failsafe: check that nobody set phydev->link = 0 between
		 * two poll cycles, otherwise we won't leave RUNNING state
		 * as long as the link remains down.
		 */
		if (!phydev->link && phydev->state == PHY_RUNNING) {
			phydev->state = PHY_CHANGELINK;
			dev_err(&phydev->dev, "no link in PHY_RUNNING\n");
		}
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else {
					phydev->state = PHY_NOLINK;
				}
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else {
				phydev->state = PHY_NOLINK;
			}
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	dev_dbg(&phydev->dev, "PHY state change %s -> %s\n",
		phy_state_to_str(old_state),
		phy_state_to_str(phydev->state));

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
			   PHY_STATE_TIME * HZ);
}

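/**
 * phy_mac_interrupt - MAC reports that the link state has changed
 * @phydev: target phy_device struct
 * @new_link: new link state reported by the MAC
 *
 * Description: Cancels any pending PHY work, records the new link state
 * in @phydev and schedules the PHY work queue so that the state machine
 * processes the change.
 */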
void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	cancel_work_sync(&phydev->phy_queue);
	phydev->link = new_link;
	schedule_work(&phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @phydev: The PHY device bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 *
 * Description: it reads data from the MMD registers (clause 22 to access to
 * clause 45) of the specified phy address.
 * To read these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14  // Read MMD data
 */
int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
			  int devad, int addr)
{
	struct phy_driver *phydrv = phydev->drv;
	int value = -1;

	if (!phydrv->read_mmd_indirect) {
		struct mii_bus *bus = phydev->bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Read the content of the MMD's selected register */
		value = bus->read(bus, addr, MII_MMD_DATA);
		mutex_unlock(&bus->mdio_lock);
	} else {
		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
	}
	return value;
}
EXPORT_SYMBOL(phy_read_mmd_indirect);

/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 * @data: data to write in the MMD register
 *
 * Description: Write data to the MMD registers of the specified
 * phy address.
 * To write these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
			    int devad, int addr, u32 data)
{
	struct phy_driver *phydrv = phydev->drv;

	if (!phydrv->write_mmd_indirect) {
		struct mii_bus *bus = phydev->bus;

		mutex_lock(&bus->mdio_lock);
		mmd_phy_indirect(bus, prtad, devad, addr);

		/* Write the data into MMD's selected register */
		bus->write(bus, addr, MII_MMD_DATA, data);
		mutex_unlock(&bus->mdio_lock);
	} else {
		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
	}
}
EXPORT_SYMBOL(phy_write_mmd_indirect);

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	/* According to 802.3az, EEE is supported only in full duplex mode.
	 * Also the EEE feature is active when the core is operating with MII,
	 * GMII or RGMII (all kinds). Internal PHYs are also allowed to proceed
	 * and should return an error if they do not support EEE.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	     (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	     phy_interface_is_rgmii(phydev) ||
	     phy_is_internal(phydev))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS, phydev->addr);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN, phydev->addr);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN, phydev->addr);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
							MDIO_MMD_PCS,
							phydev->addr);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
					       MDIO_MMD_PCS, phydev->addr,
					       val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);

/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR,
				     MDIO_MMD_PCS, phydev->addr);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
				    MDIO_MMD_PCS, phydev->addr);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it is to program the Advertisement EEE register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);

	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
			       phydev->addr, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

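/**
 * phy_ethtool_set_wol - configure Wake-on-LAN in the PHY
 * @phydev: target phy_device struct
 * @wol: Wake-on-LAN parameters requested via ethtool
 *
 * Description: Hands the request to the driver's set_wol callback if one
 * is provided; otherwise reports that the operation is not supported.
 */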
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

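/**
 * phy_ethtool_get_wol - report the PHY's Wake-on-LAN status
 * @phydev: target phy_device struct
 * @wol: Wake-on-LAN status to fill in
 *
 * Description: Queries the driver's get_wol callback if one is provided;
 * otherwise @wol is left untouched.
 */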
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);