link_watch.c

/*
 * Linux network device link state notification
 *
 * Author:
 *	Stefan Rompf <sux@loplof.de>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/rtnetlink.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/types.h>
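
/*
 * Device link state is watched lazily: drivers flag carrier/dormant
 * changes (typically via netif_carrier_on()/netif_carrier_off(), which
 * call linkwatch_fire_event()), affected devices are queued on
 * lweventlist, and a delayed work item later applies the RFC 2863
 * operstate policy, (de)activates the qdisc and sends an rtnetlink
 * notification.  Non-urgent events are batched to at most one run per
 * second; urgent ones are handled as soon as possible.
 */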
enum lw_bits {
	LW_URGENT = 0,
};

static unsigned long linkwatch_flags;
static unsigned long linkwatch_nextevent;

static void linkwatch_event(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);

static LIST_HEAD(lweventlist);
static DEFINE_SPINLOCK(lweventlist_lock);
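
/*
 * Map a device's carrier and dormant state to an RFC 2863 operational
 * state.  A stacked device (ifindex != iflink) that has lost carrier is
 * reported as "lower layer down" rather than plain "down".
 */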
static unsigned char default_operstate(const struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		return (dev->ifindex != dev_get_iflink(dev) ?
			IF_OPER_LOWERLAYERDOWN : IF_OPER_DOWN);

	if (netif_dormant(dev))
		return IF_OPER_DORMANT;

	return IF_OPER_UP;
}
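
/*
 * Recompute dev->operstate and, if it changed, store it under
 * dev_base_lock.  A device in IF_LINK_MODE_DORMANT never reports better
 * than IF_OPER_DORMANT, even when the link is up.
 */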
static void rfc2863_policy(struct net_device *dev)
{
	unsigned char operstate = default_operstate(dev);

	if (operstate == dev->operstate)
		return;

	write_lock_bh(&dev_base_lock);

	switch(dev->link_mode) {
	case IF_LINK_MODE_DORMANT:
		if (operstate == IF_OPER_UP)
			operstate = IF_OPER_DORMANT;
		break;

	case IF_LINK_MODE_DEFAULT:
	default:
		break;
	}

	dev->operstate = operstate;

	write_unlock_bh(&dev_base_lock);
}

void linkwatch_init_dev(struct net_device *dev)
{
	/* Handle pre-registration link state changes */
	if (!netif_carrier_ok(dev) || netif_dormant(dev))
		rfc2863_policy(dev);
}
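
/*
 * Decide whether an event may bypass the one-per-second rate limit.
 * Events on stacked lower devices and team ports, and carrier-up events
 * while the qdisc is not yet active, are handled immediately.
 */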
static bool linkwatch_urgent_event(struct net_device *dev)
{
	if (!netif_running(dev))
		return false;

	if (dev->ifindex != dev_get_iflink(dev))
		return true;

	if (dev->priv_flags & IFF_TEAM_PORT)
		return true;

	return netif_carrier_ok(dev) && qdisc_tx_changing(dev);
}
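
/*
 * Queue a device for processing.  Each device appears on lweventlist at
 * most once; the reference taken here is dropped in linkwatch_do_dev().
 */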
static void linkwatch_add_event(struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (list_empty(&dev->link_watch_list)) {
		list_add_tail(&dev->link_watch_list, &lweventlist);
		dev_hold(dev);
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
}
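
/*
 * Schedule the delayed work.  Urgent requests set LW_URGENT and force an
 * immediate run; non-urgent requests wait until linkwatch_nextevent and
 * never pull an already scheduled run forward.
 */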
static void linkwatch_schedule_work(int urgent)
{
	unsigned long delay = linkwatch_nextevent - jiffies;

	if (test_bit(LW_URGENT, &linkwatch_flags))
		return;

	/* Minimise down-time: drop delay for up event. */
	if (urgent) {
		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
			return;
		delay = 0;
	}

	/* If we wrap around we'll delay it by at most HZ. */
	if (delay > HZ)
		delay = 0;

	/*
	 * If urgent, schedule immediate execution; otherwise, don't
	 * override the existing timer.
	 */
	if (test_bit(LW_URGENT, &linkwatch_flags))
		mod_delayed_work(system_wq, &linkwatch_work, 0);
	else
		schedule_delayed_work(&linkwatch_work, delay);
}
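
/*
 * Process one queued device: clear its pending bit so new events can be
 * recorded, apply the operstate policy, (de)activate the qdisc according
 * to carrier state, notify userspace, and drop the queue's reference.
 */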
static void linkwatch_do_dev(struct net_device *dev)
{
	/*
	 * Make sure the above read is complete since it can be
	 * rewritten as soon as we clear the bit below.
	 */
	smp_mb__before_atomic();

	/* We are about to handle this device,
	 * so new events can be accepted
	 */
	clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);

	rfc2863_policy(dev);
	if (dev->flags & IFF_UP) {
		if (netif_carrier_ok(dev))
			dev_activate(dev);
		else
			dev_deactivate(dev);

		netdev_state_change(dev);
	}
	dev_put(dev);
}
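
/*
 * Drain the event list.  The list is spliced onto a private list so the
 * lock can be dropped while each device is handled; in urgent-only mode,
 * non-urgent devices are put back and picked up by the next full run.
 */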
static void __linkwatch_run_queue(int urgent_only)
{
	struct net_device *dev;
	LIST_HEAD(wrk);

	/*
	 * Limit the number of linkwatch events to one
	 * per second so that a runaway driver does not
	 * cause a storm of messages on the netlink
	 * socket.  This limit does not apply to up events
	 * while the device qdisc is down.
	 */
	if (!urgent_only)
		linkwatch_nextevent = jiffies + HZ;
	/* Limit wrap-around effect on delay. */
	else if (time_after(linkwatch_nextevent, jiffies + HZ))
		linkwatch_nextevent = jiffies;

	clear_bit(LW_URGENT, &linkwatch_flags);

	spin_lock_irq(&lweventlist_lock);
	list_splice_init(&lweventlist, &wrk);

	while (!list_empty(&wrk)) {
		dev = list_first_entry(&wrk, struct net_device, link_watch_list);
		list_del_init(&dev->link_watch_list);

		if (urgent_only && !linkwatch_urgent_event(dev)) {
			list_add_tail(&dev->link_watch_list, &lweventlist);
			continue;
		}
		spin_unlock_irq(&lweventlist_lock);
		linkwatch_do_dev(dev);
		spin_lock_irq(&lweventlist_lock);
	}

	if (!list_empty(&lweventlist))
		linkwatch_schedule_work(0);
	spin_unlock_irq(&lweventlist_lock);
}
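
/*
 * Remove a device from the pending list (e.g. while it is being
 * unregistered) and, if it was queued, handle it right away so the
 * reference held by the list is released.
 */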
void linkwatch_forget_dev(struct net_device *dev)
{
	unsigned long flags;
	int clean = 0;

	spin_lock_irqsave(&lweventlist_lock, flags);
	if (!list_empty(&dev->link_watch_list)) {
		list_del_init(&dev->link_watch_list);
		clean = 1;
	}
	spin_unlock_irqrestore(&lweventlist_lock, flags);
	if (clean)
		linkwatch_do_dev(dev);
}

/* Must be called with the rtnl semaphore held */
void linkwatch_run_queue(void)
{
	__linkwatch_run_queue(0);
}
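
/*
 * Delayed work handler.  Runs urgent-only while the one-second rate-limit
 * window is still open, and does a full run otherwise.
 */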
static void linkwatch_event(struct work_struct *dummy)
{
	rtnl_lock();
	__linkwatch_run_queue(time_after(linkwatch_nextevent, jiffies));
	rtnl_unlock();
}
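
/*
 * Entry point for link state changes (called from netif_carrier_on(),
 * netif_carrier_off() and friends): queue the device once and schedule
 * processing, immediately if the event is urgent.
 */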
void linkwatch_fire_event(struct net_device *dev)
{
	bool urgent = linkwatch_urgent_event(dev);

	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
		linkwatch_add_event(dev);
	} else if (!urgent)
		return;

	linkwatch_schedule_work(urgent);
}
EXPORT_SYMBOL(linkwatch_fire_event);
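
/*
 * Example (illustrative sketch only; the mydrv_* names are hypothetical):
 * a driver does not normally call linkwatch_fire_event() directly.  It
 * reports carrier changes through netif_carrier_on()/netif_carrier_off(),
 * which fire the event on its behalf.
 *
 *	static irqreturn_t mydrv_link_irq(int irq, void *data)
 *	{
 *		struct net_device *dev = data;
 *
 *		if (mydrv_phy_link_up(dev))
 *			netif_carrier_on(dev);	// queues a linkwatch event
 *		else
 *			netif_carrier_off(dev);	// linkwatch deactivates the qdisc
 *
 *		return IRQ_HANDLED;
 *	}
 */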