/*
 * linux/kernel/time/tick-broadcast-hrtimer.c
 *
 * This file emulates a local clock event device
 * via a pseudo clock device.
 */
  6. #include <linux/cpu.h>
  7. #include <linux/err.h>
  8. #include <linux/hrtimer.h>
  9. #include <linux/interrupt.h>
  10. #include <linux/percpu.h>
  11. #include <linux/profile.h>
  12. #include <linux/clockchips.h>
  13. #include <linux/sched.h>
  14. #include <linux/smp.h>
  15. #include <linux/module.h>
  16. #include "tick-internal.h"
  17. static struct hrtimer bctimer;
  18. static int bc_shutdown(struct clock_event_device *evt)
  19. {
  20. /*
  21. * Note, we cannot cancel the timer here as we might
  22. * run into the following live lock scenario:
  23. *
  24. * cpu 0 cpu1
  25. * lock(broadcast_lock);
  26. * hrtimer_interrupt()
  27. * bc_handler()
  28. * tick_handle_oneshot_broadcast();
  29. * lock(broadcast_lock);
  30. * hrtimer_cancel()
  31. * wait_for_callback()
  32. */
  33. hrtimer_try_to_cancel(&bctimer);
  34. return 0;
  35. }
  36. /*
  37. * This is called from the guts of the broadcast code when the cpu
  38. * which is about to enter idle has the earliest broadcast timer event.
  39. */
  40. static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
  41. {
  42. int bc_moved;
  43. /*
  44. * We try to cancel the timer first. If the callback is on
  45. * flight on some other cpu then we let it handle it. If we
  46. * were able to cancel the timer nothing can rearm it as we
  47. * own broadcast_lock.
  48. *
  49. * However we can also be called from the event handler of
  50. * ce_broadcast_hrtimer itself when it expires. We cannot
  51. * restart the timer because we are in the callback, but we
  52. * can set the expiry time and let the callback return
  53. * HRTIMER_RESTART.
  54. *
  55. * Since we are in the idle loop at this point and because
  56. * hrtimer_{start/cancel} functions call into tracing,
  57. * calls to these functions must be bound within RCU_NONIDLE.
  58. */
  59. RCU_NONIDLE({
  60. bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
  61. if (bc_moved)
  62. hrtimer_start(&bctimer, expires,
  63. HRTIMER_MODE_ABS_PINNED);});
  64. if (bc_moved) {
  65. /* Bind the "device" to the cpu */
  66. bc->bound_on = smp_processor_id();
  67. } else if (bc->bound_on == smp_processor_id()) {
  68. hrtimer_set_expires(&bctimer, expires);
  69. }
  70. return 0;
  71. }
  72. static struct clock_event_device ce_broadcast_hrtimer = {
  73. .set_state_shutdown = bc_shutdown,
  74. .set_next_ktime = bc_set_next,
  75. .features = CLOCK_EVT_FEAT_ONESHOT |
  76. CLOCK_EVT_FEAT_KTIME |
  77. CLOCK_EVT_FEAT_HRTIMER,
  78. .rating = 0,
  79. .bound_on = -1,
  80. .min_delta_ns = 1,
  81. .max_delta_ns = KTIME_MAX,
  82. .min_delta_ticks = 1,
  83. .max_delta_ticks = ULONG_MAX,
  84. .mult = 1,
  85. .shift = 0,
  86. .cpumask = cpu_all_mask,
  87. };
  88. static enum hrtimer_restart bc_handler(struct hrtimer *t)
  89. {
  90. ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
  91. if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
  92. if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
  93. return HRTIMER_RESTART;
  94. return HRTIMER_NORESTART;
  95. }
  96. void tick_setup_hrtimer_broadcast(void)
  97. {
  98. hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
  99. bctimer.function = bc_handler;
  100. clockevents_register_device(&ce_broadcast_hrtimer);
  101. }