notification.c
  1. /*
  2. * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2, or (at your option)
  7. * any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; see the file COPYING. If not, write to
  16. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  17. */
  18. /*
  19. * Basic idea behind the notification queue: An fsnotify group (like inotify)
  20. * sends the userspace notification about events asynchronously some time after
  21. * the event happened. When inotify gets an event it will need to add that
  22. * event to the group notify queue. Since a single event might need to be on
  23. * multiple group's notification queues we can't add the event directly to each
  24. * queue and instead add a small "event_holder" to each queue. This event_holder
  25. * has a pointer back to the original event. Since the majority of events are
  26. * going to end up on one, and only one, notification queue we embed one
  27. * event_holder into each event. This means we have a single allocation instead
  28. * of always needing two. If the embedded event_holder is already in use by
  29. * another group a new event_holder (from fsnotify_event_holder_cachep) will be
  30. * allocated and used.
  31. */
  32. #include <linux/fs.h>
  33. #include <linux/init.h>
  34. #include <linux/kernel.h>
  35. #include <linux/list.h>
  36. #include <linux/module.h>
  37. #include <linux/mount.h>
  38. #include <linux/mutex.h>
  39. #include <linux/namei.h>
  40. #include <linux/path.h>
  41. #include <linux/slab.h>
  42. #include <linux/spinlock.h>
  43. #include <linux/atomic.h>
  44. #include <linux/fsnotify_backend.h>
  45. #include "fsnotify.h"
  46. static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
  47. /**
  48. * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
  49. * Called from fsnotify_move, which is inlined into filesystem modules.
  50. */
  51. u32 fsnotify_get_cookie(void)
  52. {
  53. return atomic_inc_return(&fsnotify_sync_cookie);
  54. }
  55. EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
  56. /* return true if the notify queue is empty, false otherwise */
  57. bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)
  58. {
  59. BUG_ON(!mutex_is_locked(&group->notification_mutex));
  60. return list_empty(&group->notification_list) ? true : false;
  61. }
  62. void fsnotify_destroy_event(struct fsnotify_group *group,
  63. struct fsnotify_event *event)
  64. {
  65. /* Overflow events are per-group and we don't want to free them */
  66. if (!event || event->mask == FS_Q_OVERFLOW)
  67. return;
  68. /* If the event is still queued, we have a problem... */
  69. WARN_ON(!list_empty(&event->list));
  70. group->ops->free_event(event);
  71. }
/*
 * Add an event to the group notification queue. The group can later pull this
 * event off the queue to deal with. The function returns 0 if the event was
 * added to the queue, 1 if the event was merged with some other queued event,
 * 2 if the event was not queued - either the queue of events has overflown
 * or the group is shutting down.
 *
 * @group: the group whose notification queue receives the event
 * @event: the event to queue (may be replaced by the group's overflow event)
 * @merge: optional callback that may coalesce @event into an already-queued
 *	event; a non-zero return from it means "merged, do not queue".
 */
int fsnotify_add_event(struct fsnotify_group *group,
		       struct fsnotify_event *event,
		       int (*merge)(struct list_head *,
				    struct fsnotify_event *))
{
	int ret = 0;
	struct list_head *list = &group->notification_list;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	mutex_lock(&group->notification_mutex);

	/* A group being torn down accepts no further events. */
	if (group->shutdown) {
		mutex_unlock(&group->notification_mutex);
		return 2;
	}

	if (group->q_len >= group->max_events) {
		ret = 2;
		/* Queue overflow event only if it isn't already queued */
		if (!list_empty(&group->overflow_event->list)) {
			mutex_unlock(&group->notification_mutex);
			return ret;
		}
		/*
		 * Queue the group's pre-allocated overflow event instead of
		 * the caller's event, but still report overflow (ret == 2).
		 */
		event = group->overflow_event;
		goto queue;
	}

	if (!list_empty(list) && merge) {
		ret = merge(list, event);
		if (ret) {
			/* Event coalesced into an existing one; nothing to queue. */
			mutex_unlock(&group->notification_mutex);
			return ret;
		}
	}

queue:
	group->q_len++;
	list_add_tail(&event->list, list);
	mutex_unlock(&group->notification_mutex);

	/* Wake sleeping readers and signal async (fcntl F_SETFL FASYNC) waiters. */
	wake_up(&group->notification_waitq);
	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN);
	return ret;
}
  117. /*
  118. * Remove and return the first event from the notification list. It is the
  119. * responsibility of the caller to destroy the obtained event
  120. */
  121. struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group)
  122. {
  123. struct fsnotify_event *event;
  124. BUG_ON(!mutex_is_locked(&group->notification_mutex));
  125. pr_debug("%s: group=%p\n", __func__, group);
  126. event = list_first_entry(&group->notification_list,
  127. struct fsnotify_event, list);
  128. /*
  129. * We need to init list head for the case of overflow event so that
  130. * check in fsnotify_add_event() works
  131. */
  132. list_del_init(&event->list);
  133. group->q_len--;
  134. return event;
  135. }
  136. /*
  137. * This will not remove the event, that must be done with
  138. * fsnotify_remove_first_event()
  139. */
  140. struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group)
  141. {
  142. BUG_ON(!mutex_is_locked(&group->notification_mutex));
  143. return list_first_entry(&group->notification_list,
  144. struct fsnotify_event, list);
  145. }
  146. /*
  147. * Called when a group is being torn down to clean up any outstanding
  148. * event notifications.
  149. */
  150. void fsnotify_flush_notify(struct fsnotify_group *group)
  151. {
  152. struct fsnotify_event *event;
  153. mutex_lock(&group->notification_mutex);
  154. while (!fsnotify_notify_queue_is_empty(group)) {
  155. event = fsnotify_remove_first_event(group);
  156. fsnotify_destroy_event(group, event);
  157. }
  158. mutex_unlock(&group->notification_mutex);
  159. }
/*
 * fsnotify_init_event - initialize the fields common to all fsnotify events.
 * Backend-specific fields are filled in by the caller afterwards.
 *
 * @event the event to initialize
 * @inode the inode which is supposed to receive the event (sometimes a
 * parent of the inode to which the event happened)
 * @mask what actually happened
 */
  172. void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode,
  173. u32 mask)
  174. {
  175. INIT_LIST_HEAD(&event->list);
  176. event->inode = inode;
  177. event->mask = mask;
  178. }