vmci_event.c

/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "vmci_driver.h"
#include "vmci_event.h"

#define EVENT_MAGIC 0xEABE0000
#define VMCI_EVENT_MAX_ATTEMPTS 10

struct vmci_subscription {
        u32 id;
        u32 event;
        vmci_event_cb callback;
        void *callback_data;
        struct list_head node;  /* on one of subscriber lists */
};

static struct list_head subscriber_array[VMCI_EVENT_MAX];
static DEFINE_MUTEX(subscriber_mutex);
int __init vmci_event_init(void)
{
        int i;

        for (i = 0; i < VMCI_EVENT_MAX; i++)
                INIT_LIST_HEAD(&subscriber_array[i]);

        return VMCI_SUCCESS;
}

void vmci_event_exit(void)
{
        int e;

        /* We free all memory at exit. */
        for (e = 0; e < VMCI_EVENT_MAX; e++) {
                struct vmci_subscription *cur, *p2;

                list_for_each_entry_safe(cur, p2, &subscriber_array[e], node) {
                        /*
                         * We should never get here because all events
                         * should have been unregistered before we try
                         * to unload the driver module.
                         */
                        pr_warn("Unexpected free events occurring\n");
                        list_del(&cur->node);
                        kfree(cur);
                }
        }
}
/*
 * Find entry. Assumes subscriber_mutex is held.
 */
static struct vmci_subscription *event_find(u32 sub_id)
{
        int e;

        for (e = 0; e < VMCI_EVENT_MAX; e++) {
                struct vmci_subscription *cur;

                list_for_each_entry(cur, &subscriber_array[e], node) {
                        if (cur->id == sub_id)
                                return cur;
                }
        }
        return NULL;
}

/*
 * Actually delivers the events to the subscribers.
 * The callback function for each subscriber is invoked.
 */
static void event_deliver(struct vmci_event_msg *event_msg)
{
        struct vmci_subscription *cur;
        struct list_head *subscriber_list;

        rcu_read_lock();
        subscriber_list = &subscriber_array[event_msg->event_data.event];
        list_for_each_entry_rcu(cur, subscriber_list, node) {
                cur->callback(cur->id, &event_msg->event_data,
                              cur->callback_data);
        }
        rcu_read_unlock();
}
/*
 * Dispatcher for the VMCI_EVENT_RECEIVE datagrams. Calls all
 * subscribers for given event.
 */
int vmci_event_dispatch(struct vmci_datagram *msg)
{
        struct vmci_event_msg *event_msg = (struct vmci_event_msg *)msg;

        if (msg->payload_size < sizeof(u32) ||
            msg->payload_size > sizeof(struct vmci_event_data_max))
                return VMCI_ERROR_INVALID_ARGS;

        if (!VMCI_EVENT_VALID(event_msg->event_data.event))
                return VMCI_ERROR_EVENT_UNKNOWN;

        event_deliver(event_msg);
        return VMCI_SUCCESS;
}
/*
 * vmci_event_subscribe() - Subscribe to a given event.
 * @event:              The event to subscribe to.
 * @callback:           The callback to invoke upon the event.
 * @callback_data:      Data to pass to the callback.
 * @new_subscription_id: ID used to track subscription. Used with
 *                      vmci_event_unsubscribe().
 *
 * Subscribes to the provided event. The callback specified will be
 * fired from RCU critical section and therefore must not sleep.
 */
int vmci_event_subscribe(u32 event,
                         vmci_event_cb callback,
                         void *callback_data,
                         u32 *new_subscription_id)
{
        struct vmci_subscription *sub;
        int attempts;
        int retval;
        bool have_new_id = false;

        if (!new_subscription_id) {
                pr_devel("%s: Invalid subscription (NULL)\n", __func__);
                return VMCI_ERROR_INVALID_ARGS;
        }

        if (!VMCI_EVENT_VALID(event) || !callback) {
                pr_devel("%s: Failed to subscribe to event (type=%d) (callback=%p) (data=%p)\n",
                         __func__, event, callback, callback_data);
                return VMCI_ERROR_INVALID_ARGS;
        }

        sub = kzalloc(sizeof(*sub), GFP_KERNEL);
        if (!sub)
                return VMCI_ERROR_NO_MEM;

        sub->id = VMCI_EVENT_MAX;
        sub->event = event;
        sub->callback = callback;
        sub->callback_data = callback_data;
        INIT_LIST_HEAD(&sub->node);

        mutex_lock(&subscriber_mutex);

        /* Creation of a new event is always allowed. */
        for (attempts = 0; attempts < VMCI_EVENT_MAX_ATTEMPTS; attempts++) {
                static u32 subscription_id;
                /*
                 * We try to get an id a couple of times before
                 * claiming we are out of resources.
                 */

                /* Test for duplicate id. */
                if (!event_find(++subscription_id)) {
                        sub->id = subscription_id;
                        have_new_id = true;
                        break;
                }
        }

        if (have_new_id) {
                list_add_rcu(&sub->node, &subscriber_array[event]);
                retval = VMCI_SUCCESS;
        } else {
                retval = VMCI_ERROR_NO_RESOURCES;
        }

        mutex_unlock(&subscriber_mutex);

        *new_subscription_id = sub->id;
        return retval;
}
EXPORT_SYMBOL_GPL(vmci_event_subscribe);
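
The kernel-doc above notes that the callback fires from an RCU read-side
critical section (see event_deliver()) and therefore must not sleep. As an
illustration only, not part of vmci_event.c, here is a minimal sketch of a
hypothetical client subscribing to VMCI_EVENT_QP_PEER_ATTACH; the names
example_attach_cb, example_client_init and attach_sub_id are made up for
this sketch.

#include <linux/module.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>

static u32 attach_sub_id = VMCI_INVALID_ID;

/* Runs under rcu_read_lock() via event_deliver(), so it must not sleep. */
static void example_attach_cb(u32 sub_id, const struct vmci_event_data *ed,
                              void *client_data)
{
        pr_info("VMCI event %u delivered to subscription %u\n",
                ed->event, sub_id);
}

static int __init example_client_init(void)
{
        int result;

        result = vmci_event_subscribe(VMCI_EVENT_QP_PEER_ATTACH,
                                      example_attach_cb, NULL,
                                      &attach_sub_id);
        if (result < VMCI_SUCCESS)
                return -ENODEV;

        return 0;
}
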
/*
 * vmci_event_unsubscribe() - unsubscribe from an event.
 * @sub_id: A subscription ID as provided by vmci_event_subscribe()
 *
 * Unsubscribe from given event. Removes the subscription from the
 * subscriber list and frees it once all RCU readers have finished
 * with it.
 */
int vmci_event_unsubscribe(u32 sub_id)
{
        struct vmci_subscription *s;

        mutex_lock(&subscriber_mutex);
        s = event_find(sub_id);
        if (s)
                list_del_rcu(&s->node);
        mutex_unlock(&subscriber_mutex);

        if (!s)
                return VMCI_ERROR_NOT_FOUND;

        synchronize_rcu();
        kfree(s);

        return VMCI_SUCCESS;
}
EXPORT_SYMBOL_GPL(vmci_event_unsubscribe);
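
Continuing the hypothetical client sketched after vmci_event_subscribe()
above, teardown unsubscribes with the saved ID. Because
vmci_event_unsubscribe() removes the entry with list_del_rcu() and calls
synchronize_rcu() before freeing it, the callback is guaranteed not to be
running once the call returns.

static void __exit example_client_exit(void)
{
        /* Safe: after this returns, example_attach_cb() can no longer run. */
        if (attach_sub_id != VMCI_INVALID_ID)
                vmci_event_unsubscribe(attach_sub_id);
}

module_init(example_client_init);
module_exit(example_client_exit);
MODULE_LICENSE("GPL");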