/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Written by: Rusty Russell <rusty@rustcorp.com.au>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#include <asm/barrier.h>
/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};
/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, includes new host vrings
 *	callbacks: array of driver callbacks, for each host vring
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);

struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};
/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	/* Inclusive range of guest addresses covered by this region. */
	u64 start, end_incl;
	/* Added to a guest address to reach the host-side mapping. */
	u64 offset;
};
/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};
/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/* Flag on max_num to indicate we're kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000
/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc __user *desc,
		     struct vring_avail __user *avail,
		     struct vring_used __user *used);
  103. static inline void vringh_iov_init(struct vringh_iov *iov,
  104. struct iovec *iovec, unsigned num)
  105. {
  106. iov->used = iov->i = 0;
  107. iov->consumed = 0;
  108. iov->max_num = num;
  109. iov->iov = iovec;
  110. }
  111. static inline void vringh_iov_reset(struct vringh_iov *iov)
  112. {
  113. iov->iov[iov->i].iov_len += iov->consumed;
  114. iov->iov[iov->i].iov_base -= iov->consumed;
  115. iov->consumed = 0;
  116. iov->i = 0;
  117. }
  118. static inline void vringh_iov_cleanup(struct vringh_iov *iov)
  119. {
  120. if (iov->max_num & VRINGH_IOV_ALLOCATED)
  121. kfree(iov->iov);
  122. iov->max_num = iov->used = iov->i = iov->consumed = 0;
  123. iov->iov = NULL;
  124. }
/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);

/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);
  154. static inline void vringh_kiov_init(struct vringh_kiov *kiov,
  155. struct kvec *kvec, unsigned num)
  156. {
  157. kiov->used = kiov->i = 0;
  158. kiov->consumed = 0;
  159. kiov->max_num = num;
  160. kiov->iov = kvec;
  161. }
  162. static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
  163. {
  164. kiov->iov[kiov->i].iov_len += kiov->consumed;
  165. kiov->iov[kiov->i].iov_base -= kiov->consumed;
  166. kiov->consumed = 0;
  167. kiov->i = 0;
  168. }
  169. static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
  170. {
  171. if (kiov->max_num & VRINGH_IOV_ALLOCATED)
  172. kfree(kiov->iov);
  173. kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
  174. kiov->iov = NULL;
  175. }
/* Convert a descriptor into kvecs. */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

/* Copy bytes from readable kvecs, consuming them (and incrementing riov->i). */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);

/* Copy bytes into writable kvecs, consuming them (and incrementing wiov->i). */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);

/* Pretend we've never seen descriptor (for easy error handling). */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);

/* Mark a descriptor as used. */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

/* Do we need to notify the other side? */
int vringh_need_notify_kern(struct vringh *vrh);
  189. /* Notify the guest about buffers added to the used ring */
  190. static inline void vringh_notify(struct vringh *vrh)
  191. {
  192. if (vrh->notify)
  193. vrh->notify(vrh);
  194. }
  195. static inline bool vringh_is_little_endian(const struct vringh *vrh)
  196. {
  197. return vrh->little_endian ||
  198. virtio_legacy_is_little_endian();
  199. }
  200. static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
  201. {
  202. return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
  203. }
  204. static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
  205. {
  206. return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
  207. }
  208. static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
  209. {
  210. return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
  211. }
  212. static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
  213. {
  214. return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
  215. }
  216. static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
  217. {
  218. return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
  219. }
  220. static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
  221. {
  222. return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
  223. }
#endif /* _LINUX_VRINGH_H */