virtio_pci_legacy.c

/*
 * Virtio PCI driver - legacy device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* When someone needs more than 32 feature bits, we'll need to
         * steal a bit to indicate that the rest are somewhere else. */
        return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}
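
/*
 * Usage sketch (illustrative, not part of the original file): drivers
 * normally consume the result through virtio_has_feature(); e.g. a
 * virtio-net driver calls
 *
 *        virtio_has_feature(vdev, VIRTIO_NET_F_MAC)
 *
 * to learn whether the device supplies a MAC address in its config space.
 */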

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        /* Make sure we don't have any features > 32 bits! */
        BUG_ON((u32)vdev->features != vdev->features);

        /* We only support 32 feature bits. */
        iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

        return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
                   void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ioaddr +
                               VIRTIO_PCI_CONFIG(vp_dev) + offset;
        u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                ptr[i] = ioread8(ioaddr + i);
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
                   const void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ioaddr +
                               VIRTIO_PCI_CONFIG(vp_dev) + offset;
        const u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                iowrite8(ptr[i], ioaddr + i);
}
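
/*
 * Usage sketch (illustrative): the byte-wise loops mean a multi-byte
 * config field becomes N single-byte port accesses. A virtio-net driver
 * fetching its 6-byte MAC ends up in vp_get() as six ioread8() calls:
 *
 *        u8 mac[6];
 *
 *        vdev->config->get(vdev, offsetof(struct virtio_net_config, mac),
 *                          mac, sizeof(mac));
 */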

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* We should never be setting status to 0. */
        BUG_ON(status == 0);
        iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

static void vp_reset(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* 0 status means a reset. */
        iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
        /* Flush out the status write, and flush in device writes,
         * including MSI-X interrupts, if any. */
        ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);
}

static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
        /* Setup the vector used for configuration events */
        iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
        /* Verify we had enough resources to assign the vector */
        /* Will also flush the write out to device */
        return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}
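
/*
 * Caller pattern (illustrative, mirroring the common code): the device
 * echoes the vector back on success and answers VIRTIO_MSI_NO_VECTOR
 * (0xffff) when it ran out of resources, so callers check
 *
 *        if (vp_config_vector(vp_dev, vector) == VIRTIO_MSI_NO_VECTOR)
 *                return -EBUSY;
 */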

static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  u16 msix_vec)
{
        struct virtqueue *vq;
        unsigned long size;
        u16 num;
        int err;

        /* Select the queue we're interested in */
        iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

        /* Check if queue is either not available or already active. */
        num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
        if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
                return ERR_PTR(-ENOENT);

        info->num = num;
        info->msix_vector = msix_vec;

        size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
        info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO);
        if (info->queue == NULL)
                return ERR_PTR(-ENOMEM);
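
        /*
         * Sizing example (illustrative, num == 256): vring_size() adds up
         * the descriptor table (16 bytes per entry), the avail ring, and
         * the used ring rounded up to VIRTIO_PCI_VRING_ALIGN; PAGE_ALIGN
         * then pads the total to whole pages for alloc_pages_exact().
         */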

        /* activate the queue */
        iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
                  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
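
        /*
         * Worked example (illustrative address): VIRTIO_PCI_QUEUE_ADDR_SHIFT
         * is 12, so a ring at physical address 0x12345000 is published to
         * the device as page frame number 0x12345; the legacy layout fixes
         * the unit at 4 KiB regardless of the host page size.
         */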

        /* create the vring */
        vq = vring_new_virtqueue(index, info->num,
                                 VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
                                 true, info->queue, vp_notify, callback, name);
        if (!vq) {
                err = -ENOMEM;
                goto out_activate_queue;
        }

        vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
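
        /*
         * vq->priv caches the notify port: vp_notify() in the common code
         * kicks the device with iowrite16(vq->index, vq->priv).
         */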

        if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
                iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
                        err = -EBUSY;
                        goto out_assign;
                }
        }

        return vq;

out_assign:
        vring_del_virtqueue(vq);
out_activate_queue:
        iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
        free_pages_exact(info->queue, size);
        return ERR_PTR(err);
}

static void del_vq(struct virtio_pci_vq_info *info)
{
        struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        unsigned long size;

        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

        if (vp_dev->msix_enabled) {
                iowrite16(VIRTIO_MSI_NO_VECTOR,
                          vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                /* Flush the write out to device */
                ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
        }

        vring_del_virtqueue(vq);

        /* Select and deactivate the queue */
        iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

        size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN));
        free_pages_exact(info->queue, size);
}

static const struct virtio_config_ops virtio_pci_config_ops = {
        .get               = vp_get,
        .set               = vp_set,
        .get_status        = vp_get_status,
        .set_status        = vp_set_status,
        .reset             = vp_reset,
        .find_vqs          = vp_find_vqs,
        .del_vqs           = vp_del_vqs,
        .get_features      = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name          = vp_bus_name,
        .set_vq_affinity   = vp_set_vq_affinity,
};
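
/*
 * Note: .find_vqs, .del_vqs, .bus_name and .set_vq_affinity are the shared
 * implementations from virtio_pci_common.c; this file only supplies the
 * legacy register accessors they call back into via setup_vq()/del_vq().
 */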

/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
        struct pci_dev *pci_dev = vp_dev->pci_dev;
        int rc;

        /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
                return -ENODEV;
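
        /*
         * Within that range, e.g., 0x1000 is the transitional virtio-net
         * device and 0x1001 transitional virtio-block; all of them share
         * PCI vendor id 0x1af4.
         */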

        if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
                printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
                       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
                return -ENODEV;
        }

        rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
        if (rc)
                return rc;

        rc = -ENOMEM;
        vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
        if (!vp_dev->ioaddr)
                goto err_iomap;

        vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

        /* we use the subsystem vendor/device id as the virtio vendor/device
         * id.  this allows us to use the same PCI vendor/device id for all
         * virtio devices and to identify the particular virtio driver by
         * the subsystem ids */
        vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
        vp_dev->vdev.id.device = pci_dev->subsystem_device;
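
        /*
         * e.g. a transitional virtio-net device carries subsystem device
         * id VIRTIO_ID_NET (1); virtio bus matching keys on this rather
         * than on the PCI device id.
         */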

        vp_dev->vdev.config = &virtio_pci_config_ops;

        vp_dev->config_vector = vp_config_vector;
        vp_dev->setup_vq = setup_vq;
        vp_dev->del_vq = del_vq;

        return 0;

err_iomap:
        pci_release_region(pci_dev, 0);
        return rc;
}

void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
        struct pci_dev *pci_dev = vp_dev->pci_dev;

        pci_iounmap(pci_dev, vp_dev->ioaddr);
        pci_release_region(pci_dev, 0);
}