/*
 * VFIO API definition
 *
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _UAPIVFIO_H
#define _UAPIVFIO_H

#include <linux/types.h>
#include <linux/ioctl.h>

#define VFIO_API_VERSION 0

/* Kernel & User level defines for VFIO IOCTLs. */

/* Extensions */

#define VFIO_TYPE1_IOMMU 1
#define VFIO_SPAPR_TCE_IOMMU 2
#define VFIO_TYPE1v2_IOMMU 3

/*
 * IOMMU enforces DMA cache coherence (ex. PCIe NoSnoop stripping). This
 * capability is subject to change as groups are added or removed.
 */
#define VFIO_DMA_CC_IOMMU 4

/* Check if EEH is supported */
#define VFIO_EEH 5

/* Two-stage IOMMU */
#define VFIO_TYPE1_NESTING_IOMMU 6	/* Implies v2 */

#define VFIO_SPAPR_TCE_v2_IOMMU 7

/*
 * The IOCTL interface is designed for extensibility by embedding the
 * structure length (argsz) and flags into structures passed between
 * kernel and userspace. We therefore use the _IO() macro for these
 * defines to avoid implicitly embedding a size into the ioctl request.
 * As structure fields are added, argsz will increase to match and flag
 * bits will be defined to indicate additional fields with valid data.
 * It's *always* the caller's responsibility to indicate the size of
 * the structure passed by setting argsz appropriately.
 */
#define VFIO_TYPE (';')
#define VFIO_BASE 100

/* -------- IOCTLs for VFIO file descriptor (/dev/vfio/vfio) -------- */

/**
 * VFIO_GET_API_VERSION - _IO(VFIO_TYPE, VFIO_BASE + 0)
 *
 * Report the version of the VFIO API. This allows us to bump the entire
 * API version should we later need to add or change features in incompatible
 * ways.
 * Return: VFIO_API_VERSION
 * Availability: Always
 */
#define VFIO_GET_API_VERSION _IO(VFIO_TYPE, VFIO_BASE + 0)

/**
 * VFIO_CHECK_EXTENSION - _IOW(VFIO_TYPE, VFIO_BASE + 1, __u32)
 *
 * Check whether an extension is supported.
 * Return: 0 if not supported, 1 (or some other positive integer) if supported.
 * Availability: Always
 */
#define VFIO_CHECK_EXTENSION _IO(VFIO_TYPE, VFIO_BASE + 1)

/**
 * VFIO_SET_IOMMU - _IOW(VFIO_TYPE, VFIO_BASE + 2, __s32)
 *
 * Set the iommu to the given type. The type must be supported by an
 * iommu driver as verified by calling CHECK_EXTENSION using the same
 * type. A group must be set to this file descriptor before this
 * ioctl is available. The IOMMU interfaces enabled by this call are
 * specific to the value set.
 * Return: 0 on success, -errno on failure
 * Availability: When VFIO group attached
 */
#define VFIO_SET_IOMMU _IO(VFIO_TYPE, VFIO_BASE + 2)
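
/*
 * Illustrative sketch (not part of this header): basic container setup,
 * assuming 'container' is an fd obtained by opening /dev/vfio/vfio.
 * VFIO_SET_IOMMU only becomes available once a group has been attached
 * with VFIO_GROUP_SET_CONTAINER (see below).
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;	// unknown API version
 *
 *	if (!ioctl(container, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU))
 *		return -1;	// Type1 IOMMU not supported
 */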

/* -------- IOCTLs for GROUP file descriptors (/dev/vfio/$GROUP) -------- */

/**
 * VFIO_GROUP_GET_STATUS - _IOR(VFIO_TYPE, VFIO_BASE + 3,
 *					struct vfio_group_status)
 *
 * Retrieve information about the group. Fills in provided
 * struct vfio_group_status. Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
struct vfio_group_status {
	__u32 argsz;
	__u32 flags;
#define VFIO_GROUP_FLAGS_VIABLE (1 << 0)
#define VFIO_GROUP_FLAGS_CONTAINER_SET (1 << 1)
};
#define VFIO_GROUP_GET_STATUS _IO(VFIO_TYPE, VFIO_BASE + 3)
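
/*
 * Illustrative sketch (not part of this header): checking that a group is
 * viable before using it. 'group' is assumed to be an fd obtained by
 * opening /dev/vfio/$GROUP (e.g. /dev/vfio/26).
 *
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;	// not all devices in the group are bound to vfio
 */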

/**
 * VFIO_GROUP_SET_CONTAINER - _IOW(VFIO_TYPE, VFIO_BASE + 4, __s32)
 *
 * Set the container for the VFIO group to the open VFIO file
 * descriptor provided. Groups may only belong to a single
 * container. Containers may, at their discretion, support multiple
 * groups. Only when a container is set are all of the interfaces
 * of the VFIO file descriptor and the VFIO group file descriptor
 * available to the user.
 * Return: 0 on success, -errno on failure.
 * Availability: Always
 */
#define VFIO_GROUP_SET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 4)
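
/*
 * Illustrative sketch (not part of this header): attaching a group to a
 * container and then selecting the IOMMU model. 'container' and 'group'
 * are assumed to be the fds from the earlier sketches.
 *
 *	if (ioctl(group, VFIO_GROUP_SET_CONTAINER, &container))
 *		return -1;
 *
 *	if (ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU))
 *		return -1;
 */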

/**
 * VFIO_GROUP_UNSET_CONTAINER - _IO(VFIO_TYPE, VFIO_BASE + 5)
 *
 * Remove the group from the attached container. This is the
 * opposite of the SET_CONTAINER call and returns the group to
 * an initial state. All device file descriptors must be released
 * prior to calling this interface. When removing the last group
 * from a container, the IOMMU will be disabled and all state lost,
 * effectively also returning the VFIO file descriptor to an initial
 * state.
 * Return: 0 on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_UNSET_CONTAINER _IO(VFIO_TYPE, VFIO_BASE + 5)

/**
 * VFIO_GROUP_GET_DEVICE_FD - _IOW(VFIO_TYPE, VFIO_BASE + 6, char)
 *
 * Return a new file descriptor for the device object described by
 * the provided string. The string should match a device listed in
 * the devices subdirectory of the IOMMU group sysfs entry. The
 * group containing the device must already be added to this context.
 * Return: new file descriptor on success, -errno on failure.
 * Availability: When attached to container
 */
#define VFIO_GROUP_GET_DEVICE_FD _IO(VFIO_TYPE, VFIO_BASE + 6)
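
/*
 * Illustrative sketch (not part of this header): obtaining a device fd.
 * The device name string (a hypothetical PCI address here) must match an
 * entry under /sys/kernel/iommu_groups/$GROUP/devices/.
 *
 *	int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 *	if (device < 0)
 *		return -1;
 */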

/* --------------- IOCTLs for DEVICE file descriptors --------------- */

/**
 * VFIO_DEVICE_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 7,
 *					struct vfio_device_info)
 *
 * Retrieve information about the device. Fills in provided
 * struct vfio_device_info. Caller sets argsz.
 * Return: 0 on success, -errno on failure.
 */
struct vfio_device_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_DEVICE_FLAGS_RESET (1 << 0)	/* Device supports reset */
#define VFIO_DEVICE_FLAGS_PCI (1 << 1)		/* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2)	/* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3)		/* vfio-amba device */
	__u32 num_regions;	/* Max region index + 1 */
	__u32 num_irqs;		/* Max IRQ index + 1 */
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
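
/*
 * Illustrative sketch (not part of this header): querying basic device
 * information from the device fd obtained above.
 *
 *	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
 *
 *	if (ioctl(device, VFIO_DEVICE_GET_INFO, &device_info))
 *		return -1;
 *	// device_info.num_regions and device_info.num_irqs bound the
 *	// indexes usable with the GET_REGION_INFO/GET_IRQ_INFO ioctls below.
 */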

/**
 * VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
 *				       struct vfio_region_info)
 *
 * Retrieve information about a device region. Caller provides
 * struct vfio_region_info with index value set. Caller sets argsz.
 * Implementation of region mapping is bus driver specific. This is
 * intended to describe MMIO, I/O port, as well as bus specific
 * regions (ex. PCI config space). Zero sized regions may be used
 * to describe unimplemented regions (ex. unimplemented PCI BARs).
 * Return: 0 on success, -errno on failure.
 */
struct vfio_region_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_REGION_INFO_FLAG_READ (1 << 0)	/* Region supports read */
#define VFIO_REGION_INFO_FLAG_WRITE (1 << 1)	/* Region supports write */
#define VFIO_REGION_INFO_FLAG_MMAP (1 << 2)	/* Region supports mmap */
	__u32 index;	/* Region index */
	__u32 resv;	/* Reserved for alignment */
	__u64 size;	/* Region size (bytes) */
	__u64 offset;	/* Region offset from start of device fd */
};
#define VFIO_DEVICE_GET_REGION_INFO _IO(VFIO_TYPE, VFIO_BASE + 8)
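
/*
 * Illustrative sketch (not part of this header): describing and mapping a
 * region of a vfio-pci device (BAR0 is used here purely as an example).
 *
 *	struct vfio_region_info reg = {
 *		.argsz = sizeof(reg),
 *		.index = VFIO_PCI_BAR0_REGION_INDEX,
 *	};
 *
 *	if (ioctl(device, VFIO_DEVICE_GET_REGION_INFO, &reg))
 *		return -1;
 *
 *	if (reg.flags & VFIO_REGION_INFO_FLAG_MMAP) {
 *		void *bar = mmap(NULL, reg.size, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, device, reg.offset);
 *		// otherwise access the region with pread()/pwrite() at
 *		// reg.offset, subject to the READ/WRITE flags.
 *	}
 */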

/**
 * VFIO_DEVICE_GET_IRQ_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 9,
 *				    struct vfio_irq_info)
 *
 * Retrieve information about a device IRQ. Caller provides
 * struct vfio_irq_info with index value set. Caller sets argsz.
 * Implementation of IRQ mapping is bus driver specific. Indexes
 * using multiple IRQs are primarily intended to support MSI-like
 * interrupt blocks. Zero count irq blocks may be used to describe
 * unimplemented interrupt types.
 *
 * The EVENTFD flag indicates the interrupt index supports eventfd based
 * signaling.
 *
 * The MASKABLE flag indicates the index supports MASK and UNMASK
 * actions described below.
 *
 * AUTOMASKED indicates that after signaling, the interrupt line is
 * automatically masked by VFIO and the user needs to unmask the line
 * to receive new interrupts. This is primarily intended to distinguish
 * level triggered interrupts.
 *
 * The NORESIZE flag indicates that the interrupt lines within the index
 * are set up as a set and new subindexes cannot be enabled without first
 * disabling the entire index. This is used for interrupts like PCI MSI
 * and MSI-X where the driver may only use a subset of the available
 * indexes, but VFIO needs to enable a specific number of vectors
 * upfront. In the case of MSI-X, where the user can enable MSI-X and
 * then add and unmask vectors, it's up to userspace to make the decision
 * whether to allocate the maximum supported number of vectors or tear
 * down setup and incrementally increase the vectors as each is enabled.
 */
struct vfio_irq_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_IRQ_INFO_EVENTFD (1 << 0)
#define VFIO_IRQ_INFO_MASKABLE (1 << 1)
#define VFIO_IRQ_INFO_AUTOMASKED (1 << 2)
#define VFIO_IRQ_INFO_NORESIZE (1 << 3)
	__u32 index;	/* IRQ index */
	__u32 count;	/* Number of IRQs within this index */
};
#define VFIO_DEVICE_GET_IRQ_INFO _IO(VFIO_TYPE, VFIO_BASE + 9)
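
/*
 * Illustrative sketch (not part of this header): checking how many MSI
 * vectors a vfio-pci device exposes and whether they can be driven by
 * eventfds.
 *
 *	struct vfio_irq_info irq = {
 *		.argsz = sizeof(irq),
 *		.index = VFIO_PCI_MSI_IRQ_INDEX,
 *	};
 *
 *	if (ioctl(device, VFIO_DEVICE_GET_IRQ_INFO, &irq))
 *		return -1;
 *	// irq.count is the number of MSI vectors; a count of zero means MSI
 *	// is unimplemented. Check irq.flags for VFIO_IRQ_INFO_EVENTFD.
 */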

/**
 * VFIO_DEVICE_SET_IRQS - _IOW(VFIO_TYPE, VFIO_BASE + 10, struct vfio_irq_set)
 *
 * Set signaling, masking, and unmasking of interrupts. Caller provides
 * struct vfio_irq_set with all fields set. 'start' and 'count' indicate
 * the range of subindexes being specified.
 *
 * The DATA flags specify the type of data provided. If DATA_NONE, the
 * operation performs the specified action immediately on the specified
 * interrupt(s). For example, to unmask AUTOMASKED interrupt [0,0]:
 * flags = (DATA_NONE|ACTION_UNMASK), index = 0, start = 0, count = 1.
 *
 * DATA_BOOL allows sparse support for the same on arrays of interrupts.
 * For example, to mask interrupts [0,1] and [0,3] (but not [0,2]):
 * flags = (DATA_BOOL|ACTION_MASK), index = 0, start = 1, count = 3,
 * data = {1,0,1}
 *
 * DATA_EVENTFD binds the specified ACTION to the provided __s32 eventfd.
 * A value of -1 can be used to either de-assign interrupts if already
 * assigned or skip un-assigned interrupts. For example, to set an eventfd
 * to be triggered for interrupts [0,0] and [0,2]:
 * flags = (DATA_EVENTFD|ACTION_TRIGGER), index = 0, start = 0, count = 3,
 * data = {fd1, -1, fd2}
 * If index [0,1] is previously set, two count = 1 ioctl calls would be
 * required to set [0,0] and [0,2] without changing [0,1].
 *
 * Once a signaling mechanism is set, DATA_BOOL or DATA_NONE can be used
 * with ACTION_TRIGGER to perform kernel level interrupt loopback testing
 * from userspace (ie. simulate hardware triggering).
 *
 * Setting an event triggering mechanism to userspace for ACTION_TRIGGER
 * enables the interrupt index for the device. Individual subindex interrupts
 * can be disabled using the -1 value for DATA_EVENTFD or the index can be
 * disabled as a whole with: flags = (DATA_NONE|ACTION_TRIGGER), count = 0.
 *
 * Note that ACTION_[UN]MASK specify user->kernel signaling (irqfds) while
 * ACTION_TRIGGER specifies kernel->user signaling.
 */
struct vfio_irq_set {
	__u32 argsz;
	__u32 flags;
#define VFIO_IRQ_SET_DATA_NONE (1 << 0)		/* Data not present */
#define VFIO_IRQ_SET_DATA_BOOL (1 << 1)		/* Data is bool (u8) */
#define VFIO_IRQ_SET_DATA_EVENTFD (1 << 2)	/* Data is eventfd (s32) */
#define VFIO_IRQ_SET_ACTION_MASK (1 << 3)	/* Mask interrupt */
#define VFIO_IRQ_SET_ACTION_UNMASK (1 << 4)	/* Unmask interrupt */
#define VFIO_IRQ_SET_ACTION_TRIGGER (1 << 5)	/* Trigger interrupt */
	__u32 index;
	__u32 start;
	__u32 count;
	__u8 data[];
};
#define VFIO_DEVICE_SET_IRQS _IO(VFIO_TYPE, VFIO_BASE + 10)

#define VFIO_IRQ_SET_DATA_TYPE_MASK	(VFIO_IRQ_SET_DATA_NONE | \
					 VFIO_IRQ_SET_DATA_BOOL | \
					 VFIO_IRQ_SET_DATA_EVENTFD)
#define VFIO_IRQ_SET_ACTION_TYPE_MASK	(VFIO_IRQ_SET_ACTION_MASK | \
					 VFIO_IRQ_SET_ACTION_UNMASK | \
					 VFIO_IRQ_SET_ACTION_TRIGGER)
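
/*
 * Illustrative sketch (not part of this header): wiring one MSI vector to
 * an eventfd with DATA_EVENTFD/ACTION_TRIGGER. The eventfd data follows
 * the header in the flexible 'data' array, so the buffer must be sized
 * and argsz set accordingly.
 *
 *	int efd = eventfd(0, 0);
 *	size_t sz = sizeof(struct vfio_irq_set) + sizeof(__s32);
 *	struct vfio_irq_set *set = malloc(sz);
 *
 *	set->argsz = sz;
 *	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 *	set->index = VFIO_PCI_MSI_IRQ_INDEX;
 *	set->start = 0;
 *	set->count = 1;
 *	memcpy(set->data, &efd, sizeof(__s32));
 *
 *	if (ioctl(device, VFIO_DEVICE_SET_IRQS, set))
 *		return -1;
 *	// Interrupts now arrive as reads of 8-byte counters on 'efd'.
 */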

/**
 * VFIO_DEVICE_RESET - _IO(VFIO_TYPE, VFIO_BASE + 11)
 *
 * Reset a device.
 */
#define VFIO_DEVICE_RESET _IO(VFIO_TYPE, VFIO_BASE + 11)

/*
 * The VFIO-PCI bus driver makes use of the following fixed region and
 * IRQ index mapping. Unimplemented regions return a size of zero.
 * Unimplemented IRQ types return a count of zero.
 */
enum {
	VFIO_PCI_BAR0_REGION_INDEX,
	VFIO_PCI_BAR1_REGION_INDEX,
	VFIO_PCI_BAR2_REGION_INDEX,
	VFIO_PCI_BAR3_REGION_INDEX,
	VFIO_PCI_BAR4_REGION_INDEX,
	VFIO_PCI_BAR5_REGION_INDEX,
	VFIO_PCI_ROM_REGION_INDEX,
	VFIO_PCI_CONFIG_REGION_INDEX,
	/*
	 * Expose VGA regions defined for PCI base class 03, subclass 00.
	 * This includes I/O port ranges 0x3b0 to 0x3bb and 0x3c0 to 0x3df
	 * as well as the MMIO range 0xa0000 to 0xbffff. Each implemented
	 * range is found at its identity mapped offset from the region
	 * offset, for example 0x3b0 is region_info.offset + 0x3b0. Areas
	 * between described ranges are unimplemented.
	 */
	VFIO_PCI_VGA_REGION_INDEX,
	VFIO_PCI_NUM_REGIONS
};

enum {
	VFIO_PCI_INTX_IRQ_INDEX,
	VFIO_PCI_MSI_IRQ_INDEX,
	VFIO_PCI_MSIX_IRQ_INDEX,
	VFIO_PCI_ERR_IRQ_INDEX,
	VFIO_PCI_REQ_IRQ_INDEX,
	VFIO_PCI_NUM_IRQS
};

/**
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
 *					      struct vfio_pci_hot_reset_info)
 *
 * Return: 0 on success, -errno on failure:
 *	-ENOSPC = insufficient buffer, -ENODEV = unsupported for device.
 */
struct vfio_pci_dependent_device {
	__u32 group_id;
	__u16 segment;
	__u8 bus;
	__u8 devfn;	/* Use PCI_SLOT/PCI_FUNC */
};

struct vfio_pci_hot_reset_info {
	__u32 argsz;
	__u32 flags;
	__u32 count;
	struct vfio_pci_dependent_device devices[];
};
#define VFIO_DEVICE_GET_PCI_HOT_RESET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)

/**
 * VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *				    struct vfio_pci_hot_reset)
 *
 * Return: 0 on success, -errno on failure.
 */
struct vfio_pci_hot_reset {
	__u32 argsz;
	__u32 flags;
	__u32 count;
	__s32 group_fds[];
};
#define VFIO_DEVICE_PCI_HOT_RESET _IO(VFIO_TYPE, VFIO_BASE + 13)
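
/*
 * Illustrative sketch (not part of this header): performing a hot reset.
 * Userspace first queries the dependent devices with
 * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO (retrying with a larger buffer on
 * -ENOSPC), opens the affected groups, then passes their fds in the
 * flexible group_fds array. This is only an outline of the calling
 * convention, assuming a single affected group whose fd is 'group'.
 *
 *	size_t sz = sizeof(struct vfio_pci_hot_reset) + sizeof(__s32);
 *	struct vfio_pci_hot_reset *reset = calloc(1, sz);
 *
 *	reset->argsz = sz;
 *	reset->count = 1;
 *	reset->group_fds[0] = group;
 *
 *	if (ioctl(device, VFIO_DEVICE_PCI_HOT_RESET, reset))
 *		return -1;
 */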

/* -------- API for Type1 VFIO IOMMU -------- */

/**
 * VFIO_IOMMU_GET_INFO - _IOR(VFIO_TYPE, VFIO_BASE + 12,
 *				struct vfio_iommu_type1_info)
 *
 * Retrieve information about the IOMMU object. Fills in provided
 * struct vfio_iommu_type1_info. Caller sets argsz.
 *
 * XXX Should we do these by CHECK_EXTENSION too?
 */
struct vfio_iommu_type1_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_IOMMU_INFO_PGSIZES (1 << 0)	/* supported page sizes info */
	__u64 iova_pgsizes;	/* Bitmap of supported page sizes */
};
#define VFIO_IOMMU_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)
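
/*
 * Illustrative sketch (not part of this header): reading the supported
 * IOVA page sizes once a Type1 IOMMU has been set on the container.
 *
 *	struct vfio_iommu_type1_info iommu_info = {
 *		.argsz = sizeof(iommu_info),
 *	};
 *
 *	if (ioctl(container, VFIO_IOMMU_GET_INFO, &iommu_info))
 *		return -1;
 *	// If VFIO_IOMMU_INFO_PGSIZES is set in iommu_info.flags, each set
 *	// bit in iommu_info.iova_pgsizes is a supported mapping granule.
 */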

/**
 * VFIO_IOMMU_MAP_DMA - _IOW(VFIO_TYPE, VFIO_BASE + 13,
 *			     struct vfio_iommu_type1_dma_map)
 *
 * Map process virtual addresses to IO virtual addresses using the
 * provided struct vfio_iommu_type1_dma_map. Caller sets argsz.
 * READ and/or WRITE required.
 */
struct vfio_iommu_type1_dma_map {
	__u32 argsz;
	__u32 flags;
#define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
#define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
	__u64 vaddr;	/* Process virtual address */
	__u64 iova;	/* IO virtual address */
	__u64 size;	/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_MAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 13)
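
/*
 * Illustrative sketch (not part of this header): mapping 1MB of anonymous
 * memory at IOVA 0 for device DMA. The size and IOVA are arbitrary example
 * values and must respect the page sizes reported by VFIO_IOMMU_GET_INFO.
 *
 *	struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };
 *
 *	void *buf = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 *	dma_map.vaddr = (__u64)(unsigned long)buf;
 *	dma_map.iova = 0;
 *	dma_map.size = 1024 * 1024;
 *
 *	if (ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map))
 *		return -1;
 */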

/**
 * VFIO_IOMMU_UNMAP_DMA - _IOWR(VFIO_TYPE, VFIO_BASE + 14,
 *			       struct vfio_iommu_type1_dma_unmap)
 *
 * Unmap IO virtual addresses using the provided struct
 * vfio_iommu_type1_dma_unmap. Caller sets argsz. The actual unmapped size
 * is returned in the size field. No guarantee is made to the user that
 * arbitrary unmaps of iova or size different from those used in the
 * original mapping call will succeed.
 */
struct vfio_iommu_type1_dma_unmap {
	__u32 argsz;
	__u32 flags;
	__u64 iova;	/* IO virtual address */
	__u64 size;	/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_UNMAP_DMA _IO(VFIO_TYPE, VFIO_BASE + 14)
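
/*
 * Illustrative sketch (not part of this header): tearing down the mapping
 * created in the previous sketch. On return, dma_unmap.size holds the
 * number of bytes actually unmapped.
 *
 *	struct vfio_iommu_type1_dma_unmap dma_unmap = {
 *		.argsz = sizeof(dma_unmap),
 *		.iova = 0,
 *		.size = 1024 * 1024,
 *	};
 *
 *	if (ioctl(container, VFIO_IOMMU_UNMAP_DMA, &dma_unmap))
 *		return -1;
 */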

/*
 * IOCTLs to enable/disable IOMMU container usage.
 * No parameters are supported.
 */
#define VFIO_IOMMU_ENABLE _IO(VFIO_TYPE, VFIO_BASE + 15)
#define VFIO_IOMMU_DISABLE _IO(VFIO_TYPE, VFIO_BASE + 16)

/* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */

/*
 * The SPAPR TCE DDW info struct provides the information about
 * the details of Dynamic DMA window capability.
 *
 * @pgsizes contains a page size bitmask, 4K/64K/16M are supported.
 * @max_dynamic_windows_supported tells the maximum number of windows
 * which the platform can create.
 * @levels tells the maximum number of levels in multi-level IOMMU tables;
 * this allows splitting a table into smaller chunks which reduces
 * the amount of physically contiguous memory required for the table.
 */
struct vfio_iommu_spapr_tce_ddw_info {
	__u64 pgsizes;	/* Bitmap of supported page sizes */
	__u32 max_dynamic_windows_supported;
	__u32 levels;
};

/*
 * The SPAPR TCE info struct provides the information about the PCI bus
 * address ranges available for DMA; these values are programmed into
 * the hardware, so the guest has to know that information.
 *
 * The DMA 32 bit window start is an absolute PCI bus address.
 * The IOVA addresses passed via map/unmap ioctls are absolute PCI bus
 * addresses too, so the window works as a filter rather than an offset
 * for IOVA addresses.
 *
 * Flags supported:
 * - VFIO_IOMMU_SPAPR_INFO_DDW: informs the userspace that dynamic DMA windows
 *   (DDW) support is present. @ddw is only valid when DDW is present.
 */
struct vfio_iommu_spapr_tce_info {
	__u32 argsz;
	__u32 flags;
#define VFIO_IOMMU_SPAPR_INFO_DDW (1 << 0)	/* DDW supported */
	__u32 dma32_window_start;	/* 32 bit window start (bytes) */
	__u32 dma32_window_size;	/* 32 bit window size (bytes) */
	struct vfio_iommu_spapr_tce_ddw_info ddw;
};
#define VFIO_IOMMU_SPAPR_TCE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 12)

/*
 * EEH PE operation struct provides ways to:
 * - enable/disable EEH functionality;
 * - unfreeze IO/DMA for frozen PE;
 * - read PE state;
 * - reset PE;
 * - configure PE;
 * - inject EEH error.
 */
struct vfio_eeh_pe_err {
	__u32 type;
	__u32 func;
	__u64 addr;
	__u64 mask;
};

struct vfio_eeh_pe_op {
	__u32 argsz;
	__u32 flags;
	__u32 op;
	union {
		struct vfio_eeh_pe_err err;
	};
};

#define VFIO_EEH_PE_DISABLE 0		/* Disable EEH functionality */
#define VFIO_EEH_PE_ENABLE 1		/* Enable EEH functionality */
#define VFIO_EEH_PE_UNFREEZE_IO 2	/* Enable IO for frozen PE */
#define VFIO_EEH_PE_UNFREEZE_DMA 3	/* Enable DMA for frozen PE */
#define VFIO_EEH_PE_GET_STATE 4		/* PE state retrieval */
#define VFIO_EEH_PE_STATE_NORMAL 0	/* PE in functional state */
#define VFIO_EEH_PE_STATE_RESET 1	/* PE reset in progress */
#define VFIO_EEH_PE_STATE_STOPPED 2	/* Stopped DMA and IO */
#define VFIO_EEH_PE_STATE_STOPPED_DMA 4	/* Stopped DMA only */
#define VFIO_EEH_PE_STATE_UNAVAIL 5	/* State unavailable */
#define VFIO_EEH_PE_RESET_DEACTIVATE 5	/* Deassert PE reset */
#define VFIO_EEH_PE_RESET_HOT 6		/* Assert hot reset */
#define VFIO_EEH_PE_RESET_FUNDAMENTAL 7	/* Assert fundamental reset */
#define VFIO_EEH_PE_CONFIGURE 8		/* PE configuration */
#define VFIO_EEH_PE_INJECT_ERR 9	/* Inject EEH error */

#define VFIO_EEH_PE_OP _IO(VFIO_TYPE, VFIO_BASE + 21)
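
/*
 * Illustrative sketch (not part of this header): enabling EEH for the PE
 * backing a container, assuming the platform reports the VFIO_EEH
 * extension via VFIO_CHECK_EXTENSION.
 *
 *	struct vfio_eeh_pe_op pe_op = {
 *		.argsz = sizeof(pe_op),
 *		.op = VFIO_EEH_PE_ENABLE,
 *	};
 *
 *	if (ioctl(container, VFIO_EEH_PE_OP, &pe_op))
 *		return -1;
 */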

/**
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 17, struct vfio_iommu_spapr_register_memory)
 *
 * Registers user space memory where DMA is allowed. It pins
 * user pages and does the locked memory accounting so
 * subsequent VFIO_IOMMU_MAP_DMA/VFIO_IOMMU_UNMAP_DMA calls
 * get faster.
 */
struct vfio_iommu_spapr_register_memory {
	__u32 argsz;
	__u32 flags;
	__u64 vaddr;	/* Process virtual address */
	__u64 size;	/* Size of mapping (bytes) */
};
#define VFIO_IOMMU_SPAPR_REGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 17)
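
/*
 * Illustrative sketch (not part of this header): pre-registering a memory
 * region on a SPAPR TCE v2 container before issuing DMA maps. 'buf' and
 * the 1MB size are hypothetical example values.
 *
 *	struct vfio_iommu_spapr_register_memory mem = {
 *		.argsz = sizeof(mem),
 *		.vaddr = (__u64)(unsigned long)buf,
 *		.size = 1024 * 1024,
 *	};
 *
 *	if (ioctl(container, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &mem))
 *		return -1;
 */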

/**
 * VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY - _IOW(VFIO_TYPE, VFIO_BASE + 18, struct vfio_iommu_spapr_register_memory)
 *
 * Unregisters user space memory registered with
 * VFIO_IOMMU_SPAPR_REGISTER_MEMORY.
 * Uses vfio_iommu_spapr_register_memory for parameters.
 */
#define VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY _IO(VFIO_TYPE, VFIO_BASE + 18)

/**
 * VFIO_IOMMU_SPAPR_TCE_CREATE - _IOWR(VFIO_TYPE, VFIO_BASE + 19, struct vfio_iommu_spapr_tce_create)
 *
 * Creates an additional TCE table and programs it (sets a new DMA window)
 * to every IOMMU group in the container. It receives page shift, window
 * size and number of levels in the TCE table being created.
 *
 * It allocates and returns the PCI bus offset of the new DMA window.
 */
struct vfio_iommu_spapr_tce_create {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u32 page_shift;
	__u64 window_size;
	__u32 levels;
	/* out */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_CREATE _IO(VFIO_TYPE, VFIO_BASE + 19)
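
/*
 * Illustrative sketch (not part of this header): creating a 1GB, 64K-page
 * DMA window and later removing it by the returned bus offset. The window
 * geometry is a hypothetical example and must fit within the limits
 * reported in struct vfio_iommu_spapr_tce_ddw_info.
 *
 *	struct vfio_iommu_spapr_tce_create create = {
 *		.argsz = sizeof(create),
 *		.page_shift = 16,
 *		.window_size = 1ULL << 30,
 *		.levels = 1,
 *	};
 *	struct vfio_iommu_spapr_tce_remove remove = { .argsz = sizeof(remove) };
 *
 *	if (ioctl(container, VFIO_IOMMU_SPAPR_TCE_CREATE, &create))
 *		return -1;
 *	// ... use the window at PCI bus offset create.start_addr ...
 *
 *	remove.start_addr = create.start_addr;
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
 */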

/**
 * VFIO_IOMMU_SPAPR_TCE_REMOVE - _IOW(VFIO_TYPE, VFIO_BASE + 20, struct vfio_iommu_spapr_tce_remove)
 *
 * Unprograms a TCE table from all groups in the container and destroys it.
 * It receives a PCI bus offset as a window id.
 */
struct vfio_iommu_spapr_tce_remove {
	__u32 argsz;
	__u32 flags;
	/* in */
	__u64 start_addr;
};
#define VFIO_IOMMU_SPAPR_TCE_REMOVE _IO(VFIO_TYPE, VFIO_BASE + 20)

/* ***************************************************************** */

#endif /* _UAPIVFIO_H */