/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20150810"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 9
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_DXCONTEXT 256
#define VMWGFX_NUM_DXQUERY 512
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
	s32 pin_count;
	/* Not ref-counted. Protected by binding_mutex */
	struct vmw_resource *dx_query_ctx;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	unsigned long pin_count; /* Protected by resource reserved */
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};

/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	uint32_t array_size;
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
	struct list_head view_list;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	u32 *dynamic_buffer;
	u32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
	bool dx;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_display_unit_type - Describes the display unit
 */
enum vmw_display_unit_type {
	vmw_du_invalid = 0,
	vmw_du_legacy,
	vmw_du_screen_object,
	vmw_du_screen_target
};

struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct list_head ctx_resource_list; /* For contexts and cotables */
	struct vmw_dma_buffer *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct vmw_resource_val_node *dx_ctx_node;
	struct vmw_dma_buffer *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};

struct vmw_otable_batch {
	unsigned num_otables;
	struct vmw_otable *otables;
	struct vmw_resource *context;
	struct ttm_buffer_object *otable_bo;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t texture_max_width;
	uint32_t texture_max_height;
	uint32_t stdu_max_width;
	uint32_t stdu_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	u32 *mmio_virt;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;
	bool has_dx;
	bool assume_16bpp;

	/*
	 * VGA registers.
	 */
	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;
	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */
	void *fb_info;
	enum vmw_display_unit_type active_display_unit;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */
	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];

	/*
	 * Block lastclose from racing with firstopen.
	 */
	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */
	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */
	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	int cmdbuf_waiters; /* Protected by waiter_lock */
	int error_waiters; /* Protected by waiter_lock */
	int fifo_queue_waiters; /* Protected by waiter_lock */
	uint32_t last_read_seqno;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask; /* Updates protected by waiter_lock */

	/*
	 * Device state
	 */
	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */
	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */
	bool stealth;
	bool enable_fb;
	spinlock_t svga_lock;

	/**
	 * Master management.
	 */
	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;
	bool refuse_hibernation;

	struct mutex release_mutex;
	atomic_t num_fifo_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */
	struct vmw_dma_buffer *dummy_query_bo;
	struct vmw_dma_buffer *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */
	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct vmw_otable_batch otable_batch;

	struct vmw_cmdbuf_man *cman;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}
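
/*
 * Illustrative sketch of how the accessors above are used. The register
 * index SVGA_REG_ID is only an example taken from the device register
 * headers; any SVGA register index works the same way. No external locking
 * is needed, since vmw_read()/vmw_write() take hw_lock around each
 * index/value port access pair:
 *
 *	u32 id = vmw_read(dev_priv, SVGA_REG_ID);
 *
 *	vmw_write(dev_priv, SVGA_REG_ID, id);
 */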
extern void vmw_svga_enable(struct vmw_private *dev_priv);
extern void vmw_svga_disable(struct vmw_private *dev_priv);

/**
 * GMR utilities - vmwgfx_gmr.c
 */
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
				bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf,
				 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out,
				  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   bool switch_backup,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_pin_in_placement(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       struct ttm_placement *placement,
				       bool interruptible);
extern int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool interruptible);
extern int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
					 struct vmw_dma_buffer *buf,
					 bool interruptible);
extern int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *vmw_priv,
					   struct vmw_dma_buffer *bo,
					   bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */
extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
			  bool interruptible);
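
/*
 * Minimal sketch of the reserve/commit pattern used with the FIFO helpers
 * above. The command struct pointed to by cmd is a placeholder for whatever
 * device command is actually being built; vmw_fifo_reserve() hands back a
 * pointer to reserved FIFO (or bounce buffer) space, and vmw_fifo_commit()
 * makes the written bytes visible to the device:
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	(fill in *cmd ...)
 *
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */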
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */
extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}
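
/*
 * Illustrative usage sketch of the page iterator, assuming @vsgt has been
 * obtained from vmw_bo_sg_table() on a populated buffer object. The iterator
 * starts just before the requested offset, so vmw_piter_next() is called to
 * advance to each page, including the first:
 *
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (vmw_piter_next(&iter)) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		(hand addr to the device, e.g. when building GMR or MOB
 *		 page tables)
 *	}
 */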
/**
 * Command submission - vmwgfx_execbuf.c
 */
extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
			     struct drm_file *file_priv, size_t size);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       uint32_t dx_context_handle,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);
extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool interruptible,
				      bool validate_as_mob);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */
extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
				   int *waiter_count);
extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
				      u32 flag, int *waiter_count);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

/**
 * Kernel framebuffer - vmwgfx_fb.c
 */
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);
extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible);
extern void vmw_resource_unpin(struct vmw_resource *res);
extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res);

/**
 * Overlay control - vmwgfx_overlay.c
 */
int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR Id manager
 */
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */
extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * MemoryOBject management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */
extern const struct vmw_user_resource_conv *user_context_converter;
extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
					     struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);
extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
						SVGACOTableType cotable_type);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
struct vmw_ctx_binding_state;
extern struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
					  bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
				     struct vmw_dma_buffer *mob);
extern struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);

/*
 * Surface management - vmwgfx_surface.c
 */
extern const struct vmw_user_resource_conv *user_surface_converter;
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out);

/*
 * Shader management - vmwgfx_shader.c
 */
extern const struct vmw_user_resource_conv *user_shader_converter;
extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
			     u32 user_key, SVGA3dShaderType shader_type,
			     struct list_head *list);
extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man,
			     struct vmw_resource *ctx,
			     u32 user_key,
			     SVGA3dShaderType shader_type,
			     struct list_head *list);
extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv,
					     struct list_head *list,
					     bool readback);
extern struct vmw_resource *
vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man,
		  u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */
extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list,
				 struct vmw_resource **res);

/*
 * COTable management - vmwgfx_cotable.c
 */
extern const SVGACOTableType vmw_cotable_scrub_order[];
extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
					      struct vmw_resource *ctx,
					      u32 type);
extern int vmw_cotable_notify(struct vmw_resource *res, int id);
extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback);
extern void vmw_cotable_add_resource(struct vmw_resource *ctx,
				     struct list_head *head);

/*
 * Command buffer management - vmwgfx_cmdbuf.c
 */
struct vmw_cmdbuf_man;
struct vmw_cmdbuf_header;

extern struct vmw_cmdbuf_man *
vmw_cmdbuf_man_create(struct vmw_private *dev_priv);
extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
				    size_t size, size_t default_size);
extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man);
extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man);
extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
			   unsigned long timeout);
extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
				int ctx_id, bool interruptible,
				struct vmw_cmdbuf_header *header);
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
			      struct vmw_cmdbuf_header *header,
			      bool flush);
extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
			      size_t size, bool interruptible,
			      struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
				bool interruptible);

/**
 * Inline helper functions
 */
static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;

	*srf = NULL;
	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}

static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
	atomic_inc(&dev_priv->num_fifo_resources);
}

static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
{
	atomic_dec(&dev_priv->num_fifo_resources);
}

/**
 * vmw_mmio_read - Perform a MMIO read from volatile memory
 *
 * @addr: The address to read from
 *
 * This function is intended to be equivalent to ioread32() on
 * memremap'd memory, but without byteswapping.
 */
static inline u32 vmw_mmio_read(u32 *addr)
{
	return READ_ONCE(*addr);
}

/**
 * vmw_mmio_write - Perform a MMIO write to volatile memory
 *
 * @addr: The address to write to
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_mmio_write(u32 value, u32 *addr)
{
	WRITE_ONCE(*addr, value);
}

#endif