vmwgfx_drv.c

/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/module.h>
#include <linux/console.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)
#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */
#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

/**
 * Ioctl definitions.
 */
static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};
static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;
static int vmw_assume_16bpp;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO(" Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO(" Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO(" Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO(" Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO(" 8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO(" Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO(" 3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO(" Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO(" Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO(" Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO(" Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO(" Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO(" GMR.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO(" Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO(" GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO(" Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO(" Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO(" Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO(" Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO(" DX Features.\n");
}
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, false, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}
/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */
	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */
		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}
/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}
/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int ret = 0;

	ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
	if (dev_priv->map_mode != vmw_dma_phys &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
	}

	return ret;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 3;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/**
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}
static void vmw_preclose(struct drm_device *dev,
			 struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_private *dev_priv = vmw_priv(dev);

	vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	INIT_LIST_HEAD(&vmw_fp->fence_events);
	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}

static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */
	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif
static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/**
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */
	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}
/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}
static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents are moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}

static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}

static const struct dev_pm_ops vmw_pm_ops = {
	.freeze = vmw_pm_freeze,
	.thaw = vmw_pm_restore,
	.restore = vmw_pm_restore,
	.suspend = vmw_pm_suspend,
	.resume = vmw_pm_resume,
};
static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
		DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.preclose = vmw_preclose,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force())
		return -EINVAL;
#endif

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");