vmwgfx_context.c

/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */
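
/**
 * vmw_context_cotables_unref - Clear and unreference a context's cotables.
 *
 * @uctx: Pointer to the user context.
 *
 * Each cotable pointer is cleared under the cotable_lock, and the
 * reference is dropped outside the lock.
 */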
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;

	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}
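
/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts this tears down the command buffer
 * resource manager, kills the bindings and calls the resource's destroy
 * callback. For legacy contexts it emits a SVGA_3D_CMD_CONTEXT_DESTROY
 * command to the device.
 */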
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}
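
/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a GB context.
 * @res: Pointer to the already allocated context resource.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 *
 * Sets up the backup size, command buffer resource manager, binding state
 * and, for DX contexts, the cotables.
 */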
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (unlikely(uctx->cotables[i] == NULL)) {
				ret = -ENOMEM;
				goto out_cotables;
			}
		}
	}

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_cotables:
	vmw_context_cotables_unref(uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}
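
/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the already allocated context resource.
 * @res_free: Destructor to call on error, or NULL to use kfree().
 * @dx: Whether this is a DX context.
 *
 * On guest-backed devices this defers to vmw_gb_context_init(). Otherwise
 * a legacy context is defined immediately in the device FIFO.
 */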
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

/*
 * GB context.
 */
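
/**
 * vmw_gb_context_create - Allocate a device id and define the GB context.
 *
 * @res: Pointer to the context resource.
 */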
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
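
/**
 * vmw_gb_context_bind - Bind the GB context to its backup MOB.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the MOB backing the context.
 */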
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
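
/**
 * vmw_gb_context_unbind - Scrub bindings and unbind the context from its MOB.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: Validation buffer holding the MOB backing the context.
 *
 * The backup buffer is fenced before returning.
 */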
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
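
/**
 * vmw_gb_context_destroy - Destroy the GB context and release its device id.
 *
 * @res: Pointer to the context resource.
 */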
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */
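
/**
 * vmw_dx_context_create - Allocate a device id and define the DX context.
 *
 * @res: Pointer to the context resource.
 */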
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
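
/**
 * vmw_dx_context_bind - Bind the DX context to its backup MOB.
 *
 * @res: Pointer to the context resource.
 * @val_buf: Validation buffer holding the MOB backing the context.
 */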
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * Cotables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);

		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}
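
/**
 * vmw_dx_context_unbind - Scrub cotables and unbind the DX context
 * from its MOB.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to read back context and query state before unbinding.
 * @val_buf: Validation buffer holding the MOB backing the context.
 *
 * The backup buffer is fenced before returning.
 */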
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}

	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */
	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
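
/**
 * vmw_dx_context_destroy - Destroy the DX context and release its device id.
 *
 * @res: Pointer to the context resource.
 */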
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}
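
/**
 * vmw_user_context_free - Resource destructor for user contexts.
 *
 * @res: Pointer to the context resource.
 *
 * Frees the binding state, drops any query MOB association and releases
 * the accounted memory.
 */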
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}
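
/**
 * vmw_context_define - Create a context resource and its user-space handle.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg used to return the handle.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Backend for the context define ioctls.
 */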
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!dev_priv->has_dx && dx) {
		DRM_ERROR("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */
	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */
	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}
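
/**
 * vmw_context_res_man - Return the context's command buffer resource manager.
 *
 * @ctx: The context resource.
 */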
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}
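
/**
 * vmw_context_cotable - Return a reference to one of the context's cotables.
 *
 * @ctx: The context resource.
 * @cotable_type: Which cotable to return.
 *
 * Returns an ERR_PTR on an out-of-range cotable type.
 */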
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return vmw_resource_reference
		(container_of(ctx, struct vmw_user_context, res)->
		 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_dma_buffer *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_dmabuf_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_dmabuf_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}